Diffstat
-rw-r--r--drivers/acpi/acpica/aclocal.h6
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/dbcmds.c2
-rw-r--r--drivers/acpi/acpica/dbconvert.c5
-rw-r--r--drivers/acpi/acpica/dsmethod.c6
-rw-r--r--drivers/acpi/acpica/dsobject.c3
-rw-r--r--drivers/acpi/acpica/evgpeblk.c3
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c4
-rw-r--r--drivers/acpi/acpica/exoparg3.c4
-rw-r--r--drivers/acpi/acpica/nseval.c3
-rw-r--r--drivers/acpi/acpica/nsinit.c137
-rw-r--r--drivers/acpi/acpica/psargs.c9
-rw-r--r--drivers/acpi/acpica/tbinstal.c5
-rw-r--r--drivers/acpi/acpica/tbprint.c7
-rw-r--r--drivers/acpi/acpica/tbutils.c4
-rw-r--r--drivers/acpi/acpica/tbxfload.c40
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utnonansi.c246
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c3
-rw-r--r--drivers/acpi/acpica/utxfinit.c67
-rw-r--r--drivers/acpi/apei/apei-base.c6
-rw-r--r--drivers/acpi/apei/einj.c15
-rw-r--r--drivers/acpi/apei/erst.c3
-rw-r--r--drivers/acpi/apei/ghes.c23
-rw-r--r--drivers/acpi/bgrt.c10
-rw-r--r--drivers/acpi/bus.c29
-rw-r--r--drivers/acpi/cppc_acpi.c237
-rw-r--r--drivers/acpi/ec_sys.c3
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/internal.h13
-rw-r--r--drivers/acpi/nfit.c803
-rw-r--r--drivers/acpi/nfit.h30
-rw-r--r--drivers/acpi/osl.c158
-rw-r--r--drivers/acpi/pci_irq.c29
-rw-r--r--drivers/acpi/pmic/intel_pmic_crc.c7
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/processor_idle.c66
-rw-r--r--drivers/acpi/property.c1
-rw-r--r--drivers/acpi/resource.c14
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c36
-rw-r--r--drivers/acpi/tables.c12
-rw-r--r--drivers/acpi/utils.c6
-rw-r--r--drivers/android/binder.c31
-rw-r--r--drivers/ata/Kconfig19
-rw-r--r--drivers/ata/Makefile2
-rw-r--r--drivers/ata/ahci.c108
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ahci_mvebu.c14
-rw-r--r--drivers/ata/ahci_octeon.c105
-rw-r--r--drivers/ata/ahci_platform.c4
-rw-r--r--drivers/ata/ahci_seattle.c210
-rw-r--r--drivers/ata/ahci_xgene.c4
-rw-r--r--drivers/ata/libahci.c56
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/pata_at91.c3
-rw-r--r--drivers/ata/pata_bf54x.c2
-rw-r--r--drivers/ata/pata_hpt366.c13
-rw-r--r--drivers/ata/pata_macio.c2
-rw-r--r--drivers/ata/sata_via.c133
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/base/bus.c15
-rw-r--r--drivers/base/component.c2
-rw-r--r--drivers/base/dd.c24
-rw-r--r--drivers/base/dma-coherent.c53
-rw-r--r--drivers/base/firmware_class.c87
-rw-r--r--drivers/base/memory.c34
-rw-r--r--drivers/base/power/clock_ops.c89
-rw-r--r--drivers/base/power/domain.c60
-rw-r--r--drivers/base/power/domain_governor.c64
-rw-r--r--drivers/base/power/opp/core.c1076
-rw-r--r--drivers/base/power/opp/cpu.c22
-rw-r--r--drivers/base/power/opp/debugfs.c85
-rw-r--r--drivers/base/power/opp/opp.h74
-rw-r--r--drivers/base/power/trace.c4
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/base/property.c39
-rw-r--r--drivers/base/regmap/internal.h17
-rw-r--r--drivers/base/regmap/regcache-flat.c20
-rw-r--r--drivers/base/regmap/regcache.c45
-rw-r--r--drivers/base/regmap/regmap-irq.c104
-rw-r--r--drivers/base/regmap/regmap-mmio.c262
-rw-r--r--drivers/base/regmap/regmap-spmi.c2
-rw-r--r--drivers/base/regmap/regmap.c241
-rw-r--r--drivers/bcma/Kconfig5
-rw-r--r--drivers/bcma/Makefile1
-rw-r--r--drivers/bcma/bcma_private.h19
-rw-r--r--drivers/bcma/driver_chipcommon.c46
-rw-r--r--drivers/bcma/driver_chipcommon_pflash.c49
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c94
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c1
-rw-r--r--drivers/bcma/driver_gpio.c1
-rw-r--r--drivers/bcma/driver_mips.c66
-rw-r--r--drivers/bcma/host_pci.c2
-rw-r--r--drivers/bcma/main.c19
-rw-r--r--drivers/bcma/scan.c5
-rw-r--r--drivers/block/Kconfig10
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/aoe/aoecmd.c4
-rw-r--r--drivers/block/brd.c4
-rw-r--r--drivers/block/cpqarray.c1820
-rw-r--r--drivers/block/cpqarray.h126
-rw-r--r--drivers/block/cryptoloop.c48
-rw-r--r--drivers/block/drbd/drbd_int.h20
-rw-r--r--drivers/block/drbd/drbd_main.c16
-rw-r--r--drivers/block/drbd/drbd_nl.c61
-rw-r--r--drivers/block/drbd/drbd_receiver.c56
-rw-r--r--drivers/block/drbd/drbd_worker.c43
-rw-r--r--drivers/block/ida_cmd.h349
-rw-r--r--drivers/block/ida_ioctl.h87
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c270
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h11
-rw-r--r--drivers/block/nbd.c334
-rw-r--r--drivers/block/null_blk.c3
-rw-r--r--drivers/block/paride/pd.c4
-rw-r--r--drivers/block/paride/pt.c4
-rw-r--r--drivers/block/rbd.c72
-rw-r--r--drivers/block/virtio_blk.c11
-rw-r--r--drivers/block/xen-blkback/xenbus.c20
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/bluetooth/Kconfig11
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/ath3k.c9
-rw-r--r--drivers/bluetooth/btbcm.c3
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c2
-rw-r--r--drivers/bluetooth/btusb.c4
-rw-r--r--drivers/bluetooth/hci_ag6xx.c337
-rw-r--r--drivers/bluetooth/hci_bcm.c3
-rw-r--r--drivers/bluetooth/hci_intel.c4
-rw-r--r--drivers/bluetooth/hci_ldisc.c6
-rw-r--r--drivers/bluetooth/hci_uart.h8
-rw-r--r--drivers/bus/Kconfig10
-rw-r--r--drivers/bus/arm-cci.c621
-rw-r--r--drivers/bus/imx-weim.c2
-rw-r--r--drivers/bus/mvebu-mbus.c52
-rw-r--r--drivers/bus/sunxi-rsb.c4
-rw-r--r--drivers/bus/uniphier-system-bus.c2
-rw-r--r--drivers/char/Kconfig3
-rw-r--r--drivers/char/agp/frontend.c2
-rw-r--r--drivers/char/agp/intel-gtt.c32
-rw-r--r--drivers/char/agp/uninorth-agp.c1
-rw-r--r--drivers/char/hw_random/Kconfig15
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c12
-rw-r--r--drivers/char/hw_random/exynos-rng.c10
-rw-r--r--drivers/char/hw_random/n2-drv.c10
-rw-r--r--drivers/char/hw_random/pic32-rng.c155
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c11
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c13
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/nvram.c12
-rw-r--r--drivers/char/nwbutton.c5
-rw-r--r--drivers/char/pcmcia/synclink_cs.c21
-rw-r--r--drivers/char/ppdev.c376
-rw-r--r--drivers/char/raw.c4
-rw-r--r--drivers/char/tpm/tpm-chip.c19
-rw-r--r--drivers/char/tpm/tpm.h7
-rw-r--r--drivers/char/tpm/tpm2-cmd.c22
-rw-r--r--drivers/char/tpm/tpm_crb.c196
-rw-r--r--drivers/char/tpm/tpm_eventlog.c14
-rw-r--r--drivers/char/tpm/tpm_tis.c253
-rw-r--r--drivers/char/ttyprintk.c2
-rw-r--r--drivers/char/xillybus/xillybus_core.c4
-rw-r--r--drivers/clk/Kconfig30
-rw-r--r--drivers/clk/Makefile5
-rw-r--r--drivers/clk/at91/clk-generated.c99
-rw-r--r--drivers/clk/at91/clk-h32mx.c40
-rw-r--r--drivers/clk/at91/clk-main.c330
-rw-r--r--drivers/clk/at91/clk-master.c98
-rw-r--r--drivers/clk/at91/clk-peripheral.c137
-rw-r--r--drivers/clk/at91/clk-pll.c150
-rw-r--r--drivers/clk/at91/clk-plldiv.c44
-rw-r--r--drivers/clk/at91/clk-programmable.c100
-rw-r--r--drivers/clk/at91/clk-slow.c43
-rw-r--r--drivers/clk/at91/clk-smd.c60
-rw-r--r--drivers/clk/at91/clk-system.c96
-rw-r--r--drivers/clk/at91/clk-usb.c127
-rw-r--r--drivers/clk/at91/clk-utmi.c80
-rw-r--r--drivers/clk/at91/pmc.c426
-rw-r--r--drivers/clk/at91/pmc.h98
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c4
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c47
-rw-r--r--drivers/clk/bcm/clk-cygnus.c59
-rw-r--r--drivers/clk/bcm/clk-iproc-pll.c41
-rw-r--r--drivers/clk/bcm/clk-iproc.h43
-rw-r--r--drivers/clk/clk-axi-clkgen.c170
-rw-r--r--drivers/clk/clk-composite.c2
-rw-r--r--drivers/clk/clk-divider.c18
-rw-r--r--drivers/clk/clk-efm32gg.c2
-rw-r--r--drivers/clk/clk-fixed-factor.c15
-rw-r--r--drivers/clk/clk-fixed-rate.c18
-rw-r--r--drivers/clk/clk-fractional-divider.c2
-rw-r--r--drivers/clk/clk-gate.c2
-rw-r--r--drivers/clk/clk-gpio.c163
-rw-r--r--drivers/clk/clk-max77686.c3
-rw-r--r--drivers/clk/clk-max77802.c2
-rw-r--r--drivers/clk/clk-mb86s7x.c4
-rw-r--r--drivers/clk/clk-multiplier.c2
-rw-r--r--drivers/clk/clk-mux.c2
-rw-r--r--drivers/clk/clk-palmas.c16
-rw-r--r--drivers/clk/clk-pwm.c2
-rw-r--r--drivers/clk/clk-s2mps11.c111
-rw-r--r--drivers/clk/clk-scpi.c2
-rw-r--r--drivers/clk/clk-si514.c2
-rw-r--r--drivers/clk/clk-si5351.c2
-rw-r--r--drivers/clk/clk-si570.c2
-rw-r--r--drivers/clk/clk-vt8500.c100
-rw-r--r--drivers/clk/clk-xgene.c109
-rw-r--r--drivers/clk/clk.c221
-rw-r--r--drivers/clk/clkdev.c31
-rw-r--r--drivers/clk/h8300/clk-div.c6
-rw-r--r--drivers/clk/h8300/clk-h8s2678.c4
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c18
-rw-r--r--drivers/clk/hisilicon/clk-hi6220-stub.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c26
-rw-r--r--drivers/clk/hisilicon/clk-hip04.c6
-rw-r--r--drivers/clk/hisilicon/clk-hix5hd2.c60
-rw-r--r--drivers/clk/imx/clk-busy.c4
-rw-r--r--drivers/clk/imx/clk-fixup-div.c5
-rw-r--r--drivers/clk/imx/clk-fixup-mux.c2
-rw-r--r--drivers/clk/imx/clk-gate-exclusive.c2
-rw-r--r--drivers/clk/imx/clk-imx6q.c132
-rw-r--r--drivers/clk/imx/clk-imx6ul.c65
-rw-r--r--drivers/clk/imx/clk.h2
-rw-r--r--drivers/clk/mediatek/clk-gate.c8
-rw-r--r--drivers/clk/mediatek/clk-gate.h2
-rw-r--r--drivers/clk/mediatek/clk-mtk.c10
-rw-r--r--drivers/clk/mediatek/reset.c2
-rw-r--r--drivers/clk/meson/clkc.c2
-rw-r--r--drivers/clk/mmp/reset.c2
-rw-r--r--drivers/clk/mvebu/Kconfig2
-rw-r--r--drivers/clk/mvebu/common.c13
-rw-r--r--drivers/clk/mvebu/dove-divider.c3
-rw-r--r--drivers/clk/mvebu/kirkwood.c2
-rw-r--r--drivers/clk/mxs/clk-div.c2
-rw-r--r--drivers/clk/mxs/clk.h2
-rw-r--r--drivers/clk/nxp/Makefile1
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c4
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c2
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-creg.c226
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c15
-rw-r--r--drivers/clk/pxa/clk-pxa25x.c20
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c40
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c11
-rw-r--r--drivers/clk/qcom/Kconfig8
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/clk-rcg.c4
-rw-r--r--drivers/clk/qcom/common.c30
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c1354
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c37
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c32
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c18
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c46
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c98
-rw-r--r--drivers/clk/qcom/gdsc.c89
-rw-r--r--drivers/clk/qcom/gdsc.h34
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c35
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c15
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c157
-rw-r--r--drivers/clk/qcom/reset.c2
-rw-r--r--drivers/clk/qcom/reset.h2
-rw-r--r--drivers/clk/renesas/Makefile (renamed from drivers/clk/shmobile/Makefile)0
-rw-r--r--drivers/clk/renesas/clk-div6.c (renamed from drivers/clk/shmobile/clk-div6.c)3
-rw-r--r--drivers/clk/renesas/clk-div6.h (renamed from drivers/clk/shmobile/clk-div6.h)4
-rw-r--r--drivers/clk/renesas/clk-emev2.c (renamed from drivers/clk/shmobile/clk-emev2.c)0
-rw-r--r--drivers/clk/renesas/clk-mstp.c (renamed from drivers/clk/shmobile/clk-mstp.c)2
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c (renamed from drivers/clk/shmobile/clk-r8a73a4.c)2
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c (renamed from drivers/clk/shmobile/clk-r8a7740.c)2
-rw-r--r--drivers/clk/renesas/clk-r8a7778.c (renamed from drivers/clk/shmobile/clk-r8a7778.c)2
-rw-r--r--drivers/clk/renesas/clk-r8a7779.c (renamed from drivers/clk/shmobile/clk-r8a7779.c)2
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c (renamed from drivers/clk/shmobile/clk-rcar-gen2.c)2
-rw-r--r--drivers/clk/renesas/clk-rz.c (renamed from drivers/clk/shmobile/clk-rz.c)2
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c (renamed from drivers/clk/shmobile/clk-sh73a0.c)2
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c (renamed from drivers/clk/shmobile/r8a7795-cpg-mssr.c)255
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c (renamed from drivers/clk/shmobile/renesas-cpg-mssr.c)7
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h (renamed from drivers/clk/shmobile/renesas-cpg-mssr.h)2
-rw-r--r--drivers/clk/rockchip/clk-cpu.c8
-rw-r--r--drivers/clk/rockchip/clk-inverter.c8
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c8
-rw-r--r--drivers/clk/rockchip/clk-pll.c5
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c37
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c27
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c56
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c38
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c175
-rw-r--r--drivers/clk/rockchip/clk.c68
-rw-r--r--drivers/clk/rockchip/clk.h28
-rw-r--r--drivers/clk/rockchip/softrst.c2
-rw-r--r--drivers/clk/samsung/Kconfig19
-rw-r--r--drivers/clk/samsung/Makefile4
-rw-r--r--drivers/clk/samsung/clk-exynos4.c12
-rw-r--r--drivers/clk/samsung/clk-exynos4415.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c10
-rw-r--r--drivers/clk/samsung/clk-exynos5260.c36
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c12
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c208
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c12
-rw-r--r--drivers/clk/samsung/clk-exynos7.c12
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c2
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c4
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c8
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c8
-rw-r--r--drivers/clk/samsung/clk-s5pv210.c10
-rw-r--r--drivers/clk/sirf/clk-atlas7.c2
-rw-r--r--drivers/clk/socfpga/clk-gate-a10.c6
-rw-r--r--drivers/clk/socfpga/clk-periph-a10.c7
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c2
-rw-r--r--drivers/clk/spear/spear1310_clock.c21
-rw-r--r--drivers/clk/spear/spear1340_clock.c16
-rw-r--r--drivers/clk/spear/spear3xx_clock.c8
-rw-r--r--drivers/clk/spear/spear6xx_clock.c6
-rw-r--r--drivers/clk/st/clk-flexgen.c4
-rw-r--r--drivers/clk/st/clkgen-fsyn.c8
-rw-r--r--drivers/clk/st/clkgen-mux.c13
-rw-r--r--drivers/clk/sunxi/clk-a10-hosc.c3
-rw-r--r--drivers/clk/sunxi/clk-a10-ve.c2
-rw-r--r--drivers/clk/sunxi/clk-a20-gmac.c2
-rw-r--r--drivers/clk/sunxi/clk-factors.c123
-rw-r--r--drivers/clk/sunxi/clk-factors.h26
-rw-r--r--drivers/clk/sunxi/clk-mod0.c23
-rw-r--r--drivers/clk/sunxi/clk-simple-gates.c4
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0-gates.c2
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c235
-rw-r--r--drivers/clk/sunxi/clk-sun8i-apb0.c2
-rw-r--r--drivers/clk/sunxi/clk-sun8i-bus-gates.c3
-rw-r--r--drivers/clk/sunxi/clk-sun8i-mbus.c132
-rw-r--r--drivers/clk/sunxi/clk-sun9i-core.c86
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c2
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c863
-rw-r--r--drivers/clk/sunxi/clk-usb.c26
-rw-r--r--drivers/clk/tegra/clk-audio-sync.c2
-rw-r--r--drivers/clk/tegra/clk-dfll.c1
-rw-r--r--drivers/clk/tegra/clk-tegra-fixed.c6
-rw-r--r--drivers/clk/tegra/clk-tegra114.c3
-rw-r--r--drivers/clk/tegra/clk-tegra20.c10
-rw-r--r--drivers/clk/tegra/clk.c2
-rw-r--r--drivers/clk/ti/Kconfig6
-rw-r--r--drivers/clk/ti/Makefile6
-rw-r--r--drivers/clk/ti/adpll.c983
-rw-r--r--drivers/clk/ti/apll.c20
-rw-r--r--drivers/clk/ti/clk-814x.c53
-rw-r--r--drivers/clk/ti/clk.c4
-rw-r--r--drivers/clk/ti/clkt_dpll.c6
-rw-r--r--drivers/clk/ti/clockdomain.c2
-rw-r--r--drivers/clk/ti/composite.c10
-rw-r--r--drivers/clk/ti/divider.c2
-rw-r--r--drivers/clk/ti/dpll.c27
-rw-r--r--drivers/clk/ti/dpll3xxx.c17
-rw-r--r--drivers/clk/ti/dpll44xx.c8
-rw-r--r--drivers/clk/ti/gate.c2
-rw-r--r--drivers/clk/ti/mux.c6
-rw-r--r--drivers/clk/ux500/abx500-clk.c5
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c74
-rw-r--r--drivers/clk/ux500/u8540_clk.c80
-rw-r--r--drivers/clk/versatile/clk-icst.c2
-rw-r--r--drivers/clk/versatile/clk-impd1.c3
-rw-r--r--drivers/clk/versatile/clk-realview.c8
-rw-r--r--drivers/clk/versatile/clk-sp810.c4
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c2
-rw-r--r--drivers/clk/x86/clk-lpt.c4
-rw-r--r--drivers/clk/zynq/clkc.c3
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/arm_arch_timer.c136
-rw-r--r--drivers/clocksource/arm_global_timer.c18
-rw-r--r--drivers/clocksource/exynos_mct.c2
-rw-r--r--drivers/clocksource/rockchip_timer.c21
-rw-r--r--drivers/clocksource/tango_xtal.c2
-rw-r--r--drivers/clocksource/time-lpc32xx.c66
-rw-r--r--drivers/clocksource/time-pistachio.c4
-rw-r--r--drivers/cpufreq/Kconfig1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c218
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c8
-rw-r--r--drivers/cpufreq/cpufreq-dt.c303
-rw-r--r--drivers/cpufreq/cpufreq.c430
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c282
-rw-r--r--drivers/cpufreq/cpufreq_governor.c760
-rw-r--r--drivers/cpufreq/cpufreq_governor.h261
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c445
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.h30
-rw-r--r--drivers/cpufreq/cpufreq_performance.c18
-rw-r--r--drivers/cpufreq/cpufreq_powersave.c10
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c10
-rw-r--r--drivers/cpufreq/intel_pstate.c470
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c222
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c2
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c2
-rw-r--r--drivers/cpufreq/sti-cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle-arm.c2
-rw-r--r--drivers/cpuidle/governors/menu.c97
-rw-r--r--drivers/crypto/Kconfig7
-rw-r--r--drivers/crypto/atmel-aes.c10
-rw-r--r--drivers/crypto/atmel-sha-regs.h4
-rw-r--r--drivers/crypto/atmel-sha.c200
-rw-r--r--drivers/crypto/atmel-tdes.c4
-rw-r--r--drivers/crypto/caam/ctrl.c2
-rw-r--r--drivers/crypto/caam/jr.c2
-rw-r--r--drivers/crypto/caam/regs.h3
-rw-r--r--drivers/crypto/ccp/Makefile2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c39
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c12
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c52
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h22
-rw-r--r--drivers/crypto/ccp/ccp-dev-v3.c533
-rw-r--r--drivers/crypto/ccp/ccp-dev.c471
-rw-r--r--drivers/crypto/ccp/ccp-dev.h155
-rw-r--r--drivers/crypto/ccp/ccp-ops.c381
-rw-r--r--drivers/crypto/ccp/ccp-pci.c23
-rw-r--r--drivers/crypto/ccp/ccp-platform.c48
-rw-r--r--drivers/crypto/ixp4xx_crypto.c26
-rw-r--r--drivers/crypto/marvell/cesa.c2
-rw-r--r--drivers/crypto/marvell/cesa.h3
-rw-r--r--drivers/crypto/marvell/hash.c106
-rw-r--r--drivers/crypto/nx/nx-842.c2
-rw-r--r--drivers/crypto/omap-aes.c97
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h4
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c1
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg_user.h6
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h11
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_hw_arbiter.c19
-rw-r--r--drivers/crypto/qat/qat_common/adf_sriov.c26
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_uclo.h42
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c6
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c70
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c2
-rw-r--r--drivers/crypto/rockchip/Makefile1
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c28
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.h56
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c20
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c404
-rw-r--r--drivers/crypto/s5p-sss.c12
-rw-r--r--drivers/crypto/sahara.c19
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c5
-rw-r--r--drivers/crypto/talitos.c87
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c4
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c4
-rw-r--r--drivers/devfreq/Kconfig2
-rw-r--r--drivers/dma-buf/dma-buf.c67
-rw-r--r--drivers/dma/Kconfig16
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/acpi-dma.c2
-rw-r--r--drivers/dma/at_xdmac.c42
-rw-r--r--drivers/dma/dmaengine.c1
-rw-r--r--drivers/dma/dw/core.c34
-rw-r--r--drivers/dma/dw/regs.h2
-rw-r--r--drivers/dma/edma.c126
-rw-r--r--drivers/dma/ep93xx_dma.c28
-rw-r--r--drivers/dma/fsldma.c2
-rw-r--r--drivers/dma/hsu/hsu.c13
-rw-r--r--drivers/dma/hsu/hsu.h3
-rw-r--r--drivers/dma/idma64.c3
-rw-r--r--drivers/dma/idma64.h4
-rw-r--r--drivers/dma/ioat/dma.c268
-rw-r--r--drivers/dma/ioat/dma.h23
-rw-r--r--drivers/dma/ioat/hw.h2
-rw-r--r--drivers/dma/ioat/init.c49
-rw-r--r--drivers/dma/ioat/prep.c2
-rw-r--r--drivers/dma/iop-adma.c8
-rw-r--r--drivers/dma/mic_x100_dma.c2
-rw-r--r--drivers/dma/mv_xor.c4
-rw-r--r--drivers/dma/omap-dma.c34
-rw-r--r--drivers/dma/pl330.c101
-rw-r--r--drivers/dma/pxa_dma.c39
-rw-r--r--drivers/dma/qcom/Kconfig29
-rw-r--r--drivers/dma/qcom/Makefile3
-rw-r--r--drivers/dma/qcom/bam_dma.c (renamed from drivers/dma/qcom_bam_dma.c)37
-rw-r--r--drivers/dma/qcom/hidma.c706
-rw-r--r--drivers/dma/qcom/hidma.h160
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c302
-rw-r--r--drivers/dma/qcom/hidma_mgmt.h39
-rw-r--r--drivers/dma/qcom/hidma_mgmt_sys.c295
-rw-r--r--drivers/dma/sh/Kconfig6
-rw-r--r--drivers/dma/sh/rcar-dmac.c2
-rw-r--r--drivers/dma/sh/shdmac.c2
-rw-r--r--drivers/dma/sirf-dma.c10
-rw-r--r--drivers/dma/sun4i-dma.c1
-rw-r--r--drivers/dma/tegra20-apb-dma.c47
-rw-r--r--drivers/dma/xilinx/xilinx_vdma.c206
-rw-r--r--drivers/edac/Kconfig26
-rw-r--r--drivers/edac/Makefile2
-rw-r--r--drivers/edac/altera_edac.c492
-rw-r--r--drivers/edac/amd64_edac.c2
-rw-r--r--drivers/edac/debugfs.c2
-rw-r--r--drivers/edac/edac_mc.c64
-rw-r--r--drivers/edac/edac_pci.c67
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/mce_amd.c335
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/edac/sb_edac.c56
-rw-r--r--drivers/edac/xgene_edac.c70
-rw-r--r--drivers/extcon/extcon-arizona.c4
-rw-r--r--drivers/extcon/extcon-gpio.c2
-rw-r--r--drivers/extcon/extcon-max14577.c3
-rw-r--r--drivers/extcon/extcon-max77693.c12
-rw-r--r--drivers/extcon/extcon-max77843.c5
-rw-r--r--drivers/extcon/extcon-max8997.c3
-rw-r--r--drivers/extcon/extcon-palmas.c53
-rw-r--r--drivers/extcon/extcon-rt8973a.c8
-rw-r--r--drivers/extcon/extcon-sm5502.c8
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firewire/nosy.c18
-rw-r--r--drivers/firewire/ohci.c5
-rw-r--r--drivers/firmware/Kconfig20
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/arm_scpi.c17
-rw-r--r--drivers/firmware/broadcom/bcm47xx_nvram.c5
-rw-r--r--drivers/firmware/efi/arm-init.c32
-rw-r--r--drivers/firmware/efi/efi.c41
-rw-r--r--drivers/firmware/efi/efivars.c4
-rw-r--r--drivers/firmware/efi/esrt.c5
-rw-r--r--drivers/firmware/efi/libstub/Makefile6
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c44
-rw-r--r--drivers/firmware/efi/libstub/arm32-stub.c17
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c112
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c7
-rw-r--r--drivers/firmware/efi/libstub/efistub.h19
-rw-r--r--drivers/firmware/efi/libstub/fdt.c14
-rw-r--r--drivers/firmware/efi/libstub/random.c135
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c113
-rw-r--r--drivers/firmware/efi/vars.c53
-rw-r--r--drivers/firmware/iscsi_ibft.c4
-rw-r--r--drivers/firmware/psci.c120
-rw-r--r--drivers/firmware/qemu_fw_cfg.c773
-rw-r--r--drivers/gpio/Kconfig91
-rw-r--r--drivers/gpio/Makefile10
-rw-r--r--drivers/gpio/devres.c2
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c430
-rw-r--r--drivers/gpio/gpio-104-idi-48.c30
-rw-r--r--drivers/gpio/gpio-104-idio-16.c27
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c11
-rw-r--r--drivers/gpio/gpio-adnp.c11
-rw-r--r--drivers/gpio/gpio-adp5520.c13
-rw-r--r--drivers/gpio/gpio-adp5588.c4
-rw-r--r--drivers/gpio/gpio-amd8111.c7
-rw-r--r--drivers/gpio/gpio-arizona.c12
-rw-r--r--drivers/gpio/gpio-ath79.c264
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-brcmstb.c13
-rw-r--r--drivers/gpio/gpio-clps711x.c11
-rw-r--r--drivers/gpio/gpio-crystalcove.c9
-rw-r--r--drivers/gpio/gpio-cs5535.c20
-rw-r--r--drivers/gpio/gpio-da9052.c11
-rw-r--r--drivers/gpio/gpio-da9055.c16
-rw-r--r--drivers/gpio/gpio-davinci.c5
-rw-r--r--drivers/gpio/gpio-dln2.c16
-rw-r--r--drivers/gpio/gpio-ep93xx.c2
-rw-r--r--drivers/gpio/gpio-f7188x.c53
-rw-r--r--drivers/gpio/gpio-ge.c2
-rw-r--r--drivers/gpio/gpio-generic.c11
-rw-r--r--drivers/gpio/gpio-ich.c51
-rw-r--r--drivers/gpio/gpio-iop.c2
-rw-r--r--drivers/gpio/gpio-janz-ttl.c12
-rw-r--r--drivers/gpio/gpio-kempld.c11
-rw-r--r--drivers/gpio/gpio-ks8695.c12
-rw-r--r--drivers/gpio/gpio-lp3943.c12
-rw-r--r--drivers/gpio/gpio-lpc32xx.c2
-rw-r--r--drivers/gpio/gpio-lynxpoint.c4
-rw-r--r--drivers/gpio/gpio-mc9s08dz60.c13
-rw-r--r--drivers/gpio/gpio-mcp23s08.c25
-rw-r--r--drivers/gpio/gpio-menz127.c199
-rw-r--r--drivers/gpio/gpio-moxart.c5
-rw-r--r--drivers/gpio/gpio-mpc5200.c1
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c255
-rw-r--r--drivers/gpio/gpio-mvebu.c11
-rw-r--r--drivers/gpio/gpio-mxc.c6
-rw-r--r--drivers/gpio/gpio-octeon.c10
-rw-r--r--drivers/gpio/gpio-omap.c57
-rw-r--r--drivers/gpio/gpio-palmas.c12
-rw-r--r--drivers/gpio/gpio-pca953x.c13
-rw-r--r--drivers/gpio/gpio-pcf857x.c10
-rw-r--r--drivers/gpio/gpio-pisosr.c183
-rw-r--r--drivers/gpio/gpio-pxa.c4
-rw-r--r--drivers/gpio/gpio-rc5t583.c12
-rw-r--r--drivers/gpio/gpio-rcar.c65
-rw-r--r--drivers/gpio/gpio-rdc321x.c13
-rw-r--r--drivers/gpio/gpio-sch.c11
-rw-r--r--drivers/gpio/gpio-sch311x.c8
-rw-r--r--drivers/gpio/gpio-spear-spics.c2
-rw-r--r--drivers/gpio/gpio-sta2x11.c2
-rw-r--r--drivers/gpio/gpio-stp-xway.c2
-rw-r--r--drivers/gpio/gpio-sx150x.c18
-rw-r--r--drivers/gpio/gpio-syscon.c11
-rw-r--r--drivers/gpio/gpio-tb10x.c22
-rw-r--r--drivers/gpio/gpio-tc3589x.c13
-rw-r--r--drivers/gpio/gpio-tegra.c2
-rw-r--r--drivers/gpio/gpio-timberdale.c25
-rw-r--r--drivers/gpio/gpio-tpic2810.c170
-rw-r--r--drivers/gpio/gpio-tps65086.c139
-rw-r--r--drivers/gpio/gpio-tps65218.c222
-rw-r--r--drivers/gpio/gpio-tps6586x.c12
-rw-r--r--drivers/gpio/gpio-tps65910.c12
-rw-r--r--drivers/gpio/gpio-tps65912.c174
-rw-r--r--drivers/gpio/gpio-ts4800.c81
-rw-r--r--drivers/gpio/gpio-ts5500.c9
-rw-r--r--drivers/gpio/gpio-twl6040.c9
-rw-r--r--drivers/gpio/gpio-ucb1400.c3
-rw-r--r--drivers/gpio/gpio-viperboard.c24
-rw-r--r--drivers/gpio/gpio-vx855.c12
-rw-r--r--drivers/gpio/gpio-wm831x.c12
-rw-r--r--drivers/gpio/gpio-wm8350.c12
-rw-r--r--drivers/gpio/gpio-wm8994.c17
-rw-r--r--drivers/gpio/gpio-ws16c48.c427
-rw-r--r--drivers/gpio/gpio-xgene-sb.c266
-rw-r--r--drivers/gpio/gpio-xgene.c16
-rw-r--r--drivers/gpio/gpiolib-acpi.c20
-rw-r--r--drivers/gpio/gpiolib-sysfs.c51
-rw-r--r--drivers/gpio/gpiolib.c883
-rw-r--r--drivers/gpio/gpiolib.h79
-rw-r--r--drivers/gpu/drm/Kconfig12
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig14
-rw-r--r--drivers/gpu/drm/amd/acp/Makefile8
-rw-r--r--drivers/gpu/drm/amd/acp/acp_hw.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c)52
-rw-r--r--drivers/gpu/drm/amd/acp/include/acp_gfx_if.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h324
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c504
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c503
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c555
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c171
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c110
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c355
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c238
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c237
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c176
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c164
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c139
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c772
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c310
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c2000
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c263
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c110
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smc.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c401
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c2
-rw-r--r--drivers/gpu/drm/amd/include/amd_acpi.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h1117
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h12
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h6
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h8
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h102
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile14
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c206
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c212
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c197
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c109
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c127
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h78
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h44
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h12
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_endian.h38
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h21
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h18
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c10
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c23
-rw-r--r--drivers/gpu/drm/arm/Kconfig27
-rw-r--r--drivers/gpu/drm/arm/Makefile2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c327
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c542
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.h42
-rw-r--r--drivers/gpu/drm/arm/hdlcd_regs.h87
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c3
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c4
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c16
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c27
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c10
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c4
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c16
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.c8
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c22
-rw-r--r--drivers/gpu/drm/drm_atomic.c131
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c426
-rw-r--r--drivers/gpu/drm/drm_bridge.c12
-rw-r--r--drivers/gpu/drm/drm_crtc.c140
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c68
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c368
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c43
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c29
-rw-r--r--drivers/gpu/drm/drm_edid.c53
-rw-r--r--drivers/gpu/drm/drm_edid_load.c17
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c3
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c56
-rw-r--r--drivers/gpu/drm/drm_fops.c305
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c13
-rw-r--r--drivers/gpu/drm/drm_irq.c18
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c60
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c127
-rw-r--r--drivers/gpu/drm/drm_modes.c3
-rw-r--r--drivers/gpu/drm/drm_of.c34
-rw-r--r--drivers/gpu/drm/drm_prime.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c219
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c60
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h17
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c73
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c2
-rw-r--r--drivers/gpu/drm/etnaviv/state_3d.xml.h9
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/Makefile7
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c281
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c242
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c28
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c34
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c44
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c39
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c148
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c70
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c24
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c140
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c13
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c1
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c20
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c14
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h6
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c12
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c9
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c1
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c9
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c19
-rw-r--r--drivers/gpu/drm/i915/Kconfig13
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c131
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c260
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c71
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h193
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c192
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c71
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c37
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c169
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c80
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c78
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c214
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c14
-rw-r--r--drivers/gpu/drm/i915/i915_guc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c199
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c295
-rw-r--r--drivers/gpu/drm/i915/i915_params.c19
-rw-r--r--drivers/gpu/drm/i915/i915_params.h67
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h63
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c8
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h27
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c3
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c14
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c12
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c412
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h60
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c22
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c25
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c344
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1726
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c84
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c37
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h92
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c27
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h5
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c163
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c95
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c952
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c21
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h10
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h122
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c19
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c28
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c6
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c645
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h26
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c17
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c24
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c329
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c63
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c124
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h20
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c74
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c4
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h76
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c23
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c189
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c16
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c147
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c23
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c54
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c13
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c8
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c135
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c126
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h4
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c21
-rw-r--r--drivers/gpu/drm/msm/Makefile3
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h11
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h493
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h1267
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c108
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h30
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c44
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h48
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c118
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h5
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h5
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c174
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h110
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h647
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c56
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c172
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c166
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c20
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c230
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c503
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c766
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c196
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c94
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c461
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c24
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c28
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c23
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h1
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h5
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c14
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h7
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c20
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c6
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h1
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h127
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h16
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h59
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c77
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c61
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ramht.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c)10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c288
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c)12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c)40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c270
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c)8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c186
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c)2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c147
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c1049
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c74
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h82
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h68
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h252
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h252
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h238
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h238
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h210
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h210
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c181
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c207
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c373
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c292
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c198
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c)12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c)28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c)8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c)20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c)6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c232
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c)14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h1598
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h1494
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h1420
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h1746
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c288
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c1489
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c233
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h226
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c56
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig1
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c55
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c39
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c118
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c49
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c53
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c44
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c83
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c43
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/apply.c1702
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c36
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc-compat.c667
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c71
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display-sysfs.c356
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c49
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c61
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h43
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c50
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c42
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c32
-rw-r--r--drivers/gpu/drm/omapdrm/dss/manager-sysfs.c531
-rw-r--r--drivers/gpu/drm/omapdrm/dss/manager.c263
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h96
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c456
-rw-r--r--drivers/gpu/drm/omapdrm/dss/overlay.c202
-rw-r--r--drivers/gpu/drm/omapdrm/dss/rfbi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c36
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c27
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c140
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c67
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c94
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c344
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c60
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c6
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c82
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c22
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/radeon/atom.c7
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c22
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c24
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c18
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c41
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c42
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c8
-rw-r--r--drivers/gpu/drm/radeon/cik.c1708
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c9
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c156
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c32
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h46
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/ni.c4
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c170
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c10
-rw-r--r--drivers/gpu/drm/radeon/r600.c18
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c20
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c9
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c53
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c92
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c19
-rw-r--r--drivers/gpu/drm/radeon/rs780_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c18
-rw-r--r--drivers/gpu/drm/radeon/rv740_dpm.c16
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c46
-rw-r--r--drivers/gpu/drm/radeon/si.c969
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c105
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c24
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c10
-rw-r--r--drivers/gpu/drm/radeon/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig15
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c104
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c228
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c74
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.h4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c11
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c313
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c11
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c142
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c465
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h21
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h21
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c11
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c384
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.h76
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds_regs.h26
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig8
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c10
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c15
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c938
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.h362
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c52
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c79
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c28
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.h2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c8
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.c78
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c4
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c1
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c204
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c141
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c78
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c479
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c108
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c400
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h31
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c451
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c146
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h4
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c63
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h17
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c301
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c125
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h4
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c200
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.h5
-rw-r--r--drivers/gpu/drm/tegra/dc.c17
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.h1
-rw-r--r--drivers/gpu/drm/tegra/gem.c11
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c331
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c138
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c29
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c33
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c17
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c14
-rw-r--r--drivers/gpu/drm/udl/udl_encoder.c8
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c12
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c210
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c10
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h50
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c123
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c42
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c97
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c58
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c603
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h120
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c1
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c28
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c87
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c163
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c181
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c453
-rw-r--r--drivers/gpu/host1x/bus.c4
-rw-r--r--drivers/gpu/host1x/cdma.c8
-rw-r--r--drivers/gpu/host1x/job.c12
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c36
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c79
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c53
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c8
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c119
-rw-r--r--drivers/hid/Kconfig6
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-cmedia.c168
-rw-r--r--drivers/hid/hid-core.c107
-rw-r--r--drivers/hid/hid-corsair.c3
-rw-r--r--drivers/hid/hid-dr.c4
-rw-r--r--drivers/hid/hid-ids.h16
-rw-r--r--drivers/hid/hid-lenovo.c16
-rw-r--r--drivers/hid/hid-lg.c10
-rw-r--r--drivers/hid/hid-logitech-hidpp.c701
-rw-r--r--drivers/hid/hid-microsoft.c8
-rw-r--r--drivers/hid/hid-multitouch.c31
-rw-r--r--drivers/hid/hid-penmount.c8
-rw-r--r--drivers/hid/hid-rmi.c11
-rw-r--r--drivers/hid/hid-sony.c182
-rw-r--r--drivers/hid/hid-thingm.c135
-rw-r--r--drivers/hid/hid-wiimote-modules.c14
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c60
-rw-r--r--drivers/hid/uhid.c2
-rw-r--r--drivers/hid/usbhid/hid-core.c73
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/wacom_sys.c465
-rw-r--r--drivers/hid/wacom_wac.c266
-rw-r--r--drivers/hid/wacom_wac.h8
-rw-r--r--drivers/hsi/clients/nokia-modem.c2
-rw-r--r--drivers/hsi/clients/ssi_protocol.c16
-rw-r--r--drivers/hv/channel.c36
-rw-r--r--drivers/hv/channel_mgmt.c262
-rw-r--r--drivers/hv/connection.c20
-rw-r--r--drivers/hv/hv.c36
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/hv_kvp.c2
-rw-r--r--drivers/hv/hv_snapshot.c2
-rw-r--r--drivers/hv/hv_util.c1
-rw-r--r--drivers/hv/hv_utils_transport.c3
-rw-r--r--drivers/hv/hyperv_vmbus.h42
-rw-r--r--drivers/hv/ring_buffer.c57
-rw-r--r--drivers/hv/vmbus_drv.c117
-rw-r--r--drivers/hwmon/Kconfig31
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/iio_hwmon.c11
-rw-r--r--drivers/hwmon/ltc2990.c161
-rw-r--r--drivers/hwmon/max1111.c6
-rw-r--r--drivers/hwmon/nsa320-hwmon.c215
-rw-r--r--drivers/hwmon/ntc_thermistor.c44
-rw-r--r--drivers/hwmon/pmbus/Kconfig4
-rw-r--r--drivers/hwmon/pmbus/adm1275.c56
-rw-r--r--drivers/hwmon/scpi-hwmon.c14
-rw-r--r--drivers/hwmon/vexpress-hwmon.c (renamed from drivers/hwmon/vexpress.c)0
-rw-r--r--drivers/hwtracing/coresight/Kconfig1
-rw-r--r--drivers/hwtracing/coresight/Makefile4
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c293
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c393
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h (renamed from drivers/gpu/drm/omapdrm/dss/dispc-compat.h)22
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h142
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c1272
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c1687
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c21
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h15
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator-qcom.c19
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c25
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c35
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c23
-rw-r--r--drivers/hwtracing/coresight/coresight.c388
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c3
-rw-r--r--drivers/hwtracing/intel_th/Kconfig1
-rw-r--r--drivers/hwtracing/intel_th/core.c30
-rw-r--r--drivers/hwtracing/intel_th/gth.c32
-rw-r--r--drivers/hwtracing/intel_th/gth.h3
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h41
-rw-r--r--drivers/hwtracing/intel_th/msu.c9
-rw-r--r--drivers/hwtracing/intel_th/pci.c12
-rw-r--r--drivers/hwtracing/intel_th/sth.c11
-rw-r--r--drivers/hwtracing/stm/Kconfig16
-rw-r--r--drivers/hwtracing/stm/Makefile2
-rw-r--r--drivers/hwtracing/stm/core.c175
-rw-r--r--drivers/hwtracing/stm/dummy_stm.c71
-rw-r--r--drivers/hwtracing/stm/heartbeat.c130
-rw-r--r--drivers/hwtracing/stm/policy.c25
-rw-r--r--drivers/hwtracing/stm/stm.h2
-rw-r--r--drivers/i2c/busses/Kconfig14
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c177
-rw-r--r--drivers/i2c/busses/i2c-cadence.c14
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c5
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c22
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c1
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c24
-rw-r--r--drivers/i2c/busses/i2c-i801.c8
-rw-r--r--drivers/i2c/busses/i2c-imx.c12
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c7
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c42
-rw-r--r--drivers/i2c/busses/i2c-octeon.c307
-rw-r--r--drivers/i2c/busses/i2c-piix4.c48
-rw-r--r--drivers/i2c/busses/i2c-qup.c977
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c1
-rw-r--r--drivers/i2c/busses/i2c-tegra.c4
-rw-r--r--drivers/i2c/busses/i2c-xiic.c88
-rw-r--r--drivers/i2c/i2c-boardinfo.c6
-rw-r--r--drivers/i2c/i2c-core.c49
-rw-r--r--drivers/i2c/i2c-dev.c14
-rw-r--r--drivers/i2c/i2c-mux.c8
-rw-r--r--drivers/i2c/i2c-smbus.c8
-rw-r--r--drivers/i2c/i2c-stub.c8
-rw-r--r--drivers/i2c/muxes/Kconfig9
-rw-r--r--drivers/i2c/muxes/Makefile2
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c291
-rw-r--r--drivers/ide/hpt366.c9
-rw-r--r--drivers/ide/icside.c2
-rw-r--r--drivers/ide/palm_bk3710.c2
-rw-r--r--drivers/ide/pdc202xx_new.c1
-rw-r--r--drivers/ide/pmac.c1
-rw-r--r--drivers/idle/intel_idle.c230
-rw-r--r--drivers/iio/accel/Kconfig2
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c7
-rw-r--r--drivers/iio/accel/mma8452.c206
-rw-r--r--drivers/iio/accel/st_accel_core.c16
-rw-r--r--drivers/iio/adc/Kconfig61
-rw-r--r--drivers/iio/adc/Makefile5
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c510
-rw-r--r--drivers/iio/adc/axp288_adc.c2
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c417
-rw-r--r--drivers/iio/adc/ina2xx-adc.c155
-rw-r--r--drivers/iio/adc/max1363.c12
-rw-r--r--drivers/iio/adc/mcp320x.c31
-rw-r--r--drivers/iio/adc/mcp3422.c9
-rw-r--r--drivers/iio/adc/mxs-lradc.c (renamed from drivers/staging/iio/adc/mxs-lradc.c)13
-rw-r--r--drivers/iio/adc/palmas_gpadc.c6
-rw-r--r--drivers/iio/adc/ti-adc081c.c2
-rw-r--r--drivers/iio/adc/ti-adc0832.c288
-rw-r--r--drivers/iio/adc/ti-ads1015.c612
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c7
-rw-r--r--drivers/iio/chemical/Kconfig14
-rw-r--r--drivers/iio/chemical/Makefile1
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c509
-rw-r--r--drivers/iio/chemical/vz89x.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c6
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.h8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c52
-rw-r--r--drivers/iio/dac/Kconfig39
-rw-r--r--drivers/iio/dac/Makefile3
-rw-r--r--drivers/iio/dac/ad5064.c391
-rw-r--r--drivers/iio/dac/ad5761.c430
-rw-r--r--drivers/iio/dac/mcp4725.c87
-rw-r--r--drivers/iio/dac/stx104.c152
-rw-r--r--drivers/iio/dac/vf610_dac.c298
-rw-r--r--drivers/iio/gyro/bmg160_core.c9
-rw-r--r--drivers/iio/gyro/st_gyro_core.c15
-rw-r--r--drivers/iio/health/Kconfig32
-rw-r--r--drivers/iio/health/Makefile2
-rw-r--r--drivers/iio/health/afe4403.c708
-rw-r--r--drivers/iio/health/afe4404.c679
-rw-r--r--drivers/iio/health/afe440x.h191
-rw-r--r--drivers/iio/health/max30100.c84
-rw-r--r--drivers/iio/humidity/Kconfig6
-rw-r--r--drivers/iio/humidity/dht11.c77
-rw-r--r--drivers/iio/humidity/hdc100x.c2
-rw-r--r--drivers/iio/humidity/htu21.c2
-rw-r--r--drivers/iio/humidity/si7005.c3
-rw-r--r--drivers/iio/humidity/si7020.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig23
-rw-r--r--drivers/iio/imu/inv_mpu6050/Makefile8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c24
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c458
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c232
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h37
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c54
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c99
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c32
-rw-r--r--drivers/iio/industrialio-buffer.c60
-rw-r--r--drivers/iio/industrialio-core.c1
-rw-r--r--drivers/iio/light/apds9960.c3
-rw-r--r--drivers/iio/light/bh1750.c2
-rw-r--r--drivers/iio/light/jsa1212.c2
-rw-r--r--drivers/iio/light/opt3001.c156
-rw-r--r--drivers/iio/magnetometer/Kconfig33
-rw-r--r--drivers/iio/magnetometer/Makefile4
-rw-r--r--drivers/iio/magnetometer/ak8975.c10
-rw-r--r--drivers/iio/magnetometer/hmc5843.h (renamed from drivers/staging/iio/magnetometer/hmc5843.h)5
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c (renamed from drivers/staging/iio/magnetometer/hmc5843_core.c)138
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c (renamed from drivers/staging/iio/magnetometer/hmc5843_i2c.c)3
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c (renamed from drivers/staging/iio/magnetometer/hmc5843_spi.c)3
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c4
-rw-r--r--drivers/iio/potentiometer/Kconfig12
-rw-r--r--drivers/iio/potentiometer/Makefile1
-rw-r--r--drivers/iio/potentiometer/mcp4531.c2
-rw-r--r--drivers/iio/potentiometer/tpl0102.c166
-rw-r--r--drivers/iio/pressure/Kconfig43
-rw-r--r--drivers/iio/pressure/Makefile2
-rw-r--r--drivers/iio/pressure/mpl115.c66
-rw-r--r--drivers/iio/pressure/mpl115.h24
-rw-r--r--drivers/iio/pressure/mpl115_i2c.c67
-rw-r--r--drivers/iio/pressure/mpl115_spi.c106
-rw-r--r--drivers/iio/pressure/ms5611.h4
-rw-r--r--drivers/iio/pressure/ms5611_core.c122
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c11
-rw-r--r--drivers/iio/pressure/ms5611_spi.c12
-rw-r--r--drivers/iio/pressure/ms5637.c2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c8
-rw-r--r--drivers/iio/pressure/t5403.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/iio/temperature/mlx90614.c2
-rw-r--r--drivers/iio/temperature/tmp006.c2
-rw-r--r--drivers/iio/temperature/tsys01.c2
-rw-r--r--drivers/iio/temperature/tsys02d.c2
-rw-r--r--drivers/infiniband/Kconfig3
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/core/cache.c18
-rw-r--r--drivers/infiniband/core/cma.c22
-rw-r--r--drivers/infiniband/core/cma_configfs.c31
-rw-r--r--drivers/infiniband/core/device.c43
-rw-r--r--drivers/infiniband/core/fmr_pool.c37
-rw-r--r--drivers/infiniband/core/iwcm.c190
-rw-r--r--drivers/infiniband/core/iwpm_msg.c12
-rw-r--r--drivers/infiniband/core/iwpm_util.c14
-rw-r--r--drivers/infiniband/core/iwpm_util.h2
-rw-r--r--drivers/infiniband/core/packer.c14
-rw-r--r--drivers/infiniband/core/sa_query.c18
-rw-r--r--drivers/infiniband/core/ucm.c12
-rw-r--r--drivers/infiniband/core/ucma.c9
-rw-r--r--drivers/infiniband/core/ud_header.c23
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/umem_odp.c8
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c33
-rw-r--r--drivers/infiniband/core/uverbs_main.c85
-rw-r--r--drivers/infiniband/core/verbs.c207
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c16
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c274
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c72
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h49
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c127
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h99
-rw-r--r--drivers/infiniband/hw/i40iw/Kconfig7
-rw-r--r--drivers/infiniband/hw/i40iw/Makefile9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h570
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c4137
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h456
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c4743
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h1713
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hmc.c821
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hmc.h241
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c730
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c1910
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_osdep.h215
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h106
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c618
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.h131
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c1436
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.h183
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_register.h1030
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_status.h100
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h1312
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ucontext.h107
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c1204
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h442
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c1270
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c2437
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h173
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_vf.c85
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_vf.h62
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_virtchnl.c748
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_virtchnl.h124
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c79
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c4
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c104
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c548
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c194
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c166
-rw-r--r--drivers/infiniband/hw/mlx5/main.c138
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h118
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c601
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c271
-rw-r--r--drivers/infiniband/hw/mlx5/user.h7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c3
-rw-r--r--drivers/infiniband/hw/nes/Kconfig1
-rw-r--r--drivers/infiniband/hw/nes/nes.c25
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c361
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h11
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c44
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h7
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c77
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c33
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h16
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c38
-rw-r--r--drivers/infiniband/hw/qib/Kconfig2
-rw-r--r--drivers/infiniband/hw/qib/Makefile10
-rw-r--r--drivers/infiniband/hw/qib/qib.h33
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_cq.c545
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c71
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c25
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c186
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c338
-rw-r--r--drivers/infiniband/hw/qib/qib_mmap.c174
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c490
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c1178
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c409
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c191
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c41
-rw-r--r--drivers/infiniband/hw/qib/qib_srq.c380
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c85
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c79
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c142
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c1223
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h812
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs_mcast.c363
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c10
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c2
-rw-r--r--drivers/infiniband/sw/Makefile1
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig6
-rw-r--r--drivers/infiniband/sw/rdmavt/Makefile13
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c196
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h59
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c (renamed from drivers/staging/rdma/hfi1/cq.c)325
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.h64
-rw-r--r--drivers/infiniband/sw/rdmavt/dma.c184
-rw-r--r--drivers/infiniband/sw/rdmavt/dma.h53
-rw-r--r--drivers/infiniband/sw/rdmavt/mad.c171
-rw-r--r--drivers/infiniband/sw/rdmavt/mad.h60
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c (renamed from drivers/staging/rdma/hfi1/verbs_mcast.c)262
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.h58
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c (renamed from drivers/staging/rdma/hfi1/mmap.c)142
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.h63
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c830
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h92
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.c119
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.h58
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c1696
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.h69
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c (renamed from drivers/staging/rdma/hfi1/srq.c)204
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.h62
-rw-r--r--drivers/infiniband/sw/rdmavt/trace.c49
-rw-r--r--drivers/infiniband/sw/rdmavt/trace.h187
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c873
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.h104
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c45
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c65
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c5
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c25
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h7
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c7
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c38
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c819
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h74
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c40
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c935
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h31
-rw-r--r--drivers/input/Kconfig2
-rw-r--r--drivers/input/Makefile2
-rw-r--r--drivers/input/input-compat.c6
-rw-r--r--drivers/input/input-compat.h14
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/goldfish_events.c17
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c8
-rw-r--r--drivers/input/keyboard/spear-keyboard.c6
-rw-r--r--drivers/input/misc/arizona-haptics.c1
-rw-r--r--drivers/input/misc/ati_remote2.c36
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/max8997_haptic.c6
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c7
-rw-r--r--drivers/input/misc/powermate.c3
-rw-r--r--drivers/input/misc/rotary_encoder.c403
-rw-r--r--drivers/input/misc/twl4030-vibra.c1
-rw-r--r--drivers/input/misc/twl6040-vibra.c25
-rw-r--r--drivers/input/misc/uinput.c4
-rw-r--r--drivers/input/mouse/Kconfig10
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/byd.c512
-rw-r--r--drivers/input/mouse/byd.h18
-rw-r--r--drivers/input/mouse/cyapa.c22
-rw-r--r--drivers/input/mouse/cyapa.h14
-rw-r--r--drivers/input/mouse/cyapa_gen3.c108
-rw-r--r--drivers/input/mouse/cyapa_gen5.c99
-rw-r--r--drivers/input/mouse/cyapa_gen6.c4
-rw-r--r--drivers/input/mouse/psmouse-base.c14
-rw-r--r--drivers/input/mouse/psmouse.h1
-rw-r--r--drivers/input/mouse/synaptics.c5
-rw-r--r--drivers/input/rmi4/Kconfig63
-rw-r--r--drivers/input/rmi4/Makefile13
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c329
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.h87
-rw-r--r--drivers/input/rmi4/rmi_bus.c419
-rw-r--r--drivers/input/rmi4/rmi_bus.h182
-rw-r--r--drivers/input/rmi4/rmi_driver.c1054
-rw-r--r--drivers/input/rmi4/rmi_driver.h105
-rw-r--r--drivers/input/rmi4/rmi_f01.c624
-rw-r--r--drivers/input/rmi4/rmi_f11.c1317
-rw-r--r--drivers/input/rmi4/rmi_f12.c457
-rw-r--r--drivers/input/rmi4/rmi_f30.c407
-rw-r--r--drivers/input/rmi4/rmi_i2c.c397
-rw-r--r--drivers/input/rmi4/rmi_spi.c589
-rw-r--r--drivers/input/tablet/gtco.c10
-rw-r--r--drivers/input/touchscreen/Kconfig26
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c10
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c10
-rw-r--r--drivers/input/touchscreen/ad7879.c160
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c28
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c194
-rw-r--r--drivers/input/touchscreen/cyttsp_core.h10
-rw-r--r--drivers/input/touchscreen/cyttsp_i2c.c10
-rw-r--r--drivers/input/touchscreen/cyttsp_spi.c10
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c596
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c1543
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c31
-rw-r--r--drivers/input/touchscreen/sur40.c21
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c2
-rw-r--r--drivers/input/touchscreen/zforce_ts.c4
-rw-r--r--drivers/iommu/Kconfig42
-rw-r--r--drivers/iommu/Makefile2
-rw-r--r--drivers/iommu/amd_iommu.c87
-rw-r--r--drivers/iommu/amd_iommu_v2.c1
-rw-r--r--drivers/iommu/arm-smmu-v3.c50
-rw-r--r--drivers/iommu/arm-smmu.c89
-rw-r--r--drivers/iommu/dma-iommu.c4
-rw-r--r--drivers/iommu/exynos-iommu.c608
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c846
-rw-r--r--drivers/iommu/io-pgtable-arm.c34
-rw-r--r--drivers/iommu/io-pgtable.c5
-rw-r--r--drivers/iommu/io-pgtable.h53
-rw-r--r--drivers/iommu/iommu.c6
-rw-r--r--drivers/iommu/mtk_iommu.c736
-rw-r--r--drivers/iommu/of_iommu.c1
-rw-r--r--drivers/iommu/rockchip-iommu.c214
-rw-r--r--drivers/irqchip/Kconfig42
-rw-r--r--drivers/irqchip/Makefile8
-rw-r--r--drivers/irqchip/irq-alpine-msi.c293
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c156
-rw-r--r--drivers/irqchip/irq-ath79-cpu.c97
-rw-r--r--drivers/irqchip/irq-ath79-misc.c189
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c14
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.h7
-rw-r--r--drivers/irqchip/irq-atmel-aic.c9
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c9
-rw-r--r--drivers/irqchip/irq-bcm2836.c1
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c364
-rw-r--r--drivers/irqchip/irq-gic-realview.c44
-rw-r--r--drivers/irqchip/irq-gic-v2m.c14
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c10
-rw-r--r--drivers/irqchip/irq-gic-v3.c349
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-mbigen.c38
-rw-r--r--drivers/irqchip/irq-mips-gic.c354
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c236
-rw-r--r--drivers/irqchip/irq-mxs.c2
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c4
-rw-r--r--drivers/irqchip/irq-tango.c232
-rw-r--r--drivers/irqchip/irq-tegra.c14
-rw-r--r--drivers/irqchip/irq-ts4800.c2
-rw-r--r--drivers/isdn/Makefile3
-rw-r--r--drivers/isdn/hardware/eicon/debug.c4
-rw-r--r--drivers/isdn/hardware/eicon/divamnt.c30
-rw-r--r--drivers/isdn/hardware/mISDN/ipac.h41
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c2
-rw-r--r--drivers/isdn/hisax/isac.c15
-rw-r--r--drivers/isdn/i4l/Kconfig10
-rw-r--r--drivers/isdn/i4l/isdn_tty.c12
-rw-r--r--drivers/isdn/mISDN/clock.c69
-rw-r--r--drivers/isdn/mISDN/socket.c3
-rw-r--r--drivers/leds/Kconfig9
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/dell-led.c126
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/led-core.c45
-rw-r--r--drivers/leds/led-triggers.c13
-rw-r--r--drivers/leds/leds-88pm860x.c12
-rw-r--r--drivers/leds/leds-da903x.c12
-rw-r--r--drivers/leds/leds-gpio.c6
-rw-r--r--drivers/leds/leds-is31fl32xx.c508
-rw-r--r--drivers/leds/leds-lm3533.c12
-rw-r--r--drivers/leds/leds-lp3944.c7
-rw-r--r--drivers/leds/leds-lp8788.c14
-rw-r--r--drivers/leds/leds-max8997.c14
-rw-r--r--drivers/leds/leds-s3c24xx.c19
-rw-r--r--drivers/leds/leds-wm831x-status.c13
-rw-r--r--drivers/lguest/interrupts_and_traps.c6
-rw-r--r--drivers/lguest/lg.h1
-rw-r--r--drivers/lguest/x86/core.c6
-rw-r--r--drivers/lightnvm/core.c26
-rw-r--r--drivers/lightnvm/gennvm.c91
-rw-r--r--drivers/lightnvm/gennvm.h6
-rw-r--r--drivers/lightnvm/rrpc.c203
-rw-r--r--drivers/lightnvm/rrpc.h16
-rw-r--r--drivers/macintosh/macio_asic.c1
-rw-r--r--drivers/mailbox/Kconfig37
-rw-r--r--drivers/mailbox/Makefile8
-rw-r--r--drivers/mailbox/hi6220-mailbox.c395
-rw-r--r--drivers/mailbox/mailbox-test.c69
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c284
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/mailbox/pcc.c111
-rw-r--r--drivers/mailbox/rockchip-mailbox.c283
-rw-r--r--drivers/mailbox/ti-msgmgr.c639
-rw-r--r--drivers/md/Kconfig11
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/bcache/super.c46
-rw-r--r--drivers/md/bitmap.c25
-rw-r--r--drivers/md/bitmap.h4
-rw-r--r--drivers/md/dm-cache-metadata.c122
-rw-r--r--drivers/md/dm-cache-metadata.h4
-rw-r--r--drivers/md/dm-cache-policy-mq.c1473
-rw-r--r--drivers/md/dm-cache-policy-smq.c92
-rw-r--r--drivers/md/dm-cache-target.c16
-rw-r--r--drivers/md/dm-crypt.c95
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-ioctl.c5
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-mpath.c216
-rw-r--r--drivers/md/dm-path-selector.h5
-rw-r--r--drivers/md/dm-queue-length.c37
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-round-robin.c85
-rw-r--r--drivers/md/dm-service-time.c35
-rw-r--r--drivers/md/dm-snap.c11
-rw-r--r--drivers/md/dm-table.c66
-rw-r--r--drivers/md/dm-target.c3
-rw-r--r--drivers/md/dm-thin-metadata.c7
-rw-r--r--drivers/md/dm-thin.c23
-rw-r--r--drivers/md/dm-verity-fec.c2
-rw-r--r--drivers/md/dm-verity-target.c12
-rw-r--r--drivers/md/dm.c602
-rw-r--r--drivers/md/dm.h4
-rw-r--r--drivers/md/md.c9
-rw-r--r--drivers/md/multipath.c4
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c10
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c65
-rw-r--r--drivers/md/raid5.h4
-rw-r--r--drivers/media/common/b2c2/flexcop-fe-tuner.c4
-rw-r--r--drivers/media/common/b2c2/flexcop.c4
-rw-r--r--drivers/media/common/cypress_firmware.c2
-rw-r--r--drivers/media/common/cypress_firmware.h2
-rw-r--r--drivers/media/common/siano/smscoreapi.c4
-rw-r--r--drivers/media/common/siano/smsdvb-main.c7
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h9
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c218
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h3
-rw-r--r--drivers/media/dvb-core/dvbdev.c15
-rw-r--r--drivers/media/dvb-frontends/Kconfig8
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/af9013.c8
-rw-r--r--drivers/media/dvb-frontends/af9033.c7
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c4
-rw-r--r--drivers/media/dvb-frontends/atbm8830.c4
-rw-r--r--drivers/media/dvb-frontends/au8522.h1
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c7
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c4
-rw-r--r--drivers/media/dvb-frontends/au8522_priv.h11
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c4
-rw-r--r--drivers/media/dvb-frontends/bcm3510.h2
-rw-r--r--drivers/media/dvb-frontends/bcm3510_priv.h2
-rw-r--r--drivers/media/dvb-frontends/cx22700.c4
-rw-r--r--drivers/media/dvb-frontends/cx22702.c4
-rw-r--r--drivers/media/dvb-frontends/cx24110.c4
-rw-r--r--drivers/media/dvb-frontends/cx24117.c4
-rw-r--r--drivers/media/dvb-frontends/cx24120.c8
-rw-r--r--drivers/media/dvb-frontends/cx24123.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c20
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_priv.h9
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t2.c6
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c4
-rw-r--r--drivers/media/dvb-frontends/dib0070.c2
-rw-r--r--drivers/media/dvb-frontends/dib0090.c16
-rw-r--r--drivers/media/dvb-frontends/dib3000.h6
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c17
-rw-r--r--drivers/media/dvb-frontends/dib3000mb_priv.h2
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c10
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.h2
-rw-r--r--drivers/media/dvb-frontends/dib7000m.c8
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c10
-rw-r--r--drivers/media/dvb-frontends/dib8000.c77
-rw-r--r--drivers/media/dvb-frontends/dib9000.c31
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.c2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c11
-rw-r--r--drivers/media/dvb-frontends/dvb_dummy_fe.c7
-rw-r--r--drivers/media/dvb-frontends/hd29l2.c4
-rw-r--r--drivers/media/dvb-frontends/l64781.c4
-rw-r--r--drivers/media/dvb-frontends/lg2160.c62
-rw-r--r--drivers/media/dvb-frontends/lgdt3305.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c5
-rw-r--r--drivers/media/dvb-frontends/lgs8gl5.c5
-rw-r--r--drivers/media/dvb-frontends/lgs8gxx.c13
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c4
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c5
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c11
-rw-r--r--drivers/media/dvb-frontends/mn88473.c (renamed from drivers/staging/media/mn88473/mn88473.c)388
-rw-r--r--drivers/media/dvb-frontends/mn88473.h14
-rw-r--r--drivers/media/dvb-frontends/mn88473_priv.h (renamed from drivers/staging/media/mn88473/mn88473_priv.h)7
-rw-r--r--drivers/media/dvb-frontends/mt312.c4
-rw-r--r--drivers/media/dvb-frontends/mt352.c4
-rw-r--r--drivers/media/dvb-frontends/or51132.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c7
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c155
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h4
-rw-r--r--drivers/media/dvb-frontends/rtl2832_priv.h1
-rw-r--r--drivers/media/dvb-frontends/s5h1409.c4
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c4
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c4
-rw-r--r--drivers/media/dvb-frontends/s921.c4
-rw-r--r--drivers/media/dvb-frontends/si2165.c28
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c4
-rw-r--r--drivers/media/dvb-frontends/stb6100.c2
-rw-r--r--drivers/media/dvb-frontends/stv0297.c4
-rw-r--r--drivers/media/dvb-frontends/stv0299.c8
-rw-r--r--drivers/media/dvb-frontends/stv0367.c8
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c11
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c4
-rw-r--r--drivers/media/dvb-frontends/stv6110x.h4
-rw-r--r--drivers/media/dvb-frontends/stv6110x_priv.h2
-rw-r--r--drivers/media/dvb-frontends/tc90522.c10
-rw-r--r--drivers/media/dvb-frontends/tda10021.c4
-rw-r--r--drivers/media/dvb-frontends/tda10023.c4
-rw-r--r--drivers/media/dvb-frontends/tda10048.c4
-rw-r--r--drivers/media/dvb-frontends/tda1004x.c4
-rw-r--r--drivers/media/dvb-frontends/tda10071.c4
-rw-r--r--drivers/media/dvb-frontends/tda10086.c4
-rw-r--r--drivers/media/dvb-frontends/tda8083.c4
-rw-r--r--drivers/media/dvb-frontends/ts2020.c4
-rw-r--r--drivers/media/dvb-frontends/ves1820.c4
-rw-r--r--drivers/media/dvb-frontends/ves1x93.c4
-rw-r--r--drivers/media/dvb-frontends/zl10353.c4
-rw-r--r--drivers/media/i2c/adv7511.c43
-rw-r--r--drivers/media/i2c/adv7604.c230
-rw-r--r--drivers/media/i2c/adv7842.c20
-rw-r--r--drivers/media/i2c/msp3400-driver.c14
-rw-r--r--drivers/media/i2c/msp3400-driver.h5
-rw-r--r--drivers/media/i2c/mt9v011.c15
-rw-r--r--drivers/media/i2c/mt9v032.c28
-rw-r--r--drivers/media/i2c/ov2659.c8
-rw-r--r--drivers/media/i2c/ov9650.c4
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c4
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-spi.c1
-rw-r--r--drivers/media/i2c/s5k5baf.c5
-rw-r--r--drivers/media/i2c/saa7115.c19
-rw-r--r--drivers/media/i2c/soc_camera/mt9m001.c2
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c2
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c2
-rw-r--r--drivers/media/i2c/tc358743.c55
-rw-r--r--drivers/media/i2c/tvp514x.c6
-rw-r--r--drivers/media/i2c/tvp5150.c452
-rw-r--r--drivers/media/i2c/tvp7002.c6
-rw-r--r--drivers/media/i2c/vpx3220.c2
-rw-r--r--drivers/media/media-device.c168
-rw-r--r--drivers/media/media-devnode.c1
-rw-r--r--drivers/media/media-entity.c94
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c53
-rw-r--r--drivers/media/pci/bt8xx/dst.c4
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c19
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c10
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c7
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c2
-rw-r--r--drivers/media/pci/pt3/pt3.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c1851
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c195
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c9
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-go7007.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c21
-rw-r--r--drivers/media/pci/saa7134/saa7134-tvaudio.c13
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c108
-rw-r--r--drivers/media/pci/saa7134/saa7134.h46
-rw-r--r--drivers/media/pci/ttpci/av7110.c15
-rw-r--r--drivers/media/pci/ttpci/budget.c36
-rw-r--r--drivers/media/platform/Kconfig22
-rw-r--r--drivers/media/platform/Makefile3
-rw-r--r--drivers/media/platform/coda/coda-bit.c12
-rw-r--r--drivers/media/platform/coda/coda-common.c106
-rw-r--r--drivers/media/platform/coda/coda.h3
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c12
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c33
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c10
-rw-r--r--drivers/media/platform/omap3isp/isp.c226
-rw-r--r--drivers/media/platform/omap3isp/isp.h4
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c2
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c14
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c116
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h1
-rw-r--r--drivers/media/platform/omap3isp/omap3isp.h8
-rw-r--r--drivers/media/platform/rcar_jpu.c1
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c12
-rw-r--r--drivers/media/platform/soc_camera/Kconfig29
-rw-r--r--drivers/media/platform/soc_camera/Makefile3
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c4
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c478
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c41
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c14
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c2
-rw-r--r--drivers/media/platform/ti-vpe/Makefile4
-rw-r--r--drivers/media/platform/ti-vpe/cal.c1947
-rw-r--r--drivers/media/platform/ti-vpe/cal_regs.h479
-rw-r--r--drivers/media/platform/vim2m.c2
-rw-r--r--drivers/media/platform/vivid/vivid-osd.c2
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.c32
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.h2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c39
-rw-r--r--drivers/media/platform/vsp1/Makefile3
-rw-r--r--drivers/media/platform/vsp1/vsp1.h29
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c33
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.h3
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c305
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h42
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c597
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.h49
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c382
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c31
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h14
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c11
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c7
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c426
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h134
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h32
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c88
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h29
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c9
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c518
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h111
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c98
-rw-r--r--drivers/media/radio/radio-si476x.c4
-rw-r--r--drivers/media/radio/tea575x.c21
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c2
-rw-r--r--drivers/media/rc/ati_remote.c47
-rw-r--r--drivers/media/rc/igorplugusb.c17
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-rm-ks.c56
-rw-r--r--drivers/media/rc/lirc_dev.c7
-rw-r--r--drivers/media/rc/mceusb.c5
-rw-r--r--drivers/media/rc/nuvoton-cir.c358
-rw-r--r--drivers/media/rc/nuvoton-cir.h15
-rw-r--r--drivers/media/rc/rc-core-priv.h6
-rw-r--r--drivers/media/rc/rc-ir-raw.c23
-rw-r--r--drivers/media/rc/rc-main.c48
-rw-r--r--drivers/media/rc/sunxi-cir.c1
-rw-r--r--drivers/media/tuners/m88rs6000t.c11
-rw-r--r--drivers/media/tuners/r820t.c2
-rw-r--r--drivers/media/tuners/si2157.c39
-rw-r--r--drivers/media/tuners/si2157.h5
-rw-r--r--drivers/media/tuners/si2157_priv.h8
-rw-r--r--drivers/media/tuners/tuner-xc2028.c6
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/usb/airspy/airspy.c11
-rw-r--r--drivers/media/usb/as102/as102_drv.h2
-rw-r--r--drivers/media/usb/as102/as102_usb_drv.c2
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c468
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c12
-rw-r--r--drivers/media/usb/au0828/au0828-input.c4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c253
-rw-r--r--drivers/media/usb/au0828/au0828.h36
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_core.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c68
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c47
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h3
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_common.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c15
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c7
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h8
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c32
-rw-r--r--drivers/media/usb/dvb-usb-v2/usb_urb.c2
-rw-r--r--drivers/media/usb/dvb-usb/a800.c4
-rw-r--r--drivers/media/usb/dvb-usb/af9005-fe.c4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c4
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c77
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mb.c6
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc.c6
-rw-r--r--drivers/media/usb/dvb-usb/dibusb.h2
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c4
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c7
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c4
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.h2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-common.h2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-dvb.c13
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-i2c.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c4
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-urb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c105
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c27
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c43
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/umt-010.c4
-rw-r--r--drivers/media/usb/dvb-usb/usb-urb.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c4
-rw-r--r--drivers/media/usb/dvb-usb/vp7045-fe.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c4
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.h2
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c246
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c21
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c213
-rw-r--r--drivers/media/usb/em28xx/em28xx.h21
-rw-r--r--drivers/media/usb/go7007/go7007-priv.h2
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c4
-rw-r--r--drivers/media/usb/gspca/ov519.c43
-rw-r--r--drivers/media/usb/gspca/touptek.c8
-rw-r--r--drivers/media/usb/gspca/w996Xcf.c8
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c6
-rw-r--r--drivers/media/usb/msi2500/msi2500.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c6
-rw-r--r--drivers/media/usb/siano/smsusb.c30
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c1
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c37
-rw-r--r--drivers/media/usb/usbtv/usbtv.h1
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c36
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c20
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h12
-rw-r--r--drivers/media/v4l2-core/Kconfig1
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/tuner-core.c26
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c21
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c34
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c36
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c403
-rw-r--r--drivers/media/v4l2-core/v4l2-of.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c10
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c25
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c33
-rw-r--r--drivers/media/v4l2-core/videobuf2-dvb.c13
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c14
-rw-r--r--drivers/memory/Kconfig8
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/fsl_ifc.c2
-rw-r--r--drivers/memory/mtk-smi.c273
-rw-r--r--drivers/memory/omap-gpmc.c30
-rw-r--r--drivers/memstick/host/r592.c3
-rw-r--r--drivers/message/fusion/mptbase.c42
-rw-r--r--drivers/mfd/Kconfig100
-rw-r--r--drivers/mfd/Makefile9
-rw-r--r--drivers/mfd/act8945a.c102
-rw-r--r--drivers/mfd/as3711.c4
-rw-r--r--drivers/mfd/axp20x-i2c.c104
-rw-r--r--drivers/mfd/axp20x-rsb.c80
-rw-r--r--drivers/mfd/axp20x.c105
-rw-r--r--drivers/mfd/cs47l24-tables.c10
-rw-r--r--drivers/mfd/da9062-core.c23
-rw-r--r--drivers/mfd/da9063-i2c.c36
-rw-r--r--drivers/mfd/db8500-prcmu.c7
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c203
-rw-r--r--drivers/mfd/intel-lpss-acpi.c12
-rw-r--r--drivers/mfd/intel-lpss-pci.c31
-rw-r--r--drivers/mfd/intel-lpss.c1
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c26
-rw-r--r--drivers/mfd/ipaq-micro.c2
-rw-r--r--drivers/mfd/max77686.c88
-rw-r--r--drivers/mfd/menelaus.c2
-rw-r--r--drivers/mfd/mt6397-core.c105
-rw-r--r--drivers/mfd/rc5t583.c4
-rw-r--r--drivers/mfd/stmpe.c35
-rw-r--r--drivers/mfd/syscon.c19
-rw-r--r--drivers/mfd/tps65010.c21
-rw-r--r--drivers/mfd/tps65086.c149
-rw-r--r--drivers/mfd/tps65090.c5
-rw-r--r--drivers/mfd/tps65912-core.c240
-rw-r--r--drivers/mfd/tps65912-i2c.c162
-rw-r--r--drivers/mfd/tps65912-irq.c217
-rw-r--r--drivers/mfd/tps65912-spi.c160
-rw-r--r--drivers/mfd/wm5102-tables.c16
-rw-r--r--drivers/mfd/wm5110-tables.c82
-rw-r--r--drivers/mfd/wm8998-tables.c12
-rw-r--r--drivers/misc/Kconfig282
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ad525x_dpot.c4
-rw-r--r--drivers/misc/apds990x.c8
-rw-r--r--drivers/misc/arm-charlcd.c24
-rw-r--r--drivers/misc/atmel-ssc.c1
-rw-r--r--drivers/misc/bh1770glc.c8
-rw-r--r--drivers/misc/c2port/core.c8
-rw-r--r--drivers/misc/cxl/Makefile1
-rw-r--r--drivers/misc/cxl/api.c83
-rw-r--r--drivers/misc/cxl/base.c32
-rw-r--r--drivers/misc/cxl/context.c18
-rw-r--r--drivers/misc/cxl/cxl.h290
-rw-r--r--drivers/misc/cxl/debugfs.c4
-rw-r--r--drivers/misc/cxl/fault.c25
-rw-r--r--drivers/misc/cxl/file.c28
-rw-r--r--drivers/misc/cxl/flash.c538
-rw-r--r--drivers/misc/cxl/guest.c1177
-rw-r--r--drivers/misc/cxl/hcalls.c647
-rw-r--r--drivers/misc/cxl/hcalls.h204
-rw-r--r--drivers/misc/cxl/irq.c310
-rw-r--r--drivers/misc/cxl/main.c122
-rw-r--r--drivers/misc/cxl/native.c500
-rw-r--r--drivers/misc/cxl/of.c513
-rw-r--r--drivers/misc/cxl/pci.c268
-rw-r--r--drivers/misc/cxl/sysfs.c128
-rw-r--r--drivers/misc/cxl/trace.h193
-rw-r--r--drivers/misc/cxl/vphb.c167
-rw-r--r--drivers/misc/eeprom/Kconfig6
-rw-r--r--drivers/misc/eeprom/at24.c130
-rw-r--r--drivers/misc/eeprom/at25.c148
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c332
-rw-r--r--drivers/misc/genwqe/card_sysfs.c2
-rw-r--r--drivers/misc/ibmasm/ibmasm.h9
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c8
-rw-r--r--drivers/misc/lkdtm.c158
-rw-r--r--drivers/misc/mei/Kconfig6
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/amthif.c130
-rw-r--r--drivers/misc/mei/bus-fixup.c41
-rw-r--r--drivers/misc/mei/bus.c57
-rw-r--r--drivers/misc/mei/client.c189
-rw-r--r--drivers/misc/mei/client.h27
-rw-r--r--drivers/misc/mei/debugfs.c65
-rw-r--r--drivers/misc/mei/hbm.c24
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/hw-me.c10
-rw-r--r--drivers/misc/mei/hw-txe.c10
-rw-r--r--drivers/misc/mei/hw.h32
-rw-r--r--drivers/misc/mei/init.c20
-rw-r--r--drivers/misc/mei/interrupt.c94
-rw-r--r--drivers/misc/mei/main.c106
-rw-r--r--drivers/misc/mei/mei-trace.c2
-rw-r--r--drivers/misc/mei/mei-trace.h40
-rw-r--r--drivers/misc/mei/mei_dev.h118
-rw-r--r--drivers/misc/mei/pci-me.c7
-rw-r--r--drivers/misc/mei/pci-txe.c4
-rw-r--r--drivers/misc/mei/wd.c391
-rw-r--r--drivers/misc/mic/Kconfig44
-rw-r--r--drivers/misc/mic/Makefile1
-rw-r--r--drivers/misc/mic/bus/Makefile1
-rw-r--r--drivers/misc/mic/bus/cosm_bus.h2
-rw-r--r--drivers/misc/mic/bus/vop_bus.c203
-rw-r--r--drivers/misc/mic/bus/vop_bus.h140
-rw-r--r--drivers/misc/mic/card/Makefile1
-rw-r--r--drivers/misc/mic/card/mic_device.c89
-rw-r--r--drivers/misc/mic/card/mic_device.h3
-rw-r--r--drivers/misc/mic/card/mic_virtio.c634
-rw-r--r--drivers/misc/mic/card/mic_virtio.h76
-rw-r--r--drivers/misc/mic/card/mic_x100.c1
-rw-r--r--drivers/misc/mic/cosm/cosm_main.c13
-rw-r--r--drivers/misc/mic/host/Makefile2
-rw-r--r--drivers/misc/mic/host/mic_boot.c125
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c190
-rw-r--r--drivers/misc/mic/host/mic_device.h9
-rw-r--r--drivers/misc/mic/host/mic_fops.c222
-rw-r--r--drivers/misc/mic/host/mic_fops.h32
-rw-r--r--drivers/misc/mic/host/mic_main.c49
-rw-r--r--drivers/misc/mic/host/mic_virtio.c811
-rw-r--r--drivers/misc/mic/host/mic_x100.c19
-rw-r--r--drivers/misc/mic/scif/scif_dma.c41
-rw-r--r--drivers/misc/mic/scif/scif_rma.c9
-rw-r--r--drivers/misc/mic/vop/Makefile9
-rw-r--r--drivers/misc/mic/vop/vop_debugfs.c232
-rw-r--r--drivers/misc/mic/vop/vop_main.c755
-rw-r--r--drivers/misc/mic/vop/vop_main.h (renamed from drivers/misc/mic/host/mic_virtio.h)129
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c1170
-rw-r--r--drivers/misc/panel.c (renamed from drivers/staging/panel/panel.c)136
-rw-r--r--drivers/misc/pch_phub.c10
-rw-r--r--drivers/misc/sgi-gru/grufault.c3
-rw-r--r--drivers/misc/sram.c5
-rw-r--r--drivers/misc/ti-st/st_core.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c2
-rw-r--r--drivers/mmc/card/block.c52
-rw-r--r--drivers/mmc/card/mmc_test.c1
-rw-r--r--drivers/mmc/card/sdio_uart.c14
-rw-r--r--drivers/mmc/core/core.c29
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/host.c7
-rw-r--r--drivers/mmc/core/mmc.c4
-rw-r--r--drivers/mmc/core/mmc_ops.c19
-rw-r--r--drivers/mmc/core/pwrseq_simple.c1
-rw-r--r--drivers/mmc/core/sd.c2
-rw-r--r--drivers/mmc/core/sd_ops.c7
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/core/sdio_ops.c3
-rw-r--r--drivers/mmc/host/Kconfig26
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c11
-rw-r--r--drivers/mmc/host/bfin_sdh.c3
-rw-r--r--drivers/mmc/host/davinci_mmc.c15
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c31
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c19
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c7
-rw-r--r--drivers/mmc/host/dw_mmc.c101
-rw-r--r--drivers/mmc/host/dw_mmc.h6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mmc/host/mmci.c1
-rw-r--r--drivers/mmc/host/mtk-sd.c19
-rw-r--r--drivers/mmc/host/mxcmmc.c3
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c9
-rw-r--r--drivers/mmc/host/pxamci.c6
-rw-r--r--drivers/mmc/host/s3cmci.c3
-rw-r--r--drivers/mmc/host/sdhci-acpi.c128
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c14
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c38
-rw-r--r--drivers/mmc/host/sdhci-iproc.c40
-rw-r--r--drivers/mmc/host/sdhci-msm.c25
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c109
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c53
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c19
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c40
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pic32.c257
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c48
-rw-r--r--drivers/mmc/host/sdhci-st.c40
-rw-r--r--drivers/mmc/host/sdhci-tegra.c76
-rw-r--r--drivers/mmc/host/sdhci.c539
-rw-r--r--drivers/mmc/host/sdhci.h8
-rw-r--r--drivers/mmc/host/sdricoh_cs.c26
-rw-r--r--drivers/mmc/host/sh_mmcif.c4
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c54
-rw-r--r--drivers/mmc/host/sunxi-mmc.c100
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c15
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c29
-rw-r--r--drivers/mmc/host/usdhi6rol0.c4
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm47xxpart.c42
-rw-r--r--drivers/mtd/bcm63xxpart.c182
-rw-r--r--drivers/mtd/devices/block2mtd.c6
-rw-r--r--drivers/mtd/devices/docg3.c5
-rw-r--r--drivers/mtd/devices/mtdram.c5
-rw-r--r--drivers/mtd/mtdpart.c5
-rw-r--r--drivers/mtd/mtdswap.c24
-rw-r--r--drivers/mtd/nand/Kconfig10
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel_nand.c89
-rw-r--r--drivers/mtd/nand/atmel_nand_ecc.h9
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h3
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c42
-rw-r--r--drivers/mtd/nand/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c3
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c73
-rw-r--r--drivers/mtd/nand/hisi504_nand.c1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c3
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c7
-rw-r--r--drivers/mtd/nand/nand_base.c88
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_bch.c27
-rw-r--r--drivers/mtd/nand/nand_ids.c4
-rw-r--r--drivers/mtd/nand/nandsim.c6
-rw-r--r--drivers/mtd/nand/nuc900_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c28
-rw-r--r--drivers/mtd/nand/plat_nand.c1
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c191
-rw-r--r--drivers/mtd/nand/qcom_nandc.c2223
-rw-r--r--drivers/mtd/nand/s3c2410.c3
-rw-r--r--drivers/mtd/nand/sunxi_nand.c287
-rw-r--r--drivers/mtd/nand/vf610_nfc.c2
-rw-r--r--drivers/mtd/onenand/omap2.c2
-rw-r--r--drivers/mtd/onenand/onenand_base.c32
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c5
-rw-r--r--drivers/mtd/spi-nor/Kconfig3
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c167
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c4
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c246
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c2
-rw-r--r--drivers/mtd/tests/oobtest.c49
-rw-r--r--drivers/mtd/ubi/misc.c49
-rw-r--r--drivers/mtd/ubi/ubi.h16
-rw-r--r--drivers/net/Kconfig13
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bonding/bond_3ad.c44
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c104
-rw-r--r--drivers/net/bonding/bond_options.c7
-rw-r--r--drivers/net/can/Kconfig66
-rw-r--r--drivers/net/can/Makefile17
-rw-r--r--drivers/net/can/ifi_canfd/Kconfig8
-rw-r--r--drivers/net/can/ifi_canfd/Makefile5
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c944
-rw-r--r--drivers/net/can/rcar_can.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c87
-rw-r--r--drivers/net/can/spi/mcp251x.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c8
-rw-r--r--drivers/net/can/usb/gs_usb.c24
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/Makefile4
-rw-r--r--drivers/net/dsa/bcm_sf2.c18
-rw-r--r--drivers/net/dsa/bcm_sf2.h2
-rw-r--r--drivers/net/dsa/mv88e6123.c (renamed from drivers/net/dsa/mv88e6123_61_65.c)20
-rw-r--r--drivers/net/dsa/mv88e6171.c8
-rw-r--r--drivers/net/dsa/mv88e6352.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx.c768
-rw-r--r--drivers/net/dsa/mv88e6xxx.h37
-rw-r--r--drivers/net/ethernet/3com/3c59x.c14
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h10
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c39
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c388
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c42
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c53
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h11
-rw-r--r--drivers/net/ethernet/apm/xgene/Makefile3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.c735
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.h297
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c31
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h14
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c556
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h45
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h2
-rw-r--r--drivers/net/ethernet/arc/emac.h60
-rw-r--r--drivers/net/ethernet/arc/emac_main.c35
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c39
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c41
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c14
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig10
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c41
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h19
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c57
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c207
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c505
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h69
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c325
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c63
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c26
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h6
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c183
-rw-r--r--drivers/net/ethernet/cadence/macb.h6
-rw-r--r--drivers/net/ethernet/cavium/Kconfig13
-rw-r--r--drivers/net/ethernet/cavium/Makefile1
-rw-r--r--drivers/net/ethernet/cavium/octeon/Makefile (renamed from drivers/net/ethernet/octeon/Makefile)0
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c (renamed from drivers/net/ethernet/octeon/octeon_mgmt.c)0
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h64
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c40
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c40
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c78
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c235
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c464
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h310
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c47
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c135
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h219
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_values.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c385
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c55
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c143
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h22
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c45
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h36
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c133
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h57
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c23
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c216
-rw-r--r--drivers/net/ethernet/ethoc.c1
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c248
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.h350
-rw-r--r--drivers/net/ethernet/freescale/fec.h38
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c414
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c108
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c110
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c33
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c84
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c54
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c196
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c48
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c63
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c5
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c62
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/intel/Kconfig9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h5
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c30
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c85
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c12
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c11
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h67
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h87
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c1012
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h232
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c776
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c328
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c296
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c538
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h39
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c878
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h89
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c283
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h87
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c125
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h15
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c826
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h80
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c124
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c121
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c41
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c213
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c18
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c959
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c20
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c143
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c398
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h103
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/jme.c26
-rw-r--r--drivers/net/ethernet/marvell/Kconfig22
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c578
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c487
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h182
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c18
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig17
-rw-r--r--drivers/net/ethernet/mediatek/Makefile5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c1808
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h421
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c362
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c242
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c302
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c516
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c429
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c273
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c254
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c148
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c280
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c20
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c10
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c4
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h2
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c8
-rw-r--r--drivers/net/ethernet/octeon/Kconfig14
-rw-r--r--drivers/net/ethernet/pasemi/Kconfig5
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c50
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.h4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h51
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c519
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h2619
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c155
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2221
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c586
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c46
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c334
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c81
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c15
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h40
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c6
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c1113
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c11
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c21
-rw-r--r--drivers/net/ethernet/renesas/Kconfig4
-rw-r--r--drivers/net/ethernet/renesas/ravb.h4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c85
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c25
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c115
-rw-r--r--drivers/net/ethernet/rocker/Makefile1
-rw-r--r--drivers/net/ethernet/rocker/rocker.c5495
-rw-r--r--drivers/net/ethernet/rocker/rocker.h583
-rw-r--r--drivers/net/ethernet/rocker/rocker_hw.h467
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2909
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2958
-rw-r--r--drivers/net/ethernet/rocker/rocker_tlv.c53
-rw-r--r--drivers/net/ethernet/rocker/rocker_tlv.h201
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/Makefile2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c91
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h38
-rw-r--r--drivers/net/ethernet/sfc/ef10.c15
-rw-r--r--drivers/net/ethernet/sfc/efx.h3
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c184
-rw-r--r--drivers/net/ethernet/sfc/tx.c10
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c87
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h63
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h330
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h77
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c111
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c226
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c154
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c495
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c126
-rw-r--r--drivers/net/ethernet/sun/Kconfig16
-rw-r--r--drivers/net/ethernet/sun/Makefile2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c468
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c1733
-rw-r--r--drivers/net/ethernet/sun/sunvnet.h114
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c1732
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h145
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c45
-rw-r--r--drivers/net/ethernet/ti/cpsw.c71
-rw-r--r--drivers/net/ethernet/ti/cpsw.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c14
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c1
-rw-r--r--drivers/net/geneve.c209
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c12
-rw-r--r--drivers/net/hamradio/dmascc.c13
-rw-r--r--drivers/net/hyperv/hyperv_net.h11
-rw-r--r--drivers/net/hyperv/netvsc_drv.c75
-rw-r--r--drivers/net/hyperv/rndis_filter.c35
-rw-r--r--drivers/net/ieee802154/at86rf230.c25
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/ipvlan/ipvlan.h10
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c46
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c22
-rw-r--r--drivers/net/irda/irtty-sir.c10
-rw-r--r--drivers/net/macsec.c3323
-rw-r--r--drivers/net/macvlan.c12
-rw-r--r--drivers/net/macvtap.c11
-rw-r--r--drivers/net/phy/Kconfig22
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/at803x.c72
-rw-r--r--drivers/net/phy/bcm7xxx.c64
-rw-r--r--drivers/net/phy/dp83848.c88
-rw-r--r--drivers/net/phy/fixed_phy.c11
-rw-r--r--drivers/net/phy/marvell.c52
-rw-r--r--drivers/net/phy/mdio-cavium.c153
-rw-r--r--drivers/net/phy/mdio-cavium.h119
-rw-r--r--drivers/net/phy/mdio-octeon.c280
-rw-r--r--drivers/net/phy/mdio-sun4i.c4
-rw-r--r--drivers/net/phy/mdio-thunder.c154
-rw-r--r--drivers/net/phy/micrel.c37
-rw-r--r--drivers/net/phy/phy.c8
-rw-r--r--drivers/net/phy/spi_ks8995.c306
-rw-r--r--drivers/net/ppp/ppp_generic.c58
-rw-r--r--drivers/net/ppp/ppp_mppe.c99
-rw-r--r--drivers/net/rionet.c277
-rw-r--r--drivers/net/team/team.c24
-rw-r--r--drivers/net/tun.c29
-rw-r--r--drivers/net/usb/ax88172a.c1
-rw-r--r--drivers/net/usb/cdc_mbim.c9
-rw-r--r--drivers/net/usb/cdc_ncm.c33
-rw-r--r--drivers/net/usb/lan78xx.c401
-rw-r--r--drivers/net/usb/lan78xx.h1
-rw-r--r--drivers/net/usb/pegasus.c10
-rw-r--r--drivers/net/usb/plusb.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c11
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/usb/smsc95xx.c12
-rw-r--r--drivers/net/usb/usbnet.c7
-rw-r--r--drivers/net/veth.c26
-rw-r--r--drivers/net/virtio_net.c64
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c93
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c212
-rw-r--r--drivers/net/vxlan.c697
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig6
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c933
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.h87
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c48
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h20
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c46
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c41
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h169
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c54
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c146
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c39
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h24
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c64
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c171
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h49
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h11
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c162
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h92
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c79
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h65
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h4
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h6
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h73
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c172
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c112
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c46
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h11
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c174
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c105
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c640
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c45
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c152
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c180
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c38
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c57
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h68
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c22
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c216
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c116
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-9000.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c120
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h157
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h142
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c228
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c495
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c324
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c213
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h142
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c858
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c432
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c87
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c15
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c29
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.h4
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c16
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c38
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c40
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c9
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/README10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c274
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c61
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h24
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h90
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h49
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c325
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h50
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c310
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c51
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c7
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h18
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.h20
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c3495
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h693
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h152
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c68
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h22
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c19
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c9
-rw-r--r--drivers/net/wireless/st/cw1200/pm.h9
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h4
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c66
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig2
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c86
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h1
-rw-r--r--drivers/net/xen-netback/common.h2
-rw-r--r--drivers/net/xen-netback/netback.c66
-rw-r--r--drivers/net/xen-netback/xenbus.c91
-rw-r--r--drivers/nfc/microread/i2c.c8
-rw-r--r--drivers/nfc/pn544/i2c.c14
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c36
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c30
-rw-r--r--drivers/ntb/ntb_transport.c31
-rw-r--r--drivers/ntb/test/ntb_perf.c78
-rw-r--r--drivers/nvdimm/blk.c18
-rw-r--r--drivers/nvdimm/btt.c21
-rw-r--r--drivers/nvdimm/bus.c135
-rw-r--r--drivers/nvdimm/core.c111
-rw-r--r--drivers/nvdimm/dimm_devs.c6
-rw-r--r--drivers/nvdimm/e820.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c7
-rw-r--r--drivers/nvdimm/nd.h8
-rw-r--r--drivers/nvdimm/pfn.h23
-rw-r--r--drivers/nvdimm/pfn_devs.c63
-rw-r--r--drivers/nvdimm/pmem.c276
-rw-r--r--drivers/nvdimm/region.c12
-rw-r--r--drivers/nvme/host/Kconfig6
-rw-r--r--drivers/nvme/host/Makefile10
-rw-r--r--drivers/nvme/host/core.c166
-rw-r--r--drivers/nvme/host/lightnvm.c63
-rw-r--r--drivers/nvme/host/nvme.h14
-rw-r--r--drivers/nvme/host/pci.c286
-rw-r--r--drivers/nvmem/Kconfig24
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/core.c145
-rw-r--r--drivers/nvmem/imx-ocotp.c2
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c330
-rw-r--r--drivers/nvmem/mtk-efuse.c110
-rw-r--r--drivers/nvmem/mxs-ocotp.c4
-rw-r--r--drivers/nvmem/rockchip-efuse.c90
-rw-r--r--drivers/nvmem/sunxi_sid.c9
-rw-r--r--drivers/of/base.c15
-rw-r--r--drivers/of/fdt.c51
-rw-r--r--drivers/of/fdt_address.c11
-rw-r--r--drivers/of/of_mdio.c15
-rw-r--r--drivers/of/of_pci.c1
-rw-r--r--drivers/of/of_reserved_mem.c4
-rw-r--r--drivers/of/resolver.c7
-rw-r--r--drivers/of/unittest.c5
-rw-r--r--drivers/oprofile/oprofilefs.c4
-rw-r--r--drivers/parisc/Kconfig2
-rw-r--r--drivers/parisc/eisa_enumerator.c4
-rw-r--r--drivers/pci/Kconfig11
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/access.c275
-rw-r--r--drivers/pci/bus.c14
-rw-r--r--drivers/pci/host/Kconfig47
-rw-r--r--drivers/pci/host/Makefile6
-rw-r--r--drivers/pci/host/pci-dra7xx.c11
-rw-r--r--drivers/pci/host/pci-exynos.c13
-rw-r--r--drivers/pci/host/pci-host-common.c194
-rw-r--r--drivers/pci/host/pci-host-common.h47
-rw-r--r--drivers/pci/host/pci-host-generic.c181
-rw-r--r--drivers/pci/host/pci-hyperv.c2346
-rw-r--r--drivers/pci/host/pci-imx6.c186
-rw-r--r--drivers/pci/host/pci-keystone.c13
-rw-r--r--drivers/pci/host/pci-layerscape.c1
-rw-r--r--drivers/pci/host/pci-tegra.c85
-rw-r--r--drivers/pci/host/pci-thunder-ecam.c403
-rw-r--r--drivers/pci/host/pci-thunder-pem.c346
-rw-r--r--drivers/pci/host/pcie-altera.c3
-rw-r--r--drivers/pci/host/pcie-designware-plat.c138
-rw-r--r--drivers/pci/host/pcie-designware.c44
-rw-r--r--drivers/pci/host/pcie-designware.h6
-rw-r--r--drivers/pci/host/pcie-qcom.c12
-rw-r--r--drivers/pci/host/pcie-rcar.c14
-rw-r--r--drivers/pci/host/pcie-spear13xx.c14
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c881
-rw-r--r--drivers/pci/host/pcie-xilinx.c191
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c8
-rw-r--r--drivers/pci/iov.c14
-rw-r--r--drivers/pci/pci-label.c2
-rw-r--r--drivers/pci/pci-sysfs.c96
-rw-r--r--drivers/pci/pci.c44
-rw-r--r--drivers/pci/pci.h17
-rw-r--r--drivers/pci/pcie/Kconfig7
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c90
-rw-r--r--drivers/pci/pcie/pme.c11
-rw-r--r--drivers/pci/probe.c45
-rw-r--r--drivers/pci/quirks.c46
-rw-r--r--drivers/pci/remove.c5
-rw-r--r--drivers/pci/rom.c83
-rw-r--r--drivers/pci/setup-bus.c1
-rw-r--r--drivers/pci/setup-res.c6
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/db1xxx_ss.c11
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c1
-rw-r--r--drivers/perf/arm_pmu.c120
-rw-r--r--drivers/phy/Kconfig16
-rw-r--r--drivers/phy/Makefile2
-rw-r--r--drivers/phy/phy-dm816x-usb.c4
-rw-r--r--drivers/phy/phy-rcar-gen3-usb2.c83
-rw-r--r--drivers/phy/phy-rockchip-dp.c154
-rw-r--r--drivers/phy/phy-rockchip-emmc.c232
-rw-r--r--drivers/phy/phy-rockchip-usb.c233
-rw-r--r--drivers/phy/phy-twl4030-usb.c4
-rw-r--r--drivers/pinctrl/Kconfig52
-rw-r--r--drivers/pinctrl/Makefile16
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c33
-rw-r--r--drivers/pinctrl/core.c35
-rw-r--r--drivers/pinctrl/core.h4
-rw-r--r--drivers/pinctrl/freescale/Kconfig1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c53
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx50.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx53.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6dl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6q.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sx.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6ul.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7d.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c39
-rw-r--r--drivers/pinctrl/mediatek/Kconfig22
-rw-r--r--drivers/pinctrl/mediatek/Makefile12
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt2701.c585
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6397.c10
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c379
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8127.c8
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8135.c8
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8173.c8
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c64
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.h12
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h2323
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h1936
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c143
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h21
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c137
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c182
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c7
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c6
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c2
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c148
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c2312
-rw-r--r--drivers/pinctrl/pinctrl-pic32.h141
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c24
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c369
-rw-r--r--drivers/pinctrl/pinctrl-single.c14
-rw-r--r--drivers/pinctrl/pinctrl-st.c1
-rw-r--r--drivers/pinctrl/pinctrl-xway.c17
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c2
-rw-r--r--drivers/pinctrl/pinmux.c13
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c3
-rw-r--r--drivers/pinctrl/qcom/Kconfig8
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c453
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c30
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig56
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile7
-rw-r--r--drivers/pinctrl/sh-pfc/core.c28
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c690
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c1136
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c774
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c624
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c1405
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c915
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c412
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h98
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c18
-rw-r--r--drivers/pinctrl/stm32/Kconfig16
-rw-r--r--drivers/pinctrl/stm32/Makefile5
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c829
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h51
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32f429.c1591
-rw-r--r--drivers/pinctrl/sunxi/Kconfig36
-rw-r--r--drivers/pinctrl/sunxi/Makefile4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c601
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c32
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c106
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c28
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h21
-rw-r--r--drivers/pinctrl/tegra/Kconfig30
-rw-r--r--drivers/pinctrl/tegra/Makefile7
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c (renamed from drivers/pinctrl/pinctrl-tegra-xusb.c)4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c (renamed from drivers/pinctrl/pinctrl-tegra.c)4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h (renamed from drivers/pinctrl/pinctrl-tegra.h)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra114.c (renamed from drivers/pinctrl/pinctrl-tegra114.c)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra124.c (renamed from drivers/pinctrl/pinctrl-tegra124.c)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c (renamed from drivers/pinctrl/pinctrl-tegra20.c)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra210.c (renamed from drivers/pinctrl/pinctrl-tegra210.c)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra30.c (renamed from drivers/pinctrl/pinctrl-tegra30.c)0
-rw-r--r--drivers/pinctrl/uniphier/Kconfig14
-rw-r--r--drivers/pinctrl/uniphier/Makefile14
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c (renamed from drivers/pinctrl/uniphier/pinctrl-proxstream2.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c)0
-rw-r--r--drivers/platform/Kconfig3
-rw-r--r--drivers/platform/goldfish/Kconfig19
-rw-r--r--drivers/platform/goldfish/Makefile2
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c182
-rw-r--r--drivers/platform/x86/Kconfig15
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/alienware-wmi.c286
-rw-r--r--drivers/platform/x86/apple-gmux.c111
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/dell-laptop.c370
-rw-r--r--drivers/platform/x86/dell-rbtn.c15
-rw-r--r--drivers/platform/x86/dell-smbios.c193
-rw-r--r--drivers/platform/x86/dell-smbios.h46
-rw-r--r--drivers/platform/x86/dell-wmi.c238
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c8
-rw-r--r--drivers/platform/x86/hp-wmi.c46
-rw-r--r--drivers/platform/x86/hp_accel.c6
-rw-r--r--drivers/platform/x86/ideapad-laptop.c14
-rw-r--r--drivers/platform/x86/intel-hid.c5
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c56
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c48
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c35
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c15
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c8
-rw-r--r--drivers/platform/x86/toshiba_acpi.c117
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c4
-rw-r--r--drivers/power/88pm860x_charger.c2
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ab8500_btemp.c15
-rw-r--r--drivers/power/ab8500_charger.c16
-rw-r--r--drivers/power/ab8500_fg.c15
-rw-r--r--drivers/power/abx500_chargalg.c14
-rw-r--r--drivers/power/act8945a_charger.c359
-rw-r--r--drivers/power/avs/rockchip-io-domain.c58
-rw-r--r--drivers/power/bq2415x_charger.c22
-rw-r--r--drivers/power/bq24735-charger.c146
-rw-r--r--drivers/power/bq27xxx_battery.c12
-rw-r--r--drivers/power/bq27xxx_battery_i2c.c24
-rw-r--r--drivers/power/charger-manager.c27
-rw-r--r--drivers/power/collie_battery.c3
-rw-r--r--drivers/power/goldfish_battery.c17
-rw-r--r--drivers/power/ipaq_micro_battery.c4
-rw-r--r--drivers/power/isp1704_charger.c19
-rw-r--r--drivers/power/jz4740-battery.c2
-rw-r--r--drivers/power/lp8788-charger.c2
-rw-r--r--drivers/power/pm2301_charger.c22
-rw-r--r--drivers/power/power_supply_sysfs.c3
-rw-r--r--drivers/power/reset/Kconfig2
-rw-r--r--drivers/power/reset/arm-versatile-reboot.c39
-rw-r--r--drivers/powercap/intel_rapl.c221
-rw-r--r--drivers/ptp/ptp_chardev.c27
-rw-r--r--drivers/pwm/Kconfig2
-rw-r--r--drivers/pwm/pwm-brcmstb.c4
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c2
-rw-r--r--drivers/pwm/pwm-img.c5
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c5
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c74
-rw-r--r--drivers/rapidio/Kconfig8
-rw-r--r--drivers/rapidio/devices/Makefile1
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2733
-rw-r--r--drivers/rapidio/devices/tsi721.c1034
-rw-r--r--drivers/rapidio/devices/tsi721.h87
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c397
-rw-r--r--drivers/rapidio/rio-driver.c12
-rw-r--r--drivers/rapidio/rio-scan.c135
-rw-r--r--drivers/rapidio/rio.c441
-rw-r--r--drivers/rapidio/rio.h5
-rw-r--r--drivers/regulator/Kconfig28
-rw-r--r--drivers/regulator/Makefile9
-rw-r--r--drivers/regulator/act8865-regulator.c8
-rw-r--r--drivers/regulator/act8945a-regulator.c165
-rw-r--r--drivers/regulator/ad5398.c6
-rw-r--r--drivers/regulator/axp20x-regulator.c69
-rw-r--r--drivers/regulator/core.c75
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/da9210-regulator.c5
-rw-r--r--drivers/regulator/fan53555.c18
-rw-r--r--drivers/regulator/gpio-regulator.c8
-rw-r--r--drivers/regulator/helpers.c23
-rw-r--r--drivers/regulator/hi655x-regulator.c227
-rw-r--r--drivers/regulator/lp872x.c38
-rw-r--r--drivers/regulator/ltc3589.c15
-rw-r--r--drivers/regulator/max77620-regulator.c813
-rw-r--r--drivers/regulator/max77686-regulator.c (renamed from drivers/regulator/max77686.c)0
-rw-r--r--drivers/regulator/max77802-regulator.c (renamed from drivers/regulator/max77802.c)0
-rw-r--r--drivers/regulator/mt6397-regulator.c15
-rw-r--r--drivers/regulator/of_regulator.c21
-rw-r--r--drivers/regulator/pv88060-regulator.c8
-rw-r--r--drivers/regulator/pv88090-regulator.c8
-rw-r--r--drivers/regulator/pwm-regulator.c29
-rw-r--r--drivers/regulator/s2mps11.c71
-rw-r--r--drivers/regulator/s5m8767.c13
-rw-r--r--drivers/regulator/tps65912-regulator.c613
-rw-r--r--drivers/regulator/vexpress-regulator.c (renamed from drivers/regulator/vexpress.c)0
-rw-r--r--drivers/remoteproc/Kconfig9
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/remoteproc_core.c4
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c36
-rw-r--r--drivers/remoteproc/st_remoteproc.c297
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c1
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c10
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c6
-rw-r--r--drivers/reset/reset-ath79.c2
-rw-r--r--drivers/reset/reset-berlin.c5
-rw-r--r--drivers/reset/reset-lpc18xx.c2
-rw-r--r--drivers/reset/reset-pistachio.c154
-rw-r--r--drivers/reset/reset-socfpga.c2
-rw-r--r--drivers/reset/reset-sunxi.c2
-rw-r--r--drivers/reset/reset-zynq.c2
-rw-r--r--drivers/reset/sti/reset-syscfg.c2
-rw-r--r--drivers/rtc/Kconfig252
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/class.c13
-rw-r--r--drivers/rtc/interface.c54
-rw-r--r--drivers/rtc/rtc-abx80x.c252
-rw-r--r--drivers/rtc/rtc-as3722.c2
-rw-r--r--drivers/rtc/rtc-asm9260.c355
-rw-r--r--drivers/rtc/rtc-ds1305.c4
-rw-r--r--drivers/rtc/rtc-ds1307.c411
-rw-r--r--drivers/rtc/rtc-ds1685.c9
-rw-r--r--drivers/rtc/rtc-ds3232.c476
-rw-r--r--drivers/rtc/rtc-ds3234.c171
-rw-r--r--drivers/rtc/rtc-generic.c12
-rw-r--r--drivers/rtc/rtc-hym8563.c2
-rw-r--r--drivers/rtc/rtc-m41t80.c6
-rw-r--r--drivers/rtc/rtc-max77686.c565
-rw-r--r--drivers/rtc/rtc-max77802.c502
-rw-r--r--drivers/rtc/rtc-mcp795.c10
-rw-r--r--drivers/rtc/rtc-mt6397.c1
-rw-r--r--drivers/rtc/rtc-palmas.c3
-rw-r--r--drivers/rtc/rtc-pcf2123.c271
-rw-r--r--drivers/rtc/rtc-pcf2127.c335
-rw-r--r--drivers/rtc/rtc-pcf85063.c162
-rw-r--r--drivers/rtc/rtc-pcf8523.c25
-rw-r--r--drivers/rtc/rtc-pic32.c411
-rw-r--r--drivers/rtc/rtc-rv3029c2.c723
-rw-r--r--drivers/rtc/rtc-rv8803.c67
-rw-r--r--drivers/rtc/rtc-rx6110.c402
-rw-r--r--drivers/rtc/rtc-rx8025.c27
-rw-r--r--drivers/rtc/rtc-s3c.c19
-rw-r--r--drivers/rtc/rtc-s5m.c8
-rw-r--r--drivers/rtc/rtc-sysfs.c35
-rw-r--r--drivers/rtc/rtc-tps6586x.c2
-rw-r--r--drivers/rtc/rtc-tps65910.c2
-rw-r--r--drivers/rtc/rtc-tps80031.c2
-rw-r--r--drivers/rtc/rtc-vr41xx.c13
-rw-r--r--drivers/s390/block/dasd_alias.c216
-rw-r--r--drivers/s390/block/dasd_devmap.c10
-rw-r--r--drivers/s390/block/dasd_diag.c78
-rw-r--r--drivers/s390/block/dasd_eckd.c347
-rw-r--r--drivers/s390/block/dasd_eckd.h3
-rw-r--r--drivers/s390/block/dasd_fba.c28
-rw-r--r--drivers/s390/block/dasd_genhd.c4
-rw-r--r--drivers/s390/block/dasd_int.h11
-rw-r--r--drivers/s390/block/dasd_ioctl.c38
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c18
-rw-r--r--drivers/s390/block/scm_blk.c2
-rw-r--r--drivers/s390/char/con3215.c3
-rw-r--r--drivers/s390/char/monreader.c10
-rw-r--r--drivers/s390/char/sclp_cmd.c27
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c6
-rw-r--r--drivers/s390/char/sclp_ctl.c12
-rw-r--r--drivers/s390/char/tape_core.c4
-rw-r--r--drivers/s390/char/vmlogrdr.c6
-rw-r--r--drivers/s390/cio/blacklist.c9
-rw-r--r--drivers/s390/cio/ccwreq.c13
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/device.c23
-rw-r--r--drivers/s390/net/lcs.c4
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c15
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/NCR5380.c133
-rw-r--r--drivers/scsi/aacraid/aachba.c27
-rw-r--r--drivers/scsi/aacraid/aacraid.h14
-rw-r--r--drivers/scsi/aacraid/commctrl.c13
-rw-r--r--drivers/scsi/aacraid/comminit.c6
-rw-r--r--drivers/scsi/aacraid/commsup.c69
-rw-r--r--drivers/scsi/aacraid/dpcsup.c2
-rw-r--r--drivers/scsi/aacraid/linit.c202
-rw-r--r--drivers/scsi/aacraid/src.c30
-rw-r--r--drivers/scsi/aha1542.c3
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c1
-rw-r--r--drivers/scsi/arm/acornscsi.c3
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/atari_NCR5380.c133
-rw-r--r--drivers/scsi/be2iscsi/be.h20
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c867
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h148
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c62
-rw-r--r--drivers/scsi/be2iscsi/be_main.c313
-rw-r--r--drivers/scsi/be2iscsi/be_main.h25
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c573
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h2
-rw-r--r--drivers/scsi/bfa/bfa_core.c19
-rw-r--r--drivers/scsi/bfa/bfa_cs.h41
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c9
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c5
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.h17
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c1
-rw-r--r--drivers/scsi/cxlflash/common.h9
-rw-r--r--drivers/scsi/cxlflash/main.c224
-rw-r--r--drivers/scsi/cxlflash/main.h5
-rw-r--r--drivers/scsi/cxlflash/superpipe.c195
-rw-r--r--drivers/scsi/cxlflash/superpipe.h1
-rw-r--r--drivers/scsi/device_handler/Kconfig8
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c980
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c7
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c38
-rw-r--r--drivers/scsi/dpt_i2o.c3
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c5
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c3
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c36
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c13
-rw-r--r--drivers/scsi/gdth.c7
-rw-r--r--drivers/scsi/gdth_proc.c11
-rw-r--r--drivers/scsi/hisi_sas/Makefile2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h43
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c136
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c100
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c2214
-rw-r--r--drivers/scsi/hosts.c29
-rw-r--r--drivers/scsi/hpsa.c52
-rw-r--r--drivers/scsi/hpsa.h3
-rw-r--r--drivers/scsi/hpsa_cmd.h5
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c122
-rw-r--r--drivers/scsi/ibmvscsi/viosrp.h26
-rw-r--r--drivers/scsi/imm.c7
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c5
-rw-r--r--drivers/scsi/iscsi_tcp.c54
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/libiscsi_tcp.c29
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c43
-rw-r--r--drivers/scsi/mac53c94.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h353
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c1079
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c1287
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h136
-rw-r--r--drivers/scsi/mesh.c2
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h82
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h127
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h22
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h117
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_raid.h5
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_sas.h10
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h5
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_type.h5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c186
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h47
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c37
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c272
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c25
-rw-r--r--drivers/scsi/mvumi.c4
-rw-r--r--drivers/scsi/osd/osd_initiator.c3
-rw-r--r--drivers/scsi/ppa.c46
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c201
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h34
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c146
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h57
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c55
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c119
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c70
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c73
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c79
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c6
-rw-r--r--drivers/scsi/qlogicpti.c4
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_common.c12
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_pm.c10
-rw-r--r--drivers/scsi/scsi_sas_internal.h2
-rw-r--r--drivers/scsi/scsi_scan.c23
-rw-r--r--drivers/scsi/scsi_sysfs.c112
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c2
-rw-r--r--drivers/scsi/sd.c51
-rw-r--r--drivers/scsi/sd.h7
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/sim710.c3
-rw-r--r--drivers/scsi/snic/snic_ctl.c2
-rw-r--r--drivers/scsi/st.c128
-rw-r--r--drivers/scsi/stex.c153
-rw-r--r--drivers/scsi/storvsc_drv.c5
-rw-r--r--drivers/scsi/ufs/Kconfig1
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c155
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h9
-rw-r--r--drivers/scsi/ufs/ufs.h35
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h151
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c19
-rw-r--r--drivers/scsi/ufs/ufshcd.c1211
-rw-r--r--drivers/scsi/ufs/ufshcd.h46
-rw-r--r--drivers/scsi/ufs/ufshci.h4
-rw-r--r--drivers/scsi/ufs/unipro.h22
-rw-r--r--drivers/sh/superhyway/superhyway.c2
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/fsl/qe/gpio.c2
-rw-r--r--drivers/soc/fsl/qe/qe_common.c66
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c11
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c41
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c49
-rw-r--r--drivers/soc/rockchip/pm_domains.c34
-rw-r--r--drivers/soc/samsung/Kconfig13
-rw-r--r--drivers/soc/samsung/Makefile2
-rw-r--r--drivers/soc/samsung/exynos-pmu.c141
-rw-r--r--drivers/soc/samsung/exynos-pmu.h44
-rw-r--r--drivers/soc/samsung/exynos3250-pmu.c175
-rw-r--r--drivers/soc/samsung/exynos4-pmu.c222
-rw-r--r--drivers/soc/samsung/exynos5250-pmu.c195
-rw-r--r--drivers/soc/samsung/exynos5420-pmu.c280
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c5
-rw-r--r--drivers/soc/ti/knav_qmss.h4
-rw-r--r--drivers/soc/ti/knav_qmss_acc.c14
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c22
-rw-r--r--drivers/spi/Kconfig17
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-axi-spi-engine.c1
-rw-r--r--drivers/spi/spi-bcm53xx.c78
-rw-r--r--drivers/spi/spi-cadence.c244
-rw-r--r--drivers/spi/spi-davinci.c76
-rw-r--r--drivers/spi/spi-dln2.c2
-rw-r--r--drivers/spi/spi-dw-pci.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c11
-rw-r--r--drivers/spi/spi-fsl-espi.c30
-rw-r--r--drivers/spi/spi-imx.c16
-rw-r--r--drivers/spi/spi-omap2-mcspi.c83
-rw-r--r--drivers/spi/spi-pic32-sqi.c727
-rw-r--r--drivers/spi/spi-pic32.c878
-rw-r--r--drivers/spi/spi-pxa2xx.c3
-rw-r--r--drivers/spi/spi-rockchip.c28
-rw-r--r--drivers/spi/spi-ti-qspi.c45
-rw-r--r--drivers/spi/spi.c11
-rw-r--r--drivers/spmi/spmi-pmic-arb.c153
-rw-r--r--drivers/ssb/Kconfig1
-rw-r--r--drivers/staging/Kconfig10
-rw-r--r--drivers/staging/Makefile10
-rw-r--r--drivers/staging/android/Kconfig9
-rw-r--r--drivers/staging/android/ashmem.c47
-rw-r--r--drivers/staging/android/ion/hisilicon/hi6220_ion.c5
-rw-r--r--drivers/staging/android/ion/ion.c143
-rw-r--r--drivers/staging/android/ion/ion.h20
-rw-r--r--drivers/staging/android/ion/ion_carveout_heap.c8
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c4
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_test.c8
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c7
-rw-r--r--drivers/staging/android/lowmemorykiller.c26
-rw-r--r--drivers/staging/android/sw_sync.c191
-rw-r--r--drivers/staging/android/sw_sync.h8
-rw-r--r--drivers/staging/android/sync.c469
-rw-r--r--drivers/staging/android/sync.h241
-rw-r--r--drivers/staging/android/sync_debug.c221
-rw-r--r--drivers/staging/android/timed_gpio.c5
-rw-r--r--drivers/staging/android/trace/sync.h44
-rw-r--r--drivers/staging/android/uapi/ashmem.h1
-rw-r--r--drivers/staging/android/uapi/sync.h37
-rw-r--r--drivers/staging/board/armadillo800eva.c1
-rw-r--r--drivers/staging/board/board.c1
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c9
-rw-r--r--drivers/staging/comedi/TODO1
-rw-r--r--drivers/staging/comedi/comedi.h599
-rw-r--r--drivers/staging/comedi/comedi_fops.c86
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.h3
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c4
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c68
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c141
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.c3
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c47
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c18
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c4
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c2
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c32
-rw-r--r--drivers/staging/comedi/drivers/mite.c72
-rw-r--r--drivers/staging/comedi/drivers/mite.h3
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_c_common.c0
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c637
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c20
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c2
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c5
-rw-r--r--drivers/staging/comedi/drivers/s626.c33
-rw-r--r--drivers/staging/dgap/Kconfig6
-rw-r--r--drivers/staging/dgap/Makefile1
-rw-r--r--drivers/staging/dgap/dgap.c7079
-rw-r--r--drivers/staging/dgap/dgap.h1229
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c2
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c57
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h8
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c2
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c8
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h1
-rw-r--r--drivers/staging/dgnc/dgnc_pci.h1
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c54
-rw-r--r--drivers/staging/dgnc/digi.h32
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c55
-rw-r--r--drivers/staging/fbtft/Kconfig12
-rw-r--r--drivers/staging/fbtft/Makefile2
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c4
-rw-r--r--drivers/staging/fbtft/fb_hx8340bn.c144
-rw-r--r--drivers/staging/fbtft/fb_hx8347d.c9
-rw-r--r--drivers/staging/fbtft/fb_hx8353d.c52
-rw-r--r--drivers/staging/fbtft/fb_hx8357d.c144
-rw-r--r--drivers/staging/fbtft/fb_hx8357d.h60
-rw-r--r--drivers/staging/fbtft/fb_ili9163.c189
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c13
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c81
-rw-r--r--drivers/staging/fbtft/fb_ili9340.c34
-rw-r--r--drivers/staging/fbtft/fb_ili9341.c69
-rw-r--r--drivers/staging/fbtft/fb_ili9481.c33
-rw-r--r--drivers/staging/fbtft/fb_ili9486.c35
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c6
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c42
-rw-r--r--drivers/staging/fbtft/fb_ssd1305.c216
-rw-r--r--drivers/staging/fbtft/fb_ssd1325.c205
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c42
-rw-r--r--drivers/staging/fbtft/fb_tinylcd.c28
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c4
-rw-r--r--drivers/staging/fbtft/fb_uc1701.c27
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c8
-rw-r--r--drivers/staging/fbtft/fbtft-core.c36
-rw-r--r--drivers/staging/fbtft/fbtft.h8
-rw-r--r--drivers/staging/fbtft/fbtft_device.c75
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig3
-rw-r--r--drivers/staging/fsl-mc/bus/Makefile2
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c333
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c125
-rw-r--r--drivers/staging/fsl-mc/bus/mc-allocator.c201
-rw-r--r--drivers/staging/fsl-mc/bus/mc-bus.c24
-rw-r--r--drivers/staging/fsl-mc/bus/mc-msi.c276
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c9
-rw-r--r--drivers/staging/fsl-mc/include/dprc.h2
-rw-r--r--drivers/staging/fsl-mc/include/mc-private.h41
-rw-r--r--drivers/staging/fsl-mc/include/mc.h26
-rw-r--r--drivers/staging/fwserial/dma_fifo.c10
-rw-r--r--drivers/staging/fwserial/fwserial.c17
-rw-r--r--drivers/staging/fwserial/fwserial.h1
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c8
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c20
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c34
-rw-r--r--drivers/staging/gdm72xx/Kconfig63
-rw-r--r--drivers/staging/gdm72xx/Makefile6
-rw-r--r--drivers/staging/gdm72xx/TODO2
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.c438
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.h74
-rw-r--r--drivers/staging/gdm72xx/gdm_sdio.c700
-rw-r--r--drivers/staging/gdm72xx/gdm_sdio.h63
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c789
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.h78
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.c815
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.h49
-rw-r--r--drivers/staging/gdm72xx/hci.h213
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c156
-rw-r--r--drivers/staging/gdm72xx/netlink_k.h25
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.c158
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.h21
-rw-r--r--drivers/staging/gdm72xx/usb_boot.c363
-rw-r--r--drivers/staging/gdm72xx/usb_boot.h22
-rw-r--r--drivers/staging/gdm72xx/usb_ids.h86
-rw-r--r--drivers/staging/gdm72xx/wm_ioctl.h101
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c18
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c50
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c15
-rw-r--r--drivers/staging/i4l/Documentation/README.act2000104
-rw-r--r--drivers/staging/i4l/Documentation/README.icn148
-rw-r--r--drivers/staging/i4l/Documentation/README.pcbit40
-rw-r--r--drivers/staging/i4l/Documentation/README.sc281
-rw-r--r--drivers/staging/i4l/Kconfig13
-rw-r--r--drivers/staging/i4l/Makefile5
-rw-r--r--drivers/staging/i4l/TODO3
-rw-r--r--drivers/staging/i4l/act2000/Kconfig (renamed from drivers/isdn/act2000/Kconfig)0
-rw-r--r--drivers/staging/i4l/act2000/Makefile (renamed from drivers/isdn/act2000/Makefile)0
-rw-r--r--drivers/staging/i4l/act2000/act2000.h (renamed from drivers/isdn/act2000/act2000.h)0
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c (renamed from drivers/isdn/act2000/act2000_isa.c)0
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.h (renamed from drivers/isdn/act2000/act2000_isa.h)0
-rw-r--r--drivers/staging/i4l/act2000/capi.c (renamed from drivers/isdn/act2000/capi.c)0
-rw-r--r--drivers/staging/i4l/act2000/capi.h (renamed from drivers/isdn/act2000/capi.h)0
-rw-r--r--drivers/staging/i4l/act2000/module.c (renamed from drivers/isdn/act2000/module.c)0
-rw-r--r--drivers/staging/i4l/icn/Kconfig (renamed from drivers/isdn/icn/Kconfig)0
-rw-r--r--drivers/staging/i4l/icn/Makefile (renamed from drivers/isdn/icn/Makefile)0
-rw-r--r--drivers/staging/i4l/icn/icn.c (renamed from drivers/isdn/icn/icn.c)2
-rw-r--r--drivers/staging/i4l/icn/icn.h (renamed from drivers/isdn/icn/icn.h)0
-rw-r--r--drivers/staging/i4l/pcbit/Kconfig (renamed from drivers/isdn/pcbit/Kconfig)0
-rw-r--r--drivers/staging/i4l/pcbit/Makefile (renamed from drivers/isdn/pcbit/Makefile)0
-rw-r--r--drivers/staging/i4l/pcbit/callbacks.c (renamed from drivers/isdn/pcbit/callbacks.c)0
-rw-r--r--drivers/staging/i4l/pcbit/callbacks.h (renamed from drivers/isdn/pcbit/callbacks.h)0
-rw-r--r--drivers/staging/i4l/pcbit/capi.c (renamed from drivers/isdn/pcbit/capi.c)0
-rw-r--r--drivers/staging/i4l/pcbit/capi.h (renamed from drivers/isdn/pcbit/capi.h)0
-rw-r--r--drivers/staging/i4l/pcbit/drv.c (renamed from drivers/isdn/pcbit/drv.c)0
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c (renamed from drivers/isdn/pcbit/edss1.c)0
-rw-r--r--drivers/staging/i4l/pcbit/edss1.h (renamed from drivers/isdn/pcbit/edss1.h)0
-rw-r--r--drivers/staging/i4l/pcbit/layer2.c (renamed from drivers/isdn/pcbit/layer2.c)0
-rw-r--r--drivers/staging/i4l/pcbit/layer2.h (renamed from drivers/isdn/pcbit/layer2.h)0
-rw-r--r--drivers/staging/i4l/pcbit/module.c (renamed from drivers/isdn/pcbit/module.c)0
-rw-r--r--drivers/staging/i4l/pcbit/pcbit.h (renamed from drivers/isdn/pcbit/pcbit.h)0
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-light28
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/Makefile1
-rw-r--r--drivers/staging/iio/TODO8
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h15
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c4
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c3
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c3
-rw-r--r--drivers/staging/iio/adc/Kconfig18
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7192.c78
-rw-r--r--drivers/staging/iio/adc/ad7280a.c4
-rw-r--r--drivers/staging/iio/adc/ad7606.h10
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c53
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c32
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c32
-rw-r--r--drivers/staging/iio/adc/ad7816.c4
-rw-r--r--drivers/staging/iio/adc/spear_adc.c33
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c15
-rw-r--r--drivers/staging/iio/cdc/ad7150.c34
-rw-r--r--drivers/staging/iio/cdc/ad7746.c4
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c8
-rw-r--r--drivers/staging/iio/light/isl29018.c75
-rw-r--r--drivers/staging/iio/light/isl29028.c31
-rw-r--r--drivers/staging/iio/light/tsl2583.c92
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c8
-rw-r--r--drivers/staging/iio/magnetometer/Kconfig40
-rw-r--r--drivers/staging/iio/magnetometer/Makefile7
-rw-r--r--drivers/staging/iio/meter/ade7754.c5
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c5
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c6
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c7
-rw-r--r--drivers/staging/iio/meter/ade7854.c25
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c12
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c39
-rw-r--r--drivers/staging/iio/trigger/Kconfig10
-rw-r--r--drivers/staging/iio/trigger/Makefile1
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c216
-rw-r--r--drivers/staging/lustre/Kconfig4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h12
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h61
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h7
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h3
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h23
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-dlc.h122
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h129
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h103
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetctl.h104
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetst.h104
-rw-r--r--drivers/staging/lustre/include/linux/lnet/nidstr.h9
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h9
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h51
-rw-r--r--drivers/staging/lustre/lnet/Kconfig14
-rw-r--r--drivers/staging/lustre/lnet/Makefile2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c705
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h159
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c1048
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c8
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c631
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h31
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c789
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c197
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c11
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c175
-rw-r--r--drivers/staging/lustre/lnet/libcfs/Makefile (renamed from drivers/staging/lustre/lustre/libcfs/Makefile)7
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c (renamed from drivers/staging/lustre/lustre/libcfs/debug.c)35
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c (renamed from drivers/staging/lustre/lustre/libcfs/fail.c)3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c (renamed from drivers/staging/lustre/lustre/libcfs/hash.c)153
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c (renamed from drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c)19
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c (renamed from drivers/staging/lustre/lustre/libcfs/libcfs_lock.c)12
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c (renamed from drivers/staging/lustre/lustre/libcfs/libcfs_mem.c)14
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c (renamed from drivers/staging/lustre/lustre/libcfs/libcfs_string.c)77
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c)78
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c)0
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c)117
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h)0
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c)7
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c)23
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c)2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-module.c)85
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c)0
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c)31
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c (renamed from drivers/staging/lustre/lustre/libcfs/module.c)303
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c (renamed from drivers/staging/lustre/lustre/libcfs/prng.c)13
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c (renamed from drivers/staging/lustre/lustre/libcfs/tracefile.c)181
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h (renamed from drivers/staging/lustre/lustre/libcfs/tracefile.h)93
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c (renamed from drivers/staging/lustre/lustre/libcfs/workitem.c)50
-rw-r--r--drivers/staging/lustre/lnet/lnet/Makefile2
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c113
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c1534
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c315
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c82
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c105
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c21
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c725
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c114
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c258
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c149
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c14
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c121
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c1025
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c119
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c235
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c677
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c189
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c208
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c363
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c350
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h63
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c537
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h181
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c482
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c32
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c55
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c453
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h184
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h332
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c25
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.h14
-rw-r--r--drivers/staging/lustre/lustre/Kconfig4
-rw-r--r--drivers/staging/lustre/lustre/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c32
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c16
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c33
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h35
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c69
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c14
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h491
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h7
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h18
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h57
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h24
-rw-r--r--drivers/staging/lustre/lustre/include/lu_ref.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h63
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h797
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h101
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h20
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h252
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h133
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h55
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h91
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h20
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h33
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_kernelcomm.h55
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h27
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lite.h50
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h15
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h20
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h86
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h13
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h128
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ver.h18
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h132
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h21
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h182
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h21
-rw-r--r--drivers/staging/lustre/lustre/include/uapi_kernelcomm.h (renamed from drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h)72
-rw-r--r--drivers/staging/lustre/lustre/lclient/glimpse.c3
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c81
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_misc.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c25
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c64
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h15
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c107
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c184
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c91
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c19
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c235
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c73
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c175
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c457
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c39
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h175
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c228
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c54
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_rmtacl.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c42
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c51
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c106
-rw-r--r--drivers/staging/lustre/lustre/llite/remote_perm.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c136
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c107
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c112
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c20
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h13
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c80
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c51
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c53
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c45
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c22
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h10
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c303
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h93
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c48
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c11
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c32
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c135
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c203
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c117
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c20
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c36
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c9
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c49
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c64
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c7
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c14
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c34
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h16
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c15
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c176
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c40
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c204
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c183
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/acl.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c161
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c98
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c56
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c113
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c57
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c109
-rw-r--r--drivers/staging/lustre/lustre/obdclass/kernelcomm.c (renamed from drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c)80
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c30
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c41
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c243
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c110
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c15
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c66
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c114
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c499
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c69
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c371
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h41
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c10
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c32
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c140
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c213
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c39
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c365
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c132
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c5
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c68
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c176
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c145
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c28
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c60
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c76
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c133
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c93
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c31
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c35
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c34
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c91
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c17
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c33
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c22
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c309
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c272
-rw-r--r--drivers/staging/media/Kconfig8
-rw-r--r--drivers/staging/media/Makefile5
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c7
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c11
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c14
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h7
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c7
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c24
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c20
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c46
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c24
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/mn88472/mn88472.c12
-rw-r--r--drivers/staging/media/mn88473/Kconfig7
-rw-r--r--drivers/staging/media/mn88473/Makefile5
-rw-r--r--drivers/staging/media/mn88473/TODO21
-rw-r--r--drivers/staging/media/mx2/Kconfig15
-rw-r--r--drivers/staging/media/mx2/Makefile3
-rw-r--r--drivers/staging/media/mx2/TODO10
-rw-r--r--drivers/staging/media/mx2/mx2_camera.c (renamed from drivers/media/platform/soc_camera/mx2_camera.c)0
-rw-r--r--drivers/staging/media/mx3/Kconfig15
-rw-r--r--drivers/staging/media/mx3/Makefile3
-rw-r--r--drivers/staging/media/mx3/TODO10
-rw-r--r--drivers/staging/media/mx3/mx3_camera.c (renamed from drivers/media/platform/soc_camera/mx3_camera.c)12
-rw-r--r--drivers/staging/media/omap1/Kconfig13
-rw-r--r--drivers/staging/media/omap1/Makefile3
-rw-r--r--drivers/staging/media/omap1/TODO8
-rw-r--r--drivers/staging/media/omap1/omap1_camera.c (renamed from drivers/media/platform/soc_camera/omap1_camera.c)0
-rw-r--r--drivers/staging/media/omap4iss/iss.c213
-rw-r--r--drivers/staging/media/omap4iss/iss.h6
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c15
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h1
-rw-r--r--drivers/staging/media/timb/Kconfig11
-rw-r--r--drivers/staging/media/timb/Makefile1
-rw-r--r--drivers/staging/media/timb/timblogiw.c (renamed from drivers/media/platform/timblogiw.c)0
-rw-r--r--drivers/staging/most/aim-cdev/cdev.c374
-rw-r--r--drivers/staging/most/aim-network/networking.c11
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.c4
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.h7
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c80
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.h2
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_sysfs.c4
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c21
-rw-r--r--drivers/staging/most/mostcore/core.c194
-rw-r--r--drivers/staging/most/mostcore/mostcore.h3
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c87
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.h1
-rw-r--r--drivers/staging/netlogic/platform_net.c14
-rw-r--r--drivers/staging/netlogic/xlr_net.c201
-rw-r--r--drivers/staging/netlogic/xlr_net.h978
-rw-r--r--drivers/staging/nvec/TODO2
-rw-r--r--drivers/staging/nvec/nvec.c99
-rw-r--r--drivers/staging/nvec/nvec.h5
-rw-r--r--drivers/staging/nvec/nvec_paz00.c13
-rw-r--r--drivers/staging/nvec/nvec_power.c14
-rw-r--r--drivers/staging/nvec/nvec_ps2.c2
-rw-r--r--drivers/staging/octeon-usb/TODO7
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c1094
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h536
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c28
-rw-r--r--drivers/staging/octeon/ethernet-mem.c4
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c291
-rw-r--r--drivers/staging/octeon/ethernet-rx.c60
-rw-r--r--drivers/staging/octeon/ethernet-spi.c2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c62
-rw-r--r--drivers/staging/octeon/ethernet.c68
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h5
-rw-r--r--drivers/staging/panel/Kconfig278
-rw-r--r--drivers/staging/panel/Makefile1
-rw-r--r--drivers/staging/panel/TODO8
-rw-r--r--drivers/staging/panel/lcd-panel-cgram.txt24
-rw-r--r--drivers/staging/rdma/hfi1/Kconfig14
-rw-r--r--drivers/staging/rdma/hfi1/Makefile10
-rw-r--r--drivers/staging/rdma/hfi1/TODO2
-rw-r--r--drivers/staging/rdma/hfi1/affinity.c430
-rw-r--r--drivers/staging/rdma/hfi1/affinity.h91
-rw-r--r--drivers/staging/rdma/hfi1/aspm.h309
-rw-r--r--drivers/staging/rdma/hfi1/chip.c2462
-rw-r--r--drivers/staging/rdma/hfi1/chip.h151
-rw-r--r--drivers/staging/rdma/hfi1/chip_registers.h20
-rw-r--r--drivers/staging/rdma/hfi1/common.h12
-rw-r--r--drivers/staging/rdma/hfi1/debugfs.c332
-rw-r--r--drivers/staging/rdma/hfi1/debugfs.h5
-rw-r--r--drivers/staging/rdma/hfi1/device.c5
-rw-r--r--drivers/staging/rdma/hfi1/device.h5
-rw-r--r--drivers/staging/rdma/hfi1/diag.c105
-rw-r--r--drivers/staging/rdma/hfi1/dma.c17
-rw-r--r--drivers/staging/rdma/hfi1/driver.c348
-rw-r--r--drivers/staging/rdma/hfi1/efivar.c11
-rw-r--r--drivers/staging/rdma/hfi1/efivar.h5
-rw-r--r--drivers/staging/rdma/hfi1/eprom.c117
-rw-r--r--drivers/staging/rdma/hfi1/eprom.h7
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c614
-rw-r--r--drivers/staging/rdma/hfi1/firmware.c587
-rw-r--r--drivers/staging/rdma/hfi1/hfi.h259
-rw-r--r--drivers/staging/rdma/hfi1/init.c192
-rw-r--r--drivers/staging/rdma/hfi1/intr.c29
-rw-r--r--drivers/staging/rdma/hfi1/iowait.h126
-rw-r--r--drivers/staging/rdma/hfi1/keys.c356
-rw-r--r--drivers/staging/rdma/hfi1/mad.c1048
-rw-r--r--drivers/staging/rdma/hfi1/mad.h14
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.c302
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.h74
-rw-r--r--drivers/staging/rdma/hfi1/mr.c473
-rw-r--r--drivers/staging/rdma/hfi1/opa_compat.h20
-rw-r--r--drivers/staging/rdma/hfi1/pcie.c278
-rw-r--r--drivers/staging/rdma/hfi1/pio.c365
-rw-r--r--drivers/staging/rdma/hfi1/pio.h118
-rw-r--r--drivers/staging/rdma/hfi1/pio_copy.c73
-rw-r--r--drivers/staging/rdma/hfi1/platform.c893
-rw-r--r--drivers/staging/rdma/hfi1/platform.h (renamed from drivers/staging/rdma/hfi1/platform_config.h)58
-rw-r--r--drivers/staging/rdma/hfi1/qp.c1644
-rw-r--r--drivers/staging/rdma/hfi1/qp.h198
-rw-r--r--drivers/staging/rdma/hfi1/qsfp.c270
-rw-r--r--drivers/staging/rdma/hfi1/qsfp.h57
-rw-r--r--drivers/staging/rdma/hfi1/rc.c765
-rw-r--r--drivers/staging/rdma/hfi1/ruc.c373
-rw-r--r--drivers/staging/rdma/hfi1/sdma.c365
-rw-r--r--drivers/staging/rdma/hfi1/sdma.h116
-rw-r--r--drivers/staging/rdma/hfi1/sdma_txreq.h135
-rw-r--r--drivers/staging/rdma/hfi1/sysfs.c136
-rw-r--r--drivers/staging/rdma/hfi1/trace.c53
-rw-r--r--drivers/staging/rdma/hfi1/trace.h1477
-rw-r--r--drivers/staging/rdma/hfi1/twsi.c205
-rw-r--r--drivers/staging/rdma/hfi1/twsi.h9
-rw-r--r--drivers/staging/rdma/hfi1/uc.c164
-rw-r--r--drivers/staging/rdma/hfi1/ud.c250
-rw-r--r--drivers/staging/rdma/hfi1/user_exp_rcv.c1047
-rw-r--r--drivers/staging/rdma/hfi1/user_exp_rcv.h13
-rw-r--r--drivers/staging/rdma/hfi1/user_pages.c74
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.c645
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.h11
-rw-r--r--drivers/staging/rdma/hfi1/verbs.c1667
-rw-r--r--drivers/staging/rdma/hfi1/verbs.h824
-rw-r--r--drivers/staging/rdma/hfi1/verbs_txreq.c149
-rw-r--r--drivers/staging/rdma/hfi1/verbs_txreq.h116
-rw-r--r--drivers/staging/rtl8188eu/Makefile2
-rw-r--r--drivers/staging/rtl8188eu/TODO2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c137
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c15
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c92
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_iol.c13
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c25
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c22
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c77
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_rf.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c28
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c30
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c91
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c26
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c50
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c26
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c15
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c3
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h4
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h29
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h8
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211_ext.h290
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h9
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h2
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h214
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseqcmd.h28
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c9
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c14
-rw-r--r--drivers/staging/rtl8192e/dot11d.h3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c56
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c9
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c14
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib.h3
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c102
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c48
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c63
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c92
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c49
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c26
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c79
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c14
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c24
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c45
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c65
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c38
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c3
-rw-r--r--drivers/staging/rtl8712/TODO12
-rw-r--r--drivers/staging/rtl8712/drv_types.h1
-rw-r--r--drivers/staging/rtl8712/ieee80211.c2
-rw-r--r--drivers/staging/rtl8712/ieee80211.h112
-rw-r--r--drivers/staging/rtl8712/os_intfs.c3
-rw-r--r--drivers/staging/rtl8712/recv_linux.c7
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c8
-rw-r--r--drivers/staging/rtl8712/rtl8712_io.c77
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c60
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c10
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c45
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c12
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h118
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c27
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c45
-rw-r--r--drivers/staging/rtl8712/usb_intf.c22
-rw-r--r--drivers/staging/rtl8712/usb_ops.c26
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c9
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c17
-rw-r--r--drivers/staging/rtl8723au/TODO5
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c193
-rw-r--r--drivers/staging/rtl8723au/core/rtw_cmd.c3
-rw-r--r--drivers/staging/rtl8723au/core/rtw_efuse.c4
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme.c49
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme_ext.c36
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c169
-rw-r--r--drivers/staging/rtl8723au/core/rtw_security.c8
-rw-r--r--drivers/staging/rtl8723au/core/rtw_sta_mgt.c44
-rw-r--r--drivers/staging/rtl8723au/core/rtw_xmit.c116
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c6
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c2
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c2
-rw-r--r--drivers/staging/rtl8723au/hal/hal_com.c2
-rw-r--r--drivers/staging/rtl8723au/hal/odm.c8
-rw-r--r--drivers/staging/rtl8723au/hal/odm_HWConfig.c4
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c46
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_cmd.c6
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c16
-rw-r--r--drivers/staging/rtl8723au/hal/usb_halinit.c9
-rw-r--r--drivers/staging/rtl8723au/hal/usb_ops_linux.c6
-rw-r--r--drivers/staging/rtl8723au/include/odm_HWConfig.h2
-rw-r--r--drivers/staging/rtl8723au/include/osdep_service.h6
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_cmd.h2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ap.h3
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c43
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_ops_linux.c9
-rw-r--r--drivers/staging/rtl8723au/os_dep/xmit_linux.c2
-rw-r--r--drivers/staging/rts5208/ms.c140
-rw-r--r--drivers/staging/rts5208/rtsx.c20
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c31
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c163
-rw-r--r--drivers/staging/rts5208/sd.c28
-rw-r--r--drivers/staging/rts5208/spi.c4
-rw-r--r--drivers/staging/rts5208/xd.c40
-rw-r--r--drivers/staging/skein/threefish_block.c2145
-rw-r--r--drivers/staging/slicoss/slic.h2
-rw-r--r--drivers/staging/slicoss/slicoss.c80
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c132
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c200
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c38
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.h3
-rw-r--r--drivers/staging/sm750fb/ddk750_help.h1
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c38
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c161
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c54
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h11
-rw-r--r--drivers/staging/sm750fb/ddk750_reg.h3002
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c6
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h5
-rw-r--r--drivers/staging/sm750fb/sm750.c204
-rw-r--r--drivers/staging/sm750fb/sm750.h4
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c148
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h278
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c62
-rw-r--r--drivers/staging/sm750fb/sm750_help.h56
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c211
-rw-r--r--drivers/staging/speakup/buffers.c4
-rw-r--r--drivers/staging/speakup/devsynth.c4
-rw-r--r--drivers/staging/speakup/fakekey.c6
-rw-r--r--drivers/staging/speakup/i18n.c5
-rw-r--r--drivers/staging/speakup/keyhelp.c22
-rw-r--r--drivers/staging/speakup/kobjects.c54
-rw-r--r--drivers/staging/speakup/main.c35
-rw-r--r--drivers/staging/speakup/serialio.c10
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c5
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c4
-rw-r--r--drivers/staging/speakup/speakup_apollo.c4
-rw-r--r--drivers/staging/speakup/speakup_audptr.c4
-rw-r--r--drivers/staging/speakup/speakup_bns.c4
-rw-r--r--drivers/staging/speakup/speakup_decext.c28
-rw-r--r--drivers/staging/speakup/speakup_decpc.c4
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c4
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c4
-rw-r--r--drivers/staging/speakup/speakup_dummy.c4
-rw-r--r--drivers/staging/speakup/speakup_keypc.c4
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c4
-rw-r--r--drivers/staging/speakup/speakup_soft.c3
-rw-r--r--drivers/staging/speakup/speakup_spkout.c4
-rw-r--r--drivers/staging/speakup/speakup_txprt.c4
-rw-r--r--drivers/staging/speakup/spk_priv.h4
-rw-r--r--drivers/staging/speakup/spk_priv_keyinfo.h4
-rw-r--r--drivers/staging/speakup/spkguide.txt1
-rw-r--r--drivers/staging/speakup/synth.c2
-rw-r--r--drivers/staging/speakup/varhandlers.c50
-rw-r--r--drivers/staging/staging.c19
-rw-r--r--drivers/staging/ste_rmi4/Kconfig9
-rw-r--r--drivers/staging/ste_rmi4/Makefile4
-rw-r--r--drivers/staging/ste_rmi4/TODO7
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c1140
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h46
-rw-r--r--drivers/staging/unisys/MAINTAINERS2
-rw-r--r--drivers/staging/unisys/include/guestlinuxdebug.h13
-rw-r--r--drivers/staging/unisys/include/iochannel.h14
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h237
-rw-r--r--drivers/staging/unisys/visorbus/vbusdeviceinfo.h7
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c97
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c22
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c165
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c8
-rw-r--r--drivers/staging/unisys/visorinput/Kconfig2
-rw-r--r--drivers/staging/unisys/visorinput/ultrainputreport.h45
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c10
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c68
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c24
-rw-r--r--drivers/staging/vt6655/card.c22
-rw-r--r--drivers/staging/vt6655/channel.c67
-rw-r--r--drivers/staging/vt6655/channel.h2
-rw-r--r--drivers/staging/vt6655/device_main.c31
-rw-r--r--drivers/staging/vt6655/key.c8
-rw-r--r--drivers/staging/vt6655/mac.c503
-rw-r--r--drivers/staging/vt6655/mac.h84
-rw-r--r--drivers/staging/vt6655/power.c66
-rw-r--r--drivers/staging/vt6655/power.h8
-rw-r--r--drivers/staging/vt6655/rf.c178
-rw-r--r--drivers/staging/vt6655/rxtx.c12
-rw-r--r--drivers/staging/vt6656/device.h4
-rw-r--r--drivers/staging/vt6656/main_usb.c30
-rw-r--r--drivers/staging/vt6656/power.c2
-rw-r--r--drivers/staging/vt6656/rf.c4
-rw-r--r--drivers/staging/vt6656/rxtx.c12
-rw-r--r--drivers/staging/vt6656/usbpipe.c21
-rw-r--r--drivers/staging/wilc1000/Makefile6
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c426
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.h97
-rw-r--r--drivers/staging/wilc1000/host_interface.c2081
-rw-r--r--drivers/staging/wilc1000/host_interface.h135
-rw-r--r--drivers/staging/wilc1000/linux_mon.c134
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c514
-rw-r--r--drivers/staging/wilc1000/linux_wlan_common.h166
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c53
-rw-r--r--drivers/staging/wilc1000/wilc_msgqueue.c143
-rw-r--r--drivers/staging/wilc1000/wilc_msgqueue.h110
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c152
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c51
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c1156
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h17
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c583
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h25
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c91
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.h18
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h31
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c41
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h5
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c171
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c132
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c8
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c10
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c6
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h1
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c36
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c31
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c20
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c26
-rw-r--r--drivers/staging/xgifb/vb_def.h1
-rw-r--r--drivers/staging/xgifb/vb_init.c3
-rw-r--r--drivers/staging/xgifb/vb_setmode.c43
-rw-r--r--drivers/staging/xgifb/vb_struct.h2
-rw-r--r--drivers/staging/xgifb/vb_table.h3
-rw-r--r--drivers/staging/xgifb/vgatypes.h7
-rw-r--r--drivers/target/iscsi/iscsi_target.c86
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c98
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c93
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c45
-rw-r--r--drivers/target/loopback/tcm_loop.c46
-rw-r--r--drivers/target/sbp/sbp_target.c95
-rw-r--r--drivers/target/target_core_configfs.c203
-rw-r--r--drivers/target/target_core_device.c41
-rw-r--r--drivers/target/target_core_fabric_configfs.c250
-rw-r--r--drivers/target/target_core_iblock.c34
-rw-r--r--drivers/target/target_core_internal.h9
-rw-r--r--drivers/target/target_core_spc.c3
-rw-r--r--drivers/target/target_core_stat.c41
-rw-r--r--drivers/target/target_core_tmr.c1
-rw-r--r--drivers/target/target_core_tpg.c21
-rw-r--r--drivers/target/target_core_transport.c66
-rw-r--r--drivers/target/target_core_user.c267
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c20
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c44
-rw-r--r--drivers/thermal/Kconfig23
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/intel_pch_thermal.c6
-rw-r--r--drivers/thermal/mtk_thermal.c624
-rw-r--r--drivers/thermal/of-thermal.c85
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thermal/rcar_thermal.c3
-rw-r--r--drivers/thermal/rockchip_thermal.c239
-rw-r--r--drivers/thermal/samsung/Kconfig1
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c19
-rw-r--r--drivers/thermal/tegra_soctherm.c2
-rw-r--r--drivers/thermal/thermal_core.c23
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c10
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/tty/amiserial.c28
-rw-r--r--drivers/tty/cyclades.c22
-rw-r--r--drivers/tty/ehv_bytechan.c40
-rw-r--r--drivers/tty/goldfish.c42
-rw-r--r--drivers/tty/hvc/hvc_vio.c29
-rw-r--r--drivers/tty/hvc/hvc_xen.c81
-rw-r--r--drivers/tty/isicom.c3
-rw-r--r--drivers/tty/mxser.c21
-rw-r--r--drivers/tty/n_gsm.c22
-rw-r--r--drivers/tty/n_hdlc.c19
-rw-r--r--drivers/tty/n_tty.c117
-rw-r--r--drivers/tty/nozomi.c2
-rw-r--r--drivers/tty/pty.c109
-rw-r--r--drivers/tty/rocket.c18
-rw-r--r--drivers/tty/rocket_int.h1
-rw-r--r--drivers/tty/serial/68328serial.c1322
-rw-r--r--drivers/tty/serial/8250/8250.h14
-rw-r--r--drivers/tty/serial/8250/8250_accent.c13
-rw-r--r--drivers/tty/serial/8250/8250_acorn.c2
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c146
-rw-r--r--drivers/tty/serial/8250/8250_boca.c41
-rw-r--r--drivers/tty/serial/8250/8250_core.c30
-rw-r--r--drivers/tty/serial/8250/8250_dw.c137
-rw-r--r--drivers/tty/serial/8250/8250_early.c38
-rw-r--r--drivers/tty/serial/8250/8250_exar_st16c554.c17
-rw-r--r--drivers/tty/serial/8250/8250_fourport.c28
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c7
-rw-r--r--drivers/tty/serial/8250/8250_hp300.c27
-rw-r--r--drivers/tty/serial/8250/8250_hub6.c2
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c14
-rw-r--r--drivers/tty/serial/8250/8250_moxa.c157
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c51
-rw-r--r--drivers/tty/serial/8250/8250_of.c1
-rw-r--r--drivers/tty/serial/8250/8250_omap.c40
-rw-r--r--drivers/tty/serial/8250/8250_pci.c147
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c48
-rw-r--r--drivers/tty/serial/8250/8250_port.c471
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c2
-rw-r--r--drivers/tty/serial/8250/Kconfig50
-rw-r--r--drivers/tty/serial/8250/Makefile2
-rw-r--r--drivers/tty/serial/8250/serial_cs.c90
-rw-r--r--drivers/tty/serial/Kconfig42
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/amba-pl011.c24
-rw-r--r--drivers/tty/serial/arc_uart.c1
-rw-r--r--drivers/tty/serial/atmel_serial.c180
-rw-r--r--drivers/tty/serial/clps711x.c10
-rw-r--r--drivers/tty/serial/crisv10.c33
-rw-r--r--drivers/tty/serial/digicolor-usart.c9
-rw-r--r--drivers/tty/serial/earlycon.c118
-rw-r--r--drivers/tty/serial/ifx6x60.c3
-rw-r--r--drivers/tty/serial/imx.c5
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c13
-rw-r--r--drivers/tty/serial/m32r_sio.c134
-rw-r--r--drivers/tty/serial/m32r_sio.h49
-rw-r--r--drivers/tty/serial/meson_uart.c8
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c8
-rw-r--r--drivers/tty/serial/mpsc.c178
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c650
-rw-r--r--drivers/tty/serial/omap-serial.c2
-rw-r--r--drivers/tty/serial/samsung.c24
-rw-r--r--drivers/tty/serial/sc16is7xx.c24
-rw-r--r--drivers/tty/serial/serial_core.c125
-rw-r--r--drivers/tty/serial/serial_ks8695.c2
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c3
-rw-r--r--drivers/tty/serial/sh-sci.c178
-rw-r--r--drivers/tty/serial/sh-sci.h15
-rw-r--r--drivers/tty/serial/sprd_serial.c2
-rw-r--r--drivers/tty/serial/uartlite.c53
-rw-r--r--drivers/tty/serial/xilinx_uartps.c543
-rw-r--r--drivers/tty/serial/zs.c4
-rw-r--r--drivers/tty/synclink.c23
-rw-r--r--drivers/tty/synclink_gt.c19
-rw-r--r--drivers/tty/synclinkmp.c63
-rw-r--r--drivers/tty/tty_audit.c237
-rw-r--r--drivers/tty/tty_buffer.c39
-rw-r--r--drivers/tty/tty_io.c270
-rw-r--r--drivers/tty/tty_ioctl.c12
-rw-r--r--drivers/tty/tty_ldisc.c198
-rw-r--r--drivers/tty/tty_mutex.c8
-rw-r--r--drivers/tty/tty_port.c11
-rw-r--r--drivers/tty/vt/keyboard.c14
-rw-r--r--drivers/tty/vt/selection.c2
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c13
-rw-r--r--drivers/usb/chipidea/core.c3
-rw-r--r--drivers/usb/chipidea/debug.c5
-rw-r--r--drivers/usb/chipidea/otg_fsm.c29
-rw-r--r--drivers/usb/chipidea/otg_fsm.h2
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/class/cdc-acm.c74
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/usbtmc.c331
-rw-r--r--drivers/usb/common/common.c23
-rw-r--r--drivers/usb/common/usb-otg-fsm.c87
-rw-r--r--drivers/usb/core/Makefile2
-rw-r--r--drivers/usb/core/buffer.c18
-rw-r--r--drivers/usb/core/config.c37
-rw-r--r--drivers/usb/core/devices.c26
-rw-r--r--drivers/usb/core/devio.c301
-rw-r--r--drivers/usb/core/driver.c6
-rw-r--r--drivers/usb/core/file.c9
-rw-r--r--drivers/usb/core/hcd-pci.c12
-rw-r--r--drivers/usb/core/hcd.c114
-rw-r--r--drivers/usb/core/hub.c122
-rw-r--r--drivers/usb/core/hub.h7
-rw-r--r--drivers/usb/core/of.c47
-rw-r--r--drivers/usb/core/port.c6
-rw-r--r--drivers/usb/core/sysfs.c68
-rw-r--r--drivers/usb/core/urb.c3
-rw-r--r--drivers/usb/core/usb.c24
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc2/Kconfig1
-rw-r--r--drivers/usb/dwc2/core.c1884
-rw-r--r--drivers/usb/dwc2/core.h151
-rw-r--r--drivers/usb/dwc2/gadget.c125
-rw-r--r--drivers/usb/dwc2/hcd.c2255
-rw-r--r--drivers/usb/dwc2/hcd.h134
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c49
-rw-r--r--drivers/usb/dwc2/hcd_intr.c174
-rw-r--r--drivers/usb/dwc2/hcd_queue.c1941
-rw-r--r--drivers/usb/dwc2/platform.c38
-rw-r--r--drivers/usb/dwc3/core.c102
-rw-r--r--drivers/usb/dwc3/core.h11
-rw-r--r--drivers/usb/dwc3/debugfs.c13
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c5
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c9
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c12
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/ep0.c9
-rw-r--r--drivers/usb/dwc3/gadget.c47
-rw-r--r--drivers/usb/gadget/composite.c154
-rw-r--r--drivers/usb/gadget/config.c9
-rw-r--r--drivers/usb/gadget/configfs.c37
-rw-r--r--drivers/usb/gadget/function/f_acm.c6
-rw-r--r--drivers/usb/gadget/function/f_ecm.c2
-rw-r--r--drivers/usb/gadget/function/f_eem.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c164
-rw-r--r--drivers/usb/gadget/function/f_hid.c2
-rw-r--r--drivers/usb/gadget/function/f_loopback.c2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c8
-rw-r--r--drivers/usb/gadget/function/f_midi.c211
-rw-r--r--drivers/usb/gadget/function/f_ncm.c2
-rw-r--r--drivers/usb/gadget/function/f_obex.c3
-rw-r--r--drivers/usb/gadget/function/f_phonet.c2
-rw-r--r--drivers/usb/gadget/function/f_printer.c2
-rw-r--r--drivers/usb/gadget/function/f_rndis.c7
-rw-r--r--drivers/usb/gadget/function/f_serial.c2
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/function/f_subset.c2
-rw-r--r--drivers/usb/gadget/function/f_tcm.c195
-rw-r--r--drivers/usb/gadget/function/f_uac1.c3
-rw-r--r--drivers/usb/gadget/function/f_uac2.c3
-rw-r--r--drivers/usb/gadget/function/rndis.c20
-rw-r--r--drivers/usb/gadget/function/tcm.h2
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c198
-rw-r--r--drivers/usb/gadget/legacy/Kconfig3
-rw-r--r--drivers/usb/gadget/legacy/inode.c32
-rw-r--r--drivers/usb/gadget/udc/Kconfig7
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c36
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c5
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c103
-rw-r--r--drivers/usb/gadget/udc/net2280.c8
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c8
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c530
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h11
-rw-r--r--drivers/usb/gadget/udc/udc-core.c36
-rw-r--r--drivers/usb/host/Kconfig18
-rw-r--r--drivers/usb/host/Makefile3
-rw-r--r--drivers/usb/host/bcma-hcd.c83
-rw-r--r--drivers/usb/host/ehci-atmel.c6
-rw-r--r--drivers/usb/host/ehci-dbg.c477
-rw-r--r--drivers/usb/host/ehci-fsl.c24
-rw-r--r--drivers/usb/host/ehci-hcd.c22
-rw-r--r--drivers/usb/host/ehci-hub.c6
-rw-r--r--drivers/usb/host/ehci-msm.c66
-rw-r--r--drivers/usb/host/ehci-pci.c8
-rw-r--r--drivers/usb/host/ehci-platform.c6
-rw-r--r--drivers/usb/host/ehci-q.c104
-rw-r--r--drivers/usb/host/ehci-sched.c524
-rw-r--r--drivers/usb/host/ehci-st.c6
-rw-r--r--drivers/usb/host/ehci-timer.c5
-rw-r--r--drivers/usb/host/ehci.h99
-rw-r--r--drivers/usb/host/fotg210-hcd.c15
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c7
-rw-r--r--drivers/usb/host/max3421-hcd.c16
-rw-r--r--drivers/usb/host/ohci-at91.c10
-rw-r--r--drivers/usb/host/ohci-nxp.c87
-rw-r--r--drivers/usb/host/ohci-platform.c6
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-st.c6
-rw-r--r--drivers/usb/host/ohci.h2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c15
-rw-r--r--drivers/usb/host/pci-quirks.c3
-rw-r--r--drivers/usb/host/r8a66597-hcd.c11
-rw-r--r--drivers/usb/host/u132-hcd.c18
-rw-r--r--drivers/usb/host/xhci-hub.c27
-rw-r--r--drivers/usb/host/xhci-mem.c186
-rw-r--r--drivers/usb/host/xhci-mtk.c10
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c19
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-ring.c137
-rw-r--r--drivers/usb/host/xhci.c32
-rw-r--r--drivers/usb/host/xhci.h17
-rw-r--r--drivers/usb/misc/chaoskey.c122
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/misc/iowarrior.c6
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1543
-rw-r--r--drivers/usb/mon/mon_main.c9
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/jz4740.c4
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musbhsdma.c8
-rw-r--r--drivers/usb/musb/sunxi.c1
-rw-r--r--drivers/usb/musb/tusb6010_omap.c4
-rw-r--r--drivers/usb/musb/ux500_dma.c3
-rw-r--r--drivers/usb/phy/phy-am335x.c1
-rw-r--r--drivers/usb/phy/phy-generic.c11
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c72
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig2
-rw-r--r--drivers/usb/renesas_usbhs/Makefile2
-rw-r--r--drivers/usb/renesas_usbhs/common.c14
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c24
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h20
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c8
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c6
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h2
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c54
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.h3
-rw-r--r--drivers/usb/serial/ch341.c2
-rw-r--r--drivers/usb/serial/console.c6
-rw-r--r--drivers/usb/serial/cp210x.c311
-rw-r--r--drivers/usb/serial/cyberjack.c3
-rw-r--r--drivers/usb/serial/cypress_m8.c14
-rw-r--r--drivers/usb/serial/digi_acceleport.c27
-rw-r--r--drivers/usb/serial/ftdi_sio.c14
-rw-r--r--drivers/usb/serial/ftdi_sio.h8
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/garmin_gps.c51
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/iuu_phoenix.c4
-rw-r--r--drivers/usb/serial/keyspan.c2
-rw-r--r--drivers/usb/serial/kl5kusb105.c3
-rw-r--r--drivers/usb/serial/mct_u232.c11
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/mos7840.c8
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/quatech2.c2
-rw-r--r--drivers/usb/serial/safe_serial.c11
-rw-r--r--drivers/usb/storage/debug.c12
-rw-r--r--drivers/usb/storage/debug.h3
-rw-r--r--drivers/usb/storage/ene_ub6250.c4
-rw-r--r--drivers/usb/storage/scsiglue.c2
-rw-r--r--drivers/usb/storage/sddr09.c18
-rw-r--r--drivers/usb/storage/uas.c59
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c5
-rw-r--r--drivers/usb/usbip/usbip_common.c11
-rw-r--r--drivers/usb/usbip/usbip_event.c5
-rw-r--r--drivers/usb/usbip/usbip_protocol.txt358
-rw-r--r--drivers/usb/usbip/vhci_hcd.c88
-rw-r--r--drivers/usb/usbip/vhci_rx.c30
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c19
-rw-r--r--drivers/usb/usbip/vhci_tx.c14
-rw-r--r--drivers/usb/wusbcore/crypto.c30
-rw-r--r--drivers/usb/wusbcore/wusbhc.h2
-rw-r--r--drivers/vfio/pci/Kconfig4
-rw-r--r--drivers/vfio/pci/Makefile1
-rw-r--r--drivers/vfio/pci/vfio_pci.c175
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c45
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c280
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c17
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h39
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c9
-rw-r--r--drivers/vfio/vfio.c70
-rw-r--r--drivers/vhost/net.c80
-rw-r--r--drivers/vhost/scsi.c101
-rw-r--r--drivers/vhost/test.c2
-rw-r--r--drivers/vhost/vhost.c69
-rw-r--r--drivers/vhost/vhost.h5
-rw-r--r--drivers/video/fbdev/Kconfig7
-rw-r--r--drivers/video/fbdev/acornfb.c4
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c14
-rw-r--r--drivers/video/fbdev/amba-clcd.c19
-rw-r--r--drivers/video/fbdev/atafb.c3
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c11
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c1
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c1
-rw-r--r--drivers/video/fbdev/au1100fb.c22
-rw-r--r--drivers/video/fbdev/bf537-lq035.c23
-rw-r--r--drivers/video/fbdev/bt431.h43
-rw-r--r--drivers/video/fbdev/bt455.h68
-rw-r--r--drivers/video/fbdev/da8xx-fb.c7
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c8
-rw-r--r--drivers/video/fbdev/exynos/Kconfig6
-rw-r--r--drivers/video/fbdev/exynos/Makefile6
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi.c7
-rw-r--r--drivers/video/fbdev/gbefb.c8
-rw-r--r--drivers/video/fbdev/imsttfb.c1
-rw-r--r--drivers/video/fbdev/imxfb.c12
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.h1
-rw-r--r--drivers/video/fbdev/metronomefb.c6
-rw-r--r--drivers/video/fbdev/mx3fb.c9
-rw-r--r--drivers/video/fbdev/n411.c12
-rw-r--r--drivers/video/fbdev/nuc900fb.c8
-rw-r--r--drivers/video/fbdev/offb.c4
-rw-r--r--drivers/video/fbdev/omap/lcd_h3.c2
-rw-r--r--drivers/video/fbdev/omap/lcd_osk.c3
-rw-r--r--drivers/video/fbdev/omap/lcd_palmtt.c2
-rw-r--r--drivers/video/fbdev/omap/lcdc.c16
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c22
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c12
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.h1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h78
-rw-r--r--drivers/video/fbdev/pmag-aa-fb.c602
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c6
-rw-r--r--drivers/video/fbdev/pxa168fb.c8
-rw-r--r--drivers/video/fbdev/pxafb.c4
-rw-r--r--drivers/video/fbdev/s3c-fb.c7
-rw-r--r--drivers/video/fbdev/s3c2410fb.c8
-rw-r--r--drivers/video/fbdev/sa1100fb.c8
-rw-r--r--drivers/video/fbdev/simplefb.c4
-rw-r--r--drivers/video/fbdev/sis/init301.c10
-rw-r--r--drivers/video/fbdev/skeletonfb.c17
-rw-r--r--drivers/video/fbdev/sunxvr1000.c42
-rw-r--r--drivers/video/fbdev/sunxvr2500.c39
-rw-r--r--drivers/video/fbdev/sunxvr500.c42
-rw-r--r--drivers/virt/fsl_hypervisor.c5
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_balloon.c128
-rw-r--r--drivers/virtio/virtio_mmio.c67
-rw-r--r--drivers/virtio/virtio_pci_common.c2
-rw-r--r--drivers/virtio/virtio_pci_common.h6
-rw-r--r--drivers/virtio/virtio_pci_legacy.c42
-rw-r--r--drivers/virtio/virtio_pci_modern.c72
-rw-r--r--drivers/virtio/virtio_ring.c439
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c11
-rw-r--r--drivers/w1/masters/omap_hdq.c1
-rw-r--r--drivers/w1/w1.c1
-rw-r--r--drivers/watchdog/Kconfig67
-rw-r--r--drivers/watchdog/Makefile4
-rw-r--r--drivers/watchdog/atlas7_wdt.c5
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c3
-rw-r--r--drivers/watchdog/da9063_wdt.c3
-rw-r--r--drivers/watchdog/digicolor_wdt.c3
-rw-r--r--drivers/watchdog/dw_wdt.c323
-rw-r--r--drivers/watchdog/ebc-c384_wdt.c188
-rw-r--r--drivers/watchdog/hpwdt.c20
-rw-r--r--drivers/watchdog/imgpdc_wdt.c3
-rw-r--r--drivers/watchdog/imx2_wdt.c77
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c3
-rw-r--r--drivers/watchdog/mei_wdt.c724
-rw-r--r--drivers/watchdog/meson_wdt.c3
-rw-r--r--drivers/watchdog/moxart_wdt.c3
-rw-r--r--drivers/watchdog/mtk_wdt.c3
-rw-r--r--drivers/watchdog/ni903x_wdt.c270
-rw-r--r--drivers/watchdog/pnx4008_wdt.c42
-rw-r--r--drivers/watchdog/qcom-wdt.c3
-rw-r--r--drivers/watchdog/rc32434_wdt.c2
-rw-r--r--drivers/watchdog/s3c2410_wdt.c19
-rw-r--r--drivers/watchdog/sbsa_gwdt.c408
-rw-r--r--drivers/watchdog/sunxi_wdt.c3
-rw-r--r--drivers/watchdog/tangox_wdt.c14
-rw-r--r--drivers/watchdog/w83627hf_wdt.c22
-rw-r--r--drivers/watchdog/watchdog_core.c4
-rw-r--r--drivers/watchdog/watchdog_dev.c207
-rw-r--r--drivers/watchdog/ziirave_wdt.c2
-rw-r--r--drivers/xen/Kconfig23
-rw-r--r--drivers/xen/balloon.c33
-rw-r--r--drivers/xen/events/events_2l.c6
-rw-r--r--drivers/xen/events/events_base.c30
-rw-r--r--drivers/xen/events/events_fifo.c1
-rw-r--r--drivers/xen/evtchn.c20
-rw-r--r--drivers/xen/features.c2
-rw-r--r--drivers/xen/grant-table.c1
-rw-r--r--drivers/xen/platform-pci.c22
-rw-r--r--drivers/xen/sys-hypervisor.c59
-rw-r--r--drivers/xen/xen-balloon.c14
-rw-r--r--drivers/xen/xen-pciback/conf_space.c2
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xen-scsiback.c281
-rw-r--r--drivers/xen/xen-selfballoon.c1
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c13
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c13
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c1
-rw-r--r--drivers/xen/xenfs/xensyms.c1
-rw-r--r--drivers/zorro/zorro-sysfs.c3
4958 files changed, 327488 insertions, 170321 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index cb648a49543a..edeb2d1d99be 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -43,6 +43,7 @@ acpi-y += pci_root.o pci_link.o pci_irq.o
acpi-y += acpi_lpss.o acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
+acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
acpi-y += int340x_thermal.o
acpi-y += power.o
acpi-y += event.o
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
new file mode 100644
index 000000000000..2a61b54ab968
--- /dev/null
+++ b/drivers/acpi/acpi_amba.c
@@ -0,0 +1,122 @@
+
+/*
+ * ACPI support for AMBA bus type.
+ *
+ * Copyright (C) 2015, Linaro Ltd
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/amba/bus.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static const struct acpi_device_id amba_id_list[] = {
+ {"ARMH0061", 0}, /* PL061 GPIO Device */
+ {"", 0},
+};
+
+static void amba_register_dummy_clk(void)
+{
+ static struct clk *amba_dummy_clk;
+
+ /* If clock already registered */
+ if (amba_dummy_clk)
+ return;
+
+ amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL,
+ CLK_IS_ROOT, 0);
+ clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL);
+}
+
+static int amba_handler_attach(struct acpi_device *adev,
+ const struct acpi_device_id *id)
+{
+ struct amba_device *dev;
+ struct resource_entry *rentry;
+ struct list_head resource_list;
+ bool address_found = false;
+ int irq_no = 0;
+ int ret;
+
+ /* If the ACPI node already has a physical device attached, skip it. */
+ if (adev->physical_node_count)
+ return 0;
+
+ dev = amba_device_alloc(dev_name(&adev->dev), 0, 0);
+ if (!dev) {
+ dev_err(&adev->dev, "%s(): amba_device_alloc() failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (ret < 0)
+ goto err_free;
+
+ list_for_each_entry(rentry, &resource_list, node) {
+ switch (resource_type(rentry->res)) {
+ case IORESOURCE_MEM:
+ if (!address_found) {
+ dev->res = *rentry->res;
+ address_found = true;
+ }
+ break;
+ case IORESOURCE_IRQ:
+ if (irq_no < AMBA_NR_IRQS)
+ dev->irq[irq_no++] = rentry->res->start;
+ break;
+ default:
+ dev_warn(&adev->dev, "Invalid resource\n");
+ break;
+ }
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ /*
+ * If the ACPI node has a parent and that parent has a physical device
+ * attached to it, that physical device should be the parent of
+ * the amba device we are about to create.
+ */
+ if (adev->parent)
+ dev->dev.parent = acpi_get_first_physical_node(adev->parent);
+
+ ACPI_COMPANION_SET(&dev->dev, adev);
+
+ ret = amba_device_add(dev, &iomem_resource);
+ if (ret) {
+ dev_err(&adev->dev, "%s(): amba_device_add() failed (%d)\n",
+ __func__, ret);
+ goto err_free;
+ }
+
+ return 1;
+
+err_free:
+ amba_device_put(dev);
+ return ret;
+}
+
+static struct acpi_scan_handler amba_handler = {
+ .ids = amba_id_list,
+ .attach = amba_handler_attach,
+};
+
+void __init acpi_amba_init(void)
+{
+ amba_register_dummy_clk();
+ acpi_scan_add_handler(&amba_handler);
+}
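For context: once amba_handler_attach() above creates an amba_device for an ACPI-enumerated PL061, an ordinary AMBA driver binds to it by primecell ID exactly as on DT systems. A minimal sketch of such a driver against the 4.6-era AMBA API; the ID value matches PL061's part number, but the driver name and messages are hypothetical:

    #include <linux/amba/bus.h>
    #include <linux/module.h>

    /* PL061's primecell part number is 0x061; the mask ignores the
     * revision field, mirroring the real pl061 driver's ID table. */
    static const struct amba_id demo_ids[] = {
            { .id = 0x00041061, .mask = 0x000fffff },
            { 0, 0 },
    };
    MODULE_DEVICE_TABLE(amba, demo_ids);

    static int demo_probe(struct amba_device *adev, const struct amba_id *id)
    {
            /* adev->res and adev->irq[] were filled in by
             * amba_handler_attach() when enumerated via ACPI. */
            dev_info(&adev->dev, "bound to ACPI-enumerated AMBA device\n");
            return 0;
    }

    static int demo_remove(struct amba_device *adev)
    {
            return 0;
    }

    static struct amba_driver demo_driver = {
            .drv            = { .name = "acpi-amba-demo" },
            .id_table       = demo_ids,
            .probe          = demo_probe,
            .remove         = demo_remove,
    };
    module_amba_driver(demo_driver);
    MODULE_LICENSE("GPL");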
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d507cf6deda0..f245bf35bedb 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -143,7 +143,9 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
/* Generic apd devices */
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
{ "AMD0010", APD_ADDR(cz_i2c_desc) },
+ { "AMDI0010", APD_ADDR(cz_i2c_desc) },
{ "AMD0020", APD_ADDR(cz_uart_desc) },
+ { "AMDI0020", APD_ADDR(cz_uart_desc) },
{ "AMD0030", },
#endif
#ifdef CONFIG_ARM64
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 296b7a14893a..159f7f19abce 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -43,7 +43,6 @@ static const struct acpi_device_id forbidden_id_list[] = {
struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
{
struct platform_device *pdev = NULL;
- struct acpi_device *acpi_parent;
struct platform_device_info pdevinfo;
struct resource_entry *rentry;
struct list_head resource_list;
@@ -62,7 +61,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
if (count < 0) {
return NULL;
} else if (count > 0) {
- resources = kmalloc(count * sizeof(struct resource),
+ resources = kzalloc(count * sizeof(struct resource),
GFP_KERNEL);
if (!resources) {
dev_err(&adev->dev, "No memory for resources\n");
@@ -82,22 +81,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
* attached to it, that physical device should be the parent of the
* platform device we are about to create.
*/
- pdevinfo.parent = NULL;
- acpi_parent = adev->parent;
- if (acpi_parent) {
- struct acpi_device_physical_node *entry;
- struct list_head *list;
-
- mutex_lock(&acpi_parent->physical_node_lock);
- list = &acpi_parent->physical_node_list;
- if (!list_empty(list)) {
- entry = list_first_entry(list,
- struct acpi_device_physical_node,
- node);
- pdevinfo.parent = entry->dev;
- }
- mutex_unlock(&acpi_parent->physical_node_lock);
- }
+ pdevinfo.parent = adev->parent ?
+ acpi_get_first_physical_node(adev->parent) : NULL;
pdevinfo.name = dev_name(&adev->dev);
pdevinfo.id = -1;
pdevinfo.res = resources;
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 6979186dbd4b..0d92d0f915e9 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+#ifdef CONFIG_X86
+static bool acpi_hwp_native_thermal_lvt_set;
+static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
+ u32 lvl,
+ void *context,
+ void **rv)
+{
+ u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
+ u32 capbuf[2];
+ struct acpi_osc_context osc_context = {
+ .uuid_str = sb_uuid_str,
+ .rev = 1,
+ .cap.length = 8,
+ .cap.pointer = capbuf,
+ };
+
+ if (acpi_hwp_native_thermal_lvt_set)
+ return AE_CTRL_TERMINATE;
+
+ capbuf[0] = 0x0000;
+ capbuf[1] = 0x1000; /* set bit 12 */
+
+ if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
+ if (osc_context.ret.pointer && osc_context.ret.length > 1) {
+ u32 *capbuf_ret = osc_context.ret.pointer;
+
+ if (capbuf_ret[1] & 0x1000) {
+ acpi_handle_info(handle,
+ "_OSC native thermal LVT Acked\n");
+ acpi_hwp_native_thermal_lvt_set = true;
+ }
+ }
+ kfree(osc_context.ret.pointer);
+ }
+
+ return AE_OK;
+}
+
+void __init acpi_early_processor_osc(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP)) {
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_hwp_native_thermal_lvt_osc,
+ NULL, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
+ acpi_hwp_native_thermal_lvt_osc,
+ NULL, NULL);
+ }
+}
+#endif
+
/*
* The following ACPI IDs are known to be suitable for representing as
* processor devices.
@@ -514,7 +566,24 @@ static struct acpi_scan_handler processor_handler = {
},
};
+static int acpi_processor_container_attach(struct acpi_device *dev,
+ const struct acpi_device_id *id)
+{
+ return 1;
+}
+
+static const struct acpi_device_id processor_container_ids[] = {
+ { ACPI_PROCESSOR_CONTAINER_HID, },
+ { }
+};
+
+static struct acpi_scan_handler processor_container_handler = {
+ .ids = processor_container_ids,
+ .attach = acpi_processor_container_attach,
+};
+
void __init acpi_processor_init(void)
{
acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
+ acpi_scan_add_handler(&processor_container_handler);
}
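The container handler above exists only to claim processor-container objects (ACPI_PROCESSOR_CONTAINER_HID, i.e. ACPI0010): an .attach callback returning 1 marks the ACPI node as handled, which keeps the default enumeration path from creating a useless platform device for it while children are still scanned. A sketch of the same claim-only pattern for a hypothetical HID; note that acpi_scan_add_handler() is ACPI-internal, so this shape only applies inside drivers/acpi/:

    #include <linux/acpi.h>
    #include "internal.h"   /* acpi_scan_add_handler() is ACPI-internal */

    static int claim_only_attach(struct acpi_device *adev,
                                 const struct acpi_device_id *id)
    {
            /* Returning 1 means "claimed": the scan code will not
             * create a platform device for this node. */
            return 1;
    }

    static const struct acpi_device_id claim_only_ids[] = {
            { "XXXX0000", },   /* hypothetical HID */
            { }
    };

    static struct acpi_scan_handler claim_only_handler = {
            .ids    = claim_only_ids,
            .attach = claim_only_attach,
    };

    void __init claim_only_init(void)
    {
            acpi_scan_add_handler(&claim_only_handler);
    }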
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index a76f8be1bfe7..4361bc98ef4c 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -218,13 +218,6 @@ struct acpi_video_device {
struct thermal_cooling_device *cooling_dev;
};
-static const char device_decode[][30] = {
- "motherboard VGA device",
- "PCI VGA device",
- "AGP VGA device",
- "UNKNOWN",
-};
-
static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data);
static void acpi_video_device_rebind(struct acpi_video_bus *video);
static void acpi_video_device_bind(struct acpi_video_bus *video,
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 55c8197036f3..51b073b68f16 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -165,7 +165,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
/* Initialization sequencing */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_reg_methods_enabled, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
/* Misc */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index e4977fac9c1d..9562a10a1a18 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -85,7 +85,7 @@ union acpi_parse_object;
#define ACPI_MTX_MEMORY 5 /* Debug memory tracking lists */
#define ACPI_MAX_MUTEX 5
-#define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
+#define ACPI_NUM_MUTEX (ACPI_MAX_MUTEX+1)
/* Lock structure for reader/writer interfaces */
@@ -103,11 +103,11 @@ struct acpi_rw_lock {
#define ACPI_LOCK_HARDWARE 1
#define ACPI_MAX_LOCK 1
-#define ACPI_NUM_LOCK ACPI_MAX_LOCK+1
+#define ACPI_NUM_LOCK (ACPI_MAX_LOCK+1)
/* This Thread ID means that the mutex is not in use (unlocked) */
-#define ACPI_MUTEX_NOT_ACQUIRED (acpi_thread_id) 0
+#define ACPI_MUTEX_NOT_ACQUIRED ((acpi_thread_id) 0)
/* This Thread ID means an invalid thread ID */
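The added parentheses matter because these macros are expanded inside larger expressions. A quick userspace illustration of the precedence bug the old definitions invited (the names are stand-ins, not the ACPICA macros):

    #include <stdio.h>

    #define MAX_MUTEX  5
    #define NUM_BAD    MAX_MUTEX+1      /* unparenthesized, old style */
    #define NUM_GOOD   (MAX_MUTEX+1)    /* parenthesized, as fixed */

    int main(void)
    {
            /* '*' binds tighter than '+', so NUM_BAD * sizeof(long)
             * expands to 5 + (1 * 8) on LP64 instead of (5 + 1) * 8. */
            printf("bad:  %zu\n", NUM_BAD * sizeof(long));   /* 13 */
            printf("good: %zu\n", NUM_GOOD * sizeof(long));  /* 48 */
            return 0;
    }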
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9684ed61284d..022d69cb345a 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -88,7 +88,7 @@
*/
acpi_status acpi_ns_initialize_objects(void);
-acpi_status acpi_ns_initialize_devices(void);
+acpi_status acpi_ns_initialize_devices(u32 flags);
/*
* nsload - Namespace loading
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 52f6bee52d47..5faeab41e302 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -1125,7 +1125,7 @@ const union acpi_predefined_info acpi_gbl_resource_names[] = {
PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
};
-static const union acpi_predefined_info acpi_gbl_scope_names[] = {
+const union acpi_predefined_info acpi_gbl_scope_names[] = {
{{"_GPE", 0, 0}},
{{"_PR_", 0, 0}},
{{"_SB_", 0, 0}},
diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c
index 7ec62c461280..772178c96ccf 100644
--- a/drivers/acpi/acpica/dbcmds.c
+++ b/drivers/acpi/acpica/dbcmds.c
@@ -348,7 +348,7 @@ void acpi_db_display_table_info(char *table_arg)
} else {
/* If the pointer is null, the table has been unloaded */
- ACPI_INFO((AE_INFO, "%4.4s - Table has been unloaded",
+ ACPI_INFO(("%4.4s - Table has been unloaded",
table_desc->signature.ascii));
}
}
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 9fee88f1c654..68f4e0f4b095 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -408,7 +408,7 @@ void acpi_db_dump_pld_buffer(union acpi_object *obj_desc)
new_buffer = acpi_db_encode_pld_buffer(pld_info);
if (!new_buffer) {
- return;
+ goto exit;
}
/* The two bit-packed buffers should match */
@@ -479,6 +479,7 @@ void acpi_db_dump_pld_buffer(union acpi_object *obj_desc)
pld_info->horizontal_offset);
}
- ACPI_FREE(pld_info);
ACPI_FREE(new_buffer);
+exit:
+ ACPI_FREE(pld_info);
}
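The dbconvert change plugs a leak: on the !new_buffer early return, the previously allocated pld_info was never freed, so the fix routes that error path through a label that frees it unconditionally. A compact userspace sketch of the before/after pattern, with malloc/free standing in for ACPI_ALLOCATE/ACPI_FREE:

    #include <stdlib.h>

    static void dump_pld(void)
    {
            char *pld_info = malloc(64);
            char *new_buffer;

            if (!pld_info)
                    return;

            new_buffer = malloc(64);
            if (!new_buffer)
                    goto exit;      /* old code: "return;" leaked pld_info */

            /* ... compare and print both buffers ... */

            free(new_buffer);
    exit:
            free(pld_info);
    }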
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 6a72047aae1c..da198b864107 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
obj_desc->method.mutex->mutex.
original_sync_level =
obj_desc->method.mutex->mutex.sync_level;
+
+ obj_desc->method.mutex->mutex.thread_id =
+ acpi_os_get_thread_id();
}
}
@@ -809,8 +812,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
if (method_desc->method.
info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
if (walk_state) {
- ACPI_INFO((AE_INFO,
- "Marking method %4.4s as Serialized "
+ ACPI_INFO(("Marking method %4.4s as Serialized "
"because of AE_ALREADY_EXISTS error",
walk_state->method_node->name.
ascii));
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index c303e9d9266f..a91de2b4603c 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -524,8 +524,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
arg = arg->common.next;
}
- ACPI_INFO((AE_INFO,
- "Actual Package length (%u) is larger than "
+ ACPI_INFO(("Actual Package length (%u) is larger than "
"NumElements field (%u), truncated",
i, element_count));
} else if (i < element_count) {
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9275e626ed8d..447fa1cac64f 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -499,8 +499,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
}
if (gpe_enabled_count) {
- ACPI_INFO((AE_INFO,
- "Enabled %u GPEs in block %02X to %02X",
+ ACPI_INFO(("Enabled %u GPEs in block %02X to %02X",
gpe_enabled_count, (u32)gpe_block->block_base_number,
(u32)(gpe_block->block_base_number +
(gpe_block->gpe_count - 1))));
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 9fdd8d09141b..7dc75474c897 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -281,7 +281,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
}
if (walk_info.count) {
- ACPI_INFO((AE_INFO, "Enabled %u new GPEs", walk_info.count));
+ ACPI_INFO(("Enabled %u new GPEs", walk_info.count));
}
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 47092b4d633c..63924d1c737a 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -600,7 +600,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
if (region_obj2->extra.method_REG == NULL ||
region_obj->region.handler == NULL ||
- !acpi_gbl_reg_methods_enabled) {
+ !acpi_gbl_namespace_initialized) {
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 011df210b7b2..f74161301037 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -252,7 +252,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_SUCCESS(status)) {
- ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ ACPI_INFO(("Dynamic OEM Table Load:"));
acpi_tb_print_table_header(0, table);
}
@@ -472,7 +472,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
/* Install the new table into the local data structures */
- ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ ACPI_INFO(("Dynamic OEM Table Load:"));
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 28eb861c44eb..5aa21c4eda1d 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -123,8 +123,10 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
* op is intended for use by disassemblers in order to properly
* disassemble control method invocations. The opcode or group of
* opcodes should be surrounded by an "if (0)" clause to ensure that
- * AML interpreters never see the opcode.
+ * AML interpreters never see the opcode. Thus, something is
+ * wrong if an external opcode ever gets here.
*/
+ ACPI_ERROR((AE_INFO, "Executed External Op"));
status = AE_OK;
goto cleanup;
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 65d58bea4320..5d59cfcef6f4 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -378,8 +378,7 @@ void acpi_ns_exec_module_code_list(void)
acpi_ut_remove_reference(prev);
}
- ACPI_INFO((AE_INFO,
- "Executed %u blocks of module-level executable AML code",
+ ACPI_INFO(("Executed %u blocks of module-level executable AML code",
method_count));
ACPI_FREE(info);
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index bd75d46234a4..d4aa8b696ee9 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -46,6 +46,7 @@
#include "acnamesp.h"
#include "acdispat.h"
#include "acinterp.h"
+#include "acevents.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsinit")
@@ -83,6 +84,8 @@ acpi_status acpi_ns_initialize_objects(void)
ACPI_FUNCTION_TRACE(ns_initialize_objects);
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Completing Initialization of ACPI Objects\n"));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
@@ -133,82 +136,108 @@ acpi_status acpi_ns_initialize_objects(void)
*
******************************************************************************/
-acpi_status acpi_ns_initialize_devices(void)
+acpi_status acpi_ns_initialize_devices(u32 flags)
{
- acpi_status status;
+ acpi_status status = AE_OK;
struct acpi_device_walk_info info;
ACPI_FUNCTION_TRACE(ns_initialize_devices);
- /* Init counters */
+ if (!(flags & ACPI_NO_DEVICE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Initializing ACPI Devices\n"));
- info.device_count = 0;
- info.num_STA = 0;
- info.num_INI = 0;
+ /* Init counters */
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "Initializing Device/Processor/Thermal objects "
- "and executing _INI/_STA methods:\n"));
+ info.device_count = 0;
+ info.num_STA = 0;
+ info.num_INI = 0;
- /* Tree analysis: find all subtrees that contain _INI methods */
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "Initializing Device/Processor/Thermal objects "
+ "and executing _INI/_STA methods:\n"));
- status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, FALSE,
- acpi_ns_find_ini_methods, NULL, &info,
- NULL);
- if (ACPI_FAILURE(status)) {
- goto error_exit;
- }
+ /* Tree analysis: find all subtrees that contain _INI methods */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, FALSE,
+ acpi_ns_find_ini_methods, NULL,
+ &info, NULL);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ /* Allocate the evaluation information block */
- /* Allocate the evaluation information block */
+ info.evaluate_info =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info.evaluate_info) {
+ status = AE_NO_MEMORY;
+ goto error_exit;
+ }
- info.evaluate_info =
- ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
- if (!info.evaluate_info) {
- status = AE_NO_MEMORY;
- goto error_exit;
+ /*
+ * Execute the "global" _INI method that may appear at the root.
+ * This support is provided for Windows compatibility (Vista+) and
+ * is not part of the ACPI specification.
+ */
+ info.evaluate_info->prefix_node = acpi_gbl_root_node;
+ info.evaluate_info->relative_pathname = METHOD_NAME__INI;
+ info.evaluate_info->parameters = NULL;
+ info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info.evaluate_info);
+ if (ACPI_SUCCESS(status)) {
+ info.num_INI++;
+ }
}
/*
- * Execute the "global" _INI method that may appear at the root. This
- * support is provided for Windows compatibility (Vista+) and is not
- * part of the ACPI specification.
+ * Run all _REG methods
+ *
+ * Note: Any objects accessed by the _REG methods will be automatically
+ * initialized, even if they contain executable AML (see the call to
+ * acpi_ns_initialize_objects below).
*/
- info.evaluate_info->prefix_node = acpi_gbl_root_node;
- info.evaluate_info->relative_pathname = METHOD_NAME__INI;
- info.evaluate_info->parameters = NULL;
- info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Executing _REG OpRegion methods\n"));
- status = acpi_ns_evaluate(info.evaluate_info);
- if (ACPI_SUCCESS(status)) {
- info.num_INI++;
+ status = acpi_ev_initialize_op_regions();
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
}
- /* Walk namespace to execute all _INIs on present devices */
+ if (!(flags & ACPI_NO_DEVICE_INIT)) {
- status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, FALSE,
- acpi_ns_init_one_device, NULL, &info,
- NULL);
+ /* Walk namespace to execute all _INIs on present devices */
- /*
- * Any _OSI requests should be completed by now. If the BIOS has
- * requested any Windows OSI strings, we will always truncate
- * I/O addresses to 16 bits -- for Windows compatibility.
- */
- if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
- acpi_gbl_truncate_io_addresses = TRUE;
- }
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, FALSE,
+ acpi_ns_init_one_device, NULL,
+ &info, NULL);
- ACPI_FREE(info.evaluate_info);
- if (ACPI_FAILURE(status)) {
- goto error_exit;
- }
+ /*
+ * Any _OSI requests should be completed by now. If the BIOS has
+ * requested any Windows OSI strings, we will always truncate
+ * I/O addresses to 16 bits -- for Windows compatibility.
+ */
+ if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
+ acpi_gbl_truncate_io_addresses = TRUE;
+ }
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- " Executed %u _INI methods requiring %u _STA executions "
- "(examined %u objects)\n",
- info.num_INI, info.num_STA, info.device_count));
+ ACPI_FREE(info.evaluate_info);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ " Executed %u _INI methods requiring %u _STA executions "
+ "(examined %u objects)\n",
+ info.num_INI, info.num_STA,
+ info.device_count));
+ }
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 305218539df2..d48cbed342c1 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -269,8 +269,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
*/
if (ACPI_SUCCESS(status) &&
possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
- if (GET_CURRENT_ARG_TYPE(walk_state->arg_types) ==
- ARGP_SUPERNAME) {
+ if (walk_state->opcode == AML_UNLOAD_OP) {
/*
* acpi_ps_get_next_namestring has increased the AML pointer,
* so we need to restore the saved AML pointer for method call.
@@ -697,7 +696,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*
* PARAMETERS: walk_state - Current state
* parser_state - Current parser state object
- * arg_type - The parser argument type (ARGP_*)
+ * arg_type - The argument type (AML_*_ARG)
* return_arg - Where the next arg is returned
*
* RETURN: Status, and an op object containing the next argument.
@@ -817,9 +816,9 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_NO_MEMORY);
}
- /* super_name allows argument to be a method call */
+ /* To support super_name arg of Unload */
- if (arg_type == ARGP_SUPERNAME) {
+ if (walk_state->opcode == AML_UNLOAD_OP) {
status =
acpi_ps_get_next_namepath(walk_state,
parser_state, arg,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index b661a1e013fb..4dc6108de4ff 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -267,8 +267,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
if (!reload &&
acpi_gbl_disable_ssdt_table_install &&
ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
- ACPI_INFO((AE_INFO,
- "Ignoring installation of %4.4s at %8.8X%8.8X",
+ ACPI_INFO(("Ignoring installation of %4.4s at %8.8X%8.8X",
new_table_desc.signature.ascii,
ACPI_FORMAT_UINT64(address)));
goto release_and_exit;
@@ -432,7 +431,7 @@ finish_override:
return;
}
- ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X"
+ ACPI_INFO(("%4.4s 0x%8.8X%8.8X"
" %s table override, new table: 0x%8.8X%8.8X",
old_table_desc->signature.ascii,
ACPI_FORMAT_UINT64(old_table_desc->address),
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index fd4146d4ff49..26d61dbace0a 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -132,7 +132,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
header->signature, ACPI_FORMAT_UINT64(address),
header->length));
} else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
@@ -144,7 +144,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
+ ACPI_INFO(("RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
ACPI_FORMAT_UINT64(address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
@@ -158,8 +158,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
- ACPI_INFO((AE_INFO,
- "%-4.4s 0x%8.8X%8.8X"
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X"
" %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
local_header.signature, ACPI_FORMAT_UINT64(address),
local_header.length, local_header.revision,
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 3269bef371d7..9240c76d2823 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -174,9 +174,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
new_table);
- ACPI_INFO((AE_INFO,
- "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
- new_table->length));
+ ACPI_INFO(("Forced DSDT copy: length 0x%05X copied locally, original unmapped", new_table->length));
return (new_table);
}
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 278666e39563..3151968c10d1 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -47,6 +47,7 @@
#include "accommon.h"
#include "acnamesp.h"
#include "actables.h"
+#include "acevents.h"
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbxfload")
@@ -68,6 +69,25 @@ acpi_status __init acpi_load_tables(void)
ACPI_FUNCTION_TRACE(acpi_load_tables);
+ /*
+ * Install the default operation region handlers. These are the
+ * handlers that are defined by the ACPI specification to be
+ * "always accessible" -- namely, system_memory, system_IO, and
+ * PCI_Config. This also means that no _REG methods need to be
+ * run for these address spaces. We need to have these handlers
+ * installed before any AML code can be executed, especially any
+ * module-level code (11/2015).
+ * Note that we allow OSPMs to install their own region handlers
+ * between acpi_initialize_subsystem() and acpi_load_tables() to use
+ * their customized default region handlers.
+ */
+ status = acpi_ev_install_region_handlers();
+ if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Region initialization"));
+ return_ACPI_STATUS(status);
+ }
+
/* Load the namespace from the tables */
status = acpi_tb_load_namespace();
@@ -83,6 +103,20 @@ acpi_status __init acpi_load_tables(void)
"While loading namespace from ACPI tables"));
}
+ if (!acpi_gbl_group_module_level_code) {
+ /*
+ * Initialize the objects that remain uninitialized. This
+ * runs the executable AML that may be part of the
+ * declaration of these objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ */
+ status = acpi_ns_initialize_objects();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ acpi_gbl_namespace_initialized = TRUE;
return_ACPI_STATUS(status);
}
@@ -206,9 +240,7 @@ acpi_status acpi_tb_load_namespace(void)
}
if (!tables_failed) {
- ACPI_INFO((AE_INFO,
- "%u ACPI AML tables successfully acquired and loaded\n",
- tables_loaded));
+ ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded\n", tables_loaded));
} else {
ACPI_ERROR((AE_INFO,
"%u table load failures, %u successful",
@@ -301,7 +333,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
/* Install the table and load it into the namespace */
- ACPI_INFO((AE_INFO, "Host-directed Dynamic ACPI Table Load:"));
+ ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
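The comment added above spells out the new ordering contract: because acpi_load_tables() now installs the default SystemMemory/SystemIO/PCI_Config handlers itself (tolerating AE_ALREADY_EXISTS), an OSPM that wants to override one of them must do so between acpi_initialize_subsystem() and acpi_load_tables(). A hedged sketch of such an override; the handler body is a placeholder, and a real handler may also supply a setup callback instead of NULL:

    #include <linux/acpi.h>

    static acpi_status
    my_mem_handler(u32 function, acpi_physical_address address,
                   u32 bit_width, u64 *value, void *handler_context,
                   void *region_context)
    {
            /* Service the SystemMemory read/write here. */
            return AE_OK;
    }

    static void install_custom_handler(void)
    {
            /* Must run after acpi_initialize_subsystem() and before
             * acpi_load_tables() so the default install yields. */
            acpi_status status;

            status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                            ACPI_ADR_SPACE_SYSTEM_MEMORY,
                            my_mem_handler, NULL, NULL);
            if (ACPI_FAILURE(status))
                    pr_err("region handler install failed\n");
    }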
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index c9a720f2274a..f8e9978888e1 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -245,7 +245,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
acpi_status status;
void *object;
- ACPI_FUNCTION_NAME(os_acquire_object);
+ ACPI_FUNCTION_TRACE(os_acquire_object);
if (!cache) {
return_PTR(NULL);
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index c427a5cda465..d5c3adf19bd0 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -140,6 +140,67 @@ int acpi_ut_stricmp(char *string1, char *string2)
return (c1 - c2);
}
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
+ * functions. This is the size of the Destination buffer.
+ *
+ * RETURN: TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ * the result of the operation will not overflow the output string
+ * buffer.
+ *
+ * NOTE: These functions are typically only helpful for processing
+ * user input and command lines. For most ACPICA code, the
+ * required buffer length is precisely calculated before buffer
+ * allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+ if (strlen(source) >= dest_size) {
+ return (TRUE);
+ }
+
+ strcpy(dest, source);
+ return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+ if ((strlen(dest) + strlen(source)) >= dest_size) {
+ return (TRUE);
+ }
+
+ strcat(dest, source);
+ return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+ acpi_size dest_size,
+ char *source, acpi_size max_transfer_length)
+{
+ acpi_size actual_transfer_length;
+
+ actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
+
+ if ((strlen(dest) + actual_transfer_length) >= dest_size) {
+ return (TRUE);
+ }
+
+ strncat(dest, source, max_transfer_length);
+ return (FALSE);
+}
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_strtoul64
@@ -155,7 +216,15 @@ int acpi_ut_stricmp(char *string1, char *string2)
* 32-bit or 64-bit conversion, depending on the current mode
* of the interpreter.
*
- * NOTE: Does not support Octal strings, not needed.
+ * NOTES: acpi_gbl_integer_byte_width should be set to the proper width.
+ * For the core ACPICA code, this width depends on the DSDT
+ * version. For iASL, the default byte width is always 8.
+ *
+ * Does not support Octal strings, not needed at this time.
+ *
+ * There is an earlier version of the function after this one,
+ * below. It is slightly different from this one, and the two
+ * may eventually need to be merged. (01/2016).
*
******************************************************************************/
@@ -171,7 +240,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
u8 sign_of0x = 0;
u8 term = 0;
- ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
+ ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
switch (base) {
case ACPI_ANY_BASE:
@@ -318,63 +387,162 @@ error_exit:
}
}
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#ifdef _OBSOLETE_FUNCTIONS
+/* TBD: use version in ACPICA main code base? */
+/* DONE: 01/2016 */
+
/*******************************************************************************
*
- * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ * FUNCTION: strtoul64
*
- * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
- * functions. This is the size of the Destination buffer.
+ * PARAMETERS: string - Null terminated string
+ * terminator - Where a pointer to the terminating byte
+ * is returned
+ * base - Radix of the string
*
- * RETURN: TRUE if the operation would overflow the destination buffer.
+ * RETURN: Converted value
*
- * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
- * the result of the operation will not overflow the output string
- * buffer.
- *
- * NOTE: These functions are typically only helpful for processing
- * user input and command lines. For most ACPICA code, the
- * required buffer length is precisely calculated before buffer
- * allocation, so the use of these functions is unnecessary.
+ * DESCRIPTION: Convert a string into an unsigned value.
*
******************************************************************************/
-u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+acpi_status strtoul64(char *string, u32 base, u64 *ret_integer)
{
+ u32 index;
+ u32 sign;
+ u64 return_value = 0;
+ acpi_status status = AE_OK;
- if (strlen(source) >= dest_size) {
- return (TRUE);
+ *ret_integer = 0;
+
+ switch (base) {
+ case 0:
+ case 8:
+ case 10:
+ case 16:
+
+ break;
+
+ default:
+ /*
+ * The specified Base parameter is not in the domain of
+ * this function:
+ */
+ return (AE_BAD_PARAMETER);
}
- strcpy(dest, source);
- return (FALSE);
-}
+ /* Skip over any white space in the buffer: */
-u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
-{
+ while (isspace((int)*string) || *string == '\t') {
+ ++string;
+ }
- if ((strlen(dest) + strlen(source)) >= dest_size) {
- return (TRUE);
+ /*
+ * The buffer may contain an optional plus or minus sign.
+ * If it does, then skip over it but remember what it was:
+ */
+ if (*string == '-') {
+ sign = ACPI_SIGN_NEGATIVE;
+ ++string;
+ } else if (*string == '+') {
+ ++string;
+ sign = ACPI_SIGN_POSITIVE;
+ } else {
+ sign = ACPI_SIGN_POSITIVE;
}
- strcat(dest, source);
- return (FALSE);
-}
+ /*
+ * If the input parameter Base is zero, then we need to
+ * determine if it is octal, decimal, or hexadecimal:
+ */
+ if (base == 0) {
+ if (*string == '0') {
+ if (tolower((int)*(++string)) == 'x') {
+ base = 16;
+ ++string;
+ } else {
+ base = 8;
+ }
+ } else {
+ base = 10;
+ }
+ }
-u8
-acpi_ut_safe_strncat(char *dest,
- acpi_size dest_size,
- char *source, acpi_size max_transfer_length)
-{
- acpi_size actual_transfer_length;
+ /*
+ * For octal and hexadecimal bases, skip over the leading
+ * 0 or 0x, if they are present.
+ */
+ if (base == 8 && *string == '0') {
+ string++;
+ }
- actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
+ if (base == 16 && *string == '0' && tolower((int)*(++string)) == 'x') {
+ string++;
+ }
- if ((strlen(dest) + actual_transfer_length) >= dest_size) {
- return (TRUE);
+ /* Main loop: convert the string to an unsigned long */
+
+ while (*string) {
+ if (isdigit((int)*string)) {
+ index = ((u8)*string) - '0';
+ } else {
+ index = (u8)toupper((int)*string);
+ if (isupper((int)index)) {
+ index = index - 'A' + 10;
+ } else {
+ goto error_exit;
+ }
+ }
+
+ if (index >= base) {
+ goto error_exit;
+ }
+
+ /* Check to see if value is out of range: */
+
+ if (return_value > ((ACPI_UINT64_MAX - (u64)index) / (u64)base)) {
+ goto error_exit;
+ } else {
+ return_value *= base;
+ return_value += index;
+ }
+
+ ++string;
}
- strncat(dest, source, max_transfer_length);
- return (FALSE);
+ /* If a minus sign was present, then "the conversion is negated": */
+
+ if (sign == ACPI_SIGN_NEGATIVE) {
+ return_value = (ACPI_UINT32_MAX - return_value) + 1;
+ }
+
+ *ret_integer = return_value;
+ return (status);
+
+error_exit:
+ switch (base) {
+ case 8:
+
+ status = AE_BAD_OCTAL_CONSTANT;
+ break;
+
+ case 10:
+
+ status = AE_BAD_DECIMAL_CONSTANT;
+ break;
+
+ case 16:
+
+ status = AE_BAD_HEX_CONSTANT;
+ break;
+
+ default:
+
+ /* Base validated above */
+
+ break;
+ }
+
+ return (status);
}
#endif
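The safe string helpers moved above return TRUE when the operation would overflow the destination, so callers test the return value rather than a copied length. A userspace restatement of that contract, using local stand-ins rather than the ACPICA symbols:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in mirroring acpi_ut_safe_strcat(): nonzero on would-overflow. */
    static int safe_strcat(char *dest, size_t dest_size, const char *source)
    {
            if ((strlen(dest) + strlen(source)) >= dest_size)
                    return 1;
            strcat(dest, source);
            return 0;
    }

    int main(void)
    {
            char buf[8] = "abc";

            /* 3 + 5 >= 8: no room left for the NUL, so it is rejected. */
            if (safe_strcat(buf, sizeof(buf), "defgh"))
                    puts("rejected: would overflow 8-byte buffer");
            else
                    printf("ok: %s\n", buf);
            return 0;
    }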
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index c7c2bb8f3559..60c406a8efcb 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -712,7 +712,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
/* Print summary */
if (!num_outstanding) {
- ACPI_INFO((AE_INFO, "No outstanding allocations"));
+ ACPI_INFO(("No outstanding allocations"));
} else {
ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
num_outstanding, num_outstanding));
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6fe59597b599..d9f15cbcd8a0 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -175,8 +175,7 @@ ACPI_EXPORT_SYMBOL(acpi_warning)
* TBD: module_name and line_number args are not needed, should be removed.
*
******************************************************************************/
-void ACPI_INTERNAL_VAR_XFACE
-acpi_info(const char *module_name, u32 line_number, const char *format, ...)
+void ACPI_INTERNAL_VAR_XFACE acpi_info(const char *format, ...)
{
va_list arg_list;
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 721b87cce908..75b5f27da267 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -154,21 +154,6 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
*/
acpi_gbl_early_initialization = FALSE;
- /*
- * Install the default operation region handlers. These are the
- * handlers that are defined by the ACPI specification to be
- * "always accessible" -- namely, system_memory, system_IO, and
- * PCI_Config. This also means that no _REG methods need to be
- * run for these address spaces. We need to have these handlers
- * installed before any AML code can be executed, especially any
- * module-level code (11/2015).
- */
- status = acpi_ev_install_region_handlers();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During Region initialization"));
- return_ACPI_STATUS(status);
- }
#if (!ACPI_REDUCED_HARDWARE)
/* Enable ACPI mode */
@@ -260,23 +245,6 @@ acpi_status __init acpi_initialize_objects(u32 flags)
ACPI_FUNCTION_TRACE(acpi_initialize_objects);
- /*
- * Run all _REG methods
- *
- * Note: Any objects accessed by the _REG methods will be automatically
- * initialized, even if they contain executable AML (see the call to
- * acpi_ns_initialize_objects below).
- */
- acpi_gbl_reg_methods_enabled = TRUE;
- if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Executing _REG OpRegion methods\n"));
-
- status = acpi_ev_initialize_op_regions();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
#ifdef ACPI_EXEC_APP
/*
* This call implements the "initialization file" option for acpi_exec.
@@ -299,32 +267,27 @@ acpi_status __init acpi_initialize_objects(u32 flags)
*/
if (acpi_gbl_group_module_level_code) {
acpi_ns_exec_module_code_list();
- }
-
- /*
- * Initialize the objects that remain uninitialized. This runs the
- * executable AML that may be part of the declaration of these objects:
- * operation_regions, buffer_fields, Buffers, and Packages.
- */
- if (!(flags & ACPI_NO_OBJECT_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Completing Initialization of ACPI Objects\n"));
- status = acpi_ns_initialize_objects();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ /*
+ * Initialize the objects that remain uninitialized. This
+ * runs the executable AML that may be part of the
+ * declaration of these objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ */
+ if (!(flags & ACPI_NO_OBJECT_INIT)) {
+ status = acpi_ns_initialize_objects();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
}
}
/*
- * Initialize all device objects in the namespace. This runs the device
- * _STA and _INI methods.
+ * Initialize all device/region objects in the namespace. This runs
+ * the device _STA and _INI methods and region _REG methods.
*/
- if (!(flags & ACPI_NO_DEVICE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Initializing ACPI Devices\n"));
-
- status = acpi_ns_initialize_devices();
+ if (!(flags & (ACPI_NO_DEVICE_INIT | ACPI_NO_ADDRESS_SPACE_INIT))) {
+ status = acpi_ns_initialize_devices(flags);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index a2c8d7adb6eb..da370e1d31f4 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -536,7 +536,8 @@ int apei_resources_request(struct apei_resources *resources,
goto err_unmap_ioport;
}
- return 0;
+ goto arch_res_fini;
+
err_unmap_ioport:
list_for_each_entry(res, &resources->ioport, list) {
if (res == res_bak)
@@ -551,7 +552,8 @@ err_unmap_iomem:
release_mem_region(res->start, res->end - res->start);
}
arch_res_fini:
- apei_resources_fini(&arch_res);
+ if (arch_apei_filter_addr)
+ apei_resources_fini(&arch_res);
nvs_res_fini:
apei_resources_fini(&nvs_resources);
return rc;
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 0431883653be..559c1173de1c 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -519,7 +519,7 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
u64 param3, u64 param4)
{
int rc;
- unsigned long pfn;
+ u64 base_addr, size;
/* If user manually set "flags", make sure it is legal */
if (flags && (flags &
@@ -545,10 +545,17 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
/*
* Disallow crazy address masks that give BIOS leeway to pick
* injection address almost anywhere. Insist on page or
- * better granularity and that target address is normal RAM.
+ * better granularity and that target address is normal RAM or
+ * NVDIMM.
*/
- pfn = PFN_DOWN(param1 & param2);
- if (!page_is_ram(pfn) || ((param2 & PAGE_MASK) != PAGE_MASK))
+ base_addr = param1 & param2;
+ size = ~param2 + 1;
+
+ if (((param2 & PAGE_MASK) != PAGE_MASK) ||
+ ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ != REGION_INTERSECTS) &&
+ (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
+ != REGION_INTERSECTS)))
return -EINVAL;
inject:
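The base/size arithmetic above follows from param2 being a bit mask: bits set in param2 are fixed by the caller, so the injection target is param1 & param2 and the leeway the BIOS retains spans ~param2 + 1 bytes. A standalone sketch of that arithmetic (user-space C, illustrative values, 4 KiB pages assumed):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t param1 = 0x123456789000ULL; /* requested injection address */
		uint64_t param2 = ~0xfffULL;         /* address mask, page granularity */

		uint64_t base_addr = param1 & param2; /* bits the BIOS may not change */
		uint64_t size = ~param2 + 1;          /* span the mask leaves open */

		/* same page-or-better granularity test as the driver */
		if ((param2 & ~0xfffULL) != ~0xfffULL) {
			fprintf(stderr, "mask too loose\n");
			return 1;
		}
		printf("base=%#llx size=%#llx\n",
		       (unsigned long long)base_addr, (unsigned long long)size);
		return 0;
	}

With a page-granular mask, size works out to 0x1000, which is exactly the region passed to region_intersects() above.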
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6e6bc1059301..006c3894c6ea 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1207,6 +1207,9 @@ static int __init erst_init(void)
"Failed to allocate %lld bytes for persistent store error log.\n",
erst_erange.size);
+ /* Cleanup ERST Resources */
+ apei_resources_fini(&erst_resources);
+
return 0;
err_release_erange:
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 3dd9c462d22a..60746ef904e4 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -26,7 +26,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -79,6 +79,11 @@
((struct acpi_hest_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
+/*
+ * This driver isn't really modular; however, for the time being,
+ * continuing to use module_param is the easiest way to remain
+ * compatible with existing boot arg use cases.
+ */
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
@@ -1148,18 +1153,4 @@ err_ioremap_exit:
err:
return rc;
}
-
-static void __exit ghes_exit(void)
-{
- platform_driver_unregister(&ghes_platform_driver);
- ghes_estatus_pool_exit();
- ghes_ioremap_exit();
-}
-
-module_init(ghes_init);
-module_exit(ghes_exit);
-
-MODULE_AUTHOR("Huang Ying");
-MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:GHES");
+device_initcall(ghes_init);
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index a83e3c62c5a9..75f128e766a9 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -1,4 +1,6 @@
/*
+ * BGRT boot graphic support
+ * Authors: Matthew Garrett, Josh Triplett <josh@joshtriplett.org>
* Copyright 2012 Red Hat, Inc <mjg@redhat.com>
* Copyright 2012 Intel Corporation
*
@@ -8,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysfs.h>
@@ -103,9 +104,4 @@ out_kobject:
kobject_put(bgrt_kobj);
return ret;
}
-
-module_init(bgrt_init);
-
-MODULE_AUTHOR("Matthew Garrett, Josh Triplett <josh@joshtriplett.org>");
-MODULE_DESCRIPTION("BGRT boot graphic support");
-MODULE_LICENSE("GPL");
+device_initcall(bgrt_init);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 891c42d1cd65..c068c829b453 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -479,24 +479,38 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
Device Matching
-------------------------------------------------------------------------- */
-static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
- const struct device *dev)
+/**
+ * acpi_get_first_physical_node - Get first physical node of an ACPI device
+ * @adev: ACPI device in question
+ *
+ * Return: First physical node of ACPI device @adev
+ */
+struct device *acpi_get_first_physical_node(struct acpi_device *adev)
{
struct mutex *physical_node_lock = &adev->physical_node_lock;
+ struct device *phys_dev;
mutex_lock(physical_node_lock);
if (list_empty(&adev->physical_node_list)) {
- adev = NULL;
+ phys_dev = NULL;
} else {
const struct acpi_device_physical_node *node;
node = list_first_entry(&adev->physical_node_list,
struct acpi_device_physical_node, node);
- if (node->dev != dev)
- adev = NULL;
+
+ phys_dev = node->dev;
}
mutex_unlock(physical_node_lock);
- return adev;
+ return phys_dev;
+}
+
+static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
+ const struct device *dev)
+{
+ const struct device *phys_dev = acpi_get_first_physical_node(adev);
+
+ return phys_dev && phys_dev == dev ? adev : NULL;
}
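A minimal sketch of how a caller might use the new helper; note that it returns a borrowed pointer, looked up under physical_node_lock but not reference-counted (kernel context; the wrapper name below is hypothetical):

	/* Sketch: is @dev the primary (first-enumerated) physical node of @adev?
	 * Mirrors the acpi_primary_dev_companion() rewrite above. */
	static bool example_is_primary_node(struct acpi_device *adev,
					    const struct device *dev)
	{
		return acpi_get_first_physical_node(adev) == dev;
	}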
/**
@@ -1005,6 +1019,9 @@ static int __init acpi_bus_init(void)
goto error1;
}
+ /* Set capability bits for _OSC under processor scope */
+ acpi_early_processor_osc();
+
/*
* _OSC method may exist in module level code,
* so it must be run after ACPI_FULL_INITIALIZATION
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6730f965b379..8adac69dba3d 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -39,6 +39,7 @@
#include <linux/cpufreq.h>
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <acpi/cppc_acpi.h>
/*
@@ -63,58 +64,140 @@ static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
-static u16 pcc_cmd_delay;
static bool pcc_channel_acquired;
+static ktime_t deadline;
+static unsigned int pcc_mpar, pcc_mrtt;
+
+/* pcc mapped address + header size + offset within PCC subspace */
+#define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs))
/*
* Arbitrary Retries in case the remote processor is slow to respond
- * to PCC commands.
+ * to PCC commands. Keeping it high enough to cover emulators where
+ * the processors run painfully slowly.
*/
#define NUM_RETRIES 500
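NUM_RETRIES is later multiplied by the PCCT's nominal latency to build the polling deadline (see register_pcc_channel() below). As a worked example, with an assumed latency that is not taken from any real PCCT:

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_RETRIES 500

	int main(void)
	{
		uint32_t nominal_latency_us = 100; /* hypothetical PCCT latency */
		uint64_t deadline_us = (uint64_t)NUM_RETRIES * nominal_latency_us;

		/* 500 * 100us = 50ms of polling before check_pcc_chan() gives up */
		printf("deadline: %llu us\n", (unsigned long long)deadline_us);
		return 0;
	}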
+static int check_pcc_chan(void)
+{
+ int ret = -EIO;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
+ ktime_t next_deadline = ktime_add(ktime_get(), deadline);
+
+ /* Retry in case the remote processor was too slow to catch up. */
+ while (!ktime_after(ktime_get(), next_deadline)) {
+ /*
+ * Per spec, prior to boot the PCC space will be initialized by
+ * the platform and should have set the command completion bit when
+ * PCC can be used by OSPM.
+ */
+ if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
+ ret = 0;
+ break;
+ }
+ /*
+ * Reduce bus traffic in case this loop takes longer than
+ * a few retries.
+ */
+ udelay(3);
+ }
+
+ return ret;
+}
+
static int send_pcc_cmd(u16 cmd)
{
- int retries, result = -EIO;
- struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
+ int ret = -EIO;
struct acpi_pcct_shared_memory *generic_comm_base =
(struct acpi_pcct_shared_memory *) pcc_comm_addr;
- u32 cmd_latency = pcct_ss->latency;
+ static ktime_t last_cmd_cmpl_time, last_mpar_reset;
+ static int mpar_count;
+ unsigned int time_delta;
- /* Min time OS should wait before sending next command. */
- udelay(pcc_cmd_delay);
+ /*
+ * For CMD_WRITE we know for a fact the caller should have checked
+ * the channel before writing to PCC space
+ */
+ if (cmd == CMD_READ) {
+ ret = check_pcc_chan();
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Handle the Minimum Request Turnaround Time (MRTT):
+ * "The minimum amount of time that OSPM must wait after the completion
+ * of a command before issuing the next command, in microseconds"
+ */
+ if (pcc_mrtt) {
+ time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
+ if (pcc_mrtt > time_delta)
+ udelay(pcc_mrtt - time_delta);
+ }
+
+ /*
+ * Handle a non-zero Maximum Periodic Access Rate (MPAR):
+ * "The maximum number of periodic requests that the subspace channel can
+ * support, reported in commands per minute. 0 indicates no limitation."
+ *
+ * This parameter should ideally be zero or large enough so that it can
+ * handle the maximum number of requests that all the cores in the system
+ * can collectively generate. If it is not, we will follow the spec and
+ * just not send the request to the platform after hitting the MPAR limit
+ * in any 60s window.
+ */
+ if (pcc_mpar) {
+ if (mpar_count == 0) {
+ time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
+ if (time_delta < 60 * MSEC_PER_SEC) {
+ pr_debug("PCC cmd not sent due to MPAR limit");
+ return -EIO;
+ }
+ last_mpar_reset = ktime_get();
+ mpar_count = pcc_mpar;
+ }
+ mpar_count--;
+ }
/* Write to the shared comm region. */
- writew(cmd, &generic_comm_base->command);
+ writew_relaxed(cmd, &generic_comm_base->command);
/* Flip CMD COMPLETE bit */
- writew(0, &generic_comm_base->status);
+ writew_relaxed(0, &generic_comm_base->status);
/* Ring doorbell */
- result = mbox_send_message(pcc_channel, &cmd);
- if (result < 0) {
+ ret = mbox_send_message(pcc_channel, &cmd);
+ if (ret < 0) {
pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
- cmd, result);
- return result;
+ cmd, ret);
+ return ret;
}
- /* Wait for a nominal time to let platform process command. */
- udelay(cmd_latency);
-
- /* Retry in case the remote processor was too slow to catch up. */
- for (retries = NUM_RETRIES; retries > 0; retries--) {
- if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
- result = 0;
- break;
- }
+ /*
+ * For READs we need to ensure the cmd completed so that
+ * the ensuing read()s can proceed. For WRITEs we don't care
+ * because the actual write()s are done before coming here
+ * and the next READ or WRITE will check whether the channel
+ * is busy/free at the entry of this call.
+ *
+ * If the Minimum Request Turnaround Time is non-zero, we need
+ * to record the completion time of both READ and WRITE
+ * commands for proper handling of MRTT, so we need to check
+ * for pcc_mrtt in addition to CMD_READ.
+ */
+ if (cmd == CMD_READ || pcc_mrtt) {
+ ret = check_pcc_chan();
+ if (pcc_mrtt)
+ last_cmd_cmpl_time = ktime_get();
}
- mbox_client_txdone(pcc_channel, result);
- return result;
+ mbox_client_txdone(pcc_channel, ret);
+ return ret;
}
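The MPAR logic above amounts to a fixed-window rate limiter: at most pcc_mpar commands per 60-second window, with the window reset lazily once the budget runs out. A standalone model of that bookkeeping (user-space C; the millisecond clock is supplied by the caller and all values are hypothetical):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MPAR_WINDOW_MS (60 * 1000)

	static unsigned int pcc_mpar = 10; /* hypothetical max_access_rate */
	static unsigned int mpar_count;
	static uint64_t last_mpar_reset_ms;

	/* May a command be sent at time @now_ms? Fixed 60s window, reset lazily. */
	static bool mpar_may_send(uint64_t now_ms)
	{
		if (!pcc_mpar) /* 0 means "no limitation" per the spec */
			return true;

		if (mpar_count == 0) {
			if (now_ms - last_mpar_reset_ms < MPAR_WINDOW_MS)
				return false; /* budget exhausted in this window */
			last_mpar_reset_ms = now_ms;
			mpar_count = pcc_mpar;
		}
		mpar_count--;
		return true;
	}

	int main(void)
	{
		printf("%d\n", mpar_may_send(60000)); /* 1: new window, budget 10 */
		return 0;
	}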
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
- if (ret)
+ if (ret < 0)
pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
*(u16 *)msg, ret);
else
@@ -306,6 +389,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
{
struct acpi_pcct_hw_reduced *cppc_ss;
unsigned int len;
+ u64 usecs_lat;
if (pcc_subspace_idx >= 0) {
pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
@@ -335,7 +419,16 @@ static int register_pcc_channel(int pcc_subspace_idx)
*/
comm_base_addr = cppc_ss->base_address;
len = cppc_ss->length;
- pcc_cmd_delay = cppc_ss->min_turnaround_time;
+
+ /*
+ * cppc_ss->latency is just a Nominal value. In reality
+ * the remote processor could be much slower to reply.
+ * So add an arbitrary amount of wait on top of Nominal.
+ */
+ usecs_lat = NUM_RETRIES * cppc_ss->latency;
+ deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+ pcc_mrtt = cppc_ss->min_turnaround_time;
+ pcc_mpar = cppc_ss->max_access_rate;
pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
if (!pcc_comm_addr) {
@@ -546,29 +639,74 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
-static u64 get_phys_addr(struct cpc_reg *reg)
-{
- /* PCC communication addr space begins at byte offset 0x8. */
- if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
- return (u64)comm_base_addr + 0x8 + reg->address;
- else
- return reg->address;
-}
+/*
+ * Since cpc_read and cpc_write are called while holding pcc_lock, they should
+ * be as fast as possible. We have already mapped the PCC subspace during init,
+ * so we can access it directly.
+ */
-static void cpc_read(struct cpc_reg *reg, u64 *val)
+static int cpc_read(struct cpc_reg *reg, u64 *val)
{
- u64 addr = get_phys_addr(reg);
+ int ret_val = 0;
- acpi_os_read_memory((acpi_physical_address)addr,
- val, reg->bit_width);
+ *val = 0;
+ if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+ switch (reg->bit_width) {
+ case 8:
+ *val = readb_relaxed(vaddr);
+ break;
+ case 16:
+ *val = readw_relaxed(vaddr);
+ break;
+ case 32:
+ *val = readl_relaxed(vaddr);
+ break;
+ case 64:
+ *val = readq_relaxed(vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot read %u bit width from PCC\n",
+ reg->bit_width);
+ ret_val = -EFAULT;
+ }
+ } else
+ ret_val = acpi_os_read_memory((acpi_physical_address)reg->address,
+ val, reg->bit_width);
+ return ret_val;
}
-static void cpc_write(struct cpc_reg *reg, u64 val)
+static int cpc_write(struct cpc_reg *reg, u64 val)
{
- u64 addr = get_phys_addr(reg);
+ int ret_val = 0;
- acpi_os_write_memory((acpi_physical_address)addr,
- val, reg->bit_width);
+ if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+ switch (reg->bit_width) {
+ case 8:
+ writeb_relaxed(val, vaddr);
+ break;
+ case 16:
+ writew_relaxed(val, vaddr);
+ break;
+ case 32:
+ writel_relaxed(val, vaddr);
+ break;
+ case 64:
+ writeq_relaxed(val, vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot write %u bit width to PCC\n",
+ reg->bit_width);
+ ret_val = -EFAULT;
+ break;
+ }
+ } else
+ ret_val = acpi_os_write_memory((acpi_physical_address)reg->address,
+ val, reg->bit_width);
+ return ret_val;
}
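Both accessors dispatch on the register's declared bit width because the PCC subspace is plain memory-mapped I/O; an unsupported width fails loudly instead of silently truncating. A user-space analogue of that dispatch, backed by a buffer instead of an ioremap (illustrative only, little-endian host assumed):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	static int width_read(const void *vaddr, unsigned int bit_width,
			      uint64_t *val)
	{
		*val = 0;
		switch (bit_width) {
		case 8:  *val = *(const uint8_t *)vaddr; break;
		case 16: { uint16_t v; memcpy(&v, vaddr, 2); *val = v; break; }
		case 32: { uint32_t v; memcpy(&v, vaddr, 4); *val = v; break; }
		case 64: memcpy(val, vaddr, 8); break;
		default:
			fprintf(stderr, "cannot read %u-bit register\n", bit_width);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		uint8_t reg[8] = { 0x34, 0x12 };
		uint64_t val;

		if (!width_read(reg, 16, &val))
			printf("%#llx\n", (unsigned long long)val); /* 0x1234 (LE) */
		return 0;
	}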
/**
@@ -604,7 +742,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
(ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
(nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ)) {
+ if (send_pcc_cmd(CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
@@ -662,7 +800,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
(reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ)) {
+ if (send_pcc_cmd(CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
@@ -713,6 +851,13 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
spin_lock(&pcc_lock);
+ /* If this is a PCC reg, check whether the channel is free before writing */
+ if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ ret = check_pcc_chan();
+ if (ret)
+ goto busy_channel;
+ }
+
/*
* Skip writing MIN/MAX until Linux knows how to come up with
* useful values.
@@ -722,10 +867,10 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
/* Is this a PCC reg ?*/
if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
/* Ring doorbell so Remote can get our perf request. */
- if (send_pcc_cmd(CMD_WRITE))
+ if (send_pcc_cmd(CMD_WRITE) < 0)
ret = -EIO;
}
-
+busy_channel:
spin_unlock(&pcc_lock);
return ret;
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index bea8e425a8de..6c7dd7af789e 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -73,6 +73,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
loff_t init_off = *off;
int err = 0;
+ if (!write_support)
+ return -EINVAL;
+
if (*off >= EC_SPACE_SIZE)
return 0;
if (*off + count >= EC_SPACE_SIZE) {
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 6322db64b4a4..384cfc3083e1 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
-static struct dev_pm_ops acpi_fan_pm = {
+static const struct dev_pm_ops acpi_fan_pm = {
.resume = acpi_fan_resume,
.freeze = acpi_fan_suspend,
.thaw = acpi_fan_resume,
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 1e6833a5cd44..7c188472d9c2 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -20,6 +20,7 @@
#define PREFIX "ACPI: "
+void acpi_initrd_initialize_tables(void);
acpi_status acpi_os_initialize1(void);
void init_acpi_device_notify(void);
int acpi_scan_init(void);
@@ -29,6 +30,11 @@ void acpi_processor_init(void);
void acpi_platform_init(void);
void acpi_pnp_init(void);
void acpi_int340x_thermal_init(void);
+#ifdef CONFIG_ARM_AMBA
+void acpi_amba_init(void);
+#else
+static inline void acpi_amba_init(void) {}
+#endif
int acpi_sysfs_init(void);
void acpi_container_init(void);
void acpi_memory_hotplug_init(void);
@@ -106,6 +112,7 @@ bool acpi_device_is_present(struct acpi_device *adev);
bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev);
+struct device *acpi_get_first_physical_node(struct acpi_device *adev);
/* --------------------------------------------------------------------------
Device Matching and Notification
@@ -138,6 +145,12 @@ void acpi_early_processor_set_pdc(void);
static inline void acpi_early_processor_set_pdc(void) {}
#endif
+#ifdef CONFIG_X86
+void acpi_early_processor_osc(void);
+#else
+static inline void acpi_early_processor_osc(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 35947ac87644..63cc9dbe4f3b 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -21,6 +21,7 @@
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
+#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"
@@ -34,6 +35,18 @@ static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
+static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
+module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");
+
+/* After three payloads of overflow, it's dead, Jim */
+static unsigned int scrub_overflow_abort = 3;
+module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scrub_overflow_abort,
+ "Number of times we overflow ARS results before abort");
+
+static struct workqueue_struct *nfit_wq;
+
struct nfit_table_prev {
struct list_head spas;
struct list_head memdevs;
@@ -72,9 +85,90 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
return to_acpi_device(acpi_desc->dev);
}
+static int xlat_status(void *buf, unsigned int cmd)
+{
+ struct nd_cmd_clear_error *clear_err;
+ struct nd_cmd_ars_status *ars_status;
+ struct nd_cmd_ars_start *ars_start;
+ struct nd_cmd_ars_cap *ars_cap;
+ u16 flags;
+
+ switch (cmd) {
+ case ND_CMD_ARS_CAP:
+ ars_cap = buf;
+ if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
+ return -ENOTTY;
+
+ /* Command failed */
+ if (ars_cap->status & 0xffff)
+ return -EIO;
+
+ /* No supported scan types for this range */
+ flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
+ if ((ars_cap->status >> 16 & flags) == 0)
+ return -ENOTTY;
+ break;
+ case ND_CMD_ARS_START:
+ ars_start = buf;
+ /* ARS is in progress */
+ if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
+ return -EBUSY;
+
+ /* Command failed */
+ if (ars_start->status & 0xffff)
+ return -EIO;
+ break;
+ case ND_CMD_ARS_STATUS:
+ ars_status = buf;
+ /* Command failed */
+ if (ars_status->status & 0xffff)
+ return -EIO;
+ /* Check the extended status (upper two bytes) */
+ if (ars_status->status == NFIT_ARS_STATUS_DONE)
+ return 0;
+
+ /* ARS is in progress */
+ if (ars_status->status == NFIT_ARS_STATUS_BUSY)
+ return -EBUSY;
+
+ /* No ARS performed for the current boot */
+ if (ars_status->status == NFIT_ARS_STATUS_NONE)
+ return -EAGAIN;
+
+ /*
+ * ARS interrupted, either we overflowed or some other
+ * agent wants the scan to stop. If we didn't overflow
+ * then just continue with the returned results.
+ */
+ if (ars_status->status == NFIT_ARS_STATUS_INTR) {
+ if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
+ return -ENOSPC;
+ return 0;
+ }
+
+ /* Unknown status */
+ if (ars_status->status >> 16)
+ return -EIO;
+ break;
+ case ND_CMD_CLEAR_ERROR:
+ clear_err = buf;
+ if (clear_err->status & 0xffff)
+ return -EIO;
+ if (!clear_err->cleared)
+ return -EIO;
+ if (clear_err->length > clear_err->cleared)
+ return clear_err->cleared;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
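xlat_status() depends on the DSM status-word layout: the low 16 bits carry the command status and the upper 16 bits the extended ARS status, which is why the NFIT_ARS_STATUS_* constants added to nfit.h below are shifted by 16. A small decoding sketch under that layout:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t status = 1 << 16; /* extended status 1: ARS in progress */

		uint16_t cmd_status = status & 0xffff; /* non-zero => command failed */
		uint16_t ext_status = status >> 16;    /* done/busy/none/intr */

		printf("cmd=%#x ext=%#x\n", cmd_status, ext_status);
		return 0;
	}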
+
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
- unsigned int buf_len)
+ unsigned int buf_len, int *cmd_rc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
const struct nd_cmd_desc *desc = NULL;
@@ -185,14 +279,19 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
* unfilled in the output buffer
*/
rc = buf_len - offset - in_buf.buffer.length;
+ if (cmd_rc)
+ *cmd_rc = xlat_status(buf, cmd);
} else {
dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
__func__, dimm_name, cmd_name, buf_len,
offset);
rc = -ENXIO;
}
- } else
+ } else {
rc = 0;
+ if (cmd_rc)
+ *cmd_rc = xlat_status(buf, cmd);
+ }
out:
ACPI_FREE(out_obj);
@@ -675,12 +774,11 @@ static struct attribute_group acpi_nfit_attribute_group = {
.attrs = acpi_nfit_attributes,
};
-const struct attribute_group *acpi_nfit_attribute_groups[] = {
+static const struct attribute_group *acpi_nfit_attribute_groups[] = {
&nvdimm_bus_attribute_group,
&acpi_nfit_attribute_group,
NULL,
};
-EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
@@ -917,7 +1015,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
if (!adev)
return;
- for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
+ for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
set_bit(i, &nd_desc->dsm_mask);
}
@@ -1105,7 +1203,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
writeq(cmd, mmio->addr.base + offset);
wmb_blk(nfit_blk);
- if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
+ if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
readq(mmio->addr.base + offset);
}
@@ -1141,7 +1239,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
memcpy_to_pmem(mmio->addr.aperture + offset,
iobuf + copied, c);
else {
- if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
+ if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
mmio_flush_range((void __force *)
mmio->addr.aperture + offset, c);
@@ -1328,13 +1426,13 @@ static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
memset(&flags, 0, sizeof(flags));
rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
- sizeof(flags));
+ sizeof(flags), NULL);
if (rc >= 0 && flags.status == 0)
nfit_blk->dimm_flags = flags.flags;
else if (rc == -ENOTTY) {
/* fall back to a conservative default */
- nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
+ nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
rc = 0;
} else
rc = -ENXIO;
@@ -1473,93 +1571,85 @@ static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
/* devm will free nfit_blk */
}
-static int ars_get_cap(struct nvdimm_bus_descriptor *nd_desc,
- struct nd_cmd_ars_cap *cmd, u64 addr, u64 length)
+static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
+ struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
- cmd->address = addr;
- cmd->length = length;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ int cmd_rc, rc;
- return nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
- sizeof(*cmd));
+ cmd->address = spa->address;
+ cmd->length = spa->length;
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
+ sizeof(*cmd), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
}
-static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
- struct nd_cmd_ars_start *cmd, u64 addr, u64 length)
+static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
int rc;
+ int cmd_rc;
+ struct nd_cmd_ars_start ars_start;
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
- cmd->address = addr;
- cmd->length = length;
- cmd->type = ND_ARS_PERSISTENT;
+ memset(&ars_start, 0, sizeof(ars_start));
+ ars_start.address = spa->address;
+ ars_start.length = spa->length;
+ if (nfit_spa_type(spa) == NFIT_SPA_PM)
+ ars_start.type = ND_ARS_PERSISTENT;
+ else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
+ ars_start.type = ND_ARS_VOLATILE;
+ else
+ return -ENOTTY;
- while (1) {
- rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, cmd,
- sizeof(*cmd));
- if (rc)
- return rc;
- switch (cmd->status) {
- case 0:
- return 0;
- case 1:
- /* ARS unsupported, but we should never get here */
- return 0;
- case 6:
- /* ARS is in progress */
- msleep(1000);
- break;
- default:
- return -ENXIO;
- }
- }
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
+ sizeof(ars_start), &cmd_rc);
+
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
}
-static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
- struct nd_cmd_ars_status *cmd, u32 size)
+static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
- int rc;
+ int rc, cmd_rc;
+ struct nd_cmd_ars_start ars_start;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
+
+ memset(&ars_start, 0, sizeof(ars_start));
+ ars_start.address = ars_status->restart_address;
+ ars_start.length = ars_status->restart_length;
+ ars_start.type = ars_status->type;
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
+ sizeof(ars_start), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
+}
- while (1) {
- rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
- size);
- if (rc || cmd->status & 0xffff)
- return -ENXIO;
+static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
+ int rc, cmd_rc;
- /* Check extended status (Upper two bytes) */
- switch (cmd->status >> 16) {
- case 0:
- return 0;
- case 1:
- /* ARS is in progress */
- msleep(1000);
- break;
- case 2:
- /* No ARS performed for the current boot */
- return 0;
- case 3:
- /* TODO: error list overflow support */
- default:
- return -ENXIO;
- }
- }
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
+ acpi_desc->ars_status_size, &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
}
static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
- struct nd_cmd_ars_status *ars_status, u64 start)
+ struct nd_cmd_ars_status *ars_status)
{
int rc;
u32 i;
- /*
- * The address field returned by ars_status should be either
- * less than or equal to the address we last started ARS for.
- * The (start, length) returned by ars_status should also have
- * non-zero overlap with the range we started ARS for.
- * If this is not the case, bail.
- */
- if (ars_status->address > start ||
- (ars_status->address + ars_status->length < start))
- return -ENXIO;
-
for (i = 0; i < ars_status->num_records; i++) {
rc = nvdimm_bus_add_poison(nvdimm_bus,
ars_status->records[i].err_address,
@@ -1571,118 +1661,56 @@ static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
return 0;
}
-static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
- struct nd_region_desc *ndr_desc)
+static void acpi_nfit_remove_resource(void *data)
{
- struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
- struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
- struct nd_cmd_ars_status *ars_status = NULL;
- struct nd_cmd_ars_start *ars_start = NULL;
- struct nd_cmd_ars_cap *ars_cap = NULL;
- u64 start, len, cur, remaining;
- u32 ars_status_size;
- int rc;
-
- ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
- if (!ars_cap)
- return -ENOMEM;
-
- start = ndr_desc->res->start;
- len = ndr_desc->res->end - ndr_desc->res->start + 1;
+ struct resource *res = data;
- /*
- * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
- * Scrub' flag in extended status is not set, skip this but continue
- * initialization
- */
- rc = ars_get_cap(nd_desc, ars_cap, start, len);
- if (rc == -ENOTTY) {
- dev_dbg(acpi_desc->dev,
- "Address Range Scrub is not implemented, won't create an error list\n");
- rc = 0;
- goto out;
- }
- if (rc)
- goto out;
-
- if ((ars_cap->status & 0xffff) ||
- !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
- dev_warn(acpi_desc->dev,
- "ARS unsupported (status: 0x%x), won't create an error list\n",
- ars_cap->status);
- goto out;
- }
-
- /*
- * Check if a full-range ARS has been run. If so, use those results
- * without having to start a new ARS.
- */
- ars_status_size = ars_cap->max_ars_out;
- ars_status = kzalloc(ars_status_size, GFP_KERNEL);
- if (!ars_status) {
- rc = -ENOMEM;
- goto out;
- }
+ remove_resource(res);
+}
- rc = ars_get_status(nd_desc, ars_status, ars_status_size);
- if (rc)
- goto out;
+static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
+ struct nd_region_desc *ndr_desc)
+{
+ struct resource *res, *nd_res = ndr_desc->res;
+ int is_pmem, ret;
- if (ars_status->address <= start &&
- (ars_status->address + ars_status->length >= start + len)) {
- rc = ars_status_process_records(nvdimm_bus, ars_status, start);
- goto out;
- }
+ /* No operation if the region is already registered as PMEM */
+ is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
+ IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
+ if (is_pmem == REGION_INTERSECTS)
+ return 0;
- /*
- * ARS_STATUS can overflow if the number of poison entries found is
- * greater than the maximum buffer size (ars_cap->max_ars_out)
- * To detect overflow, check if the length field of ars_status
- * is less than the length we supplied. If so, process the
- * error entries we got, adjust the start point, and start again
- */
- ars_start = kzalloc(sizeof(*ars_start), GFP_KERNEL);
- if (!ars_start)
+ res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
return -ENOMEM;
- cur = start;
- remaining = len;
- do {
- u64 done, end;
+ res->name = "Persistent Memory";
+ res->start = nd_res->start;
+ res->end = nd_res->end;
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_PERSISTENT_MEMORY;
- rc = ars_do_start(nd_desc, ars_start, cur, remaining);
- if (rc)
- goto out;
+ ret = insert_resource(&iomem_resource, res);
+ if (ret)
+ return ret;
- rc = ars_get_status(nd_desc, ars_status, ars_status_size);
- if (rc)
- goto out;
-
- rc = ars_status_process_records(nvdimm_bus, ars_status, cur);
- if (rc)
- goto out;
-
- end = min(cur + remaining,
- ars_status->address + ars_status->length);
- done = end - cur;
- cur += done;
- remaining -= done;
- } while (remaining);
+ ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
+ if (ret) {
+ remove_resource(res);
+ return ret;
+ }
- out:
- kfree(ars_cap);
- kfree(ars_start);
- kfree(ars_status);
- return rc;
+ return 0;
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
struct acpi_nfit_memory_map *memdev,
- struct acpi_nfit_system_address *spa)
+ struct nfit_spa *nfit_spa)
{
struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
memdev->device_handle);
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
struct nd_blk_region_desc *ndbr_desc;
struct nfit_mem *nfit_mem;
int blk_valid = 0;
@@ -1718,7 +1746,9 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
ndbr_desc->enable = acpi_nfit_blk_region_enable;
ndbr_desc->disable = acpi_nfit_blk_region_disable;
ndbr_desc->do_io = acpi_desc->blk_do_io;
- if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
+ nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
return -ENOMEM;
break;
}
@@ -1738,7 +1768,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
struct resource res;
int count = 0, rc;
- if (nfit_spa->is_registered)
+ if (nfit_spa->nd_region)
return 0;
if (spa->range_index == 0) {
@@ -1775,47 +1805,332 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
}
nd_mapping = &nd_mappings[count++];
rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
- memdev, spa);
+ memdev, nfit_spa);
if (rc)
- return rc;
+ goto out;
}
ndr_desc->nd_mapping = nd_mappings;
ndr_desc->num_mappings = count;
rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
if (rc)
- return rc;
+ goto out;
nvdimm_bus = acpi_desc->nvdimm_bus;
if (nfit_spa_type(spa) == NFIT_SPA_PM) {
- rc = acpi_nfit_find_poison(acpi_desc, ndr_desc);
+ rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
if (rc) {
- dev_err(acpi_desc->dev,
- "error while performing ARS to find poison: %d\n",
+ dev_warn(acpi_desc->dev,
+ "failed to insert pmem resource to iomem: %d\n",
rc);
- return rc;
+ goto out;
}
- if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
- return -ENOMEM;
+
+ nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
+ rc = -ENOMEM;
} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
- if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
- return -ENOMEM;
+ nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
+ rc = -ENOMEM;
}
- nfit_spa->is_registered = 1;
+ out:
+ if (rc)
+ dev_err(acpi_desc->dev, "failed to register spa range %d\n",
+ nfit_spa->spa->range_index);
+ return rc;
+}
+
+static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
+ u32 max_ars)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nd_cmd_ars_status *ars_status;
+
+ if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
+ memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
+ return 0;
+ }
+
+ if (acpi_desc->ars_status)
+ devm_kfree(dev, acpi_desc->ars_status);
+ acpi_desc->ars_status = NULL;
+ ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
+ if (!ars_status)
+ return -ENOMEM;
+ acpi_desc->ars_status = ars_status;
+ acpi_desc->ars_status_size = max_ars;
return 0;
}
-static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
+static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
+{
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ int rc;
+
+ if (!nfit_spa->max_ars) {
+ struct nd_cmd_ars_cap ars_cap;
+
+ memset(&ars_cap, 0, sizeof(ars_cap));
+ rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
+ if (rc < 0)
+ return rc;
+ nfit_spa->max_ars = ars_cap.max_ars_out;
+ nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
+ /* check that the supported scrub types match the spa type */
+ if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
+ ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
+ return -ENOTTY;
+ else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
+ ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
+ return -ENOTTY;
+ }
+
+ if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
+ return -ENOMEM;
+
+ rc = ars_get_status(acpi_desc);
+ if (rc < 0 && rc != -ENOSPC)
+ return rc;
+
+ if (ars_status_process_records(acpi_desc->nvdimm_bus,
+ acpi_desc->ars_status))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
{
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ unsigned int overflow_retry = scrub_overflow_abort;
+ u64 init_ars_start = 0, init_ars_len = 0;
+ struct device *dev = acpi_desc->dev;
+ unsigned int tmo = scrub_timeout;
+ int rc;
+
+ if (nfit_spa->ars_done || !nfit_spa->nd_region)
+ return;
+
+ rc = ars_start(acpi_desc, nfit_spa);
+ /*
+ * If we timed out the initial scan we'll still be busy here,
+ * and will wait another timeout before giving up permanently.
+ */
+ if (rc < 0 && rc != -EBUSY)
+ return;
+
+ do {
+ u64 ars_start, ars_len;
+
+ if (acpi_desc->cancel)
+ break;
+ rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
+ if (rc == -ENOTTY)
+ break;
+ if (rc == -EBUSY && !tmo) {
+ dev_warn(dev, "range %d ars timeout, aborting\n",
+ spa->range_index);
+ break;
+ }
+
+ if (rc == -EBUSY) {
+ /*
+ * Note, entries may be appended to the list
+ * while the lock is dropped, but the workqueue
+ * being active prevents entries from being
+ * deleted or freed.
+ */
+ mutex_unlock(&acpi_desc->init_mutex);
+ ssleep(1);
+ tmo--;
+ mutex_lock(&acpi_desc->init_mutex);
+ continue;
+ }
+
+ /* we got some results, but there are more pending... */
+ if (rc == -ENOSPC && overflow_retry--) {
+ if (!init_ars_len) {
+ init_ars_len = acpi_desc->ars_status->length;
+ init_ars_start = acpi_desc->ars_status->address;
+ }
+ rc = ars_continue(acpi_desc);
+ }
+
+ if (rc < 0) {
+ dev_warn(dev, "range %d ars continuation failed\n",
+ spa->range_index);
+ break;
+ }
+
+ if (init_ars_len) {
+ ars_start = init_ars_start;
+ ars_len = init_ars_len;
+ } else {
+ ars_start = acpi_desc->ars_status->address;
+ ars_len = acpi_desc->ars_status->length;
+ }
+ dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
+ spa->range_index, ars_start, ars_len);
+ /* notify the region about new poison entries */
+ nvdimm_region_notify(nfit_spa->nd_region,
+ NVDIMM_REVALIDATE_POISON);
+ break;
+ } while (1);
+}
+
+static void acpi_nfit_scrub(struct work_struct *work)
+{
+ struct device *dev;
+ u64 init_scrub_length = 0;
struct nfit_spa *nfit_spa;
+ u64 init_scrub_address = 0;
+ bool init_ars_done = false;
+ struct acpi_nfit_desc *acpi_desc;
+ unsigned int tmo = scrub_timeout;
+ unsigned int overflow_retry = scrub_overflow_abort;
+ acpi_desc = container_of(work, typeof(*acpi_desc), work);
+ dev = acpi_desc->dev;
+
+ /*
+ * We scrub in 2 phases. The first phase waits for any platform
+ * firmware initiated scrubs to complete and then we go search for the
+ * affected spa regions to mark them scanned. In the second phase we
+ * initiate a directed scrub for every range that was not scrubbed in
+ * phase 1.
+ */
+
+ /* process platform firmware initiated scrubs */
+ retry:
+ mutex_lock(&acpi_desc->init_mutex);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
- int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
+ struct nd_cmd_ars_status *ars_status;
+ struct acpi_nfit_system_address *spa;
+ u64 ars_start, ars_len;
+ int rc;
- if (rc)
- return rc;
+ if (acpi_desc->cancel)
+ break;
+
+ if (nfit_spa->nd_region)
+ continue;
+
+ if (init_ars_done) {
+ /*
+ * No need to re-query, we're now just
+ * reconciling all the ranges covered by the
+ * initial scrub
+ */
+ rc = 0;
+ } else
+ rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
+
+ if (rc == -ENOTTY) {
+ /* no ars capability, just register spa and move on */
+ acpi_nfit_register_region(acpi_desc, nfit_spa);
+ continue;
+ }
+
+ if (rc == -EBUSY && !tmo) {
+ /* fallthrough to directed scrub in phase 2 */
+ dev_warn(dev, "timeout awaiting ars results, continuing...\n");
+ break;
+ } else if (rc == -EBUSY) {
+ mutex_unlock(&acpi_desc->init_mutex);
+ ssleep(1);
+ tmo--;
+ goto retry;
+ }
+
+ /* we got some results, but there are more pending... */
+ if (rc == -ENOSPC && overflow_retry--) {
+ ars_status = acpi_desc->ars_status;
+ /*
+ * Record the original scrub range, so that we
+ * can recall all the ranges impacted by the
+ * initial scrub.
+ */
+ if (!init_scrub_length) {
+ init_scrub_length = ars_status->length;
+ init_scrub_address = ars_status->address;
+ }
+ rc = ars_continue(acpi_desc);
+ if (rc == 0) {
+ mutex_unlock(&acpi_desc->init_mutex);
+ goto retry;
+ }
+ }
+
+ if (rc < 0) {
+ /*
+ * Initial scrub failed, we'll give it one more
+ * try below...
+ */
+ break;
+ }
+
+ /* We got some final results, record completed ranges */
+ ars_status = acpi_desc->ars_status;
+ if (init_scrub_length) {
+ ars_start = init_scrub_address;
+ ars_len = ars_start + init_scrub_length;
+ } else {
+ ars_start = ars_status->address;
+ ars_len = ars_status->length;
+ }
+ spa = nfit_spa->spa;
+
+ if (!init_ars_done) {
+ init_ars_done = true;
+ dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
+ ars_start, ars_len);
+ }
+ if (ars_start <= spa->address && ars_start + ars_len
+ >= spa->address + spa->length)
+ acpi_nfit_register_region(acpi_desc, nfit_spa);
}
+
+ /*
+ * For all the ranges not covered by an initial scrub we still
+ * want to see if there are errors, but it's ok to discover them
+ * asynchronously.
+ */
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ /*
+ * Flag all the ranges that still need scrubbing, but
+ * register them now to make data available.
+ */
+ if (nfit_spa->nd_region)
+ nfit_spa->ars_done = 1;
+ else
+ acpi_nfit_register_region(acpi_desc, nfit_spa);
+ }
+
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ acpi_nfit_async_scrub(acpi_desc, nfit_spa);
+ mutex_unlock(&acpi_desc->init_mutex);
+}
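Both scrub phases poll with the same drop-lock/sleep/retake shape so that the acpi_nfit_add()/notify paths are not starved while ARS is in flight. Condensed from the function above (a fragment, not standalone code):

	/* Condensed shape of the phase-1 polling loop in acpi_nfit_scrub() */
	retry:
		mutex_lock(&acpi_desc->init_mutex);
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -EBUSY && tmo--) {
			/* drop the lock so add/notify paths can make progress */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			goto retry;
		}
		/* ... consume results or fall through to the directed scrub ... */
		mutex_unlock(&acpi_desc->init_mutex);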
+
+static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nfit_spa *nfit_spa;
+ int rc;
+
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
+ /* BLK regions don't need to wait for ars results */
+ rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
+ if (rc)
+ return rc;
+ }
+
+ queue_work(nfit_wq, &acpi_desc->work);
return 0;
}
@@ -1901,15 +2216,64 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
-static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
+struct acpi_nfit_flush_work {
+ struct work_struct work;
+ struct completion cmp;
+};
+
+static void flush_probe(struct work_struct *work)
{
- struct nvdimm_bus_descriptor *nd_desc;
- struct acpi_nfit_desc *acpi_desc;
- struct device *dev = &adev->dev;
+ struct acpi_nfit_flush_work *flush;
- acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
- if (!acpi_desc)
- return ERR_PTR(-ENOMEM);
+ flush = container_of(work, typeof(*flush), work);
+ complete(&flush->cmp);
+}
+
+static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct device *dev = acpi_desc->dev;
+ struct acpi_nfit_flush_work flush;
+
+ /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
+ device_lock(dev);
+ device_unlock(dev);
+
+ /*
+ * Scrub work could take tens of seconds; userspace may give up, so we
+ * need to be interruptible while waiting.
+ */
+ INIT_WORK_ONSTACK(&flush.work, flush_probe);
+ COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
+ queue_work(nfit_wq, &flush.work);
+ return wait_for_completion_interruptible(&flush.cmp);
+}
+
+static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+
+ if (nvdimm)
+ return 0;
+ if (cmd != ND_CMD_ARS_START)
+ return 0;
+
+ /*
+ * The kernel and userspace may race to initiate a scrub, but
+ * the scrub thread is prepared to lose that initial race. It
+ * just needs guarantees that any ars it initiates are not
+ * interrupted by any intervening start requests from userspace.
+ */
+ if (work_busy(&acpi_desc->work))
+ return -EBUSY;
+
+ return 0;
+}
+
+void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
+{
+ struct nvdimm_bus_descriptor *nd_desc;
dev_set_drvdata(dev, acpi_desc);
acpi_desc->dev = dev;
@@ -1917,14 +2281,10 @@ static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
nd_desc = &acpi_desc->nd_desc;
nd_desc->provider_name = "ACPI.NFIT";
nd_desc->ndctl = acpi_nfit_ctl;
+ nd_desc->flush_probe = acpi_nfit_flush_probe;
+ nd_desc->clear_to_send = acpi_nfit_clear_to_send;
nd_desc->attr_groups = acpi_nfit_attribute_groups;
- acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
- if (!acpi_desc->nvdimm_bus) {
- devm_kfree(dev, acpi_desc);
- return ERR_PTR(-ENXIO);
- }
-
INIT_LIST_HEAD(&acpi_desc->spa_maps);
INIT_LIST_HEAD(&acpi_desc->spas);
INIT_LIST_HEAD(&acpi_desc->dcrs);
@@ -1935,9 +2295,9 @@ static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
INIT_LIST_HEAD(&acpi_desc->dimms);
mutex_init(&acpi_desc->spa_map_mutex);
mutex_init(&acpi_desc->init_mutex);
-
- return acpi_desc;
+ INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
+EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
static int acpi_nfit_add(struct acpi_device *adev)
{
@@ -1956,12 +2316,13 @@ static int acpi_nfit_add(struct acpi_device *adev)
return 0;
}
- acpi_desc = acpi_nfit_desc_init(adev);
- if (IS_ERR(acpi_desc)) {
- dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
- __func__, PTR_ERR(acpi_desc));
- return PTR_ERR(acpi_desc);
- }
+ acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
+ if (!acpi_desc)
+ return -ENOMEM;
+ acpi_nfit_desc_init(acpi_desc, &adev->dev);
+ acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
+ if (!acpi_desc->nvdimm_bus)
+ return -ENOMEM;
/*
* Save the acpi header for later and then skip it,
@@ -2000,6 +2361,8 @@ static int acpi_nfit_remove(struct acpi_device *adev)
{
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
+ acpi_desc->cancel = 1;
+ flush_workqueue(nfit_wq);
nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
return 0;
}
@@ -2024,12 +2387,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
}
if (!acpi_desc) {
- acpi_desc = acpi_nfit_desc_init(adev);
- if (IS_ERR(acpi_desc)) {
- dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
- __func__, PTR_ERR(acpi_desc));
+ acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
+ if (!acpi_desc)
goto out_unlock;
- }
+ acpi_nfit_desc_init(acpi_desc, &adev->dev);
+ acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
+ if (!acpi_desc->nvdimm_bus)
+ goto out_unlock;
+ } else {
+ /*
+ * Finish previous registration before considering new
+ * regions.
+ */
+ flush_workqueue(nfit_wq);
}
/* Evaluate _FIT */
@@ -2097,12 +2467,17 @@ static __init int nfit_init(void)
acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
+ nfit_wq = create_singlethread_workqueue("nfit");
+ if (!nfit_wq)
+ return -ENOMEM;
+
return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
{
acpi_bus_unregister_driver(&acpi_nfit_driver);
+ destroy_workqueue(nfit_wq);
}
module_init(nfit_init);
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 3d549a383659..c75576b2d50e 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -14,6 +14,7 @@
*/
#ifndef __NFIT_H__
#define __NFIT_H__
+#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
#include <linux/types.h>
#include <linux/uuid.h>
@@ -40,15 +41,32 @@ enum nfit_uuids {
NFIT_UUID_MAX,
};
+enum nfit_fic {
+ NFIT_FIC_BYTE = 0x101, /* byte-addressable energy backed */
+ NFIT_FIC_BLK = 0x201, /* block-addressable non-energy backed */
+ NFIT_FIC_BYTEN = 0x301, /* byte-addressable non-energy backed */
+};
+
enum {
- ND_BLK_READ_FLUSH = 1,
- ND_BLK_DCR_LATCH = 2,
+ NFIT_BLK_READ_FLUSH = 1,
+ NFIT_BLK_DCR_LATCH = 2,
+ NFIT_ARS_STATUS_DONE = 0,
+ NFIT_ARS_STATUS_BUSY = 1 << 16,
+ NFIT_ARS_STATUS_NONE = 2 << 16,
+ NFIT_ARS_STATUS_INTR = 3 << 16,
+ NFIT_ARS_START_BUSY = 6,
+ NFIT_ARS_CAP_NONE = 1,
+ NFIT_ARS_F_OVERFLOW = 1,
+ NFIT_ARS_TIMEOUT = 90,
};
struct nfit_spa {
struct acpi_nfit_system_address *spa;
struct list_head list;
- int is_registered;
+ struct nd_region *nd_region;
+ unsigned int ars_done:1;
+ u32 clear_err_unit;
+ u32 max_ars;
};
struct nfit_dcr {
@@ -110,6 +128,10 @@ struct acpi_nfit_desc {
struct list_head idts;
struct nvdimm_bus *nvdimm_bus;
struct device *dev;
+ struct nd_cmd_ars_status *ars_status;
+ size_t ars_status_size;
+ struct work_struct work;
+ unsigned int cancel:1;
unsigned long dimm_dsm_force_en;
unsigned long bus_dsm_force_en;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
@@ -182,5 +204,5 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
const u8 *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *nfit, acpi_size sz);
-extern const struct attribute_group *acpi_nfit_attribute_groups[];
+void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
#endif /* __NFIT_H__ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 67da6fb72274..814d5f83b75e 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -602,6 +602,14 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
return AE_OK;
}
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+ pr_warn(PREFIX
+ "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+ table->signature, table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+}
+
#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>
@@ -636,6 +644,7 @@ static const char * const table_sigs[] = {
#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+static DECLARE_BITMAP(acpi_initrd_installed, ACPI_OVERRIDE_TABLES);
#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
@@ -746,96 +755,125 @@ void __init acpi_initrd_override(void *data, size_t size)
}
}
}
-#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
-static void acpi_table_taint(struct acpi_table_header *table)
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address, u32 *length)
{
- pr_warn(PREFIX
- "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
- table->signature, table->oem_table_id);
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
-}
+ int table_offset = 0;
+ int table_index = 0;
+ struct acpi_table_header *table;
+ u32 table_length;
+ *length = 0;
+ *address = 0;
+ if (!acpi_tables_addr)
+ return AE_OK;
-acpi_status
-acpi_os_table_override(struct acpi_table_header * existing_table,
- struct acpi_table_header ** new_table)
-{
- if (!existing_table || !new_table)
- return AE_BAD_PARAMETER;
+ while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
+ table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+ ACPI_HEADER_SIZE);
+ if (table_offset + table->length > all_tables_size) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ WARN_ON(1);
+ return AE_OK;
+ }
- *new_table = NULL;
+ table_length = table->length;
-#ifdef CONFIG_ACPI_CUSTOM_DSDT
- if (strncmp(existing_table->signature, "DSDT", 4) == 0)
- *new_table = (struct acpi_table_header *)AmlCode;
-#endif
- if (*new_table != NULL)
+ /* Only override matching tables that have not been used yet */
+ if (test_bit(table_index, acpi_initrd_installed) ||
+ memcmp(existing_table->signature, table->signature, 4) ||
+ memcmp(table->oem_table_id, existing_table->oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+
+ *length = table_length;
+ *address = acpi_tables_addr + table_offset;
acpi_table_taint(existing_table);
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ set_bit(table_index, acpi_initrd_installed);
+ break;
+
+next_table:
+ table_offset += table_length;
+ table_index++;
+ }
return AE_OK;
}
-acpi_status
-acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address *address,
- u32 *table_length)
+void __init acpi_initrd_initialize_tables(void)
{
-#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
- *table_length = 0;
- *address = 0;
- return AE_OK;
-#else
int table_offset = 0;
+ int table_index = 0;
+ u32 table_length;
struct acpi_table_header *table;
- *table_length = 0;
- *address = 0;
-
if (!acpi_tables_addr)
- return AE_OK;
-
- do {
- if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
- WARN_ON(1);
- return AE_OK;
- }
+ return;
+ while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
table = acpi_os_map_memory(acpi_tables_addr + table_offset,
ACPI_HEADER_SIZE);
-
if (table_offset + table->length > all_tables_size) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
WARN_ON(1);
- return AE_OK;
+ return;
}
- table_offset += table->length;
+ table_length = table->length;
- if (memcmp(existing_table->signature, table->signature, 4)) {
- acpi_os_unmap_memory(table,
- ACPI_HEADER_SIZE);
- continue;
- }
-
- /* Only override tables with matching oem id */
- if (memcmp(table->oem_table_id, existing_table->oem_table_id,
- ACPI_OEM_TABLE_ID_SIZE)) {
- acpi_os_unmap_memory(table,
- ACPI_HEADER_SIZE);
- continue;
+ /* Skip tables already used for override, and RSDT/XSDT */
+ if (test_bit(table_index, acpi_initrd_installed) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
}
- table_offset -= table->length;
- *table_length = table->length;
+ acpi_table_taint(table);
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- *address = acpi_tables_addr + table_offset;
- break;
- } while (table_offset + ACPI_HEADER_SIZE < all_tables_size);
+ acpi_install_table(acpi_tables_addr + table_offset, TRUE);
+ set_bit(table_index, acpi_initrd_installed);
+next_table:
+ table_offset += table_length;
+ table_index++;
+ }
+}
+#else
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address,
+ u32 *table_length)
+{
+ *table_length = 0;
+ *address = 0;
+ return AE_OK;
+}
+
+void __init acpi_initrd_initialize_tables(void)
+{
+}
+#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
- if (*address != 0)
+acpi_status
+acpi_os_table_override(struct acpi_table_header *existing_table,
+ struct acpi_table_header **new_table)
+{
+ if (!existing_table || !new_table)
+ return AE_BAD_PARAMETER;
+
+ *new_table = NULL;
+
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+ if (strncmp(existing_table->signature, "DSDT", 4) == 0)
+ *new_table = (struct acpi_table_header *)AmlCode;
+#endif
+ if (*new_table != NULL)
acpi_table_taint(existing_table);
return AE_OK;
-#endif
}
static irqreturn_t acpi_irq(int irq, void *dev_id)
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index c8e169e46673..2c45dd3acc17 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
#define PREFIX "ACPI: "
@@ -387,6 +388,23 @@ static inline int acpi_isa_register_gsi(struct pci_dev *dev)
}
#endif
+static inline bool acpi_pci_irq_valid(struct pci_dev *dev, u8 pin)
+{
+#ifdef CONFIG_X86
+ /*
+ * On x86 irq line 0xff means "unknown" or "no connection"
+ * (PCI 3.0, Section 6.2.4, footnote on page 223).
+ */
+ if (dev->irq == 0xff) {
+ dev->irq = IRQ_NOTCONNECTED;
+ dev_warn(&dev->dev, "PCI INT %c: not connected\n",
+ pin_name(pin));
+ return false;
+ }
+#endif
+ return true;
+}
+
int acpi_pci_irq_enable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
@@ -431,11 +449,14 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
} else
gsi = -1;
- /*
- * No IRQ known to the ACPI subsystem - maybe the BIOS /
- * driver reported one, then use it. Exit in any case.
- */
if (gsi < 0) {
+ /*
+ * No IRQ known to the ACPI subsystem - maybe the BIOS /
+ * driver reported one, then use it. Exit in any case.
+ */
+ if (!acpi_pci_irq_valid(dev, pin))
+ return 0;
+
if (acpi_isa_register_gsi(dev))
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
pin_name(pin));
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index 42df46a86c25..fcd1852dcdee 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/regmap.h>
@@ -205,7 +205,4 @@ static int __init intel_crc_pmic_opregion_driver_init(void)
{
return platform_driver_register(&intel_crc_pmic_opregion_driver);
}
-module_init(intel_crc_pmic_opregion_driver_init);
-
-MODULE_DESCRIPTION("CrystalCove ACPI operation region driver");
-MODULE_LICENSE("GPL");
+device_initcall(intel_crc_pmic_opregion_driver_init);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 11154a330f07..d2fa8cb82d2b 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -314,7 +314,6 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
- acpi_processor_syscore_init();
register_hotcpu_notifier(&acpi_cpu_notifier);
acpi_thermal_cpufreq_init();
acpi_processor_ppc_init();
@@ -330,7 +329,6 @@ static void __exit acpi_processor_driver_exit(void)
acpi_processor_ppc_exit();
acpi_thermal_cpufreq_exit();
unregister_hotcpu_notifier(&acpi_cpu_notifier);
- acpi_processor_syscore_exit();
driver_unregister(&acpi_processor_driver);
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 175c86bee3a9..444e3745c8b3 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -23,6 +23,7 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/module.h>
#include <linux/acpi.h>
@@ -30,7 +31,6 @@
#include <linux/sched.h> /* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
-#include <linux/syscore_ops.h>
#include <acpi/processor.h>
/*
@@ -43,8 +43,6 @@
#include <asm/apic.h>
#endif
-#define PREFIX "ACPI: "
-
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
@@ -61,8 +59,8 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
-static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
- acpi_cstate);
+static
+DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
static int disabled_by_idle_boot_param(void)
{
@@ -81,9 +79,9 @@ static int set_max_cstate(const struct dmi_system_id *id)
if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
return 0;
- printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
- " Override with \"processor.max_cstate=%d\"\n", id->ident,
- (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
+ pr_notice("%s detected - limiting to C%ld max_cstate."
+ " Override with \"processor.max_cstate=%d\"\n", id->ident,
+ (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
max_cstate = (long)id->driver_data;
@@ -194,42 +192,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
#endif
-#ifdef CONFIG_PM_SLEEP
-static u32 saved_bm_rld;
-
-static int acpi_processor_suspend(void)
-{
- acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
- return 0;
-}
-
-static void acpi_processor_resume(void)
-{
- u32 resumed_bm_rld = 0;
-
- acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
- if (resumed_bm_rld == saved_bm_rld)
- return;
-
- acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
-}
-
-static struct syscore_ops acpi_processor_syscore_ops = {
- .suspend = acpi_processor_suspend,
- .resume = acpi_processor_resume,
-};
-
-void acpi_processor_syscore_init(void)
-{
- register_syscore_ops(&acpi_processor_syscore_ops);
-}
-
-void acpi_processor_syscore_exit(void)
-{
- unregister_syscore_ops(&acpi_processor_syscore_ops);
-}
-#endif /* CONFIG_PM_SLEEP */
-
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
@@ -351,7 +313,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
/* There must be at least 2 elements */
if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
- printk(KERN_ERR PREFIX "not enough elements in _CST\n");
+ pr_err("not enough elements in _CST\n");
ret = -EFAULT;
goto end;
}
@@ -360,7 +322,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
/* Validate number of power states. */
if (count < 1 || count != cst->package.count - 1) {
- printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
+ pr_err("count given by _CST is not valid\n");
ret = -EFAULT;
goto end;
}
@@ -469,11 +431,9 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
* (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
*/
if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
- printk(KERN_WARNING
- "Limiting number of power states to max (%d)\n",
- ACPI_PROCESSOR_MAX_POWER);
- printk(KERN_WARNING
- "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
+ pr_warn("Limiting number of power states to max (%d)\n",
+ ACPI_PROCESSOR_MAX_POWER);
+ pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
break;
}
}
@@ -1097,8 +1057,8 @@ int acpi_processor_power_init(struct acpi_processor *pr)
retval = cpuidle_register_driver(&acpi_idle_driver);
if (retval)
return retval;
- printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
- acpi_idle_driver.name);
+ pr_debug("%s registered with cpuidle\n",
+ acpi_idle_driver.name);
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 2aee41655ce9..f2fd3fee588a 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -816,6 +816,7 @@ struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
next = adev->node.next;
if (next == head) {
child = NULL;
+ adev = ACPI_COMPANION(dev);
goto nondev;
}
adev = list_entry(next, struct acpi_device, node);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index d02fd53042a5..56241eb341f4 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -27,8 +27,20 @@
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
+static inline bool acpi_iospace_resource_valid(struct resource *res)
+{
+ /* On X86 IO space is limited to the [0 - 64K] IO port range */
+ return res->end < 0x10003;
+}
#else
#define valid_IRQ(i) (true)
+/*
+ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
+ * addresses that map IO space into the CPU physical address space; such IO
+ * space resources can be placed anywhere in the 64-bit physical address space.
+ */
+static inline bool
+acpi_iospace_resource_valid(struct resource *res) { return true; }
#endif
static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
@@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
- if (res->end >= 0x10003)
+ if (!acpi_iospace_resource_valid(res))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
if (io_decode == ACPI_DECODE_16)
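
The split above keeps the 64K port-range bound as an x86-only rule; on other architectures ACPI IO descriptors carry MMIO physical addresses and may sit anywhere. A userspace mock of the two variants, using the same bound as the kernel check; the MOCK_X86 define and struct are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct res { uint64_t start, end; };

#ifdef MOCK_X86
static bool iospace_valid(const struct res *r)
{
	return r->end < 0x10003;	/* same upper bound as the kernel check */
}
#else
static bool iospace_valid(const struct res *r)
{
	(void)r;
	return true;			/* MMIO-mapped IO: any 64-bit address */
}
#endif

int main(void)
{
	struct res r = { 0xf0000000, 0xf0000fff };

	printf("valid: %d\n", iospace_valid(&r));
	return 0;
}
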
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 407a3760e8de..5f28cf778349 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1930,6 +1930,7 @@ int __init acpi_scan_init(void)
acpi_memory_hotplug_init();
acpi_pnp_init();
acpi_int340x_thermal_init();
+ acpi_amba_init();
acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9cb975200cac..2a8b59644297 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -19,6 +19,7 @@
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/syscore_ops.h>
#include <asm/io.h>
#include <trace/events/power.h>
@@ -677,6 +678,39 @@ static void acpi_sleep_suspend_setup(void)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */
+#ifdef CONFIG_PM_SLEEP
+static u32 saved_bm_rld;
+
+static int acpi_save_bm_rld(void)
+{
+ acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+ return 0;
+}
+
+static void acpi_restore_bm_rld(void)
+{
+ u32 resumed_bm_rld = 0;
+
+ acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+ if (resumed_bm_rld == saved_bm_rld)
+ return;
+
+ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+}
+
+static struct syscore_ops acpi_sleep_syscore_ops = {
+ .suspend = acpi_save_bm_rld,
+ .resume = acpi_restore_bm_rld,
+};
+
+void acpi_sleep_syscore_init(void)
+{
+ register_syscore_ops(&acpi_sleep_syscore_ops);
+}
+#else
+static inline void acpi_sleep_syscore_init(void) {}
+#endif /* CONFIG_PM_SLEEP */
+
#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
@@ -714,6 +748,7 @@ static int acpi_hibernation_enter(void)
static void acpi_hibernation_leave(void)
{
+ pm_set_resume_via_firmware();
/*
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
* enable it here.
@@ -839,6 +874,7 @@ int __init acpi_sleep_init(void)
sleep_states[ACPI_STATE_S0] = 1;
+ acpi_sleep_syscore_init();
acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
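
The BM_RLD handling moved here follows a common firmware-register pattern: capture the value on suspend, and on resume rewrite it only if firmware changed it, skipping a redundant (and potentially slow) register write. A self-contained sketch of that pattern; read_reg()/write_reg() stand in for the ACPICA bit-register accessors:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_hw_reg;			/* stands in for the hardware register */

static uint32_t read_reg(void)      { return fake_hw_reg; }
static void     write_reg(uint32_t v) { fake_hw_reg = v; }

static uint32_t saved;

static void on_suspend(void) { saved = read_reg(); }

static void on_resume(void)
{
	uint32_t now = read_reg();

	if (now == saved)
		return;			/* firmware left it alone: skip the write */
	write_reg(saved);
}

int main(void)
{
	write_reg(1);
	on_suspend();
	write_reg(0);			/* pretend firmware clobbered it */
	on_resume();
	printf("restored: %u\n", read_reg());
	return 0;
}
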
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 6c0f0794aa82..f49c02442d65 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -32,6 +32,7 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
+#include "internal.h"
#define ACPI_MAX_TABLES 128
@@ -456,6 +457,7 @@ int __init acpi_table_init(void)
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
return -EINVAL;
+ acpi_initrd_initialize_tables();
check_multiple_madt();
return 0;
@@ -484,3 +486,13 @@ static int __init acpi_force_table_verification_setup(char *s)
}
early_param("acpi_force_table_verification", acpi_force_table_verification_setup);
+
+static int __init acpi_force_32bit_fadt_addr(char *s)
+{
+ pr_info("Forcing 32 Bit FADT addresses\n");
+ acpi_gbl_use32_bit_fadt_addresses = TRUE;
+
+ return 0;
+}
+
+early_param("acpi_force_32bit_fadt_addr", acpi_force_32bit_fadt_addr);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index f2f9873bb5c3..050673f0c0b3 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -201,10 +201,6 @@ acpi_extract_package(union acpi_object *package,
u8 **pointer = NULL;
union acpi_object *element = &(package->package.elements[i]);
- if (!element) {
- return AE_BAD_DATA;
- }
-
switch (element->type) {
case ACPI_TYPE_INTEGER:
@@ -696,7 +692,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
mask = obj->integer.value;
else if (obj->type == ACPI_TYPE_BUFFER)
for (i = 0; i < obj->buffer.length && i < 8; i++)
- mask |= (((u8)obj->buffer.pointer[i]) << (i * 8));
+ mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
ACPI_FREE(obj);
/*
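
The one-character cast fix in the hunk above matters because a u8 operand is promoted only to int before the shift: for i >= 4 the shift count reaches 32 and beyond, which both discards the high bytes of the mask and is undefined behaviour on a 32-bit int. Casting to u64 first keeps all eight buffer bytes. A standalone demonstration of the corrected form:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint64_t mask = 0;

	for (int i = 0; i < 8; i++)
		mask |= ((uint64_t)buf[i]) << (i * 8);	/* widen BEFORE shifting */

	printf("mask = 0x%016llx\n", (unsigned long long)mask);
	return 0;
}
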
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 7d00b7a015ea..16288e777ec3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1321,6 +1321,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
+ binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -1522,18 +1523,24 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_offset;
}
off_end = (void *)offp + tr->offsets_size;
+ off_min = 0;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
+ *offp < off_min ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
- binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
- proc->pid, thread->pid, (u64)*offp);
+ binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+ proc->pid, thread->pid, (u64)*offp,
+ (u64)off_min,
+ (u64)(t->buffer->data_size -
+ sizeof(*fp)));
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+ off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
@@ -2737,6 +2744,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ if (unlikely(current->mm != proc->vma_vm_mm)) {
+ pr_err("current mm mismatch proc mm\n");
+ return -EINVAL;
+ }
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -2951,6 +2962,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
return -ENOMEM;
get_task_struct(current);
proc->tsk = current;
+ proc->vma_vm_mm = current->mm;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
@@ -3593,13 +3605,24 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
static int binder_proc_show(struct seq_file *m, void *unused)
{
+ struct binder_proc *itr;
struct binder_proc *proc = m->private;
int do_lock = !binder_debug_no_lock;
+ bool valid_proc = false;
if (do_lock)
binder_lock(__func__);
- seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, proc, 1);
+
+ hlist_for_each_entry(itr, &binder_procs, proc_node) {
+ if (itr == proc) {
+ valid_proc = true;
+ break;
+ }
+ }
+ if (valid_proc) {
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, proc, 1);
+ }
if (do_lock)
binder_unlock(__func__);
return 0;
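
The new off_min bookkeeping above rejects transactions whose object offsets overlap or run backwards: each offset must start at or past the end of the previous flat_binder_object, stay inside the buffer, and remain u32-aligned. A compact userspace model of the validation loop, with OBJ_SIZE standing in for sizeof(struct flat_binder_object):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define OBJ_SIZE 16u		/* stand-in for sizeof(struct flat_binder_object) */

static bool offsets_valid(const uint64_t *offs, size_t n, uint64_t data_size)
{
	uint64_t off_min = 0;

	for (size_t i = 0; i < n; i++) {
		if (offs[i] > data_size - OBJ_SIZE ||
		    offs[i] < off_min ||	/* overlaps the previous object */
		    data_size < OBJ_SIZE ||
		    offs[i] % 4)		/* must be u32-aligned */
			return false;
		off_min = offs[i] + OBJ_SIZE;
	}
	return true;
}

int main(void)
{
	uint64_t ok[]  = { 0, 16, 32 };
	uint64_t bad[] = { 0, 8, 32 };	/* second object overlaps the first */

	printf("%d %d\n", offsets_valid(ok, 3, 64), offsets_valid(bad, 3, 64));
	return 0;
}
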
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 861643ea91b5..cfa936a32513 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -151,6 +151,15 @@ config AHCI_MVEBU
If unsure, say N.
+config AHCI_OCTEON
+ tristate "Cavium Octeon Soc Serial ATA"
+ depends on SATA_AHCI_PLATFORM && CAVIUM_OCTEON_SOC
+ default y
+ help
+ This option enables support for Cavium Octeon SoC Serial ATA.
+
+	  If unsure, say N.
+
config AHCI_SUNXI
tristate "Allwinner sunxi AHCI SATA support"
depends on ARCH_SUNXI
@@ -193,6 +202,14 @@ config SATA_FSL
If unsure, say N.
+config SATA_AHCI_SEATTLE
+ tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
+ depends on ARCH_SEATTLE
+ help
+ This option enables support for AMD Seattle SATA host controller.
+
+	  If unsure, say N.
+
config SATA_INIC162X
tristate "Initio 162x SATA support (Very Experimental)"
depends on PCI
@@ -355,7 +372,7 @@ config SATA_PROMISE
config SATA_RCAR
tristate "Renesas R-Car SATA support"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
This option enables support for Renesas R-Car Serial ATA.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index af45effac18c..0b2afb7e5f35 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ATA) += libata.o
# non-SFF interface
obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_SEATTLE) += ahci_seattle.o libahci.o libahci_platform.o
obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
obj-$(CONFIG_SATA_FSL) += sata_fsl.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
@@ -15,6 +16,7 @@ obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_MVEBU) += ahci_mvebu.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_OCTEON) += ahci_octeon.o
obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_TEGRA) += ahci_tegra.o libahci.o libahci_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 146dc0b8ec61..a83bbcc58b4c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -85,6 +85,7 @@ enum board_ids {
};
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ahci_remove_one(struct pci_dev *dev);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -94,9 +95,13 @@ static bool is_mcp89_apple(struct pci_dev *pdev);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
#ifdef CONFIG_PM
-static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
-static int ahci_pci_device_resume(struct pci_dev *pdev);
+static int ahci_pci_device_runtime_suspend(struct device *dev);
+static int ahci_pci_device_runtime_resume(struct device *dev);
+#ifdef CONFIG_PM_SLEEP
+static int ahci_pci_device_suspend(struct device *dev);
+static int ahci_pci_device_resume(struct device *dev);
#endif
+#endif /* CONFIG_PM */
static struct scsi_host_template ahci_sht = {
AHCI_SHT("ahci"),
@@ -371,15 +376,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
@@ -563,16 +564,20 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ } /* terminate list */
};
+static const struct dev_pm_ops ahci_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ahci_pci_device_suspend, ahci_pci_device_resume)
+ SET_RUNTIME_PM_OPS(ahci_pci_device_runtime_suspend,
+ ahci_pci_device_runtime_resume, NULL)
+};
static struct pci_driver ahci_pci_driver = {
.name = DRV_NAME,
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
- .remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
- .suspend = ahci_pci_device_suspend,
- .resume = ahci_pci_device_resume,
-#endif
+ .remove = ahci_remove_one,
+ .driver = {
+ .pm = &ahci_pci_pm_ops,
+ },
};
#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
@@ -801,42 +806,66 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
#ifdef CONFIG_PM
-static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+static void ahci_pci_disable_interrupts(struct ata_host *host)
{
- struct ata_host *host = pci_get_drvdata(pdev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 ctl;
- if (mesg.event & PM_EVENT_SUSPEND &&
- hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
- dev_err(&pdev->dev,
- "BIOS update required for suspend/resume\n");
- return -EIO;
- }
+ /* AHCI spec rev1.1 section 8.3.3:
+ * Software must disable interrupts prior to requesting a
+ * transition of the HBA to D3 state.
+ */
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+}
- if (mesg.event & PM_EVENT_SLEEP) {
- /* AHCI spec rev1.1 section 8.3.3:
- * Software must disable interrupts prior to requesting a
- * transition of the HBA to D3 state.
- */
- ctl = readl(mmio + HOST_CTL);
- ctl &= ~HOST_IRQ_EN;
- writel(ctl, mmio + HOST_CTL);
- readl(mmio + HOST_CTL); /* flush */
- }
+static int ahci_pci_device_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
- return ata_pci_device_suspend(pdev, mesg);
+ ahci_pci_disable_interrupts(host);
+ return 0;
}
-static int ahci_pci_device_resume(struct pci_dev *pdev)
+static int ahci_pci_device_runtime_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
- rc = ata_pci_device_do_resume(pdev);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
+ ahci_pci_init_controller(host);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_pci_device_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_err(&pdev->dev,
+ "BIOS update required for suspend/resume\n");
+ return -EIO;
+ }
+
+ ahci_pci_disable_interrupts(host);
+ return ata_host_suspend(host, PMSG_SUSPEND);
+}
+
+static int ahci_pci_device_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
+ int rc;
/* Apple BIOS helpfully mangles the registers on resume */
if (is_mcp89_apple(pdev))
@@ -856,6 +885,8 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
}
#endif
+#endif /* CONFIG_PM */
+
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
@@ -1718,7 +1749,18 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- return ahci_host_activate(host, &ahci_sht);
+ rc = ahci_host_activate(host, &ahci_sht);
+ if (rc)
+ return rc;
+
+ pm_runtime_put_noidle(&pdev->dev);
+ return 0;
+}
+
+static void ahci_remove_one(struct pci_dev *pdev)
+{
+ pm_runtime_get_noresume(&pdev->dev);
+ ata_pci_remove_one(pdev);
}
module_pci_driver(ahci_pci_driver);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 167ba7e3b92e..70b06bcfb7e3 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -335,6 +335,7 @@ struct ahci_host_priv {
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
u32 cap2; /* cap2 to use */
+ u32 version; /* cached version */
u32 port_map; /* port map to use */
u32 saved_cap; /* saved initial cap */
u32 saved_cap2; /* saved initial cap2 */
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f7a7fa81740e..de7128d81e9c 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -112,12 +112,15 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
if (rc)
return rc;
- dram = mv_mbus_dram_info();
- if (!dram)
- return -ENODEV;
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-380-ahci")) {
+ dram = mv_mbus_dram_info();
+ if (!dram)
+ return -ENODEV;
- ahci_mvebu_mbus_config(hpriv, dram);
- ahci_mvebu_regret_option(hpriv);
+ ahci_mvebu_mbus_config(hpriv, dram);
+ ahci_mvebu_regret_option(hpriv);
+ }
rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
&ahci_platform_sht);
@@ -133,6 +136,7 @@ disable_resources:
static const struct of_device_id ahci_mvebu_of_match[] = {
{ .compatible = "marvell,armada-380-ahci", },
+ { .compatible = "marvell,armada-3700-ahci", },
{ },
};
MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
diff --git a/drivers/ata/ahci_octeon.c b/drivers/ata/ahci_octeon.c
new file mode 100644
index 000000000000..ea865fe953e1
--- /dev/null
+++ b/drivers/ata/ahci_octeon.c
@@ -0,0 +1,105 @@
+/*
+ * SATA glue for Cavium Octeon III SoCs.
+ *
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010-2015 Cavium Networks
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/bitfield.h>
+
+#define CVMX_SATA_UCTL_SHIM_CFG 0xE8
+
+#define SATA_UCTL_ENDIAN_MODE_BIG 1
+#define SATA_UCTL_ENDIAN_MODE_LITTLE 0
+#define SATA_UCTL_ENDIAN_MODE_MASK 3
+
+#define SATA_UCTL_DMA_ENDIAN_MODE_SHIFT 8
+#define SATA_UCTL_CSR_ENDIAN_MODE_SHIFT 0
+#define SATA_UCTL_DMA_READ_CMD_SHIFT 12
+
+static int ahci_octeon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ void __iomem *base;
+ u64 cfg;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Platform resource[0] is missing\n");
+ return -ENODEV;
+ }
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ cfg = cvmx_readq_csr(base + CVMX_SATA_UCTL_SHIM_CFG);
+
+ cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT);
+ cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT);
+
+#ifdef __BIG_ENDIAN
+ cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
+ cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
+#else
+ cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
+ cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
+#endif
+
+ cfg |= 1 << SATA_UCTL_DMA_READ_CMD_SHIFT;
+
+ cvmx_writeq_csr(base + CVMX_SATA_UCTL_SHIM_CFG, cfg);
+
+ if (!node) {
+ dev_err(dev, "no device node, failed to add octeon sata\n");
+ return -ENODEV;
+ }
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to add ahci-platform core\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ahci_octeon_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id octeon_ahci_match[] = {
+ { .compatible = "cavium,octeon-7130-sata-uctl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_ahci_match);
+
+static struct platform_driver ahci_octeon_driver = {
+ .probe = ahci_octeon_probe,
+ .remove = ahci_octeon_remove,
+ .driver = {
+ .name = "octeon-ahci",
+ .of_match_table = octeon_ahci_match,
+ },
+};
+
+module_platform_driver(ahci_octeon_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. sata config.");
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 04975b851c23..62a04c8fb5c9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
if (rc)
return rc;
+ of_property_read_u32(dev->of_node,
+ "ports-implemented", &hpriv->force_port_map);
+
if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
@@ -76,6 +79,7 @@ static const struct of_device_id ahci_of_match[] = {
{ .compatible = "ibm,476gtr-ahci", },
{ .compatible = "snps,dwc-ahci", },
{ .compatible = "hisilicon,hisi-ahci", },
+ { .compatible = "cavium,octeon-7130-ahci", },
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
new file mode 100644
index 000000000000..6e702ab57220
--- /dev/null
+++ b/drivers/ata/ahci_seattle.c
@@ -0,0 +1,210 @@
+/*
+ * AMD Seattle AHCI SATA driver
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include "ahci.h"
+
+/* SGPIO Control Register definition
+ *
+ * Bit Type Description
+ * 31 RW OD7.2 (activity)
+ * 30 RW OD7.1 (locate)
+ * 28...8 RW OD6.2...OD0.0 (3 bits per port, 1 bit per LED)
+ * 28...8 RW OD6.2...OD0.0 (3bits per port, 1 bit per LED)
+ * 7 RO SGPIO feature flag
+ * 6:4 RO Reserved
+ * 3:0 RO Number of ports (0 means no port supported)
+ */
+#define ACTIVITY_BIT_POS(x) (8 + (3 * (x)))
+#define LOCATE_BIT_POS(x) (ACTIVITY_BIT_POS(x) + 1)
+#define FAULT_BIT_POS(x) (LOCATE_BIT_POS(x) + 1)
+
+#define ACTIVITY_MASK 0x00010000
+#define LOCATE_MASK 0x00080000
+#define FAULT_MASK 0x00400000
+
+#define DRV_NAME "ahci-seattle"
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size);
+
+struct seattle_plat_data {
+ void __iomem *sgpio_ctrl;
+};
+
+static struct ata_port_operations ahci_port_ops = {
+ .inherits = &ahci_ops,
+};
+
+static const struct ata_port_info ahci_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_port_ops,
+};
+
+static struct ata_port_operations ahci_seattle_ops = {
+ .inherits = &ahci_ops,
+ .transmit_led_message = seattle_transmit_led_message,
+};
+
+static const struct ata_port_info ahci_port_seattle_info = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
+ .link_flags = ATA_LFLAG_SW_ACTIVITY,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_seattle_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct seattle_plat_data *plat_data = hpriv->plat_data;
+ unsigned long flags;
+ int pmp;
+ struct ahci_em_priv *emp;
+ u32 val;
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp >= EM_MAX_SLOTS)
+ return -EINVAL;
+ emp = &pp->em_priv[pmp];
+
+ val = ioread32(plat_data->sgpio_ctrl);
+ if (state & ACTIVITY_MASK)
+ val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
+ else
+ val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
+
+ if (state & LOCATE_MASK)
+ val |= 1 << LOCATE_BIT_POS((ap->port_no));
+ else
+ val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
+
+ if (state & FAULT_MASK)
+ val |= 1 << FAULT_BIT_POS((ap->port_no));
+ else
+ val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
+
+ iowrite32(val, plat_data->sgpio_ctrl);
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* save off new led state for port/slot */
+ emp->led_state = state;
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return size;
+}
+
+static const struct ata_port_info *ahci_seattle_get_port_info(
+ struct platform_device *pdev, struct ahci_host_priv *hpriv)
+{
+ struct device *dev = &pdev->dev;
+ struct seattle_plat_data *plat_data;
+ u32 val;
+
+ plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
+	if (!plat_data)	/* devm_kzalloc() returns NULL on failure, not ERR_PTR */
+ return &ahci_port_info;
+
+ plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 1));
+ if (IS_ERR(plat_data->sgpio_ctrl))
+ return &ahci_port_info;
+
+ val = ioread32(plat_data->sgpio_ctrl);
+
+ if (!(val & 0xf))
+ return &ahci_port_info;
+
+ hpriv->em_loc = 0;
+ hpriv->em_buf_sz = 4;
+ hpriv->em_msg_type = EM_MSG_TYPE_LED;
+ hpriv->plat_data = plat_data;
+
+ dev_info(dev, "SGPIO LED control is enabled.\n");
+ return &ahci_port_seattle_info;
+}
+
+static int ahci_seattle_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct ahci_host_priv *hpriv;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_platform_init_host(pdev, hpriv,
+ ahci_seattle_get_port_info(pdev, hpriv),
+ &ahci_platform_sht);
+ if (rc)
+ goto disable_resources;
+
+ return 0;
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+ ahci_platform_resume);
+
+static const struct acpi_device_id ahci_acpi_match[] = {
+ { "AMDI0600", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
+static struct platform_driver ahci_seattle_driver = {
+ .probe = ahci_seattle_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = ahci_acpi_match,
+ .pm = &ahci_pm_ops,
+ },
+};
+module_platform_driver(ahci_seattle_driver);
+
+MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
+MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 8e3f7faf00d3..73b19b277138 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -821,9 +821,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
__func__);
version = XGENE_AHCI_V1;
- }
- if (info->valid & ACPI_VALID_CID)
+ } else if (info->valid & ACPI_VALID_CID) {
version = XGENE_AHCI_V2;
+ }
}
}
#endif
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 85ea5142a095..a5d7c1c2a05e 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -225,6 +225,31 @@ static void ahci_enable_ahci(void __iomem *mmio)
WARN_ON(1);
}
+/**
+ * ahci_rpm_get_port - Make sure the port is powered on
+ * @ap: Port to power on
+ *
+ * Whenever there is need to access the AHCI host registers outside of
+ * normal execution paths, call this function to make sure the host is
+ * actually powered on.
+ */
+static int ahci_rpm_get_port(struct ata_port *ap)
+{
+ return pm_runtime_get_sync(ap->dev);
+}
+
+/**
+ * ahci_rpm_put_port - Undoes ahci_rpm_get_port()
+ * @ap: Port to power down
+ *
+ * Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
+ * if it has no more active users.
+ */
+static void ahci_rpm_put_port(struct ata_port *ap)
+{
+ pm_runtime_put(ap->dev);
+}
+
static ssize_t ahci_show_host_caps(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -251,9 +276,8 @@ static ssize_t ahci_show_host_version(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *mmio = hpriv->mmio;
- return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
+ return sprintf(buf, "%x\n", hpriv->version);
}
static ssize_t ahci_show_port_cmd(struct device *dev,
@@ -262,8 +286,13 @@ static ssize_t ahci_show_port_cmd(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
void __iomem *port_mmio = ahci_port_base(ap);
+ ssize_t ret;
- return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+ ahci_rpm_get_port(ap);
+ ret = sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+ ahci_rpm_put_port(ap);
+
+ return ret;
}
static ssize_t ahci_read_em_buffer(struct device *dev,
@@ -279,17 +308,20 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
size_t count;
int i;
+ ahci_rpm_get_port(ap);
spin_lock_irqsave(ap->lock, flags);
em_ctl = readl(mmio + HOST_EM_CTL);
if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
!(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return -EINVAL;
}
if (!(em_ctl & EM_CTL_MR)) {
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return -EAGAIN;
}
@@ -317,6 +349,7 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
}
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return i;
}
@@ -341,11 +374,13 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
size % 4 || size > hpriv->em_buf_sz)
return -EINVAL;
+ ahci_rpm_get_port(ap);
spin_lock_irqsave(ap->lock, flags);
em_ctl = readl(mmio + HOST_EM_CTL);
if (em_ctl & EM_CTL_TM) {
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return -EBUSY;
}
@@ -358,6 +393,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return size;
}
@@ -371,7 +407,9 @@ static ssize_t ahci_show_em_supported(struct device *dev,
void __iomem *mmio = hpriv->mmio;
u32 em_ctl;
+ ahci_rpm_get_port(ap);
em_ctl = readl(mmio + HOST_EM_CTL);
+ ahci_rpm_put_port(ap);
return sprintf(buf, "%s%s%s%s\n",
em_ctl & EM_CTL_LED ? "led " : "",
@@ -469,6 +507,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, hpriv->force_port_map);
port_map = hpriv->force_port_map;
+ hpriv->saved_port_map = port_map;
}
if (hpriv->mask_port_map) {
@@ -509,6 +548,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
/* record values to use during operation */
hpriv->cap = cap;
hpriv->cap2 = cap2;
+ hpriv->version = readl(mmio + HOST_VERSION);
hpriv->port_map = port_map;
if (!hpriv->start_engine)
@@ -1014,6 +1054,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
else
return -EINVAL;
+ ahci_rpm_get_port(ap);
spin_lock_irqsave(ap->lock, flags);
/*
@@ -1023,6 +1064,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
em_ctl = readl(mmio + HOST_EM_CTL);
if (em_ctl & EM_CTL_TM) {
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return -EBUSY;
}
@@ -1050,6 +1092,8 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
emp->led_state = state;
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
+
return size;
}
@@ -2215,6 +2259,8 @@ static void ahci_pmp_detach(struct ata_port *ap)
int ahci_port_resume(struct ata_port *ap)
{
+ ahci_rpm_get_port(ap);
+
ahci_power_up(ap);
ahci_start_port(ap);
@@ -2241,6 +2287,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
ata_port_freeze(ap);
}
+ ahci_rpm_put_port(ap);
return rc;
}
#endif
@@ -2356,11 +2403,10 @@ static void ahci_port_stop(struct ata_port *ap)
void ahci_print_info(struct ata_host *host, const char *scc_s)
{
struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = hpriv->mmio;
u32 vers, cap, cap2, impl, speed;
const char *speed_s;
- vers = readl(mmio + HOST_VERSION);
+ vers = hpriv->version;
cap = hpriv->cap;
cap2 = hpriv->cap2;
impl = hpriv->port_map;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e417e1a1d02c..567859ce0512 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -174,13 +174,13 @@ static ssize_t ata_scsi_park_show(struct device *device,
struct ata_port *ap;
struct ata_link *link;
struct ata_device *dev;
- unsigned long flags, now;
+ unsigned long now;
unsigned int uninitialized_var(msecs);
int rc = 0;
ap = ata_shost_to_port(sdev->host);
- spin_lock_irqsave(ap->lock, flags);
+ spin_lock_irq(ap->lock);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev) {
rc = -ENODEV;
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index ace0a4de3449..9f27b14009f9 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -30,8 +30,7 @@
#include <linux/ata_platform.h>
#include <linux/platform_data/atmel.h>
#include <linux/regmap.h>
-
-#include <asm/gpio.h>
+#include <linux/gpio.h>
#define DRV_NAME "pata_at91"
#define DRV_VERSION "0.3"
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index dd7410019d15..ec748d31928d 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -36,8 +36,8 @@
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
-#include <asm/gpio.h>
#include <asm/portmux.h>
#define DRV_NAME "pata-bf54x"
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 0038dc4c06c7..e5fb7525a5df 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -176,17 +176,14 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- int i = 0;
+ int i;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
- while (list[i] != NULL) {
- if (!strcmp(list[i], model_num)) {
- pr_warn("%s is not supported for %s\n",
- modestr, list[i]);
- return 1;
- }
- i++;
+ i = match_string(list, -1, model_num);
+ if (i >= 0) {
+ pr_warn("%s is not supported for %s\n", modestr, list[i]);
+ return 1;
}
return 0;
}
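
The hunk above replaces an open-coded scan with the kernel's match_string(), which returns the index of the first matching entry or a negative errno. A minimal userspace stand-in, returning -1 instead of -EINVAL and handling only NULL-terminated arrays; the model names are illustrative, not the driver's actual blacklist:

#include <stdio.h>
#include <string.h>

static int match_string_sketch(const char * const *list, const char *s)
{
	for (int i = 0; list[i] != NULL; i++)
		if (!strcmp(list[i], s))
			return i;
	return -1;	/* the kernel helper returns -EINVAL here */
}

int main(void)
{
	static const char * const blacklist[] = { "EXAMPLE MODEL 1", NULL };

	printf("%d %d\n", match_string_sketch(blacklist, "EXAMPLE MODEL 1"),
	       match_string_sketch(blacklist, "OTHER MODEL"));
	return 0;
}
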
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index e3d4b059fcd1..e347e7acd8ed 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/gfp.h>
+#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -30,7 +31,6 @@
#include <asm/macio.h>
#include <asm/io.h>
#include <asm/dbdma.h>
-#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/mediabay.h>
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 17d31fc009ab..0636d84fbefe 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -61,6 +61,7 @@ enum {
SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
SATA_INT_GATE = 0x41, /* SATA interrupt gating */
SATA_NATIVE_MODE = 0x42, /* Native mode enable */
+ SVIA_MISC_3 = 0x46, /* Miscellaneous Control III */
PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */
PATA_PIO_TIMING = 0xAB, /* PATA timing register */
@@ -71,9 +72,18 @@ enum {
NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
+
+ SATA_HOTPLUG = (1 << 5), /* enable IRQ on hotplug */
+};
+
+struct svia_priv {
+ bool wd_workaround;
};
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM_SLEEP
+static int svia_pci_device_resume(struct pci_dev *pdev);
+#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
@@ -85,6 +95,7 @@ static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
+static void vt6421_error_handler(struct ata_port *ap);
static const struct pci_device_id svia_pci_tbl[] = {
{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
@@ -105,7 +116,7 @@ static struct pci_driver svia_pci_driver = {
.probe = svia_init_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
- .resume = ata_pci_device_resume,
+ .resume = svia_pci_device_resume,
#endif
.remove = ata_pci_remove_one,
};
@@ -137,6 +148,7 @@ static struct ata_port_operations vt6421_sata_ops = {
.inherits = &svia_base_ops,
.scr_read = svia_scr_read,
.scr_write = svia_scr_write,
+ .error_handler = vt6421_error_handler,
};
static struct ata_port_operations vt8251_ops = {
@@ -536,7 +548,67 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
return 0;
}
-static void svia_configure(struct pci_dev *pdev, int board_id)
+static void svia_wd_fix(struct pci_dev *pdev)
+{
+ u8 tmp8;
+
+ pci_read_config_byte(pdev, 0x52, &tmp8);
+ pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
+}
+
+static irqreturn_t vt6421_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);
+
+ /* if the IRQ was not handled, it might be a hotplug IRQ */
+ if (rc != IRQ_HANDLED) {
+ u32 serror;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ /* check for hotplug on port 0 */
+ svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
+ if (serror & SERR_PHYRDY_CHG) {
+ ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
+ ata_port_freeze(host->ports[0]);
+ rc = IRQ_HANDLED;
+ }
+ /* check for hotplug on port 1 */
+ svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
+ if (serror & SERR_PHYRDY_CHG) {
+ ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
+ ata_port_freeze(host->ports[1]);
+ rc = IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+ return rc;
+}
+
+static void vt6421_error_handler(struct ata_port *ap)
+{
+ struct svia_priv *hpriv = ap->host->private_data;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 serror;
+
+ /* see svia_configure() for description */
+ if (!hpriv->wd_workaround) {
+ svia_scr_read(&ap->link, SCR_ERROR, &serror);
+ if (serror == 0x1000500) {
+ ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
+ svia_wd_fix(pdev);
+ hpriv->wd_workaround = true;
+ ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
+ }
+ }
+
+ ata_sff_error_handler(ap);
+}
+
+static void svia_configure(struct pci_dev *pdev, int board_id,
+ struct svia_priv *hpriv)
{
u8 tmp8;
@@ -572,6 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
+ /* enable IRQ on hotplug */
+ pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+ if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+ dev_dbg(&pdev->dev,
+ "enabling SATA hotplug (0x%x)\n",
+ (int) tmp8);
+ tmp8 |= SATA_HOTPLUG;
+ pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+ }
+
/*
* vt6420/1 has problems talking to some drives. The following
* is the fix from Joseph Chan <JosephChan@via.com.tw>.
@@ -593,11 +675,15 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
* https://bugzilla.kernel.org/show_bug.cgi?id=15173
* http://article.gmane.org/gmane.linux.ide/46352
* http://thread.gmane.org/gmane.linux.kernel/1062139
+ *
+ * As the fix slows down data transfer, apply it only if the error
+ * actually appears - see vt6421_error_handler()
+ * Apply the fix always on vt6420 as we don't know if SCR_ERROR can be
+ * read safely.
*/
- if (board_id == vt6420 || board_id == vt6421) {
- pci_read_config_byte(pdev, 0x52, &tmp8);
- tmp8 |= 1 << 2;
- pci_write_config_byte(pdev, 0x52, tmp8);
+ if (board_id == vt6420) {
+ svia_wd_fix(pdev);
+ hpriv->wd_workaround = true;
}
}
@@ -608,6 +694,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ata_host *host = NULL;
int board_id = (int) ent->driver_data;
const unsigned *bar_sizes;
+ struct svia_priv *hpriv;
ata_print_version_once(&pdev->dev, DRV_VERSION);
@@ -647,11 +734,39 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- svia_configure(pdev, board_id);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+ host->private_data = hpriv;
+
+ svia_configure(pdev, board_id, hpriv);
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
- IRQF_SHARED, &svia_sht);
+ if (board_id == vt6421)
+ return ata_host_activate(host, pdev->irq, vt6421_interrupt,
+ IRQF_SHARED, &svia_sht);
+ else
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+ IRQF_SHARED, &svia_sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int svia_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = pci_get_drvdata(pdev);
+ struct svia_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (hpriv->wd_workaround)
+ svia_wd_fix(pdev);
+ ata_host_resume(host);
+
+ return 0;
}
+#endif
module_pci_driver(svia_pci_driver);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 82f2ae0d7cc4..a969a7e443be 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -168,7 +168,7 @@ static char *res_strings[] = {
"reserved 14",
"Unrecognized cell",
"reserved 16",
- "reassemby abort: AAL5 abort",
+ "reassembly abort: AAL5 abort",
"packet purged",
"packet ageing timeout",
"channel ageing timeout",
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 500592486e88..6470eb8088f4 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -149,8 +149,7 @@ EXPORT_SYMBOL_GPL(bus_remove_file);
static void bus_release(struct kobject *kobj)
{
- struct subsys_private *priv =
- container_of(kobj, typeof(*priv), subsys.kobj);
+ struct subsys_private *priv = to_subsys_private(kobj);
struct bus_type *bus = priv->bus;
kfree(priv);
@@ -1019,13 +1018,11 @@ static void device_insertion_sort_klist(struct device *a, struct list_head *list
int (*compare)(const struct device *a,
const struct device *b))
{
- struct list_head *pos;
struct klist_node *n;
struct device_private *dev_prv;
struct device *b;
- list_for_each(pos, list) {
- n = container_of(pos, struct klist_node, n_node);
+ list_for_each_entry(n, list, n_node) {
dev_prv = to_device_private_bus(n);
b = dev_prv->device;
if (compare(a, b) <= 0) {
@@ -1042,8 +1039,7 @@ void bus_sort_breadthfirst(struct bus_type *bus,
const struct device *b))
{
LIST_HEAD(sorted_devices);
- struct list_head *pos, *tmp;
- struct klist_node *n;
+ struct klist_node *n, *tmp;
struct device_private *dev_prv;
struct device *dev;
struct klist *device_klist;
@@ -1051,8 +1047,7 @@ void bus_sort_breadthfirst(struct bus_type *bus,
device_klist = bus_get_device_klist(bus);
spin_lock(&device_klist->k_lock);
- list_for_each_safe(pos, tmp, &device_klist->k_list) {
- n = container_of(pos, struct klist_node, n_node);
+ list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) {
dev_prv = to_device_private_bus(n);
dev = dev_prv->device;
device_insertion_sort_klist(dev, &sorted_devices, compare);
@@ -1107,7 +1102,7 @@ struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
knode = klist_next(&iter->ki);
if (!knode)
return NULL;
- dev = container_of(knode, struct device_private, knode_bus)->device;
+ dev = to_device_private_bus(knode)->device;
if (!iter->type || iter->type == dev->type)
return dev;
}
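
The bus.c cleanups above rely on list_for_each_entry() and to_device_private_bus() folding the container_of() step into the iteration. The underlying trick, shown with a tiny hand-rolled intrusive list rather than the kernel's klist:

#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *next; };

struct item {
	int value;
	struct list_node node;	/* embedded, like klist_node in device_private */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct item a = { .value = 1 }, b = { .value = 2 };
	struct list_node head = { &a.node };

	a.node.next = &b.node;
	b.node.next = NULL;

	/* open-coded form: walk the nodes, container_of() back to the item */
	for (struct list_node *n = head.next; n; n = n->next)
		printf("%d\n", container_of(n, struct item, node)->value);
	return 0;
}
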
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 04a1582e80bb..89b032f2ffd2 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -267,7 +267,7 @@ void component_match_add_release(struct device *master,
}
if (match->num == match->alloc) {
- size_t new_size = match ? match->alloc + 16 : 15;
+ size_t new_size = match->alloc + 16;
int ret;
ret = component_match_realloc(master, match, new_size);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index c4da2df62e02..16688f50729c 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -560,6 +560,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
struct device_attach_data *data = _data;
struct device *dev = data->dev;
bool async_allowed;
+ int ret;
/*
* Check if device has already been claimed. This may
@@ -570,8 +571,17 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
if (dev->driver)
return -EBUSY;
- if (!driver_match_device(drv, dev))
+ ret = driver_match_device(drv, dev);
+ if (ret == 0) {
+ /* no match */
return 0;
+ } else if (ret == -EPROBE_DEFER) {
+ dev_dbg(dev, "Device match requests probe deferral\n");
+ driver_deferred_probe_add(dev);
+ } else if (ret < 0) {
+ dev_dbg(dev, "Bus failed to match device: %d", ret);
+ return ret;
+ } /* ret > 0 means positive match */
async_allowed = driver_allows_async_probing(drv);
@@ -691,6 +701,7 @@ void device_initial_probe(struct device *dev)
static int __driver_attach(struct device *dev, void *data)
{
struct device_driver *drv = data;
+ int ret;
/*
* Lock device and try to bind to it. We drop the error
@@ -702,8 +713,17 @@ static int __driver_attach(struct device *dev, void *data)
* is an error.
*/
- if (!driver_match_device(drv, dev))
+ ret = driver_match_device(drv, dev);
+ if (ret == 0) {
+ /* no match */
return 0;
+ } else if (ret == -EPROBE_DEFER) {
+ dev_dbg(dev, "Device match requests probe deferral\n");
+ driver_deferred_probe_add(dev);
+ } else if (ret < 0) {
+ dev_dbg(dev, "Bus failed to match device: %d", ret);
+ return ret;
+ } /* ret > 0 means positive match */
if (dev->parent) /* Needed for USB */
device_lock(dev->parent);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 55b83983a9c0..bdf28f7dd5e8 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -2,6 +2,7 @@
* Coherent per-device memory handling.
* Borrowed from i386
*/
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -17,9 +18,9 @@ struct dma_coherent_mem {
spinlock_t spinlock;
};
-static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_addr,
- size_t size, int flags,
- struct dma_coherent_mem **mem)
+static bool dma_init_coherent_memory(
+ phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
+ struct dma_coherent_mem **mem)
{
struct dma_coherent_mem *dma_mem = NULL;
void __iomem *mem_base = NULL;
@@ -31,7 +32,10 @@ static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_add
if (!size)
goto out;
- mem_base = ioremap(phys_addr, size);
+ if (flags & DMA_MEMORY_MAP)
+ mem_base = memremap(phys_addr, size, MEMREMAP_WC);
+ else
+ mem_base = ioremap(phys_addr, size);
if (!mem_base)
goto out;
@@ -50,24 +54,28 @@ static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_add
spin_lock_init(&dma_mem->spinlock);
*mem = dma_mem;
-
- if (flags & DMA_MEMORY_MAP)
- return DMA_MEMORY_MAP;
-
- return DMA_MEMORY_IO;
+ return true;
out:
kfree(dma_mem);
- if (mem_base)
- iounmap(mem_base);
- return 0;
+ if (mem_base) {
+ if (flags & DMA_MEMORY_MAP)
+ memunmap(mem_base);
+ else
+ iounmap(mem_base);
+ }
+ return false;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
if (!mem)
return;
- iounmap(mem->virt_base);
+
+ if (mem->flags & DMA_MEMORY_MAP)
+ memunmap(mem->virt_base);
+ else
+ iounmap(mem->virt_base);
kfree(mem->bitmap);
kfree(mem);
}
@@ -88,15 +96,13 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
{
struct dma_coherent_mem *mem;
- int ret;
- ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
- &mem);
- if (ret == 0)
+ if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
+ &mem))
return 0;
if (dma_assign_coherent_memory(dev, mem) == 0)
- return ret;
+ return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;
dma_release_coherent_memory(mem);
return 0;
@@ -181,7 +187,10 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
*/
*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
*ret = mem->virt_base + (pageno << PAGE_SHIFT);
- memset(*ret, 0, size);
+ if (mem->flags & DMA_MEMORY_MAP)
+ memset(*ret, 0, size);
+ else
+ memset_io(*ret, 0, size);
spin_unlock_irqrestore(&mem->spinlock, flags);
return 1;
@@ -281,9 +290,9 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
struct dma_coherent_mem *mem = rmem->priv;
if (!mem &&
- dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
- DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
- &mem) != DMA_MEMORY_MAP) {
+ !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
+ DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
+ &mem)) {
pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return -ENODEV;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9250e564ebf..773fc3099769 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
+#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
@@ -257,7 +258,7 @@ static void __fw_free_buf(struct kref *ref)
vunmap(buf->data);
for (i = 0; i < buf->nr_pages; i++)
__free_page(buf->pages[i]);
- kfree(buf->pages);
+ vfree(buf->pages);
} else
#endif
vfree(buf->data);
@@ -291,40 +292,19 @@ static const char * const fw_path[] = {
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
-static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
+static void fw_finish_direct_load(struct device *device,
+ struct firmware_buf *buf)
{
- int size;
- char *buf;
- int rc;
-
- if (!S_ISREG(file_inode(file)->i_mode))
- return -EINVAL;
- size = i_size_read(file_inode(file));
- if (size <= 0)
- return -EINVAL;
- buf = vmalloc(size);
- if (!buf)
- return -ENOMEM;
- rc = kernel_read(file, 0, buf, size);
- if (rc != size) {
- if (rc > 0)
- rc = -EIO;
- goto fail;
- }
- rc = security_kernel_fw_from_file(file, buf, size);
- if (rc)
- goto fail;
- fw_buf->data = buf;
- fw_buf->size = size;
- return 0;
-fail:
- vfree(buf);
- return rc;
+ mutex_lock(&fw_lock);
+ set_bit(FW_STATUS_DONE, &buf->status);
+ complete_all(&buf->completion);
+ mutex_unlock(&fw_lock);
}
static int fw_get_filesystem_firmware(struct device *device,
struct firmware_buf *buf)
{
+ loff_t size;
int i, len;
int rc = -ENOENT;
char *path;
@@ -334,8 +314,6 @@ static int fw_get_filesystem_firmware(struct device *device,
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
- struct file *file;
-
/* skip the unset customized path */
if (!fw_path[i][0])
continue;
@@ -347,28 +325,25 @@ static int fw_get_filesystem_firmware(struct device *device,
break;
}
- file = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(file))
+ buf->size = 0;
+ rc = kernel_read_file_from_path(path, &buf->data, &size,
+ INT_MAX, READING_FIRMWARE);
+ if (rc) {
+ if (rc == -ENOENT)
+ dev_dbg(device, "loading %s failed with error %d\n",
+ path, rc);
+ else
+ dev_warn(device, "loading %s failed with error %d\n",
+ path, rc);
continue;
- rc = fw_read_file_contents(file, buf);
- fput(file);
- if (rc)
- dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n",
- path, rc);
- else
- break;
+ }
+ dev_dbg(device, "direct-loading %s\n", buf->fw_id);
+ buf->size = size;
+ fw_finish_direct_load(device, buf);
+ break;
}
__putname(path);
- if (!rc) {
- dev_dbg(device, "firmware: direct-loading firmware %s\n",
- buf->fw_id);
- mutex_lock(&fw_lock);
- set_bit(FW_STATUS_DONE, &buf->status);
- complete_all(&buf->completion);
- mutex_unlock(&fw_lock);
- }
-
return rc;
}
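
With this conversion the filesystem fallback no longer open-codes file I/O:
kernel_read_file_from_path() allocates the buffer, enforces the size cap and
runs the post-read security hook in one call, and the buffer it returns is
vmalloc()ed (hence the vfree() pairing elsewhere in this file). A hedged
sketch of the primitive itself (illustrative only, not part of the patch; the
path and foo_* name are invented):

#include <linux/fs.h>
#include <linux/vmalloc.h>

static int foo_load_blob(void)
{
	void *data = NULL;
	loff_t size = 0;
	int rc;

	/* Reads the whole file (capped at INT_MAX) and invokes the
	 * security_kernel_post_read_file() hook for READING_FIRMWARE. */
	rc = kernel_read_file_from_path("/lib/firmware/foo.bin", &data,
					&size, INT_MAX, READING_FIRMWARE);
	if (rc)
		return rc;

	/* ... consume data[0..size) ... */
	vfree(data);
	return 0;
}
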
@@ -660,7 +635,7 @@ static ssize_t firmware_loading_store(struct device *dev,
if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
for (i = 0; i < fw_buf->nr_pages; i++)
__free_page(fw_buf->pages[i]);
- kfree(fw_buf->pages);
+ vfree(fw_buf->pages);
fw_buf->pages = NULL;
fw_buf->page_array_size = 0;
fw_buf->nr_pages = 0;
@@ -685,8 +660,9 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: map pages failed\n",
__func__);
else
- rc = security_kernel_fw_from_file(NULL,
- fw_buf->data, fw_buf->size);
+ rc = security_kernel_post_read_file(NULL,
+ fw_buf->data, fw_buf->size,
+ READING_FIRMWARE);
/*
* Same logic as fw_load_abort, only the DONE bit
@@ -770,8 +746,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
buf->page_array_size * 2);
struct page **new_pages;
- new_pages = kmalloc(new_array_size * sizeof(void *),
- GFP_KERNEL);
+ new_pages = vmalloc(new_array_size * sizeof(void *));
if (!new_pages) {
fw_load_abort(fw_priv);
return -ENOMEM;
@@ -780,7 +755,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
buf->page_array_size * sizeof(void *));
memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
(new_array_size - buf->page_array_size));
- kfree(buf->pages);
+ vfree(buf->pages);
buf->pages = new_pages;
buf->page_array_size = new_array_size;
}
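
The kmalloc()->vmalloc() switch for the page-pointer array is motivated by
large images: a 256 MiB firmware blob spans 65536 pages, so the pointer array
alone is 65536 * 8 = 512 KiB on 64-bit, an order-7 physically contiguous
kmalloc() that can easily fail under fragmentation. vmalloc() only needs
individual pages, and since the array is never handed to hardware, virtually
contiguous memory is sufficient.
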
@@ -1051,7 +1026,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
}
if (fw_get_builtin_firmware(firmware, name)) {
- dev_dbg(device, "firmware: using built-in firmware %s\n", name);
+ dev_dbg(device, "using built-in %s\n", name);
return 0; /* assigned */
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 213456c2b123..f46dba8b7092 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -251,7 +251,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
return ret;
}
-static int memory_block_change_state(struct memory_block *mem,
+int memory_block_change_state(struct memory_block *mem,
unsigned long to_state, unsigned long from_state_req)
{
int ret = 0;
@@ -439,6 +439,37 @@ print_block_size(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
/*
+ * Memory auto online policy.
+ */
+
+static ssize_t
+show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (memhp_auto_online)
+ return sprintf(buf, "online\n");
+ else
+ return sprintf(buf, "offline\n");
+}
+
+static ssize_t
+store_auto_online_blocks(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (sysfs_streq(buf, "online"))
+ memhp_auto_online = true;
+ else if (sysfs_streq(buf, "offline"))
+ memhp_auto_online = false;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks,
+ store_auto_online_blocks);
+
+/*
* Some architectures will have custom drivers to do this, and
* will not need to do it from userspace. The fake hot-add code
* as well as ppc64 will do all of their discovery in userspace
@@ -746,6 +777,7 @@ static struct attribute *memory_root_attrs[] = {
#endif
&dev_attr_block_size_bytes.attr,
+ &dev_attr_auto_online_blocks.attr,
NULL
};
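
The new root-level attribute selects the policy applied to hot-added memory
blocks before any udev rule gets to run. A minimal userspace sketch that flips
the policy (illustrative only; the sysfs path is the standard memory subsystem
root):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/memory/auto_online_blocks", "w");

	if (!f)
		return 1;
	/* store_auto_online_blocks() accepts exactly "online" or "offline" */
	fputs("online", f);
	return fclose(f) ? 1 : 0;
}
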
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 272a52ebafc0..0e64a1b5e62a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -137,6 +137,62 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
return __pm_clk_add(dev, NULL, clk);
}
+
+/**
+ * of_pm_clk_add_clks - Start using device clock(s) for power management.
+ * @dev: Device whose clocks are going to be used for power management.
+ *
+ * Add a series of clocks described in the 'clocks' device-tree node for
+ * a device to the list of clocks used for the power management of @dev.
+ * On success, returns the number of clocks added. Returns a negative
+ * error code if there are no clocks in the device node for the device
+ * or if adding a clock fails.
+ */
+int of_pm_clk_add_clks(struct device *dev)
+{
+ struct clk **clks;
+ unsigned int i, count;
+ int ret;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ count = of_count_phandle_with_args(dev->of_node, "clocks",
+ "#clock-cells");
+ if (count == 0)
+ return -ENODEV;
+
+ clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ clks[i] = of_clk_get(dev->of_node, i);
+ if (IS_ERR(clks[i])) {
+ ret = PTR_ERR(clks[i]);
+ goto error;
+ }
+
+ ret = pm_clk_add_clk(dev, clks[i]);
+ if (ret) {
+ clk_put(clks[i]);
+ goto error;
+ }
+ }
+
+ kfree(clks);
+
+ return i;
+
+error:
+ while (i--)
+ pm_clk_remove_clk(dev, clks[i]);
+
+ kfree(clks);
+
+ return ret;
+}
+
/**
* __pm_clk_remove - Destroy PM clock entry.
* @ce: PM clock entry to destroy.
@@ -198,6 +254,39 @@ void pm_clk_remove(struct device *dev, const char *con_id)
}
/**
+ * pm_clk_remove_clk - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @clk: Clock pointer
+ *
+ * Remove the clock pointed to by @clk from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove_clk(struct device *dev, struct clk *clk)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+
+ if (!psd || !clk)
+ return;
+
+ spin_lock_irq(&psd->lock);
+
+ list_for_each_entry(ce, &psd->clock_list, node) {
+ if (clk == ce->clk)
+ goto remove;
+ }
+
+ spin_unlock_irq(&psd->lock);
+ return;
+
+ remove:
+ list_del(&ce->node);
+ spin_unlock_irq(&psd->lock);
+
+ __pm_clk_remove(ce);
+}
+
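
Together these two helpers let a driver hand its whole DT "clocks" list to the
PM clock framework and unwind cleanly on failure. A probe-time sketch
(illustrative only, not part of the patch; foo_* is invented):

#include <linux/platform_device.h>
#include <linux/pm_clock.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/* Returns the number of clocks added, or a negative error code;
	 * on error it has already removed whatever it had added. */
	ret = of_pm_clk_add_clks(&pdev->dev);
	if (ret < 0) {
		pm_clk_destroy(&pdev->dev);
		return ret;
	}

	return 0;
}
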
+/**
* pm_clk_init - Initialize a device's list of power management clocks.
* @dev: Device to initialize the list of PM clocks for.
*
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 301b785f9f56..56705b52758e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -104,6 +104,7 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
+ unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
s64 elapsed_ns;
int ret;
@@ -120,10 +121,10 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns <= genpd->power_on_latency_ns)
+ if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
- genpd->power_on_latency_ns = elapsed_ns;
+ genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "on", elapsed_ns);
@@ -133,6 +134,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
+ unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
s64 elapsed_ns;
int ret;
@@ -149,10 +151,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns <= genpd->power_off_latency_ns)
+ if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
return ret;
- genpd->power_off_latency_ns = elapsed_ns;
+ genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "off", elapsed_ns);
@@ -485,8 +487,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
if (timed && runtime_pm)
time_start = ktime_get();
- genpd_start_dev(genpd, dev);
- genpd_restore_dev(genpd, dev);
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ goto err_poweroff;
+
+ ret = genpd_restore_dev(genpd, dev);
+ if (ret)
+ goto err_stop;
/* Update resume latency value if the measured time exceeds it. */
if (timed && runtime_pm) {
@@ -501,6 +508,17 @@ static int pm_genpd_runtime_resume(struct device *dev)
}
return 0;
+
+err_stop:
+ genpd_stop_dev(genpd, dev);
+err_poweroff:
+ if (!dev->power.irq_safe) {
+ mutex_lock(&genpd->lock);
+ genpd_poweroff(genpd, 0);
+ mutex_unlock(&genpd->lock);
+ }
+
+ return ret;
}
static bool pd_ignore_unused;
@@ -585,6 +603,8 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
|| atomic_read(&genpd->sd_count) > 0)
return;
+ /* Choose the deepest state when suspending */
+ genpd->state_idx = genpd->state_count - 1;
genpd_power_off(genpd, timed);
genpd->status = GPD_STATE_POWER_OFF;
@@ -1378,7 +1398,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock(&subdomain->lock);
mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
- if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
subdomain->name);
ret = -EBUSY;
@@ -1508,6 +1528,20 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->dev_ops.start = pm_clk_resume;
}
+ if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
+ pr_warn("Initial state index out of bounds.\n");
+ genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
+ }
+
+ if (genpd->state_count > GENPD_MAX_NUM_STATES) {
+ pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
+ genpd->state_count = GENPD_MAX_NUM_STATES;
+ }
+
+ /* Use only one "off" state if there were no states declared */
+ if (genpd->state_count == 0)
+ genpd->state_count = 1;
+
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
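
A platform wanting more than the single implicit off-state can now seed the
states[] array before registration; pm_genpd_init() clamps out-of-range
indices and counts as shown above. A hedged sketch with two states
(illustrative only, not part of the patch; the domain name and latencies are
invented):

#include <linux/init.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain foo_pd = {
	.name = "foo",
	.states = {
		/* state 0: shallow retention, cheap to enter and leave */
		{ .power_off_latency_ns = 100000,
		  .power_on_latency_ns = 150000 },
		/* state 1: full power-off, deeper but slower */
		{ .power_off_latency_ns = 800000,
		  .power_on_latency_ns = 1200000 },
	},
	.state_count = 2,
};

static int __init foo_pd_init(void)
{
	pm_genpd_init(&foo_pd, &simple_qos_governor, false);
	return 0;
}
core_initcall(foo_pd_init);
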
@@ -1668,6 +1702,9 @@ struct generic_pm_domain *of_genpd_get_from_provider(
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
struct of_genpd_provider *provider;
+ if (!genpdspec)
+ return ERR_PTR(-EINVAL);
+
mutex_lock(&of_genpd_mutex);
/* Check if we have such a provider in our array */
@@ -1864,6 +1901,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
struct pm_domain_data *pm_data;
const char *kobj_path;
struct gpd_link *link;
+ char state[16];
int ret;
ret = mutex_lock_interruptible(&genpd->lock);
@@ -1872,7 +1910,13 @@ static int pm_genpd_summary_one(struct seq_file *s,
if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
goto exit;
- seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
+ if (genpd->status == GPD_STATE_POWER_OFF)
+ snprintf(state, sizeof(state), "%s-%u",
+ status_lookup[genpd->status], genpd->state_idx);
+ else
+ snprintf(state, sizeof(state), "%s",
+ status_lookup[genpd->status]);
+ seq_printf(s, "%-30s %-15s ", genpd->name, state);
/*
* Modifications on the list require holding locks on both
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 1e937ac5f456..00a5436dd44b 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -98,7 +98,8 @@ static bool default_stop_ok(struct device *dev)
*
* This routine must be executed under the PM domain's lock.
*/
-static bool default_power_down_ok(struct dev_pm_domain *pd)
+static bool __default_power_down_ok(struct dev_pm_domain *pd,
+ unsigned int state)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct gpd_link *link;
@@ -106,27 +107,9 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
s64 min_off_time_ns;
s64 off_on_time_ns;
- if (genpd->max_off_time_changed) {
- struct gpd_link *link;
-
- /*
- * We have to invalidate the cached results for the masters, so
- * use the observation that default_power_down_ok() is not
- * going to be called for any master until this instance
- * returns.
- */
- list_for_each_entry(link, &genpd->slave_links, slave_node)
- link->master->max_off_time_changed = true;
-
- genpd->max_off_time_changed = false;
- genpd->cached_power_down_ok = false;
- genpd->max_off_time_ns = -1;
- } else {
- return genpd->cached_power_down_ok;
- }
+ off_on_time_ns = genpd->states[state].power_off_latency_ns +
+ genpd->states[state].power_on_latency_ns;
- off_on_time_ns = genpd->power_off_latency_ns +
- genpd->power_on_latency_ns;
min_off_time_ns = -1;
/*
@@ -186,8 +169,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
min_off_time_ns = constraint_ns;
}
- genpd->cached_power_down_ok = true;
-
/*
* If the computed minimum device off time is negative, there are no
* latency constraints, so the domain can spend arbitrary time in the
@@ -201,10 +182,45 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
* time and the time needed to turn the domain on is the maximum
* theoretical time this domain can spend in the "off" state.
*/
- genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns;
+ genpd->max_off_time_ns = min_off_time_ns -
+ genpd->states[state].power_on_latency_ns;
return true;
}
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct gpd_link *link;
+
+ if (!genpd->max_off_time_changed)
+ return genpd->cached_power_down_ok;
+
+ /*
+ * We have to invalidate the cached results for the masters, so
+ * use the observation that default_power_down_ok() is not
+ * going to be called for any master until this instance
+ * returns.
+ */
+ list_for_each_entry(link, &genpd->slave_links, slave_node)
+ link->master->max_off_time_changed = true;
+
+ genpd->max_off_time_ns = -1;
+ genpd->max_off_time_changed = false;
+ genpd->cached_power_down_ok = true;
+ genpd->state_idx = genpd->state_count - 1;
+
+ /* Find a state to power down to, starting from the deepest. */
+ while (!__default_power_down_ok(pd, genpd->state_idx)) {
+ if (genpd->state_idx == 0) {
+ genpd->cached_power_down_ok = false;
+ break;
+ }
+ genpd->state_idx--;
+ }
+
+ return genpd->cached_power_down_ok;
+}
+
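
Worked example, reusing the two-state numbers from the sketch above: state 1
costs 0.8 ms + 1.2 ms = 2.0 ms to cycle, so with a tightest device constraint
of 1.5 ms __default_power_down_ok() rejects it; the loop then tries state 0,
whose 0.1 ms + 0.15 ms = 0.25 ms fits, so the governor settles on
state_idx = 0 and caches the power-down decision as true until
max_off_time_changed is set again.
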
static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
return false;
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index cf351d3dab1c..d8f4cc22856c 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -13,50 +13,52 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>
+#include <linux/regulator/consumer.h>
#include "opp.h"
/*
- * The root of the list of all devices. All device_opp structures branch off
- * from here, with each device_opp containing the list of opp it supports in
+ * The root of the list of all opp-tables. All opp_table structures branch off
+ * from here, with each opp_table containing the list of opps it supports in
* various states of availability.
*/
-static LIST_HEAD(dev_opp_list);
+static LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
-DEFINE_MUTEX(dev_opp_list_lock);
+DEFINE_MUTEX(opp_table_lock);
#define opp_rcu_lockdep_assert() \
do { \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&dev_opp_list_lock), \
- "Missing rcu_read_lock() or " \
- "dev_opp_list_lock protection"); \
+ !lockdep_is_held(&opp_table_lock), \
+ "Missing rcu_read_lock() or " \
+ "opp_table_lock protection"); \
} while (0)
-static struct device_list_opp *_find_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+static struct opp_device *_find_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
- list_for_each_entry(list_dev, &dev_opp->dev_list, node)
- if (list_dev->dev == dev)
- return list_dev;
+ list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+ if (opp_dev->dev == dev)
+ return opp_dev;
return NULL;
}
-static struct device_opp *_managed_opp(const struct device_node *np)
+static struct opp_table *_managed_opp(const struct device_node *np)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
- list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
- if (dev_opp->np == np) {
+ list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+ if (opp_table->np == np) {
/*
* Multiple devices can point to the same OPP table and
* so will have same node-pointer, np.
@@ -64,7 +66,7 @@ static struct device_opp *_managed_opp(const struct device_node *np)
* But the OPPs will be considered as shared only if the
* OPP table contains a "opp-shared" property.
*/
- return dev_opp->shared_opp ? dev_opp : NULL;
+ return opp_table->shared_opp ? opp_table : NULL;
}
}
@@ -72,24 +74,24 @@ static struct device_opp *_managed_opp(const struct device_node *np)
}
/**
- * _find_device_opp() - find device_opp struct using device pointer
- * @dev: device pointer used to lookup device OPPs
+ * _find_opp_table() - find opp_table struct using device pointer
+ * @dev: device pointer used to lookup OPP table
*
- * Search list of device OPPs for one containing matching device. Does a RCU
- * reader operation to grab the pointer needed.
+ * Search the OPP tables for one containing the matching device. Does an RCU
+ * reader operation to grab the pointer needed.
*
- * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
* -EINVAL based on type of error.
*
* Locking: For readers, this function must be called under rcu_read_lock().
- * device_opp is a RCU protected pointer, which means that device_opp is valid
+ * opp_table is a RCU protected pointer, which means that opp_table is valid
* as long as we are under RCU lock.
*
- * For Writers, this function must be called with dev_opp_list_lock held.
+ * For Writers, this function must be called with opp_table_lock held.
*/
-struct device_opp *_find_device_opp(struct device *dev)
+struct opp_table *_find_opp_table(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
opp_rcu_lockdep_assert();
@@ -98,9 +100,9 @@ struct device_opp *_find_device_opp(struct device *dev)
return ERR_PTR(-EINVAL);
}
- list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
- if (_find_list_dev(dev, dev_opp))
- return dev_opp;
+ list_for_each_entry_rcu(opp_table, &opp_tables, node)
+ if (_find_opp_dev(dev, opp_table))
+ return opp_table;
return ERR_PTR(-ENODEV);
}
@@ -213,16 +215,16 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
*/
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
unsigned long clock_latency_ns;
rcu_read_lock();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
clock_latency_ns = 0;
else
- clock_latency_ns = dev_opp->clock_latency_ns_max;
+ clock_latency_ns = opp_table->clock_latency_ns_max;
rcu_read_unlock();
return clock_latency_ns;
@@ -230,6 +232,79 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
/**
+ * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max voltage latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp;
+ struct regulator *reg;
+ unsigned long latency_ns = 0;
+ unsigned long min_uV = ~0, max_uV = 0;
+ int ret;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ reg = opp_table->regulator;
+ if (IS_ERR(reg)) {
+ /* Regulator may not be required for device */
+ rcu_read_unlock();
+ return 0;
+ }
+
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+ if (!opp->available)
+ continue;
+
+ if (opp->u_volt_min < min_uV)
+ min_uV = opp->u_volt_min;
+ if (opp->u_volt_max > max_uV)
+ max_uV = opp->u_volt_max;
+ }
+
+ rcu_read_unlock();
+
+ /*
+ * The caller needs to ensure that opp_table (and hence the regulator)
+ * isn't freed, while we are executing this routine.
+ */
+ ret = regulator_set_voltage_time(reg, min_uV, max_uV);
+ if (ret > 0)
+ latency_ns = ret * 1000;
+
+ return latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
+
+/**
+ * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
+ * nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max transition latency, in nanoseconds, to
+ * switch from one OPP to another.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+ return dev_pm_opp_get_max_volt_latency(dev) +
+ dev_pm_opp_get_max_clock_latency(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
+
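
The new helper simply adds the worst-case clock and voltage ramp times, which
is exactly what cpufreq wants for its transition-latency bound. A hedged
sketch (illustrative only, not part of the patch; foo_* is invented):

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	if (!cpu_dev)
		return -ENODEV;

	/* Worst-case clk_set_rate() plus regulator ramp, in nanoseconds */
	policy->cpuinfo.transition_latency =
		dev_pm_opp_get_max_transition_latency(cpu_dev);

	return 0;
}
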
+/**
* dev_pm_opp_get_suspend_opp() - Get suspend opp
* @dev: device for which we do this operation
*
@@ -244,21 +319,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
*/
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
opp_rcu_lockdep_assert();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
- !dev_opp->suspend_opp->available)
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
+ !opp_table->suspend_opp->available)
return NULL;
- return dev_opp->suspend_opp;
+ return opp_table->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
/**
- * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
* @dev: device for which we do this operation
*
* Return: This function returns the number of available opps if there are any,
@@ -268,21 +343,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp;
int count = 0;
rcu_read_lock();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- count = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n",
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ count = PTR_ERR(opp_table);
+ dev_err(dev, "%s: OPP table not found (%d)\n",
__func__, count);
goto out_unlock;
}
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available)
count++;
}
@@ -299,7 +374,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* @freq: frequency to search for
* @available: true/false - match for available opp
*
- * Return: Searches for exact match in the opp list and returns pointer to the
+ * Return: Searches for exact match in the opp table and returns pointer to the
* matching opp if found, else returns ERR_PTR in case of error and should
* be handled using IS_ERR. Error return values can be:
* EINVAL: for bad pointer
@@ -323,19 +398,20 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
return ERR_PTR(r);
}
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available == available &&
temp_opp->rate == freq) {
opp = temp_opp;
@@ -371,7 +447,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -381,11 +457,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
opp = temp_opp;
*freq = opp->rate;
@@ -421,7 +497,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -431,11 +507,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available) {
/* go to the next node, before choosing prev */
if (temp_opp->rate > *freq)
@@ -451,130 +527,343 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
-/* List-dev Helpers */
-static void _kfree_list_dev_rcu(struct rcu_head *head)
+/*
+ * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
+ * while clk returned here is used.
+ */
+static struct clk *_get_opp_clk(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct clk *clk;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ clk = ERR_CAST(opp_table);
+ goto unlock;
+ }
+
+ clk = opp_table->clk;
+ if (IS_ERR(clk))
+ dev_err(dev, "%s: No clock available for the device\n",
+ __func__);
+
+unlock:
+ rcu_read_unlock();
+ return clk;
+}
+
+static int _set_opp_voltage(struct device *dev, struct regulator *reg,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+{
+ int ret;
+
+ /* Regulator not available for device */
+ if (IS_ERR(reg)) {
+ dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
+ PTR_ERR(reg));
+ return 0;
+ }
+
+ dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
+ u_volt, u_volt_max);
+
+ ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
+ u_volt_max);
+ if (ret)
+ dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
+ __func__, u_volt_min, u_volt, u_volt_max, ret);
+
+ return ret;
+}
+
+/**
+ * dev_pm_opp_set_rate() - Configure new OPP based on frequency
+ * @dev: device for which we do this operation
+ * @target_freq: frequency to achieve
+ *
+ * This configures the power-supplies and clock source to the levels specified
+ * by the OPP corresponding to the target_freq.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *old_opp, *opp;
+ struct regulator *reg;
+ struct clk *clk;
+ unsigned long freq, old_freq;
+ unsigned long u_volt, u_volt_min, u_volt_max;
+ unsigned long ou_volt, ou_volt_min, ou_volt_max;
+ int ret;
+
+ if (unlikely(!target_freq)) {
+ dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
+ target_freq);
+ return -EINVAL;
+ }
+
+ clk = _get_opp_clk(dev);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ freq = clk_round_rate(clk, target_freq);
+ if ((long)freq <= 0)
+ freq = target_freq;
+
+ old_freq = clk_get_rate(clk);
+
+ /* Return early if nothing to do */
+ if (old_freq == freq) {
+ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+ __func__, freq);
+ return 0;
+ }
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ rcu_read_unlock();
+ return PTR_ERR(opp_table);
+ }
+
+ old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+ if (!IS_ERR(old_opp)) {
+ ou_volt = old_opp->u_volt;
+ ou_volt_min = old_opp->u_volt_min;
+ ou_volt_max = old_opp->u_volt_max;
+ } else {
+ dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
+ __func__, old_freq, PTR_ERR(old_opp));
+ }
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+ __func__, freq, ret);
+ rcu_read_unlock();
+ return ret;
+ }
+
+ u_volt = opp->u_volt;
+ u_volt_min = opp->u_volt_min;
+ u_volt_max = opp->u_volt_max;
+
+ reg = opp_table->regulator;
+
+ rcu_read_unlock();
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+ u_volt_max);
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+ __func__, old_freq, freq);
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ goto restore_voltage;
+ }
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+ u_volt_max);
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (clk_set_rate(clk, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (!IS_ERR(old_opp))
+ _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
+
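
Since dev_pm_opp_set_rate() already orders the voltage and frequency changes
and rolls back on failure, a cpufreq ->target_index() callback can collapse to
a single call. A hedged sketch (illustrative only, not part of the patch;
foo_freq_table is an invented driver-private table with frequencies in kHz):

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static struct cpufreq_frequency_table *foo_freq_table;

static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	unsigned long freq_hz = foo_freq_table[index].frequency * 1000UL;

	/* Finds the matching OPP, scales voltage before frequency when
	 * ramping up (and after it, when ramping down), restoring the old
	 * state if any step fails. */
	return dev_pm_opp_set_rate(cpu_dev, freq_hz);
}
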
+/* OPP-dev Helpers */
+static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
- list_dev = container_of(head, struct device_list_opp, rcu_head);
- kfree_rcu(list_dev, rcu_head);
+ opp_dev = container_of(head, struct opp_device, rcu_head);
+ kfree_rcu(opp_dev, rcu_head);
}
-static void _remove_list_dev(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static void _remove_opp_dev(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- opp_debug_unregister(list_dev, dev_opp);
- list_del(&list_dev->node);
- call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
- _kfree_list_dev_rcu);
+ opp_debug_unregister(opp_dev, opp_table);
+ list_del(&opp_dev->node);
+ call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
+ _kfree_opp_dev_rcu);
}
-struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+struct opp_device *_add_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
int ret;
- list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
- if (!list_dev)
+ opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
+ if (!opp_dev)
return NULL;
- /* Initialize list-dev */
- list_dev->dev = dev;
- list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+ /* Initialize opp-dev */
+ opp_dev->dev = dev;
+ list_add_rcu(&opp_dev->node, &opp_table->dev_list);
- /* Create debugfs entries for the dev_opp */
- ret = opp_debug_register(list_dev, dev_opp);
+ /* Create debugfs entries for the opp_table */
+ ret = opp_debug_register(opp_dev, opp_table);
if (ret)
dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
__func__, ret);
- return list_dev;
+ return opp_dev;
}
/**
- * _add_device_opp() - Find device OPP table or allocate a new one
+ * _add_opp_table() - Find OPP table or allocate a new one
* @dev: device for which we do this operation
*
* It tries to find an existing table first, if it couldn't find one, it
* allocates a new OPP table and returns that.
*
- * Return: valid device_opp pointer if success, else NULL.
+ * Return: valid opp_table pointer on success, else NULL.
+ * Return: valid opp_table pointer if success, else NULL.
*/
-static struct device_opp *_add_device_opp(struct device *dev)
+static struct opp_table *_add_opp_table(struct device *dev)
{
- struct device_opp *dev_opp;
- struct device_list_opp *list_dev;
+ struct opp_table *opp_table;
+ struct opp_device *opp_dev;
+ struct device_node *np;
+ int ret;
- /* Check for existing list for 'dev' first */
- dev_opp = _find_device_opp(dev);
- if (!IS_ERR(dev_opp))
- return dev_opp;
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (!IS_ERR(opp_table))
+ return opp_table;
/*
- * Allocate a new device OPP table. In the infrequent case where a new
+ * Allocate a new OPP table. In the infrequent case where a new
* device is needed to be added, we pay this penalty.
*/
- dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
- if (!dev_opp)
+ opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
+ if (!opp_table)
return NULL;
- INIT_LIST_HEAD(&dev_opp->dev_list);
+ INIT_LIST_HEAD(&opp_table->dev_list);
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- kfree(dev_opp);
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ kfree(opp_table);
return NULL;
}
- srcu_init_notifier_head(&dev_opp->srcu_head);
- INIT_LIST_HEAD(&dev_opp->opp_list);
+ /*
+ * Only required for backward compatibility with v1 bindings, but not
+ * harmful in other cases either, so it is done unconditionally.
+ */
+ np = of_node_get(dev->of_node);
+ if (np) {
+ u32 val;
+
+ if (!of_property_read_u32(np, "clock-latency", &val))
+ opp_table->clock_latency_ns_max = val;
+ of_property_read_u32(np, "voltage-tolerance",
+ &opp_table->voltage_tolerance_v1);
+ of_node_put(np);
+ }
+
+ /* Set regulator to a non-NULL error value */
+ opp_table->regulator = ERR_PTR(-ENXIO);
+
+ /* Find clk for the device */
+ opp_table->clk = clk_get(dev, NULL);
+ if (IS_ERR(opp_table->clk)) {
+ ret = PTR_ERR(opp_table->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
+ ret);
+ }
- /* Secure the device list modification */
- list_add_rcu(&dev_opp->node, &dev_opp_list);
- return dev_opp;
+ srcu_init_notifier_head(&opp_table->srcu_head);
+ INIT_LIST_HEAD(&opp_table->opp_list);
+
+ /* Secure the device table modification */
+ list_add_rcu(&opp_table->node, &opp_tables);
+ return opp_table;
}
/**
- * _kfree_device_rcu() - Free device_opp RCU handler
+ * _kfree_device_rcu() - Free opp_table RCU handler
* @head: RCU head
*/
static void _kfree_device_rcu(struct rcu_head *head)
{
- struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+ struct opp_table *opp_table = container_of(head, struct opp_table,
+ rcu_head);
- kfree_rcu(device_opp, rcu_head);
+ kfree_rcu(opp_table, rcu_head);
}
/**
- * _remove_device_opp() - Removes a device OPP table
- * @dev_opp: device OPP table to be removed.
+ * _remove_opp_table() - Removes an OPP table
+ * @opp_table: OPP table to be removed.
*
- * Removes/frees device OPP table it it doesn't contain any OPPs.
+ * Removes/frees OPP table if it doesn't contain any OPPs.
*/
-static void _remove_device_opp(struct device_opp *dev_opp)
+static void _remove_opp_table(struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
+
+ if (!list_empty(&opp_table->opp_list))
+ return;
- if (!list_empty(&dev_opp->opp_list))
+ if (opp_table->supported_hw)
return;
- if (dev_opp->supported_hw)
+ if (opp_table->prop_name)
return;
- if (dev_opp->prop_name)
+ if (!IS_ERR(opp_table->regulator))
return;
- list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
- node);
+ /* Release clk */
+ if (!IS_ERR(opp_table->clk))
+ clk_put(opp_table->clk);
- _remove_list_dev(list_dev, dev_opp);
+ opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
+ node);
+
+ _remove_opp_dev(opp_dev, opp_table);
/* dev_list must be empty now */
- WARN_ON(!list_empty(&dev_opp->dev_list));
+ WARN_ON(!list_empty(&opp_table->dev_list));
- list_del_rcu(&dev_opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+ list_del_rcu(&opp_table->node);
+ call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
_kfree_device_rcu);
}
@@ -591,17 +880,17 @@ static void _kfree_opp_rcu(struct rcu_head *head)
/**
* _opp_remove() - Remove an OPP from a table definition
- * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
* @opp: pointer to the OPP to remove
* @notify: OPP_EVENT_REMOVE notification should be sent or not
*
- * This function removes an opp definition from the opp list.
+ * This function removes an opp definition from the opp table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* It is assumed that the caller holds required mutex for an RCU updater
* strategy.
*/
-static void _opp_remove(struct device_opp *dev_opp,
+static void _opp_remove(struct opp_table *opp_table,
struct dev_pm_opp *opp, bool notify)
{
/*
@@ -609,22 +898,23 @@ static void _opp_remove(struct device_opp *dev_opp,
* frequency/voltage list.
*/
if (notify)
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_REMOVE, opp);
opp_debug_remove_one(opp);
list_del_rcu(&opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
- _remove_device_opp(dev_opp);
+ _remove_opp_table(opp_table);
}
/**
- * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
* @freq: OPP to remove with matching 'freq'
*
- * This function removes an opp from the opp list.
+ * This function removes an opp from the opp table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -633,17 +923,17 @@ static void _opp_remove(struct device_opp *dev_opp,
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
struct dev_pm_opp *opp;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
bool found = false;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
goto unlock;
- list_for_each_entry(opp, &dev_opp->opp_list, node) {
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
if (opp->rate == freq) {
found = true;
break;
@@ -656,14 +946,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
goto unlock;
}
- _opp_remove(dev_opp, opp, true);
+ _opp_remove(opp_table, opp, true);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
static struct dev_pm_opp *_allocate_opp(struct device *dev,
- struct device_opp **dev_opp)
+ struct opp_table **opp_table)
{
struct dev_pm_opp *opp;
@@ -674,8 +964,8 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
INIT_LIST_HEAD(&opp->node);
- *dev_opp = _add_device_opp(dev);
- if (!*dev_opp) {
+ *opp_table = _add_opp_table(dev);
+ if (!*opp_table) {
kfree(opp);
return NULL;
}
@@ -683,22 +973,38 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
return opp;
}
+static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
+ struct opp_table *opp_table)
+{
+ struct regulator *reg = opp_table->regulator;
+
+ if (!IS_ERR(reg) &&
+ !regulator_is_supported_voltage(reg, opp->u_volt_min,
+ opp->u_volt_max)) {
+ pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+ __func__, opp->u_volt_min, opp->u_volt_max);
+ return false;
+ }
+
+ return true;
+}
+
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct device_opp *dev_opp)
+ struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
- struct list_head *head = &dev_opp->opp_list;
+ struct list_head *head = &opp_table->opp_list;
int ret;
/*
* Insert new OPP in order of increasing frequency and discard if
* already present.
*
- * Need to use &dev_opp->opp_list in the condition part of the 'for'
+ * Need to use &opp_table->opp_list in the condition part of the 'for'
* loop, don't replace it with head otherwise it will become an infinite
* loop.
*/
- list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
if (new_opp->rate > opp->rate) {
head = &opp->node;
continue;
@@ -716,14 +1022,20 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
0 : -EEXIST;
}
- new_opp->dev_opp = dev_opp;
+ new_opp->opp_table = opp_table;
list_add_rcu(&new_opp->node, head);
- ret = opp_debug_create_one(new_opp, dev_opp);
+ ret = opp_debug_create_one(new_opp, opp_table);
if (ret)
dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
__func__, ret);
+ if (!_opp_supported_by_regulators(new_opp, opp_table)) {
+ new_opp->available = false;
+ dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
+ __func__, new_opp->rate);
+ }
+
return 0;
}
@@ -734,14 +1046,14 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* @u_volt: Voltage in uVolts for this OPP
* @dynamic: Dynamically added OPPs.
*
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
*
* NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
* and freed by dev_pm_opp_of_remove_table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -757,14 +1069,15 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
bool dynamic)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
+ unsigned long tol;
int ret;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- new_opp = _allocate_opp(dev, &dev_opp);
+ new_opp = _allocate_opp(dev, &opp_table);
if (!new_opp) {
ret = -ENOMEM;
goto unlock;
@@ -772,33 +1085,36 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
/* populate the opp table */
new_opp->rate = freq;
+ tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->u_volt = u_volt;
+ new_opp->u_volt_min = u_volt - tol;
+ new_opp->u_volt_max = u_volt + tol;
new_opp->available = true;
new_opp->dynamic = dynamic;
- ret = _opp_add(dev, new_opp, dev_opp);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret)
goto free_opp;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(dev_opp, new_opp, false);
+ _opp_remove(opp_table, new_opp, false);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
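
Worked example for the v1 path above: with "voltage-tolerance = <5>" in the
device node, an OPP added at u_volt = 1000000 uV gets
tol = 1000000 * 5 / 100 = 50000 uV, hence u_volt_min = 950000 uV and
u_volt_max = 1050000 uV, giving the triplet-based regulator calls a usable
window even for old DTs that specify only a single voltage per OPP.
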
/* TODO: Support multiple regulators */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
- struct device_opp *dev_opp)
+ struct opp_table *opp_table)
{
u32 microvolt[3] = {0};
u32 val;
@@ -807,9 +1123,9 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
char name[NAME_MAX];
/* Search for "opp-microvolt-<name>" */
- if (dev_opp->prop_name) {
+ if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microvolt-%s",
- dev_opp->prop_name);
+ opp_table->prop_name);
prop = of_find_property(opp->np, name, NULL);
}
@@ -844,14 +1160,20 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
}
opp->u_volt = microvolt[0];
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
+
+ if (count == 1) {
+ opp->u_volt_min = opp->u_volt;
+ opp->u_volt_max = opp->u_volt;
+ } else {
+ opp->u_volt_min = microvolt[1];
+ opp->u_volt_max = microvolt[2];
+ }
/* Search for "opp-microamp-<name>" */
prop = NULL;
- if (dev_opp->prop_name) {
+ if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microamp-%s",
- dev_opp->prop_name);
+ opp_table->prop_name);
prop = of_find_property(opp->np, name, NULL);
}
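
The count == 1 branch above makes the parser accept both shapes of the
(optional, possibly name-postfixed) v2 property. Example values, invented for
illustration and shown here as a comment:

	/*
	 * Single entry:  opp-microvolt = <975000>;
	 *                -> min = max = target = 975000 uV
	 * Triplet:       opp-microvolt = <975000 970000 985000>;
	 *                -> interpreted as <target min max>
	 */
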
@@ -878,7 +1200,7 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
* OPPs, which are available for those versions, based on its 'opp-supported-hw'
* property.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -887,44 +1209,44 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
unsigned int count)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
int ret = 0;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- dev_opp = _add_device_opp(dev);
- if (!dev_opp) {
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
ret = -ENOMEM;
goto unlock;
}
- /* Make sure there are no concurrent readers while updating dev_opp */
- WARN_ON(!list_empty(&dev_opp->opp_list));
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
- /* Do we already have a version hierarchy associated with dev_opp? */
- if (dev_opp->supported_hw) {
+ /* Do we already have a version hierarchy associated with opp_table? */
+ if (opp_table->supported_hw) {
dev_err(dev, "%s: Already have supported hardware list\n",
__func__);
ret = -EBUSY;
goto err;
}
- dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
+ opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
GFP_KERNEL);
- if (!dev_opp->supported_hw) {
+ if (!opp_table->supported_hw) {
ret = -ENOMEM;
goto err;
}
- dev_opp->supported_hw_count = count;
- mutex_unlock(&dev_opp_list_lock);
+ opp_table->supported_hw_count = count;
+ mutex_unlock(&opp_table_lock);
return 0;
err:
- _remove_device_opp(dev_opp);
+ _remove_opp_table(opp_table);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
@@ -932,13 +1254,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
/**
* dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
- * @dev: Device for which supported-hw has to be set.
+ * @dev: Device for which supported-hw has to be put.
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
+ * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
* will not be freed.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -946,42 +1268,43 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
*/
void dev_pm_opp_put_supported_hw(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- /* Check for existing list for 'dev' first */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
goto unlock;
}
- /* Make sure there are no concurrent readers while updating dev_opp */
- WARN_ON(!list_empty(&dev_opp->opp_list));
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
- if (!dev_opp->supported_hw) {
+ if (!opp_table->supported_hw) {
dev_err(dev, "%s: Doesn't have supported hardware list\n",
__func__);
goto unlock;
}
- kfree(dev_opp->supported_hw);
- dev_opp->supported_hw = NULL;
- dev_opp->supported_hw_count = 0;
+ kfree(opp_table->supported_hw);
+ opp_table->supported_hw = NULL;
+ opp_table->supported_hw_count = 0;
- /* Try freeing device_opp if this was the last blocking resource */
- _remove_device_opp(dev_opp);
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
/**
* dev_pm_opp_set_prop_name() - Set prop-extn name
- * @dev: Device for which the regulator has to be set.
+ * @dev: Device for which the prop-name has to be set.
* @name: name to postfix to properties.
*
* This is required only for the V2 bindings, and it enables a platform to
@@ -989,7 +1312,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
* which the extension will apply are opp-microvolt and opp-microamp. OPP core
* should postfix the property name with -<name> while looking for them.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -997,42 +1320,42 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
*/
int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
int ret = 0;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- dev_opp = _add_device_opp(dev);
- if (!dev_opp) {
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
ret = -ENOMEM;
goto unlock;
}
- /* Make sure there are no concurrent readers while updating dev_opp */
- WARN_ON(!list_empty(&dev_opp->opp_list));
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
- /* Do we already have a prop-name associated with dev_opp? */
- if (dev_opp->prop_name) {
+ /* Do we already have a prop-name associated with opp_table? */
+ if (opp_table->prop_name) {
dev_err(dev, "%s: Already have prop-name %s\n", __func__,
- dev_opp->prop_name);
+ opp_table->prop_name);
ret = -EBUSY;
goto err;
}
- dev_opp->prop_name = kstrdup(name, GFP_KERNEL);
- if (!dev_opp->prop_name) {
+ opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+ if (!opp_table->prop_name) {
ret = -ENOMEM;
goto err;
}
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return 0;
err:
- _remove_device_opp(dev_opp);
+ _remove_opp_table(opp_table);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
@@ -1040,13 +1363,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
/**
* dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
- * @dev: Device for which the regulator has to be set.
+ * @dev: Device for which the prop-name has to be put.
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure
+ * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
* will not be freed.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1054,45 +1377,154 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
*/
void dev_pm_opp_put_prop_name(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- /* Check for existing list for 'dev' first */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
goto unlock;
}
- /* Make sure there are no concurrent readers while updating dev_opp */
- WARN_ON(!list_empty(&dev_opp->opp_list));
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
- if (!dev_opp->prop_name) {
+ if (!opp_table->prop_name) {
dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
goto unlock;
}
- kfree(dev_opp->prop_name);
- dev_opp->prop_name = NULL;
+ kfree(opp_table->prop_name);
+ opp_table->prop_name = NULL;
- /* Try freeing device_opp if this was the last blocking resource */
- _remove_device_opp(dev_opp);
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
-static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
+/**
+ * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * @dev: Device for which regulator name is being set.
+ * @name: Name of the regulator.
+ *
+ * In order to support OPP switching, the OPP layer needs to know the name of
+ * the device's regulator, as the core will have to switch voltages as well.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+ int ret;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Already have a regulator set */
+ if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+ ret = -EBUSY;
+ goto err;
+ }
+ /* Allocate the regulator */
+ reg = regulator_get_optional(dev, name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%s: no regulator (%s) found: %d\n",
+ __func__, name, ret);
+ goto err;
+ }
+
+ opp_table->regulator = reg;
+
+ mutex_unlock(&opp_table_lock);
+ return 0;
+
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
+
+/**
+ * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
+ * @dev: Device for which regulator was set.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulator(struct device *dev)
+{
+ struct opp_table *opp_table;
+
+ mutex_lock(&opp_table_lock);
+
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
+ goto unlock;
+ }
+
+ if (IS_ERR(opp_table->regulator)) {
+ dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ regulator_put(opp_table->regulator);
+ opp_table->regulator = ERR_PTR(-ENXIO);
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
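Taken together, dev_pm_opp_set_regulator() and dev_pm_opp_put_regulator() bracket the lifetime of the OPP table's supply handle. A minimal probe/remove sketch of the intended calling order; the foo_* names and the "cpu" supply ID are illustrative, not part of this patch:

#include <linux/pm_opp.h>

static int foo_probe(struct device *dev)
{
	int ret;

	/* Must run before any OPPs are added for this device */
	ret = dev_pm_opp_set_regulator(dev, "cpu");
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_regulator(dev);

	return ret;
}

static void foo_remove(struct device *dev)
{
	dev_pm_opp_of_remove_table(dev);
	dev_pm_opp_put_regulator(dev);
}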
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
struct device_node *np)
{
- unsigned int count = dev_opp->supported_hw_count;
+ unsigned int count = opp_table->supported_hw_count;
u32 version;
int ret;
- if (!dev_opp->supported_hw)
+ if (!opp_table->supported_hw)
return true;
while (count--) {
@@ -1105,7 +1537,7 @@ static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
}
/* Both of these are bitwise masks of the versions */
- if (!(version & dev_opp->supported_hw[count]))
+ if (!(version & opp_table->supported_hw[count]))
return false;
}
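To make the mask test concrete: with supported_hw = { 0x6 } (the platform supports versions 1 and 2 at this hierarchy level), an OPP whose opp-supported-hw cell reads 0x2 passes, since 0x2 & 0x6 is non-zero, while one reading 0x8 is rejected. A standalone mirror of the loop above, for illustration only:

/* Illustrative mirror of the check in _opp_is_supported() */
static bool version_matches(const unsigned int *supported_hw,
			    unsigned int count,
			    const unsigned int *opp_versions)
{
	while (count--) {
		/* Both values are bitwise masks of the versions */
		if (!(opp_versions[count] & supported_hw[count]))
			return false;
	}
	return true;
}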
@@ -1117,11 +1549,11 @@ static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
* @dev: device for which we do this operation
* @np: device node
*
- * This function adds an opp definition to the opp list and returns status. The
+ * This function adds an opp definition to the opp table and returns status. The
* opp can be controlled using dev_pm_opp_enable/disable functions and may be
* removed by dev_pm_opp_remove.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1137,16 +1569,16 @@ static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
*/
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
u64 rate;
u32 val;
int ret;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- new_opp = _allocate_opp(dev, &dev_opp);
+ new_opp = _allocate_opp(dev, &opp_table);
if (!new_opp) {
ret = -ENOMEM;
goto unlock;
@@ -1159,7 +1591,7 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
}
/* Check if the OPP supports hardware's hierarchy of versions or not */
- if (!_opp_is_supported(dev, dev_opp, np)) {
+ if (!_opp_is_supported(dev, opp_table, np)) {
dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
goto free_opp;
}
@@ -1179,30 +1611,30 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
- ret = opp_parse_supplies(new_opp, dev, dev_opp);
+ ret = opp_parse_supplies(new_opp, dev, opp_table);
if (ret)
goto free_opp;
- ret = _opp_add(dev, new_opp, dev_opp);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret)
goto free_opp;
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
- if (dev_opp->suspend_opp) {
+ if (opp_table->suspend_opp) {
dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
- __func__, dev_opp->suspend_opp->rate,
+ __func__, opp_table->suspend_opp->rate,
new_opp->rate);
} else {
new_opp->suspend = true;
- dev_opp->suspend_opp = new_opp;
+ opp_table->suspend_opp = new_opp;
}
}
- if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
- dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+ if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+ opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
__func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
@@ -1213,13 +1645,13 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(dev_opp, new_opp, false);
+ _opp_remove(opp_table, new_opp, false);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
@@ -1229,11 +1661,11 @@ unlock:
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
*
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
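On platforms without DT tables, the dynamic v1 API documented above is simply called once per OPP from platform or driver code. A hypothetical example (the frequencies and voltages are made up):

static int foo_register_opps(struct device *dev)
{
	int ret;

	/* 500 MHz @ 0.9 V and 1 GHz @ 1.1 V, registered at init time */
	ret = dev_pm_opp_add(dev, 500000000, 900000);
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 1000000000, 1100000);
}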
@@ -1265,7 +1697,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
* copy operation, returns 0 if no modification was done OR modification was
* successful.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks to
* keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1274,7 +1706,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
static int _opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
@@ -1283,18 +1715,18 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
if (!new_opp)
return -ENOMEM;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- /* Find the device_opp */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- r = PTR_ERR(dev_opp);
+ /* Find the opp_table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ r = PTR_ERR(opp_table);
dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
goto unlock;
}
/* Do we have the frequency? */
- list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
if (tmp_opp->rate == freq) {
opp = tmp_opp;
break;
@@ -1315,21 +1747,21 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
new_opp->available = availability_req;
list_replace_rcu(&opp->node, &new_opp->node);
- mutex_unlock(&dev_opp_list_lock);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ mutex_unlock(&opp_table_lock);
+ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
/* Notify the change of the OPP availability */
if (availability_req)
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
- new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_ENABLE, new_opp);
else
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
- new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_DISABLE, new_opp);
return 0;
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
kfree(new_opp);
return r;
}
@@ -1343,7 +1775,7 @@ unlock:
* corresponding error value. It is meant to be used by users to make an OPP available
* after being temporarily made unavailable with dev_pm_opp_disable.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
@@ -1369,7 +1801,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* control by users to make this OPP not available until the circumstances are
* right to make it available again (with a call to dev_pm_opp_enable).
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
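A common pattern built on these two wrappers is a thermal or reliability agent temporarily fencing off the fastest OPP. A sketch, assuming a 1 GHz OPP exists in the device's table:

static void foo_throttle(struct device *dev, bool hot)
{
	/* Assumes a 1 GHz OPP is present in dev's table */
	if (hot)
		dev_pm_opp_disable(dev, 1000000000);
	else
		dev_pm_opp_enable(dev, 1000000000);
}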
@@ -1387,26 +1819,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
/**
* dev_pm_opp_get_notifier() - find notifier_head of the device with opp
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Return: pointer to notifier head if found, otherwise -ENODEV or
* -EINVAL based on the type of error, cast as a pointer. The value must be
* checked with IS_ERR to distinguish a valid pointer from an error result.
*
- * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * Locking: This function must be called under rcu_read_lock(). opp_table is a
+ * RCU protected pointer. The reason for the same is that the opp pointer which
+ * is returned will remain valid for use with opp_get_{voltage, freq} only while
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
- struct device_opp *dev_opp = _find_device_opp(dev);
+ struct opp_table *opp_table = _find_opp_table(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp); /* matching type */
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table); /* matching type */
- return &dev_opp->srcu_head;
+ return &opp_table->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
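Callers consume the returned head with the regular SRCU notifier API; the lookup happens under rcu_read_lock(), but registration, which may sleep, happens outside it. A sketch with a hypothetical foo_nb notifier block:

static int foo_opp_notify(struct notifier_block *nb,
			  unsigned long event, void *data)
{
	/* event is OPP_EVENT_ADD/ENABLE/DISABLE; data is the dev_pm_opp */
	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_opp_notify,
};

static int foo_register_opp_notifier(struct device *dev)
{
	struct srcu_notifier_head *head;

	rcu_read_lock();
	head = dev_pm_opp_get_notifier(dev);
	rcu_read_unlock();

	if (IS_ERR(head))
		return PTR_ERR(head);

	return srcu_notifier_chain_register(head, &foo_nb);
}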
@@ -1414,11 +1846,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
/**
* dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
* entries
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Free OPPs created using static entries present in DT.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1426,38 +1858,38 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
*/
void dev_pm_opp_of_remove_table(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *opp, *tmp;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- /* Check for existing list for 'dev' */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int error = PTR_ERR(dev_opp);
+ /* Check for existing table for 'dev' */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int error = PTR_ERR(opp_table);
if (error != -ENODEV)
- WARN(1, "%s: dev_opp: %d\n",
+ WARN(1, "%s: opp_table: %d\n",
IS_ERR_OR_NULL(dev) ?
"Invalid device" : dev_name(dev),
error);
goto unlock;
}
- /* Find if dev_opp manages a single device */
- if (list_is_singular(&dev_opp->dev_list)) {
+ /* Find if opp_table manages a single device */
+ if (list_is_singular(&opp_table->dev_list)) {
/* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
if (!opp->dynamic)
- _opp_remove(dev_opp, opp, true);
+ _opp_remove(opp_table, opp, true);
}
} else {
- _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+ _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
}
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
@@ -1478,22 +1910,22 @@ struct device_node *_of_get_opp_desc_node(struct device *dev)
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
struct device_node *np;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
int ret = 0, count = 0;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _managed_opp(opp_np);
- if (dev_opp) {
+ opp_table = _managed_opp(opp_np);
+ if (opp_table) {
/* OPPs are already managed */
- if (!_add_list_dev(dev, dev_opp))
+ if (!_add_opp_dev(dev, opp_table))
ret = -ENOMEM;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
- /* We have opp-list node now, iterate over it and add OPPs */
+ /* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_np, np) {
count++;
@@ -1509,19 +1941,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
if (WARN_ON(!count))
return -ENOENT;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(dev);
- if (WARN_ON(IS_ERR(dev_opp))) {
- ret = PTR_ERR(dev_opp);
- mutex_unlock(&dev_opp_list_lock);
+ opp_table = _find_opp_table(dev);
+ if (WARN_ON(IS_ERR(opp_table))) {
+ ret = PTR_ERR(opp_table);
+ mutex_unlock(&opp_table_lock);
goto free_table;
}
- dev_opp->np = opp_np;
- dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+ opp_table->np = opp_np;
+ opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return 0;
@@ -1550,7 +1982,7 @@ static int _of_add_opp_table_v1(struct device *dev)
*/
nr = prop->length / sizeof(u32);
if (nr % 2) {
- dev_err(dev, "%s: Invalid OPP list\n", __func__);
+ dev_err(dev, "%s: Invalid OPP table\n", __func__);
return -EINVAL;
}
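The evenness check exists because the legacy v1 "operating-points" property is a flat array of <frequency-kHz voltage-uV> pairs. A hypothetical fragment, shown as a comment for reference (not from this patch):

/*
 * Hypothetical v1 binding fragment:
 *
 *	operating-points = <
 *		498000  1100000		// 498 MHz @ 1.1 V
 *		792000  1200000		// 792 MHz @ 1.2 V
 *	>;
 *
 * prop->length is 4 * sizeof(u32), so nr = 4 and nr % 2 == 0.
 */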
@@ -1570,11 +2002,11 @@ static int _of_add_opp_table_v1(struct device *dev)
/**
* dev_pm_opp_of_add_table() - Initialize opp table from device tree
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Register the initial OPP table with the OPP library for given device.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 9f0c15570f64..ba2bdbd932ef 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -31,7 +31,7 @@
* @table: Cpufreq table returned back to caller
*
* Generate a cpufreq table for a provided device- this assumes that the
- * opp list is already initialized and ready for usage.
+ * opp table is already initialized and ready for usage.
*
* This function allocates required memory for the cpufreq table. It is
* expected that the caller does the required maintenance such as freeing
@@ -44,7 +44,7 @@
* WARNING: It is important for the callers to ensure refreshing their copy of
* the table if any of the mentioned functions have been invoked in the interim.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Since we just use the regular accessor functions to access the internal data
* structures, we use RCU read lock inside this function. As a result, users of
* this function do not need to use explicit locks for invoking.
@@ -122,15 +122,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
/* Required only for V1 bindings, as v2 can manage it from DT itself */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
- struct device_list_opp *list_dev;
- struct device_opp *dev_opp;
+ struct opp_device *opp_dev;
+ struct opp_table *opp_table;
struct device *dev;
int cpu, ret = 0;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(cpu_dev);
- if (IS_ERR(dev_opp)) {
+ opp_table = _find_opp_table(cpu_dev);
+ if (IS_ERR(opp_table)) {
ret = -EINVAL;
goto unlock;
}
@@ -146,15 +146,15 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
continue;
}
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
__func__, cpu);
continue;
}
}
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
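A cpufreq driver on a v1 platform would typically call this once per policy; a sketch assuming policy->cpus names all CPUs sharing cpu_dev's clock and voltage rails:

static int foo_mark_shared(struct device *cpu_dev,
			   struct cpufreq_policy *policy)
{
	/* All CPUs in the policy share cpu_dev's OPP table (v1 bindings) */
	return dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
}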
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
index ddfe4773e922..ef1ae6b52042 100644
--- a/drivers/base/power/opp/debugfs.c
+++ b/drivers/base/power/opp/debugfs.c
@@ -34,9 +34,9 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
debugfs_remove_recursive(opp->dentry);
}
-int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp)
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
- struct dentry *pdentry = dev_opp->dentry;
+ struct dentry *pdentry = opp_table->dentry;
struct dentry *d;
char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */
@@ -83,52 +83,52 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp)
return 0;
}
-static int device_opp_debug_create_dir(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static int opp_list_debug_create_dir(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- const struct device *dev = list_dev->dev;
+ const struct device *dev = opp_dev->dev;
struct dentry *d;
- opp_set_dev_name(dev, dev_opp->dentry_name);
+ opp_set_dev_name(dev, opp_table->dentry_name);
/* Create device specific directory */
- d = debugfs_create_dir(dev_opp->dentry_name, rootdir);
+ d = debugfs_create_dir(opp_table->dentry_name, rootdir);
if (!d) {
dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
return -ENOMEM;
}
- list_dev->dentry = d;
- dev_opp->dentry = d;
+ opp_dev->dentry = d;
+ opp_table->dentry = d;
return 0;
}
-static int device_opp_debug_create_link(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static int opp_list_debug_create_link(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- const struct device *dev = list_dev->dev;
+ const struct device *dev = opp_dev->dev;
char name[NAME_MAX];
struct dentry *d;
- opp_set_dev_name(list_dev->dev, name);
+ opp_set_dev_name(opp_dev->dev, name);
/* Create device specific directory link */
- d = debugfs_create_symlink(name, rootdir, dev_opp->dentry_name);
+ d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
if (!d) {
dev_err(dev, "%s: Failed to create link\n", __func__);
return -ENOMEM;
}
- list_dev->dentry = d;
+ opp_dev->dentry = d;
return 0;
}
/**
* opp_debug_register - add a device opp node to the debugfs 'opp' directory
- * @list_dev: list-dev pointer for device
- * @dev_opp: the device-opp being added
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the opp_table being added
*
* Dynamically adds a device-specific directory in the debugfs 'opp' directory. If
* the opp_table is shared with other devices, then links will be created for all
@@ -136,73 +136,72 @@ static int device_opp_debug_create_link(struct device_list_opp *list_dev,
*
* Return: 0 on success, otherwise negative error.
*/
-int opp_debug_register(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
{
if (!rootdir) {
pr_debug("%s: Uninitialized rootdir\n", __func__);
return -EINVAL;
}
- if (dev_opp->dentry)
- return device_opp_debug_create_link(list_dev, dev_opp);
+ if (opp_table->dentry)
+ return opp_list_debug_create_link(opp_dev, opp_table);
- return device_opp_debug_create_dir(list_dev, dev_opp);
+ return opp_list_debug_create_dir(opp_dev, opp_table);
}
-static void opp_migrate_dentry(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static void opp_migrate_dentry(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- struct device_list_opp *new_dev;
+ struct opp_device *new_dev;
const struct device *dev;
struct dentry *dentry;
- /* Look for next list-dev */
- list_for_each_entry(new_dev, &dev_opp->dev_list, node)
- if (new_dev != list_dev)
+ /* Look for next opp-dev */
+ list_for_each_entry(new_dev, &opp_table->dev_list, node)
+ if (new_dev != opp_dev)
break;
/* new_dev is guaranteed to be valid here */
dev = new_dev->dev;
debugfs_remove_recursive(new_dev->dentry);
- opp_set_dev_name(dev, dev_opp->dentry_name);
+ opp_set_dev_name(dev, opp_table->dentry_name);
- dentry = debugfs_rename(rootdir, list_dev->dentry, rootdir,
- dev_opp->dentry_name);
+ dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
+ opp_table->dentry_name);
if (!dentry) {
dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
- __func__, dev_name(list_dev->dev), dev_name(dev));
+ __func__, dev_name(opp_dev->dev), dev_name(dev));
return;
}
new_dev->dentry = dentry;
- dev_opp->dentry = dentry;
+ opp_table->dentry = dentry;
}
/**
* opp_debug_unregister - remove a device opp node from debugfs opp directory
- * @list_dev: list-dev pointer for device
- * @dev_opp: the device-opp being removed
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the opp_table being removed
*
* Dynamically removes device specific directory from debugfs 'opp' directory.
*/
-void opp_debug_unregister(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+void opp_debug_unregister(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- if (list_dev->dentry == dev_opp->dentry) {
+ if (opp_dev->dentry == opp_table->dentry) {
/* Move the real dentry object under another device */
- if (!list_is_singular(&dev_opp->dev_list)) {
- opp_migrate_dentry(list_dev, dev_opp);
+ if (!list_is_singular(&opp_table->dev_list)) {
+ opp_migrate_dentry(opp_dev, opp_table);
goto out;
}
- dev_opp->dentry = NULL;
+ opp_table->dentry = NULL;
}
- debugfs_remove_recursive(list_dev->dentry);
+ debugfs_remove_recursive(opp_dev->dentry);
out:
- list_dev->dentry = NULL;
+ opp_dev->dentry = NULL;
}
static int __init opp_debug_init(void)
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index 690638ef36ee..f67f806fcf3a 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -22,13 +22,16 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+struct clk;
+struct regulator;
+
/* Lock to allow exclusive modification to the device and opp lists */
-extern struct mutex dev_opp_list_lock;
+extern struct mutex opp_table_lock;
/*
* Internal data structure organization with the OPP layer library is as
* follows:
- * dev_opp_list (root)
+ * opp_tables (root)
* |- device 1 (represents voltage domain 1)
* | |- opp 1 (availability, freq, voltage)
* | |- opp 2 ..
@@ -37,18 +40,18 @@ extern struct mutex dev_opp_list_lock;
* |- device 2 (represents the next voltage domain)
* ...
* `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
+ * device 1, 2.. are represented by opp_table structure while each opp
* is represented by the opp structure.
*/
/**
* struct dev_pm_opp - Generic OPP description structure
- * @node: opp list node. The nodes are maintained throughout the lifetime
+ * @node: opp table node. The nodes are maintained throughout the lifetime
* of boot. It is expected that only an optimal set of OPPs is
* added to the library by the SoC framework.
- * RCU usage: opp list is traversed with RCU locks. node
+ * RCU usage: opp table is traversed with RCU locks. node
* modification is possible at runtime, hence the modifications
- * are protected by the dev_opp_list_lock for integrity.
+ * are protected by the opp_table_lock for integrity.
* IMPORTANT: the opp nodes should be maintained in increasing
* order.
* @available: true/false - marks whether this OPP is available or not
@@ -62,7 +65,7 @@ extern struct mutex dev_opp_list_lock;
* @u_amp: Maximum current drawn by the device in microamperes
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
- * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
* @rcu_head: RCU callback head used for deferred freeing
* @np: OPP's device node.
* @dentry: debugfs dentry pointer (per opp)
@@ -84,7 +87,7 @@ struct dev_pm_opp {
unsigned long u_amp;
unsigned long clock_latency_ns;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct rcu_head rcu_head;
struct device_node *np;
@@ -95,16 +98,16 @@ struct dev_pm_opp {
};
/**
- * struct device_list_opp - devices managed by 'struct device_opp'
+ * struct opp_device - devices managed by 'struct opp_table'
* @node: list node
* @dev: device to which the struct object belongs
* @rcu_head: RCU callback head used for deferred freeing
* @dentry: debugfs dentry pointer (per device)
*
- * This is an internal data structure maintaining the list of devices that are
- * managed by 'struct device_opp'.
+ * This is an internal data structure maintaining the devices that are managed
+ * by 'struct opp_table'.
*/
-struct device_list_opp {
+struct opp_device {
struct list_head node;
const struct device *dev;
struct rcu_head rcu_head;
@@ -115,16 +118,16 @@ struct device_list_opp {
};
/**
- * struct device_opp - Device opp structure
- * @node: list node - contains the devices with OPPs that
+ * struct opp_table - Device opp structure
+ * @node: table node - contains the devices with OPPs that
* have been registered. Nodes once added are not modified in this
- * list.
- * RCU usage: nodes are not modified in the list of device_opp,
- * however addition is possible and is secured by dev_opp_list_lock
+ * table.
+ * RCU usage: nodes are not modified in the table of opp_table,
+ * however addition is possible and is secured by opp_table_lock
* @srcu_head: notifier head to notify the OPP availability changes.
* @rcu_head: RCU callback head used for deferred freeing
* @dev_list: list of devices that share these OPPs
- * @opp_list: list of opps
+ * @opp_list: table of opps
* @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @shared_opp: OPP is shared between multiple devices.
@@ -132,9 +135,13 @@ struct device_list_opp {
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
+ * @clk: Device's clock handle
+ * @regulator: Supply regulator
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
+ * @voltage_tolerance_v1: In percentage, for v1 bindings only.
+ *
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared to users as it is
* meant for book keeping and private to OPP library.
@@ -143,7 +150,7 @@ struct device_list_opp {
* need to wait for the grace period of both of them before freeing any
* resources. And so we have used kfree_rcu() from within call_srcu() handlers.
*/
-struct device_opp {
+struct opp_table {
struct list_head node;
struct srcu_notifier_head srcu_head;
@@ -153,12 +160,18 @@ struct device_opp {
struct device_node *np;
unsigned long clock_latency_ns_max;
+
+ /* For backward compatibility with v1 bindings */
+ unsigned int voltage_tolerance_v1;
+
bool shared_opp;
struct dev_pm_opp *suspend_opp;
unsigned int *supported_hw;
unsigned int supported_hw_count;
const char *prop_name;
+ struct clk *clk;
+ struct regulator *regulator;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
@@ -167,30 +180,27 @@ struct device_opp {
};
/* Routines internal to opp core */
-struct device_opp *_find_device_opp(struct device *dev);
-struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp);
+struct opp_table *_find_opp_table(struct device *dev);
+struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
struct device_node *_of_get_opp_desc_node(struct device *dev);
#ifdef CONFIG_DEBUG_FS
void opp_debug_remove_one(struct dev_pm_opp *opp);
-int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp);
-int opp_debug_register(struct device_list_opp *list_dev,
- struct device_opp *dev_opp);
-void opp_debug_unregister(struct device_list_opp *list_dev,
- struct device_opp *dev_opp);
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
#else
static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
static inline int opp_debug_create_one(struct dev_pm_opp *opp,
- struct device_opp *dev_opp)
+ struct opp_table *opp_table)
{ return 0; }
-static inline int opp_debug_register(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static inline int opp_debug_register(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{ return 0; }
-static inline void opp_debug_unregister(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static inline void opp_debug_unregister(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{ }
#endif /* DEBUG_FS */
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index a311cfa4c5bd..a6975795e7f3 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
}
EXPORT_SYMBOL(generate_pm_trace);
-extern char __tracedata_start, __tracedata_end;
+extern char __tracedata_start[], __tracedata_end[];
static int show_file_hash(unsigned int value)
{
int match;
char *tracedata;
match = 0;
- for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
+ for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
tracedata += 2 + sizeof(unsigned long)) {
unsigned short lineno = *(unsigned short *)tracedata;
const char *file = *(const char **)(tracedata + 2);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a1e0b9ab847a..5fb7718f256c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -246,6 +246,8 @@ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
return -EEXIST;
}
dev->power.wakeup = ws;
+ if (dev->power.wakeirq)
+ device_wakeup_attach_irq(dev, dev->power.wakeirq);
spin_unlock_irq(&dev->power.lock);
return 0;
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index c359351d50f1..7f692accdc90 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,7 +21,7 @@
static inline bool is_pset_node(struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_PDATA;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
}
static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
@@ -218,7 +218,8 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
bool ret;
ret = __fwnode_property_present(fwnode, propname);
- if (ret == false && fwnode && fwnode->secondary)
+ if (ret == false && !IS_ERR_OR_NULL(fwnode) &&
+ !IS_ERR_OR_NULL(fwnode->secondary))
ret = __fwnode_property_present(fwnode->secondary, propname);
return ret;
}
@@ -423,7 +424,8 @@ EXPORT_SYMBOL_GPL(device_property_match_string);
int _ret_; \
_ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \
_val_, _nval_); \
- if (_ret_ == -EINVAL && _fwnode_ && _fwnode_->secondary) \
+ if (_ret_ == -EINVAL && !IS_ERR_OR_NULL(_fwnode_) && \
+ !IS_ERR_OR_NULL(_fwnode_->secondary)) \
_ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \
_proptype_, _val_, _nval_); \
_ret_; \
@@ -593,7 +595,8 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
int ret;
ret = __fwnode_property_read_string_array(fwnode, propname, val, nval);
- if (ret == -EINVAL && fwnode && fwnode->secondary)
+ if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
+ !IS_ERR_OR_NULL(fwnode->secondary))
ret = __fwnode_property_read_string_array(fwnode->secondary,
propname, val, nval);
return ret;
@@ -621,7 +624,8 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
int ret;
ret = __fwnode_property_read_string(fwnode, propname, val);
- if (ret == -EINVAL && fwnode && fwnode->secondary)
+ if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
+ !IS_ERR_OR_NULL(fwnode->secondary))
ret = __fwnode_property_read_string(fwnode->secondary,
propname, val);
return ret;
@@ -647,7 +651,7 @@ int fwnode_property_match_string(struct fwnode_handle *fwnode,
const char *propname, const char *string)
{
const char **values;
- int nval, ret, i;
+ int nval, ret;
nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0);
if (nval < 0)
@@ -664,13 +668,9 @@ int fwnode_property_match_string(struct fwnode_handle *fwnode,
if (ret < 0)
goto out;
- ret = -ENODATA;
- for (i = 0; i < nval; i++) {
- if (!strcmp(values[i], string)) {
- ret = i;
- break;
- }
- }
+ ret = match_string(values, nval, string);
+ if (ret < 0)
+ ret = -ENODATA;
out:
kfree(values);
return ret;
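The rewrite leans on the generic match_string() helper, which returns the index of the first match or a negative errno, exactly what the removed open-coded loop computed. Illustratively (the modes array is hypothetical):

static int foo_mode_to_index(const char *mode)
{
	static const char * const modes[] = { "slow", "fast", "turbo" };

	/* "fast" yields 1; an unknown string yields a negative errno,
	 * which fwnode_property_match_string() remaps to -ENODATA */
	return match_string(modes, ARRAY_SIZE(modes), mode);
}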
@@ -820,11 +820,16 @@ void device_remove_property_set(struct device *dev)
* the pset. If there is no real firmware node (ACPI/DT) primary
* will hold the pset.
*/
- if (!is_pset_node(fwnode))
- fwnode = fwnode->secondary;
- if (!IS_ERR(fwnode) && is_pset_node(fwnode))
+ if (is_pset_node(fwnode)) {
+ set_primary_fwnode(dev, NULL);
pset_free_set(to_pset_node(fwnode));
- set_secondary_fwnode(dev, NULL);
+ } else {
+ fwnode = fwnode->secondary;
+ if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+ set_secondary_fwnode(dev, NULL);
+ pset_free_set(to_pset_node(fwnode));
+ }
+ }
}
EXPORT_SYMBOL_GPL(device_remove_property_set);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 3df977054781..a0380338946a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -13,6 +13,7 @@
#ifndef _REGMAP_INTERNAL_H
#define _REGMAP_INTERNAL_H
+#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/fs.h>
#include <linux/list.h>
@@ -110,6 +111,7 @@ struct regmap {
/* number of bits to (left) shift the reg value when formatting*/
int reg_shift;
int reg_stride;
+ int reg_stride_order;
/* regcache specific members */
const struct regcache_ops *cache_ops;
@@ -263,4 +265,19 @@ static inline const char *regmap_name(const struct regmap *map)
return map->name;
}
+static inline unsigned int regmap_get_offset(const struct regmap *map,
+ unsigned int index)
+{
+ if (map->reg_stride_order >= 0)
+ return index << map->reg_stride_order;
+ else
+ return index * map->reg_stride;
+}
+
+static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
+ unsigned int reg)
+{
+ return reg >> map->reg_stride_order;
+}
+
#endif
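These helpers let hot paths replace a multiplication with a shift whenever the stride is a power of two; __regmap_init() (later in this diff) sets reg_stride_order to ilog2(reg_stride) in that case and to -1 otherwise. For instance, stride 4 gives order 2, so index 3 maps to offset 3 << 2 = 12, the same as 3 * 4. A userspace mirror of the idea, for illustration:

#include <stdio.h>

/* Mirrors regmap_get_offset(): shift when the stride is a power of two */
static unsigned int get_offset(int stride_order, unsigned int stride,
			       unsigned int index)
{
	return stride_order >= 0 ? index << stride_order : index * stride;
}

int main(void)
{
	printf("%u\n", get_offset(2, 4, 3));	/* prints 12 */
	printf("%u\n", get_offset(-1, 3, 3));	/* prints 9 */
	return 0;
}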
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 686c9e0b930e..3ee72550b1e3 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -16,20 +16,30 @@
#include "internal.h"
+static inline unsigned int regcache_flat_get_index(const struct regmap *map,
+ unsigned int reg)
+{
+ return regcache_get_index_by_order(map, reg);
+}
+
static int regcache_flat_init(struct regmap *map)
{
int i;
unsigned int *cache;
- map->cache = kcalloc(map->max_register + 1, sizeof(unsigned int),
- GFP_KERNEL);
+ if (!map || map->reg_stride_order < 0)
+ return -EINVAL;
+
+ map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
+ + 1, sizeof(unsigned int), GFP_KERNEL);
if (!map->cache)
return -ENOMEM;
cache = map->cache;
for (i = 0; i < map->num_reg_defaults; i++)
- cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
+ cache[regcache_flat_get_index(map, map->reg_defaults[i].reg)] =
+ map->reg_defaults[i].def;
return 0;
}
@@ -47,7 +57,7 @@ static int regcache_flat_read(struct regmap *map,
{
unsigned int *cache = map->cache;
- *value = cache[reg];
+ *value = cache[regcache_flat_get_index(map, reg)];
return 0;
}
@@ -57,7 +67,7 @@ static int regcache_flat_write(struct regmap *map, unsigned int reg,
{
unsigned int *cache = map->cache;
- cache[reg] = value;
+ cache[regcache_flat_get_index(map, reg)] = value;
return 0;
}
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 348be3a35410..4170b7d95276 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -30,7 +30,7 @@ static int regcache_hw_init(struct regmap *map)
int i, j;
int ret;
int count;
- unsigned int val;
+ unsigned int reg, val;
void *tmp_buf;
if (!map->num_reg_defaults_raw)
@@ -57,7 +57,7 @@ static int regcache_hw_init(struct regmap *map)
bool cache_bypass = map->cache_bypass;
dev_warn(map->dev, "No cache defaults, reading back from HW\n");
- /* Bypass the cache access till data read from HW*/
+ /* Bypass the cache access till data read from HW */
map->cache_bypass = true;
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
if (!tmp_buf) {
@@ -65,29 +65,48 @@ static int regcache_hw_init(struct regmap *map)
goto err_free;
}
ret = regmap_raw_read(map, 0, tmp_buf,
- map->num_reg_defaults_raw);
+ map->cache_size_raw);
map->cache_bypass = cache_bypass;
- if (ret < 0)
- goto err_cache_free;
-
- map->reg_defaults_raw = tmp_buf;
- map->cache_free = 1;
+ if (ret == 0) {
+ map->reg_defaults_raw = tmp_buf;
+ map->cache_free = 1;
+ } else {
+ kfree(tmp_buf);
+ }
}
/* fill the reg_defaults */
for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
- if (regmap_volatile(map, i * map->reg_stride))
+ reg = i * map->reg_stride;
+
+ if (!regmap_readable(map, reg))
continue;
- val = regcache_get_val(map, map->reg_defaults_raw, i);
- map->reg_defaults[j].reg = i * map->reg_stride;
+
+ if (regmap_volatile(map, reg))
+ continue;
+
+ if (map->reg_defaults_raw) {
+ val = regcache_get_val(map, map->reg_defaults_raw, i);
+ } else {
+ bool cache_bypass = map->cache_bypass;
+
+ map->cache_bypass = true;
+ ret = regmap_read(map, reg, &val);
+ map->cache_bypass = cache_bypass;
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read %d: %d\n",
+ reg, ret);
+ goto err_free;
+ }
+ }
+
+ map->reg_defaults[j].reg = reg;
map->reg_defaults[j].def = val;
j++;
}
return 0;
-err_cache_free:
- kfree(tmp_buf);
err_free:
kfree(map->reg_defaults);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 9b0d202414d0..26f799e71c82 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -379,6 +379,7 @@ static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, data);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
+ irq_set_parent(virq, data->irq);
irq_set_noprobe(virq);
return 0;
@@ -655,13 +656,34 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
*
* @irq: Primary IRQ for the device
* @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ *
+ * This function also disposes of all IRQ mappings on the chip.
*/
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
+ unsigned int virq;
+ int hwirq;
+
if (!d)
return;
free_irq(irq, d);
+
+ /* Dispose all virtual irq from irq domain before removing it */
+ for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
+ /* Ignore this hwirq if it is a hole in the IRQ list */
+ if (!d->chip->irqs[hwirq].mask)
+ continue;
+
+ /*
+ * Find the virtual irq mapped to this hwirq on the chip
+ * and, if one exists, dispose of it
+ */
+ virq = irq_find_mapping(d->domain, hwirq);
+ if (virq)
+ irq_dispose_mapping(virq);
+ }
+
irq_domain_remove(d->domain);
kfree(d->type_buf);
kfree(d->type_buf_def);
@@ -674,6 +696,88 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+static void devm_regmap_irq_chip_release(struct device *dev, void *res)
+{
+ struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
+
+ regmap_del_irq_chip(d->irq, d);
+}
+
+static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
+{
+ struct regmap_irq_chip_data **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_regmap_add_irq_chip(): Resource managed regmap_add_irq_chip()
+ *
+ * @dev: The device pointer to which the irq_chip belongs.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The regmap_irq_chip data will automatically be released when the device is
+ * unbound.
+ */
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ struct regmap_irq_chip_data **ptr, *d;
+ int ret;
+
+ ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
+ chip, &d);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = d;
+ devres_add(dev, ptr);
+ *data = d;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
+
+/**
+ * devm_regmap_del_irq_chip(): Resource managed regmap_del_irq_chip()
+ *
+ * @dev: Device for which the resource was allocated.
+ * @irq: Primary IRQ for the device
+ * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ */
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+ struct regmap_irq_chip_data *data)
+{
+ int rc;
+
+ WARN_ON(irq != data->irq);
+ rc = devres_release(dev, devm_regmap_irq_chip_release,
+ devm_regmap_irq_chip_match, data);
+
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
+
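Typical use of the devres wrapper from a probe path; everything prefixed foo_ (and the FOO_IRQ_ALERT index) is hypothetical:

static int foo_setup_irqs(struct device *dev, struct regmap *map, int irq)
{
	struct regmap_irq_chip_data *irq_data;
	int ret;

	ret = devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT, 0,
				       &foo_irq_chip, &irq_data);
	if (ret)
		return ret;

	/* irq_data is torn down automatically when dev is unbound */
	return regmap_irq_get_virq(irq_data, FOO_IRQ_ALERT);
}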
/**
* regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
*
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index eea51569f0eb..5189fd6182f6 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -23,28 +23,18 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+#include "internal.h"
+
struct regmap_mmio_context {
void __iomem *regs;
- unsigned reg_bytes;
unsigned val_bytes;
- unsigned pad_bytes;
struct clk *clk;
-};
-static inline void regmap_mmio_regsize_check(size_t reg_size)
-{
- switch (reg_size) {
- case 1:
- case 2:
- case 4:
-#ifdef CONFIG_64BIT
- case 8:
-#endif
- break;
- default:
- BUG();
- }
-}
+ void (*reg_write)(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val);
+ unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
+ unsigned int reg);
+};
static int regmap_mmio_regbits_check(size_t reg_bits)
{
@@ -88,72 +78,62 @@ static int regmap_mmio_get_min_stride(size_t val_bits)
return min_stride;
}
-static inline void regmap_mmio_count_check(size_t count, u32 offset)
+static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeb(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writew(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ iowrite16be(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
{
- BUG_ON(count <= offset);
+ writel(val, ctx->regs + reg);
}
-static inline unsigned int
-regmap_mmio_get_offset(const void *reg, size_t reg_size)
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
{
- switch (reg_size) {
- case 1:
- return *(u8 *)reg;
- case 2:
- return *(u16 *)reg;
- case 4:
- return *(u32 *)reg;
+ iowrite32be(val, ctx->regs + reg);
+}
+
#ifdef CONFIG_64BIT
- case 8:
- return *(u64 *)reg;
-#endif
- default:
- BUG();
- }
+static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeq(val, ctx->regs + reg);
}
+#endif
-static int regmap_mmio_gather_write(void *context,
- const void *reg, size_t reg_size,
- const void *val, size_t val_size)
+static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
{
struct regmap_mmio_context *ctx = context;
- unsigned int offset;
int ret;
- regmap_mmio_regsize_check(reg_size);
-
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
- offset = regmap_mmio_get_offset(reg, reg_size);
-
- while (val_size) {
- switch (ctx->val_bytes) {
- case 1:
- writeb(*(u8 *)val, ctx->regs + offset);
- break;
- case 2:
- writew(*(u16 *)val, ctx->regs + offset);
- break;
- case 4:
- writel(*(u32 *)val, ctx->regs + offset);
- break;
-#ifdef CONFIG_64BIT
- case 8:
- writeq(*(u64 *)val, ctx->regs + offset);
- break;
-#endif
- default:
- /* Should be caught by regmap_mmio_check_config */
- BUG();
- }
- val_size -= ctx->val_bytes;
- val += ctx->val_bytes;
- offset += ctx->val_bytes;
- }
+ ctx->reg_write(ctx, reg, val);
if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
@@ -161,59 +141,56 @@ static int regmap_mmio_gather_write(void *context,
return 0;
}
-static int regmap_mmio_write(void *context, const void *data, size_t count)
+static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
{
- struct regmap_mmio_context *ctx = context;
- unsigned int offset = ctx->reg_bytes + ctx->pad_bytes;
+ return readb(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readw(ctx->regs + reg);
+}
- regmap_mmio_count_check(count, offset);
+static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16be(ctx->regs + reg);
+}
- return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
- data + offset, count - offset);
+static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readl(ctx->regs + reg);
}
-static int regmap_mmio_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread32be(ctx->regs + reg);
+}
+
+#ifdef CONFIG_64BIT
+static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readq(ctx->regs + reg);
+}
+#endif
+
+static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
{
struct regmap_mmio_context *ctx = context;
- unsigned int offset;
int ret;
- regmap_mmio_regsize_check(reg_size);
-
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
- offset = regmap_mmio_get_offset(reg, reg_size);
-
- while (val_size) {
- switch (ctx->val_bytes) {
- case 1:
- *(u8 *)val = readb(ctx->regs + offset);
- break;
- case 2:
- *(u16 *)val = readw(ctx->regs + offset);
- break;
- case 4:
- *(u32 *)val = readl(ctx->regs + offset);
- break;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 *)val = readq(ctx->regs + offset);
- break;
-#endif
- default:
- /* Should be caught by regmap_mmio_check_config */
- BUG();
- }
- val_size -= ctx->val_bytes;
- val += ctx->val_bytes;
- offset += ctx->val_bytes;
- }
+ *val = ctx->reg_read(ctx, reg);
if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
@@ -232,14 +209,12 @@ static void regmap_mmio_free_context(void *context)
kfree(context);
}
-static struct regmap_bus regmap_mmio = {
+static const struct regmap_bus regmap_mmio = {
.fast_io = true,
- .write = regmap_mmio_write,
- .gather_write = regmap_mmio_gather_write,
- .read = regmap_mmio_read,
+ .reg_write = regmap_mmio_write,
+ .reg_read = regmap_mmio_read,
.free_context = regmap_mmio_free_context,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
@@ -265,24 +240,71 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
if (config->reg_stride < min_stride)
return ERR_PTR(-EINVAL);
- switch (config->reg_format_endian) {
- case REGMAP_ENDIAN_DEFAULT:
- case REGMAP_ENDIAN_NATIVE:
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
- ctx->reg_bytes = config->reg_bits / 8;
- ctx->pad_bytes = config->pad_bits / 8;
ctx->clk = ERR_PTR(-ENODEV);
+ switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
+ case REGMAP_ENDIAN_DEFAULT:
+ case REGMAP_ENDIAN_LITTLE:
+#ifdef __LITTLE_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ switch (config->val_bits) {
+ case 8:
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ break;
+ case 16:
+ ctx->reg_read = regmap_mmio_read16le;
+ ctx->reg_write = regmap_mmio_write16le;
+ break;
+ case 32:
+ ctx->reg_read = regmap_mmio_read32le;
+ ctx->reg_write = regmap_mmio_write32le;
+ break;
+#ifdef CONFIG_64BIT
+ case 64:
+ ctx->reg_read = regmap_mmio_read64le;
+ ctx->reg_write = regmap_mmio_write64le;
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ case REGMAP_ENDIAN_BIG:
+#ifdef __BIG_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ switch (config->val_bits) {
+ case 8:
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ break;
+ case 16:
+ ctx->reg_read = regmap_mmio_read16be;
+ ctx->reg_write = regmap_mmio_write16be;
+ break;
+ case 32:
+ ctx->reg_read = regmap_mmio_read32be;
+ ctx->reg_write = regmap_mmio_write32be;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+
if (clk_id == NULL)
return ctx;
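With the conversion above, an MMIO regmap defaults to little-endian accessors and honours explicit or DT-specified endianness through regmap_get_val_endian(). A hypothetical registration selecting big-endian 32-bit I/O (ioread32be/iowrite32be):

static const struct regmap_config foo_mmio_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.val_format_endian = REGMAP_ENDIAN_BIG,
	.max_register = 0xfc,
};

static int foo_mmio_setup(struct device *dev, void __iomem *base)
{
	struct regmap *map = devm_regmap_init_mmio(dev, base, &foo_mmio_cfg);

	return PTR_ERR_OR_ZERO(map);
}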
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 7e58f6560399..4a36e415e938 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
while (val_size) {
len = min_t(size_t, val_size, 8);
- err = spmi_ext_register_readl(context, addr, val, val_size);
+ err = spmi_ext_register_readl(context, addr, val, len);
if (err)
goto err_out;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ee54e841de4a..df2d2ef5d6b3 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -19,6 +19,7 @@
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
+#include <linux/log2.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -557,6 +558,8 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
endian = REGMAP_ENDIAN_BIG;
else if (of_property_read_bool(np, "little-endian"))
endian = REGMAP_ENDIAN_LITTLE;
+ else if (of_property_read_bool(np, "native-endian"))
+ endian = REGMAP_ENDIAN_NATIVE;
/* If the endianness was specified in DT, use that */
if (endian != REGMAP_ENDIAN_DEFAULT)
@@ -638,6 +641,10 @@ struct regmap *__regmap_init(struct device *dev,
map->reg_stride = config->reg_stride;
else
map->reg_stride = 1;
+ if (is_power_of_2(map->reg_stride))
+ map->reg_stride_order = ilog2(map->reg_stride);
+ else
+ map->reg_stride_order = -1;
map->use_single_read = config->use_single_rw || !bus || !bus->read;
map->use_single_write = config->use_single_rw || !bus || !bus->write;
map->can_multi_write = config->can_multi_write && bus && bus->write;
@@ -1308,7 +1315,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (map->writeable_reg)
for (i = 0; i < val_len / map->format.val_bytes; i++)
if (!map->writeable_reg(map->dev,
- reg + (i * map->reg_stride)))
+ reg + regmap_get_offset(map, i)))
return -EINVAL;
if (!map->cache_bypass && map->format.parse_val) {
@@ -1316,7 +1323,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
int val_bytes = map->format.val_bytes;
for (i = 0; i < val_len / val_bytes; i++) {
ival = map->format.parse_val(val + (i * val_bytes));
- ret = regcache_write(map, reg + (i * map->reg_stride),
+ ret = regcache_write(map,
+ reg + regmap_get_offset(map, i),
ival);
if (ret) {
dev_err(map->dev,
@@ -1690,100 +1698,63 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_raw_write);
/**
- * regmap_field_write(): Write a value to a single register field
- *
- * @field: Register field to write to
- * @val: Value to be written
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_field_write(struct regmap_field *field, unsigned int val)
-{
- return regmap_update_bits(field->regmap, field->reg,
- field->mask, val << field->shift);
-}
-EXPORT_SYMBOL_GPL(regmap_field_write);
-
-/**
- * regmap_field_update_bits(): Perform a read/modify/write cycle
- * on the register field
+ * regmap_field_update_bits_base():
+ * Perform a read/modify/write cycle on the register field
+ * with change, async and force options
*
* @field: Register field to write to
* @mask: Bitmask to change
* @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to perform the write asynchronously
+ * @force: Boolean indicating whether to force the write even if the value
+ * is unchanged
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
+int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
mask = (mask << field->shift) & field->mask;
- return regmap_update_bits(field->regmap, field->reg,
- mask, val << field->shift);
+ return regmap_update_bits_base(field->regmap, field->reg,
+ mask, val << field->shift,
+ change, async, force);
}
-EXPORT_SYMBOL_GPL(regmap_field_update_bits);
+EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
/**
- * regmap_fields_write(): Write a value to a single register field with port ID
- *
- * @field: Register field to write to
- * @id: port ID
- * @val: Value to be written
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_fields_write(struct regmap_field *field, unsigned int id,
- unsigned int val)
-{
- if (id >= field->id_size)
- return -EINVAL;
-
- return regmap_update_bits(field->regmap,
- field->reg + (field->id_offset * id),
- field->mask, val << field->shift);
-}
-EXPORT_SYMBOL_GPL(regmap_fields_write);
-
-int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
- unsigned int val)
-{
- if (id >= field->id_size)
- return -EINVAL;
-
- return regmap_write_bits(field->regmap,
- field->reg + (field->id_offset * id),
- field->mask, val << field->shift);
-}
-EXPORT_SYMBOL_GPL(regmap_fields_force_write);
-
-/**
- * regmap_fields_update_bits(): Perform a read/modify/write cycle
- * on the register field
+ * regmap_fields_update_bits_base():
+ * Perform a read/modify/write cycle on the register field
+ * with change, async and force options
*
* @field: Register field to write to
* @id: port ID
* @mask: Bitmask to change
* @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to perform the update asynchronously
+ * @force: Boolean indicating whether to write even if the value is unchanged
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
- unsigned int mask, unsigned int val)
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
if (id >= field->id_size)
return -EINVAL;
mask = (mask << field->shift) & field->mask;
- return regmap_update_bits(field->regmap,
- field->reg + (field->id_offset * id),
- mask, val << field->shift);
+ return regmap_update_bits_base(field->regmap,
+ field->reg + (field->id_offset * id),
+ mask, val << field->shift,
+ change, async, force);
}
-EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
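
With the consolidation above, the deleted field helpers survive as thin wrappers over the new *_update_bits_base() entry points; the include/linux/regmap.h side of this series defines them roughly like this (~0 as the mask selects the whole field, since the function masks with field->mask before writing):

    #define regmap_field_write(field, val) \
            regmap_field_update_bits_base(field, ~0, val, NULL, false, false)
    #define regmap_fields_write(field, id, val) \
            regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false)
    #define regmap_fields_force_write(field, id, val) \
            regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true)
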
/*
* regmap_bulk_write(): Write multiple registers to the device
@@ -1846,8 +1817,9 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
goto out;
}
- ret = _regmap_write(map, reg + (i * map->reg_stride),
- ival);
+ ret = _regmap_write(map,
+ reg + regmap_get_offset(map, i),
+ ival);
if (ret != 0)
goto out;
}
@@ -2253,6 +2225,9 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
WARN_ON(!map->bus);
+ if (!map->bus || !map->bus->read)
+ return -EINVAL;
+
range = _regmap_range_lookup(map, reg);
if (range) {
ret = _regmap_select_page(map, &reg, range,
@@ -2416,7 +2391,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
* cost as we expect to hit the cache.
*/
for (i = 0; i < val_count; i++) {
- ret = _regmap_read(map, reg + (i * map->reg_stride),
+ ret = _regmap_read(map, reg + regmap_get_offset(map, i),
&v);
if (ret != 0)
goto out;
@@ -2568,7 +2543,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
} else {
for (i = 0; i < val_count; i++) {
unsigned int ival;
- ret = regmap_read(map, reg + (i * map->reg_stride),
+ ret = regmap_read(map, reg + regmap_get_offset(map, i),
&ival);
if (ret != 0)
return ret;
@@ -2648,138 +2623,36 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
}
/**
- * regmap_update_bits: Perform a read/modify/write cycle on the register map
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- int ret;
-
- map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
- map->unlock(map->lock_arg);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits);
-
-/**
- * regmap_write_bits: Perform a read/modify/write cycle on the register map
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_write_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- int ret;
-
- map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
- map->unlock(map->lock_arg);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_write_bits);
-
-/**
- * regmap_update_bits_async: Perform a read/modify/write cycle on the register
- * map asynchronously
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * With most buses the read must be done synchronously so this is most
- * useful for devices with a cache which do not need to interact with
- * the hardware to determine the current register value.
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits_async(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- int ret;
-
- map->lock(map->lock_arg);
-
- map->async = true;
-
- ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
-
- map->async = false;
-
- map->unlock(map->lock_arg);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits_async);
-
-/**
- * regmap_update_bits_check: Perform a read/modify/write cycle on the
- * register map and report if updated
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- * @change: Boolean indicating if a write was done
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits_check(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change)
-{
- int ret;
-
- map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, change, false);
- map->unlock(map->lock_arg);
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits_check);
-
-/**
- * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
- * register map asynchronously and report if
- * updated
+ * regmap_update_bits_base:
+ * Perform a read/modify/write cycle on the
+ * register map with change, async and force options
*
* @map: Register map to update
* @reg: Register to update
* @mask: Bitmask to change
* @val: New value for bitmask
* @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to perform the update asynchronously
+ * @force: Boolean indicating whether to write even if the value is unchanged
*
+ * If async is true, the update is performed asynchronously.
* With most buses the read must be done synchronously so this is most
* useful for devices with a cache which do not need to interact with
* the hardware to determine the current register value.
*
* Returns zero for success, a negative number on error.
*/
-int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change)
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
int ret;
map->lock(map->lock_arg);
- map->async = true;
+ map->async = async;
- ret = _regmap_update_bits(map, reg, mask, val, change, false);
+ ret = _regmap_update_bits(map, reg, mask, val, change, force);
map->async = false;
@@ -2787,7 +2660,7 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
return ret;
}
-EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
+EXPORT_SYMBOL_GPL(regmap_update_bits_base);
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
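
Likewise, the four update-bits exports removed above remain available to callers as macros over regmap_update_bits_base(); the header side of this series defines them along these lines, with the boolean triple mapping directly onto change/async/force:

    #define regmap_update_bits(map, reg, mask, val) \
            regmap_update_bits_base(map, reg, mask, val, NULL, false, false)
    #define regmap_update_bits_async(map, reg, mask, val) \
            regmap_update_bits_base(map, reg, mask, val, NULL, true, false)
    #define regmap_update_bits_check(map, reg, mask, val, change) \
            regmap_update_bits_base(map, reg, mask, val, change, false, false)
    #define regmap_write_bits(map, reg, mask, val) \
            regmap_update_bits_base(map, reg, mask, val, NULL, false, true)
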
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 023d448ed3fa..efdc2ae8441a 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -70,6 +70,11 @@ config BCMA_DRIVER_MIPS
If unsure, say N
+config BCMA_PFLASH
+ bool
+ depends on BCMA_DRIVER_MIPS
+ default y
+
config BCMA_SFLASH
bool
depends on BCMA_DRIVER_MIPS
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index f32af9b76bcd..087948a1d20d 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -1,6 +1,7 @@
bcma-y += main.o scan.o core.o sprom.o
bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
bcma-y += driver_chipcommon_b.o
+bcma-$(CONFIG_BCMA_PFLASH) += driver_chipcommon_pflash.o
bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o
bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o
bcma-$(CONFIG_BCMA_DRIVER_PCI) += driver_pci.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 38f156745d53..eda09090cb52 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -47,10 +47,6 @@ int bcma_sprom_get(struct bcma_bus *bus);
void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-extern struct platform_device bcma_pflash_dev;
-#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* driver_chipcommon_b.c */
int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
@@ -62,6 +58,21 @@ void bcma_pmu_init(struct bcma_drv_cc *cc);
u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc);
u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc);
+/**************************************************
+ * driver_chipcommon_pflash.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_PFLASH
+extern struct platform_device bcma_pflash_dev;
+int bcma_pflash_init(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_pflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "Parallel flash not supported\n");
+ return 0;
+}
+#endif /* CONFIG_BCMA_PFLASH */
+
#ifdef CONFIG_BCMA_SFLASH
/* driver_chipcommon_sflash.c */
int bcma_sflash_init(struct bcma_drv_cc *cc);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b7c8a8d4e6d1..921ce1834673 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -15,6 +15,8 @@
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
+static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+
static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
u32 mask, u32 value)
{
@@ -113,8 +115,37 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
return 0;
}
+static void bcma_core_chipcommon_flash_detect(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+ case BCMA_CC_FLASHT_STSER:
+ case BCMA_CC_FLASHT_ATSER:
+ bcma_debug(bus, "Found serial flash\n");
+ bcma_sflash_init(cc);
+ break;
+ case BCMA_CC_FLASHT_PARA:
+ bcma_debug(bus, "Found parallel flash\n");
+ bcma_pflash_init(cc);
+ break;
+ default:
+ bcma_err(bus, "Flash type not supported\n");
+ }
+
+ if (cc->core->id.rev == 38 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
+ bcma_debug(bus, "Found NAND flash\n");
+ bcma_nflash_init(cc);
+ }
+ }
+}
+
void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
{
+ struct bcma_bus *bus = cc->core->bus;
+
if (cc->early_setup_done)
return;
@@ -129,6 +160,12 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU)
bcma_pmu_early_init(cc);
+ if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
+ bcma_chipco_serial_init(cc);
+
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ bcma_core_chipcommon_flash_detect(cc);
+
cc->early_setup_done = true;
}
@@ -185,11 +222,12 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
ticks = 2;
else if (ticks > maxt)
ticks = maxt;
- bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
} else {
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
+ bus->chipinfo.id != BCMA_CHIP_ID_BCM47094 &&
bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
bcma_core_set_clockmode(cc->core,
ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);
@@ -314,9 +352,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
return res;
}
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
{
+#if IS_BUILTIN(CONFIG_BCM47XX)
unsigned int irq;
u32 baud_base;
u32 i;
@@ -358,5 +396,5 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
ports[i].baud_base = baud_base;
ports[i].reg_shift = 0;
}
+#endif /* CONFIG_BCM47XX */
}
-#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_chipcommon_pflash.c b/drivers/bcma/driver_chipcommon_pflash.c
new file mode 100644
index 000000000000..3b497c9ee0d4
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_pflash.c
@@ -0,0 +1,49 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon parallel flash
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/bcma/bcma.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+
+static const char * const part_probes[] = { "bcm47xxpart", NULL };
+
+static struct physmap_flash_data bcma_pflash_data = {
+ .part_probe_types = part_probes,
+};
+
+static struct resource bcma_pflash_resource = {
+ .name = "bcma_pflash",
+ .flags = IORESOURCE_MEM,
+};
+
+struct platform_device bcma_pflash_dev = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &bcma_pflash_data,
+ },
+ .resource = &bcma_pflash_resource,
+ .num_resources = 1,
+};
+
+int bcma_pflash_init(struct bcma_drv_cc *cc)
+{
+ struct bcma_pflash *pflash = &cc->pflash;
+
+ pflash->present = true;
+
+ if (!(bcma_read32(cc->core, BCMA_CC_FLASH_CFG) & BCMA_CC_FLASH_CFG_DS))
+ bcma_pflash_data.width = 1;
+ else
+ bcma_pflash_data.width = 2;
+
+ bcma_pflash_resource.start = BCMA_SOC_FLASH2;
+ bcma_pflash_resource.end = BCMA_SOC_FLASH2 + BCMA_SOC_FLASH2_SZ;
+
+ return 0;
+}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index fe0d48cb1778..f1eb4d3e1d57 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -15,44 +15,44 @@
u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
- return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+ return bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
{
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
u32 set)
{
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
- bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+ bcma_pmu_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
u32 offset, u32 mask, u32 set)
{
- bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
- bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR);
+ bcma_pmu_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
u32 set)
{
- bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
- bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_REGCTL_ADDR);
+ bcma_pmu_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
@@ -60,18 +60,18 @@ static u32 bcma_pmu_xtalfreq(struct bcma_drv_cc *cc)
{
u32 ilp_ctl, alp_hz;
- if (!(bcma_cc_read32(cc, BCMA_CC_PMU_STAT) &
+ if (!(bcma_pmu_read32(cc, BCMA_CC_PMU_STAT) &
BCMA_CC_PMU_STAT_EXT_LPO_AVAIL))
return 0;
- bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
- BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
+ bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
+ BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
usleep_range(1000, 2000);
- ilp_ctl = bcma_cc_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
+ ilp_ctl = bcma_pmu_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK;
- bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
alp_hz = ilp_ctl * 32768 / 4;
return (alp_hz + 50000) / 100000 * 100;
@@ -127,8 +127,8 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
mask = (u32)~(BCMA_RES_4314_HT_AVAIL |
BCMA_RES_4314_MACPHY_CLK_AVAIL);
- bcma_cc_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
- bcma_cc_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
+ bcma_pmu_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
+ bcma_pmu_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
bcma_wait_value(cc->core, BCMA_CLKCTLST,
BCMA_CLKCTLST_HAVEHT, 0, 20000);
break;
@@ -140,7 +140,7 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
/* Flush */
if (cc->pmu.rev >= 2)
- bcma_cc_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
+ bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
/* TODO: Do we need to update OTP? */
}
@@ -195,9 +195,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
/* Set the resource masks. */
if (min_msk)
- bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
if (max_msk)
- bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
/*
* Add some delay; allow resources to come up and settle.
@@ -269,23 +269,33 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
void bcma_pmu_early_init(struct bcma_drv_cc *cc)
{
+ struct bcma_bus *bus = cc->core->bus;
u32 pmucap;
- pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
+ if (cc->core->id.rev >= 35 &&
+ cc->capabilities_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
+ cc->pmu.core = bcma_find_core(bus, BCMA_CORE_PMU);
+ if (!cc->pmu.core)
+ bcma_warn(bus, "Couldn't find expected PMU core\n");
+ }
+ if (!cc->pmu.core)
+ cc->pmu.core = cc->core;
+
+ pmucap = bcma_pmu_read32(cc, BCMA_CC_PMU_CAP);
cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
- bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
- cc->pmu.rev, pmucap);
+ bcma_debug(bus, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
+ pmucap);
}
void bcma_pmu_init(struct bcma_drv_cc *cc)
{
if (cc->pmu.rev == 1)
- bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
- ~BCMA_CC_PMU_CTL_NOILPONW);
+ bcma_pmu_mask32(cc, BCMA_CC_PMU_CTL,
+ ~BCMA_CC_PMU_CTL_NOILPONW);
else
- bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
- BCMA_CC_PMU_CTL_NOILPONW);
+ bcma_pmu_set32(cc, BCMA_CC_PMU_CTL,
+ BCMA_CC_PMU_CTL_NOILPONW);
bcma_pmu_pll_init(cc);
bcma_pmu_resources_init(cc);
@@ -472,8 +482,8 @@ u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
u32 value)
{
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
}
void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
@@ -497,20 +507,20 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
/* RMW only the P1 divider */
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
- tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
/* RMW only the int feedback divider */
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
- tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
tmp = BCMA_CC_PMU_CTL_PLL_UPD;
break;
@@ -646,7 +656,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
break;
}
- tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
- bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
+ tmp |= bcma_pmu_read32(cc, BCMA_CC_PMU_CTL);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_CTL, tmp);
}
EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
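
Every bcma_cc_* access in this file becomes bcma_pmu_* because, per the bcma_pmu_early_init() hunk above, rev >= 35 ChipCommon parts may expose the PMU as a separate bus core; the new accessors simply route through cc->pmu.core instead of cc->core. A sketch of the read/write pair as it would appear in bcma_driver_chipcommon.h:

    static inline u32 bcma_pmu_read32(struct bcma_drv_cc *cc, u16 offset)
    {
            /* pmu.core falls back to cc->core when no separate PMU core exists */
            return bcma_read32(cc->pmu.core, offset);
    }

    static inline void bcma_pmu_write32(struct bcma_drv_cc *cc, u16 offset,
                                        u32 value)
    {
            bcma_write32(cc->pmu.core, offset, value);
    }
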
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 7e11ef4cb7db..04d706ca5f43 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -38,6 +38,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
{ "M25P32", 0x15, 0x10000, 64, },
{ "M25P64", 0x16, 0x10000, 128, },
{ "M25FL128", 0x17, 0x10000, 256, },
+ { "MX25L25635F", 0x18, 0x10000, 512, },
{ NULL },
};
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 98067f757fb0..771a2a253440 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -192,6 +192,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM53572:
+ case BCMA_CHIP_ID_BCM47094:
chip->ngpio = 32;
break;
default:
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 24424f3fef96..96f171328200 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -14,8 +14,6 @@
#include <linux/bcma/bcma.h>
-#include <linux/mtd/physmap.h>
-#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
@@ -32,26 +30,6 @@ enum bcma_boot_dev {
BCMA_BOOT_DEV_NAND,
};
-static const char * const part_probes[] = { "bcm47xxpart", NULL };
-
-static struct physmap_flash_data bcma_pflash_data = {
- .part_probe_types = part_probes,
-};
-
-static struct resource bcma_pflash_resource = {
- .name = "bcma_pflash",
- .flags = IORESOURCE_MEM,
-};
-
-struct platform_device bcma_pflash_dev = {
- .name = "physmap-flash",
- .dev = {
- .platform_data = &bcma_pflash_data,
- },
- .resource = &bcma_pflash_resource,
- .num_resources = 1,
-};
-
/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
@@ -272,48 +250,11 @@ static enum bcma_boot_dev bcma_boot_dev(struct bcma_bus *bus)
return BCMA_BOOT_DEV_SERIAL;
}
-static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
+static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus = mcore->core->bus;
- struct bcma_drv_cc *cc = &bus->drv_cc;
- struct bcma_pflash *pflash = &cc->pflash;
enum bcma_boot_dev boot_dev;
- switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
- case BCMA_CC_FLASHT_STSER:
- case BCMA_CC_FLASHT_ATSER:
- bcma_debug(bus, "Found serial flash\n");
- bcma_sflash_init(cc);
- break;
- case BCMA_CC_FLASHT_PARA:
- bcma_debug(bus, "Found parallel flash\n");
- pflash->present = true;
- pflash->window = BCMA_SOC_FLASH2;
- pflash->window_size = BCMA_SOC_FLASH2_SZ;
-
- if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) &
- BCMA_CC_FLASH_CFG_DS) == 0)
- pflash->buswidth = 1;
- else
- pflash->buswidth = 2;
-
- bcma_pflash_data.width = pflash->buswidth;
- bcma_pflash_resource.start = pflash->window;
- bcma_pflash_resource.end = pflash->window + pflash->window_size;
-
- break;
- default:
- bcma_err(bus, "Flash type not supported\n");
- }
-
- if (cc->core->id.rev == 38 ||
- bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
- if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
- bcma_debug(bus, "Found NAND flash\n");
- bcma_nflash_init(cc);
- }
- }
-
/* Determine flash type this SoC boots from */
boot_dev = bcma_boot_dev(bus);
switch (boot_dev) {
@@ -337,13 +278,10 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
{
- struct bcma_bus *bus = mcore->core->bus;
-
if (mcore->early_setup_done)
return;
- bcma_chipco_serial_init(&bus->drv_cc);
- bcma_core_mips_flash_detect(mcore);
+ bcma_core_mips_nvram_init(mcore);
mcore->early_setup_done = true;
}
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 0856189c065f..cae5385cf499 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -294,7 +294,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
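
PCI_DEVICE_SUB() narrows the bridge match from every 14e4:4365 device to one specific subsystem variant. Expanded, the new table entry is equivalent to:

    { .vendor = PCI_VENDOR_ID_BROADCOM, .device = 0x4365,
      .subvendor = PCI_VENDOR_ID_DELL, .subdevice = 0x0016 },

whereas plain PCI_DEVICE() sets .subvendor and .subdevice to PCI_ANY_ID and therefore claims every board with that vendor/device pair.
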
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index c466f752b067..1f635471f318 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
return false;
}
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
struct bcma_device *core)
{
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
struct of_phandle_args out_irq;
int ret;
- if (!parent || !parent->dev.of_node)
+ if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
return 0;
ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
{
struct device_node *node;
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return;
+
node = bcma_of_find_child_device(parent, core);
if (node)
core->dev.of_node = node;
core->irq = bcma_of_get_irq(parent, core, 0);
}
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
- struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
- struct bcma_device *core, int num)
-{
- return 0;
-}
-#endif /* CONFIG_OF */
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
@@ -350,7 +341,7 @@ static int bcma_register_devices(struct bcma_bus *bus)
bcma_register_core(bus, core);
}
-#ifdef CONFIG_BCMA_DRIVER_MIPS
+#ifdef CONFIG_BCMA_PFLASH
if (bus->drv_cc.pflash.present) {
err = platform_device_register(&bcma_pflash_dev);
if (err)
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index df806b9c5490..4a2d1b235fb5 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -98,6 +98,9 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_SHIM, "SHIM" },
{ BCMA_CORE_PCIE2, "PCIe Gen2" },
{ BCMA_CORE_ARM_CR4, "ARM CR4" },
+ { BCMA_CORE_GCI, "GCI" },
+ { BCMA_CORE_CMEM, "CNDS DDR2/3 memory controller" },
+ { BCMA_CORE_ARM_CA7, "ARM CA7" },
{ BCMA_CORE_DEFAULT, "Default" },
};
@@ -315,6 +318,8 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
switch (core->id.id) {
case BCMA_CORE_4706_MAC_GBIT_COMMON:
case BCMA_CORE_NS_CHIPCOMMON_B:
+ case BCMA_CORE_PMU:
+ case BCMA_CORE_GCI:
/* Not used yet: case BCMA_CORE_OOB_ROUTER: */
break;
default:
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 29819e719afa..39dd30b6ef86 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -110,16 +110,6 @@ source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
-config BLK_CPQ_DA
- tristate "Compaq SMART2 support"
- depends on PCI && VIRT_TO_BUS && 0
- help
- This is the driver for Compaq Smart Array controllers. Everyone
- using these boards should say Y here. See the file
- <file:Documentation/blockdev/cpqarray.txt> for the current list of
- boards supported by this driver, and for further information on the
- use of this driver.
-
config BLK_CPQ_CISS_DA
tristate "Compaq Smart Array 5xxx support"
depends on PCI
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 671329023ec2..1e9661e26f29 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..ec9d8610b25f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
q->backing_dev_info.name = "aoe";
- q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+ q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d048d2009e89..437b3a822f44 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -875,7 +875,7 @@ bio_pageinc(struct bio *bio)
* compound pages is no longer allowed by the kernel.
*/
page = compound_head(bv.bv_page);
- atomic_inc(&page->_count);
+ page_ref_inc(page);
}
}
@@ -888,7 +888,7 @@ bio_pagedec(struct bio *bio)
bio_for_each_segment(bv, bio, iter) {
page = compound_head(bv.bv_page);
- atomic_dec(&page->_count);
+ page_ref_dec(page);
}
}
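
page_ref_inc()/page_ref_dec() are the accessor wrappers introduced for the raw _count field, so direct atomic pokes like the ones removed here stay greppable and traceable in one place. In simplified form (the real helpers also hook a tracepoint):

    static inline void page_ref_inc(struct page *page)
    {
            atomic_inc(&page->_count);
    }

    static inline void page_ref_dec(struct page *page)
    {
            atomic_dec(&page->_count);
    }
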
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index cb27190e9f39..51a071e32221 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -341,7 +341,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
- bio->bi_iter.bi_size & PAGE_MASK)
+ bio->bi_iter.bi_size & ~PAGE_MASK)
goto io_error;
discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, err);
return err;
}
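
The discard check change in the first hunk flips which sizes are rejected: PAGE_MASK keeps the high page-number bits, so the old test fired on perfectly aligned sizes. A worked example, assuming 4 KiB pages (PAGE_MASK == ~0xfffUL):

    /* for an aligned one-page discard, bi_size == 0x1000 */
    0x1000 & PAGE_MASK   /* == 0x1000: old test nonzero, aligned I/O rejected */
    0x1000 & ~PAGE_MASK  /* == 0x0:    new test zero, aligned I/O accepted */
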
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
deleted file mode 100644
index f749df9e15cd..000000000000
--- a/drivers/block/cpqarray.c
+++ /dev/null
@@ -1,1820 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/bio.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/major.h>
-#include <linux/fs.h>
-#include <linux/blkpg.h>
-#include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/hdreg.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/scatterlist.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-
-#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-
-#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
-#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
-
-/* Embedded module documentation macros - see modules.h */
-/* Original author Chris Frantz - Compaq Computer Corporation */
-MODULE_AUTHOR("Compaq Computer Corporation");
-MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
-MODULE_LICENSE("GPL");
-
-#include "cpqarray.h"
-#include "ida_cmd.h"
-#include "smart1,2.h"
-#include "ida_ioctl.h"
-
-#define READ_AHEAD 128
-#define NR_CMDS 128 /* This could probably go as high as ~400 */
-
-#define MAX_CTLR 8
-#define CTLR_SHIFT 8
-
-#define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
-
-static DEFINE_MUTEX(cpqarray_mutex);
-static int nr_ctlr;
-static ctlr_info_t *hba[MAX_CTLR];
-
-static int eisa[8];
-
-#define NR_PRODUCTS ARRAY_SIZE(products)
-
-/* board_id = Subsystem Device ID & Vendor ID
- * product = Marketing Name for the board
- * access = Address of the struct of function pointers
- */
-static struct board_type products[] = {
- { 0x0040110E, "IDA", &smart1_access },
- { 0x0140110E, "IDA-2", &smart1_access },
- { 0x1040110E, "IAES", &smart1_access },
- { 0x2040110E, "SMART", &smart1_access },
- { 0x3040110E, "SMART-2/E", &smart2e_access },
- { 0x40300E11, "SMART-2/P", &smart2_access },
- { 0x40310E11, "SMART-2SL", &smart2_access },
- { 0x40320E11, "Smart Array 3200", &smart2_access },
- { 0x40330E11, "Smart Array 3100ES", &smart2_access },
- { 0x40340E11, "Smart Array 221", &smart2_access },
- { 0x40400E11, "Integrated Array", &smart4_access },
- { 0x40480E11, "Compaq Raid LC2", &smart4_access },
- { 0x40500E11, "Smart Array 4200", &smart4_access },
- { 0x40510E11, "Smart Array 4250ES", &smart4_access },
- { 0x40580E11, "Smart Array 431", &smart4_access },
-};
-
-/* define the PCI info for the PCI cards this driver can control */
-static const struct pci_device_id cpqarray_pci_device_id[] =
-{
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
- 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
- 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
- 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
- { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
- 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
- { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
- 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
-
-static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
-
-/* Debug... */
-#define DBG(s) do { s } while(0)
-/* Debug (general info)... */
-#define DBGINFO(s) do { } while(0)
-/* Debug Paranoid... */
-#define DBGP(s) do { } while(0)
-/* Debug Extra Paranoid... */
-#define DBGPX(s) do { } while(0)
-
-static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
-static void __iomem *remap_pci_mem(ulong base, ulong size);
-static int cpqarray_eisa_detect(void);
-static int pollcomplete(int ctlr);
-static void getgeometry(int ctlr);
-static void start_fwbk(int ctlr);
-
-static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
-static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
-
-static void free_hba(int i);
-static int alloc_cpqarray_hba(void);
-
-static int sendcmd(
- __u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int blk,
- unsigned int blkcnt,
- unsigned int log_unit );
-
-static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
-static void ida_release(struct gendisk *disk, fmode_t mode);
-static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
-static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
-
-static void do_ida_request(struct request_queue *q);
-static void start_io(ctlr_info_t *h);
-
-static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
-static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
-static inline void complete_command(cmdlist_t *cmd, int timeout);
-
-static irqreturn_t do_ida_intr(int irq, void *dev_id);
-static void ida_timer(unsigned long tdata);
-static int ida_revalidate(struct gendisk *disk);
-static int revalidate_allvol(ctlr_info_t *host);
-static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
-
-#ifdef CONFIG_PROC_FS
-static void ida_procinit(int i);
-#else
-static void ida_procinit(int i) {}
-#endif
-
-static inline drv_info_t *get_drv(struct gendisk *disk)
-{
- return disk->private_data;
-}
-
-static inline ctlr_info_t *get_host(struct gendisk *disk)
-{
- return disk->queue->queuedata;
-}
-
-
-static const struct block_device_operations ida_fops = {
- .owner = THIS_MODULE,
- .open = ida_unlocked_open,
- .release = ida_release,
- .ioctl = ida_ioctl,
- .getgeo = ida_getgeo,
- .revalidate_disk= ida_revalidate,
-};
-
-
-#ifdef CONFIG_PROC_FS
-
-static struct proc_dir_entry *proc_array;
-static const struct file_operations ida_proc_fops;
-
-/*
- * Get us a file in /proc/array that says something about each controller.
- * Create /proc/array if it doesn't exist yet.
- */
-static void __init ida_procinit(int i)
-{
- if (proc_array == NULL) {
- proc_array = proc_mkdir("driver/cpqarray", NULL);
- if (!proc_array) return;
- }
-
- proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
-}
-
-/*
- * Report information about this controller.
- */
-static int ida_proc_show(struct seq_file *m, void *v)
-{
- int i, ctlr;
- ctlr_info_t *h = (ctlr_info_t*)m->private;
- drv_info_t *drv;
-#ifdef CPQ_PROC_PRINT_QUEUES
- cmdlist_t *c;
- unsigned long flags;
-#endif
-
- ctlr = h->ctlr;
- seq_printf(m, "%s: Compaq %s Controller\n"
- " Board ID: 0x%08lx\n"
- " Firmware Revision: %c%c%c%c\n"
- " Controller Sig: 0x%08lx\n"
- " Memory Address: 0x%08lx\n"
- " I/O Port: 0x%04x\n"
- " IRQ: %d\n"
- " Logical drives: %d\n"
- " Physical drives: %d\n\n"
- " Current Q depth: %d\n"
- " Max Q depth since init: %d\n\n",
- h->devname,
- h->product_name,
- (unsigned long)h->board_id,
- h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
- (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
- (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
- h->log_drives, h->phys_drives,
- h->Qdepth, h->maxQsinceinit);
-
- seq_puts(m, "Logical Drive Info:\n");
-
- for(i=0; i<h->log_drives; i++) {
- drv = &h->drv[i];
- seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
- ctlr, i, drv->blk_size, drv->nr_blks);
- }
-
-#ifdef CPQ_PROC_PRINT_QUEUES
- spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
- seq_puts(m, "\nCurrent Queues:\n");
-
- c = h->reqQ;
- seq_printf(m, "reqQ = %p", c);
- if (c) c=c->next;
- while(c && c != h->reqQ) {
- seq_printf(m, "->%p", c);
- c=c->next;
- }
-
- c = h->cmpQ;
- seq_printf(m, "\ncmpQ = %p", c);
- if (c) c=c->next;
- while(c && c != h->cmpQ) {
- seq_printf(m, "->%p", c);
- c=c->next;
- }
-
- seq_putc(m, '\n');
- spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
-#endif
- seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
- h->nr_allocs, h->nr_frees);
- return 0;
-}
-
-static int ida_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ida_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations ida_proc_fops = {
- .owner = THIS_MODULE,
- .open = ida_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_PROC_FS */
-
-module_param_array(eisa, int, NULL, 0);
-
-static void release_io_mem(ctlr_info_t *c)
-{
- /* if IO mem was not protected do nothing */
- if( c->io_mem_addr == 0)
- return;
- release_region(c->io_mem_addr, c->io_mem_length);
- c->io_mem_addr = 0;
- c->io_mem_length = 0;
-}
-
-static void cpqarray_remove_one(int i)
-{
- int j;
- char buff[4];
-
- /* sendcmd will turn off interrupt, and send the flush...
- * To write all data in the battery backed cache to disks
- * no data returned, but don't want to send NULL to sendcmd */
- if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
- {
- printk(KERN_WARNING "Unable to flush cache on controller %d\n",
- i);
- }
- free_irq(hba[i]->intr, hba[i]);
- iounmap(hba[i]->vaddr);
- unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
- del_timer(&hba[i]->timer);
- remove_proc_entry(hba[i]->devname, proc_array);
- pci_free_consistent(hba[i]->pci_dev,
- NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
- hba[i]->cmd_pool_dhandle);
- kfree(hba[i]->cmd_pool_bits);
- for(j = 0; j < NWD; j++) {
- if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
- del_gendisk(ida_gendisk[i][j]);
- put_disk(ida_gendisk[i][j]);
- }
- blk_cleanup_queue(hba[i]->queue);
- release_io_mem(hba[i]);
- free_hba(i);
-}
-
-static void cpqarray_remove_one_pci(struct pci_dev *pdev)
-{
- int i;
- ctlr_info_t *tmp_ptr;
-
- if (pci_get_drvdata(pdev) == NULL) {
- printk( KERN_ERR "cpqarray: Unable to remove device \n");
- return;
- }
-
- tmp_ptr = pci_get_drvdata(pdev);
- i = tmp_ptr->ctlr;
- if (hba[i] == NULL) {
- printk(KERN_ERR "cpqarray: controller %d appears to have"
- "already been removed \n", i);
- return;
- }
- pci_set_drvdata(pdev, NULL);
-
- cpqarray_remove_one(i);
-}
-
-/* removing an instance that was not removed automatically..
- * must be an eisa card.
- */
-static void cpqarray_remove_one_eisa(int i)
-{
- if (hba[i] == NULL) {
- printk(KERN_ERR "cpqarray: controller %d appears to have"
- "already been removed \n", i);
- return;
- }
- cpqarray_remove_one(i);
-}
-
-/* pdev is NULL for eisa */
-static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
-{
- struct request_queue *q;
- int j;
-
- /*
- * register block devices
- * Find disks and fill in structs
- * Get an interrupt, set the Q depth and get into /proc
- */
-
- /* If this successful it should insure that we are the only */
- /* instance of the driver */
- if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
- goto Enomem4;
- }
- hba[i]->access.set_intr_mask(hba[i], 0);
- if (request_irq(hba[i]->intr, do_ida_intr, IRQF_SHARED,
- hba[i]->devname, hba[i]))
- {
- printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
- hba[i]->intr, hba[i]->devname);
- goto Enomem3;
- }
-
- for (j=0; j<NWD; j++) {
- ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
- if (!ida_gendisk[i][j])
- goto Enomem2;
- }
-
- hba[i]->cmd_pool = pci_alloc_consistent(
- hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
- &(hba[i]->cmd_pool_dhandle));
- hba[i]->cmd_pool_bits = kcalloc(
- DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
- GFP_KERNEL);
-
- if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
- goto Enomem1;
-
- memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
- printk(KERN_INFO "cpqarray: Finding drives on %s",
- hba[i]->devname);
-
- spin_lock_init(&hba[i]->lock);
- q = blk_init_queue(do_ida_request, &hba[i]->lock);
- if (!q)
- goto Enomem1;
-
- hba[i]->queue = q;
- q->queuedata = hba[i];
-
- getgeometry(i);
- start_fwbk(i);
-
- ida_procinit(i);
-
- if (pdev)
- blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
-
- /* This is a hardware imposed limit. */
- blk_queue_max_segments(q, SG_MAX);
-
- init_timer(&hba[i]->timer);
- hba[i]->timer.expires = jiffies + IDA_TIMER;
- hba[i]->timer.data = (unsigned long)hba[i];
- hba[i]->timer.function = ida_timer;
- add_timer(&hba[i]->timer);
-
- /* Enable IRQ now that spinlock and rate limit timer are set up */
- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
-
- for(j=0; j<NWD; j++) {
- struct gendisk *disk = ida_gendisk[i][j];
- drv_info_t *drv = &hba[i]->drv[j];
- sprintf(disk->disk_name, "ida/c%dd%d", i, j);
- disk->major = COMPAQ_SMART2_MAJOR + i;
- disk->first_minor = j<<NWD_SHIFT;
- disk->fops = &ida_fops;
- if (j && !drv->nr_blks)
- continue;
- blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
- set_capacity(disk, drv->nr_blks);
- disk->queue = hba[i]->queue;
- disk->private_data = drv;
- add_disk(disk);
- }
-
- /* done ! */
- return(i);
-
-Enomem1:
- nr_ctlr = i;
- kfree(hba[i]->cmd_pool_bits);
- if (hba[i]->cmd_pool)
- pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
- hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
-Enomem2:
- while (j--) {
- put_disk(ida_gendisk[i][j]);
- ida_gendisk[i][j] = NULL;
- }
- free_irq(hba[i]->intr, hba[i]);
-Enomem3:
- unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
-Enomem4:
- if (pdev)
- pci_set_drvdata(pdev, NULL);
- release_io_mem(hba[i]);
- free_hba(i);
-
- printk( KERN_ERR "cpqarray: out of memory");
-
- return -1;
-}
-
-static int cpqarray_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int i;
-
- printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
- " bus %d dev %d func %d\n",
- pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- i = alloc_cpqarray_hba();
- if( i < 0 )
- return (-1);
- memset(hba[i], 0, sizeof(ctlr_info_t));
- sprintf(hba[i]->devname, "ida%d", i);
- hba[i]->ctlr = i;
- /* Initialize the pdev driver private data */
- pci_set_drvdata(pdev, hba[i]);
-
- if (cpqarray_pci_init(hba[i], pdev) != 0) {
- pci_set_drvdata(pdev, NULL);
- release_io_mem(hba[i]);
- free_hba(i);
- return -1;
- }
-
- return (cpqarray_register_ctlr(i, pdev));
-}
-
-static struct pci_driver cpqarray_pci_driver = {
- .name = "cpqarray",
- .probe = cpqarray_init_one,
- .remove = cpqarray_remove_one_pci,
- .id_table = cpqarray_pci_device_id,
-};
-
-/*
- * This is it. Find all the controllers and register them.
- * returns the number of block devices registered.
- */
-static int __init cpqarray_init(void)
-{
- int num_cntlrs_reg = 0;
- int i;
- int rc = 0;
-
- /* detect controllers */
- printk(DRIVER_NAME "\n");
-
- rc = pci_register_driver(&cpqarray_pci_driver);
- if (rc)
- return rc;
- cpqarray_eisa_detect();
-
- for (i=0; i < MAX_CTLR; i++) {
- if (hba[i] != NULL)
- num_cntlrs_reg++;
- }
-
- if (num_cntlrs_reg)
- return 0;
- else {
- pci_unregister_driver(&cpqarray_pci_driver);
- return -ENODEV;
- }
-}
-
-/* Function to find the first free pointer into our hba[] array */
-/* Returns -1 if no free entries are left. */
-static int alloc_cpqarray_hba(void)
-{
- int i;
-
- for(i=0; i< MAX_CTLR; i++) {
- if (hba[i] == NULL) {
- hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
- if(hba[i]==NULL) {
- printk(KERN_ERR "cpqarray: out of memory.\n");
- return (-1);
- }
- return (i);
- }
- }
- printk(KERN_WARNING "cpqarray: This driver supports a maximum"
- " of 8 controllers.\n");
- return(-1);
-}
-
-static void free_hba(int i)
-{
- kfree(hba[i]);
- hba[i]=NULL;
-}
-
-/*
- * Find the IO address of the controller, its IRQ and so forth. Fill
- * in some basic stuff into the ctlr_info_t structure.
- */
-static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
-{
- ushort vendor_id, device_id, command;
- unchar cache_line_size, latency_timer;
- unchar irq, revision;
- unsigned long addr[6];
- __u32 board_id;
-
- int i;
-
- c->pci_dev = pdev;
- pci_set_master(pdev);
- if (pci_enable_device(pdev)) {
- printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
- return -1;
- }
- vendor_id = pdev->vendor;
- device_id = pdev->device;
- revision = pdev->revision;
- irq = pdev->irq;
-
- for(i=0; i<6; i++)
- addr[i] = pci_resource_start(pdev, i);
-
- if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
- {
- printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
- return -1;
- }
-
- pci_read_config_word(pdev, PCI_COMMAND, &command);
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
-
- pci_read_config_dword(pdev, 0x2c, &board_id);
-
- /* check to see if controller has been disabled */
- if(!(command & 0x02)) {
- printk(KERN_WARNING
- "cpqarray: controller appears to be disabled\n");
- return(-1);
- }
-
-DBGINFO(
- printk("vendor_id = %x\n", vendor_id);
- printk("device_id = %x\n", device_id);
- printk("command = %x\n", command);
- for(i=0; i<6; i++)
- printk("addr[%d] = %lx\n", i, addr[i]);
- printk("revision = %x\n", revision);
- printk("irq = %x\n", irq);
- printk("cache_line_size = %x\n", cache_line_size);
- printk("latency_timer = %x\n", latency_timer);
- printk("board_id = %x\n", board_id);
-);
-
- c->intr = irq;
-
- for(i=0; i<6; i++) {
- if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
- { /* IO space */
- c->io_mem_addr = addr[i];
- c->io_mem_length = pci_resource_end(pdev, i)
- - pci_resource_start(pdev, i) + 1;
- if(!request_region( c->io_mem_addr, c->io_mem_length,
- "cpqarray"))
- {
- printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
- c->io_mem_addr = 0;
- c->io_mem_length = 0;
- }
- break;
- }
- }
-
- c->paddr = 0;
- for(i=0; i<6; i++)
- if (!(pci_resource_flags(pdev, i) &
- PCI_BASE_ADDRESS_SPACE_IO)) {
- c->paddr = pci_resource_start (pdev, i);
- break;
- }
- if (!c->paddr)
- return -1;
- c->vaddr = remap_pci_mem(c->paddr, 128);
- if (!c->vaddr)
- return -1;
- c->board_id = board_id;
-
- for(i=0; i<NR_PRODUCTS; i++) {
- if (board_id == products[i].board_id) {
- c->product_name = products[i].product_name;
- c->access = *(products[i].access);
- break;
- }
- }
- if (i == NR_PRODUCTS) {
- printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
- " to access the SMART Array controller %08lx\n",
- (unsigned long)board_id);
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Map (physical) PCI mem into (virtual) kernel space
- */
-static void __iomem *remap_pci_mem(ulong base, ulong size)
-{
- ulong page_base = ((ulong) base) & PAGE_MASK;
- ulong page_offs = ((ulong) base) - page_base;
- void __iomem *page_remapped = ioremap(page_base, page_offs+size);
-
- return (page_remapped ? (page_remapped + page_offs) : NULL);
-}
-
-#ifndef MODULE
-/*
- * Config string is a comma separated set of i/o addresses of EISA cards.
- */
-static int cpqarray_setup(char *str)
-{
- int i, ints[9];
-
- (void)get_options(str, ARRAY_SIZE(ints), ints);
-
- for(i=0; i<ints[0] && i<8; i++)
- eisa[i] = ints[i+1];
- return 1;
-}
-
-__setup("smart2=", cpqarray_setup);
-
-#endif
-
-/*
- * Find an EISA controller's signature. Set up an hba if we find it.
- */
-static int cpqarray_eisa_detect(void)
-{
- int i=0, j;
- __u32 board_id;
- int intr;
- int ctlr;
- int num_ctlr = 0;
-
- while(i<8 && eisa[i]) {
- ctlr = alloc_cpqarray_hba();
- if(ctlr == -1)
- break;
- board_id = inl(eisa[i]+0xC80);
- for(j=0; j < NR_PRODUCTS; j++)
- if (board_id == products[j].board_id)
- break;
-
- if (j == NR_PRODUCTS) {
- printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
- " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
- continue;
- }
-
- memset(hba[ctlr], 0, sizeof(ctlr_info_t));
- hba[ctlr]->io_mem_addr = eisa[i];
- hba[ctlr]->io_mem_length = 0x7FF;
- if(!request_region(hba[ctlr]->io_mem_addr,
- hba[ctlr]->io_mem_length,
- "cpqarray"))
- {
- printk(KERN_WARNING "cpqarray: I/O range already in "
- "use addr = %lx length = %ld\n",
- hba[ctlr]->io_mem_addr,
- hba[ctlr]->io_mem_length);
- free_hba(ctlr);
- continue;
- }
-
- /*
- * Read the config register to find our interrupt
- */
- intr = inb(eisa[i]+0xCC0) >> 4;
- if (intr & 1) intr = 11;
- else if (intr & 2) intr = 10;
- else if (intr & 4) intr = 14;
- else if (intr & 8) intr = 15;
-
- hba[ctlr]->intr = intr;
- sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
- hba[ctlr]->product_name = products[j].product_name;
- hba[ctlr]->access = *(products[j].access);
- hba[ctlr]->ctlr = ctlr;
- hba[ctlr]->board_id = board_id;
- hba[ctlr]->pci_dev = NULL; /* not PCI */
-
-DBGINFO(
- printk("i = %d, j = %d\n", i, j);
- printk("irq = %x\n", intr);
- printk("product name = %s\n", products[j].product_name);
- printk("board_id = %x\n", board_id);
-);
-
- num_ctlr++;
- i++;
-
- if (cpqarray_register_ctlr(ctlr, NULL) == -1)
- printk(KERN_WARNING
- "cpqarray: Can't register EISA controller %d\n",
- ctlr);
-
- }
-
- return num_ctlr;
-}
-
-/*
- * Open. Make sure the device is really there.
- */
-static int ida_open(struct block_device *bdev, fmode_t mode)
-{
- drv_info_t *drv = get_drv(bdev->bd_disk);
- ctlr_info_t *host = get_host(bdev->bd_disk);
-
- DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
- /*
- * Root is allowed to open raw volume zero even if it's not configured
- * so array config can still work. I don't think I really like this,
- * but I'm already using way to many device nodes to claim another one
- * for "raw controller".
- */
- if (!drv->nr_blks) {
- if (!capable(CAP_SYS_RAWIO))
- return -ENXIO;
- if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
- return -ENXIO;
- }
- host->usage_count++;
- return 0;
-}
-
-static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
- int ret;
-
- mutex_lock(&cpqarray_mutex);
- ret = ida_open(bdev, mode);
- mutex_unlock(&cpqarray_mutex);
-
- return ret;
-}
-
-/*
- * Close. Sync first.
- */
-static void ida_release(struct gendisk *disk, fmode_t mode)
-{
- ctlr_info_t *host;
-
- mutex_lock(&cpqarray_mutex);
- host = get_host(disk);
- host->usage_count--;
- mutex_unlock(&cpqarray_mutex);
-}
-
-/*
- * Enqueuing and dequeuing functions for cmdlists.
- */
-static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
-{
- if (*Qptr == NULL) {
- *Qptr = c;
- c->next = c->prev = c;
- } else {
- c->prev = (*Qptr)->prev;
- c->next = (*Qptr);
- (*Qptr)->prev->next = c;
- (*Qptr)->prev = c;
- }
-}
-
-static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
-{
- if (c && c->next != c) {
- if (*Qptr == c) *Qptr = c->next;
- c->prev->next = c->next;
- c->next->prev = c->prev;
- } else {
- *Qptr = NULL;
- }
- return c;
-}
-
-/*
- * Get a request and submit it to the controller.
- * This routine needs to grab all the requests it possibly can from the
- * req Q and submit them. Interrupts are off (and need to be off) when you
- * are in here (either via the dummy do_ida_request functions or by being
- * called from the interrupt handler
- */
-static void do_ida_request(struct request_queue *q)
-{
- ctlr_info_t *h = q->queuedata;
- cmdlist_t *c;
- struct request *creq;
- struct scatterlist tmp_sg[SG_MAX];
- int i, dir, seg;
-
-queue_next:
- creq = blk_peek_request(q);
- if (!creq)
- goto startio;
-
- BUG_ON(creq->nr_phys_segments > SG_MAX);
-
- if ((c = cmd_alloc(h,1)) == NULL)
- goto startio;
-
- blk_start_request(creq);
-
- c->ctlr = h->ctlr;
- c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
- c->hdr.size = sizeof(rblk_t) >> 2;
- c->size += sizeof(rblk_t);
-
- c->req.hdr.blk = blk_rq_pos(creq);
- c->rq = creq;
-DBGPX(
- printk("sector=%d, nr_sectors=%u\n",
- blk_rq_pos(creq), blk_rq_sectors(creq));
-);
- sg_init_table(tmp_sg, SG_MAX);
- seg = blk_rq_map_sg(q, creq, tmp_sg);
-
- /* Now do all the DMA Mappings */
- if (rq_data_dir(creq) == READ)
- dir = PCI_DMA_FROMDEVICE;
- else
- dir = PCI_DMA_TODEVICE;
- for( i=0; i < seg; i++)
- {
- c->req.sg[i].size = tmp_sg[i].length;
- c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
- sg_page(&tmp_sg[i]),
- tmp_sg[i].offset,
- tmp_sg[i].length, dir);
- }
-DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
- c->req.hdr.sg_cnt = seg;
- c->req.hdr.blk_cnt = blk_rq_sectors(creq);
- c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
- c->type = CMD_RWREQ;
-
- /* Put the request on the tail of the request queue */
- addQ(&h->reqQ, c);
- h->Qdepth++;
- if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
-
- goto queue_next;
-
-startio:
- start_io(h);
-}
-
-/*
- * start_io submits everything on a controller's request queue
- * and moves it to the completion queue.
- *
- * Interrupts had better be off if you're in here
- */
-static void start_io(ctlr_info_t *h)
-{
- cmdlist_t *c;
-
- while((c = h->reqQ) != NULL) {
- /* Can't do anything if we're busy */
- if (h->access.fifo_full(h) == 0)
- return;
-
- /* Get the first entry from the request Q */
- removeQ(&h->reqQ, c);
- h->Qdepth--;
-
- /* Tell the controller to do our bidding */
- h->access.submit_command(h, c);
-
- /* Get onto the completion Q */
- addQ(&h->cmpQ, c);
- }
-}
-
-/*
- * Mark all buffers that cmd was responsible for
- */
-static inline void complete_command(cmdlist_t *cmd, int timeout)
-{
- struct request *rq = cmd->rq;
- int error = 0;
- int i, ddir;
-
- if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
- (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
- printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
- cmd->ctlr, cmd->hdr.unit);
- hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
- }
- if (cmd->req.hdr.rcode & RCODE_FATAL) {
- printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
- cmd->ctlr, cmd->hdr.unit);
- error = -EIO;
- }
- if (cmd->req.hdr.rcode & RCODE_INVREQ) {
- printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
- cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
- cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
- cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
- error = -EIO;
- }
- if (timeout)
- error = -EIO;
- /* unmap the DMA mapping for all the scatter gather elements */
- if (cmd->req.hdr.cmd == IDA_READ)
- ddir = PCI_DMA_FROMDEVICE;
- else
- ddir = PCI_DMA_TODEVICE;
- for(i=0; i<cmd->req.hdr.sg_cnt; i++)
- pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
- cmd->req.sg[i].size, ddir);
-
- DBGPX(printk("Done with %p\n", rq););
- __blk_end_request_all(rq, error);
-}
-
-/*
- * The controller will interrupt us upon completion of commands.
- * Find the command on the completion queue, remove it, tell the OS and
- * try to queue up more IO
- */
-static irqreturn_t do_ida_intr(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- cmdlist_t *c;
- unsigned long istat;
- unsigned long flags;
- __u32 a,a1;
-
- istat = h->access.intr_pending(h);
- /* Is this interrupt for us? */
- if (istat == 0)
- return IRQ_NONE;
-
- /*
- * If there are completed commands in the completion queue,
- * we had better do something about it.
- */
- spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
- if (istat & FIFO_NOT_EMPTY) {
- while((a = h->access.command_completed(h))) {
- a1 = a; a &= ~3;
- if ((c = h->cmpQ) == NULL)
- {
- printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
- continue;
- }
- while(c->busaddr != a) {
- c = c->next;
- if (c == h->cmpQ)
- break;
- }
- /*
- * If we've found the command, take it off the
- * completion Q and free it
- */
- if (c->busaddr == a) {
- removeQ(&h->cmpQ, c);
- /* Check for an invalid command:
- * the controller returns a command error,
- * but rcode == 0.
- */
-
- if((a1 & 0x03) && (c->req.hdr.rcode == 0))
- {
- c->req.hdr.rcode = RCODE_INVREQ;
- }
- if (c->type == CMD_RWREQ) {
- complete_command(c, 0);
- cmd_free(h, c, 1);
- } else if (c->type == CMD_IOCTL_PEND) {
- c->type = CMD_IOCTL_DONE;
- }
- continue;
- }
- }
- }
-
- /*
- * See if we can queue up some more IO
- */
- do_ida_request(h->queue);
- spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
- return IRQ_HANDLED;
-}
-
-/*
- * This timer was for timing out requests that haven't happened after
- * IDA_TIMEOUT. That wasn't such a good idea. This timer is used to
- * reset a flags structure so we don't flood the user with
- * "Non-Fatal error" messages.
- */
-static void ida_timer(unsigned long tdata)
-{
- ctlr_info_t *h = (ctlr_info_t*)tdata;
-
- h->timer.expires = jiffies + IDA_TIMER;
- add_timer(&h->timer);
- h->misc_tflags = 0;
-}
-
-static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- drv_info_t *drv = get_drv(bdev->bd_disk);
-
- if (drv->cylinders) {
- geo->heads = drv->heads;
- geo->sectors = drv->sectors;
- geo->cylinders = drv->cylinders;
- } else {
- geo->heads = 0xff;
- geo->sectors = 0x3f;
- geo->cylinders = drv->nr_blks / (0xff*0x3f);
- }
-
- return 0;
-}
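/*
 * When the controller reports no cylinder count, ida_getgeo() above falls
 * back to the conventional 255-head, 63-sector translation and derives the
 * cylinder count from the block count. A quick check of that arithmetic
 * (the volume size is a made-up example):
 */
#include <stdio.h>

int main(void)
{
	unsigned nr_blks = 17773524;		/* hypothetical ~9 GB volume */
	unsigned heads = 0xff, sectors = 0x3f;	/* 255 * 63 = 16065 blocks/cyl */

	printf("cylinders = %u\n", nr_blks / (heads * sectors));	/* 1106 */
	return 0;
}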
-
-/*
- * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
- * setting readahead and submitting commands from userspace to the controller.
- */
-static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- drv_info_t *drv = get_drv(bdev->bd_disk);
- ctlr_info_t *host = get_host(bdev->bd_disk);
- int error;
- ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
- ida_ioctl_t *my_io;
-
- switch(cmd) {
- case IDAGETDRVINFO:
- if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
- return -EFAULT;
- return 0;
- case IDAPASSTHRU:
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
- if (!my_io)
- return -ENOMEM;
- error = -EFAULT;
- if (copy_from_user(my_io, io, sizeof(*my_io)))
- goto out_passthru;
- error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
- if (error)
- goto out_passthru;
- error = -EFAULT;
- if (copy_to_user(io, my_io, sizeof(*my_io)))
- goto out_passthru;
- error = 0;
-out_passthru:
- kfree(my_io);
- return error;
- case IDAGETCTLRSIG:
- if (!arg) return -EINVAL;
- if (put_user(host->ctlr_sig, (int __user *)arg))
- return -EFAULT;
- return 0;
- case IDAREVALIDATEVOLS:
- if (MINOR(bdev->bd_dev) != 0)
- return -ENXIO;
- return revalidate_allvol(host);
- case IDADRIVERVERSION:
- if (!arg) return -EINVAL;
- if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
- return -EFAULT;
- return 0;
- case IDAGETPCIINFO:
- {
-
- ida_pci_info_struct pciinfo;
-
- if (!arg) return -EINVAL;
- memset(&pciinfo, 0, sizeof(pciinfo));
- pciinfo.bus = host->pci_dev->bus->number;
- pciinfo.dev_fn = host->pci_dev->devfn;
- pciinfo.board_id = host->board_id;
- if(copy_to_user((void __user *) arg, &pciinfo,
- sizeof( ida_pci_info_struct)))
- return -EFAULT;
- return(0);
- }
-
- default:
- return -EINVAL;
- }
-
-}
-
-static int ida_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long param)
-{
- int ret;
-
- mutex_lock(&cpqarray_mutex);
- ret = ida_locked_ioctl(bdev, mode, cmd, param);
- mutex_unlock(&cpqarray_mutex);
-
- return ret;
-}
-
-/*
- * ida_ctlr_ioctl is for passing commands to the controller from userspace.
- * The command block (io) has already been copied to kernel space for us;
- * however, any elements in the sglist need to be copied to kernel space
- * or copied back to userspace.
- *
- * Only root may perform a controller passthru command; however, I'm not doing
- * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
- * putting a 64M buffer in the sglist is probably a *bad* idea.
- */
-static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
-{
- int ctlr = h->ctlr;
- cmdlist_t *c;
- void *p = NULL;
- unsigned long flags;
- int error;
-
- if ((c = cmd_alloc(h, 0)) == NULL)
- return -ENOMEM;
- c->ctlr = ctlr;
- c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
- c->hdr.size = sizeof(rblk_t) >> 2;
- c->size += sizeof(rblk_t);
-
- c->req.hdr.cmd = io->cmd;
- c->req.hdr.blk = io->blk;
- c->req.hdr.blk_cnt = io->blk_cnt;
- c->type = CMD_IOCTL_PEND;
-
- /* Pre submit processing */
- switch(io->cmd) {
- case PASSTHRU_A:
- p = memdup_user(io->sg[0].addr, io->sg[0].size);
- if (IS_ERR(p)) {
- error = PTR_ERR(p);
- cmd_free(h, c, 0);
- return error;
- }
- c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
- sizeof(ida_ioctl_t),
- PCI_DMA_BIDIRECTIONAL);
- c->req.sg[0].size = io->sg[0].size;
- c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- c->req.hdr.sg_cnt = 1;
- break;
- case IDA_READ:
- case READ_FLASH_ROM:
- case SENSE_CONTROLLER_PERFORMANCE:
- p = kmalloc(io->sg[0].size, GFP_KERNEL);
- if (!p) {
- error = -ENOMEM;
- cmd_free(h, c, 0);
- return error;
- }
-
- c->req.sg[0].size = io->sg[0].size;
- c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- c->req.hdr.sg_cnt = 1;
- break;
- case IDA_WRITE:
- case IDA_WRITE_MEDIA:
- case DIAG_PASS_THRU:
- case COLLECT_BUFFER:
- case WRITE_FLASH_ROM:
- p = memdup_user(io->sg[0].addr, io->sg[0].size);
- if (IS_ERR(p)) {
- error = PTR_ERR(p);
- cmd_free(h, c, 0);
- return error;
- }
- c->req.sg[0].size = io->sg[0].size;
- c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- c->req.hdr.sg_cnt = 1;
- break;
- default:
- c->req.sg[0].size = sizeof(io->c);
- c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- c->req.hdr.sg_cnt = 1;
- }
-
- /* Put the request on the tail of the request queue */
- spin_lock_irqsave(IDA_LOCK(ctlr), flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- start_io(h);
- spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
-
- /* Wait for completion */
- while(c->type != CMD_IOCTL_DONE)
- schedule();
-
- /* Unmap the DMA */
- pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
- PCI_DMA_BIDIRECTIONAL);
- /* Post submit processing */
- switch(io->cmd) {
- case PASSTHRU_A:
- pci_unmap_single(h->pci_dev, c->req.hdr.blk,
- sizeof(ida_ioctl_t),
- PCI_DMA_BIDIRECTIONAL);
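- /* fall through: copy the result back to userspace and free p below */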
- case IDA_READ:
- case DIAG_PASS_THRU:
- case SENSE_CONTROLLER_PERFORMANCE:
- case READ_FLASH_ROM:
- if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
- kfree(p);
- return -EFAULT;
- }
- /* fall through and free p */
- case IDA_WRITE:
- case IDA_WRITE_MEDIA:
- case COLLECT_BUFFER:
- case WRITE_FLASH_ROM:
- kfree(p);
- break;
- default:;
- /* Nothing to do */
- }
-
- io->rcode = c->req.hdr.rcode;
- cmd_free(h, c, 0);
- return(0);
-}
-
-/*
- * Commands are pre-allocated in a large block. Here we use a simple bitmap
- * scheme to suballocate them to the driver. Operations that are not time
- * critical (and can tolerate a slower allocation) can pass get_from_pool == 0
- * to have a command allocated outside the pool instead.
- */
-static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
-{
- cmdlist_t * c;
- int i;
- dma_addr_t cmd_dhandle;
-
- if (!get_from_pool) {
- c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
- sizeof(cmdlist_t), &cmd_dhandle);
- if(c==NULL)
- return NULL;
- } else {
- do {
- i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
- if (i == NR_CMDS)
- return NULL;
- } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
- c = h->cmd_pool + i;
- cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
- h->nr_allocs++;
- }
-
- memset(c, 0, sizeof(cmdlist_t));
- c->busaddr = cmd_dhandle;
- return c;
-}
-
-static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
-{
- int i;
-
- if (!got_from_pool) {
- pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
- c->busaddr);
- } else {
- i = c - h->cmd_pool;
- clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
- h->nr_frees++;
- }
-}
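/*
 * The pool path of cmd_alloc() above is a bitmap suballocator: scan for a
 * clear bit, claim it (atomically, via test_and_set_bit, so racing allocators
 * retry on collision), and cmd_free() later just clears the bit again. The
 * same bookkeeping as a single-threaded user-space sketch (names are
 * illustrative):
 */
#include <limits.h>
#include <stdio.h>

#define NR_SLOTS 128
#define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long pool_bits[NR_SLOTS / WORD_BITS + 1];

static int slot_alloc(void)
{
	for (int i = 0; i < NR_SLOTS; i++) {
		unsigned long mask = 1UL << (i % WORD_BITS);

		if (!(pool_bits[i / WORD_BITS] & mask)) {
			pool_bits[i / WORD_BITS] |= mask;	/* claim slot i */
			return i;
		}
	}
	return -1;				/* pool exhausted */
}

static void slot_free(int i)
{
	pool_bits[i / WORD_BITS] &= ~(1UL << (i % WORD_BITS));
}

int main(void)
{
	int a = slot_alloc(), b = slot_alloc();

	printf("got slots %d and %d\n", a, b);		/* 0 and 1 */
	slot_free(a);
	printf("reused slot %d\n", slot_alloc());	/* 0 again */
	return 0;
}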
-
-/***********************************************************************
- name: sendcmd
- Send a command to an IDA using the memory mapped FIFO interface
- and wait for it to complete.
- This routine should only be called at init time.
-***********************************************************************/
-static int sendcmd(
- __u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int blk,
- unsigned int blkcnt,
- unsigned int log_unit )
-{
- cmdlist_t *c;
- int complete;
- unsigned long temp;
- unsigned long i;
- ctlr_info_t *info_p = hba[ctlr];
-
- c = cmd_alloc(info_p, 1);
- if(!c)
- return IO_ERROR;
- c->ctlr = ctlr;
- c->hdr.unit = log_unit;
- c->hdr.prio = 0;
- c->hdr.size = sizeof(rblk_t) >> 2;
- c->size += sizeof(rblk_t);
-
- /* The request information. */
- c->req.hdr.next = 0;
- c->req.hdr.rcode = 0;
- c->req.bp = 0;
- c->req.hdr.sg_cnt = 1;
- c->req.hdr.reserved = 0;
-
- if (size == 0)
- c->req.sg[0].size = 512;
- else
- c->req.sg[0].size = size;
-
- c->req.hdr.blk = blk;
- c->req.hdr.blk_cnt = blkcnt;
- c->req.hdr.cmd = (unsigned char) cmd;
- c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
- buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- /*
- * Disable interrupt
- */
- info_p->access.set_intr_mask(info_p, 0);
- /* Make sure there is room in the command FIFO */
- /* Actually it should be completely empty at this time. */
- for (i = 200000; i > 0; i--) {
- temp = info_p->access.fifo_full(info_p);
- if (temp != 0) {
- break;
- }
- udelay(10);
-DBG(
- printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
- " waiting!\n", ctlr);
-);
- }
- /*
- * Send the cmd
- */
- info_p->access.submit_command(info_p, c);
- complete = pollcomplete(ctlr);
-
- pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- if (complete != 1) {
- if (complete != c->busaddr) {
- printk( KERN_WARNING
- "cpqarray ida%d: idaSendPciCmd "
- "Invalid command list address returned! (%08lx)\n",
- ctlr, (unsigned long)complete);
- cmd_free(info_p, c, 1);
- return (IO_ERROR);
- }
- } else {
- printk( KERN_WARNING
- "cpqarray ida%d: idaSendPciCmd Timeout out, "
- "No command list address returned!\n",
- ctlr);
- cmd_free(info_p, c, 1);
- return (IO_ERROR);
- }
-
- if (c->req.hdr.rcode & 0x00FE) {
- if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
- printk( KERN_WARNING
- "cpqarray ida%d: idaSendPciCmd, error: "
- "Controller failed at init time "
- "cmd: 0x%x, return code = 0x%x\n",
- ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
-
- cmd_free(info_p, c, 1);
- return (IO_ERROR);
- }
- }
- cmd_free(info_p, c, 1);
- return (IO_OK);
-}
-
-/*
- * revalidate_allvol is for online array config utilities. After a
- * utility reconfigures the drives in the array, it can use this function
- * (through an ioctl) to make the driver zap any previous disk structs for
- * that controller and get new ones.
- *
- * Right now I'm using the getgeometry() function to do this, but this
- * function should probably be finer grained and allow you to revalidate one
- * particular logical volume (instead of all of them on a particular
- * controller).
- */
-static int revalidate_allvol(ctlr_info_t *host)
-{
- int ctlr = host->ctlr;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(IDA_LOCK(ctlr), flags);
- if (host->usage_count > 1) {
- spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
- printk(KERN_WARNING "cpqarray: Device busy for volume"
- " revalidation (usage=%d)\n", host->usage_count);
- return -EBUSY;
- }
- host->usage_count++;
- spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
-
- /*
- * Set the partition and block size structures for all volumes
- * on this controller to zero. We will reread all of this data.
- */
- set_capacity(ida_gendisk[ctlr][0], 0);
- for (i = 1; i < NWD; i++) {
- struct gendisk *disk = ida_gendisk[ctlr][i];
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- }
- memset(host->drv, 0, sizeof(drv_info_t)*NWD);
-
- /*
- * Tell the array controller not to give us any interrupts while
- * we check the new geometry. Then turn interrupts back on when
- * we're done.
- */
- host->access.set_intr_mask(host, 0);
- getgeometry(ctlr);
- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
-
- for(i=0; i<NWD; i++) {
- struct gendisk *disk = ida_gendisk[ctlr][i];
- drv_info_t *drv = &host->drv[i];
- if (i && !drv->nr_blks)
- continue;
- blk_queue_logical_block_size(host->queue, drv->blk_size);
- set_capacity(disk, drv->nr_blks);
- disk->queue = host->queue;
- disk->private_data = drv;
- if (i)
- add_disk(disk);
- }
-
- host->usage_count--;
- return 0;
-}
-
-static int ida_revalidate(struct gendisk *disk)
-{
- drv_info_t *drv = disk->private_data;
- set_capacity(disk, drv->nr_blks);
- return 0;
-}
-
-/********************************************************************
- name: pollcomplete
- Poll, waiting for a command to complete.
- The memory mapped FIFO is polled for the completion.
- Used only at init time, interrupts disabled.
- ********************************************************************/
-static int pollcomplete(int ctlr)
-{
- int done;
- int i;
-
- /* Wait (up to 2 seconds) for a command to complete */
-
- for (i = 200000; i > 0; i--) {
- done = hba[ctlr]->access.command_completed(hba[ctlr]);
- if (done == 0) {
- udelay(10); /* a short fixed delay */
- } else
- return (done);
- }
- /* Invalid address to tell caller we ran out of time */
- return 1;
-}
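/*
 * pollcomplete() above is a bounded busy-wait: 200000 passes with a 10 us
 * delay give the advertised two-second budget, and the in-band value 1
 * (never a valid command list address) reports a timeout. The same pattern
 * in portable C; nanosleep() stands in for the kernel's udelay() and the
 * FIFO read is faked, both assumptions of this sketch:
 */
#include <stdio.h>
#include <time.h>

static unsigned long fake_command_completed(void)
{
	static int calls;

	return ++calls > 5 ? 0xdead00UL : 0;	/* "completes" on the 6th poll */
}

static unsigned long poll_complete(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 10000 };	/* ~10 us */
	int i;

	for (i = 200000; i > 0; i--) {		/* ~2 s total budget */
		unsigned long done = fake_command_completed();

		if (done != 0)
			return done;
		nanosleep(&ts, NULL);
	}
	return 1;	/* in-band "timed out" marker, never a real address */
}

int main(void)
{
	printf("completion = %#lx\n", poll_complete());
	return 0;
}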
-/*****************************************************************
- start_fwbk
- Starts the controller firmware's background processing.
- Currently only the Integrated RAID controller needs this done.
- If the PCI mem address registers are written to after this,
- data corruption may occur.
-*****************************************************************/
-static void start_fwbk(int ctlr)
-{
- id_ctlr_t *id_ctlr_buf;
- int ret_code;
-
- if ((hba[ctlr]->board_id != 0x40400E11)
- && (hba[ctlr]->board_id != 0x40480E11))
- /* Not an Integrated RAID controller, so there is nothing for us to do */
- return;
- printk(KERN_DEBUG "cpqarray: Starting firmware's background"
- " processing\n");
- /* The command does not return anything, but sendcmd() needs a
- buffer */
- id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
- if(id_ctlr_buf==NULL)
- {
- printk(KERN_WARNING "cpqarray: Out of memory. "
- "Unable to start background processing.\n");
- return;
- }
- ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
- id_ctlr_buf, 0, 0, 0, 0);
- if(ret_code != IO_OK)
- printk(KERN_WARNING "cpqarray: Unable to start"
- " background processing\n");
-
- kfree(id_ctlr_buf);
-}
-/*****************************************************************
- getgeometry
- Get ida logical volume geometry from the controller.
- This is a large bit of code which once existed in two flavors.
- It is used only at init time.
-*****************************************************************/
-static void getgeometry(int ctlr)
-{
- id_log_drv_t *id_ldrive;
- id_ctlr_t *id_ctlr_buf;
- sense_log_drv_stat_t *id_lstatus_buf;
- config_t *sense_config_buf;
- unsigned int log_unit, log_index;
- int ret_code, size;
- drv_info_t *drv;
- ctlr_info_t *info_p = hba[ctlr];
- int i;
-
- info_p->log_drv_map = 0;
-
- id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL);
- if (!id_ldrive) {
- printk( KERN_ERR "cpqarray: out of memory.\n");
- goto err_0;
- }
-
- id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL);
- if (!id_ctlr_buf) {
- printk( KERN_ERR "cpqarray: out of memory.\n");
- goto err_1;
- }
-
- id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
- if (!id_lstatus_buf) {
- printk( KERN_ERR "cpqarray: out of memory.\n");
- goto err_2;
- }
-
- sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL);
- if (!sense_config_buf) {
- printk( KERN_ERR "cpqarray: out of memory.\n");
- goto err_3;
- }
-
- info_p->phys_drives = 0;
- info_p->log_drv_map = 0;
- info_p->drv_assign_map = 0;
- info_p->drv_spare_map = 0;
- info_p->mp_failed_drv_map = 0; /* only initialized here */
- /* Get the controller's info */
- ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
- if (ret_code == IO_ERROR) {
- /*
- * If we can't get the controller info, set the logical drive map to 0,
- * so the idastubopen will fail on all logical drives
- * on the controller.
- */
- printk(KERN_ERR "cpqarray: error sending ID controller\n");
- goto err_4;
- }
-
- info_p->log_drives = id_ctlr_buf->nr_drvs;
- for(i=0;i<4;i++)
- info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
- info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
-
- printk(" (%s)\n", info_p->product_name);
- /*
- * Initialize the logical drive index
- */
- log_index = 0;
- /*
- * Get drive geometry for all logical drives
- */
- if (id_ctlr_buf->nr_drvs > 16)
- printk(KERN_WARNING "cpqarray ida%d: This driver supports "
- "16 logical drives per controller.\n. "
- " Additional drives will not be "
- "detected\n", ctlr);
-
- for (log_unit = 0;
- (log_index < id_ctlr_buf->nr_drvs)
- && (log_unit < NWD);
- log_unit++) {
- size = sizeof(sense_log_drv_stat_t);
-
- /*
- Send "Identify logical drive status" cmd
- */
- ret_code = sendcmd(SENSE_LOG_DRV_STAT,
- ctlr, id_lstatus_buf, size, 0, 0, log_unit);
- if (ret_code == IO_ERROR) {
- /*
- If we can't get the logical drive status, set
- the logical drive map to 0, so the
- idastubopen will fail for all logical drives
- on the controller.
- */
- info_p->log_drv_map = 0;
- printk( KERN_WARNING
- "cpqarray ida%d: idaGetGeometry - Controller"
- " failed to report status of logical drive %d\n"
- "Access to this controller has been disabled\n",
- ctlr, log_unit);
- goto err_4;
- }
- /*
- Make sure the logical drive is configured
- */
- if (id_lstatus_buf->status != LOG_NOT_CONF) {
- ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
- sizeof(id_log_drv_t), 0, 0, log_unit);
- /*
- If there was an error, the bit for this
- logical drive won't be set and
- idastubopen will return an error.
- */
- if (ret_code != IO_ERROR) {
- drv = &info_p->drv[log_unit];
- drv->blk_size = id_ldrive->blk_size;
- drv->nr_blks = id_ldrive->nr_blks;
- drv->cylinders = id_ldrive->drv.cyl;
- drv->heads = id_ldrive->drv.heads;
- drv->sectors = id_ldrive->drv.sect_per_track;
- info_p->log_drv_map |= (1 << log_unit);
-
- printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
- ctlr, log_unit, drv->blk_size, drv->nr_blks);
- ret_code = sendcmd(SENSE_CONFIG,
- ctlr, sense_config_buf,
- sizeof(config_t), 0, 0, log_unit);
- if (ret_code == IO_ERROR) {
- info_p->log_drv_map = 0;
- printk(KERN_ERR "cpqarray: error sending sense config\n");
- goto err_4;
- }
-
- info_p->phys_drives =
- sense_config_buf->ctlr_phys_drv;
- info_p->drv_assign_map
- |= sense_config_buf->drv_asgn_map;
- info_p->drv_assign_map
- |= sense_config_buf->spare_asgn_map;
- info_p->drv_spare_map
- |= sense_config_buf->spare_asgn_map;
- } /* end of if no error on id_ldrive */
- log_index = log_index + 1;
- } /* end of if logical drive configured */
- } /* end of for log_unit */
-
- /* Free all the buffers and return */
-err_4:
- kfree(sense_config_buf);
-err_3:
- kfree(id_lstatus_buf);
-err_2:
- kfree(id_ctlr_buf);
-err_1:
- kfree(id_ldrive);
-err_0:
- return;
-}
-
-static void __exit cpqarray_exit(void)
-{
- int i;
-
- pci_unregister_driver(&cpqarray_pci_driver);
-
- /* Double check that all controller entries have been removed */
- for(i=0; i<MAX_CTLR; i++) {
- if (hba[i] != NULL) {
- printk(KERN_WARNING "cpqarray: Removing EISA "
- "controller %d\n", i);
- cpqarray_remove_one_eisa(i);
- }
- }
-
- remove_proc_entry("driver/cpqarray", NULL);
-}
-
-module_init(cpqarray_init)
-module_exit(cpqarray_exit)
diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
deleted file mode 100644
index be73e9d579c5..000000000000
--- a/drivers/block/cpqarray.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- * If you want to make changes, improve or add functionality to this
- * driver, you'll probably need the Compaq Array Controller Interface
- * Specification (Document number ECG086/1198)
- */
-#ifndef CPQARRAY_H
-#define CPQARRAY_H
-
-#ifdef __KERNEL__
-#include <linux/blkdev.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/timer.h>
-#endif
-
-#include "ida_cmd.h"
-
-#define IO_OK 0
-#define IO_ERROR 1
-#define NWD 16
-#define NWD_SHIFT 4
-
-#define IDA_TIMER (5*HZ)
-#define IDA_TIMEOUT (10*HZ)
-
-#define MISC_NONFATAL_WARN 0x01
-
-typedef struct {
- unsigned blk_size;
- unsigned nr_blks;
- unsigned cylinders;
- unsigned heads;
- unsigned sectors;
- int usage_count;
-} drv_info_t;
-
-#ifdef __KERNEL__
-
-struct ctlr_info;
-typedef struct ctlr_info ctlr_info_t;
-
-struct access_method {
- void (*submit_command)(ctlr_info_t *h, cmdlist_t *c);
- void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
- unsigned long (*fifo_full)(ctlr_info_t *h);
- unsigned long (*intr_pending)(ctlr_info_t *h);
- unsigned long (*command_completed)(ctlr_info_t *h);
-};
-
-struct board_type {
- __u32 board_id;
- char *product_name;
- struct access_method *access;
-};
-
-struct ctlr_info {
- int ctlr;
- char devname[8];
- __u32 log_drv_map;
- __u32 drv_assign_map;
- __u32 drv_spare_map;
- __u32 mp_failed_drv_map;
-
- char firm_rev[4];
- int ctlr_sig;
-
- int log_drives;
- int phys_drives;
-
- struct pci_dev *pci_dev; /* NULL if EISA */
- __u32 board_id;
- char *product_name;
-
- void __iomem *vaddr;
- unsigned long paddr;
- unsigned long io_mem_addr;
- unsigned long io_mem_length;
- int intr;
- int usage_count;
- drv_info_t drv[NWD];
- struct proc_dir_entry *proc;
-
- struct access_method access;
-
- cmdlist_t *reqQ;
- cmdlist_t *cmpQ;
- cmdlist_t *cmd_pool;
- dma_addr_t cmd_pool_dhandle;
- unsigned long *cmd_pool_bits;
- struct request_queue *queue;
- spinlock_t lock;
-
- unsigned int Qdepth;
- unsigned int maxQsinceinit;
-
- unsigned int nr_requests;
- unsigned int nr_allocs;
- unsigned int nr_frees;
- struct timer_list timer;
- unsigned int misc_tflags;
-};
-
-#define IDA_LOCK(i) (&hba[i]->lock)
-
-#endif
-
-#endif /* CPQARRAY_H */
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 99e773cb70d0..3d31761c0ed0 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -21,9 +21,9 @@
#include <linux/module.h>
+#include <crypto/skcipher.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/crypto.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <asm/uaccess.h>
@@ -46,7 +46,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
char *cipher;
char *mode;
char *cmsp = cms; /* c-m string pointer */
- struct crypto_blkcipher *tfm;
+ struct crypto_skcipher *tfm;
/* encryption breaks for non sector aligned offsets */
@@ -82,12 +82,12 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
*cmsp++ = ')';
*cmsp = 0;
- tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
- err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
+ err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key,
+ info->lo_encrypt_key_size);
if (err != 0)
goto out_free_tfm;
@@ -96,17 +96,14 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
return 0;
out_free_tfm:
- crypto_free_blkcipher(tfm);
+ crypto_free_skcipher(tfm);
out:
return err;
}
-typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
- struct scatterlist *sg_out,
- struct scatterlist *sg_in,
- unsigned int nsg);
+typedef int (*encdec_cbc_t)(struct skcipher_request *req);
static int
cryptoloop_transfer(struct loop_device *lo, int cmd,
@@ -114,11 +111,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
- struct crypto_blkcipher *tfm = lo->key_data;
- struct blkcipher_desc desc = {
- .tfm = tfm,
- .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
- };
+ struct crypto_skcipher *tfm = lo->key_data;
+ SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg_out;
struct scatterlist sg_in;
@@ -127,6 +121,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
unsigned in_offs, out_offs;
int err;
+ skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+
sg_init_table(&sg_out, 1);
sg_init_table(&sg_in, 1);
@@ -135,13 +133,13 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
in_offs = raw_off;
out_page = loop_page;
out_offs = loop_off;
- encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
+ encdecfunc = crypto_skcipher_decrypt;
} else {
in_page = loop_page;
in_offs = loop_off;
out_page = raw_page;
out_offs = raw_off;
- encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
+ encdecfunc = crypto_skcipher_encrypt;
}
while (size > 0) {
@@ -152,10 +150,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
sg_set_page(&sg_in, in_page, sz, in_offs);
sg_set_page(&sg_out, out_page, sz, out_offs);
- desc.info = iv;
- err = encdecfunc(&desc, &sg_out, &sg_in, sz);
+ skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
+ err = encdecfunc(req);
if (err)
- return err;
+ goto out;
IV++;
size -= sz;
@@ -163,7 +161,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
out_offs += sz;
}
- return 0;
+ err = 0;
+
+out:
+ skcipher_request_zero(req);
+ return err;
}
static int
@@ -175,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
static int
cryptoloop_release(struct loop_device *lo)
{
- struct crypto_blkcipher *tfm = lo->key_data;
+ struct crypto_skcipher *tfm = lo->key_data;
if (tfm != NULL) {
- crypto_free_blkcipher(tfm);
+ crypto_free_skcipher(tfm);
lo->key_data = NULL;
return 0;
}
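The cryptoloop hunks above migrate the driver from the old blkcipher interface to the skcipher API: an on-stack request now carries the tfm, flags, scatterlists and IV that struct blkcipher_desc used to hold, and skcipher_request_zero() scrubs it afterwards. A hedged sketch of one in-place pass, condensed from the calls shown above (kernel context assumed; the "cbc(aes)" name, the 512-byte sector size and the helper name are illustrative, not from the driver):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* One CBC pass over a 512-byte sector, following the pattern above. */
static int encrypt_sector(u8 *buf, const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free;

	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		sg_init_one(&sg, buf, 512);
		/* in-place: source and destination scatterlists coincide */
		skcipher_request_set_crypt(req, &sg, &sg, 512, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* scrub key material */
	}
out_free:
	crypto_free_skcipher(tfm);
	return err;
}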
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 34bc84efc29e..7a1cf7eaa71d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -26,13 +26,13 @@
#ifndef _DRBD_INT_H
#define _DRBD_INT_H
+#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
-#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
@@ -724,11 +724,11 @@ struct drbd_connection {
struct list_head transfer_log; /* all requests not yet fully processed */
- struct crypto_hash *cram_hmac_tfm;
- struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
- struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
- struct crypto_hash *csums_tfm;
- struct crypto_hash *verify_tfm;
+ struct crypto_shash *cram_hmac_tfm;
+ struct crypto_ahash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
+ struct crypto_ahash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
+ struct crypto_ahash *csums_tfm;
+ struct crypto_ahash *verify_tfm;
void *int_dig_in;
void *int_dig_vv;
@@ -1327,8 +1327,8 @@ struct bm_extent {
#endif
#endif
-/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
- * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+ * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
* Since we may live in a mixed-platform cluster,
* we limit us to a platform agnostic constant here for now.
* A followup commit may allow even bigger BIO sizes,
@@ -1524,8 +1524,8 @@ static inline void ov_out_of_sync_print(struct drbd_device *device)
}
-extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *);
+extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
+extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5b43dfb79819..fa209773d494 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1340,7 +1340,7 @@ void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd
struct p_data *dp, int data_size)
{
if (peer_device->connection->peer_integrity_tfm)
- data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
@@ -1629,7 +1629,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
- crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+ crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -1718,7 +1718,7 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
- crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+ crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -2498,11 +2498,11 @@ void conn_free_crypto(struct drbd_connection *connection)
{
drbd_free_sock(connection);
- crypto_free_hash(connection->csums_tfm);
- crypto_free_hash(connection->verify_tfm);
- crypto_free_hash(connection->cram_hmac_tfm);
- crypto_free_hash(connection->integrity_tfm);
- crypto_free_hash(connection->peer_integrity_tfm);
+ crypto_free_ahash(connection->csums_tfm);
+ crypto_free_ahash(connection->verify_tfm);
+ crypto_free_shash(connection->cram_hmac_tfm);
+ crypto_free_ahash(connection->integrity_tfm);
+ crypto_free_ahash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index c055c5e12f24..1fd1dccebb6b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_max_hw_sectors(q, max_hw_sectors);
/* This is the workaround for "bio would need to, but cannot, be split" */
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
- blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+ blk_queue_segment_boundary(q, PAGE_SIZE-1);
if (b) {
struct drbd_connection *connection = first_peer_device(device)->connection;
@@ -2160,19 +2160,34 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c
}
struct crypto {
- struct crypto_hash *verify_tfm;
- struct crypto_hash *csums_tfm;
- struct crypto_hash *cram_hmac_tfm;
- struct crypto_hash *integrity_tfm;
+ struct crypto_ahash *verify_tfm;
+ struct crypto_ahash *csums_tfm;
+ struct crypto_shash *cram_hmac_tfm;
+ struct crypto_ahash *integrity_tfm;
};
static int
-alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
+alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
{
if (!tfm_name[0])
return NO_ERROR;
- *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
+ *tfm = crypto_alloc_shash(tfm_name, 0, 0);
+ if (IS_ERR(*tfm)) {
+ *tfm = NULL;
+ return err_alg;
+ }
+
+ return NO_ERROR;
+}
+
+static int
+alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
+{
+ if (!tfm_name[0])
+ return NO_ERROR;
+
+ *tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(*tfm)) {
*tfm = NULL;
return err_alg;
@@ -2187,24 +2202,24 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
char hmac_name[CRYPTO_MAX_ALG_NAME];
enum drbd_ret_code rv;
- rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg,
- ERR_CSUMS_ALG);
+ rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
+ ERR_CSUMS_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg,
- ERR_VERIFY_ALG);
+ rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
+ ERR_VERIFY_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
- ERR_INTEGRITY_ALG);
+ rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
+ ERR_INTEGRITY_ALG);
if (rv != NO_ERROR)
return rv;
if (new_net_conf->cram_hmac_alg[0] != 0) {
snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
new_net_conf->cram_hmac_alg);
- rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
- ERR_AUTH_ALG);
+ rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
+ ERR_AUTH_ALG);
}
return rv;
@@ -2212,10 +2227,10 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
static void free_crypto(struct crypto *crypto)
{
- crypto_free_hash(crypto->cram_hmac_tfm);
- crypto_free_hash(crypto->integrity_tfm);
- crypto_free_hash(crypto->csums_tfm);
- crypto_free_hash(crypto->verify_tfm);
+ crypto_free_shash(crypto->cram_hmac_tfm);
+ crypto_free_ahash(crypto->integrity_tfm);
+ crypto_free_ahash(crypto->csums_tfm);
+ crypto_free_ahash(crypto->verify_tfm);
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
@@ -2292,23 +2307,23 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(connection->net_conf, new_net_conf);
if (!rsr) {
- crypto_free_hash(connection->csums_tfm);
+ crypto_free_ahash(connection->csums_tfm);
connection->csums_tfm = crypto.csums_tfm;
crypto.csums_tfm = NULL;
}
if (!ovr) {
- crypto_free_hash(connection->verify_tfm);
+ crypto_free_ahash(connection->verify_tfm);
connection->verify_tfm = crypto.verify_tfm;
crypto.verify_tfm = NULL;
}
- crypto_free_hash(connection->integrity_tfm);
+ crypto_free_ahash(connection->integrity_tfm);
connection->integrity_tfm = crypto.integrity_tfm;
if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
/* Do this without trying to take connection->data.mutex again. */
__drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
- crypto_free_hash(connection->cram_hmac_tfm);
+ crypto_free_shash(connection->cram_hmac_tfm);
connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
mutex_unlock(&connection->resource->conf_update);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1957fe8601dc..050aaa1c0350 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1627,7 +1627,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
digest_size = 0;
if (!trim && peer_device->connection->peer_integrity_tfm) {
- digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
/*
* FIXME: Receive the incoming digest into the receive buffer
* here, together with its struct p_data?
@@ -1741,7 +1741,7 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
digest_size = 0;
if (peer_device->connection->peer_integrity_tfm) {
- digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
if (err)
return err;
@@ -3321,7 +3321,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
int p_proto, p_discard_my_data, p_two_primaries, cf;
struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
char integrity_alg[SHARED_SECRET_MAX] = "";
- struct crypto_hash *peer_integrity_tfm = NULL;
+ struct crypto_ahash *peer_integrity_tfm = NULL;
void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
@@ -3402,14 +3402,14 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
* change.
*/
- peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+ peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
if (!peer_integrity_tfm) {
drbd_err(connection, "peer data-integrity-alg %s not supported\n",
integrity_alg);
goto disconnect;
}
- hash_size = crypto_hash_digestsize(peer_integrity_tfm);
+ hash_size = crypto_ahash_digestsize(peer_integrity_tfm);
int_dig_in = kmalloc(hash_size, GFP_KERNEL);
int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
if (!(int_dig_in && int_dig_vv)) {
@@ -3439,7 +3439,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
- crypto_free_hash(connection->peer_integrity_tfm);
+ crypto_free_ahash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
connection->peer_integrity_tfm = peer_integrity_tfm;
@@ -3457,7 +3457,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
disconnect_rcu_unlock:
rcu_read_unlock();
disconnect:
- crypto_free_hash(peer_integrity_tfm);
+ crypto_free_ahash(peer_integrity_tfm);
kfree(int_dig_in);
kfree(int_dig_vv);
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -3469,15 +3469,15 @@ disconnect:
* return: NULL (alg name was "")
* ERR_PTR(error) if something goes wrong
* or the crypto hash ptr, if it worked out ok. */
-static struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
+static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
const char *alg, const char *name)
{
- struct crypto_hash *tfm;
+ struct crypto_ahash *tfm;
if (!alg[0])
return NULL;
- tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
alg, name, PTR_ERR(tfm));
@@ -3530,8 +3530,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
struct drbd_device *device;
struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
- struct crypto_hash *verify_tfm = NULL;
- struct crypto_hash *csums_tfm = NULL;
+ struct crypto_ahash *verify_tfm = NULL;
+ struct crypto_ahash *csums_tfm = NULL;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
const int apv = connection->agreed_pro_version;
@@ -3678,14 +3678,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
if (verify_tfm) {
strcpy(new_net_conf->verify_alg, p->verify_alg);
new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_hash(peer_device->connection->verify_tfm);
+ crypto_free_ahash(peer_device->connection->verify_tfm);
peer_device->connection->verify_tfm = verify_tfm;
drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
}
if (csums_tfm) {
strcpy(new_net_conf->csums_alg, p->csums_alg);
new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_hash(peer_device->connection->csums_tfm);
+ crypto_free_ahash(peer_device->connection->csums_tfm);
peer_device->connection->csums_tfm = csums_tfm;
drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
}
@@ -3729,9 +3729,9 @@ disconnect:
mutex_unlock(&connection->resource->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
- crypto_free_hash(csums_tfm);
+ crypto_free_ahash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
- crypto_free_hash(verify_tfm);
+ crypto_free_ahash(verify_tfm);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
@@ -4925,14 +4925,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
{
struct drbd_socket *sock;
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
- struct scatterlist sg;
char *response = NULL;
char *right_response = NULL;
char *peers_ch = NULL;
unsigned int key_len;
char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
- struct hash_desc desc;
+ SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
struct packet_info pi;
struct net_conf *nc;
int err, rv;
@@ -4945,12 +4944,12 @@ static int drbd_do_auth(struct drbd_connection *connection)
memcpy(secret, nc->shared_secret, key_len);
rcu_read_unlock();
- desc.tfm = connection->cram_hmac_tfm;
- desc.flags = 0;
+ desc->tfm = connection->cram_hmac_tfm;
+ desc->flags = 0;
- rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
+ rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
- drbd_err(connection, "crypto_hash_setkey() failed with %d\n", rv);
+ drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
rv = -1;
goto fail;
}
@@ -5011,7 +5010,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
goto fail;
}
- resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
+ resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
drbd_err(connection, "kmalloc of response failed\n");
@@ -5019,10 +5018,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
goto fail;
}
- sg_init_table(&sg, 1);
- sg_set_buf(&sg, peers_ch, pi.size);
-
- rv = crypto_hash_digest(&desc, &sg, sg.length, response);
+ rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
if (rv) {
drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
@@ -5070,9 +5066,8 @@ static int drbd_do_auth(struct drbd_connection *connection)
goto fail;
}
- sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
-
- rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
+ rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
+ right_response);
if (rv) {
drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
@@ -5091,6 +5086,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
kfree(peers_ch);
kfree(response);
kfree(right_response);
+ shash_desc_zero(desc);
return rv;
}
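For the CRAM-HMAC handshake, drbd now uses the synchronous shash interface: SHASH_DESC_ON_STACK() plus crypto_shash_digest() replace the scatterlist-based crypto_hash_digest(), and shash_desc_zero() clears the descriptor before returning. The one-shot pattern in isolation, as a sketch built from the calls the diff introduces (the "hmac(sha256)" name and the helper are illustrative):

#include <crypto/hash.h>
#include <linux/err.h>

/* Compute a keyed digest over a flat buffer with the shash API. */
static int hmac_digest(const u8 *key, unsigned int keylen,
		       const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/* one-shot: init + update + final over a linear buffer */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);	/* scrub keyed state */
	}
	crypto_free_shash(tfm);
	return err;
}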
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index eff716c27b1f..4d87499f0d54 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -274,51 +274,56 @@ void drbd_request_endio(struct bio *bio)
complete_master_bio(device, &m);
}
-void drbd_csum_ee(struct crypto_hash *tfm, struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg;
struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
- desc.tfm = tfm;
- desc.flags = 0;
+ ahash_request_set_tfm(req, tfm);
+ ahash_request_set_callback(req, 0, NULL, NULL);
sg_init_table(&sg, 1);
- crypto_hash_init(&desc);
+ crypto_ahash_init(req);
while ((tmp = page_chain_next(page))) {
/* all but the last page will be fully used */
sg_set_page(&sg, page, PAGE_SIZE, 0);
- crypto_hash_update(&desc, &sg, sg.length);
+ ahash_request_set_crypt(req, &sg, NULL, sg.length);
+ crypto_ahash_update(req);
page = tmp;
}
/* and now the last, possibly only partially used page */
len = peer_req->i.size & (PAGE_SIZE - 1);
sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
- crypto_hash_update(&desc, &sg, sg.length);
- crypto_hash_final(&desc, digest);
+ ahash_request_set_crypt(req, &sg, digest, sg.length);
+ crypto_ahash_finup(req);
+ ahash_request_zero(req);
}
-void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
{
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg;
struct bio_vec bvec;
struct bvec_iter iter;
- desc.tfm = tfm;
- desc.flags = 0;
+ ahash_request_set_tfm(req, tfm);
+ ahash_request_set_callback(req, 0, NULL, NULL);
sg_init_table(&sg, 1);
- crypto_hash_init(&desc);
+ crypto_ahash_init(req);
bio_for_each_segment(bvec, bio, iter) {
sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
- crypto_hash_update(&desc, &sg, sg.length);
+ ahash_request_set_crypt(req, &sg, NULL, sg.length);
+ crypto_ahash_update(req);
}
- crypto_hash_final(&desc, digest);
+ ahash_request_set_crypt(req, NULL, digest, 0);
+ crypto_ahash_final(req);
+ ahash_request_zero(req);
}
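/*
 * drbd's checksums cover page chains and bios, so the conversion above keeps
 * the scatterlist-capable ahash interface: one on-stack request is reused
 * across crypto_ahash_init(), per-segment crypto_ahash_update() and a closing
 * crypto_ahash_finup(), then scrubbed with ahash_request_zero(). A sketch of
 * that skeleton over two flat buffers (kernel context assumed; the helper
 * name is illustrative, and unlike the driver it checks each return value):
 */
#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int csum_two_bufs(struct crypto_ahash *tfm,
			 void *a, unsigned int alen,
			 void *b, unsigned int blen, void *digest)
{
	AHASH_REQUEST_ON_STACK(req, tfm);
	struct scatterlist sg;
	int err;

	ahash_request_set_tfm(req, tfm);
	ahash_request_set_callback(req, 0, NULL, NULL);
	sg_init_table(&sg, 1);

	err = crypto_ahash_init(req);
	if (err)
		goto out;

	sg_set_buf(&sg, a, alen);
	ahash_request_set_crypt(req, &sg, NULL, sg.length);
	err = crypto_ahash_update(req);
	if (err)
		goto out;

	sg_set_buf(&sg, b, blen);
	ahash_request_set_crypt(req, &sg, digest, sg.length);
	err = crypto_ahash_finup(req);	/* last update + final in one call */
out:
	ahash_request_zero(req);
	return err;
}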
/* MAYBE merge common code with w_e_end_ov_req */
@@ -337,7 +342,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
sector_t sector = peer_req->i.sector;
@@ -1113,7 +1118,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
* a real fix would be much more involved,
* introducing more locking mechanisms */
if (peer_device->connection->csums_tfm) {
- digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
D_ASSERT(device, digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
@@ -1163,7 +1168,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */
@@ -1235,7 +1240,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
diff --git a/drivers/block/ida_cmd.h b/drivers/block/ida_cmd.h
deleted file mode 100644
index 98b5746b3089..000000000000
--- a/drivers/block/ida_cmd.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-#ifndef ARRAYCMD_H
-#define ARRAYCMD_H
-
-#include <asm/types.h>
-#if 0
-#include <linux/blkdev.h>
-#endif
-
-/* for the Smart Array 42XX cards */
-#define S42XX_REQUEST_PORT_OFFSET 0x40
-#define S42XX_REPLY_INTR_MASK_OFFSET 0x34
-#define S42XX_REPLY_PORT_OFFSET 0x44
-#define S42XX_INTR_STATUS 0x30
-
-#define S42XX_INTR_OFF 0x08
-#define S42XX_INTR_PENDING 0x08
-
-#define COMMAND_FIFO 0x04
-#define COMMAND_COMPLETE_FIFO 0x08
-#define INTR_MASK 0x0C
-#define INTR_STATUS 0x10
-#define INTR_PENDING 0x14
-
-#define FIFO_NOT_EMPTY 0x01
-#define FIFO_NOT_FULL 0x02
-
-#define BIG_PROBLEM 0x40
-#define LOG_NOT_CONF 2
-
-#pragma pack(1)
-typedef struct {
- __u32 size;
- __u32 addr;
-} sg_t;
-
-#define RCODE_NONFATAL 0x02
-#define RCODE_FATAL 0x04
-#define RCODE_INVREQ 0x10
-typedef struct {
- __u16 next;
- __u8 cmd;
- __u8 rcode;
- __u32 blk;
- __u16 blk_cnt;
- __u8 sg_cnt;
- __u8 reserved;
-} rhdr_t;
-
-#define SG_MAX 32
-typedef struct {
- rhdr_t hdr;
- sg_t sg[SG_MAX];
- __u32 bp;
-} rblk_t;
-
-typedef struct {
- __u8 unit;
- __u8 prio;
- __u16 size;
-} chdr_t;
-
-#define CMD_RWREQ 0x00
-#define CMD_IOCTL_PEND 0x01
-#define CMD_IOCTL_DONE 0x02
-
-typedef struct cmdlist {
- chdr_t hdr;
- rblk_t req;
- __u32 size;
- int retry_cnt;
- __u32 busaddr;
- int ctlr;
- struct cmdlist *prev;
- struct cmdlist *next;
- struct request *rq;
- int type;
-} cmdlist_t;
-
-#define ID_CTLR 0x11
-typedef struct {
- __u8 nr_drvs;
- __u32 cfg_sig;
- __u8 firm_rev[4];
- __u8 rom_rev[4];
- __u8 hw_rev;
- __u32 bb_rev;
- __u32 drv_present_map;
- __u32 ext_drv_map;
- __u32 board_id;
- __u8 cfg_error;
- __u32 non_disk_bits;
- __u8 bad_ram_addr;
- __u8 cpu_rev;
- __u8 pdpi_rev;
- __u8 epic_rev;
- __u8 wcxc_rev;
- __u8 marketing_rev;
- __u8 ctlr_flags;
- __u8 host_flags;
- __u8 expand_dis;
- __u8 scsi_chips;
- __u32 max_req_blocks;
- __u32 ctlr_clock;
- __u8 drvs_per_bus;
- __u16 big_drv_present_map[8];
- __u16 big_ext_drv_map[8];
- __u16 big_non_disk_map[8];
- __u16 task_flags;
- __u8 icl_bus;
- __u8 red_modes;
- __u8 cur_red_mode;
- __u8 red_ctlr_stat;
- __u8 red_fail_reason;
- __u8 reserved[403];
-} id_ctlr_t;
-
-typedef struct {
- __u16 cyl;
- __u8 heads;
- __u8 xsig;
- __u8 psectors;
- __u16 wpre;
- __u8 maxecc;
- __u8 drv_ctrl;
- __u16 pcyls;
- __u8 pheads;
- __u16 landz;
- __u8 sect_per_track;
- __u8 cksum;
-} drv_param_t;
-
-#define ID_LOG_DRV 0x10
-typedef struct {
- __u16 blk_size;
- __u32 nr_blks;
- drv_param_t drv;
- __u8 fault_tol;
- __u8 reserved;
- __u8 bios_disable;
-} id_log_drv_t;
-
-#define ID_LOG_DRV_EXT 0x18
-typedef struct {
- __u32 log_drv_id;
- __u8 log_drv_label[64];
- __u8 reserved[418];
-} id_log_drv_ext_t;
-
-#define SENSE_LOG_DRV_STAT 0x12
-typedef struct {
- __u8 status;
- __u32 fail_map;
- __u16 read_err[32];
- __u16 write_err[32];
- __u8 drv_err_data[256];
- __u8 drq_timeout[32];
- __u32 blks_to_recover;
- __u8 drv_recovering;
- __u16 remap_cnt[32];
- __u32 replace_drv_map;
- __u32 act_spare_map;
- __u8 spare_stat;
- __u8 spare_repl_map[32];
- __u32 repl_ok_map;
- __u8 media_exch;
- __u8 cache_fail;
- __u8 expn_fail;
- __u8 unit_flags;
- __u16 big_fail_map[8];
- __u16 big_remap_map[128];
- __u16 big_repl_map[8];
- __u16 big_act_spare_map[8];
- __u8 big_spar_repl_map[128];
- __u16 big_repl_ok_map[8];
- __u8 big_drv_rebuild;
- __u8 reserved[36];
-} sense_log_drv_stat_t;
-
-#define START_RECOVER 0x13
-
-#define ID_PHYS_DRV 0x15
-typedef struct {
- __u8 scsi_bus;
- __u8 scsi_id;
- __u16 blk_size;
- __u32 nr_blks;
- __u32 rsvd_blks;
- __u8 drv_model[40];
- __u8 drv_sn[40];
- __u8 drv_fw[8];
- __u8 scsi_iq_bits;
- __u8 compaq_drv_stmp;
- __u8 last_fail;
- __u8 phys_drv_flags;
- __u8 phys_drv_flags1;
- __u8 scsi_lun;
- __u8 phys_drv_flags2;
- __u8 reserved;
- __u32 spi_speed_rules;
- __u8 phys_connector[2];
- __u8 phys_box_on_bus;
- __u8 phys_bay_in_box;
-} id_phys_drv_t;
-
-#define BLINK_DRV_LEDS 0x16
-typedef struct {
- __u32 blink_duration;
- __u32 reserved;
- __u8 blink[256];
- __u8 reserved1[248];
-} blink_drv_leds_t;
-
-#define SENSE_BLINK_LEDS 0x17
-typedef struct {
- __u32 blink_duration;
- __u32 btime_elap;
- __u8 blink[256];
- __u8 reserved1[248];
-} sense_blink_leds_t;
-
-#define IDA_READ 0x20
-#define IDA_WRITE 0x30
-#define IDA_WRITE_MEDIA 0x31
-#define RESET_TO_DIAG 0x40
-#define DIAG_PASS_THRU 0x41
-
-#define SENSE_CONFIG 0x50
-#define SET_CONFIG 0x51
-typedef struct {
- __u32 cfg_sig;
- __u16 compat_port;
- __u8 data_dist_mode;
- __u8 surf_an_ctrl;
- __u16 ctlr_phys_drv;
- __u16 log_unit_phys_drv;
- __u16 fault_tol_mode;
- __u8 phys_drv_param[16];
- drv_param_t drv;
- __u32 drv_asgn_map;
- __u16 dist_factor;
- __u32 spare_asgn_map;
- __u8 reserved[6];
- __u16 os;
- __u8 ctlr_order;
- __u8 extra_info;
- __u32 data_offs;
- __u8 parity_backedout_write_drvs;
- __u8 parity_dist_mode;
- __u8 parity_shift_fact;
- __u8 bios_disable_flag;
- __u32 blks_on_vol;
- __u32 blks_per_drv;
- __u8 scratch[16];
- __u16 big_drv_map[8];
- __u16 big_spare_map[8];
- __u8 ss_source_vol;
- __u8 mix_drv_cap_range;
- struct {
- __u16 big_drv_map[8];
- __u32 blks_per_drv;
- __u16 fault_tol_mode;
- __u16 dist_factor;
- } MDC_range[4];
- __u8 reserved1[248];
-} config_t;
-
-#define BYPASS_VOL_STATE 0x52
-#define SS_CREATE_VOL 0x53
-#define CHANGE_CONFIG 0x54
-#define SENSE_ORIG_CONF 0x55
-#define REORDER_LOG_DRV 0x56
-typedef struct {
- __u8 old_units[32];
-} reorder_log_drv_t;
-
-#define LABEL_LOG_DRV 0x57
-typedef struct {
- __u8 log_drv_label[64];
-} label_log_drv_t;
-
-#define SS_TO_VOL 0x58
-
-#define SET_SURF_DELAY 0x60
-typedef struct {
- __u16 delay;
- __u8 reserved[510];
-} surf_delay_t;
-
-#define SET_OVERHEAT_DELAY 0x61
-typedef struct {
- __u16 delay;
-} overhead_delay_t;
-
-#define SET_MP_DELAY
-typedef struct {
- __u16 delay;
- __u8 reserved[510];
-} mp_delay_t;
-
-#define PASSTHRU_A 0x91
-typedef struct {
- __u8 target;
- __u8 bus;
- __u8 lun;
- __u32 timeout;
- __u32 flags;
- __u8 status;
- __u8 error;
- __u8 cdb_len;
- __u8 sense_error;
- __u8 sense_key;
- __u32 sense_info;
- __u8 sense_code;
- __u8 sense_qual;
- __u32 residual;
- __u8 reserved[4];
- __u8 cdb[12];
-} scsi_param_t;
-
-#define RESUME_BACKGROUND_ACTIVITY 0x99
-#define SENSE_CONTROLLER_PERFORMANCE 0xa8
-#define FLUSH_CACHE 0xc2
-#define COLLECT_BUFFER 0xd2
-#define READ_FLASH_ROM 0xf6
-#define WRITE_FLASH_ROM 0xf7
-#pragma pack()
-
-#endif /* ARRAYCMD_H */
diff --git a/drivers/block/ida_ioctl.h b/drivers/block/ida_ioctl.h
deleted file mode 100644
index 888fff9caed0..000000000000
--- a/drivers/block/ida_ioctl.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-#ifndef IDA_IOCTL_H
-#define IDA_IOCTL_H
-
-#include "ida_cmd.h"
-#include "cpqarray.h"
-
-#define IDAGETDRVINFO 0x27272828
-#define IDAPASSTHRU 0x28282929
-#define IDAGETCTLRSIG 0x29293030
-#define IDAREVALIDATEVOLS 0x30303131
-#define IDADRIVERVERSION 0x31313232
-#define IDAGETPCIINFO 0x32323333
-
-typedef struct _ida_pci_info_struct
-{
- unsigned char bus;
- unsigned char dev_fn;
- __u32 board_id;
-} ida_pci_info_struct;
-/*
- * Normally, the ioctl determines the logical unit for this command by
- * the major/minor number of the fd passed to ioctl. If you need to send
- * a command to a different/nonexistent unit (such as during config), you
- * can override the normal behavior by setting the unit valid bit. (Normally,
- * it should be zero) The controller the command is sent to is still
- * determined by the major number of the open device.
- */
-
-#define UNITVALID 0x80
-typedef struct {
- __u8 cmd;
- __u8 rcode;
- __u8 unit;
- __u32 blk;
- __u16 blk_cnt;
-
-/* currently, sg_cnt is assumed to be 1: only the 0th element of sg is used */
- struct {
- void __user *addr;
- size_t size;
- } sg[SG_MAX];
- int sg_cnt;
-
- union ctlr_cmds {
- drv_info_t drv;
- unsigned char buf[1024];
-
- id_ctlr_t id_ctlr;
- drv_param_t drv_param;
- id_log_drv_t id_log_drv;
- id_log_drv_ext_t id_log_drv_ext;
- sense_log_drv_stat_t sense_log_drv_stat;
- id_phys_drv_t id_phys_drv;
- blink_drv_leds_t blink_drv_leds;
- sense_blink_leds_t sense_blink_leds;
- config_t config;
- reorder_log_drv_t reorder_log_drv;
- label_log_drv_t label_log_drv;
- surf_delay_t surf_delay;
- overhead_delay_t overhead_delay;
- mp_delay_t mp_delay;
- scsi_param_t scsi_param;
- } c;
-} ida_ioctl_t;
-
-#endif /* IDA_IOCTL_H */
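
The UNITVALID override described in the comment above was used roughly as follows. This is an illustrative sketch of the interface being deleted here; the ioctl invocation and field values are examples, not surviving code:

    /* Illustrative only: fill the (now-removed) passthrough structure
     * to direct an ID_LOG_DRV at logical unit 3 explicitly, rather
     * than deriving the unit from the fd's minor number. */
    ida_ioctl_t io;

    memset(&io, 0, sizeof(io));
    io.cmd  = ID_LOG_DRV;
    io.unit = 3 | UNITVALID;   /* override the minor-number lookup */
    /* ioctl(fd, IDAPASSTHRU, &io);  -- controller still chosen by fd */
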
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq));
+ /*
+ * This bio may start in the middle of the 'bvec' because
+ * of bio splitting, so the offset within the bvec must be
+ * passed to the iov iterator.
+ */
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
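
The fix above seeds the iterator with bi_bvec_done so that I/O starts partway into the first segment. The standalone C sketch below illustrates the same idea with a plain iovec array; copy_from_vec and everything in it are invented for illustration and are not kernel API:

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Copy len bytes out of an iovec array, starting skip bytes into
     * the first segment -- the analogue of iov_iter.iov_offset. */
    static size_t copy_from_vec(char *dst, size_t len,
                                const struct iovec *iov, int cnt, size_t skip)
    {
        size_t copied = 0;
        int i;

        for (i = 0; i < cnt && copied < len; i++) {
            size_t off = i ? 0 : skip;
            size_t n;

            if (off >= iov[i].iov_len)
                continue;          /* first segment fully consumed */
            n = iov[i].iov_len - off;
            if (n > len - copied)
                n = len - copied;
            memcpy(dst + copied, (char *)iov[i].iov_base + off, n);
            copied += n;
        }
        return copied;
    }

    int main(void)
    {
        char a[] = "hello", b[] = "world", out[16] = "";
        struct iovec iov[2] = {
            { .iov_base = a, .iov_len = 5 },
            { .iov_base = b, .iov_len = 5 },
        };

        /* Start 3 bytes into the first segment, as a split bio might. */
        copy_from_vec(out, 7, iov, 2, 3);
        printf("%s\n", out);       /* prints "loworld" */
        return 0;
    }
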
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 9b180dbbd03c..25824c1697c5 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
struct request *rq;
+ if (mtip_check_surprise_removal(dd->pdev))
+ return NULL;
+
rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
+ if (IS_ERR(rq))
+ return NULL;
+
return blk_mq_rq_to_pdu(rq);
}
@@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
"Command tag %d failed due to TFE\n", tag);
}
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
-
rq = mtip_rq_from_tag(dd, tag);
- if (unlikely(cmd->unaligned))
- up(&port->cmd_slot_unal);
-
- blk_mq_end_request(rq, status ? -EIO : 0);
+ blk_mq_complete_request(rq, status);
}
/*
@@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
dev_warn(&port->dd->pdev->dev,
"Internal command %d completed with TFE\n", tag);
+ command->comp_func = NULL;
+ command->comp_data = NULL;
complete(waiting);
}
@@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
port = dd->port;
- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
@@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
cmd->comp_func(port, MTIP_TAG_INTERNAL,
cmd, PORT_IRQ_TF_ERR);
}
- goto handle_tfe_exit;
+ return;
}
/* clear the tag accumulator */
@@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
fail_reason = "thermal shutdown";
}
if (buf[288] == 0xBF) {
- set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
@@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
-
-handle_tfe_exit:
- /* clear eh_active */
- clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
}
/*
@@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
(fis->features == 0x27 || fis->features == 0x72 ||
fis->features == 0x62 || fis->features == 0x26))) {
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+ clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
/* Com reset after secure erase or lowlevel format */
mtip_restart_port(port);
clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
@@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
*
* @port Pointer to port data structure
* @timeout Max duration to wait (ms)
+ * @atomic gfp_t flag indicating whether the caller may block
*
* return value
* 0 Success
* -EBUSY Commands still active
*/
-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
+ gfp_t atomic)
{
unsigned long to;
unsigned int n;
@@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
to = jiffies + msecs_to_jiffies(timeout);
do {
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
- test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
+ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
+ atomic == GFP_KERNEL) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
- msleep(100);
+ if (atomic == GFP_KERNEL)
+ msleep(100);
+ else {
+ cpu_relax();
+ udelay(100);
+ }
+
if (mtip_check_surprise_removal(port->dd->pdev))
goto err_fault;
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
- goto err_fault;
/*
* Ignore s_active bit 0 of array element 0.
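
The new atomic argument picks between the two waiting strategies visible in the hunk above: sleep-based polling when the caller may block (GFP_KERNEL) and busy-waiting otherwise. A standalone sketch of that pattern, with invented names and deliberately coarse time accounting:

    #include <stdio.h>
    #include <time.h>

    /* Poll until *busy clears or roughly timeout_ms elapses.  may_sleep
     * mirrors the atomic == GFP_KERNEL test: sleep if allowed, spin if not. */
    static int wait_idle(volatile int *busy, int may_sleep, int timeout_ms)
    {
        struct timespec slice = { 0, 100 * 1000000L };   /* 100 ms */
        int elapsed = 0;

        while (*busy && elapsed < timeout_ms) {
            if (may_sleep) {
                nanosleep(&slice, NULL);                 /* msleep(100) */
            } else {
                volatile long spin;
                for (spin = 0; spin < 1000000L; spin++)
                    ;                                    /* udelay-style spin */
            }
            elapsed += 100;                              /* coarse accounting */
        }
        return *busy ? -1 : 0;                           /* -EBUSY analogue */
    }

    int main(void)
    {
        int busy = 0;
        printf("%d\n", wait_idle(&busy, 1, 1000));       /* prints 0 */
        return 0;
    }
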
@@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
int rv = 0;
+ unsigned long start;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
int_cmd = mtip_get_int_command(dd);
+ if (!int_cmd) {
+ dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
+ return -EFAULT;
+ }
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
if (mtip_quiesce_io(port,
- MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
+ MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
dev_warn(&dd->pdev->dev,
"Failed to quiesce IO\n");
mtip_put_int_command(dd, int_cmd);
@@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Populate the command header */
int_cmd->command_header->byte_count = 0;
+ start = jiffies;
+
/* Issue the command to the hardware */
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
@@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if ((rv = wait_for_completion_interruptible_timeout(
&wait,
msecs_to_jiffies(timeout))) <= 0) {
+
if (rv == -ERESTARTSYS) { /* interrupted */
dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %lu ms\n",
- fis->command, timeout);
+ "Internal command [%02X] was interrupted after %u ms\n",
+ fis->command,
+ jiffies_to_msecs(jiffies - start));
rv = -EINTR;
goto exec_ic_exit;
} else if (rv == 0) /* timeout */
@@ -2039,7 +2051,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
outbuf,
taskout,
DMA_TO_DEVICE);
- if (outbuf_dma == 0) {
+ if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -2056,7 +2068,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
inbuf_dma = pci_map_single(dd->pdev,
inbuf,
taskin, DMA_FROM_DEVICE);
- if (inbuf_dma == 0) {
+ if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -2890,6 +2902,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
return -EFAULT;
}
+static void mtip_softirq_done_fn(struct request *rq)
+{
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct driver_data *dd = rq->q->queuedata;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
+ cmd->direction);
+
+ if (unlikely(cmd->unaligned))
+ up(&dd->port->cmd_slot_unal);
+
+ blk_mq_end_request(rq, rq->errors);
+}
+
+static void mtip_abort_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
+
+ clear_bit(req->tag, dd->port->cmds_to_issue);
+ req->errors = -EIO;
+ mtip_softirq_done_fn(req);
+}
+
+static void mtip_queue_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ set_bit(req->tag, dd->port->cmds_to_issue);
+ blk_abort_request(req);
+}
+
/*
* service thread to issue queued commands
*
@@ -2902,7 +2950,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
static int mtip_service_thread(void *data)
{
struct driver_data *dd = (struct driver_data *)data;
- unsigned long slot, slot_start, slot_wrap;
+ unsigned long slot, slot_start, slot_wrap, to;
unsigned int num_cmd_slots = dd->slot_groups * 32;
struct mtip_port *port = dd->port;
@@ -2917,9 +2965,7 @@ static int mtip_service_thread(void *data)
* is in progress nor error handling is active
*/
wait_event_interruptible(port->svc_wait, (port->flags) &&
- !(port->flags & MTIP_PF_PAUSE_IO));
-
- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+ (port->flags & MTIP_PF_SVC_THD_WORK));
if (kthread_should_stop() ||
test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
@@ -2929,6 +2975,8 @@ static int mtip_service_thread(void *data)
&dd->dd_flag)))
goto st_out;
+ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
restart_eh:
/* Demux bits: start with error handling */
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
@@ -2939,6 +2987,32 @@ restart_eh:
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
goto restart_eh;
+ if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
+ to = jiffies + msecs_to_jiffies(5000);
+
+ do {
+ mdelay(100);
+ } while (atomic_read(&dd->irq_workers_active) != 0 &&
+ time_before(jiffies, to));
+
+ if (atomic_read(&dd->irq_workers_active) != 0)
+ dev_warn(&dd->pdev->dev,
+ "Completion workers still active!");
+
+ spin_lock(dd->queue->queue_lock);
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_queue_cmd, dd);
+ spin_unlock(dd->queue->queue_lock);
+
+ set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
+
+ if (mtip_device_reset(dd))
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_abort_cmd, dd);
+
+ clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
+ }
+
if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
@@ -2971,10 +3045,8 @@ restart_eh:
}
if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
- if (mtip_ftl_rebuild_poll(dd) < 0)
- set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
- &dd->dd_flag);
- clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
+ if (mtip_ftl_rebuild_poll(dd) == 0)
+ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
}
}
@@ -3089,7 +3161,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed.\n");
- /* TODO */
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
}
}
@@ -3263,20 +3335,25 @@ out1:
return rv;
}
-static void mtip_standby_drive(struct driver_data *dd)
+static int mtip_standby_drive(struct driver_data *dd)
{
- if (dd->sr)
- return;
+ int rv = 0;
+ if (dd->sr || !dd->port)
+ return -ENODEV;
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
- !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
- if (mtip_standby_immediate(dd->port))
+ !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
+ !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
+ rv = mtip_standby_immediate(dd->port);
+ if (rv)
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
+ }
+ return rv;
}
/*
@@ -3289,10 +3366,6 @@ static void mtip_standby_drive(struct driver_data *dd)
*/
static int mtip_hw_exit(struct driver_data *dd)
{
- /*
- * Send standby immediate (E0h) to the drive so that it
- * saves its state.
- */
if (!dd->sr) {
/* de-initialize the port. */
mtip_deinit_port(dd->port);
@@ -3334,8 +3407,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- if (!dd->sr && dd->port)
- mtip_standby_immediate(dd->port);
+ mtip_standby_drive(dd);
return 0;
}
@@ -3358,7 +3430,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
* Send standby immediate (E0h) to the drive
* so that it saves its state.
*/
- if (mtip_standby_immediate(dd->port) != 0) {
+ if (mtip_standby_drive(dd) != 0) {
dev_err(&dd->pdev->dev,
"Failed standby-immediate command\n");
return -EFAULT;
@@ -3596,6 +3668,28 @@ static int mtip_block_getgeo(struct block_device *dev,
return 0;
}
+static int mtip_block_open(struct block_device *dev, fmode_t mode)
+{
+ struct driver_data *dd;
+
+ if (dev && dev->bd_disk) {
+ dd = (struct driver_data *) dev->bd_disk->private_data;
+
+ if (dd) {
+ if (test_bit(MTIP_DDF_REMOVAL_BIT,
+ &dd->dd_flag)) {
+ return -ENODEV;
+ }
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+void mtip_block_release(struct gendisk *disk, fmode_t mode)
+{
+}
+
/*
* Block device operation function.
*
@@ -3603,6 +3697,8 @@ static int mtip_block_getgeo(struct block_device *dev,
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
+ .open = mtip_block_open,
+ .release = mtip_block_release,
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
@@ -3664,10 +3760,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
rq_data_dir(rq))) {
return -ENODATA;
}
- if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
+ if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
+ test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
return -ENODATA;
- if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
- return -ENXIO;
}
if (rq->cmd_flags & REQ_DISCARD) {
@@ -3779,11 +3874,32 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
return 0;
}
+static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
+ bool reserved)
+{
+ struct driver_data *dd = req->q->queuedata;
+
+ if (reserved)
+ goto exit_handler;
+
+ if (test_bit(req->tag, dd->port->cmds_to_issue))
+ goto exit_handler;
+
+ if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
+ goto exit_handler;
+
+ wake_up_interruptible(&dd->port->svc_wait);
+exit_handler:
+ return BLK_EH_RESET_TIMER;
+}
+
static struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
.map_queue = blk_mq_map_queue,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
+ .complete = mtip_softirq_done_fn,
+ .timeout = mtip_cmd_timeout,
};
/*
@@ -3850,7 +3966,6 @@ static int mtip_block_initialize(struct driver_data *dd)
mtip_hw_debugfs_init(dd);
-skip_create_disk:
memset(&dd->tags, 0, sizeof(dd->tags));
dd->tags.ops = &mtip_mq_ops;
dd->tags.nr_hw_queues = 1;
@@ -3860,12 +3975,13 @@ skip_create_disk:
dd->tags.numa_node = dd->numa_node;
dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
+ dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
rv = blk_mq_alloc_tag_set(&dd->tags);
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
- goto block_queue_alloc_init_error;
+ goto block_queue_alloc_tag_error;
}
/* Allocate the request queue. */
@@ -3880,6 +3996,7 @@ skip_create_disk:
dd->disk->queue = dd->queue;
dd->queue->queuedata = dd;
+skip_create_disk:
/* Initialize the protocol layer. */
wait_for_rebuild = mtip_hw_get_identify(dd);
if (wait_for_rebuild < 0) {
@@ -3976,8 +4093,9 @@ kthread_run_error:
read_capacity_error:
init_hw_cmds_error:
blk_cleanup_queue(dd->queue);
- blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_init_error:
+ blk_mq_free_tag_set(&dd->tags);
+block_queue_alloc_tag_error:
mtip_hw_debugfs_exit(dd);
disk_index_error:
spin_lock(&rssd_index_lock);
@@ -3994,6 +4112,22 @@ protocol_init_error:
return rv;
}
+static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
+{
+ struct driver_data *dd = (struct driver_data *)data;
+ struct mtip_cmd *cmd;
+
+ if (likely(!reserv))
+ blk_mq_complete_request(rq, -ENODEV);
+ else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
+
+ cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
+ if (cmd->comp_func)
+ cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
+ cmd, -ENODEV);
+ }
+}
+
/*
* Block layer deinitialization function.
*
@@ -4025,12 +4159,23 @@ static int mtip_block_remove(struct driver_data *dd)
}
}
- if (!dd->sr)
- mtip_standby_drive(dd);
+ if (!dd->sr) {
+ /*
+ * Explicitly wait here for IOs to quiesce,
+ * as mtip_standby_drive usually won't wait for IOs.
+ */
+ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
+ GFP_KERNEL))
+ mtip_standby_drive(dd);
+ }
else
dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd->disk->disk_name);
+ blk_mq_freeze_queue_start(dd->queue);
+ blk_mq_stop_hw_queues(dd->queue);
+ blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+
/*
* Delete our gendisk structure. This also removes the device
* from /dev
@@ -4040,7 +4185,8 @@ static int mtip_block_remove(struct driver_data *dd)
dd->bdev = NULL;
}
if (dd->disk) {
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4081,7 +4227,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4426,7 +4573,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
struct driver_data *dd = pci_get_drvdata(pdev);
unsigned long flags, to;
- set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
spin_lock_irqsave(&dev_lock, flags);
list_del_init(&dd->online_list);
@@ -4443,12 +4590,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
} while (atomic_read(&dd->irq_workers_active) != 0 &&
time_before(jiffies, to));
+ if (!dd->sr)
+ fsync_bdev(dd->bdev);
+
if (atomic_read(&dd->irq_workers_active) != 0) {
dev_warn(&dd->pdev->dev,
"Completion workers still active!\n");
}
- blk_mq_stop_hw_queues(dd->queue);
+ blk_set_queue_dying(dd->queue);
+ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
/* Clean up the block layer. */
mtip_block_remove(dd);
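
Taken together, these mtip changes move slow recovery work out of the blk-mq timeout callback: mtip_cmd_timeout only sets MTIP_PF_TO_ACTIVE_BIT and wakes the service thread, which quiesces, reissues, or aborts commands from a context that may sleep. The pthread sketch below shows the general defer-to-worker shape; all names are illustrative, not driver code:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
    static int timeout_pending;

    /* Analogue of mtip_cmd_timeout(): cheap, non-blocking, just flags
     * the event and wakes the worker. */
    static void timeout_callback(void)
    {
        pthread_mutex_lock(&lock);
        timeout_pending = 1;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
    }

    /* Analogue of the service thread: does the slow recovery work in a
     * context that is allowed to sleep. */
    static void *service_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!timeout_pending)
            pthread_cond_wait(&wake, &lock);
        timeout_pending = 0;
        pthread_mutex_unlock(&lock);
        puts("worker: resetting device, aborting commands");
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        pthread_create(&tid, NULL, service_thread, NULL);
        timeout_callback();
        pthread_join(tid, NULL);
        return 0;
    }
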
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3274784008eb..7617888f7944 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -134,16 +134,24 @@ enum {
MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */
+ MTIP_PF_TO_ACTIVE_BIT = 9, /* timeout handling */
MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
(1 << MTIP_PF_EH_ACTIVE_BIT) |
(1 << MTIP_PF_SE_ACTIVE_BIT) |
- (1 << MTIP_PF_DM_ACTIVE_BIT)),
+ (1 << MTIP_PF_DM_ACTIVE_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
MTIP_PF_ISSUE_CMDS_BIT = 5,
MTIP_PF_REBUILD_BIT = 6,
MTIP_PF_SVC_THD_STOP_BIT = 8,
+ MTIP_PF_SVC_THD_WORK = ((1 << MTIP_PF_EH_ACTIVE_BIT) |
+ (1 << MTIP_PF_ISSUE_CMDS_BIT) |
+ (1 << MTIP_PF_REBUILD_BIT) |
+ (1 << MTIP_PF_SVC_THD_STOP_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
+
/* below are bit numbers in 'dd_flag' defined in driver_data */
MTIP_DDF_SEC_LOCK_BIT = 0,
MTIP_DDF_REMOVE_PENDING_BIT = 1,
@@ -153,6 +161,7 @@ enum {
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
+ MTIP_DDF_REMOVAL_BIT = 9,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
(1 << MTIP_DDF_SEC_LOCK_BIT) |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e4c5cc107934..08afbc7a2bb8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -57,10 +57,12 @@ struct nbd_device {
int blksize;
loff_t bytesize;
int xmit_timeout;
+ bool timedout;
bool disconnect; /* a disconnect has been requested by user */
struct timer_list timeout_timer;
- spinlock_t tasks_lock;
+ /* protects initialization and shutdown of the socket */
+ spinlock_t sock_lock;
struct task_struct *task_recv;
struct task_struct *task_send;
@@ -98,6 +100,11 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
return disk_to_dev(nbd->disk);
}
+static bool nbd_is_connected(struct nbd_device *nbd)
+{
+ return !!nbd->task_recv;
+}
+
static const char *nbdcmd_to_ascii(int cmd)
{
switch (cmd) {
@@ -110,6 +117,42 @@ static const char *nbdcmd_to_ascii(int cmd)
return "invalid";
}
+static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
+{
+ bdev->bd_inode->i_size = 0;
+ set_capacity(nbd->disk, 0);
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
+{
+ if (!nbd_is_connected(nbd))
+ return;
+
+ bdev->bd_inode->i_size = nbd->bytesize;
+ set_capacity(nbd->disk, nbd->bytesize >> 9);
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+}
+
+static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
+ int blocksize, int nr_blocks)
+{
+ int ret;
+
+ ret = set_blocksize(bdev, blocksize);
+ if (ret)
+ return ret;
+
+ nbd->blksize = blocksize;
+ nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
+
+ nbd_size_update(nbd, bdev);
+
+ return 0;
+}
+
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
int error = req->errors ? -EIO : 0;
@@ -129,13 +172,20 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req)
*/
static void sock_shutdown(struct nbd_device *nbd)
{
- if (!nbd->sock)
+ spin_lock_irq(&nbd->sock_lock);
+
+ if (!nbd->sock) {
+ spin_unlock_irq(&nbd->sock_lock);
return;
+ }
dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
+ sockfd_put(nbd->sock);
nbd->sock = NULL;
- del_timer_sync(&nbd->timeout_timer);
+ spin_unlock_irq(&nbd->sock_lock);
+
+ del_timer(&nbd->timeout_timer);
}
static void nbd_xmit_timeout(unsigned long arg)
@@ -146,19 +196,16 @@ static void nbd_xmit_timeout(unsigned long arg)
if (list_empty(&nbd->queue_head))
return;
- nbd->disconnect = true;
+ spin_lock_irqsave(&nbd->sock_lock, flags);
- spin_lock_irqsave(&nbd->tasks_lock, flags);
+ nbd->timedout = true;
- if (nbd->task_recv)
- force_sig(SIGKILL, nbd->task_recv);
-
- if (nbd->task_send)
- force_sig(SIGKILL, nbd->task_send);
+ if (nbd->sock)
+ kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+ spin_unlock_irqrestore(&nbd->sock_lock, flags);
- dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
+ dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
}
/*
@@ -171,7 +218,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
int result;
struct msghdr msg;
struct kvec iov;
- sigset_t blocked, oldset;
unsigned long pflags = current->flags;
if (unlikely(!sock)) {
@@ -181,11 +227,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
return -EINVAL;
}
- /* Allow interception of SIGKILL only
- * Don't allow other signals to interrupt the transmission */
- siginitsetinv(&blocked, sigmask(SIGKILL));
- sigprocmask(SIG_SETMASK, &blocked, &oldset);
-
current->flags |= PF_MEMALLOC;
do {
sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
@@ -212,7 +253,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
buf += result;
} while (size > 0);
- sigprocmask(SIG_SETMASK, &oldset, NULL);
tsk_restore_flags(current, pflags, PF_MEMALLOC);
if (!send && nbd->xmit_timeout)
@@ -402,31 +442,28 @@ static struct device_attribute pid_attr = {
.show = pid_show,
};
-static int nbd_thread_recv(struct nbd_device *nbd)
+static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
{
struct request *req;
int ret;
- unsigned long flags;
BUG_ON(nbd->magic != NBD_MAGIC);
sk_set_memalloc(nbd->sock->sk);
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_recv = current;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (ret) {
dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_recv = NULL;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
return ret;
}
+ nbd_size_update(nbd, bdev);
+
while (1) {
req = nbd_read_stat(nbd);
if (IS_ERR(req)) {
@@ -437,21 +474,11 @@ static int nbd_thread_recv(struct nbd_device *nbd)
nbd_end_request(nbd, req);
}
+ nbd_size_clear(nbd, bdev);
+
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_recv = NULL;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
-
- if (signal_pending(current)) {
- ret = kernel_dequeue_signal(NULL);
- dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
- task_pid_nr(current), current->comm, ret);
- mutex_lock(&nbd->tx_lock);
- sock_shutdown(nbd);
- mutex_unlock(&nbd->tx_lock);
- ret = -ETIMEDOUT;
- }
return ret;
}
@@ -544,11 +571,8 @@ static int nbd_thread_send(void *data)
{
struct nbd_device *nbd = data;
struct request *req;
- unsigned long flags;
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_send = current;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
set_user_nice(current, MIN_NICE);
while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
@@ -557,17 +581,6 @@ static int nbd_thread_send(void *data)
kthread_should_stop() ||
!list_empty(&nbd->waiting_queue));
- if (signal_pending(current)) {
- int ret = kernel_dequeue_signal(NULL);
-
- dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
- task_pid_nr(current), current->comm, ret);
- mutex_lock(&nbd->tx_lock);
- sock_shutdown(nbd);
- mutex_unlock(&nbd->tx_lock);
- break;
- }
-
/* extract request */
if (list_empty(&nbd->waiting_queue))
continue;
@@ -582,13 +595,7 @@ static int nbd_thread_send(void *data)
nbd_handle_req(nbd, req);
}
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_send = NULL;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
-
- /* Clear maybe pending signals */
- if (signal_pending(current))
- kernel_dequeue_signal(NULL);
return 0;
}
@@ -618,8 +625,8 @@ static void nbd_request_handler(struct request_queue *q)
req, req->cmd_type);
if (unlikely(!nbd->sock)) {
- dev_err(disk_to_dev(nbd->disk),
- "Attempted send on closed socket\n");
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
req->errors++;
nbd_end_request(nbd, req);
spin_lock_irq(q->queue_lock);
@@ -636,6 +643,61 @@ static void nbd_request_handler(struct request_queue *q)
}
}
+static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
+{
+ int ret = 0;
+
+ spin_lock_irq(&nbd->sock_lock);
+
+ if (nbd->sock) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ nbd->sock = sock;
+
+out:
+ spin_unlock_irq(&nbd->sock_lock);
+
+ return ret;
+}
+
+/* Reset all properties of an NBD device */
+static void nbd_reset(struct nbd_device *nbd)
+{
+ nbd->disconnect = false;
+ nbd->timedout = false;
+ nbd->blksize = 1024;
+ nbd->bytesize = 0;
+ set_capacity(nbd->disk, 0);
+ nbd->flags = 0;
+ nbd->xmit_timeout = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+ del_timer_sync(&nbd->timeout_timer);
+}
+
+static void nbd_bdev_reset(struct block_device *bdev)
+{
+ set_device_ro(bdev, false);
+ bdev->bd_inode->i_size = 0;
+ if (max_part > 0) {
+ blkdev_reread_part(bdev);
+ bdev->bd_invalidated = 1;
+ }
+}
+
+static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
+{
+ if (nbd->flags & NBD_FLAG_READ_ONLY)
+ set_device_ro(bdev, true);
+ if (nbd->flags & NBD_FLAG_SEND_TRIM)
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+ if (nbd->flags & NBD_FLAG_SEND_FLUSH)
+ blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+ else
+ blk_queue_flush(nbd->disk->queue, 0);
+}
+
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
@@ -668,48 +730,40 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return 0;
}
- case NBD_CLEAR_SOCK: {
- struct socket *sock = nbd->sock;
- nbd->sock = NULL;
+ case NBD_CLEAR_SOCK:
+ sock_shutdown(nbd);
nbd_clear_que(nbd);
BUG_ON(!list_empty(&nbd->queue_head));
BUG_ON(!list_empty(&nbd->waiting_queue));
kill_bdev(bdev);
- if (sock)
- sockfd_put(sock);
return 0;
- }
case NBD_SET_SOCK: {
- struct socket *sock;
int err;
- if (nbd->sock)
- return -EBUSY;
- sock = sockfd_lookup(arg, &err);
- if (sock) {
- nbd->sock = sock;
- if (max_part > 0)
- bdev->bd_invalidated = 1;
- nbd->disconnect = false; /* we're connected now */
- return 0;
- }
- return -EINVAL;
+ struct socket *sock = sockfd_lookup(arg, &err);
+
+ if (!sock)
+ return err;
+
+ err = nbd_set_socket(nbd, sock);
+ if (!err && max_part)
+ bdev->bd_invalidated = 1;
+
+ return err;
}
- case NBD_SET_BLKSIZE:
- nbd->blksize = arg;
- nbd->bytesize &= ~(nbd->blksize-1);
- bdev->bd_inode->i_size = nbd->bytesize;
- set_blocksize(bdev, nbd->blksize);
- set_capacity(nbd->disk, nbd->bytesize >> 9);
- return 0;
+ case NBD_SET_BLKSIZE: {
+ loff_t bsize = div_s64(nbd->bytesize, arg);
+
+ return nbd_size_set(nbd, bdev, arg, bsize);
+ }
case NBD_SET_SIZE:
- nbd->bytesize = arg & ~(nbd->blksize-1);
- bdev->bd_inode->i_size = nbd->bytesize;
- set_blocksize(bdev, nbd->blksize);
- set_capacity(nbd->disk, nbd->bytesize >> 9);
- return 0;
+ return nbd_size_set(nbd, bdev, nbd->blksize,
+ arg / nbd->blksize);
+
+ case NBD_SET_SIZE_BLOCKS:
+ return nbd_size_set(nbd, bdev, nbd->blksize, arg);
case NBD_SET_TIMEOUT:
nbd->xmit_timeout = arg * HZ;
@@ -725,16 +779,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd->flags = arg;
return 0;
- case NBD_SET_SIZE_BLOCKS:
- nbd->bytesize = ((u64) arg) * nbd->blksize;
- bdev->bd_inode->i_size = nbd->bytesize;
- set_blocksize(bdev, nbd->blksize);
- set_capacity(nbd->disk, nbd->bytesize >> 9);
- return 0;
-
case NBD_DO_IT: {
struct task_struct *thread;
- struct socket *sock;
int error;
if (nbd->task_recv)
@@ -744,15 +790,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
mutex_unlock(&nbd->tx_lock);
- if (nbd->flags & NBD_FLAG_READ_ONLY)
- set_device_ro(bdev, true);
- if (nbd->flags & NBD_FLAG_SEND_TRIM)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
- nbd->disk->queue);
- if (nbd->flags & NBD_FLAG_SEND_FLUSH)
- blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
- else
- blk_queue_flush(nbd->disk->queue, 0);
+ nbd_parse_flags(nbd, bdev);
thread = kthread_run(nbd_thread_send, nbd, "%s",
nbd_name(nbd));
@@ -762,29 +800,24 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
}
nbd_dev_dbg_init(nbd);
- error = nbd_thread_recv(nbd);
+ error = nbd_thread_recv(nbd, bdev);
nbd_dev_dbg_close(nbd);
kthread_stop(thread);
mutex_lock(&nbd->tx_lock);
sock_shutdown(nbd);
- sock = nbd->sock;
- nbd->sock = NULL;
nbd_clear_que(nbd);
kill_bdev(bdev);
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
- set_device_ro(bdev, false);
- if (sock)
- sockfd_put(sock);
- nbd->flags = 0;
- nbd->bytesize = 0;
- bdev->bd_inode->i_size = 0;
- set_capacity(nbd->disk, 0);
- if (max_part > 0)
- blkdev_reread_part(bdev);
+ nbd_bdev_reset(bdev);
+
if (nbd->disconnect) /* user requested, ignore socket errors */
- return 0;
+ error = 0;
+ if (nbd->timedout)
+ error = -ETIMEDOUT;
+
+ nbd_reset(nbd);
+
return error;
}
@@ -892,50 +925,23 @@ static const struct file_operations nbd_dbg_flags_ops = {
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
struct dentry *dir;
- struct dentry *f;
+
+ if (!nbd_dbg_dir)
+ return -EIO;
dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
- if (IS_ERR_OR_NULL(dir)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s' (%ld)\n",
- nbd_name(nbd), PTR_ERR(dir));
- return PTR_ERR(dir);
+ if (!dir) {
+ dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
+ nbd_name(nbd));
+ return -EIO;
}
nbd->dbg_dir = dir;
- f = debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'tasks', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
-
- f = debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'size_bytes', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
-
- f = debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'timeout', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
-
- f = debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'blocksize', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
-
- f = debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'flags', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
+ debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
+ debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
+ debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
+ debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+ debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
return 0;
}
@@ -950,8 +956,8 @@ static int nbd_dbg_init(void)
struct dentry *dbg_dir;
dbg_dir = debugfs_create_dir("nbd", NULL);
- if (IS_ERR(dbg_dir))
- return PTR_ERR(dbg_dir);
+ if (!dbg_dir)
+ return -EIO;
nbd_dbg_dir = dbg_dir;
@@ -1069,7 +1075,7 @@ static int __init nbd_init(void)
nbd_dev[i].magic = NBD_MAGIC;
INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
spin_lock_init(&nbd_dev[i].queue_lock);
- spin_lock_init(&nbd_dev[i].tasks_lock);
+ spin_lock_init(&nbd_dev[i].sock_lock);
INIT_LIST_HEAD(&nbd_dev[i].queue_head);
mutex_init(&nbd_dev[i].tx_lock);
init_timer(&nbd_dev[i].timeout_timer);
@@ -1077,14 +1083,12 @@ static int __init nbd_init(void)
nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
init_waitqueue_head(&nbd_dev[i].active_wq);
init_waitqueue_head(&nbd_dev[i].waiting_wq);
- nbd_dev[i].blksize = 1024;
- nbd_dev[i].bytesize = 0;
disk->major = NBD_MAJOR;
disk->first_minor = i << part_shift;
disk->fops = &nbd_fops;
disk->private_data = &nbd_dev[i];
sprintf(disk->disk_name, "nbd%d", i);
- set_capacity(disk, 0);
+ nbd_reset(&nbd_dev[i]);
add_disk(disk);
}
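
The consolidated nbd sizing helpers maintain one invariant: bytesize equals blksize times nr_blocks, with the gendisk capacity kept in 512-byte sectors (the >> 9 above). A quick arithmetic check, runnable as plain C:

    #include <stdio.h>

    int main(void)
    {
        long long blksize = 4096;             /* NBD_SET_BLKSIZE */
        long long nr_blocks = 262144;         /* NBD_SET_SIZE_BLOCKS */
        long long bytesize = blksize * nr_blocks;

        /* 4096 * 262144 = 1 GiB; capacity is bytesize >> 9 sectors. */
        printf("bytes=%lld sectors=%lld\n", bytesize, bytesize >> 9);
        return 0;
    }
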
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 64a7b5971b57..cab97593ba54 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -742,10 +742,11 @@ static int null_add_dev(void)
add_disk(disk);
+done:
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
-done:
+
return 0;
out_cleanup_lightnvm:
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 562b5a4ca7b7..78a39f736c64 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -126,7 +126,7 @@
*/
#include <linux/types.h>
-static bool verbose = 0;
+static int verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);
-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 1740d75e8a32..216a94fed5b4 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -117,7 +117,7 @@
*/
-static bool verbose = 0;
+static int verbose = 0;
static int major = PT_MAJOR;
static char *name = PT_NAME;
static int disable = 0;
@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
#include <asm/uaccess.h>
-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param_array(drive0, int, NULL, 0);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4a876785b68c..0ede6d7e2568 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_features);
-static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -1847,14 +1846,12 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
if (osd_req->r_result < 0)
obj_request->result = osd_req->r_result;
- rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
-
/*
* We support a 64-bit length, but ultimately it has to be
* passed to the block layer, which just supports a 32-bit
* length field.
*/
- obj_request->xferred = osd_req->r_reply_op_len[0];
+ obj_request->xferred = osd_req->r_ops[0].outdata_len;
rbd_assert(obj_request->xferred < (u64)UINT_MAX);
opcode = osd_req->r_ops[0].op;
@@ -1955,7 +1952,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2004,7 +2001,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
rbd_dev = img_request->rbd_dev;
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
- false, GFP_ATOMIC);
+ false, GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2506,7 +2503,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
bio_chain_clone_range(&bio_list,
&bio_offset,
clone_size,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!obj_request->bio_list)
goto out_unwind;
} else if (type == OBJ_REQUEST_PAGES) {
@@ -3129,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
struct rbd_device *rbd_dev = (struct rbd_device *)data;
int ret;
- if (!rbd_dev)
- return;
-
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long)notify_id,
(unsigned int)opcode);
@@ -3265,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
+
+ dout("%s flushing notifies\n", __func__);
+ ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
/*
@@ -3644,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
sector_t size;
- bool removing;
/*
- * Don't hold the lock while doing disk operations,
- * or lock ordering will conflict with the bdev mutex via:
- * rbd_add() -> blkdev_get() -> rbd_open()
+ * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
+ * try to update its size. If REMOVING is set, updating size
+ * is just useless work since the device can't be opened.
*/
- spin_lock_irq(&rbd_dev->lock);
- removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
- spin_unlock_irq(&rbd_dev->lock);
- /*
- * If the device is being removed, rbd_dev->disk has
- * been destroyed, so don't try to update its size
- */
- if (!removing) {
+ if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
+ !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
set_capacity(rbd_dev->disk, size);
@@ -4193,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
__le64 features;
__le64 incompat;
} __attribute__ ((packed)) features_buf = { 0 };
- u64 incompat;
+ u64 unsup;
int ret;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4206,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
if (ret < sizeof (features_buf))
return -ERANGE;
- incompat = le64_to_cpu(features_buf.incompat);
- if (incompat & ~RBD_FEATURES_SUPPORTED)
+ unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
+ if (unsup) {
+ rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
+ unsup);
return -ENXIO;
+ }
*snap_features = le64_to_cpu(features_buf.features);
@@ -5189,6 +5182,10 @@ out_err:
return ret;
}
+/*
+ * rbd_dev->header_rwsem must be locked for write and will be unlocked
+ * upon return.
+ */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
@@ -5197,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
ret = rbd_dev_id_get(rbd_dev);
if (ret)
- return ret;
+ goto err_out_unlock;
BUILD_BUG_ON(DEV_NAME_LEN
< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5238,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
/* Everything's ready. Announce the disk to the world. */
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
- add_disk(rbd_dev->disk);
+ up_write(&rbd_dev->header_rwsem);
+ add_disk(rbd_dev->disk);
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
@@ -5254,6 +5252,8 @@ err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
+err_out_unlock:
+ up_write(&rbd_dev->header_rwsem);
return ret;
}
@@ -5444,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
spec = NULL; /* rbd_dev now owns this */
rbd_opts = NULL; /* rbd_dev now owns this */
+ down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
if (rc < 0)
goto err_out_rbd_dev;
@@ -5473,6 +5474,7 @@ out:
return rc;
err_out_rbd_dev:
+ up_write(&rbd_dev->header_rwsem);
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
@@ -5579,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
return ret;
rbd_dev_header_unwatch_sync(rbd_dev);
- /*
- * flush remaining watch callbacks - these must be complete
- * before the osd_client is shutdown
- */
- dout("%s: flushing notifies", __func__);
- ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
/*
* Don't free anything from rbd_dev->disk until after all
@@ -5643,18 +5639,12 @@ static void rbd_sysfs_cleanup(void)
static int rbd_slab_init(void)
{
rbd_assert(!rbd_img_request_cache);
- rbd_img_request_cache = kmem_cache_create("rbd_img_request",
- sizeof (struct rbd_img_request),
- __alignof__(struct rbd_img_request),
- 0, NULL);
+ rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
if (!rbd_img_request_cache)
return -ENOMEM;
rbd_assert(!rbd_obj_request_cache);
- rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
- sizeof (struct rbd_obj_request),
- __alignof__(struct rbd_obj_request),
- 0, NULL);
+ rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
if (!rbd_obj_request_cache)
goto out_err;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ca35495a5be..28cff0d23d82 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -477,8 +477,13 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
struct virtio_blk_config, wce,
&writeback);
+
+ /*
+ * If WCE is not configurable and flush is not available,
+ * assume no writeback cache is in use.
+ */
if (err)
- writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
+ writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);
return writeback;
}
@@ -833,14 +838,14 @@ static const struct virtio_device_id id_table[] = {
static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
- VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
+ VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
}
;
static unsigned int features[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
- VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
+ VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
};
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 876763f7f13e..26aa080e243c 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -23,8 +23,7 @@
#include <xen/grant_table.h>
#include "common.h"
-/* Enlarge the array size in order to fully show blkback name. */
-#define BLKBACK_NAME_LEN (20)
+/* Maximum length of the 'ring-ref%u' name on the XenBus. */
#define RINGREF_NAME_LEN (20)
struct backend_info {
@@ -76,7 +75,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
else
devname = devpath;
- snprintf(buf, BLKBACK_NAME_LEN, "blkback.%d.%s", blkif->domid, devname);
+ snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
kfree(devpath);
return 0;
@@ -85,7 +84,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
int err;
- char name[BLKBACK_NAME_LEN];
+ char name[TASK_COMM_LEN];
struct xen_blkif_ring *ring;
int i;
@@ -618,6 +617,14 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
goto fail;
}
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-max-indirect-segments", "%u",
+ MAX_INDIRECT_SEGMENTS);
+ if (err)
+ dev_warn(&dev->dev,
+ "writing %s/feature-max-indirect-segments (%d)",
+ dev->nodename, err);
+
/* Multi-queue: advertise how many queues are supported by us.*/
err = xenbus_printf(XBT_NIL, dev->nodename,
"multi-queue-max-queues", "%u", xenblk_max_queues);
@@ -849,11 +856,6 @@ again:
dev->nodename);
goto abort;
}
- err = xenbus_printf(xbt, dev->nodename, "feature-max-indirect-segments", "%u",
- MAX_INDIRECT_SEGMENTS);
- if (err)
- dev_warn(&dev->dev, "writing %s/feature-max-indirect-segments (%d)",
- dev->nodename, err);
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 83eb9e6bf8b0..6405b6557792 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -125,8 +125,10 @@ static const struct block_device_operations xlvbd_block_fops;
*/
static unsigned int xen_blkif_max_segments = 32;
-module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
-MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
+module_param_named(max_indirect_segments, xen_blkif_max_segments, uint,
+ S_IRUGO);
+MODULE_PARM_DESC(max_indirect_segments,
+ "Maximum amount of segments in indirect requests (default is 32)");
static unsigned int xen_blkif_max_queues = 4;
module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index ec6af1595062..cf50fd2e96df 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -169,6 +169,17 @@ config BT_HCIUART_QCA
Say Y here to compile support for QCA protocol.
+config BT_HCIUART_AG6XX
+ bool "Intel AG6XX protocol support"
+ depends on BT_HCIUART
+ select BT_HCIUART_H4
+ select BT_INTEL
+ help
+ The Intel/AG6XX protocol support enables Bluetooth HCI over the
+ serial port interface for Intel ibt 2.1 Bluetooth controllers.
+
+ Say Y here to compile support for Intel AG6XX protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 07c9cf381e5a..9c18939fc5c9 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -36,6 +36,7 @@ hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o
hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
+hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index fa893c3ec408..47ca4b39d306 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe05f) },
{ USB_DEVICE(0x0489, 0xe076) },
{ USB_DEVICE(0x0489, 0xe078) },
+ { USB_DEVICE(0x0489, 0xe095) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x300d) },
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
+ { USB_DEVICE(0x04CA, 0x3014) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x021c) },
{ USB_DEVICE(0x0930, 0x0220) },
@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3395) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
{ USB_DEVICE(0x13d3, 0x3423) },
{ USB_DEVICE(0x13d3, 0x3432) },
+ { USB_DEVICE(0x13d3, 0x3472) },
{ USB_DEVICE(0x13d3, 0x3474) },
/* Atheros AR5BBU12 with sflash firmware */
@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
@@ -497,6 +505,7 @@ static int ath3k_probe(struct usb_interface *intf,
/* match device ID in ath3k blacklist table */
if (!id->driver_info) {
const struct usb_device_id *match;
+
match = usb_match_id(intf, ath3k_blist_tbl);
if (match)
id = match;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 0b697946e9bc..fdb44829ab6f 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -467,7 +467,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
err = request_firmware(&fw, fw_name, &hdev->dev);
if (err < 0) {
BT_INFO("%s: BCM: Patch %s not found", hdev->name, fw_name);
- return 0;
+ goto done;
}
btbcm_patchram(hdev, fw);
@@ -501,6 +501,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
kfree_skb(skb);
+done:
btbcm_check_bdaddr(hdev);
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 6ed8acfcfa9c..c6ef248de5e4 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -371,7 +371,7 @@ static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card,
if (firmwarestat == FIRMWARE_READY)
return 0;
- msleep(10);
+ msleep(100);
}
return -ETIMEDOUT;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a191e318fab8..0d4e372e426d 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -227,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
new file mode 100644
index 000000000000..6923d17a022f
--- /dev/null
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -0,0 +1,337 @@
+/*
+ *
+ * Bluetooth HCI UART driver for Intel/AG6xx devices
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+#include "btintel.h"
+
+struct ag6xx_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+};
+
+struct pbn_entry {
+ __le32 addr;
+ __le32 plen;
+ __u8 data[0];
+} __packed;
+
+static int ag6xx_open(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx;
+
+ BT_DBG("hu %p", hu);
+
+ ag6xx = kzalloc(sizeof(*ag6xx), GFP_KERNEL);
+ if (!ag6xx)
+ return -ENOMEM;
+
+ skb_queue_head_init(&ag6xx->txq);
+
+ hu->priv = ag6xx;
+ return 0;
+}
+
+static int ag6xx_close(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&ag6xx->txq);
+ kfree_skb(ag6xx->rx_skb);
+ kfree(ag6xx);
+
+ hu->priv = NULL;
+ return 0;
+}
+
+static int ag6xx_flush(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&ag6xx->txq);
+ return 0;
+}
+
+static struct sk_buff *ag6xx_dequeue(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&ag6xx->txq);
+ if (!skb)
+ return skb;
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ return skb;
+}
+
+static int ag6xx_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ skb_queue_tail(&ag6xx->txq, skb);
+ return 0;
+}
+
+static const struct h4_recv_pkt ag6xx_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
+static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ ag6xx_recv_pkts,
+ ARRAY_SIZE(ag6xx_recv_pkts));
+ if (IS_ERR(ag6xx->rx_skb)) {
+ int err = PTR_ERR(ag6xx->rx_skb);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
+ ag6xx->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static int intel_mem_write(struct hci_dev *hdev, u32 addr, u32 plen,
+ const void *data)
+{
+ /* Can write a maximum of 247 bytes per HCI command.
+ * HCI cmd Header (3), Intel mem write header (6), data (247).
+ */
+ while (plen > 0) {
+ struct sk_buff *skb;
+ u8 cmd_param[253], fragment_len = (plen > 247) ? 247 : plen;
+ __le32 leaddr = cpu_to_le32(addr);
+
+ memcpy(cmd_param, &leaddr, 4);
+ cmd_param[4] = 0;
+ cmd_param[5] = fragment_len;
+ memcpy(cmd_param + 6, data, fragment_len);
+
+ skb = __hci_cmd_sync(hdev, 0xfc8e, fragment_len + 6, cmd_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+ kfree_skb(skb);
+
+ plen -= fragment_len;
+ data += fragment_len;
+ addr += fragment_len;
+ }
+
+ return 0;
+}
+
+static int ag6xx_setup(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+ struct intel_version ver;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ char fwname[64];
+ bool patched = false;
+ int err;
+
+ hu->hdev->set_diag = btintel_set_diag;
+ hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+ err = btintel_enter_mfg(hdev);
+ if (err)
+ return err;
+
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+
+ btintel_version_info(hdev, &ver);
+
+ /* The hardware platform number has a fixed value of 0x37 and
+ * for now only accept this single value.
+ */
+ if (ver.hw_platform != 0x37) {
+ bt_dev_err(hdev, "Unsupported Intel hardware platform: 0x%X",
+ ver.hw_platform);
+ return -EINVAL;
+ }
+
+ /* Only the hardware variant iBT 2.1 (AG6XX) is supported by this
+ * firmware setup method.
+ */
+ if (ver.hw_variant != 0x0a) {
+ bt_dev_err(hdev, "Unsupported Intel hardware variant: 0x%x",
+ ver.hw_variant);
+ return -EINVAL;
+ }
+
+ snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bddata",
+ ver.hw_platform, ver.hw_variant);
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to open Intel bddata file: %s (%d)",
+ fwname, err);
+ goto patch;
+ }
+ fw_ptr = fw->data;
+
+ bt_dev_info(hdev, "Applying bddata (%s)", fwname);
+
+ skb = __hci_cmd_sync_ev(hdev, 0xfc2f, fw->size, fw->data,
+ HCI_EV_CMD_STATUS, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Applying bddata failed (%ld)", PTR_ERR(skb));
+ release_firmware(fw);
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ release_firmware(fw);
+
+patch:
+ /* If no patch has been applied, fw_patch_num is always 0x00. Otherwise
+ * the firmware is already patched and does not need to be patched again.
+ */
+ if (ver.fw_patch_num) {
+ bt_dev_info(hdev, "Device is already patched. patch num: %02x",
+ ver.fw_patch_num);
+ patched = true;
+ goto complete;
+ }
+
+ snprintf(fwname, sizeof(fwname),
+ "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.pbn",
+ ver.hw_platform, ver.hw_variant, ver.hw_revision,
+ ver.fw_variant, ver.fw_revision, ver.fw_build_num,
+ ver.fw_build_ww, ver.fw_build_yy);
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to open Intel patch file: %s(%d)",
+ fwname, err);
+ goto complete;
+ }
+ fw_ptr = fw->data;
+
+ bt_dev_info(hdev, "Patching firmware file (%s)", fwname);
+
+ /* PBN patch file contains a list of binary patches to be applied on top
+ * of the embedded firmware. Each patch entry header contains the target
+ * address and patch size.
+ *
+ * Patch entry:
+ * | addr(le) | patch_len(le) | patch_data |
+ * | 4 Bytes | 4 Bytes | n Bytes |
+ *
+ * PBN file is terminated by a patch entry whose address is 0xffffffff.
+ */
+ while (fw->size > fw_ptr - fw->data) {
+ struct pbn_entry *pbn = (void *)fw_ptr;
+ u32 addr, plen;
+
+ if (pbn->addr == 0xffffffff) {
+ bt_dev_info(hdev, "Patching complete");
+ patched = true;
+ break;
+ }
+
+ addr = le32_to_cpu(pbn->addr);
+ plen = le32_to_cpu(pbn->plen);
+
+ if (fw->data + fw->size <= pbn->data + plen) {
+ bt_dev_info(hdev, "Invalid patch len (%d)", plen);
+ break;
+ }
+
+ bt_dev_info(hdev, "Patching %td/%zu", (fw_ptr - fw->data),
+ fw->size);
+
+ err = intel_mem_write(hdev, addr, plen, pbn->data);
+ if (err) {
+ bt_dev_err(hdev, "Patching failed");
+ break;
+ }
+
+ fw_ptr = pbn->data + plen;
+ }
+
+ release_firmware(fw);
+
+complete:
+ /* Exit manufacturing mode and reset */
+ err = btintel_exit_mfg(hdev, true, patched);
+ if (err)
+ return err;
+
+ /* Set the event mask for Intel specific vendor events. This enables
+ * a few extra events that are useful during general operation.
+ */
+ btintel_set_event_mask_mfg(hdev, false);
+
+ btintel_check_bdaddr(hdev);
+ return 0;
+}
+
+static const struct hci_uart_proto ag6xx_proto = {
+ .id = HCI_UART_AG6XX,
+ .name = "AG6XX",
+ .manufacturer = 2,
+ .open = ag6xx_open,
+ .close = ag6xx_close,
+ .flush = ag6xx_flush,
+ .setup = ag6xx_setup,
+ .recv = ag6xx_recv,
+ .enqueue = ag6xx_enqueue,
+ .dequeue = ag6xx_dequeue,
+};
+
+int __init ag6xx_init(void)
+{
+ return hci_uart_register_proto(&ag6xx_proto);
+}
+
+int __exit ag6xx_deinit(void)
+{
+ return hci_uart_unregister_proto(&ag6xx_proto);
+}
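
The PBN walk in ag6xx_setup() above is easier to follow in isolation. Below is a minimal userspace sketch of the same parsing loop, under stated assumptions: apply_patch() is a hypothetical stand-in for intel_mem_write(), and the bounds check mirrors the intent of the loop rather than reproducing it line for line.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for intel_mem_write(); returns 0 on success.
 * The real function additionally fragments each write into HCI commands
 * carrying at most 247 bytes of data each, as its comment notes. */
extern int apply_patch(uint32_t addr, const uint8_t *data, uint32_t plen);

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Walk a PBN image: each entry is | addr(le32) | plen(le32) | data |,
 * and an entry whose addr is 0xffffffff terminates the list.
 * Returns 0 once the terminator is reached, -1 on a malformed image. */
static int pbn_walk(const uint8_t *fw, size_t size)
{
	size_t off = 0;

	while (off + 8 <= size) {
		uint32_t addr = get_le32(fw + off);
		uint32_t plen = get_le32(fw + off + 4);

		if (addr == 0xffffffff)
			return 0;		/* patching complete */

		if (plen > size - off - 8)
			return -1;		/* data overruns the image */

		if (apply_patch(addr, fw + off + 8, plen))
			return -1;

		off += 8 + plen;		/* next entry follows the data */
	}
	return -1;	/* ran off the end without seeing a terminator */
}
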
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5f3de181e744..d8881dc0600c 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -820,10 +820,13 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E3D", 0 },
{ "BCM2E3F", 0 },
{ "BCM2E40", 0 },
+ { "BCM2E54", 0 },
+ { "BCM2E55", 0 },
{ "BCM2E64", 0 },
{ "BCM2E65", 0 },
{ "BCM2E67", 0 },
{ "BCM2E7B", 0 },
+ { "BCM2E7C", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 3d63ea37bd4c..91d605147b10 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -488,7 +488,7 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
clear_bit(STATE_BOOTING, &intel->flags);
/* In case of timeout, try to continue anyway */
- if (err && err != ETIMEDOUT)
+ if (err && err != -ETIMEDOUT)
return err;
bt_dev_info(hdev, "Change controller speed to %d", speed);
@@ -581,7 +581,7 @@ static int intel_setup(struct hci_uart *hu)
clear_bit(STATE_BOOTING, &intel->flags);
/* In case of timeout, try to continue anyway */
- if (err && err != ETIMEDOUT)
+ if (err && err != -ETIMEDOUT)
return err;
set_bit(STATE_BOOTLOADER, &intel->flags);
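
Both hunks above fix the same sign bug: in the kernel, failures are returned as negative errno values, so a timeout arrives as -ETIMEDOUT and the old comparison against the positive ETIMEDOUT could never match, turning every timeout into a fatal error. A tiny standalone illustration of the convention (do_wait() is a made-up helper, not a driver function):

#include <errno.h>
#include <stdio.h>

/* Hypothetical helper following the kernel convention: 0 on success,
 * a negative errno on failure. */
static int do_wait(int simulate_timeout)
{
	return simulate_timeout ? -ETIMEDOUT : 0;
}

int main(void)
{
	int err = do_wait(1);

	/* Correct check: a timeout is -ETIMEDOUT (-110). Comparing against
	 * the positive ETIMEDOUT, as the old code did, never matches, so
	 * timeouts were wrongly propagated instead of being tolerated. */
	if (err && err != -ETIMEDOUT) {
		fprintf(stderr, "fatal error: %d\n", err);
		return 1;
	}

	printf("timed out, continuing anyway (err=%d)\n", err);
	return 0;
}
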
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 73202624133b..c00168a5bb80 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -804,6 +804,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_QCA
qca_init();
#endif
+#ifdef CONFIG_BT_HCIUART_AG6XX
+ ag6xx_init();
+#endif
return 0;
}
@@ -836,6 +839,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_QCA
qca_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_AG6XX
+ ag6xx_deinit();
+#endif
/* Release tty registration of line discipline */
err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 82c92f1b65b4..4814ff08f427 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 9
+#define HCI_UART_MAX_PROTO 10
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -46,6 +46,7 @@
#define HCI_UART_INTEL 6
#define HCI_UART_BCM 7
#define HCI_UART_QCA 8
+#define HCI_UART_AG6XX 9
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
@@ -182,3 +183,8 @@ int bcm_deinit(void);
int qca_init(void);
int qca_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_AG6XX
+int ag6xx_init(void);
+int ag6xx_deinit(void);
+#endif
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 9a92c072a485..d4a3a3133da5 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -34,15 +34,15 @@ config ARM_CCI400_PORT_CTRL
Low level power management driver for CCI400 cache coherent
interconnect for ARM platforms.
-config ARM_CCI500_PMU
- bool "ARM CCI500 PMU support"
+config ARM_CCI5xx_PMU
+ bool "ARM CCI-500/CCI-550 PMU support"
depends on (ARM && CPU_V7) || ARM64
depends on PERF_EVENTS
select ARM_CCI_PMU
help
- Support for PMU events monitoring on the ARM CCI-500 cache coherent
- interconnect. CCI-500 provides 8 independent event counters, which
- can count events pertaining to the slave/master interfaces as well
+ Support for PMU events monitoring on the ARM CCI-500/CCI-550 cache
+ coherent interconnects. Both of them provide 8 independent event counters,
+ which can count events pertaining to the slave/master interfaces as well
as the internal events to the CCI.
If unsure, say Y
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 577cc4bf6a9d..a49b28378d59 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -52,8 +52,9 @@ static const struct of_device_id arm_cci_matches[] = {
#ifdef CONFIG_ARM_CCI400_COMMON
{.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
#endif
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
{ .compatible = "arm,cci-500", },
+ { .compatible = "arm,cci-550", },
#endif
{},
};
@@ -92,7 +93,7 @@ static const struct of_device_id arm_cci_matches[] = {
enum {
CCI_IF_SLAVE,
CCI_IF_MASTER,
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
CCI_IF_GLOBAL,
#endif
CCI_IF_MAX,
@@ -121,13 +122,12 @@ struct cci_pmu_model {
u32 fixed_hw_cntrs;
u32 num_hw_cntrs;
u32 cntr_size;
- u64 nformat_attrs;
- u64 nevent_attrs;
- struct dev_ext_attribute *format_attrs;
- struct dev_ext_attribute *event_attrs;
+ struct attribute **format_attrs;
+ struct attribute **event_attrs;
struct event_range event_ranges[CCI_IF_MAX];
int (*validate_hw_event)(struct cci_pmu *, unsigned long);
int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
+ void (*write_counters)(struct cci_pmu *, unsigned long *);
};
static struct cci_pmu_model cci_pmu_models[];
@@ -155,19 +155,24 @@ enum cci_models {
CCI400_R0,
CCI400_R1,
#endif
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
CCI500_R0,
+ CCI550_R0,
#endif
CCI_MODEL_MAX
};
+static void pmu_write_counters(struct cci_pmu *cci_pmu,
+ unsigned long *mask);
static ssize_t cci_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t cci_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
-#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
- { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config }
+#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
+ &((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \
+ })[0].attr.attr
#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
@@ -242,12 +247,13 @@ enum cci400_perf_events {
static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
-static struct dev_ext_attribute cci400_pmu_format_attrs[] = {
+static struct attribute *cci400_pmu_format_attrs[] = {
CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
+ NULL
};
-static struct dev_ext_attribute cci400_r0_pmu_event_attrs[] = {
+static struct attribute *cci400_r0_pmu_event_attrs[] = {
/* Slave events */
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
@@ -279,9 +285,10 @@ static struct dev_ext_attribute cci400_r0_pmu_event_attrs[] = {
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
/* Special event for cycles counter */
CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
+ NULL
};
-static struct dev_ext_attribute cci400_r1_pmu_event_attrs[] = {
+static struct attribute *cci400_r1_pmu_event_attrs[] = {
/* Slave events */
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
@@ -325,6 +332,7 @@ static struct dev_ext_attribute cci400_r1_pmu_event_attrs[] = {
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
/* Special event for cycles counter */
CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
+ NULL
};
static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
@@ -420,72 +428,68 @@ static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev
}
#endif /* CONFIG_ARM_CCI400_PMU */
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
/*
- * CCI500 provides 8 independent event counters that can count
- * any of the events available.
- *
- * CCI500 PMU event id is an 9-bit value made of two parts.
+ * CCI5xx PMU event id is a 9-bit value made of two parts.
* bits [8:5] - Source for the event
- * 0x0-0x6 - Slave interfaces
- * 0x8-0xD - Master interfaces
- * 0xf - Global Events
- * 0x7,0xe - Reserved
- *
* bits [4:0] - Event code (specific to type of interface)
+ *
+ *
*/
/* Port ids */
-#define CCI500_PORT_S0 0x0
-#define CCI500_PORT_S1 0x1
-#define CCI500_PORT_S2 0x2
-#define CCI500_PORT_S3 0x3
-#define CCI500_PORT_S4 0x4
-#define CCI500_PORT_S5 0x5
-#define CCI500_PORT_S6 0x6
-
-#define CCI500_PORT_M0 0x8
-#define CCI500_PORT_M1 0x9
-#define CCI500_PORT_M2 0xa
-#define CCI500_PORT_M3 0xb
-#define CCI500_PORT_M4 0xc
-#define CCI500_PORT_M5 0xd
-
-#define CCI500_PORT_GLOBAL 0xf
-
-#define CCI500_PMU_EVENT_MASK 0x1ffUL
-#define CCI500_PMU_EVENT_SOURCE_SHIFT 0x5
-#define CCI500_PMU_EVENT_SOURCE_MASK 0xf
-#define CCI500_PMU_EVENT_CODE_SHIFT 0x0
-#define CCI500_PMU_EVENT_CODE_MASK 0x1f
-
-#define CCI500_PMU_EVENT_SOURCE(event) \
- ((event >> CCI500_PMU_EVENT_SOURCE_SHIFT) & CCI500_PMU_EVENT_SOURCE_MASK)
-#define CCI500_PMU_EVENT_CODE(event) \
- ((event >> CCI500_PMU_EVENT_CODE_SHIFT) & CCI500_PMU_EVENT_CODE_MASK)
-
-#define CCI500_SLAVE_PORT_MIN_EV 0x00
-#define CCI500_SLAVE_PORT_MAX_EV 0x1f
-#define CCI500_MASTER_PORT_MIN_EV 0x00
-#define CCI500_MASTER_PORT_MAX_EV 0x06
-#define CCI500_GLOBAL_PORT_MIN_EV 0x00
-#define CCI500_GLOBAL_PORT_MAX_EV 0x0f
-
-
-#define CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
- CCI_EXT_ATTR_ENTRY(_name, cci500_pmu_global_event_show, \
+#define CCI5xx_PORT_S0 0x0
+#define CCI5xx_PORT_S1 0x1
+#define CCI5xx_PORT_S2 0x2
+#define CCI5xx_PORT_S3 0x3
+#define CCI5xx_PORT_S4 0x4
+#define CCI5xx_PORT_S5 0x5
+#define CCI5xx_PORT_S6 0x6
+
+#define CCI5xx_PORT_M0 0x8
+#define CCI5xx_PORT_M1 0x9
+#define CCI5xx_PORT_M2 0xa
+#define CCI5xx_PORT_M3 0xb
+#define CCI5xx_PORT_M4 0xc
+#define CCI5xx_PORT_M5 0xd
+#define CCI5xx_PORT_M6 0xe
+
+#define CCI5xx_PORT_GLOBAL 0xf
+
+#define CCI5xx_PMU_EVENT_MASK 0x1ffUL
+#define CCI5xx_PMU_EVENT_SOURCE_SHIFT 0x5
+#define CCI5xx_PMU_EVENT_SOURCE_MASK 0xf
+#define CCI5xx_PMU_EVENT_CODE_SHIFT 0x0
+#define CCI5xx_PMU_EVENT_CODE_MASK 0x1f
+
+#define CCI5xx_PMU_EVENT_SOURCE(event) \
+ ((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
+#define CCI5xx_PMU_EVENT_CODE(event) \
+ ((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)
+
+#define CCI5xx_SLAVE_PORT_MIN_EV 0x00
+#define CCI5xx_SLAVE_PORT_MAX_EV 0x1f
+#define CCI5xx_MASTER_PORT_MIN_EV 0x00
+#define CCI5xx_MASTER_PORT_MAX_EV 0x06
+#define CCI5xx_GLOBAL_PORT_MIN_EV 0x00
+#define CCI5xx_GLOBAL_PORT_MAX_EV 0x0f
+
+
+#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
+ CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
(unsigned long) _config)
-static ssize_t cci500_pmu_global_event_show(struct device *dev,
+static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
-static struct dev_ext_attribute cci500_pmu_format_attrs[] = {
+static struct attribute *cci5xx_pmu_format_attrs[] = {
CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
+ NULL,
};
-static struct dev_ext_attribute cci500_pmu_event_attrs[] = {
+static struct attribute *cci5xx_pmu_event_attrs[] = {
/* Slave events */
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
@@ -530,63 +534,73 @@ static struct dev_ext_attribute cci500_pmu_event_attrs[] = {
CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),
/* Global events */
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
+ NULL
};
-static ssize_t cci500_pmu_global_event_show(struct device *dev,
+static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
/* Global events have single fixed source code */
return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
- (unsigned long)eattr->var, CCI500_PORT_GLOBAL);
+ (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
}
+/*
+ * CCI500 provides 8 independent event counters that can count
+ * any of the events available.
+ * CCI500 PMU event source ids
+ * 0x0-0x6 - Slave interfaces
+ * 0x8-0xD - Master interfaces
+ * 0xf - Global Events
+ * 0x7,0xe - Reserved
+ */
static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
unsigned long hw_event)
{
- u32 ev_source = CCI500_PMU_EVENT_SOURCE(hw_event);
- u32 ev_code = CCI500_PMU_EVENT_CODE(hw_event);
+ u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
+ u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
int if_type;
- if (hw_event & ~CCI500_PMU_EVENT_MASK)
+ if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
return -ENOENT;
switch (ev_source) {
- case CCI500_PORT_S0:
- case CCI500_PORT_S1:
- case CCI500_PORT_S2:
- case CCI500_PORT_S3:
- case CCI500_PORT_S4:
- case CCI500_PORT_S5:
- case CCI500_PORT_S6:
+ case CCI5xx_PORT_S0:
+ case CCI5xx_PORT_S1:
+ case CCI5xx_PORT_S2:
+ case CCI5xx_PORT_S3:
+ case CCI5xx_PORT_S4:
+ case CCI5xx_PORT_S5:
+ case CCI5xx_PORT_S6:
if_type = CCI_IF_SLAVE;
break;
- case CCI500_PORT_M0:
- case CCI500_PORT_M1:
- case CCI500_PORT_M2:
- case CCI500_PORT_M3:
- case CCI500_PORT_M4:
- case CCI500_PORT_M5:
+ case CCI5xx_PORT_M0:
+ case CCI5xx_PORT_M1:
+ case CCI5xx_PORT_M2:
+ case CCI5xx_PORT_M3:
+ case CCI5xx_PORT_M4:
+ case CCI5xx_PORT_M5:
if_type = CCI_IF_MASTER;
break;
- case CCI500_PORT_GLOBAL:
+ case CCI5xx_PORT_GLOBAL:
if_type = CCI_IF_GLOBAL;
break;
default:
@@ -599,7 +613,118 @@ static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
return -ENOENT;
}
-#endif /* CONFIG_ARM_CCI500_PMU */
+
+/*
+ * CCI550 provides 8 independent event counters that can count
+ * any of the events available.
+ * CCI550 PMU event source ids
+ * 0x0-0x6 - Slave interfaces
+ * 0x8-0xe - Master interfaces
+ * 0xf - Global Events
+ * 0x7 - Reserved
+ */
+static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
+ unsigned long hw_event)
+{
+ u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
+ u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
+ int if_type;
+
+ if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
+ return -ENOENT;
+
+ switch (ev_source) {
+ case CCI5xx_PORT_S0:
+ case CCI5xx_PORT_S1:
+ case CCI5xx_PORT_S2:
+ case CCI5xx_PORT_S3:
+ case CCI5xx_PORT_S4:
+ case CCI5xx_PORT_S5:
+ case CCI5xx_PORT_S6:
+ if_type = CCI_IF_SLAVE;
+ break;
+ case CCI5xx_PORT_M0:
+ case CCI5xx_PORT_M1:
+ case CCI5xx_PORT_M2:
+ case CCI5xx_PORT_M3:
+ case CCI5xx_PORT_M4:
+ case CCI5xx_PORT_M5:
+ case CCI5xx_PORT_M6:
+ if_type = CCI_IF_MASTER;
+ break;
+ case CCI5xx_PORT_GLOBAL:
+ if_type = CCI_IF_GLOBAL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
+ ev_code <= cci_pmu->model->event_ranges[if_type].max)
+ return hw_event;
+
+ return -ENOENT;
+}
+
+#endif /* CONFIG_ARM_CCI5xx_PMU */
+
+/*
+ * Program the CCI PMU counters which have PERF_HES_ARCH set
+ * with the event period and mark them ready before we enable
+ * PMU.
+ */
+static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
+{
+ int i;
+ struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
+
+ DECLARE_BITMAP(mask, cci_pmu->num_cntrs);
+
+ bitmap_zero(mask, cci_pmu->num_cntrs);
+ for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
+ struct perf_event *event = cci_hw->events[i];
+
+ if (WARN_ON(!event))
+ continue;
+
+ /* Leave the events which are not counting */
+ if (event->hw.state & PERF_HES_STOPPED)
+ continue;
+ if (event->hw.state & PERF_HES_ARCH) {
+ set_bit(i, mask);
+ event->hw.state &= ~PERF_HES_ARCH;
+ }
+ }
+
+ pmu_write_counters(cci_pmu, mask);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
+{
+ u32 val;
+
+ /* Enable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
+{
+ cci_pmu_sync_counters(cci_pmu);
+ __cci_pmu_enable_nosync(cci_pmu);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_disable(void)
+{
+ u32 val;
+
+ /* Disable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+}
static ssize_t cci_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -633,8 +758,8 @@ static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offs
static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
int idx, unsigned int offset)
{
- return writel_relaxed(value, cci_pmu->base +
- CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
+ writel_relaxed(value, cci_pmu->base +
+ CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}
static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
@@ -647,12 +772,56 @@ static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
}
+static bool __maybe_unused
+pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
+{
+ return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
+}
+
static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
{
pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
}
/*
+ * For all counters on the CCI-PMU, disable any 'enabled' counters,
+ * saving the changed counters in the mask, so that we can restore
+ * it later using pmu_restore_counters. The mask is private to the
+ * caller. We cannot rely on the used_mask maintained by the CCI_PMU
+ * as it only tells us if the counter is assigned to perf_event or not.
+ * The state of the perf_event cannot be locked by the PMU layer, hence
+ * we check the individual counter status (which can be locked by
+ * cci_pm->hw_events->pmu_lock).
+ *
+ * @mask should be initialised to empty by the caller.
+ */
+static void __maybe_unused
+pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ int i;
+
+ for (i = 0; i < cci_pmu->num_cntrs; i++) {
+ if (pmu_counter_is_enabled(cci_pmu, i)) {
+ set_bit(i, mask);
+ pmu_disable_counter(cci_pmu, i);
+ }
+ }
+}
+
+/*
+ * Restore the status of the counters. Reversal of the pmu_save_counters().
+ * For each counter set in the mask, enable the counter back.
+ */
+static void __maybe_unused
+pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ int i;
+
+ for_each_set_bit(i, mask, cci_pmu->num_cntrs)
+ pmu_enable_counter(cci_pmu, i);
+}
+
+/*
* Returns the number of programmable counters actually implemented
* by the cci
*/
@@ -754,18 +923,98 @@ static u32 pmu_read_counter(struct perf_event *event)
return value;
}
-static void pmu_write_counter(struct perf_event *event, u32 value)
+static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
{
- struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
- struct hw_perf_event *hw_counter = &event->hw;
- int idx = hw_counter->idx;
+ pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
+}
- if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
- dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ int i;
+ struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
+
+ for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
+ struct perf_event *event = cci_hw->events[i];
+
+ if (WARN_ON(!event))
+ continue;
+ pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
+ }
+}
+
+static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ if (cci_pmu->model->write_counters)
+ cci_pmu->model->write_counters(cci_pmu, mask);
else
- pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
+ __pmu_write_counters(cci_pmu, mask);
}
+#ifdef CONFIG_ARM_CCI5xx_PMU
+
+/*
+ * CCI-500/CCI-550 has advanced power saving policies, which could gate the
+ * clocks to the PMU counters, making writes to them ineffective.
+ * The only way to write to those counters is when the global counters
+ * are enabled and the particular counter is enabled.
+ *
+ * So we do the following :
+ *
+ * 1) Disable all the PMU counters, saving their current state
+ * 2) Enable the global PMU profiling, now that all counters are
+ * disabled.
+ *
+ * For each counter to be programmed, repeat steps 3-7:
+ *
+ * 3) Write an invalid event code to the event control register for the
+ *    counter, so that the counters are not modified.
+ * 4) Enable the counter control for the counter.
+ * 5) Set the counter value
+ * 6) Disable the counter
+ * 7) Restore the event in the target counter
+ *
+ * 8) Disable the global PMU.
+ * 9) Restore the status of the rest of the counters.
+ *
+ * We choose an event that is guaranteed not to count on CCI-5xx:
+ * the highest possible event code (0x1f) on master interface 0.
+ */
+#define CCI5xx_INVALID_EVENT ((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
+ (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
+static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ int i;
+ DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
+
+ bitmap_zero(saved_mask, cci_pmu->num_cntrs);
+ pmu_save_counters(cci_pmu, saved_mask);
+
+ /*
+ * Now that all the counters are disabled, we can safely turn the PMU on,
+ * without syncing the status of the counters
+ */
+ __cci_pmu_enable_nosync(cci_pmu);
+
+ for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
+ struct perf_event *event = cci_pmu->hw_events.events[i];
+
+ if (WARN_ON(!event))
+ continue;
+
+ pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
+ pmu_enable_counter(cci_pmu, i);
+ pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
+ pmu_disable_counter(cci_pmu, i);
+ pmu_set_event(cci_pmu, i, event->hw.config_base);
+ }
+
+ __cci_pmu_disable();
+
+ pmu_restore_counters(cci_pmu, saved_mask);
+}
+
+#endif /* CONFIG_ARM_CCI5xx_PMU */
+
static u64 pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -789,7 +1038,7 @@ static void pmu_read(struct perf_event *event)
pmu_event_update(event);
}
-void pmu_event_set_period(struct perf_event *event)
+static void pmu_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/*
@@ -800,7 +1049,14 @@ void pmu_event_set_period(struct perf_event *event)
*/
u64 val = 1ULL << 31;
local64_set(&hwc->prev_count, val);
- pmu_write_counter(event, val);
+
+ /*
+ * CCI PMU uses PERF_HES_ARCH to keep track of the counters whose
+ * values need to be synced with the s/w state before the PMU is
+ * enabled.
+ * Mark this counter for sync.
+ */
+ hwc->state |= PERF_HES_ARCH;
}
static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
@@ -811,6 +1067,9 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
int idx, handled = IRQ_NONE;
raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Disable the PMU while we walk through the counters */
+ __cci_pmu_disable();
/*
* Iterate over counters and update the corresponding perf events.
* This should work regardless of whether we have per-counter overflow
@@ -818,13 +1077,10 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
*/
for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
struct perf_event *event = events->events[idx];
- struct hw_perf_event *hw_counter;
if (!event)
continue;
- hw_counter = &event->hw;
-
/* Did this counter overflow? */
if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
CCI_PMU_OVRFLW_FLAG))
@@ -837,6 +1093,9 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
pmu_event_set_period(event);
handled = IRQ_HANDLED;
}
+
+ /* Enable the PMU and sync possibly overflowed counters */
+ __cci_pmu_enable_sync(cci_pmu);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
return IRQ_RETVAL(handled);
@@ -875,16 +1134,12 @@ static void cci_pmu_enable(struct pmu *pmu)
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
unsigned long flags;
- u32 val;
if (!enabled)
return;
raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
-
- /* Enable all the PMU counters. */
- val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
- writel(val, cci_ctrl_base + CCI_PMCR);
+ __cci_pmu_enable_sync(cci_pmu);
raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
@@ -894,13 +1149,9 @@ static void cci_pmu_disable(struct pmu *pmu)
struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
unsigned long flags;
- u32 val;
raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
-
- /* Disable all the PMU counters. */
- val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
- writel(val, cci_ctrl_base + CCI_PMCR);
+ __cci_pmu_disable();
raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
@@ -1176,9 +1427,8 @@ static int cci_pmu_event_init(struct perf_event *event)
static ssize_t pmu_cpumask_attr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dev_ext_attribute *eattr = container_of(attr,
- struct dev_ext_attribute, attr);
- struct cci_pmu *cci_pmu = eattr->var;
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
cpumask_pr_args(&cci_pmu->cpus));
@@ -1187,13 +1437,11 @@ static ssize_t pmu_cpumask_attr_show(struct device *dev,
return n;
}
-static struct dev_ext_attribute pmu_cpumask_attr = {
- __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL),
- NULL, /* Populated in cci_pmu_init */
-};
+static struct device_attribute pmu_cpumask_attr =
+ __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);
static struct attribute *pmu_attrs[] = {
- &pmu_cpumask_attr.attr.attr,
+ &pmu_cpumask_attr.attr,
NULL,
};
@@ -1218,60 +1466,14 @@ static const struct attribute_group *pmu_attr_groups[] = {
NULL
};
-static struct attribute **alloc_attrs(struct platform_device *pdev,
- int n, struct dev_ext_attribute *source)
-{
- int i;
- struct attribute **attrs;
-
- /* Alloc n + 1 (for terminating NULL) */
- attrs = devm_kcalloc(&pdev->dev, n + 1, sizeof(struct attribute *),
- GFP_KERNEL);
- if (!attrs)
- return attrs;
- for(i = 0; i < n; i++)
- attrs[i] = &source[i].attr.attr;
- return attrs;
-}
-
-static int cci_pmu_init_attrs(struct cci_pmu *cci_pmu, struct platform_device *pdev)
-{
- const struct cci_pmu_model *model = cci_pmu->model;
- struct attribute **attrs;
-
- /*
- * All allocations below are managed, hence doesn't need to be
- * free'd explicitly in case of an error.
- */
-
- if (model->nevent_attrs) {
- attrs = alloc_attrs(pdev, model->nevent_attrs,
- model->event_attrs);
- if (!attrs)
- return -ENOMEM;
- pmu_event_attr_group.attrs = attrs;
- }
- if (model->nformat_attrs) {
- attrs = alloc_attrs(pdev, model->nformat_attrs,
- model->format_attrs);
- if (!attrs)
- return -ENOMEM;
- pmu_format_attr_group.attrs = attrs;
- }
- pmu_cpumask_attr.var = cci_pmu;
-
- return 0;
-}
-
static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
- char *name = cci_pmu->model->name;
+ const struct cci_pmu_model *model = cci_pmu->model;
+ char *name = model->name;
u32 num_cntrs;
- int rc;
- rc = cci_pmu_init_attrs(cci_pmu, pdev);
- if (rc)
- return rc;
+ pmu_event_attr_group.attrs = model->event_attrs;
+ pmu_format_attr_group.attrs = model->format_attrs;
cci_pmu->pmu = (struct pmu) {
.name = cci_pmu->model->name,
@@ -1314,7 +1516,7 @@ static int cci_pmu_cpu_notifier(struct notifier_block *self,
if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
break;
target = cpumask_any_but(cpu_online_mask, cpu);
- if (target < 0) // UP, last CPU
+ if (target >= nr_cpu_ids) // UP, last CPU
break;
/*
* TODO: migrate context once core races on event->ctx have
@@ -1336,9 +1538,7 @@ static struct cci_pmu_model cci_pmu_models[] = {
.num_hw_cntrs = 4,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
- .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs),
.event_attrs = cci400_r0_pmu_event_attrs,
- .nevent_attrs = ARRAY_SIZE(cci400_r0_pmu_event_attrs),
.event_ranges = {
[CCI_IF_SLAVE] = {
CCI400_R0_SLAVE_PORT_MIN_EV,
@@ -1358,9 +1558,7 @@ static struct cci_pmu_model cci_pmu_models[] = {
.num_hw_cntrs = 4,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
- .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs),
.event_attrs = cci400_r1_pmu_event_attrs,
- .nevent_attrs = ARRAY_SIZE(cci400_r1_pmu_event_attrs),
.event_ranges = {
[CCI_IF_SLAVE] = {
CCI400_R1_SLAVE_PORT_MIN_EV,
@@ -1375,31 +1573,54 @@ static struct cci_pmu_model cci_pmu_models[] = {
.get_event_idx = cci400_get_event_idx,
},
#endif
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
[CCI500_R0] = {
.name = "CCI_500",
.fixed_hw_cntrs = 0,
.num_hw_cntrs = 8,
.cntr_size = SZ_64K,
- .format_attrs = cci500_pmu_format_attrs,
- .nformat_attrs = ARRAY_SIZE(cci500_pmu_format_attrs),
- .event_attrs = cci500_pmu_event_attrs,
- .nevent_attrs = ARRAY_SIZE(cci500_pmu_event_attrs),
+ .format_attrs = cci5xx_pmu_format_attrs,
+ .event_attrs = cci5xx_pmu_event_attrs,
.event_ranges = {
[CCI_IF_SLAVE] = {
- CCI500_SLAVE_PORT_MIN_EV,
- CCI500_SLAVE_PORT_MAX_EV,
+ CCI5xx_SLAVE_PORT_MIN_EV,
+ CCI5xx_SLAVE_PORT_MAX_EV,
},
[CCI_IF_MASTER] = {
- CCI500_MASTER_PORT_MIN_EV,
- CCI500_MASTER_PORT_MAX_EV,
+ CCI5xx_MASTER_PORT_MIN_EV,
+ CCI5xx_MASTER_PORT_MAX_EV,
},
[CCI_IF_GLOBAL] = {
- CCI500_GLOBAL_PORT_MIN_EV,
- CCI500_GLOBAL_PORT_MAX_EV,
+ CCI5xx_GLOBAL_PORT_MIN_EV,
+ CCI5xx_GLOBAL_PORT_MAX_EV,
},
},
.validate_hw_event = cci500_validate_hw_event,
+ .write_counters = cci5xx_pmu_write_counters,
+ },
+ [CCI550_R0] = {
+ .name = "CCI_550",
+ .fixed_hw_cntrs = 0,
+ .num_hw_cntrs = 8,
+ .cntr_size = SZ_64K,
+ .format_attrs = cci5xx_pmu_format_attrs,
+ .event_attrs = cci5xx_pmu_event_attrs,
+ .event_ranges = {
+ [CCI_IF_SLAVE] = {
+ CCI5xx_SLAVE_PORT_MIN_EV,
+ CCI5xx_SLAVE_PORT_MAX_EV,
+ },
+ [CCI_IF_MASTER] = {
+ CCI5xx_MASTER_PORT_MIN_EV,
+ CCI5xx_MASTER_PORT_MAX_EV,
+ },
+ [CCI_IF_GLOBAL] = {
+ CCI5xx_GLOBAL_PORT_MIN_EV,
+ CCI5xx_GLOBAL_PORT_MAX_EV,
+ },
+ },
+ .validate_hw_event = cci550_validate_hw_event,
+ .write_counters = cci5xx_pmu_write_counters,
},
#endif
};
@@ -1419,11 +1640,15 @@ static const struct of_device_id arm_cci_pmu_matches[] = {
.data = &cci_pmu_models[CCI400_R1],
},
#endif
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
{
.compatible = "arm,cci-500-pmu,r0",
.data = &cci_pmu_models[CCI500_R0],
},
+ {
+ .compatible = "arm,cci-550-pmu,r0",
+ .data = &cci_pmu_models[CCI550_R0],
+ },
#endif
{},
};
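
The renames above hinge on one small piece of arithmetic: a CCI-5xx perf event config is a 9-bit value with the source interface in bits [8:5] and the event code in bits [4:0]. A standalone sketch of the encode/decode math follows; the encoder function is illustrative only, since the driver itself only decodes what userspace passes in event->attr.config.

#include <stdio.h>

#define SOURCE_SHIFT	5	/* bits [8:5]: source interface */
#define SOURCE_MASK	0xf
#define CODE_MASK	0x1f	/* bits [4:0]: event code */

/* Illustrative encoder for a CCI-5xx event config value. */
static unsigned int cci5xx_event(unsigned int source, unsigned int code)
{
	return ((source & SOURCE_MASK) << SOURCE_SHIFT) | (code & CODE_MASK);
}

int main(void)
{
	/* e.g. slave interface S2 (source 0x2), event code 0x11 */
	unsigned int ev = cci5xx_event(0x2, 0x11);

	printf("config=0x%03x source=0x%x code=0x%x\n", ev,
	       (ev >> SOURCE_SHIFT) & SOURCE_MASK, ev & CODE_MASK);

	/* CCI5xx_INVALID_EVENT in the patch is the same arithmetic:
	 * master port M0 (0x8) with the maximum code 0x1f, which lies
	 * above the master event range (max 0x06) and so never counts. */
	printf("invalid=0x%03x\n", cci5xx_event(0x8, 0x1f));	/* 0x11f */
	return 0;
}
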
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index e98d15eaa799..1827fc4d15c1 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}
- for_each_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c43c3d2baf73..ce54a0160faa 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -948,6 +948,58 @@ void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
*res = mbus_state.pcie_io_aperture;
}
+int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
+{
+ const struct mbus_dram_target_info *dram;
+ int i;
+
+ /* Get dram info */
+ dram = mv_mbus_dram_info();
+ if (!dram) {
+ pr_err("missing DRAM information\n");
+ return -ENODEV;
+ }
+
+ /* Try to find matching DRAM window for phyaddr */
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ if (cs->base <= phyaddr &&
+ phyaddr <= (cs->base + cs->size - 1)) {
+ *target = dram->mbus_dram_target_id;
+ *attr = cs->mbus_attr;
+ return 0;
+ }
+ }
+
+ pr_err("invalid dram address %pa\n", &phyaddr);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
+
+int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
+ u8 *attr)
+{
+ int win;
+
+ for (win = 0; win < mbus_state.soc->num_wins; win++) {
+ u64 wbase;
+ int enabled;
+
+ mvebu_mbus_read_window(&mbus_state, win, &enabled, &wbase,
+ size, target, attr, NULL);
+
+ if (!enabled)
+ continue;
+
+ if (wbase <= phyaddr && phyaddr <= wbase + *size)
+ return win;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_io_win_info);
+
static __init int mvebu_mbus_debugfs_init(void)
{
struct mvebu_mbus_state *s = &mbus_state;
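
A hedged usage sketch for the new export: the caller below is hypothetical (say, a peripheral driver programming an address-decoding window for a DMA buffer), but the call signature is the one added above.

#include <linux/mbus.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical caller: resolve the MBus target/attribute pair needed to
 * open an address-decoding window covering a DMA buffer at @phys. */
static int example_open_window(phys_addr_t phys)
{
	u8 target, attr;
	int ret;

	ret = mvebu_mbus_get_dram_win_info(phys, &target, &attr);
	if (ret)
		return ret;	/* phys is outside every DRAM chip select */

	pr_info("phys %pa decodes via target %u attr 0x%02x\n",
		&phys, target, attr);
	/* ...write target/attr into the unit's window registers here... */
	return 0;
}
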
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 25996e256110..795c9d9c96a6 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -330,7 +330,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
cmd = RSB_CMD_RD32;
break;
default:
- dev_err(rsb->dev, "Invalid access width: %d\n", len);
+ dev_err(rsb->dev, "Invalid access width: %zd\n", len);
return -EINVAL;
}
@@ -372,7 +372,7 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
cmd = RSB_CMD_WR32;
break;
default:
- dev_err(rsb->dev, "Invalid access width: %d\n", len);
+ dev_err(rsb->dev, "Invalid access width: %zd\n", len);
return -EINVAL;
}
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 834a2aeaf27a..350b7309c26d 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
- if (priv->bank[i].end > priv->bank[j].base ||
+ if (priv->bank[i].end > priv->bank[j].base &&
priv->bank[i].base < priv->bank[j].end) {
dev_err(priv->dev,
"region overlap between bank%d and bank%d\n",
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index a043107da2af..3ec0766ed5e9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -328,7 +328,8 @@ config JS_RTC
config GEN_RTC
tristate "Generic /dev/rtc emulation"
- depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML
+ depends on RTC!=y
+ depends on ALPHA || M68K || MN10300 || PARISC || PPC || X86
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 09f17eb73486..0f64d149c98d 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -156,7 +156,7 @@ static pgprot_t agp_convert_mmap_flags(int prot)
{
unsigned long prot_bits;
- prot_bits = calc_vm_prot_bits(prot) | VM_SHARED;
+ prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED;
return vm_get_page_prot(prot_bits);
}
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 1341a94cc779..aef87fdbd187 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -555,8 +555,10 @@ static unsigned int intel_gtt_mappable_entries(void)
static void intel_gtt_teardown_scratch_page(void)
{
set_pages_wb(intel_private.scratch_page, 1);
- pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (intel_private.needs_dmar)
+ pci_unmap_page(intel_private.pcidev,
+ intel_private.scratch_page_dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(intel_private.scratch_page);
}
@@ -1346,16 +1348,6 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
{
int i, mask;
- /*
- * Can be called from the fake agp driver but also directly from
- * drm/i915.ko. Hence we need to check whether everything is set up
- * already.
- */
- if (intel_private.driver) {
- intel_private.refcount++;
- return 1;
- }
-
for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
if (gpu_pdev) {
if (gpu_pdev->device ==
@@ -1376,16 +1368,26 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
if (!intel_private.driver)
return 0;
- intel_private.refcount++;
-
#if IS_ENABLED(CONFIG_AGP_INTEL)
if (bridge) {
+ if (INTEL_GTT_GEN > 1)
+ return 0;
+
bridge->driver = &intel_fake_agp_driver;
bridge->dev_private_data = &intel_private;
bridge->dev = bridge_pdev;
}
#endif
+
+ /*
+ * Can be called from the fake agp driver but also directly from
+ * drm/i915.ko. Hence we need to check whether everything is set up
+ * already.
+ */
+ if (intel_private.refcount++)
+ return 1;
+
intel_private.bridge_dev = pci_dev_get(bridge_pdev);
dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
@@ -1430,6 +1432,8 @@ void intel_gmch_remove(void)
if (--intel_private.refcount)
return;
+ if (intel_private.scratch_page)
+ intel_gtt_teardown_scratch_page();
if (intel_private.pcidev)
pci_dev_put(intel_private.pcidev);
if (intel_private.bridge_dev)
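
The intel-gtt hunks above reorder probe so that the fail-fast checks run before the refcount is taken, and make remove tear the scratch page down only when the last reference drops. The underlying idiom, reduced to a standalone sketch with illustrative names:

#include <stdio.h>

/* Illustrative shared-state init/teardown guarded by a refcount,
 * mirroring the intel_gmch_probe()/intel_gmch_remove() pattern above. */
static struct {
	int refcount;
	int initialized;
} priv;

static int example_probe(void)
{
	/* Fail-fast checks belong before this point, so an early
	 * return can never leave the refcount inflated. */
	if (priv.refcount++)
		return 1;	/* already set up by a previous caller */

	priv.initialized = 1;	/* one-time initialization */
	return 1;
}

static void example_remove(void)
{
	if (--priv.refcount)
		return;		/* other users remain */

	priv.initialized = 0;	/* last user gone: tear everything down */
}

int main(void)
{
	example_probe();
	example_probe();
	example_remove();
	printf("after first remove: %d\n", priv.initialized);	/* 1 */
	example_remove();
	printf("after last remove: %d\n", priv.initialized);	/* 0 */
	return 0;
}
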
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 05755441250c..fdced547ad59 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -10,7 +10,6 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/uninorth.h>
-#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/pmac_feature.h>
#include "agp.h"
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index ff00331bff49..67ee8b08ab53 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -77,7 +77,7 @@ config HW_RANDOM_ATMEL
config HW_RANDOM_BCM63XX
tristate "Broadcom BCM63xx Random Number Generator support"
- depends on BCM63XX
+ depends on BCM63XX || BMIPS_GENERIC
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -382,6 +382,19 @@ config HW_RANDOM_STM32
If unsure, say N.
+config HW_RANDOM_PIC32
+ tristate "Microchip PIC32 Random Number Generator support"
+ depends on HW_RANDOM && MACH_PIC32
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on a PIC32.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pic32-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 5ad397635128..f5a6fa7690e7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
+obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index 4b31f1387f37..5132c9cde50d 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
+#include <linux/of.h>
#define RNG_CTRL 0x00
#define RNG_EN (1 << 0)
@@ -79,10 +80,8 @@ static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
static int bcm63xx_rng_probe(struct platform_device *pdev)
{
struct resource *r;
- struct clk *clk;
int ret;
struct bcm63xx_rng_priv *priv;
- struct hwrng *rng;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -132,10 +131,19 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id bcm63xx_rng_of_match[] = {
+ { .compatible = "brcm,bcm6368-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match);
+#endif
+
static struct platform_driver bcm63xx_rng_driver = {
.probe = bcm63xx_rng_probe,
.driver = {
.name = "bcm63xx-rng",
+ .of_match_table = of_match_ptr(bcm63xx_rng_of_match),
},
};
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 30cf4623184f..ada081232528 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -144,8 +144,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
}
-#ifdef CONFIG_PM
-static int exynos_rng_runtime_suspend(struct device *dev)
+static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -155,7 +154,7 @@ static int exynos_rng_runtime_suspend(struct device *dev)
return 0;
}
-static int exynos_rng_runtime_resume(struct device *dev)
+static int __maybe_unused exynos_rng_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -163,12 +162,12 @@ static int exynos_rng_runtime_resume(struct device *dev)
return clk_prepare_enable(exynos_rng->clk);
}
-static int exynos_rng_suspend(struct device *dev)
+static int __maybe_unused exynos_rng_suspend(struct device *dev)
{
return pm_runtime_force_suspend(dev);
}
-static int exynos_rng_resume(struct device *dev)
+static int __maybe_unused exynos_rng_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -180,7 +179,6 @@ static int exynos_rng_resume(struct device *dev)
return exynos_rng_configure(exynos_rng);
}
-#endif
static const struct dev_pm_ops exynos_rng_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(exynos_rng_suspend, exynos_rng_resume)
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 843d6f6aee7a..3b06c1d6cfb2 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -743,6 +743,16 @@ static const struct of_device_id n2rng_match[] = {
.compatible = "SUNW,kt-rng",
.data = (void *) 1,
},
+ {
+ .name = "random-number-generator",
+ .compatible = "ORCL,m4-rng",
+ .data = (void *) 1,
+ },
+ {
+ .name = "random-number-generator",
+ .compatible = "ORCL,m7-rng",
+ .data = (void *) 1,
+ },
{},
};
MODULE_DEVICE_TABLE(of, n2rng_match);
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
new file mode 100644
index 000000000000..108897bea2d0
--- /dev/null
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -0,0 +1,155 @@
+/*
+ * PIC32 RNG driver
+ *
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2016 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RNGCON 0x04
+#define TRNGEN BIT(8)
+#define PRNGEN BIT(9)
+#define PRNGCONT BIT(10)
+#define TRNGMOD BIT(11)
+#define SEEDLOAD BIT(12)
+#define RNGPOLY1 0x08
+#define RNGPOLY2 0x0C
+#define RNGNUMGEN1 0x10
+#define RNGNUMGEN2 0x14
+#define RNGSEED1 0x18
+#define RNGSEED2 0x1C
+#define RNGRCNT 0x20
+#define RCNT_MASK 0x7F
+
+struct pic32_rng {
+ void __iomem *base;
+ struct hwrng rng;
+ struct clk *clk;
+};
+
+/*
+ * The TRNG can generate up to 24Mbps. This is a timeout that should be safe
+ * enough given the instructions in the loop and that the TRNG may not always
+ * be at maximum rate.
+ */
+#define RNG_TIMEOUT 500
+
+static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+ u64 *data = buf;
+ u32 t;
+ unsigned int timeout = RNG_TIMEOUT;
+
+ if (max < 8)
+ return 0;
+
+ do {
+ t = readl(priv->base + RNGRCNT) & RCNT_MASK;
+ if (t == 64) {
+ /* TRNG value comes through the seed registers */
+ *data = ((u64)readl(priv->base + RNGSEED2) << 32) +
+ readl(priv->base + RNGSEED1);
+ return 8;
+ }
+ } while (wait && --timeout);
+
+ return -EIO;
+}
+
+static int pic32_rng_probe(struct platform_device *pdev)
+{
+ struct pic32_rng *priv;
+ struct resource *res;
+ u32 v;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* enable TRNG in enhanced mode */
+ v = TRNGEN | TRNGMOD;
+ writel(v, priv->base + RNGCON);
+
+ priv->rng.name = pdev->name;
+ priv->rng.read = pic32_rng_read;
+
+ ret = hwrng_register(&priv->rng);
+ if (ret)
+ goto err_register;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+err_register:
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int pic32_rng_remove(struct platform_device *pdev)
+{
+ struct pic32_rng *rng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&rng->rng);
+ writel(0, rng->base + RNGCON);
+ clk_disable_unprepare(rng->clk);
+ return 0;
+}
+
+static const struct of_device_id pic32_rng_of_match[] = {
+ { .compatible = "microchip,pic32mzda-rng", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_rng_of_match);
+
+static struct platform_driver pic32_rng_driver = {
+ .probe = pic32_rng_probe,
+ .remove = pic32_rng_remove,
+ .driver = {
+ .name = "pic32-rng",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pic32_rng_of_match),
+ },
+};
+
+module_platform_driver(pic32_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joshua Henderson <joshua.henderson@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 RNG Driver");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 7fddd8696211..1e25b5205724 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -849,7 +849,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
smi_inc_stat(smi_info, complete_transactions);
handle_transaction_done(smi_info);
- si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ goto restart;
} else if (si_sm_result == SI_SM_HOSED) {
smi_inc_stat(smi_info, hosed_count);
@@ -866,7 +866,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
*/
return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
}
- si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ goto restart;
}
/*
@@ -1363,12 +1363,12 @@ MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
" default scan of the interfaces identified via DMI");
#endif
module_param_named(tryplatform, si_tryplatform, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
" default scan of the interfaces identified via platform"
" interfaces like openfirmware");
#ifdef CONFIG_PCI
module_param_named(trypci, si_trypci, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
" default scan of the interfaces identified via pci");
#endif
module_param_named(trydefaults, si_trydefaults, bool, 0);
@@ -2690,6 +2690,9 @@ static int acpi_ipmi_probe(struct platform_device *dev)
unsigned long long tmp;
int rv = -EINVAL;
+ if (!si_tryacpi)
+ return 0;
+
handle = ACPI_HANDLE(&dev->dev);
if (!handle)
return -ENODEV;
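
The two `goto restart` substitutions change the flow from "poke the state machine one more time" to "re-run the whole event loop", so a transaction queued by the completion handler is started in the same pass instead of waiting for the next poll. The shape of that control flow, as a standalone sketch (names and types are illustrative, not the driver's):

	enum sm_result { SM_IDLE, SM_BUSY, SM_COMPLETE };

	/* After a completed transaction, jump back to the top so work
	 * queued by the completion handler runs immediately. */
	static enum sm_result run_sm(enum sm_result (*event)(void *), void *sm,
				     void (*done)(void *))
	{
		enum sm_result r;

	restart:
		r = event(sm);
		if (r == SM_COMPLETE) {
			done(sm);
			goto restart;
		}
		return r;
	}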
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 5f1c3d08ba65..8b3be8b92573 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -920,23 +920,18 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
msg_done_handler(ssif_info, -EIO, NULL, 0);
}
} else {
+ /* Ready to request the result. */
unsigned long oflags, *flags;
- bool got_alert;
ssif_inc_stat(ssif_info, sent_messages);
ssif_inc_stat(ssif_info, sent_messages_parts);
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
- got_alert = ssif_info->got_alert;
- if (got_alert) {
+ if (ssif_info->got_alert) {
+ /* The result is already ready, just start it. */
ssif_info->got_alert = false;
- ssif_info->waiting_alert = false;
- }
-
- if (got_alert) {
ipmi_ssif_unlock_cond(ssif_info, flags);
- /* The alert already happened, try now. */
- retry_timeout((unsigned long) ssif_info);
+ start_get(ssif_info);
} else {
/* Wait a jiffie then request the next message */
ssif_info->waiting_alert = true;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 096f0cef4da1..4facc7517a6a 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1140,7 +1140,7 @@ ipmi_nmi(unsigned int val, struct pt_regs *regs)
the timer. So do so. */
pretimeout_since_last_heartbeat = 1;
if (atomic_inc_and_test(&preop_panic_excl))
- panic(PFX "pre-timeout");
+ nmi_panic(regs, PFX "pre-timeout");
}
return NMI_HANDLED;
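
A plain panic() from NMI context can deadlock when another CPU is already panicking, because the NMI interrupts the panic path and spins on the same machinery. nmi_panic() resolves this with a single atomic claim of a "panicking CPU" slot. A standalone sketch of the core idea (simplified; the kernel implementation differs in detail, and do_panic()/stop_this_cpu() are stand-ins):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PANIC_CPU_INVALID -1

	static atomic_int panic_cpu = ATOMIC_VAR_INIT(PANIC_CPU_INVALID);

	static void do_panic(const char *msg) { fprintf(stderr, "panic: %s\n", msg); exit(1); }
	static void stop_this_cpu(void) { for (;;) ; }	/* park forever */

	/* Only the first CPU to claim the slot panics; an NMI on the CPU
	 * already panicking just returns; any other CPU parks itself. */
	static void nmi_panic_sketch(int this_cpu, const char *msg)
	{
		int old = PANIC_CPU_INVALID;

		if (atomic_compare_exchange_strong(&panic_cpu, &old, this_cpu))
			do_panic(msg);
		else if (old != this_cpu)
			stop_this_cpu();
	}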
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 4f6f94c43412..71025c2f6bbb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -695,7 +695,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
offset += file->f_pos;
case SEEK_SET:
/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
- if (IS_ERR_VALUE((unsigned long long)offset)) {
+ if ((unsigned long long)offset >= -MAX_ERRNO) {
ret = -EOVERFLOW;
break;
}
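
The rewritten check guards lseek's return channel: error codes travel as the top MAX_ERRNO (4095) values of the unsigned range, so any "successful" offset landing there would be misread by userland as a negative errno. The old IS_ERR_VALUE() cast the 64-bit offset through unsigned long, which truncates on 32-bit builds and lets large offsets slip past. A standalone illustration (function name is hypothetical):

	#include <stdio.h>

	#define MAX_ERRNO 4095

	/* An offset aliases an errno iff its unsigned value falls in the
	 * top MAX_ERRNO slots, i.e. >= (unsigned)-MAX_ERRNO. */
	static int aliases_errno(long long offset)
	{
		return (unsigned long long)offset >= (unsigned long long)-MAX_ERRNO;
	}

	int main(void)
	{
		printf("%d\n", aliases_errno(-9));		/* 1: would read as -EBADF */
		printf("%d\n", aliases_errno(1LL << 40));	/* 0: ordinary large offset */
		return 0;
	}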
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 01292328a456..678fa97e41fb 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -496,12 +496,12 @@ static void pc_set_checksum(void)
#ifdef CONFIG_PROC_FS
-static char *floppy_types[] = {
+static const char * const floppy_types[] = {
"none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
"3.5'' 2.88M", "3.5'' 2.88M"
};
-static char *gfx_types[] = {
+static const char * const gfx_types[] = {
"EGA, VGA, ... (with BIOS)",
"CGA (40 cols)",
"CGA (80 cols)",
@@ -602,7 +602,7 @@ static void atari_set_checksum(void)
static struct {
unsigned char val;
- char *name;
+ const char *name;
} boot_prefs[] = {
{ 0x80, "TOS" },
{ 0x40, "ASV" },
@@ -611,7 +611,7 @@ static struct {
{ 0x00, "unspecified" }
};
-static char *languages[] = {
+static const char * const languages[] = {
"English (US)",
"German",
"French",
@@ -623,7 +623,7 @@ static char *languages[] = {
"Swiss (German)"
};
-static char *dateformat[] = {
+static const char * const dateformat[] = {
"MM%cDD%cYY",
"DD%cMM%cYY",
"YY%cMM%cDD",
@@ -634,7 +634,7 @@ static char *dateformat[] = {
"7 (undefined)"
};
-static char *colors[] = {
+static const char * const colors[] = {
"2", "4", "16", "256", "65536", "??", "??", "??"
};
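
For reference, the two consts in these tables pin down different things: both the pointer slots and the pointed-to strings become immutable, which also lets the arrays live in read-only data. A two-line sketch with hypothetical names:

	static const char * const names[] = { "none", "5.25'' 360k" };

	/* names[0] = "other";  -> error: the pointer slots are const */
	/* names[0][0] = 'N';   -> error: the pointed-to chars are const */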
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 76c490fa0511..0e184426db98 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -129,10 +129,9 @@ static void button_consume_callbacks (int bpcount)
static void button_sequence_finished (unsigned long parameters)
{
-#ifdef CONFIG_NWBUTTON_REBOOT /* Reboot using button is enabled */
- if (button_press_count == reboot_count)
+ if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
+ button_press_count == reboot_count)
kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */
-#endif /* CONFIG_NWBUTTON_REBOOT */
button_consume_callbacks (button_press_count);
bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
button_press_count = 0; /* Reset the button press counter */
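
IS_ENABLED() trades the #ifdef for an ordinary if, so the guarded call stays visible to the compiler (parsed and type-checked) even when the option is off, and the optimizer removes it as dead code. A standalone sketch with a stand-in config symbol and a simplified macro:

	#include <stdio.h>

	#define CONFIG_REBOOT_ON_COUNT 1	/* stand-in Kconfig symbol */
	#define IS_ENABLED(opt) (opt)		/* simplified; the real macro also handles =m and undefined */

	static void request_reboot(void) { puts("rebooting"); }

	static void on_button(int presses, int reboot_count)
	{
		/* Compiles in both configurations; folds to nothing when disabled. */
		if (IS_ENABLED(CONFIG_REBOOT_ON_COUNT) && presses == reboot_count)
			request_reboot();
	}

	int main(void) { on_button(2, 2); return 0; }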
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 45df4bf914f8..22c27652e46a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -1349,7 +1349,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
/* TODO:disable interrupts instead of reset to preserve signal states */
reset_device(info);
- if (!tty || tty->termios.c_cflag & HUPCL) {
+ if (!tty || C_HUPCL(tty)) {
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
}
@@ -1390,7 +1390,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
get_signals(info);
- if (info->netcount || (tty && (tty->termios.c_cflag & CREAD)))
+ if (info->netcount || (tty && C_CREAD(tty)))
rx_start(info);
spin_unlock_irqrestore(&info->lock, flags);
@@ -1733,7 +1733,7 @@ static void mgslpc_throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
mgslpc_send_xchar(tty, STOP_CHAR(tty));
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock, flags);
info->serial_signals &= ~SerialSignal_RTS;
set_signals(info);
@@ -1762,7 +1762,7 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
mgslpc_send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock, flags);
info->serial_signals |= SerialSignal_RTS;
set_signals(info);
@@ -2306,8 +2306,7 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
mgslpc_change_params(info, tty);
/* Handle transition to B0 status */
- if (old_termios->c_cflag & CBAUD &&
- !(tty->termios.c_cflag & CBAUD)) {
+ if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock, flags);
set_signals(info);
@@ -2315,21 +2314,17 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
}
/* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) &&
- tty->termios.c_cflag & CBAUD) {
+ if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
info->serial_signals |= SerialSignal_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
info->serial_signals |= SerialSignal_RTS;
- }
spin_lock_irqsave(&info->lock, flags);
set_signals(info);
spin_unlock_irqrestore(&info->lock, flags);
}
/* Handle turning off CRTSCTS */
- if (old_termios->c_cflag & CRTSCTS &&
- !(tty->termios.c_cflag & CRTSCTS)) {
+ if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
tx_release(tty);
}
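
The C_HUPCL()/C_CREAD()/C_CRTSCTS()/C_BAUD() helpers substituted throughout are thin wrappers over the same termios flag test; roughly, paraphrasing include/linux/tty.h from memory rather than quoting it verbatim:

	#define _C_FLAG(tty, flag)	((tty)->termios.c_cflag & (flag))
	#define C_HUPCL(tty)		_C_FLAG((tty), HUPCL)
	#define C_CREAD(tty)		_C_FLAG((tty), CREAD)
	#define C_CRTSCTS(tty)		_C_FLAG((tty), CRTSCTS)
	#define C_BAUD(tty)		_C_FLAG((tty), CBAUD)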
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index ae0b42b66e55..f8a483c67b07 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -69,12 +69,13 @@
#include <linux/ppdev.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
#define PP_VERSION "ppdev: user-space parallel port driver"
#define CHRDEV "ppdev"
struct pp_struct {
- struct pardevice * pdev;
+ struct pardevice *pdev;
wait_queue_head_t irq_wait;
atomic_t irqc;
unsigned int flags;
@@ -98,18 +99,26 @@ struct pp_struct {
#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
static DEFINE_MUTEX(pp_do_mutex);
-static inline void pp_enable_irq (struct pp_struct *pp)
+
+/* define fixed sized ioctl cmd for y2038 migration */
+#define PPGETTIME32 _IOR(PP_IOCTL, 0x95, s32[2])
+#define PPSETTIME32 _IOW(PP_IOCTL, 0x96, s32[2])
+#define PPGETTIME64 _IOR(PP_IOCTL, 0x95, s64[2])
+#define PPSETTIME64 _IOW(PP_IOCTL, 0x96, s64[2])
+
+static inline void pp_enable_irq(struct pp_struct *pp)
{
struct parport *port = pp->pdev->port;
- port->ops->enable_irq (port);
+
+ port->ops->enable_irq(port);
}
-static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
- loff_t * ppos)
+static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
{
unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
- char * kbuffer;
+ char *kbuffer;
ssize_t bytes_read = 0;
struct parport *pport;
int mode;
@@ -125,16 +134,15 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
return 0;
kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
- if (!kbuffer) {
+ if (!kbuffer)
return -ENOMEM;
- }
pport = pp->pdev->port;
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
- parport_set_timeout (pp->pdev,
- (file->f_flags & O_NONBLOCK) ?
- PARPORT_INACTIVITY_O_NONBLOCK :
- pp->default_inactivity);
+ parport_set_timeout(pp->pdev,
+ (file->f_flags & O_NONBLOCK) ?
+ PARPORT_INACTIVITY_O_NONBLOCK :
+ pp->default_inactivity);
while (bytes_read == 0) {
ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
@@ -144,20 +152,17 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
int flags = 0;
size_t (*fn)(struct parport *, void *, size_t, int);
- if (pp->flags & PP_W91284PIC) {
+ if (pp->flags & PP_W91284PIC)
flags |= PARPORT_W91284PIC;
- }
- if (pp->flags & PP_FASTREAD) {
+ if (pp->flags & PP_FASTREAD)
flags |= PARPORT_EPP_FAST;
- }
- if (pport->ieee1284.mode & IEEE1284_ADDR) {
+ if (pport->ieee1284.mode & IEEE1284_ADDR)
fn = pport->ops->epp_read_addr;
- } else {
+ else
fn = pport->ops->epp_read_data;
- }
bytes_read = (*fn)(pport, kbuffer, need, flags);
} else {
- bytes_read = parport_read (pport, kbuffer, need);
+ bytes_read = parport_read(pport, kbuffer, need);
}
if (bytes_read != 0)
@@ -168,7 +173,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
break;
}
- if (signal_pending (current)) {
+ if (signal_pending(current)) {
bytes_read = -ERESTARTSYS;
break;
}
@@ -176,22 +181,22 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
cond_resched();
}
- parport_set_timeout (pp->pdev, pp->default_inactivity);
+ parport_set_timeout(pp->pdev, pp->default_inactivity);
- if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read))
+ if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
bytes_read = -EFAULT;
- kfree (kbuffer);
- pp_enable_irq (pp);
+ kfree(kbuffer);
+ pp_enable_irq(pp);
return bytes_read;
}
-static ssize_t pp_write (struct file * file, const char __user * buf,
- size_t count, loff_t * ppos)
+static ssize_t pp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
- char * kbuffer;
+ char *kbuffer;
ssize_t bytes_written = 0;
ssize_t wrote;
int mode;
@@ -204,21 +209,21 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
}
kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
- if (!kbuffer) {
+ if (!kbuffer)
return -ENOMEM;
- }
+
pport = pp->pdev->port;
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
- parport_set_timeout (pp->pdev,
- (file->f_flags & O_NONBLOCK) ?
- PARPORT_INACTIVITY_O_NONBLOCK :
- pp->default_inactivity);
+ parport_set_timeout(pp->pdev,
+ (file->f_flags & O_NONBLOCK) ?
+ PARPORT_INACTIVITY_O_NONBLOCK :
+ pp->default_inactivity);
while (bytes_written < count) {
ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
- if (copy_from_user (kbuffer, buf + bytes_written, n)) {
+ if (copy_from_user(kbuffer, buf + bytes_written, n)) {
bytes_written = -EFAULT;
break;
}
@@ -226,20 +231,19 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
/* do a fast EPP write */
if (pport->ieee1284.mode & IEEE1284_ADDR) {
- wrote = pport->ops->epp_write_addr (pport,
+ wrote = pport->ops->epp_write_addr(pport,
kbuffer, n, PARPORT_EPP_FAST);
} else {
- wrote = pport->ops->epp_write_data (pport,
+ wrote = pport->ops->epp_write_data(pport,
kbuffer, n, PARPORT_EPP_FAST);
}
} else {
- wrote = parport_write (pp->pdev->port, kbuffer, n);
+ wrote = parport_write(pp->pdev->port, kbuffer, n);
}
if (wrote <= 0) {
- if (!bytes_written) {
+ if (!bytes_written)
bytes_written = wrote;
- }
break;
}
@@ -251,36 +255,36 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
break;
}
- if (signal_pending (current))
+ if (signal_pending(current))
break;
cond_resched();
}
- parport_set_timeout (pp->pdev, pp->default_inactivity);
+ parport_set_timeout(pp->pdev, pp->default_inactivity);
- kfree (kbuffer);
- pp_enable_irq (pp);
+ kfree(kbuffer);
+ pp_enable_irq(pp);
return bytes_written;
}
-static void pp_irq (void *private)
+static void pp_irq(void *private)
{
struct pp_struct *pp = private;
if (pp->irqresponse) {
- parport_write_control (pp->pdev->port, pp->irqctl);
+ parport_write_control(pp->pdev->port, pp->irqctl);
pp->irqresponse = 0;
}
- atomic_inc (&pp->irqc);
- wake_up_interruptible (&pp->irq_wait);
+ atomic_inc(&pp->irqc);
+ wake_up_interruptible(&pp->irq_wait);
}
-static int register_device (int minor, struct pp_struct *pp)
+static int register_device(int minor, struct pp_struct *pp)
{
struct parport *port;
- struct pardevice * pdev = NULL;
+ struct pardevice *pdev = NULL;
char *name;
int fl;
@@ -288,30 +292,30 @@ static int register_device (int minor, struct pp_struct *pp)
if (name == NULL)
return -ENOMEM;
- port = parport_find_number (minor);
+ port = parport_find_number(minor);
if (!port) {
- printk (KERN_WARNING "%s: no associated port!\n", name);
- kfree (name);
+ printk(KERN_WARNING "%s: no associated port!\n", name);
+ kfree(name);
return -ENXIO;
}
fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
- pdev = parport_register_device (port, name, NULL,
- NULL, pp_irq, fl, pp);
- parport_put_port (port);
+ pdev = parport_register_device(port, name, NULL,
+ NULL, pp_irq, fl, pp);
+ parport_put_port(port);
if (!pdev) {
- printk (KERN_WARNING "%s: failed to register device!\n", name);
- kfree (name);
+ printk(KERN_WARNING "%s: failed to register device!\n", name);
+ kfree(name);
return -ENXIO;
}
pp->pdev = pdev;
- pr_debug("%s: registered pardevice\n", name);
+ dev_dbg(&pdev->dev, "registered pardevice\n");
return 0;
}
-static enum ieee1284_phase init_phase (int mode)
+static enum ieee1284_phase init_phase(int mode)
{
switch (mode & ~(IEEE1284_DEVICEID
| IEEE1284_ADDR)) {
@@ -322,11 +326,27 @@ static enum ieee1284_phase init_phase (int mode)
return IEEE1284_PH_FWD_IDLE;
}
+static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec)
+{
+ long to_jiffies;
+
+ if ((tv_sec < 0) || (tv_usec < 0))
+ return -EINVAL;
+
+ to_jiffies = usecs_to_jiffies(tv_usec);
+ to_jiffies += tv_sec * HZ;
+ if (to_jiffies <= 0)
+ return -EINVAL;
+
+ pdev->timeout = to_jiffies;
+ return 0;
+}
+
static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
- struct parport * port;
+ struct parport *port;
void __user *argp = (void __user *)arg;
/* First handle the cases that don't take arguments. */
@@ -337,19 +357,19 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int ret;
if (pp->flags & PP_CLAIMED) {
- pr_debug(CHRDEV "%x: you've already got it!\n", minor);
+ dev_dbg(&pp->pdev->dev, "you've already got it!\n");
return -EINVAL;
}
/* Deferred device registration. */
if (!pp->pdev) {
- int err = register_device (minor, pp);
- if (err) {
+ int err = register_device(minor, pp);
+
+ if (err)
return err;
- }
}
- ret = parport_claim_or_block (pp->pdev);
+ ret = parport_claim_or_block(pp->pdev);
if (ret < 0)
return ret;
@@ -357,7 +377,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* For interrupt-reporting to work, we need to be
* informed of each interrupt. */
- pp_enable_irq (pp);
+ pp_enable_irq(pp);
/* We may need to fix up the state machine. */
info = &pp->pdev->port->ieee1284;
@@ -365,15 +385,15 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
pp->saved_state.phase = info->phase;
info->mode = pp->state.mode;
info->phase = pp->state.phase;
- pp->default_inactivity = parport_set_timeout (pp->pdev, 0);
- parport_set_timeout (pp->pdev, pp->default_inactivity);
+ pp->default_inactivity = parport_set_timeout(pp->pdev, 0);
+ parport_set_timeout(pp->pdev, pp->default_inactivity);
return 0;
}
case PPEXCL:
if (pp->pdev) {
- pr_debug(CHRDEV "%x: too late for PPEXCL; "
- "already registered\n", minor);
+ dev_dbg(&pp->pdev->dev,
+ "too late for PPEXCL; already registered\n");
if (pp->flags & PP_EXCL)
/* But it's not really an error. */
return 0;
@@ -388,11 +408,12 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PPSETMODE:
{
int mode;
- if (copy_from_user (&mode, argp, sizeof (mode)))
+
+ if (copy_from_user(&mode, argp, sizeof(mode)))
return -EFAULT;
/* FIXME: validate mode */
pp->state.mode = mode;
- pp->state.phase = init_phase (mode);
+ pp->state.phase = init_phase(mode);
if (pp->flags & PP_CLAIMED) {
pp->pdev->port->ieee1284.mode = mode;
@@ -405,28 +426,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int mode;
- if (pp->flags & PP_CLAIMED) {
+ if (pp->flags & PP_CLAIMED)
mode = pp->pdev->port->ieee1284.mode;
- } else {
+ else
mode = pp->state.mode;
- }
- if (copy_to_user (argp, &mode, sizeof (mode))) {
+
+ if (copy_to_user(argp, &mode, sizeof(mode)))
return -EFAULT;
- }
return 0;
}
case PPSETPHASE:
{
int phase;
- if (copy_from_user (&phase, argp, sizeof (phase))) {
+
+ if (copy_from_user(&phase, argp, sizeof(phase)))
return -EFAULT;
- }
+
/* FIXME: validate phase */
pp->state.phase = phase;
- if (pp->flags & PP_CLAIMED) {
+ if (pp->flags & PP_CLAIMED)
pp->pdev->port->ieee1284.phase = phase;
- }
return 0;
}
@@ -434,38 +454,34 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int phase;
- if (pp->flags & PP_CLAIMED) {
+ if (pp->flags & PP_CLAIMED)
phase = pp->pdev->port->ieee1284.phase;
- } else {
+ else
phase = pp->state.phase;
- }
- if (copy_to_user (argp, &phase, sizeof (phase))) {
+ if (copy_to_user(argp, &phase, sizeof(phase)))
return -EFAULT;
- }
return 0;
}
case PPGETMODES:
{
unsigned int modes;
- port = parport_find_number (minor);
+ port = parport_find_number(minor);
if (!port)
return -ENODEV;
modes = port->modes;
parport_put_port(port);
- if (copy_to_user (argp, &modes, sizeof (modes))) {
+ if (copy_to_user(argp, &modes, sizeof(modes)))
return -EFAULT;
- }
return 0;
}
case PPSETFLAGS:
{
int uflags;
- if (copy_from_user (&uflags, argp, sizeof (uflags))) {
+ if (copy_from_user(&uflags, argp, sizeof(uflags)))
return -EFAULT;
- }
pp->flags &= ~PP_FLAGMASK;
pp->flags |= (uflags & PP_FLAGMASK);
return 0;
@@ -475,9 +491,8 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int uflags;
uflags = pp->flags & PP_FLAGMASK;
- if (copy_to_user (argp, &uflags, sizeof (uflags))) {
+ if (copy_to_user(argp, &uflags, sizeof(uflags)))
return -EFAULT;
- }
return 0;
}
} /* end switch() */
@@ -495,27 +510,28 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
unsigned char reg;
unsigned char mask;
int mode;
+ s32 time32[2];
+ s64 time64[2];
+ struct timespec64 ts;
int ret;
- struct timeval par_timeout;
- long to_jiffies;
case PPRSTATUS:
- reg = parport_read_status (port);
- if (copy_to_user (argp, &reg, sizeof (reg)))
+ reg = parport_read_status(port);
+ if (copy_to_user(argp, &reg, sizeof(reg)))
return -EFAULT;
return 0;
case PPRDATA:
- reg = parport_read_data (port);
- if (copy_to_user (argp, &reg, sizeof (reg)))
+ reg = parport_read_data(port);
+ if (copy_to_user(argp, &reg, sizeof(reg)))
return -EFAULT;
return 0;
case PPRCONTROL:
- reg = parport_read_control (port);
- if (copy_to_user (argp, &reg, sizeof (reg)))
+ reg = parport_read_control(port);
+ if (copy_to_user(argp, &reg, sizeof(reg)))
return -EFAULT;
return 0;
case PPYIELD:
- parport_yield_blocking (pp->pdev);
+ parport_yield_blocking(pp->pdev);
return 0;
case PPRELEASE:
@@ -525,45 +541,45 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
pp->state.phase = info->phase;
info->mode = pp->saved_state.mode;
info->phase = pp->saved_state.phase;
- parport_release (pp->pdev);
+ parport_release(pp->pdev);
pp->flags &= ~PP_CLAIMED;
return 0;
case PPWCONTROL:
- if (copy_from_user (&reg, argp, sizeof (reg)))
+ if (copy_from_user(&reg, argp, sizeof(reg)))
return -EFAULT;
- parport_write_control (port, reg);
+ parport_write_control(port, reg);
return 0;
case PPWDATA:
- if (copy_from_user (&reg, argp, sizeof (reg)))
+ if (copy_from_user(&reg, argp, sizeof(reg)))
return -EFAULT;
- parport_write_data (port, reg);
+ parport_write_data(port, reg);
return 0;
case PPFCONTROL:
- if (copy_from_user (&mask, argp,
- sizeof (mask)))
+ if (copy_from_user(&mask, argp,
+ sizeof(mask)))
return -EFAULT;
- if (copy_from_user (&reg, 1 + (unsigned char __user *) arg,
- sizeof (reg)))
+ if (copy_from_user(&reg, 1 + (unsigned char __user *) arg,
+ sizeof(reg)))
return -EFAULT;
- parport_frob_control (port, mask, reg);
+ parport_frob_control(port, mask, reg);
return 0;
case PPDATADIR:
- if (copy_from_user (&mode, argp, sizeof (mode)))
+ if (copy_from_user(&mode, argp, sizeof(mode)))
return -EFAULT;
if (mode)
- port->ops->data_reverse (port);
+ port->ops->data_reverse(port);
else
- port->ops->data_forward (port);
+ port->ops->data_forward(port);
return 0;
case PPNEGOT:
- if (copy_from_user (&mode, argp, sizeof (mode)))
+ if (copy_from_user(&mode, argp, sizeof(mode)))
return -EFAULT;
- switch ((ret = parport_negotiate (port, mode))) {
+ switch ((ret = parport_negotiate(port, mode))) {
case 0: break;
case -1: /* handshake failed, peripheral not IEEE 1284 */
ret = -EIO;
@@ -572,11 +588,11 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -ENXIO;
break;
}
- pp_enable_irq (pp);
+ pp_enable_irq(pp);
return ret;
case PPWCTLONIRQ:
- if (copy_from_user (&reg, argp, sizeof (reg)))
+ if (copy_from_user(&reg, argp, sizeof(reg)))
return -EFAULT;
/* Remember what to set the control lines to, for next
@@ -586,39 +602,50 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
case PPCLRIRQ:
- ret = atomic_read (&pp->irqc);
- if (copy_to_user (argp, &ret, sizeof (ret)))
+ ret = atomic_read(&pp->irqc);
+ if (copy_to_user(argp, &ret, sizeof(ret)))
return -EFAULT;
- atomic_sub (ret, &pp->irqc);
+ atomic_sub(ret, &pp->irqc);
return 0;
- case PPSETTIME:
- if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) {
+ case PPSETTIME32:
+ if (copy_from_user(time32, argp, sizeof(time32)))
return -EFAULT;
- }
- /* Convert to jiffies, place in pp->pdev->timeout */
- if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) {
- return -EINVAL;
- }
- to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ);
- to_jiffies += par_timeout.tv_sec * (long)HZ;
- if (to_jiffies <= 0) {
+
+ return pp_set_timeout(pp->pdev, time32[0], time32[1]);
+
+ case PPSETTIME64:
+ if (copy_from_user(time64, argp, sizeof(time64)))
+ return -EFAULT;
+
+ return pp_set_timeout(pp->pdev, time64[0], time64[1]);
+
+ case PPGETTIME32:
+ jiffies_to_timespec64(pp->pdev->timeout, &ts);
+ time32[0] = ts.tv_sec;
+ time32[1] = ts.tv_nsec / NSEC_PER_USEC;
+ if ((time32[0] < 0) || (time32[1] < 0))
return -EINVAL;
- }
- pp->pdev->timeout = to_jiffies;
+
+ if (copy_to_user(argp, time32, sizeof(time32)))
+ return -EFAULT;
+
return 0;
- case PPGETTIME:
- to_jiffies = pp->pdev->timeout;
- memset(&par_timeout, 0, sizeof(par_timeout));
- par_timeout.tv_sec = to_jiffies / HZ;
- par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
- if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
+ case PPGETTIME64:
+ jiffies_to_timespec64(pp->pdev->timeout, &ts);
+ time64[0] = ts.tv_sec;
+ time64[1] = ts.tv_nsec / NSEC_PER_USEC;
+ if ((time64[0] < 0) || (time64[1] < 0))
+ return -EINVAL;
+
+ if (copy_to_user(argp, time64, sizeof(time64)))
return -EFAULT;
+
return 0;
default:
- pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd);
+ dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd);
return -EINVAL;
}
@@ -629,13 +656,22 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret;
+
mutex_lock(&pp_do_mutex);
ret = pp_do_ioctl(file, cmd, arg);
mutex_unlock(&pp_do_mutex);
return ret;
}
-static int pp_open (struct inode * inode, struct file * file)
+#ifdef CONFIG_COMPAT
+static long pp_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int pp_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
struct pp_struct *pp;
@@ -643,16 +679,16 @@ static int pp_open (struct inode * inode, struct file * file)
if (minor >= PARPORT_MAX)
return -ENXIO;
- pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL);
+ pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->state.mode = IEEE1284_MODE_COMPAT;
- pp->state.phase = init_phase (pp->state.mode);
+ pp->state.phase = init_phase(pp->state.mode);
pp->flags = 0;
pp->irqresponse = 0;
- atomic_set (&pp->irqc, 0);
- init_waitqueue_head (&pp->irq_wait);
+ atomic_set(&pp->irqc, 0);
+ init_waitqueue_head(&pp->irq_wait);
/* Defer the actual device registration until the first claim.
* That way, we know whether or not the driver wants to have
@@ -664,7 +700,7 @@ static int pp_open (struct inode * inode, struct file * file)
return 0;
}
-static int pp_release (struct inode * inode, struct file * file)
+static int pp_release(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
struct pp_struct *pp = file->private_data;
@@ -673,10 +709,10 @@ static int pp_release (struct inode * inode, struct file * file)
compat_negot = 0;
if (!(pp->flags & PP_CLAIMED) && pp->pdev &&
(pp->state.mode != IEEE1284_MODE_COMPAT)) {
- struct ieee1284_info *info;
+ struct ieee1284_info *info;
/* parport released, but not in compatibility mode */
- parport_claim_or_block (pp->pdev);
+ parport_claim_or_block(pp->pdev);
pp->flags |= PP_CLAIMED;
info = &pp->pdev->port->ieee1284;
pp->saved_state.mode = info->mode;
@@ -689,9 +725,9 @@ static int pp_release (struct inode * inode, struct file * file)
compat_negot = 2;
}
if (compat_negot) {
- parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT);
- pr_debug(CHRDEV "%x: negotiated back to compatibility "
- "mode because user-space forgot\n", minor);
+ parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT);
+ dev_dbg(&pp->pdev->dev,
+ "negotiated back to compatibility mode because user-space forgot\n");
}
if (pp->flags & PP_CLAIMED) {
@@ -702,7 +738,7 @@ static int pp_release (struct inode * inode, struct file * file)
pp->state.phase = info->phase;
info->mode = pp->saved_state.mode;
info->phase = pp->saved_state.phase;
- parport_release (pp->pdev);
+ parport_release(pp->pdev);
if (compat_negot != 1) {
pr_debug(CHRDEV "%x: released pardevice "
"because user-space forgot\n", minor);
@@ -711,25 +747,26 @@ static int pp_release (struct inode * inode, struct file * file)
if (pp->pdev) {
const char *name = pp->pdev->name;
- parport_unregister_device (pp->pdev);
- kfree (name);
+
+ parport_unregister_device(pp->pdev);
+ kfree(name);
pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
- kfree (pp);
+ kfree(pp);
return 0;
}
/* No kernel lock held - fine */
-static unsigned int pp_poll (struct file * file, poll_table * wait)
+static unsigned int pp_poll(struct file *file, poll_table *wait)
{
struct pp_struct *pp = file->private_data;
unsigned int mask = 0;
- poll_wait (file, &pp->irq_wait, wait);
- if (atomic_read (&pp->irqc))
+ poll_wait(file, &pp->irq_wait, wait);
+ if (atomic_read(&pp->irqc))
mask |= POLLIN | POLLRDNORM;
return mask;
@@ -744,6 +781,9 @@ static const struct file_operations pp_fops = {
.write = pp_write,
.poll = pp_poll,
.unlocked_ioctl = pp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pp_compat_ioctl,
+#endif
.open = pp_open,
.release = pp_release,
};
@@ -765,13 +805,13 @@ static struct parport_driver pp_driver = {
.detach = pp_detach,
};
-static int __init ppdev_init (void)
+static int __init ppdev_init(void)
{
int err = 0;
- if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) {
- printk (KERN_WARNING CHRDEV ": unable to get major %d\n",
- PP_MAJOR);
+ if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
+ printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
+ PP_MAJOR);
return -EIO;
}
ppdev_class = class_create(THIS_MODULE, CHRDEV);
@@ -781,11 +821,11 @@ static int __init ppdev_init (void)
}
err = parport_register_driver(&pp_driver);
if (err < 0) {
- printk (KERN_WARNING CHRDEV ": unable to register with parport\n");
+ printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
goto out_class;
}
- printk (KERN_INFO PP_VERSION "\n");
+ printk(KERN_INFO PP_VERSION "\n");
goto out;
out_class:
@@ -796,12 +836,12 @@ out:
return err;
}
-static void __exit ppdev_cleanup (void)
+static void __exit ppdev_cleanup(void)
{
/* Clean up all parport stuff */
parport_unregister_driver(&pp_driver);
class_destroy(ppdev_class);
- unregister_chrdev (PP_MAJOR, CHRDEV);
+ unregister_chrdev(PP_MAJOR, CHRDEV);
}
module_init(ppdev_init);
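
The PPGETTIME/PPSETTIME rework above replaces struct timeval, whose layout follows the userland's time_t width, with explicit s32[2] and s64[2] payloads, so one kernel can serve both 32- and 64-bit-time callers unambiguously; the two variants can share command number 0x95 because the payload size is encoded in the ioctl number itself. Both paths funnel into pp_set_timeout(). A standalone sketch of its conversion rule (HZ value and function name are illustrative):

	#include <stdint.h>

	#define HZ 100				/* illustrative tick rate */
	#define USEC_PER_SEC 1000000LL

	/* Mirrors pp_set_timeout(): negative fields and non-positive
	 * results are rejected; seconds and microseconds both contribute. */
	static long timeout_to_jiffies(int64_t sec, int64_t usec)
	{
		long j;

		if (sec < 0 || usec < 0)
			return -1;

		j = (long)((usec * HZ) / USEC_PER_SEC);	/* usecs_to_jiffies, simplified */
		j += (long)(sec * HZ);
		return j > 0 ? j : -1;
	}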
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 9b9809b709a5..e83b2adc014a 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -334,10 +334,8 @@ static int __init raw_init(void)
cdev_init(&raw_cdev, &raw_fops);
ret = cdev_add(&raw_cdev, dev, max_raw_minors);
- if (ret) {
+ if (ret)
goto error_region;
- }
-
raw_class = class_create(THIS_MODULE, "raw");
if (IS_ERR(raw_class)) {
printk(KERN_ERR "Error creating raw class.\n");
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 45cc39aabeee..274dd0123237 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -88,6 +88,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops)
{
struct tpm_chip *chip;
+ int rc;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -136,11 +137,17 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
chip->cdev.owner = chip->pdev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
+ rc = devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
+ if (rc) {
+ put_device(&chip->dev);
+ return ERR_PTR(rc);
+ }
+
return chip;
}
EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
-static int tpm_dev_add_device(struct tpm_chip *chip)
+static int tpm_add_char_device(struct tpm_chip *chip)
{
int rc;
@@ -151,7 +158,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
- device_unregister(&chip->dev);
return rc;
}
@@ -162,16 +168,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
+ cdev_del(&chip->cdev);
return rc;
}
return rc;
}
-static void tpm_dev_del_device(struct tpm_chip *chip)
+static void tpm_del_char_device(struct tpm_chip *chip)
{
cdev_del(&chip->cdev);
- device_unregister(&chip->dev);
+ device_del(&chip->dev);
}
static int tpm1_chip_register(struct tpm_chip *chip)
@@ -222,7 +229,7 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
- rc = tpm_dev_add_device(chip);
+ rc = tpm_add_char_device(chip);
if (rc)
goto out_err;
@@ -274,6 +281,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
sysfs_remove_link(&chip->pdev->kobj, "ppi");
tpm1_chip_unregister(chip);
- tpm_dev_del_device(chip);
+ tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
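
devm_add_action() queues a callback on the device's devres list so the release runs automatically on driver detach; because the registration itself can fail on allocation, the error branch must run the action by hand, which is what the put_device() above does. The idiom, sketched as a fragment (later kernels bundle exactly this as devm_add_action_or_reset()):

	rc = devm_add_action(dev, release_fn, obj);
	if (rc) {
		release_fn(obj);	/* devres could not take ownership; undo now */
		return ERR_PTR(rc);
	}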
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 542a80cbfd9c..28b477e8da6a 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -128,13 +128,6 @@ enum tpm2_startup_types {
TPM2_SU_STATE = 0x0001,
};
-enum tpm2_start_method {
- TPM2_START_ACPI = 2,
- TPM2_START_FIFO = 6,
- TPM2_START_CRB = 7,
- TPM2_START_CRB_WITH_ACPI = 8,
-};
-
struct tpm_chip;
struct tpm_vendor_specific {
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 45a634016f95..b28e4da3d2cf 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -20,7 +20,11 @@
#include <keys/trusted-type.h>
enum tpm2_object_attributes {
- TPM2_ATTR_USER_WITH_AUTH = BIT(6),
+ TPM2_OA_USER_WITH_AUTH = BIT(6),
+};
+
+enum tpm2_session_attributes {
+ TPM2_SA_CONTINUE_SESSION = BIT(0),
};
struct tpm2_startup_in {
@@ -478,22 +482,18 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
tpm_buf_append_u8(&buf, payload->migratable);
/* public */
- if (options->policydigest)
- tpm_buf_append_u16(&buf, 14 + options->digest_len);
- else
- tpm_buf_append_u16(&buf, 14);
-
+ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
tpm_buf_append_u16(&buf, hash);
/* policy */
- if (options->policydigest) {
+ if (options->policydigest_len) {
tpm_buf_append_u32(&buf, 0);
- tpm_buf_append_u16(&buf, options->digest_len);
+ tpm_buf_append_u16(&buf, options->policydigest_len);
tpm_buf_append(&buf, options->policydigest,
- options->digest_len);
+ options->policydigest_len);
} else {
- tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
+ tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
tpm_buf_append_u16(&buf, 0);
}
@@ -631,7 +631,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
options->policyhandle ?
options->policyhandle : TPM2_RS_PW,
NULL /* nonce */, 0,
- 0 /* session_attributes */,
+ TPM2_SA_CONTINUE_SESSION,
options->blobauth /* hmac */,
TPM_DIGEST_SIZE);
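
Passing 0 as the session attributes let the TPM flush the policy session once the unseal completed; setting continueSession keeps the handle valid for reuse. The bit lives in the per-session authorization area whose fields tpm2_buf_append_auth() writes in order; a layout sketch from the TPM 2.0 specification, from memory:

	/* TPMS_AUTH_COMMAND, per session:
	 *   u32 sessionHandle; TPM2B nonce; u8 sessionAttributes; TPM2B hmac;
	 * continueSession is bit 0 of sessionAttributes. */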
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 8342cf51ffdc..a12b31940344 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -34,14 +34,6 @@ enum crb_defaults {
CRB_ACPI_START_INDEX = 1,
};
-struct acpi_tpm2 {
- struct acpi_table_header hdr;
- u16 platform_class;
- u16 reserved;
- u64 control_area_pa;
- u32 start_method;
-} __packed;
-
enum crb_ca_request {
CRB_CA_REQ_GO_IDLE = BIT(0),
CRB_CA_REQ_CMD_READY = BIT(1),
@@ -85,6 +77,8 @@ enum crb_flags {
struct crb_priv {
unsigned int flags;
+ struct resource res;
+ void __iomem *iobase;
struct crb_control_area __iomem *cca;
u8 __iomem *cmd;
u8 __iomem *rsp;
@@ -97,7 +91,7 @@ static u8 crb_status(struct tpm_chip *chip)
struct crb_priv *priv = chip->vendor.priv;
u8 sts = 0;
- if ((le32_to_cpu(ioread32(&priv->cca->start)) & CRB_START_INVOKE) !=
+ if ((ioread32(&priv->cca->start) & CRB_START_INVOKE) !=
CRB_START_INVOKE)
sts |= CRB_STS_COMPLETE;
@@ -113,7 +107,7 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if (count < 6)
return -EIO;
- if (le32_to_cpu(ioread32(&priv->cca->sts)) & CRB_CA_STS_ERROR)
+ if (ioread32(&priv->cca->sts) & CRB_CA_STS_ERROR)
return -EIO;
memcpy_fromio(buf, priv->rsp, 6);
@@ -149,11 +143,11 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
struct crb_priv *priv = chip->vendor.priv;
int rc = 0;
- if (len > le32_to_cpu(ioread32(&priv->cca->cmd_size))) {
+ if (len > ioread32(&priv->cca->cmd_size)) {
dev_err(&chip->dev,
"invalid command count value %x %zx\n",
(unsigned int) len,
- (size_t) le32_to_cpu(ioread32(&priv->cca->cmd_size)));
+ (size_t) ioread32(&priv->cca->cmd_size));
return -E2BIG;
}
@@ -189,7 +183,7 @@ static void crb_cancel(struct tpm_chip *chip)
static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
{
struct crb_priv *priv = chip->vendor.priv;
- u32 cancel = le32_to_cpu(ioread32(&priv->cca->cancel));
+ u32 cancel = ioread32(&priv->cca->cancel);
return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
}
@@ -204,97 +198,145 @@ static const struct tpm_class_ops tpm_crb = {
.req_complete_val = CRB_STS_COMPLETE,
};
-static int crb_acpi_add(struct acpi_device *device)
+static int crb_init(struct acpi_device *device, struct crb_priv *priv)
{
struct tpm_chip *chip;
- struct acpi_tpm2 *buf;
+ int rc;
+
+ chip = tpmm_chip_alloc(&device->dev, &tpm_crb);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ chip->vendor.priv = priv;
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+ rc = tpm_get_timeouts(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm2_do_selftest(chip);
+ if (rc)
+ return rc;
+
+ return tpm_chip_register(chip);
+}
+
+static int crb_check_resource(struct acpi_resource *ares, void *data)
+{
+ struct crb_priv *priv = data;
+ struct resource res;
+
+ if (acpi_dev_resource_memory(ares, &res)) {
+ priv->res = res;
+ priv->res.name = NULL;
+ }
+
+ return 1;
+}
+
+static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+ u64 start, u32 size)
+{
+ struct resource new_res = {
+ .start = start,
+ .end = start + size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+
+ /* Detect a 64 bit address on a 32 bit system */
+ if (start != new_res.start)
+ return ERR_PTR(-EINVAL);
+
+ if (!resource_contains(&priv->res, &new_res))
+ return devm_ioremap_resource(dev, &new_res);
+
+ return priv->iobase + (new_res.start - priv->res.start);
+}
+
+static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf)
+{
+ struct list_head resources;
+ struct device *dev = &device->dev;
+ u64 pa;
+ int ret;
+
+ INIT_LIST_HEAD(&resources);
+ ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
+ priv);
+ if (ret < 0)
+ return ret;
+ acpi_dev_free_resource_list(&resources);
+
+ if (resource_type(&priv->res) != IORESOURCE_MEM) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
+ }
+
+ priv->iobase = devm_ioremap_resource(dev, &priv->res);
+ if (IS_ERR(priv->iobase))
+ return PTR_ERR(priv->iobase);
+
+ priv->cca = crb_map_res(dev, priv, buf->control_address, 0x1000);
+ if (IS_ERR(priv->cca))
+ return PTR_ERR(priv->cca);
+
+ pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
+ (u64) ioread32(&priv->cca->cmd_pa_low);
+ priv->cmd = crb_map_res(dev, priv, pa, ioread32(&priv->cca->cmd_size));
+ if (IS_ERR(priv->cmd))
+ return PTR_ERR(priv->cmd);
+
+ memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
+ pa = le64_to_cpu(pa);
+ priv->rsp = crb_map_res(dev, priv, pa, ioread32(&priv->cca->rsp_size));
+ return PTR_ERR_OR_ZERO(priv->rsp);
+}
+
+static int crb_acpi_add(struct acpi_device *device)
+{
+ struct acpi_table_tpm2 *buf;
struct crb_priv *priv;
struct device *dev = &device->dev;
acpi_status status;
u32 sm;
- u64 pa;
int rc;
status = acpi_get_table(ACPI_SIG_TPM2, 1,
(struct acpi_table_header **) &buf);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "failed to get TPM2 ACPI table\n");
- return -ENODEV;
+ if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
+ dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
}
/* Should the FIFO driver handle this? */
- if (buf->start_method == TPM2_START_FIFO)
+ sm = buf->start_method;
+ if (sm == ACPI_TPM2_MEMORY_MAPPED)
return -ENODEV;
- chip = tpmm_chip_alloc(dev, &tpm_crb);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
-
- chip->flags = TPM_CHIP_FLAG_TPM2;
-
- if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
- dev_err(dev, "TPM2 ACPI table has wrong size");
- return -EINVAL;
- }
-
- priv = (struct crb_priv *) devm_kzalloc(dev, sizeof(struct crb_priv),
- GFP_KERNEL);
- if (!priv) {
- dev_err(dev, "failed to devm_kzalloc for private data\n");
+ priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
-
- sm = le32_to_cpu(buf->start_method);
/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
* report only ACPI start but in practice seems to require both
* ACPI start and CRB start.
*/
- if (sm == TPM2_START_CRB || sm == TPM2_START_FIFO ||
+ if (sm == ACPI_TPM2_COMMAND_BUFFER || sm == ACPI_TPM2_MEMORY_MAPPED ||
!strcmp(acpi_device_hid(device), "MSFT0101"))
priv->flags |= CRB_FL_CRB_START;
- if (sm == TPM2_START_ACPI || sm == TPM2_START_CRB_WITH_ACPI)
+ if (sm == ACPI_TPM2_START_METHOD ||
+ sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
priv->flags |= CRB_FL_ACPI_START;
- priv->cca = (struct crb_control_area __iomem *)
- devm_ioremap_nocache(dev, buf->control_area_pa, 0x1000);
- if (!priv->cca) {
- dev_err(dev, "ioremap of the control area failed\n");
- return -ENOMEM;
- }
-
- pa = ((u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_high)) << 32) |
- (u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_low));
- priv->cmd = devm_ioremap_nocache(dev, pa,
- ioread32(&priv->cca->cmd_size));
- if (!priv->cmd) {
- dev_err(dev, "ioremap of the command buffer failed\n");
- return -ENOMEM;
- }
-
- memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
- pa = le64_to_cpu(pa);
- priv->rsp = devm_ioremap_nocache(dev, pa,
- ioread32(&priv->cca->rsp_size));
- if (!priv->rsp) {
- dev_err(dev, "ioremap of the response buffer failed\n");
- return -ENOMEM;
- }
-
- chip->vendor.priv = priv;
-
- rc = tpm_get_timeouts(chip);
+ rc = crb_map_io(device, priv, buf);
if (rc)
return rc;
- chip->acpi_dev_handle = device->handle;
-
- rc = tpm2_do_selftest(chip);
- if (rc)
- return rc;
-
- return tpm_chip_register(chip);
+ return crb_init(device, priv);
}
static int crb_acpi_remove(struct acpi_device *device)
@@ -302,11 +344,11 @@ static int crb_acpi_remove(struct acpi_device *device)
struct device *dev = &device->dev;
struct tpm_chip *chip = dev_get_drvdata(dev);
- tpm_chip_unregister(chip);
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_unregister(chip);
+
return 0;
}
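
Dropping le32_to_cpu() around ioread32() in this driver is a correctness fix, not a cleanup: ioread32() already returns the CPU-order value of a little-endian MMIO register, so the extra conversion byte-swapped a second time on big-endian machines. In short:

	/* Reading a little-endian device register at p:
	 *   ioread32(p)               correct on LE and BE CPUs
	 *   le32_to_cpu(ioread32(p))  double-swaps on BE (wrong)
	 * le64_to_cpu() remains right for rsp_pa because memcpy_fromio()
	 * copies raw bytes without any byte-order conversion. */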
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index bd72fb04225e..4e6940acf639 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
{
struct tcpa_event *event = v;
struct tcpa_event temp_event;
- char *tempPtr;
+ char *temp_ptr;
int i;
memcpy(&temp_event, event, sizeof(struct tcpa_event));
@@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
temp_event.event_type = do_endian_conversion(event->event_type);
temp_event.event_size = do_endian_conversion(event->event_size);
- tempPtr = (char *)&temp_event;
+ temp_ptr = (char *) &temp_event;
- for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
- seq_putc(m, tempPtr[i]);
+ for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
+ seq_putc(m, temp_ptr[i]);
+
+ temp_ptr = (char *) v;
+
+ for (i = (sizeof(struct tcpa_event) - 1);
+ i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
+ seq_putc(m, temp_ptr[i]);
return 0;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 8a3509cb10da..a507006728e0 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -28,7 +28,6 @@
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
-#include <acpi/actbl2.h>
#include "tpm.h"
enum tis_access {
@@ -60,22 +59,18 @@ enum tis_int_flags {
};
enum tis_defaults {
- TIS_MEM_BASE = 0xFED40000,
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
};
struct tpm_info {
- unsigned long start;
- unsigned long len;
- unsigned int irq;
-};
-
-static struct tpm_info tis_default_info = {
- .start = TIS_MEM_BASE,
- .len = TIS_MEM_LEN,
- .irq = 0,
+ struct resource res;
+ /* irq > 0 means: use irq $irq;
+ * irq = 0 means: autoprobe for an irq;
+ * irq = -1 means: no irq support
+ */
+ int irq;
};
/* Some timeout values are needed before it is known whether the chip is
@@ -118,39 +113,11 @@ static inline int is_itpm(struct acpi_device *dev)
{
return has_hid(dev, "INTC0102");
}
-
-static inline int is_fifo(struct acpi_device *dev)
-{
- struct acpi_table_tpm2 *tbl;
- acpi_status st;
-
- /* TPM 1.2 FIFO */
- if (!has_hid(dev, "MSFT0101"))
- return 1;
-
- st = acpi_get_table(ACPI_SIG_TPM2, 1,
- (struct acpi_table_header **) &tbl);
- if (ACPI_FAILURE(st)) {
- dev_err(&dev->dev, "failed to get TPM2 ACPI table\n");
- return 0;
- }
-
- if (le32_to_cpu(tbl->start_method) != TPM2_START_FIFO)
- return 0;
-
- /* TPM 2.0 FIFO */
- return 1;
-}
#else
static inline int is_itpm(struct acpi_device *dev)
{
return 0;
}
-
-static inline int is_fifo(struct acpi_device *dev)
-{
- return 1;
-}
#endif
/* Before we attempt to access the TPM we must see that the valid bit is set.
@@ -716,9 +683,9 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
chip->acpi_dev_handle = acpi_dev_handle;
#endif
- chip->vendor.iobase = devm_ioremap(dev, tpm_info->start, tpm_info->len);
- if (!chip->vendor.iobase)
- return -EIO;
+ chip->vendor.iobase = devm_ioremap_resource(dev, &tpm_info->res);
+ if (IS_ERR(chip->vendor.iobase))
+ return PTR_ERR(chip->vendor.iobase);
/* Maximum timeouts */
chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX;
@@ -807,7 +774,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
- if (interrupts) {
+ if (interrupts && tpm_info->irq != -1) {
if (tpm_info->irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
tpm_info->irq);
@@ -893,29 +860,29 @@ static int tpm_tis_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
-#ifdef CONFIG_PNP
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
- const struct pnp_device_id *pnp_id)
+ const struct pnp_device_id *pnp_id)
{
- struct tpm_info tpm_info = tis_default_info;
+ struct tpm_info tpm_info = {};
acpi_handle acpi_dev_handle = NULL;
+ struct resource *res;
- tpm_info.start = pnp_mem_start(pnp_dev, 0);
- tpm_info.len = pnp_mem_len(pnp_dev, 0);
+ res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ tpm_info.res = *res;
if (pnp_irq_valid(pnp_dev, 0))
tpm_info.irq = pnp_irq(pnp_dev, 0);
else
- interrupts = false;
+ tpm_info.irq = -1;
-#ifdef CONFIG_ACPI
if (pnp_acpi_device(pnp_dev)) {
if (is_itpm(pnp_acpi_device(pnp_dev)))
itpm = true;
- acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle;
+ acpi_dev_handle = ACPI_HANDLE(&pnp_dev->dev);
}
-#endif
return tpm_tis_init(&pnp_dev->dev, &tpm_info, acpi_dev_handle);
}
@@ -956,7 +923,6 @@ static struct pnp_driver tis_pnp_driver = {
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
-#endif
#ifdef CONFIG_ACPI
static int tpm_check_resource(struct acpi_resource *ares, void *data)
@@ -964,11 +930,11 @@ static int tpm_check_resource(struct acpi_resource *ares, void *data)
struct tpm_info *tpm_info = (struct tpm_info *) data;
struct resource res;
- if (acpi_dev_resource_interrupt(ares, 0, &res)) {
+ if (acpi_dev_resource_interrupt(ares, 0, &res))
tpm_info->irq = res.start;
- } else if (acpi_dev_resource_memory(ares, &res)) {
- tpm_info->start = res.start;
- tpm_info->len = resource_size(&res);
+ else if (acpi_dev_resource_memory(ares, &res)) {
+ tpm_info->res = res;
+ tpm_info->res.name = NULL;
}
return 1;
@@ -976,14 +942,25 @@ static int tpm_check_resource(struct acpi_resource *ares, void *data)
static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
{
+ struct acpi_table_tpm2 *tbl;
+ acpi_status st;
struct list_head resources;
- struct tpm_info tpm_info = tis_default_info;
+ struct tpm_info tpm_info = {};
int ret;
- if (!is_fifo(acpi_dev))
+ st = acpi_get_table(ACPI_SIG_TPM2, 1,
+ (struct acpi_table_header **) &tbl);
+ if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
+ dev_err(&acpi_dev->dev,
+ FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
+ }
+
+ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
return -ENODEV;
INIT_LIST_HEAD(&resources);
+ tpm_info.irq = -1;
ret = acpi_dev_get_resources(acpi_dev, &resources, tpm_check_resource,
&tpm_info);
if (ret < 0)
@@ -991,8 +968,11 @@ static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
acpi_dev_free_resource_list(&resources);
- if (!tpm_info.irq)
- interrupts = false;
+ if (resource_type(&tpm_info.res) != IORESOURCE_MEM) {
+ dev_err(&acpi_dev->dev,
+ FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
+ }
if (is_itpm(acpi_dev))
itpm = true;
@@ -1031,80 +1011,135 @@ static struct acpi_driver tis_acpi_driver = {
};
#endif
+static struct platform_device *force_pdev;
+
+static int tpm_tis_plat_probe(struct platform_device *pdev)
+{
+ struct tpm_info tpm_info = {};
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+ tpm_info.res = *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res) {
+ tpm_info.irq = res->start;
+ } else {
+ if (pdev == force_pdev)
+ tpm_info.irq = -1;
+ else
+ /* When forcing auto probe the IRQ */
+ tpm_info.irq = 0;
+ }
+
+ return tpm_tis_init(&pdev->dev, &tpm_info, NULL);
+}
+
+static int tpm_tis_plat_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+
+ return 0;
+}
+
static struct platform_driver tis_drv = {
+ .probe = tpm_tis_plat_probe,
+ .remove = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
},
};
-static struct platform_device *pdev;
-
static bool force;
+#ifdef CONFIG_X86
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
+#endif
+
+static int tpm_tis_force_device(void)
+{
+ struct platform_device *pdev;
+ static const struct resource x86_resources[] = {
+ {
+ .start = 0xFED40000,
+ .end = 0xFED40000 + TIS_MEM_LEN - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ };
+
+ if (!force)
+ return 0;
+
+ /* The driver core will match the name tpm_tis of the device to
+ * the tpm_tis platform driver and complete the setup via
+ * tpm_tis_plat_probe
+ */
+ pdev = platform_device_register_simple("tpm_tis", -1, x86_resources,
+ ARRAY_SIZE(x86_resources));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ force_pdev = pdev;
+
+ return 0;
+}
+
static int __init init_tis(void)
{
int rc;
-#ifdef CONFIG_PNP
- if (!force) {
- rc = pnp_register_driver(&tis_pnp_driver);
- if (rc)
- return rc;
- }
-#endif
+
+ rc = tpm_tis_force_device();
+ if (rc)
+ goto err_force;
+
+ rc = platform_driver_register(&tis_drv);
+ if (rc)
+ goto err_platform;
+
#ifdef CONFIG_ACPI
- if (!force) {
- rc = acpi_bus_register_driver(&tis_acpi_driver);
- if (rc) {
-#ifdef CONFIG_PNP
- pnp_unregister_driver(&tis_pnp_driver);
-#endif
- return rc;
- }
- }
+ rc = acpi_bus_register_driver(&tis_acpi_driver);
+ if (rc)
+ goto err_acpi;
#endif
- if (!force)
- return 0;
- rc = platform_driver_register(&tis_drv);
- if (rc < 0)
- return rc;
- pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- rc = PTR_ERR(pdev);
- goto err_dev;
+ if (IS_ENABLED(CONFIG_PNP)) {
+ rc = pnp_register_driver(&tis_pnp_driver);
+ if (rc)
+ goto err_pnp;
}
- rc = tpm_tis_init(&pdev->dev, &tis_default_info, NULL);
- if (rc)
- goto err_init;
+
return 0;
-err_init:
- platform_device_unregister(pdev);
-err_dev:
- platform_driver_unregister(&tis_drv);
+
+err_pnp:
+#ifdef CONFIG_ACPI
+ acpi_bus_unregister_driver(&tis_acpi_driver);
+err_acpi:
+#endif
+ platform_driver_unregister(&tis_drv);
+err_platform:
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
+err_force:
return rc;
}
static void __exit cleanup_tis(void)
{
- struct tpm_chip *chip;
-#if defined(CONFIG_PNP) || defined(CONFIG_ACPI)
- if (!force) {
+ pnp_unregister_driver(&tis_pnp_driver);
#ifdef CONFIG_ACPI
- acpi_bus_unregister_driver(&tis_acpi_driver);
-#endif
-#ifdef CONFIG_PNP
- pnp_unregister_driver(&tis_pnp_driver);
+ acpi_bus_unregister_driver(&tis_acpi_driver);
#endif
- return;
- }
-#endif
- chip = dev_get_drvdata(&pdev->dev);
- tpm_chip_unregister(chip);
- tpm_tis_remove(chip);
- platform_device_unregister(pdev);
platform_driver_unregister(&tis_drv);
+
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
}
module_init(init_tis);
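
Two conventions introduced above are worth spelling out. First, tpm_info.irq is a tri-state: a positive value names the interrupt, 0 requests autoprobing, and -1 forces polling. Second, force=1 no longer calls tpm_tis_init() directly; it registers a plain platform device that the driver core routes back into tpm_tis_plat_probe(). The consumer side of the tri-state, condensed from the init code in this patch:

	/* irq > 0: use it; irq == 0: probe for one; irq == -1: poll only */
	if (interrupts && tpm_info->irq != -1) {
		if (tpm_info->irq > 0)
			tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
						 tpm_info->irq);
		else
			tpm_tis_probe_irq(chip, intmask);
	}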
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index a15ce4ef39cd..b098d2d0b7c4 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -171,7 +171,7 @@ static const struct tty_operations ttyprintk_ops = {
.ioctl = tpk_ioctl,
};
-static struct tty_port_operations null_ops = { };
+static const struct tty_port_operations null_ops = { };
static struct tty_driver *ttyprintk_driver;
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index 77d6c127e691..dcd19f3f182e 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -509,7 +509,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
channel->log2_element_size = ((format > 2) ?
2 : format);
- bytebufsize = channel->rd_buf_size = bufsize *
+ bytebufsize = bufsize *
(1 << channel->log2_element_size);
buffers = devm_kcalloc(dev, bufnum,
@@ -523,6 +523,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
if (!is_writebuf) {
channel->num_rd_buffers = bufnum;
+ channel->rd_buf_size = bytebufsize;
channel->rd_allow_partial = allowpartial;
channel->rd_synchronous = synchronous;
channel->rd_exclusive_open = exclusive_open;
@@ -533,6 +534,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
bufnum, bytebufsize);
} else if (channelnum > 0) {
channel->num_wr_buffers = bufnum;
+ channel->wr_buf_size = bytebufsize;
channel->seekable = seekable;
channel->wr_supports_nonempty = supports_nonempty;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index eca8e019e005..16f7d33421d8 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -6,9 +6,6 @@ config CLKDEV_LOOKUP
config HAVE_CLK_PREPARE
bool
-config HAVE_MACH_CLKDEV
- bool
-
config COMMON_CLK
bool
select HAVE_CLK_PREPARE
@@ -99,6 +96,14 @@ config COMMON_CLK_SI570
This driver supports Silicon Labs 570/571/598/599 programmable
clock generators.
+config COMMON_CLK_CDCE706
+ tristate "Clock driver for TI CDCE706 clock synthesizer"
+ depends on I2C
+ select REGMAP_I2C
+ select RATIONAL
+ ---help---
+ This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
+
config COMMON_CLK_CDCE925
tristate "Clock driver for TI CDCE925 devices"
depends on I2C
@@ -190,23 +195,14 @@ config COMMON_CLK_PWM
config COMMON_CLK_PXA
def_bool COMMON_CLK && ARCH_PXA
---help---
- Sypport for the Marvell PXA SoC.
-
-config COMMON_CLK_CDCE706
- tristate "Clock driver for TI CDCE706 clock synthesizer"
- depends on I2C
- select REGMAP_I2C
- select RATIONAL
- ---help---
- This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
+ Support for the Marvell PXA SoC.
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
-source "drivers/clk/qcom/Kconfig"
-
-endmenu
-
source "drivers/clk/mvebu/Kconfig"
-
+source "drivers/clk/qcom/Kconfig"
source "drivers/clk/samsung/Kconfig"
source "drivers/clk/tegra/Kconfig"
+source "drivers/clk/ti/Kconfig"
+
+endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index bae4be6501df..46869d696e4d 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -70,15 +70,14 @@ obj-$(CONFIG_COMMON_CLK_PXA) += pxa/
obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
-obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/
-obj-$(CONFIG_ARCH_RENESAS) += shmobile/
+obj-$(CONFIG_ARCH_RENESAS) += renesas/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/
+obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
obj-$(CONFIG_X86) += x86/
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index abc80949e1dd..e1aa210dd7aa 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -15,8 +15,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -28,8 +28,9 @@
struct clk_generated {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
struct clk_range range;
+ spinlock_t *lock;
u32 id;
u32 gckdiv;
u8 parent_id;
@@ -41,49 +42,52 @@ struct clk_generated {
static int clk_generated_enable(struct clk_hw *hw)
{
struct clk_generated *gck = to_clk_generated(hw);
- struct at91_pmc *pmc = gck->pmc;
- u32 tmp;
+ unsigned long flags;
pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
__func__, gck->gckdiv, gck->parent_id);
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR) &
- ~(AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK);
- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_GCKCSS(gck->parent_id)
- | AT91_PMC_PCR_CMD
- | AT91_PMC_PCR_GCKDIV(gck->gckdiv)
- | AT91_PMC_PCR_GCKEN);
- pmc_unlock(pmc);
+ spin_lock_irqsave(gck->lock, flags);
+ regmap_write(gck->regmap, AT91_PMC_PCR,
+ (gck->id & AT91_PMC_PCR_PID_MASK));
+ regmap_update_bits(gck->regmap, AT91_PMC_PCR,
+ AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK |
+ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
+ AT91_PMC_PCR_GCKCSS(gck->parent_id) |
+ AT91_PMC_PCR_CMD |
+ AT91_PMC_PCR_GCKDIV(gck->gckdiv) |
+ AT91_PMC_PCR_GCKEN);
+ spin_unlock_irqrestore(gck->lock, flags);
return 0;
}
static void clk_generated_disable(struct clk_hw *hw)
{
struct clk_generated *gck = to_clk_generated(hw);
- struct at91_pmc *pmc = gck->pmc;
- u32 tmp;
-
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_GCKEN;
- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD);
- pmc_unlock(pmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(gck->lock, flags);
+ regmap_write(gck->regmap, AT91_PMC_PCR,
+ (gck->id & AT91_PMC_PCR_PID_MASK));
+ regmap_update_bits(gck->regmap, AT91_PMC_PCR,
+ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
+ AT91_PMC_PCR_CMD);
+ spin_unlock_irqrestore(gck->lock, flags);
}
static int clk_generated_is_enabled(struct clk_hw *hw)
{
struct clk_generated *gck = to_clk_generated(hw);
- struct at91_pmc *pmc = gck->pmc;
- int ret;
+ unsigned long flags;
+ unsigned int status;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
- ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_GCKEN);
- pmc_unlock(pmc);
+ spin_lock_irqsave(gck->lock, flags);
+ regmap_write(gck->regmap, AT91_PMC_PCR,
+ (gck->id & AT91_PMC_PCR_PID_MASK));
+ regmap_read(gck->regmap, AT91_PMC_PCR, &status);
+ spin_unlock_irqrestore(gck->lock, flags);
- return ret;
+ return status & AT91_PMC_PCR_GCKEN ? 1 : 0;
}
static unsigned long
@@ -214,13 +218,14 @@ static const struct clk_ops generated_ops = {
*/
static void clk_generated_startup(struct clk_generated *gck)
{
- struct at91_pmc *pmc = gck->pmc;
u32 tmp;
+ unsigned long flags;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR);
- pmc_unlock(pmc);
+ spin_lock_irqsave(gck->lock, flags);
+ regmap_write(gck->regmap, AT91_PMC_PCR,
+ (gck->id & AT91_PMC_PCR_PID_MASK));
+ regmap_read(gck->regmap, AT91_PMC_PCR, &tmp);
+ spin_unlock_irqrestore(gck->lock, flags);
gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK)
>> AT91_PMC_PCR_GCKCSS_OFFSET;
@@ -229,8 +234,8 @@ static void clk_generated_startup(struct clk_generated *gck)
}
static struct clk * __init
-at91_clk_register_generated(struct at91_pmc *pmc, const char *name,
- const char **parent_names, u8 num_parents,
+at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char
+ *name, const char **parent_names, u8 num_parents,
u8 id, const struct clk_range *range)
{
struct clk_generated *gck;
@@ -249,7 +254,8 @@ at91_clk_register_generated(struct at91_pmc *pmc, const char *name,
gck->id = id;
gck->hw.init = &init;
- gck->pmc = pmc;
+ gck->regmap = regmap;
+ gck->lock = lock;
gck->range = *range;
clk = clk_register(NULL, &gck->hw);
@@ -261,20 +267,20 @@ at91_clk_register_generated(struct at91_pmc *pmc, const char *name,
return clk;
}
-void __init of_sama5d2_clk_generated_setup(struct device_node *np,
- struct at91_pmc *pmc)
+void __init of_sama5d2_clk_generated_setup(struct device_node *np)
{
int num;
u32 id;
const char *name;
struct clk *clk;
- int num_parents;
+ unsigned int num_parents;
const char *parent_names[GENERATED_SOURCE_MAX];
struct device_node *gcknp;
struct clk_range range = CLK_RANGE(0, 0);
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > GENERATED_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
@@ -283,6 +289,10 @@ void __init of_sama5d2_clk_generated_setup(struct device_node *np,
if (!num || num > PERIPHERAL_MAX)
return;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
for_each_child_of_node(np, gcknp) {
if (of_property_read_u32(gcknp, "reg", &id))
continue;
@@ -296,11 +306,14 @@ void __init of_sama5d2_clk_generated_setup(struct device_node *np,
of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
&range);
- clk = at91_clk_register_generated(pmc, name, parent_names,
- num_parents, id, &range);
+ clk = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
+ parent_names, num_parents,
+ id, &range);
if (IS_ERR(clk))
continue;
of_clk_add_provider(gcknp, of_clk_src_simple_get, clk);
}
}
+CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated",
+ of_sama5d2_clk_generated_setup);
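The clk-generated.c conversion above replaces the private at91_pmc accessors with a syscon regmap, but AT91_PMC_PCR is an indexed register: the peripheral ID is written first to select which clock the subsequent read-modify-write targets. regmap's own per-access locking cannot make that two-step sequence atomic, hence the external pmc_pcr_lock held around both accesses. The pattern, condensed from the hunks above (gck_enable and pcr_lock are placeholder names):

#include <linux/clk/at91_pmc.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(pcr_lock);	/* stands in for the shared pmc_pcr_lock */

/* Select the peripheral ID, then update the GCK fields, as one atomic step. */
static void gck_enable(struct regmap *regmap, u32 id, u8 css, u32 gckdiv)
{
	unsigned long flags;

	spin_lock_irqsave(&pcr_lock, flags);
	regmap_write(regmap, AT91_PMC_PCR, id & AT91_PMC_PCR_PID_MASK);
	regmap_update_bits(regmap, AT91_PMC_PCR,
			   AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK |
			   AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
			   AT91_PMC_PCR_GCKCSS(css) | AT91_PMC_PCR_CMD |
			   AT91_PMC_PCR_GCKDIV(gckdiv) | AT91_PMC_PCR_GCKEN);
	spin_unlock_irqrestore(&pcr_lock, flags);
}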
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 61566bcefa53..819f5842fa66 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -15,15 +15,9 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
-#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include "pmc.h"
@@ -31,7 +25,7 @@
struct clk_sama5d4_h32mx {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_clk_sama5d4_h32mx(hw) container_of(hw, struct clk_sama5d4_h32mx, hw)
@@ -40,8 +34,10 @@ static unsigned long clk_sama5d4_h32mx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw);
+ unsigned int mckr;
- if (pmc_read(h32mxclk->pmc, AT91_PMC_MCKR) & AT91_PMC_H32MXDIV)
+ regmap_read(h32mxclk->regmap, AT91_PMC_MCKR, &mckr);
+ if (mckr & AT91_PMC_H32MXDIV)
return parent_rate / 2;
if (parent_rate > H32MX_MAX_FREQ)
@@ -70,18 +66,16 @@ static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw);
- struct at91_pmc *pmc = h32mxclk->pmc;
- u32 tmp;
+ u32 mckr = 0;
if (parent_rate != rate && (parent_rate / 2) != rate)
return -EINVAL;
- pmc_lock(pmc);
- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_H32MXDIV;
if ((parent_rate / 2) == rate)
- tmp |= AT91_PMC_H32MXDIV;
- pmc_write(pmc, AT91_PMC_MCKR, tmp);
- pmc_unlock(pmc);
+ mckr = AT91_PMC_H32MXDIV;
+
+ regmap_update_bits(h32mxclk->regmap, AT91_PMC_MCKR,
+ AT91_PMC_H32MXDIV, mckr);
return 0;
}
@@ -92,14 +86,18 @@ static const struct clk_ops h32mx_ops = {
.set_rate = clk_sama5d4_h32mx_set_rate,
};
-void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
{
struct clk_sama5d4_h32mx *h32mxclk;
struct clk_init_data init;
const char *parent_name;
+ struct regmap *regmap;
struct clk *clk;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
h32mxclk = kzalloc(sizeof(*h32mxclk), GFP_KERNEL);
if (!h32mxclk)
return;
@@ -113,7 +111,7 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
init.flags = CLK_SET_RATE_GATE;
h32mxclk->hw.init = &init;
- h32mxclk->pmc = pmc;
+ h32mxclk->regmap = regmap;
clk = clk_register(NULL, &h32mxclk->hw);
if (!clk) {
@@ -123,3 +121,5 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx",
+ of_sama5d4_clk_h32mx_setup);
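clk-h32mx.c also switches from an exported setup function taking a struct at91_pmc pointer to a self-registering CLK_OF_DECLARE entry. Every provider in this series converges on the same shape: the clock node sits under the PMC syscon node in the device tree, so the setup callback fetches the regmap from its DT parent. A condensed sketch, where of_example_clk_setup and the compatible string are illustrative placeholders rather than driver symbols:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static void __init of_example_clk_setup(struct device_node *np)
{
	struct regmap *regmap;

	/* The clock node is a child of the PMC syscon node. */
	regmap = syscon_node_to_regmap(of_get_parent(np));
	if (IS_ERR(regmap))
		return;

	/*
	 * ... allocate the clk wrapper, fill clk_init_data, clk_register(),
	 * then of_clk_add_provider(np, of_clk_src_simple_get, clk) ...
	 */
}
CLK_OF_DECLARE(example_clk, "vendor,example-clk", of_example_clk_setup);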
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index fd7247deabdc..58b5baca670c 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -13,13 +13,8 @@
#include <linux/clk/at91_pmc.h>
#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -34,18 +29,14 @@
struct clk_main_osc {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
};
#define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw)
struct clk_main_rc_osc {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
unsigned long frequency;
unsigned long accuracy;
};
@@ -54,51 +45,47 @@ struct clk_main_rc_osc {
struct clk_rm9200_main {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_clk_rm9200_main(hw) container_of(hw, struct clk_rm9200_main, hw)
struct clk_sam9x5_main {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
u8 parent;
};
#define to_clk_sam9x5_main(hw) container_of(hw, struct clk_sam9x5_main, hw)
-static irqreturn_t clk_main_osc_irq_handler(int irq, void *dev_id)
+static inline bool clk_main_osc_ready(struct regmap *regmap)
{
- struct clk_main_osc *osc = dev_id;
+ unsigned int status;
- wake_up(&osc->wait);
- disable_irq_nosync(osc->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_MOSCS;
}
static int clk_main_osc_prepare(struct clk_hw *hw)
{
struct clk_main_osc *osc = to_clk_main_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
+ struct regmap *regmap = osc->regmap;
u32 tmp;
- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
+ tmp &= ~MOR_KEY_MASK;
+
if (tmp & AT91_PMC_OSCBYPASS)
return 0;
if (!(tmp & AT91_PMC_MOSCEN)) {
tmp |= AT91_PMC_MOSCEN | AT91_PMC_KEY;
- pmc_write(pmc, AT91_CKGR_MOR, tmp);
+ regmap_write(regmap, AT91_CKGR_MOR, tmp);
}
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) {
- enable_irq(osc->irq);
- wait_event(osc->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
- }
+ while (!clk_main_osc_ready(regmap))
+ cpu_relax();
return 0;
}
@@ -106,9 +93,10 @@ static int clk_main_osc_prepare(struct clk_hw *hw)
static void clk_main_osc_unprepare(struct clk_hw *hw)
{
struct clk_main_osc *osc = to_clk_main_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+ struct regmap *regmap = osc->regmap;
+ u32 tmp;
+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
if (tmp & AT91_PMC_OSCBYPASS)
return;
@@ -116,20 +104,22 @@ static void clk_main_osc_unprepare(struct clk_hw *hw)
return;
tmp &= ~(AT91_PMC_KEY | AT91_PMC_MOSCEN);
- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
+ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
}
static int clk_main_osc_is_prepared(struct clk_hw *hw)
{
struct clk_main_osc *osc = to_clk_main_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+ struct regmap *regmap = osc->regmap;
+ u32 tmp, status;
+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
if (tmp & AT91_PMC_OSCBYPASS)
return 1;
- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS) &&
- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN));
+ regmap_read(regmap, AT91_PMC_SR, &status);
+
+ return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
}
static const struct clk_ops main_osc_ops = {
@@ -139,18 +129,16 @@ static const struct clk_ops main_osc_ops = {
};
static struct clk * __init
-at91_clk_register_main_osc(struct at91_pmc *pmc,
- unsigned int irq,
+at91_clk_register_main_osc(struct regmap *regmap,
const char *name,
const char *parent_name,
bool bypass)
{
- int ret;
struct clk_main_osc *osc;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !irq || !name || !parent_name)
+ if (!name || !parent_name)
return ERR_PTR(-EINVAL);
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
@@ -164,85 +152,70 @@ at91_clk_register_main_osc(struct at91_pmc *pmc,
init.flags = CLK_IGNORE_UNUSED;
osc->hw.init = &init;
- osc->pmc = pmc;
- osc->irq = irq;
-
- init_waitqueue_head(&osc->wait);
- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
- ret = request_irq(osc->irq, clk_main_osc_irq_handler,
- IRQF_TRIGGER_HIGH, name, osc);
- if (ret) {
- kfree(osc);
- return ERR_PTR(ret);
- }
+ osc->regmap = regmap;
if (bypass)
- pmc_write(pmc, AT91_CKGR_MOR,
- (pmc_read(pmc, AT91_CKGR_MOR) &
- ~(MOR_KEY_MASK | AT91_PMC_MOSCEN)) |
- AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
+ regmap_update_bits(regmap,
+ AT91_CKGR_MOR, MOR_KEY_MASK |
+ AT91_PMC_MOSCEN,
+ AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk)) {
- free_irq(irq, osc);
+ if (IS_ERR(clk))
kfree(osc);
- }
return clk;
}
-void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
{
struct clk *clk;
- unsigned int irq;
const char *name = np->name;
const char *parent_name;
+ struct regmap *regmap;
bool bypass;
of_property_read_string(np, "clock-output-names", &name);
bypass = of_property_read_bool(np, "atmel,osc-bypass");
parent_name = of_clk_get_parent_name(np, 0);
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
return;
- clk = at91_clk_register_main_osc(pmc, irq, name, parent_name, bypass);
+ clk = at91_clk_register_main_osc(regmap, name, parent_name, bypass);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc",
+ of_at91rm9200_clk_main_osc_setup);
-static irqreturn_t clk_main_rc_osc_irq_handler(int irq, void *dev_id)
+static bool clk_main_rc_osc_ready(struct regmap *regmap)
{
- struct clk_main_rc_osc *osc = dev_id;
+ unsigned int status;
- wake_up(&osc->wait);
- disable_irq_nosync(osc->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_MOSCRCS;
}
static int clk_main_rc_osc_prepare(struct clk_hw *hw)
{
struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
- u32 tmp;
+ struct regmap *regmap = osc->regmap;
+ unsigned int mor;
- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+ regmap_read(regmap, AT91_CKGR_MOR, &mor);
- if (!(tmp & AT91_PMC_MOSCRCEN)) {
- tmp |= AT91_PMC_MOSCRCEN | AT91_PMC_KEY;
- pmc_write(pmc, AT91_CKGR_MOR, tmp);
- }
+ if (!(mor & AT91_PMC_MOSCRCEN))
+ regmap_update_bits(regmap, AT91_CKGR_MOR,
+ MOR_KEY_MASK | AT91_PMC_MOSCRCEN,
+ AT91_PMC_MOSCRCEN | AT91_PMC_KEY);
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS)) {
- enable_irq(osc->irq);
- wait_event(osc->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS);
- }
+ while (!clk_main_rc_osc_ready(regmap))
+ cpu_relax();
return 0;
}
@@ -250,23 +223,28 @@ static int clk_main_rc_osc_prepare(struct clk_hw *hw)
static void clk_main_rc_osc_unprepare(struct clk_hw *hw)
{
struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+ struct regmap *regmap = osc->regmap;
+ unsigned int mor;
+
+ regmap_read(regmap, AT91_CKGR_MOR, &mor);
- if (!(tmp & AT91_PMC_MOSCRCEN))
+ if (!(mor & AT91_PMC_MOSCRCEN))
return;
- tmp &= ~(MOR_KEY_MASK | AT91_PMC_MOSCRCEN);
- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
+ regmap_update_bits(regmap, AT91_CKGR_MOR,
+ MOR_KEY_MASK | AT91_PMC_MOSCRCEN, AT91_PMC_KEY);
}
static int clk_main_rc_osc_is_prepared(struct clk_hw *hw)
{
struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
+ struct regmap *regmap = osc->regmap;
+ unsigned int mor, status;
- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS) &&
- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCRCEN));
+ regmap_read(regmap, AT91_CKGR_MOR, &mor);
+ regmap_read(regmap, AT91_PMC_SR, &status);
+
+ return (mor & AT91_PMC_MOSCRCEN) && (status & AT91_PMC_MOSCRCS);
}
static unsigned long clk_main_rc_osc_recalc_rate(struct clk_hw *hw,
@@ -294,17 +272,15 @@ static const struct clk_ops main_rc_osc_ops = {
};
static struct clk * __init
-at91_clk_register_main_rc_osc(struct at91_pmc *pmc,
- unsigned int irq,
+at91_clk_register_main_rc_osc(struct regmap *regmap,
const char *name,
u32 frequency, u32 accuracy)
{
- int ret;
struct clk_main_rc_osc *osc;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !irq || !name || !frequency)
+ if (!name || !frequency)
return ERR_PTR(-EINVAL);
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
@@ -315,66 +291,56 @@ at91_clk_register_main_rc_osc(struct at91_pmc *pmc,
init.ops = &main_rc_osc_ops;
init.parent_names = NULL;
init.num_parents = 0;
- init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;
+ init.flags = CLK_IGNORE_UNUSED;
osc->hw.init = &init;
- osc->pmc = pmc;
- osc->irq = irq;
+ osc->regmap = regmap;
osc->frequency = frequency;
osc->accuracy = accuracy;
- init_waitqueue_head(&osc->wait);
- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
- ret = request_irq(osc->irq, clk_main_rc_osc_irq_handler,
- IRQF_TRIGGER_HIGH, name, osc);
- if (ret)
- return ERR_PTR(ret);
-
clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk)) {
- free_irq(irq, osc);
+ if (IS_ERR(clk))
kfree(osc);
- }
return clk;
}
-void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
{
struct clk *clk;
- unsigned int irq;
u32 frequency = 0;
u32 accuracy = 0;
const char *name = np->name;
+ struct regmap *regmap;
of_property_read_string(np, "clock-output-names", &name);
of_property_read_u32(np, "clock-frequency", &frequency);
of_property_read_u32(np, "clock-accuracy", &accuracy);
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
return;
- clk = at91_clk_register_main_rc_osc(pmc, irq, name, frequency,
- accuracy);
+ clk = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc",
+ of_at91sam9x5_clk_main_rc_osc_setup);
-static int clk_main_probe_frequency(struct at91_pmc *pmc)
+static int clk_main_probe_frequency(struct regmap *regmap)
{
unsigned long prep_time, timeout;
- u32 tmp;
+ unsigned int mcfr;
timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT);
do {
prep_time = jiffies;
- tmp = pmc_read(pmc, AT91_CKGR_MCFR);
- if (tmp & AT91_PMC_MAINRDY)
+ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
+ if (mcfr & AT91_PMC_MAINRDY)
return 0;
usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
} while (time_before(prep_time, timeout));
@@ -382,34 +348,37 @@ static int clk_main_probe_frequency(struct at91_pmc *pmc)
return -ETIMEDOUT;
}
-static unsigned long clk_main_recalc_rate(struct at91_pmc *pmc,
+static unsigned long clk_main_recalc_rate(struct regmap *regmap,
unsigned long parent_rate)
{
- u32 tmp;
+ unsigned int mcfr;
if (parent_rate)
return parent_rate;
pr_warn("Main crystal frequency not set, using approximate value\n");
- tmp = pmc_read(pmc, AT91_CKGR_MCFR);
- if (!(tmp & AT91_PMC_MAINRDY))
+ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
+ if (!(mcfr & AT91_PMC_MAINRDY))
return 0;
- return ((tmp & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV;
+ return ((mcfr & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV;
}
static int clk_rm9200_main_prepare(struct clk_hw *hw)
{
struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
- return clk_main_probe_frequency(clkmain->pmc);
+ return clk_main_probe_frequency(clkmain->regmap);
}
static int clk_rm9200_main_is_prepared(struct clk_hw *hw)
{
struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
+ unsigned int status;
+
+ regmap_read(clkmain->regmap, AT91_CKGR_MCFR, &status);
- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINRDY);
+ return status & AT91_PMC_MAINRDY ? 1 : 0;
}
static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw,
@@ -417,7 +386,7 @@ static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw,
{
struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
- return clk_main_recalc_rate(clkmain->pmc, parent_rate);
+ return clk_main_recalc_rate(clkmain->regmap, parent_rate);
}
static const struct clk_ops rm9200_main_ops = {
@@ -427,7 +396,7 @@ static const struct clk_ops rm9200_main_ops = {
};
static struct clk * __init
-at91_clk_register_rm9200_main(struct at91_pmc *pmc,
+at91_clk_register_rm9200_main(struct regmap *regmap,
const char *name,
const char *parent_name)
{
@@ -435,7 +404,7 @@ at91_clk_register_rm9200_main(struct at91_pmc *pmc,
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !name)
+ if (!name)
return ERR_PTR(-EINVAL);
if (!parent_name)
@@ -452,7 +421,7 @@ at91_clk_register_rm9200_main(struct at91_pmc *pmc,
init.flags = 0;
clkmain->hw.init = &init;
- clkmain->pmc = pmc;
+ clkmain->regmap = regmap;
clk = clk_register(NULL, &clkmain->hw);
if (IS_ERR(clk))
@@ -461,52 +430,54 @@ at91_clk_register_rm9200_main(struct at91_pmc *pmc,
return clk;
}
-void __init of_at91rm9200_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- clk = at91_clk_register_rm9200_main(pmc, name, parent_name);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91_clk_register_rm9200_main(regmap, name, parent_name);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main",
+ of_at91rm9200_clk_main_setup);
-static irqreturn_t clk_sam9x5_main_irq_handler(int irq, void *dev_id)
+static inline bool clk_sam9x5_main_ready(struct regmap *regmap)
{
- struct clk_sam9x5_main *clkmain = dev_id;
+ unsigned int status;
- wake_up(&clkmain->wait);
- disable_irq_nosync(clkmain->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_MOSCSELS ? 1 : 0;
}
static int clk_sam9x5_main_prepare(struct clk_hw *hw)
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- struct at91_pmc *pmc = clkmain->pmc;
+ struct regmap *regmap = clkmain->regmap;
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
- enable_irq(clkmain->irq);
- wait_event(clkmain->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
- }
+ while (!clk_sam9x5_main_ready(regmap))
+ cpu_relax();
- return clk_main_probe_frequency(pmc);
+ return clk_main_probe_frequency(regmap);
}
static int clk_sam9x5_main_is_prepared(struct clk_hw *hw)
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
+ return clk_sam9x5_main_ready(clkmain->regmap);
}
static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw,
@@ -514,30 +485,28 @@ static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw,
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- return clk_main_recalc_rate(clkmain->pmc, parent_rate);
+ return clk_main_recalc_rate(clkmain->regmap, parent_rate);
}
static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- struct at91_pmc *pmc = clkmain->pmc;
- u32 tmp;
+ struct regmap *regmap = clkmain->regmap;
+ unsigned int tmp;
if (index > 1)
return -EINVAL;
- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
+ tmp &= ~MOR_KEY_MASK;
if (index && !(tmp & AT91_PMC_MOSCSEL))
- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
+ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
else if (!index && (tmp & AT91_PMC_MOSCSEL))
- pmc_write(pmc, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
+ regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
- enable_irq(clkmain->irq);
- wait_event(clkmain->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
- }
+ while (!clk_sam9x5_main_ready(regmap))
+ cpu_relax();
return 0;
}
@@ -545,8 +514,11 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+ unsigned int status;
+
+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN);
+ return status & AT91_PMC_MOSCEN ? 1 : 0;
}
static const struct clk_ops sam9x5_main_ops = {
@@ -558,18 +530,17 @@ static const struct clk_ops sam9x5_main_ops = {
};
static struct clk * __init
-at91_clk_register_sam9x5_main(struct at91_pmc *pmc,
- unsigned int irq,
+at91_clk_register_sam9x5_main(struct regmap *regmap,
const char *name,
const char **parent_names,
int num_parents)
{
- int ret;
struct clk_sam9x5_main *clkmain;
struct clk *clk = NULL;
struct clk_init_data init;
+ unsigned int status;
- if (!pmc || !irq || !name)
+ if (!name)
return ERR_PTR(-EINVAL);
if (!parent_names || !num_parents)
@@ -586,51 +557,42 @@ at91_clk_register_sam9x5_main(struct at91_pmc *pmc,
init.flags = CLK_SET_PARENT_GATE;
clkmain->hw.init = &init;
- clkmain->pmc = pmc;
- clkmain->irq = irq;
- clkmain->parent = !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) &
- AT91_PMC_MOSCEN);
- init_waitqueue_head(&clkmain->wait);
- irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN);
- ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler,
- IRQF_TRIGGER_HIGH, name, clkmain);
- if (ret)
- return ERR_PTR(ret);
+ clkmain->regmap = regmap;
+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+ clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
clk = clk_register(NULL, &clkmain->hw);
- if (IS_ERR(clk)) {
- free_irq(clkmain->irq, clkmain);
+ if (IS_ERR(clk))
kfree(clkmain);
- }
return clk;
}
-void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_names[2];
- int num_parents;
- unsigned int irq;
+ unsigned int num_parents;
const char *name = np->name;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > 2)
+ if (num_parents == 0 || num_parents > 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
of_property_read_string(np, "clock-output-names", &name);
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
- return;
-
- clk = at91_clk_register_sam9x5_main(pmc, irq, name, parent_names,
+ clk = at91_clk_register_sam9x5_main(regmap, name, parent_names,
num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main",
+ of_at91sam9x5_clk_main_setup);
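The clk-main.c hunks above also drop the interrupt plumbing: each wait previously armed a PMC interrupt and slept on a waitqueue, which is unusable this early, since CLK_OF_DECLARE callbacks run long before an interrupt controller or the scheduler is available. The replacement simply polls the status bit with cpu_relax(); the PMC ready bits presumably assert within a short, hardware-bounded startup time, so the busy wait stays cheap. Condensed form of the conversion (pmc_sr_ready and example_main_osc_prepare are placeholder names):

#include <linux/clk/at91_pmc.h>
#include <linux/regmap.h>
#include <linux/types.h>
/* cpu_relax() comes in via the arch headers */

/* Poll a PMC status bit instead of waiting for the matching interrupt. */
static inline bool pmc_sr_ready(struct regmap *regmap, u32 bit)
{
	unsigned int status;

	regmap_read(regmap, AT91_PMC_SR, &status);
	return status & bit;
}

/* e.g. in a prepare hook, after enabling the main oscillator: */
static int example_main_osc_prepare(struct regmap *regmap)
{
	while (!pmc_sr_ready(regmap, AT91_PMC_MOSCS))
		cpu_relax();

	return 0;
}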
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 620ea323356b..d1021e106191 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -12,13 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -44,32 +39,26 @@ struct clk_master_layout {
struct clk_master {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
const struct clk_master_layout *layout;
const struct clk_master_characteristics *characteristics;
};
-static irqreturn_t clk_master_irq_handler(int irq, void *dev_id)
+static inline bool clk_master_ready(struct regmap *regmap)
{
- struct clk_master *master = (struct clk_master *)dev_id;
+ unsigned int status;
- wake_up(&master->wait);
- disable_irq_nosync(master->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_MCKRDY ? 1 : 0;
}
+
static int clk_master_prepare(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
- struct at91_pmc *pmc = master->pmc;
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY)) {
- enable_irq(master->irq);
- wait_event(master->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
- }
+ while (!clk_master_ready(master->regmap))
+ cpu_relax();
return 0;
}
@@ -78,7 +67,7 @@ static int clk_master_is_prepared(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
- return !!(pmc_read(master->pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
+ return clk_master_ready(master->regmap);
}
static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
@@ -88,18 +77,16 @@ static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
u8 div;
unsigned long rate = parent_rate;
struct clk_master *master = to_clk_master(hw);
- struct at91_pmc *pmc = master->pmc;
const struct clk_master_layout *layout = master->layout;
const struct clk_master_characteristics *characteristics =
master->characteristics;
- u32 tmp;
+ unsigned int mckr;
- pmc_lock(pmc);
- tmp = pmc_read(pmc, AT91_PMC_MCKR) & layout->mask;
- pmc_unlock(pmc);
+ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
+ mckr &= layout->mask;
- pres = (tmp >> layout->pres_shift) & MASTER_PRES_MASK;
- div = (tmp >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX)
rate /= 3;
@@ -119,9 +106,11 @@ static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
static u8 clk_master_get_parent(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
- struct at91_pmc *pmc = master->pmc;
+ unsigned int mckr;
- return pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_CSS;
+ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
+
+ return mckr & AT91_PMC_CSS;
}
static const struct clk_ops master_ops = {
@@ -132,18 +121,17 @@ static const struct clk_ops master_ops = {
};
static struct clk * __init
-at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
+at91_clk_register_master(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics)
{
- int ret;
struct clk_master *master;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !irq || !name || !num_parents || !parent_names)
+ if (!name || !num_parents || !parent_names)
return ERR_PTR(-EINVAL);
master = kzalloc(sizeof(*master), GFP_KERNEL);
@@ -159,20 +147,10 @@ at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
master->hw.init = &init;
master->layout = layout;
master->characteristics = characteristics;
- master->pmc = pmc;
- master->irq = irq;
- init_waitqueue_head(&master->wait);
- irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
- ret = request_irq(master->irq, clk_master_irq_handler,
- IRQF_TRIGGER_HIGH, "clk-master", master);
- if (ret) {
- kfree(master);
- return ERR_PTR(ret);
- }
+ master->regmap = regmap;
clk = clk_register(NULL, &master->hw);
if (IS_ERR(clk)) {
- free_irq(master->irq, master);
kfree(master);
}
@@ -217,18 +195,18 @@ out_free_characteristics:
}
static void __init
-of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
+of_at91_clk_master_setup(struct device_node *np,
const struct clk_master_layout *layout)
{
struct clk *clk;
- int num_parents;
- unsigned int irq;
+ unsigned int num_parents;
const char *parent_names[MASTER_SOURCE_MAX];
const char *name = np->name;
struct clk_master_characteristics *characteristics;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > MASTER_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
@@ -239,11 +217,11 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
if (!characteristics)
return;
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
- goto out_free_characteristics;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
- clk = at91_clk_register_master(pmc, irq, name, num_parents,
+ clk = at91_clk_register_master(regmap, name, num_parents,
parent_names, layout,
characteristics);
if (IS_ERR(clk))
@@ -256,14 +234,16 @@ out_free_characteristics:
kfree(characteristics);
}
-void __init of_at91rm9200_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_master_setup(struct device_node *np)
{
- of_at91_clk_master_setup(np, pmc, &at91rm9200_master_layout);
+ of_at91_clk_master_setup(np, &at91rm9200_master_layout);
}
+CLK_OF_DECLARE(at91rm9200_clk_master, "atmel,at91rm9200-clk-master",
+ of_at91rm9200_clk_master_setup);
-void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_master_setup(struct device_node *np)
{
- of_at91_clk_master_setup(np, pmc, &at91sam9x5_master_layout);
+ of_at91_clk_master_setup(np, &at91sam9x5_master_layout);
}
+CLK_OF_DECLARE(at91sam9x5_clk_master, "atmel,at91sam9x5-clk-master",
+ of_at91sam9x5_clk_master_setup);
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 58f3b568e9cb..fd160728e990 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -12,11 +12,13 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
+DEFINE_SPINLOCK(pmc_pcr_lock);
+
#define PERIPHERAL_MAX 64
#define PERIPHERAL_AT91RM9200 0
@@ -33,7 +35,7 @@
struct clk_peripheral {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
u32 id;
};
@@ -41,8 +43,9 @@ struct clk_peripheral {
struct clk_sam9x5_peripheral {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
struct clk_range range;
+ spinlock_t *lock;
u32 id;
u32 div;
bool auto_div;
@@ -54,7 +57,6 @@ struct clk_sam9x5_peripheral {
static int clk_peripheral_enable(struct clk_hw *hw)
{
struct clk_peripheral *periph = to_clk_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
int offset = AT91_PMC_PCER;
u32 id = periph->id;
@@ -62,14 +64,14 @@ static int clk_peripheral_enable(struct clk_hw *hw)
return 0;
if (id > PERIPHERAL_ID_MAX)
offset = AT91_PMC_PCER1;
- pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id));
+
return 0;
}
static void clk_peripheral_disable(struct clk_hw *hw)
{
struct clk_peripheral *periph = to_clk_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
int offset = AT91_PMC_PCDR;
u32 id = periph->id;
@@ -77,21 +79,23 @@ static void clk_peripheral_disable(struct clk_hw *hw)
return;
if (id > PERIPHERAL_ID_MAX)
offset = AT91_PMC_PCDR1;
- pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id));
}
static int clk_peripheral_is_enabled(struct clk_hw *hw)
{
struct clk_peripheral *periph = to_clk_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
int offset = AT91_PMC_PCSR;
+ unsigned int status;
u32 id = periph->id;
if (id < PERIPHERAL_ID_MIN)
return 1;
if (id > PERIPHERAL_ID_MAX)
offset = AT91_PMC_PCSR1;
- return !!(pmc_read(pmc, offset) & PERIPHERAL_MASK(id));
+ regmap_read(periph->regmap, offset, &status);
+
+ return status & PERIPHERAL_MASK(id) ? 1 : 0;
}
static const struct clk_ops peripheral_ops = {
@@ -101,14 +105,14 @@ static const struct clk_ops peripheral_ops = {
};
static struct clk * __init
-at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name,
+at91_clk_register_peripheral(struct regmap *regmap, const char *name,
const char *parent_name, u32 id)
{
struct clk_peripheral *periph;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !name || !parent_name || id > PERIPHERAL_ID_MAX)
+ if (!name || !parent_name || id > PERIPHERAL_ID_MAX)
return ERR_PTR(-EINVAL);
periph = kzalloc(sizeof(*periph), GFP_KERNEL);
@@ -123,7 +127,7 @@ at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name,
periph->id = id;
periph->hw.init = &init;
- periph->pmc = pmc;
+ periph->regmap = regmap;
clk = clk_register(NULL, &periph->hw);
if (IS_ERR(clk))
@@ -160,53 +164,58 @@ static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
- u32 tmp;
+ unsigned long flags;
if (periph->id < PERIPHERAL_ID_MIN)
return 0;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_DIV_MASK;
- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_DIV(periph->div)
- | AT91_PMC_PCR_CMD
- | AT91_PMC_PCR_EN);
- pmc_unlock(pmc);
+ spin_lock_irqsave(periph->lock, flags);
+ regmap_write(periph->regmap, AT91_PMC_PCR,
+ (periph->id & AT91_PMC_PCR_PID_MASK));
+ regmap_update_bits(periph->regmap, AT91_PMC_PCR,
+ AT91_PMC_PCR_DIV_MASK | AT91_PMC_PCR_CMD |
+ AT91_PMC_PCR_EN,
+ AT91_PMC_PCR_DIV(periph->div) |
+ AT91_PMC_PCR_CMD |
+ AT91_PMC_PCR_EN);
+ spin_unlock_irqrestore(periph->lock, flags);
+
return 0;
}
static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
- u32 tmp;
+ unsigned long flags;
if (periph->id < PERIPHERAL_ID_MIN)
return;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_EN;
- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD);
- pmc_unlock(pmc);
+ spin_lock_irqsave(periph->lock, flags);
+ regmap_write(periph->regmap, AT91_PMC_PCR,
+ (periph->id & AT91_PMC_PCR_PID_MASK));
+ regmap_update_bits(periph->regmap, AT91_PMC_PCR,
+ AT91_PMC_PCR_EN | AT91_PMC_PCR_CMD,
+ AT91_PMC_PCR_CMD);
+ spin_unlock_irqrestore(periph->lock, flags);
}
static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
- int ret;
+ unsigned long flags;
+ unsigned int status;
if (periph->id < PERIPHERAL_ID_MIN)
return 1;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
- ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_EN);
- pmc_unlock(pmc);
+ spin_lock_irqsave(periph->lock, flags);
+ regmap_write(periph->regmap, AT91_PMC_PCR,
+ (periph->id & AT91_PMC_PCR_PID_MASK));
+ regmap_read(periph->regmap, AT91_PMC_PCR, &status);
+ spin_unlock_irqrestore(periph->lock, flags);
- return ret;
+ return status & AT91_PMC_PCR_EN ? 1 : 0;
}
static unsigned long
@@ -214,19 +223,20 @@ clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
- u32 tmp;
+ unsigned long flags;
+ unsigned int status;
if (periph->id < PERIPHERAL_ID_MIN)
return parent_rate;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR);
- pmc_unlock(pmc);
+ spin_lock_irqsave(periph->lock, flags);
+ regmap_write(periph->regmap, AT91_PMC_PCR,
+ (periph->id & AT91_PMC_PCR_PID_MASK));
+ regmap_read(periph->regmap, AT91_PMC_PCR, &status);
+ spin_unlock_irqrestore(periph->lock, flags);
- if (tmp & AT91_PMC_PCR_EN) {
- periph->div = PERIPHERAL_RSHIFT(tmp);
+ if (status & AT91_PMC_PCR_EN) {
+ periph->div = PERIPHERAL_RSHIFT(status);
periph->auto_div = false;
} else {
clk_sam9x5_peripheral_autodiv(periph);
@@ -318,15 +328,15 @@ static const struct clk_ops sam9x5_peripheral_ops = {
};
static struct clk * __init
-at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
- const char *parent_name, u32 id,
- const struct clk_range *range)
+at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name,
+ u32 id, const struct clk_range *range)
{
struct clk_sam9x5_peripheral *periph;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !name || !parent_name)
+ if (!name || !parent_name)
return ERR_PTR(-EINVAL);
periph = kzalloc(sizeof(*periph), GFP_KERNEL);
@@ -342,7 +352,8 @@ at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
periph->id = id;
periph->hw.init = &init;
periph->div = 0;
- periph->pmc = pmc;
+ periph->regmap = regmap;
+ periph->lock = lock;
periph->auto_div = true;
periph->range = *range;
@@ -356,7 +367,7 @@ at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
}
static void __init
-of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
+of_at91_clk_periph_setup(struct device_node *np, u8 type)
{
int num;
u32 id;
@@ -364,6 +375,7 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
const char *parent_name;
const char *name;
struct device_node *periphclknp;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -373,6 +385,10 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
if (!num || num > PERIPHERAL_MAX)
return;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
for_each_child_of_node(np, periphclknp) {
if (of_property_read_u32(periphclknp, "reg", &id))
continue;
@@ -384,7 +400,7 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
name = periphclknp->name;
if (type == PERIPHERAL_AT91RM9200) {
- clk = at91_clk_register_peripheral(pmc, name,
+ clk = at91_clk_register_peripheral(regmap, name,
parent_name, id);
} else {
struct clk_range range = CLK_RANGE(0, 0);
@@ -393,7 +409,9 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
"atmel,clk-output-range",
&range);
- clk = at91_clk_register_sam9x5_peripheral(pmc, name,
+ clk = at91_clk_register_sam9x5_peripheral(regmap,
+ &pmc_pcr_lock,
+ name,
parent_name,
id, &range);
}
@@ -405,14 +423,17 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
}
}
-void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_periph_setup(struct device_node *np)
{
- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91RM9200);
+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91RM9200);
}
+CLK_OF_DECLARE(at91rm9200_clk_periph, "atmel,at91rm9200-clk-peripheral",
+ of_at91rm9200_clk_periph_setup);
-void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_periph_setup(struct device_node *np)
{
- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91SAM9X5);
+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91SAM9X5);
}
+CLK_OF_DECLARE(at91sam9x5_clk_periph, "atmel,at91sam9x5-clk-peripheral",
+ of_at91sam9x5_clk_periph_setup);
+
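clk-peripheral.c is where the shared lock is actually defined: DEFINE_SPINLOCK(pmc_pcr_lock) above creates the single lock serializing every user of the indexed PCR register, and both this file and clk-generated.c hand its address to their registration helpers (visible above as the &pmc_pcr_lock arguments). Since clk-generated.c references the symbol without defining it, a declaration evidently lives in a shared header, presumably pmc.h. In sketch form:

#include <linux/spinlock.h>

/* Defined once here; every PCR sequence, whatever file it lives in,
 * must take this lock around the ID-select write plus the update. */
DEFINE_SPINLOCK(pmc_pcr_lock);

/* extern spinlock_t pmc_pcr_lock;  -- shared declaration for other users */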
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 18b60f4895a6..fb2e0b56d4b7 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -12,14 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -58,9 +52,7 @@ struct clk_pll_layout {
struct clk_pll {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
u8 id;
u8 div;
u8 range;
@@ -69,20 +61,19 @@ struct clk_pll {
const struct clk_pll_characteristics *characteristics;
};
-static irqreturn_t clk_pll_irq_handler(int irq, void *dev_id)
+static inline bool clk_pll_ready(struct regmap *regmap, int id)
{
- struct clk_pll *pll = (struct clk_pll *)dev_id;
+ unsigned int status;
- wake_up(&pll->wait);
- disable_irq_nosync(pll->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & PLL_STATUS_MASK(id) ? 1 : 0;
}
static int clk_pll_prepare(struct clk_hw *hw)
{
struct clk_pll *pll = to_clk_pll(hw);
- struct at91_pmc *pmc = pll->pmc;
+ struct regmap *regmap = pll->regmap;
const struct clk_pll_layout *layout = pll->layout;
const struct clk_pll_characteristics *characteristics =
pll->characteristics;
@@ -90,39 +81,34 @@ static int clk_pll_prepare(struct clk_hw *hw)
u32 mask = PLL_STATUS_MASK(id);
int offset = PLL_REG(id);
u8 out = 0;
- u32 pllr, icpr;
+ unsigned int pllr;
+ unsigned int status;
u8 div;
u16 mul;
- pllr = pmc_read(pmc, offset);
+ regmap_read(regmap, offset, &pllr);
div = PLL_DIV(pllr);
mul = PLL_MUL(pllr, layout);
- if ((pmc_read(pmc, AT91_PMC_SR) & mask) &&
+ regmap_read(regmap, AT91_PMC_SR, &status);
+ if ((status & mask) &&
(div == pll->div && mul == pll->mul))
return 0;
if (characteristics->out)
out = characteristics->out[pll->range];
- if (characteristics->icpll) {
- icpr = pmc_read(pmc, AT91_PMC_PLLICPR) & ~PLL_ICPR_MASK(id);
- icpr |= (characteristics->icpll[pll->range] <<
- PLL_ICPR_SHIFT(id));
- pmc_write(pmc, AT91_PMC_PLLICPR, icpr);
- }
- pllr &= ~layout->pllr_mask;
- pllr |= layout->pllr_mask &
- (pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) |
- (out << PLL_OUT_SHIFT) |
- ((pll->mul & layout->mul_mask) << layout->mul_shift));
- pmc_write(pmc, offset, pllr);
-
- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
- enable_irq(pll->irq);
- wait_event(pll->wait,
- pmc_read(pmc, AT91_PMC_SR) & mask);
- }
+ if (characteristics->icpll)
+ regmap_update_bits(regmap, AT91_PMC_PLLICPR, PLL_ICPR_MASK(id),
+ characteristics->icpll[pll->range] << PLL_ICPR_SHIFT(id));
+
+ regmap_update_bits(regmap, offset, layout->pllr_mask,
+ pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) |
+ (out << PLL_OUT_SHIFT) |
+ ((pll->mul & layout->mul_mask) << layout->mul_shift));
+
+ while (!clk_pll_ready(regmap, pll->id))
+ cpu_relax();
return 0;
}
@@ -130,32 +116,35 @@ static int clk_pll_prepare(struct clk_hw *hw)
static int clk_pll_is_prepared(struct clk_hw *hw)
{
struct clk_pll *pll = to_clk_pll(hw);
- struct at91_pmc *pmc = pll->pmc;
- return !!(pmc_read(pmc, AT91_PMC_SR) &
- PLL_STATUS_MASK(pll->id));
+ return clk_pll_ready(pll->regmap, pll->id);
}
static void clk_pll_unprepare(struct clk_hw *hw)
{
struct clk_pll *pll = to_clk_pll(hw);
- struct at91_pmc *pmc = pll->pmc;
- const struct clk_pll_layout *layout = pll->layout;
- int offset = PLL_REG(pll->id);
- u32 tmp = pmc_read(pmc, offset) & ~(layout->pllr_mask);
+ unsigned int mask = pll->layout->pllr_mask;
- pmc_write(pmc, offset, tmp);
+ regmap_update_bits(pll->regmap, PLL_REG(pll->id), mask, ~mask);
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll *pll = to_clk_pll(hw);
+ unsigned int pllr;
+ u16 mul;
+ u8 div;
- if (!pll->div || !pll->mul)
+ regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
+
+ div = PLL_DIV(pllr);
+ mul = PLL_MUL(pllr, pll->layout);
+
+ if (!div || !mul)
return 0;
- return (parent_rate / pll->div) * (pll->mul + 1);
+ return (parent_rate / div) * (mul + 1);
}
static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
@@ -308,7 +297,7 @@ static const struct clk_ops pll_ops = {
};
static struct clk * __init
-at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
+at91_clk_register_pll(struct regmap *regmap, const char *name,
const char *parent_name, u8 id,
const struct clk_pll_layout *layout,
const struct clk_pll_characteristics *characteristics)
@@ -316,9 +305,8 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
struct clk_pll *pll;
struct clk *clk = NULL;
struct clk_init_data init;
- int ret;
int offset = PLL_REG(id);
- u32 tmp;
+ unsigned int pllr;
if (id > PLL_MAX_ID)
return ERR_PTR(-EINVAL);
@@ -337,23 +325,13 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
pll->hw.init = &init;
pll->layout = layout;
pll->characteristics = characteristics;
- pll->pmc = pmc;
- pll->irq = irq;
- tmp = pmc_read(pmc, offset) & layout->pllr_mask;
- pll->div = PLL_DIV(tmp);
- pll->mul = PLL_MUL(tmp, layout);
- init_waitqueue_head(&pll->wait);
- irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
- ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
- id ? "clk-pllb" : "clk-plla", pll);
- if (ret) {
- kfree(pll);
- return ERR_PTR(ret);
- }
+ pll->regmap = regmap;
+ regmap_read(regmap, offset, &pllr);
+ pll->div = PLL_DIV(pllr);
+ pll->mul = PLL_MUL(pllr, layout);
clk = clk_register(NULL, &pll->hw);
if (IS_ERR(clk)) {
- free_irq(pll->irq, pll);
kfree(pll);
}
@@ -483,12 +461,12 @@ out_free_characteristics:
}
static void __init
-of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc,
+of_at91_clk_pll_setup(struct device_node *np,
const struct clk_pll_layout *layout)
{
u32 id;
- unsigned int irq;
struct clk *clk;
+ struct regmap *regmap;
const char *parent_name;
const char *name = np->name;
struct clk_pll_characteristics *characteristics;
@@ -500,15 +478,15 @@ of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc,
of_property_read_string(np, "clock-output-names", &name);
- characteristics = of_at91_clk_pll_get_characteristics(np);
- if (!characteristics)
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
return;
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
+ characteristics = of_at91_clk_pll_get_characteristics(np);
+ if (!characteristics)
return;
- clk = at91_clk_register_pll(pmc, irq, name, parent_name, id, layout,
+ clk = at91_clk_register_pll(regmap, name, parent_name, id, layout,
characteristics);
if (IS_ERR(clk))
goto out_free_characteristics;
@@ -520,26 +498,30 @@ out_free_characteristics:
kfree(characteristics);
}
-void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_pll_setup(struct device_node *np)
{
- of_at91_clk_pll_setup(np, pmc, &at91rm9200_pll_layout);
+ of_at91_clk_pll_setup(np, &at91rm9200_pll_layout);
}
+CLK_OF_DECLARE(at91rm9200_clk_pll, "atmel,at91rm9200-clk-pll",
+ of_at91rm9200_clk_pll_setup);
-void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9g45_clk_pll_setup(struct device_node *np)
{
- of_at91_clk_pll_setup(np, pmc, &at91sam9g45_pll_layout);
+ of_at91_clk_pll_setup(np, &at91sam9g45_pll_layout);
}
+CLK_OF_DECLARE(at91sam9g45_clk_pll, "atmel,at91sam9g45-clk-pll",
+ of_at91sam9g45_clk_pll_setup);
-void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np)
{
- of_at91_clk_pll_setup(np, pmc, &at91sam9g20_pllb_layout);
+ of_at91_clk_pll_setup(np, &at91sam9g20_pllb_layout);
}
+CLK_OF_DECLARE(at91sam9g20_clk_pllb, "atmel,at91sam9g20-clk-pllb",
+ of_at91sam9g20_clk_pllb_setup);
-void __init of_sama5d3_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_sama5d3_clk_pll_setup(struct device_node *np)
{
- of_at91_clk_pll_setup(np, pmc, &sama5d3_pll_layout);
+ of_at91_clk_pll_setup(np, &sama5d3_pll_layout);
}
+CLK_OF_DECLARE(sama5d3_clk_pll, "atmel,sama5d3-clk-pll",
+ of_sama5d3_clk_pll_setup);
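A detail worth noting in the clk-pll.c hunk above: clk_pll_recalc_rate() no longer trusts the cached pll->div and pll->mul but rereads PLLR on every call, so the reported rate follows whatever the hardware (or the bootloader) last programmed, even if the cached copies go stale. Condensed sketch; pll_rate_from_hw is a placeholder name, and PLL_REG()/PLL_DIV()/PLL_MUL() plus struct clk_pll_layout are clk-pll.c internals assumed from the hunks above:

#include <linux/regmap.h>
#include <linux/types.h>

/* Rate is derived from the live PLLR contents on every call. */
static unsigned long pll_rate_from_hw(struct regmap *regmap, int id,
				      const struct clk_pll_layout *layout,
				      unsigned long parent_rate)
{
	unsigned int pllr;
	u16 mul;
	u8 div;

	regmap_read(regmap, PLL_REG(id), &pllr);
	div = PLL_DIV(pllr);
	mul = PLL_MUL(pllr, layout);

	if (!div || !mul)
		return 0;

	return (parent_rate / div) * (mul + 1);
}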
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
index ea226562bb40..2bed26481027 100644
--- a/drivers/clk/at91/clk-plldiv.c
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -12,8 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -21,16 +21,18 @@
struct clk_plldiv {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_plldiv *plldiv = to_clk_plldiv(hw);
- struct at91_pmc *pmc = plldiv->pmc;
+ unsigned int mckr;
- if (pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_PLLADIV2)
+ regmap_read(plldiv->regmap, AT91_PMC_MCKR, &mckr);
+
+ if (mckr & AT91_PMC_PLLADIV2)
return parent_rate / 2;
return parent_rate;
@@ -57,18 +59,12 @@ static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_plldiv *plldiv = to_clk_plldiv(hw);
- struct at91_pmc *pmc = plldiv->pmc;
- u32 tmp;
- if (parent_rate != rate && (parent_rate / 2) != rate)
+ if ((parent_rate != rate) && (parent_rate / 2 != rate))
return -EINVAL;
- pmc_lock(pmc);
- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_PLLADIV2;
- if ((parent_rate / 2) == rate)
- tmp |= AT91_PMC_PLLADIV2;
- pmc_write(pmc, AT91_PMC_MCKR, tmp);
- pmc_unlock(pmc);
+ regmap_update_bits(plldiv->regmap, AT91_PMC_MCKR, AT91_PMC_PLLADIV2,
+ parent_rate != rate ? AT91_PMC_PLLADIV2 : 0);
return 0;
}
@@ -80,7 +76,7 @@ static const struct clk_ops plldiv_ops = {
};
static struct clk * __init
-at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
+at91_clk_register_plldiv(struct regmap *regmap, const char *name,
const char *parent_name)
{
struct clk_plldiv *plldiv;
@@ -98,7 +94,7 @@ at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
init.flags = CLK_SET_RATE_GATE;
plldiv->hw.init = &init;
- plldiv->pmc = pmc;
+ plldiv->regmap = regmap;
clk = clk_register(NULL, &plldiv->hw);
@@ -109,27 +105,27 @@ at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
}
static void __init
-of_at91_clk_plldiv_setup(struct device_node *np, struct at91_pmc *pmc)
+of_at91sam9x5_clk_plldiv_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- clk = at91_clk_register_plldiv(pmc, name, parent_name);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+ clk = at91_clk_register_plldiv(regmap, name, parent_name);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
return;
}
-
-void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
- struct at91_pmc *pmc)
-{
- of_at91_clk_plldiv_setup(np, pmc);
-}
+CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv",
+ of_at91sam9x5_clk_plldiv_setup);
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 14b270b85fec..10f846cc8db1 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -12,10 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -24,6 +22,7 @@
#define PROG_STATUS_MASK(id) (1 << ((id) + 8))
#define PROG_PRES_MASK 0x7
+#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK)
#define PROG_MAX_RM9200_CSS 3
struct clk_programmable_layout {
@@ -34,7 +33,7 @@ struct clk_programmable_layout {
struct clk_programmable {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
u8 id;
const struct clk_programmable_layout *layout;
};
@@ -44,14 +43,12 @@ struct clk_programmable {
static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u32 pres;
struct clk_programmable *prog = to_clk_programmable(hw);
- struct at91_pmc *pmc = prog->pmc;
- const struct clk_programmable_layout *layout = prog->layout;
+ unsigned int pckr;
+
+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
- pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) &
- PROG_PRES_MASK;
- return parent_rate >> pres;
+ return parent_rate >> PROG_PRES(prog->layout, pckr);
}
static int clk_programmable_determine_rate(struct clk_hw *hw,
@@ -101,36 +98,36 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
- struct at91_pmc *pmc = prog->pmc;
- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask;
+ unsigned int mask = layout->css_mask;
+ unsigned int pckr = 0;
if (layout->have_slck_mck)
- tmp &= AT91_PMC_CSSMCK_MCK;
+ mask |= AT91_PMC_CSSMCK_MCK;
if (index > layout->css_mask) {
- if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) {
- tmp |= AT91_PMC_CSSMCK_MCK;
- return 0;
- } else {
+ if (index > PROG_MAX_RM9200_CSS && !layout->have_slck_mck)
return -EINVAL;
- }
+
+ pckr |= AT91_PMC_CSSMCK_MCK;
}
- pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | index);
+ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), mask, pckr);
+
return 0;
}
static u8 clk_programmable_get_parent(struct clk_hw *hw)
{
- u32 tmp;
- u8 ret;
struct clk_programmable *prog = to_clk_programmable(hw);
- struct at91_pmc *pmc = prog->pmc;
const struct clk_programmable_layout *layout = prog->layout;
+ unsigned int pckr;
+ u8 ret;
+
+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
+
+ ret = pckr & layout->css_mask;
- tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
- ret = tmp & layout->css_mask;
- if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret)
+ if (layout->have_slck_mck && (pckr & AT91_PMC_CSSMCK_MCK) && !ret)
ret = PROG_MAX_RM9200_CSS + 1;
return ret;
@@ -140,26 +137,27 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_programmable *prog = to_clk_programmable(hw);
- struct at91_pmc *pmc = prog->pmc;
const struct clk_programmable_layout *layout = prog->layout;
unsigned long div = parent_rate / rate;
+ unsigned int pckr;
int shift = 0;
- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) &
- ~(PROG_PRES_MASK << layout->pres_shift);
+
+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
if (!div)
return -EINVAL;
shift = fls(div) - 1;
- if (div != (1<<shift))
+ if (div != (1 << shift))
return -EINVAL;
if (shift >= PROG_PRES_MASK)
return -EINVAL;
- pmc_write(pmc, AT91_PMC_PCKR(prog->id),
- tmp | (shift << layout->pres_shift));
+ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
+ PROG_PRES_MASK << layout->pres_shift,
+ shift << layout->pres_shift);
return 0;
}
@@ -173,7 +171,7 @@ static const struct clk_ops programmable_ops = {
};
static struct clk * __init
-at91_clk_register_programmable(struct at91_pmc *pmc,
+at91_clk_register_programmable(struct regmap *regmap,
const char *name, const char **parent_names,
u8 num_parents, u8 id,
const struct clk_programmable_layout *layout)
@@ -198,7 +196,7 @@ at91_clk_register_programmable(struct at91_pmc *pmc,
prog->id = id;
prog->layout = layout;
prog->hw.init = &init;
- prog->pmc = pmc;
+ prog->regmap = regmap;
clk = clk_register(NULL, &prog->hw);
if (IS_ERR(clk))
@@ -226,19 +224,20 @@ static const struct clk_programmable_layout at91sam9x5_programmable_layout = {
};
static void __init
-of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
+of_at91_clk_prog_setup(struct device_node *np,
const struct clk_programmable_layout *layout)
{
int num;
u32 id;
struct clk *clk;
- int num_parents;
+ unsigned int num_parents;
const char *parent_names[PROG_SOURCE_MAX];
const char *name;
struct device_node *progclknp;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > PROG_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
@@ -247,6 +246,10 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
if (!num || num > (PROG_ID_MAX + 1))
return;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
for_each_child_of_node(np, progclknp) {
if (of_property_read_u32(progclknp, "reg", &id))
continue;
@@ -254,7 +257,7 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
if (of_property_read_string(np, "clock-output-names", &name))
name = progclknp->name;
- clk = at91_clk_register_programmable(pmc, name,
+ clk = at91_clk_register_programmable(regmap, name,
parent_names, num_parents,
id, layout);
if (IS_ERR(clk))
@@ -265,20 +268,23 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
}
-void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_prog_setup(struct device_node *np)
{
- of_at91_clk_prog_setup(np, pmc, &at91rm9200_programmable_layout);
+ of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout);
}
+CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable",
+ of_at91rm9200_clk_prog_setup);
-void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np)
{
- of_at91_clk_prog_setup(np, pmc, &at91sam9g45_programmable_layout);
+ of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout);
}
+CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable",
+ of_at91sam9g45_clk_prog_setup);
-void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np)
{
- of_at91_clk_prog_setup(np, pmc, &at91sam9x5_programmable_layout);
+ of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout);
}
+CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable",
+ of_at91sam9x5_clk_prog_setup);
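clk_programmable_set_rate() above only accepts power-of-two dividers: it derives the PCKR prescaler field as log2(parent_rate / rate) via fls() and rejects anything that does not divide down exactly. A standalone sketch of that check; fls_sketch and the example rates are illustrative, not from the driver:

#include <stdio.h>

/* open-coded stand-in for the kernel's fls(): index of highest set bit */
static int fls_sketch(unsigned long x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned long parent_rate = 133000000, rate = 16625000;
	unsigned long div = parent_rate / rate;	/* 8 */
	int shift = fls_sketch(div) - 1;	/* 3 */

	if (div != (1UL << shift))
		printf("-EINVAL: %lu is not a power of two\n", div);
	else
		printf("PRES field = %d (rate = parent >> %d)\n", shift, shift);
	return 0;
}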
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 6f99a530ead6..61090b1146cf 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -12,17 +12,11 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
-#include <linux/slab.h>
#include <linux/clk/at91_pmc.h>
#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
#include "sckc.h"
@@ -58,7 +52,7 @@ struct clk_slow_rc_osc {
struct clk_sam9260_slow {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw)
@@ -251,7 +245,7 @@ at91_clk_register_slow_rc_osc(void __iomem *sckcr,
init.ops = &slow_rc_osc_ops;
init.parent_names = NULL;
init.num_parents = 0;
- init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;
+ init.flags = CLK_IGNORE_UNUSED;
osc->hw.init = &init;
osc->sckcr = sckcr;
@@ -366,11 +360,11 @@ void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
{
struct clk *clk;
const char *parent_names[2];
- int num_parents;
+ unsigned int num_parents;
const char *name = np->name;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > 2)
+ if (num_parents == 0 || num_parents > 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
@@ -388,8 +382,11 @@ void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw)
{
struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw);
+ unsigned int status;
+
+ regmap_read(slowck->regmap, AT91_PMC_SR, &status);
- return !!(pmc_read(slowck->pmc, AT91_PMC_SR) & AT91_PMC_OSCSEL);
+ return status & AT91_PMC_OSCSEL ? 1 : 0;
}
static const struct clk_ops sam9260_slow_ops = {
@@ -397,7 +394,7 @@ static const struct clk_ops sam9260_slow_ops = {
};
static struct clk * __init
-at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
+at91_clk_register_sam9260_slow(struct regmap *regmap,
const char *name,
const char **parent_names,
int num_parents)
@@ -406,7 +403,7 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !name)
+ if (!name)
return ERR_PTR(-EINVAL);
if (!parent_names || !num_parents)
@@ -423,7 +420,7 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
init.flags = 0;
slowck->hw.init = &init;
- slowck->pmc = pmc;
+ slowck->regmap = regmap;
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
@@ -432,26 +429,32 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
return clk;
}
-void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9260_clk_slow_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_names[2];
- int num_parents;
+ unsigned int num_parents;
const char *name = np->name;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
if (num_parents != 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
of_property_read_string(np, "clock-output-names", &name);
- clk = at91_clk_register_sam9260_slow(pmc, name, parent_names,
+ clk = at91_clk_register_sam9260_slow(regmap, name, parent_names,
num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+
+CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow",
+ of_at91sam9260_clk_slow_setup);
diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c
index a7f8501cfa05..3c04b069d5b8 100644
--- a/drivers/clk/at91/clk-smd.c
+++ b/drivers/clk/at91/clk-smd.c
@@ -12,8 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -24,7 +24,7 @@
struct at91sam9x5_clk_smd {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_at91sam9x5_clk_smd(hw) \
@@ -33,13 +33,13 @@ struct at91sam9x5_clk_smd {
static unsigned long at91sam9x5_clk_smd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u32 tmp;
- u8 smddiv;
struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
- struct at91_pmc *pmc = smd->pmc;
+ unsigned int smdr;
+ u8 smddiv;
+
+ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr);
+ smddiv = (smdr & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT;
- tmp = pmc_read(pmc, AT91_PMC_SMD);
- smddiv = (tmp & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT;
return parent_rate / (smddiv + 1);
}
@@ -67,40 +67,38 @@ static long at91sam9x5_clk_smd_round_rate(struct clk_hw *hw, unsigned long rate,
static int at91sam9x5_clk_smd_set_parent(struct clk_hw *hw, u8 index)
{
- u32 tmp;
struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
- struct at91_pmc *pmc = smd->pmc;
if (index > 1)
return -EINVAL;
- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMDS;
- if (index)
- tmp |= AT91_PMC_SMDS;
- pmc_write(pmc, AT91_PMC_SMD, tmp);
+
+ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMDS,
+ index ? AT91_PMC_SMDS : 0);
+
return 0;
}
static u8 at91sam9x5_clk_smd_get_parent(struct clk_hw *hw)
{
struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
- struct at91_pmc *pmc = smd->pmc;
+ unsigned int smdr;
- return pmc_read(pmc, AT91_PMC_SMD) & AT91_PMC_SMDS;
+ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr);
+
+ return smdr & AT91_PMC_SMDS;
}
static int at91sam9x5_clk_smd_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u32 tmp;
struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
- struct at91_pmc *pmc = smd->pmc;
unsigned long div = parent_rate / rate;
if (parent_rate % rate || div < 1 || div > (SMD_MAX_DIV + 1))
return -EINVAL;
- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMD_DIV;
- tmp |= (div - 1) << SMD_DIV_SHIFT;
- pmc_write(pmc, AT91_PMC_SMD, tmp);
+
+ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMD_DIV,
+ (div - 1) << SMD_DIV_SHIFT);
return 0;
}
@@ -114,7 +112,7 @@ static const struct clk_ops at91sam9x5_smd_ops = {
};
static struct clk * __init
-at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
+at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents)
{
struct at91sam9x5_clk_smd *smd;
@@ -132,7 +130,7 @@ at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
smd->hw.init = &init;
- smd->pmc = pmc;
+ smd->regmap = regmap;
clk = clk_register(NULL, &smd->hw);
if (IS_ERR(clk))
@@ -141,26 +139,32 @@ at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
return clk;
}
-void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
{
struct clk *clk;
- int num_parents;
+ unsigned int num_parents;
const char *parent_names[SMD_SOURCE_MAX];
const char *name = np->name;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > SMD_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
- clk = at91sam9x5_clk_register_smd(pmc, name, parent_names,
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91sam9x5_clk_register_smd(regmap, name, parent_names,
num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd",
+ of_at91sam9x5_clk_smd_setup);
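at91sam9x5_clk_smd_set_rate() above demands an exact integer divider in [1, SMD_MAX_DIV + 1]; a rate with a remainder is rejected rather than rounded. A standalone sketch of the validation, assuming SMD_MAX_DIV is 0xf (the 4-bit AT91_PMC_SMD_DIV field); the example rates are illustrative:

#include <stdio.h>

#define SMD_MAX_DIV	0xf	/* assumption: 4-bit divider field */

static int smd_check_rate(unsigned long parent_rate, unsigned long rate)
{
	unsigned long div = parent_rate / rate;

	if (parent_rate % rate || div < 1 || div > (SMD_MAX_DIV + 1))
		return -1;	/* -EINVAL in the driver */
	return (int)(div - 1);	/* value written to AT91_PMC_SMD_DIV */
}

int main(void)
{
	printf("48MHz/4  -> field %d\n", smd_check_rate(48000000, 12000000));
	printf("48MHz/5  -> field %d\n", smd_check_rate(48000000, 9600000));
	printf("48MHz/7? -> %d\n", smd_check_rate(48000000, 7000000));
	return 0;
}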
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 3f5314344286..8f35d8172909 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -12,13 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/of_irq.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -29,9 +24,7 @@
#define to_clk_system(hw) container_of(hw, struct clk_system, hw)
struct clk_system {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
u8 id;
};
@@ -39,58 +32,54 @@ static inline int is_pck(int id)
{
return (id >= 8) && (id <= 15);
}
-static irqreturn_t clk_system_irq_handler(int irq, void *dev_id)
+
+static inline bool clk_system_ready(struct regmap *regmap, int id)
{
- struct clk_system *sys = (struct clk_system *)dev_id;
+ unsigned int status;
- wake_up(&sys->wait);
- disable_irq_nosync(sys->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & (1 << id) ? 1 : 0;
}
static int clk_system_prepare(struct clk_hw *hw)
{
struct clk_system *sys = to_clk_system(hw);
- struct at91_pmc *pmc = sys->pmc;
- u32 mask = 1 << sys->id;
- pmc_write(pmc, AT91_PMC_SCER, mask);
+ regmap_write(sys->regmap, AT91_PMC_SCER, 1 << sys->id);
if (!is_pck(sys->id))
return 0;
- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
- if (sys->irq) {
- enable_irq(sys->irq);
- wait_event(sys->wait,
- pmc_read(pmc, AT91_PMC_SR) & mask);
- } else
- cpu_relax();
- }
+ while (!clk_system_ready(sys->regmap, sys->id))
+ cpu_relax();
+
return 0;
}
static void clk_system_unprepare(struct clk_hw *hw)
{
struct clk_system *sys = to_clk_system(hw);
- struct at91_pmc *pmc = sys->pmc;
- pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id);
+ regmap_write(sys->regmap, AT91_PMC_SCDR, 1 << sys->id);
}
static int clk_system_is_prepared(struct clk_hw *hw)
{
struct clk_system *sys = to_clk_system(hw);
- struct at91_pmc *pmc = sys->pmc;
+ unsigned int status;
+
+ regmap_read(sys->regmap, AT91_PMC_SCSR, &status);
- if (!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id)))
+ if (!(status & (1 << sys->id)))
return 0;
if (!is_pck(sys->id))
return 1;
- return !!(pmc_read(pmc, AT91_PMC_SR) & (1 << sys->id));
+ regmap_read(sys->regmap, AT91_PMC_SR, &status);
+
+ return status & (1 << sys->id) ? 1 : 0;
}
static const struct clk_ops system_ops = {
@@ -100,13 +89,12 @@ static const struct clk_ops system_ops = {
};
static struct clk * __init
-at91_clk_register_system(struct at91_pmc *pmc, const char *name,
- const char *parent_name, u8 id, int irq)
+at91_clk_register_system(struct regmap *regmap, const char *name,
+ const char *parent_name, u8 id)
{
struct clk_system *sys;
struct clk *clk = NULL;
struct clk_init_data init;
- int ret;
if (!parent_name || id > SYSTEM_MAX_ID)
return ERR_PTR(-EINVAL);
@@ -123,44 +111,33 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
sys->id = id;
sys->hw.init = &init;
- sys->pmc = pmc;
- sys->irq = irq;
- if (irq) {
- init_waitqueue_head(&sys->wait);
- irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
- ret = request_irq(sys->irq, clk_system_irq_handler,
- IRQF_TRIGGER_HIGH, name, sys);
- if (ret) {
- kfree(sys);
- return ERR_PTR(ret);
- }
- }
+ sys->regmap = regmap;
clk = clk_register(NULL, &sys->hw);
- if (IS_ERR(clk)) {
- if (irq)
- free_irq(sys->irq, sys);
+ if (IS_ERR(clk))
kfree(sys);
- }
return clk;
}
-static void __init
-of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
{
int num;
- int irq = 0;
u32 id;
struct clk *clk;
const char *name;
struct device_node *sysclknp;
const char *parent_name;
+ struct regmap *regmap;
num = of_get_child_count(np);
if (num > (SYSTEM_MAX_ID + 1))
return;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
for_each_child_of_node(np, sysclknp) {
if (of_property_read_u32(sysclknp, "reg", &id))
continue;
@@ -168,21 +145,14 @@ of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
if (of_property_read_string(np, "clock-output-names", &name))
name = sysclknp->name;
- if (is_pck(id))
- irq = irq_of_parse_and_map(sysclknp, 0);
-
parent_name = of_clk_get_parent_name(sysclknp, 0);
- clk = at91_clk_register_system(pmc, name, parent_name, id, irq);
+ clk = at91_clk_register_system(regmap, name, parent_name, id);
if (IS_ERR(clk))
continue;
of_clk_add_provider(sysclknp, of_clk_src_simple_get, clk);
}
}
-
-void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
- struct at91_pmc *pmc)
-{
- of_at91_clk_sys_setup(np, pmc);
-}
+CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system",
+ of_at91rm9200_clk_sys_setup);
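Dropping the PMC irqchip means prepare() can no longer sleep on a wait queue for the PCKxRDY bit, so the series trades the interrupt handshake for a busy-wait on AT91_PMC_SR. A hedged sketch of the resulting pattern, same shape as clk_system_prepare() above; a production driver might add a timeout, which this sketch omits:

/* Sketch: spin until the ready bit for clock 'id' appears in PMC_SR.
 * cpu_relax() hints to the CPU that we are in a busy-wait loop. */
static void wait_clk_ready_sketch(struct regmap *regmap, int id)
{
	unsigned int status;

	do {
		cpu_relax();
		regmap_read(regmap, AT91_PMC_SR, &status);
	} while (!(status & (1 << id)));
}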
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 8ab8502778a2..d80bdb0a8b02 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -12,8 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -27,7 +27,7 @@
struct at91sam9x5_clk_usb {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_at91sam9x5_clk_usb(hw) \
@@ -35,7 +35,7 @@ struct at91sam9x5_clk_usb {
struct at91rm9200_clk_usb {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
u32 divisors[4];
};
@@ -45,13 +45,12 @@ struct at91rm9200_clk_usb {
static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u32 tmp;
- u8 usbdiv;
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
+ unsigned int usbr;
+ u8 usbdiv;
- tmp = pmc_read(pmc, AT91_PMC_USB);
- usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
+ usbdiv = (usbr & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
}
@@ -109,33 +108,31 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
{
- u32 tmp;
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
if (index > 1)
return -EINVAL;
- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS;
- if (index)
- tmp |= AT91_PMC_USBS;
- pmc_write(pmc, AT91_PMC_USB, tmp);
+
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS,
+ index ? AT91_PMC_USBS : 0);
+
return 0;
}
static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
+ unsigned int usbr;
- return pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS;
+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
+
+ return usbr & AT91_PMC_USBS;
}
static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u32 tmp;
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
unsigned long div;
if (!rate)
@@ -145,9 +142,8 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
if (div > SAM9X5_USB_MAX_DIV + 1 || !div)
return -EINVAL;
- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
- tmp |= (div - 1) << SAM9X5_USB_DIV_SHIFT;
- pmc_write(pmc, AT91_PMC_USB, tmp);
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_OHCIUSBDIV,
+ (div - 1) << SAM9X5_USB_DIV_SHIFT);
return 0;
}
@@ -163,28 +159,28 @@ static const struct clk_ops at91sam9x5_usb_ops = {
static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
- pmc_write(pmc, AT91_PMC_USB,
- pmc_read(pmc, AT91_PMC_USB) | AT91_PMC_USBS);
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS,
+ AT91_PMC_USBS);
+
return 0;
}
static void at91sam9n12_clk_usb_disable(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
- pmc_write(pmc, AT91_PMC_USB,
- pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS);
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS, 0);
}
static int at91sam9n12_clk_usb_is_enabled(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
+ unsigned int usbr;
- return !!(pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS);
+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
+
+ return usbr & AT91_PMC_USBS;
}
static const struct clk_ops at91sam9n12_usb_ops = {
@@ -197,7 +193,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
};
static struct clk * __init
-at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
+at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents)
{
struct at91sam9x5_clk_usb *usb;
@@ -216,7 +212,7 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
CLK_SET_RATE_PARENT;
usb->hw.init = &init;
- usb->pmc = pmc;
+ usb->regmap = regmap;
clk = clk_register(NULL, &usb->hw);
if (IS_ERR(clk))
@@ -226,7 +222,7 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
}
static struct clk * __init
-at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
+at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name)
{
struct at91sam9x5_clk_usb *usb;
@@ -244,7 +240,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
usb->hw.init = &init;
- usb->pmc = pmc;
+ usb->regmap = regmap;
clk = clk_register(NULL, &usb->hw);
if (IS_ERR(clk))
@@ -257,12 +253,12 @@ static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
- u32 tmp;
+ unsigned int pllbr;
u8 usbdiv;
- tmp = pmc_read(pmc, AT91_CKGR_PLLBR);
- usbdiv = (tmp & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
+ regmap_read(usb->regmap, AT91_CKGR_PLLBR, &pllbr);
+
+ usbdiv = (pllbr & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
if (usb->divisors[usbdiv])
return parent_rate / usb->divisors[usbdiv];
@@ -310,10 +306,8 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u32 tmp;
int i;
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
unsigned long div;
if (!rate)
@@ -323,10 +317,10 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
if (usb->divisors[i] == div) {
- tmp = pmc_read(pmc, AT91_CKGR_PLLBR) &
- ~AT91_PMC_USBDIV;
- tmp |= i << RM9200_USB_DIV_SHIFT;
- pmc_write(pmc, AT91_CKGR_PLLBR, tmp);
+ regmap_update_bits(usb->regmap, AT91_CKGR_PLLBR,
+ AT91_PMC_USBDIV,
+ i << RM9200_USB_DIV_SHIFT);
+
return 0;
}
}
@@ -341,7 +335,7 @@ static const struct clk_ops at91rm9200_usb_ops = {
};
static struct clk * __init
-at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
+at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name, const u32 *divisors)
{
struct at91rm9200_clk_usb *usb;
@@ -359,7 +353,7 @@ at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
init.flags = CLK_SET_RATE_PARENT;
usb->hw.init = &init;
- usb->pmc = pmc;
+ usb->regmap = regmap;
memcpy(usb->divisors, divisors, sizeof(usb->divisors));
clk = clk_register(NULL, &usb->hw);
@@ -369,35 +363,42 @@ at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
return clk;
}
-void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
{
struct clk *clk;
- int num_parents;
+ unsigned int num_parents;
const char *parent_names[USB_SOURCE_MAX];
const char *name = np->name;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > USB_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
- clk = at91sam9x5_clk_register_usb(pmc, name, parent_names, num_parents);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91sam9x5_clk_register_usb(regmap, name, parent_names,
+ num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb",
+ of_at91sam9x5_clk_usb_setup);
-void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -405,20 +406,26 @@ void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
of_property_read_string(np, "clock-output-names", &name);
- clk = at91sam9n12_clk_register_usb(pmc, name, parent_name);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91sam9n12_clk_register_usb(regmap, name, parent_name);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb",
+ of_at91sam9n12_clk_usb_setup);
-void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
u32 divisors[4] = {0, 0, 0, 0};
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -430,9 +437,15 @@ void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
of_property_read_string(np, "clock-output-names", &name);
- clk = at91rm9200_clk_register_usb(pmc, name, parent_name, divisors);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb",
+ of_at91rm9200_clk_usb_setup);
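The sam9x5 USB clock divides the UTMI PLL output by OHCIUSBDIV + 1, rounding to the closest integer in both recalc and set_rate. A standalone sketch of the arithmetic; the 480 MHz UPLL rate is the usual configuration and is assumed here for illustration:

#include <stdio.h>

/* mirrors the kernel's DIV_ROUND_CLOSEST() for positive operands */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long parent_rate = 480000000;	/* UTMI PLL */
	unsigned long target = 48000000;	/* OHCI needs 48 MHz */
	unsigned long div = DIV_ROUND_CLOSEST(parent_rate, target);

	printf("OHCIUSBDIV field = %lu\n", div - 1);	/* 9 */
	printf("resulting rate   = %lu Hz\n",
	       DIV_ROUND_CLOSEST(parent_rate, div));	/* 48000000 */
	return 0;
}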
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index ca561e90a60f..61fcf399e58c 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -11,14 +11,9 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -26,37 +21,30 @@
struct clk_utmi {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
};
#define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
-static irqreturn_t clk_utmi_irq_handler(int irq, void *dev_id)
+static inline bool clk_utmi_ready(struct regmap *regmap)
{
- struct clk_utmi *utmi = (struct clk_utmi *)dev_id;
+ unsigned int status;
- wake_up(&utmi->wait);
- disable_irq_nosync(utmi->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_LOCKU;
}
static int clk_utmi_prepare(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- struct at91_pmc *pmc = utmi->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) | AT91_PMC_UPLLEN |
- AT91_PMC_UPLLCOUNT | AT91_PMC_BIASEN;
+ unsigned int uckr = AT91_PMC_UPLLEN | AT91_PMC_UPLLCOUNT |
+ AT91_PMC_BIASEN;
- pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr);
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU)) {
- enable_irq(utmi->irq);
- wait_event(utmi->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
- }
+ while (!clk_utmi_ready(utmi->regmap))
+ cpu_relax();
return 0;
}
@@ -64,18 +52,15 @@ static int clk_utmi_prepare(struct clk_hw *hw)
static int clk_utmi_is_prepared(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- struct at91_pmc *pmc = utmi->pmc;
- return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
+ return clk_utmi_ready(utmi->regmap);
}
static void clk_utmi_unprepare(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- struct at91_pmc *pmc = utmi->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN;
- pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, AT91_PMC_UPLLEN, 0);
}
static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
@@ -93,10 +78,9 @@ static const struct clk_ops utmi_ops = {
};
static struct clk * __init
-at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
+at91_clk_register_utmi(struct regmap *regmap,
const char *name, const char *parent_name)
{
- int ret;
struct clk_utmi *utmi;
struct clk *clk = NULL;
struct clk_init_data init;
@@ -112,52 +96,36 @@ at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
init.flags = CLK_SET_RATE_GATE;
utmi->hw.init = &init;
- utmi->pmc = pmc;
- utmi->irq = irq;
- init_waitqueue_head(&utmi->wait);
- irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
- ret = request_irq(utmi->irq, clk_utmi_irq_handler,
- IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
- if (ret) {
- kfree(utmi);
- return ERR_PTR(ret);
- }
+ utmi->regmap = regmap;
clk = clk_register(NULL, &utmi->hw);
- if (IS_ERR(clk)) {
- free_irq(utmi->irq, utmi);
+ if (IS_ERR(clk))
kfree(utmi);
- }
return clk;
}
-static void __init
-of_at91_clk_utmi_setup(struct device_node *np, struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
{
- unsigned int irq;
struct clk *clk;
const char *parent_name;
const char *name = np->name;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
return;
- clk = at91_clk_register_utmi(pmc, irq, name, parent_name);
+ clk = at91_clk_register_utmi(regmap, name, parent_name);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
return;
}
-
-void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
- struct at91_pmc *pmc)
-{
- of_at91_clk_utmi_setup(np, pmc);
-}
+CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi",
+ of_at91sam9x5_clk_utmi_setup);
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 8476b570779b..526df5ba042d 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -12,36 +12,13 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/of_irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <asm/proc-fns.h>
#include "pmc.h"
-void __iomem *at91_pmc_base;
-EXPORT_SYMBOL_GPL(at91_pmc_base);
-
-void at91rm9200_idle(void)
-{
- /*
- * Disable the processor clock. The processor will be automatically
- * re-enabled by an interrupt or by a reset.
- */
- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-}
-
-void at91sam9_idle(void)
-{
- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
- cpu_do_idle();
-}
-
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range)
{
@@ -64,402 +41,3 @@ int of_at91_get_clk_range(struct device_node *np, const char *propname,
return 0;
}
EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
-
-static void pmc_irq_mask(struct irq_data *d)
-{
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
- pmc_write(pmc, AT91_PMC_IDR, 1 << d->hwirq);
-}
-
-static void pmc_irq_unmask(struct irq_data *d)
-{
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
- pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq);
-}
-
-static int pmc_irq_set_type(struct irq_data *d, unsigned type)
-{
- if (type != IRQ_TYPE_LEVEL_HIGH) {
- pr_warn("PMC: type not supported (support only IRQ_TYPE_LEVEL_HIGH type)\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void pmc_irq_suspend(struct irq_data *d)
-{
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
- pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
- pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
-}
-
-static void pmc_irq_resume(struct irq_data *d)
-{
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
- pmc_write(pmc, AT91_PMC_IER, pmc->imr);
-}
-
-static struct irq_chip pmc_irq = {
- .name = "PMC",
- .irq_disable = pmc_irq_mask,
- .irq_mask = pmc_irq_mask,
- .irq_unmask = pmc_irq_unmask,
- .irq_set_type = pmc_irq_set_type,
- .irq_suspend = pmc_irq_suspend,
- .irq_resume = pmc_irq_resume,
-};
-
-static struct lock_class_key pmc_lock_class;
-
-static int pmc_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct at91_pmc *pmc = h->host_data;
-
- irq_set_lockdep_class(virq, &pmc_lock_class);
-
- irq_set_chip_and_handler(virq, &pmc_irq,
- handle_level_irq);
- irq_set_chip_data(virq, pmc);
-
- return 0;
-}
-
-static int pmc_irq_domain_xlate(struct irq_domain *d,
- struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq,
- unsigned int *out_type)
-{
- struct at91_pmc *pmc = d->host_data;
- const struct at91_pmc_caps *caps = pmc->caps;
-
- if (WARN_ON(intsize < 1))
- return -EINVAL;
-
- *out_hwirq = intspec[0];
-
- if (!(caps->available_irqs & (1 << *out_hwirq)))
- return -EINVAL;
-
- *out_type = IRQ_TYPE_LEVEL_HIGH;
-
- return 0;
-}
-
-static const struct irq_domain_ops pmc_irq_ops = {
- .map = pmc_irq_map,
- .xlate = pmc_irq_domain_xlate,
-};
-
-static irqreturn_t pmc_irq_handler(int irq, void *data)
-{
- struct at91_pmc *pmc = (struct at91_pmc *)data;
- unsigned long sr;
- int n;
-
- sr = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR);
- if (!sr)
- return IRQ_NONE;
-
- for_each_set_bit(n, &sr, BITS_PER_LONG)
- generic_handle_irq(irq_find_mapping(pmc->irqdomain, n));
-
- return IRQ_HANDLED;
-}
-
-static const struct at91_pmc_caps at91rm9200_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
- AT91_PMC_PCK3RDY,
-};
-
-static const struct at91_pmc_caps at91sam9260_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY,
-};
-
-static const struct at91_pmc_caps at91sam9g45_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY,
-};
-
-static const struct at91_pmc_caps at91sam9n12_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
-};
-
-static const struct at91_pmc_caps at91sam9x5_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
-};
-
-static const struct at91_pmc_caps sama5d2_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
- AT91_PMC_CFDEV | AT91_PMC_GCKRDY,
-};
-
-static const struct at91_pmc_caps sama5d3_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
- AT91_PMC_CFDEV,
-};
-
-static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
- void __iomem *regbase, int virq,
- const struct at91_pmc_caps *caps)
-{
- struct at91_pmc *pmc;
-
- if (!regbase || !virq || !caps)
- return NULL;
-
- at91_pmc_base = regbase;
-
- pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
- if (!pmc)
- return NULL;
-
- spin_lock_init(&pmc->lock);
- pmc->regbase = regbase;
- pmc->virq = virq;
- pmc->caps = caps;
-
- pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc);
-
- if (!pmc->irqdomain)
- goto out_free_pmc;
-
- pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
- if (request_irq(pmc->virq, pmc_irq_handler,
- IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
- goto out_remove_irqdomain;
-
- return pmc;
-
-out_remove_irqdomain:
- irq_domain_remove(pmc->irqdomain);
-out_free_pmc:
- kfree(pmc);
-
- return NULL;
-}
-
-static const struct of_device_id pmc_clk_ids[] __initconst = {
- /* Slow oscillator */
- {
- .compatible = "atmel,at91sam9260-clk-slow",
- .data = of_at91sam9260_clk_slow_setup,
- },
- /* Main clock */
- {
- .compatible = "atmel,at91rm9200-clk-main-osc",
- .data = of_at91rm9200_clk_main_osc_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-main-rc-osc",
- .data = of_at91sam9x5_clk_main_rc_osc_setup,
- },
- {
- .compatible = "atmel,at91rm9200-clk-main",
- .data = of_at91rm9200_clk_main_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-main",
- .data = of_at91sam9x5_clk_main_setup,
- },
- /* PLL clocks */
- {
- .compatible = "atmel,at91rm9200-clk-pll",
- .data = of_at91rm9200_clk_pll_setup,
- },
- {
- .compatible = "atmel,at91sam9g45-clk-pll",
- .data = of_at91sam9g45_clk_pll_setup,
- },
- {
- .compatible = "atmel,at91sam9g20-clk-pllb",
- .data = of_at91sam9g20_clk_pllb_setup,
- },
- {
- .compatible = "atmel,sama5d3-clk-pll",
- .data = of_sama5d3_clk_pll_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-plldiv",
- .data = of_at91sam9x5_clk_plldiv_setup,
- },
- /* Master clock */
- {
- .compatible = "atmel,at91rm9200-clk-master",
- .data = of_at91rm9200_clk_master_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-master",
- .data = of_at91sam9x5_clk_master_setup,
- },
- /* System clocks */
- {
- .compatible = "atmel,at91rm9200-clk-system",
- .data = of_at91rm9200_clk_sys_setup,
- },
- /* Peripheral clocks */
- {
- .compatible = "atmel,at91rm9200-clk-peripheral",
- .data = of_at91rm9200_clk_periph_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-peripheral",
- .data = of_at91sam9x5_clk_periph_setup,
- },
- /* Programmable clocks */
- {
- .compatible = "atmel,at91rm9200-clk-programmable",
- .data = of_at91rm9200_clk_prog_setup,
- },
- {
- .compatible = "atmel,at91sam9g45-clk-programmable",
- .data = of_at91sam9g45_clk_prog_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-programmable",
- .data = of_at91sam9x5_clk_prog_setup,
- },
- /* UTMI clock */
-#if defined(CONFIG_HAVE_AT91_UTMI)
- {
- .compatible = "atmel,at91sam9x5-clk-utmi",
- .data = of_at91sam9x5_clk_utmi_setup,
- },
-#endif
- /* USB clock */
-#if defined(CONFIG_HAVE_AT91_USB_CLK)
- {
- .compatible = "atmel,at91rm9200-clk-usb",
- .data = of_at91rm9200_clk_usb_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-usb",
- .data = of_at91sam9x5_clk_usb_setup,
- },
- {
- .compatible = "atmel,at91sam9n12-clk-usb",
- .data = of_at91sam9n12_clk_usb_setup,
- },
-#endif
- /* SMD clock */
-#if defined(CONFIG_HAVE_AT91_SMD)
- {
- .compatible = "atmel,at91sam9x5-clk-smd",
- .data = of_at91sam9x5_clk_smd_setup,
- },
-#endif
-#if defined(CONFIG_HAVE_AT91_H32MX)
- {
- .compatible = "atmel,sama5d4-clk-h32mx",
- .data = of_sama5d4_clk_h32mx_setup,
- },
-#endif
-#if defined(CONFIG_HAVE_AT91_GENERATED_CLK)
- {
- .compatible = "atmel,sama5d2-clk-generated",
- .data = of_sama5d2_clk_generated_setup,
- },
-#endif
- { /*sentinel*/ }
-};
-
-static void __init of_at91_pmc_setup(struct device_node *np,
- const struct at91_pmc_caps *caps)
-{
- struct at91_pmc *pmc;
- struct device_node *childnp;
- void (*clk_setup)(struct device_node *, struct at91_pmc *);
- const struct of_device_id *clk_id;
- void __iomem *regbase = of_iomap(np, 0);
- int virq;
-
- if (!regbase)
- return;
-
- virq = irq_of_parse_and_map(np, 0);
- if (!virq)
- return;
-
- pmc = at91_pmc_init(np, regbase, virq, caps);
- if (!pmc)
- return;
- for_each_child_of_node(np, childnp) {
- clk_id = of_match_node(pmc_clk_ids, childnp);
- if (!clk_id)
- continue;
- clk_setup = clk_id->data;
- clk_setup(childnp, pmc);
- }
-}
-
-static void __init of_at91rm9200_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91rm9200_caps);
-}
-CLK_OF_DECLARE(at91rm9200_clk_pmc, "atmel,at91rm9200-pmc",
- of_at91rm9200_pmc_setup);
-
-static void __init of_at91sam9260_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91sam9260_caps);
-}
-CLK_OF_DECLARE(at91sam9260_clk_pmc, "atmel,at91sam9260-pmc",
- of_at91sam9260_pmc_setup);
-
-static void __init of_at91sam9g45_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91sam9g45_caps);
-}
-CLK_OF_DECLARE(at91sam9g45_clk_pmc, "atmel,at91sam9g45-pmc",
- of_at91sam9g45_pmc_setup);
-
-static void __init of_at91sam9n12_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91sam9n12_caps);
-}
-CLK_OF_DECLARE(at91sam9n12_clk_pmc, "atmel,at91sam9n12-pmc",
- of_at91sam9n12_pmc_setup);
-
-static void __init of_at91sam9x5_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91sam9x5_caps);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc",
- of_at91sam9x5_pmc_setup);
-
-static void __init of_sama5d2_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &sama5d2_caps);
-}
-CLK_OF_DECLARE(sama5d2_clk_pmc, "atmel,sama5d2-pmc",
- of_sama5d2_pmc_setup);
-
-static void __init of_sama5d3_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &sama5d3_caps);
-}
-CLK_OF_DECLARE(sama5d3_clk_pmc, "atmel,sama5d3-pmc",
- of_sama5d3_pmc_setup);
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index f65739272779..5771fff0ee3f 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -14,8 +14,11 @@
#include <linux/io.h>
#include <linux/irqdomain.h>
+#include <linux/regmap.h>
#include <linux/spinlock.h>
+extern spinlock_t pmc_pcr_lock;
+
struct clk_range {
unsigned long min;
unsigned long max;
@@ -23,102 +26,7 @@ struct clk_range {
#define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,}
-struct at91_pmc_caps {
- u32 available_irqs;
-};
-
-struct at91_pmc {
- void __iomem *regbase;
- int virq;
- spinlock_t lock;
- const struct at91_pmc_caps *caps;
- struct irq_domain *irqdomain;
- u32 imr;
-};
-
-static inline void pmc_lock(struct at91_pmc *pmc)
-{
- spin_lock(&pmc->lock);
-}
-
-static inline void pmc_unlock(struct at91_pmc *pmc)
-{
- spin_unlock(&pmc->lock);
-}
-
-static inline u32 pmc_read(struct at91_pmc *pmc, int offset)
-{
- return readl(pmc->regbase + offset);
-}
-
-static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value)
-{
- writel(value, pmc->regbase + offset);
-}
-
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range);
-void of_at91sam9260_clk_slow_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_main_osc_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91rm9200_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9g45_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9g20_clk_pllb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_sama5d3_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_sys_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9g45_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91sam9x5_clk_utmi_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9n12_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91sam9x5_clk_smd_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_sama5d4_clk_h32mx_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_sama5d2_clk_generated_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
#endif /* __PMC_H_ */
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index e4f89e28b5ec..3a177ade6e6c 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -38,8 +38,8 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
- if (!reg)
- return -ENODEV;
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
onecell = devm_kmalloc(dev, sizeof(*onecell), GFP_KERNEL);
if (!onecell)
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 015e687ffabe..c74ed3fd496d 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -88,10 +88,23 @@
#define CM_HSMDIV 0x08c
#define CM_OTPCTL 0x090
#define CM_OTPDIV 0x094
+#define CM_PCMCTL 0x098
+#define CM_PCMDIV 0x09c
#define CM_PWMCTL 0x0a0
#define CM_PWMDIV 0x0a4
+#define CM_SLIMCTL 0x0a8
+#define CM_SLIMDIV 0x0ac
#define CM_SMICTL 0x0b0
#define CM_SMIDIV 0x0b4
+/* no definition for 0x0b8 and 0x0bc */
+#define CM_TCNTCTL 0x0c0
+#define CM_TCNTDIV 0x0c4
+#define CM_TECCTL 0x0c8
+#define CM_TECDIV 0x0cc
+#define CM_TD0CTL 0x0d0
+#define CM_TD0DIV 0x0d4
+#define CM_TD1CTL 0x0d8
+#define CM_TD1DIV 0x0dc
#define CM_TSENSCTL 0x0e0
#define CM_TSENSDIV 0x0e4
#define CM_TIMERCTL 0x0e8
@@ -311,21 +324,18 @@ void __init bcm2835_init_clocks(void)
struct clk *clk;
int ret;
- clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT,
- 126000000);
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 126000000);
if (IS_ERR(clk))
pr_err("apb_pclk not registered\n");
- clk = clk_register_fixed_rate(NULL, "uart0_pclk", NULL, CLK_IS_ROOT,
- 3000000);
+ clk = clk_register_fixed_rate(NULL, "uart0_pclk", NULL, 0, 3000000);
if (IS_ERR(clk))
pr_err("uart0_pclk not registered\n");
ret = clk_register_clkdev(clk, NULL, "20201000.uart");
if (ret)
pr_err("uart0_pclk alias not registered\n");
- clk = clk_register_fixed_rate(NULL, "uart1_pclk", NULL, CLK_IS_ROOT,
- 125000000);
+ clk = clk_register_fixed_rate(NULL, "uart1_pclk", NULL, 0, 125000000);
if (IS_ERR(clk))
pr_err("uart1_pclk not registered\n");
ret = clk_register_clkdev(clk, NULL, "20215000.uart");
@@ -1060,16 +1070,7 @@ static long bcm2835_pll_divider_round_rate(struct clk_hw *hw,
static unsigned long bcm2835_pll_divider_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
- struct bcm2835_cprman *cprman = divider->cprman;
- const struct bcm2835_pll_divider_data *data = divider->data;
- u32 div = cprman_read(cprman, data->a2w_reg);
-
- div &= (1 << A2W_PLL_DIV_BITS) - 1;
- if (div == 0)
- div = 256;
-
- return parent_rate / div;
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
}
static void bcm2835_pll_divider_off(struct clk_hw *hw)
@@ -1107,13 +1108,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
struct bcm2835_cprman *cprman = divider->cprman;
const struct bcm2835_pll_divider_data *data = divider->data;
- u32 cm;
- int ret;
+ u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
- ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
- if (ret)
- return ret;
+ div = DIV_ROUND_UP_ULL(parent_rate, rate);
+
+ div = min(div, max_div);
+ if (div == max_div)
+ div = 0;
+ cprman_write(cprman, data->a2w_reg, div);
cm = cprman_read(cprman, data->cm_reg);
cprman_write(cprman, data->cm_reg, cm | data->load_mask);
cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
@@ -1428,7 +1431,7 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
divider->div.reg = cprman->regs + data->a2w_reg;
divider->div.shift = A2W_PLL_DIV_SHIFT;
divider->div.width = A2W_PLL_DIV_BITS;
- divider->div.flags = 0;
+ divider->div.flags = CLK_DIVIDER_MAX_AT_ZERO;
divider->div.lock = &cprman->regs_lock;
divider->div.hw.init = &init;
divider->div.table = NULL;
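The bcm2835 PLL divider hunk replaces the open-coded "register value 0 means divide by 256" rule with the generic divider's CLK_DIVIDER_MAX_AT_ZERO flag, and set_rate now stores 0 when the computed divider hits the field maximum. A standalone sketch of that encoding; DIV_BITS mirrors A2W_PLL_DIV_BITS, which the removed code implies is 8:

#include <stdio.h>

#define DIV_BITS 8	/* assumption: A2W_PLL_DIV_BITS in the driver */

/* CLK_DIVIDER_MAX_AT_ZERO: register value 0 encodes the max divider */
static unsigned int reg_to_div(unsigned int reg)
{
	unsigned int div = reg & ((1u << DIV_BITS) - 1);

	return div ? div : (1u << DIV_BITS);
}

static unsigned int div_to_reg(unsigned int div)
{
	return (div == (1u << DIV_BITS)) ? 0 : div;
}

int main(void)
{
	printf("reg 0   -> div %u\n", reg_to_div(0));	/* 256 */
	printf("div 256 -> reg %u\n", div_to_reg(256));	/* 0 */
	printf("reg 13  -> div %u\n", reg_to_div(13));	/* 13 */
	return 0;
}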
diff --git a/drivers/clk/bcm/clk-cygnus.c b/drivers/clk/bcm/clk-cygnus.c
index 3a228b6d4fee..464fdc4bc66b 100644
--- a/drivers/clk/bcm/clk-cygnus.c
+++ b/drivers/clk/bcm/clk-cygnus.c
@@ -268,3 +268,62 @@ static void __init cygnus_asiu_init(struct device_node *node)
iproc_asiu_setup(node, asiu_div, asiu_gate, ARRAY_SIZE(asiu_div));
}
CLK_OF_DECLARE(cygnus_asiu_clk, "brcm,cygnus-asiu-clk", cygnus_asiu_init);
+
+/*
+ * AUDIO PLL VCO frequency parameter table
+ *
+ * PLL output frequency = (ndiv_int + ndiv_frac / 2^20) *
+ * (parent clock rate / pdiv)
+ *
+ * On Cygnus, parent is the 25MHz oscillator
+ */
+static const struct iproc_pll_vco_param audiopll_vco_params[] = {
+ /* rate (Hz) ndiv_int ndiv_frac pdiv */
+ { 1354750204UL, 54, 199238, 1 },
+ { 1769470191UL, 70, 816639, 1 },
+};
+
+static const struct iproc_pll_ctrl audiopll = {
+ .flags = IPROC_CLK_PLL_NEEDS_SW_CFG | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_USER_MODE_ON | IPROC_CLK_PLL_RESET_ACTIVE_LOW,
+ .reset = RESET_VAL(0x5c, 0, 1),
+ .dig_filter = DF_VAL(0x48, 0, 3, 6, 4, 3, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 0),
+ .ndiv_int = REG_VAL(0x8, 0, 10),
+ .ndiv_frac = REG_VAL(0x8, 10, 20),
+ .pdiv = REG_VAL(0x44, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x0c, 0x10),
+ .status = REG_VAL(0x54, 0, 1),
+ .macro_mode = REG_VAL(0x0, 0, 3),
+};
+
+static const struct iproc_clk_ctrl audiopll_clk[] = {
+ [BCM_CYGNUS_AUDIOPLL_CH0] = {
+ .channel = BCM_CYGNUS_AUDIOPLL_CH0,
+ .flags = IPROC_CLK_AON |
+ IPROC_CLK_MCLK_DIV_BY_2,
+ .enable = ENABLE_VAL(0x14, 8, 10, 9),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_CYGNUS_AUDIOPLL_CH1] = {
+ .channel = BCM_CYGNUS_AUDIOPLL_CH1,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x18, 8, 10, 9),
+ .mdiv = REG_VAL(0x18, 0, 8),
+ },
+ [BCM_CYGNUS_AUDIOPLL_CH2] = {
+ .channel = BCM_CYGNUS_AUDIOPLL_CH2,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x1c, 8, 10, 9),
+ .mdiv = REG_VAL(0x1c, 0, 8),
+ },
+};
+
+static void __init cygnus_audiopll_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &audiopll, audiopll_vco_params,
+ ARRAY_SIZE(audiopll_vco_params), audiopll_clk,
+ ARRAY_SIZE(audiopll_clk));
+}
+CLK_OF_DECLARE(cygnus_audiopll, "brcm,cygnus-audiopll",
+ cygnus_audiopll_clk_init);
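The VCO table above is exact 2^-20 fixed-point: each rate equals (ndiv_int + ndiv_frac / 2^20) * (parent / pdiv) with the 25 MHz oscillator as parent. A standalone check of the first table entry:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t parent = 25000000;	/* 25 MHz oscillator */
	uint64_t ndiv_int = 54, ndiv_frac = 199238, pdiv = 1;

	/* rate = (ndiv_int + ndiv_frac / 2^20) * (parent / pdiv) */
	uint64_t rate = ((ndiv_int << 20) + ndiv_frac) * (parent / pdiv) >> 20;

	printf("audiopll VCO = %llu Hz\n", (unsigned long long)rate);
	/* prints 1354750204, matching the first audiopll_vco_params entry */
	return 0;
}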
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index afd5891ac9e6..fd492a5dad12 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -25,6 +25,12 @@
#define PLL_VCO_HIGH_SHIFT 19
#define PLL_VCO_LOW_SHIFT 30
+/*
+ * PLL MACRO_SELECT modes 0 to 5 choose pre-calculated PLL output frequencies
+ * from a look-up table. Mode 7 allows the user to manipulate the PLL
+ * clock dividers directly.
+ */
+#define PLL_USER_MODE 7
+
/* number of delay loops waiting for PLL to lock */
#define LOCK_DELAY 100
@@ -215,7 +221,10 @@ static void __pll_put_in_reset(struct iproc_pll *pll)
const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
val = readl(pll->control_base + reset->offset);
- val &= ~(1 << reset->reset_shift | 1 << reset->p_reset_shift);
+ if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
+ val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
+ else
+ val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
@@ -236,7 +245,10 @@ static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
iproc_pll_write(pll, pll->control_base, dig_filter->offset, val);
val = readl(pll->control_base + reset->offset);
- val |= 1 << reset->reset_shift | 1 << reset->p_reset_shift;
+ if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
+ val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
+ else
+ val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
@@ -292,6 +304,16 @@ static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
/* put PLL in reset */
__pll_put_in_reset(pll);
+ /* set PLL in user mode before modifying PLL controls */
+ if (ctrl->flags & IPROC_CLK_PLL_USER_MODE_ON) {
+ val = readl(pll->control_base + ctrl->macro_mode.offset);
+ val &= ~(bit_mask(ctrl->macro_mode.width) <<
+ ctrl->macro_mode.shift);
+ val |= PLL_USER_MODE << ctrl->macro_mode.shift;
+ iproc_pll_write(pll, pll->control_base,
+ ctrl->macro_mode.offset, val);
+ }
+
iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.u_offset, 0);
val = readl(pll->control_base + ctrl->vco_ctrl.l_offset);
@@ -505,7 +527,10 @@ static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
if (mdiv == 0)
mdiv = 256;
- clk->rate = parent_rate / mdiv;
+ if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
+ clk->rate = parent_rate / (mdiv * 2);
+ else
+ clk->rate = parent_rate / mdiv;
return clk->rate;
}
@@ -543,7 +568,10 @@ static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate == 0 || parent_rate == 0)
return -EINVAL;
- div = DIV_ROUND_UP(parent_rate, rate);
+ if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
+ div = DIV_ROUND_UP(parent_rate, rate * 2);
+ else
+ div = DIV_ROUND_UP(parent_rate, rate);
if (div > 256)
return -EINVAL;
@@ -555,7 +583,10 @@ static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val |= div << ctrl->mdiv.shift;
}
iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);
- clk->rate = parent_rate / div;
+ if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
+ clk->rate = parent_rate / (div * 2);
+ else
+ clk->rate = parent_rate / div;
return 0;
}
diff --git a/drivers/clk/bcm/clk-iproc.h b/drivers/clk/bcm/clk-iproc.h
index 8988de70a98c..2148b4ea9f28 100644
--- a/drivers/clk/bcm/clk-iproc.h
+++ b/drivers/clk/bcm/clk-iproc.h
@@ -61,6 +61,26 @@
#define IPROC_CLK_PLL_SPLIT_STAT_CTRL BIT(6)
/*
+ * Some PLLs have an additional divide-by-2 stage in the master clock
+ * calculation: MCLK = VCO_freq / (Mdiv * 2). Set this flag so the driver
+ * uses the modified calculation.
+ */
+#define IPROC_CLK_MCLK_DIV_BY_2 BIT(7)
+
+/*
+ * Some PLLs provide a look-up table for the leaf clock frequencies and
+ * automatically calculate the VCO frequency parameters from the requested
+ * leaf clock frequencies. They also have a user mode in which the divider
+ * controls are set directly by the user.
+ */
+#define IPROC_CLK_PLL_USER_MODE_ON BIT(8)
+
+/*
+ * Some PLLs have an active low reset
+ */
+#define IPROC_CLK_PLL_RESET_ACTIVE_LOW BIT(9)
+
+/*
* Parameters for VCO frequency configuration
*
* VCO frequency =
@@ -149,6 +169,7 @@ struct iproc_pll_ctrl {
struct iproc_clk_reg_op pdiv;
struct iproc_pll_vco_ctrl vco_ctrl;
struct iproc_clk_reg_op status;
+ struct iproc_clk_reg_op macro_mode;
};
/*
@@ -183,16 +204,16 @@ struct iproc_asiu_div {
unsigned int low_width;
};
-void __init iproc_armpll_setup(struct device_node *node);
-void __init iproc_pll_clk_setup(struct device_node *node,
- const struct iproc_pll_ctrl *pll_ctrl,
- const struct iproc_pll_vco_param *vco,
- unsigned int num_vco_entries,
- const struct iproc_clk_ctrl *clk_ctrl,
- unsigned int num_clks);
-void __init iproc_asiu_setup(struct device_node *node,
- const struct iproc_asiu_div *div,
- const struct iproc_asiu_gate *gate,
- unsigned int num_clks);
+void iproc_armpll_setup(struct device_node *node);
+void iproc_pll_clk_setup(struct device_node *node,
+ const struct iproc_pll_ctrl *pll_ctrl,
+ const struct iproc_pll_vco_param *vco,
+ unsigned int num_vco_entries,
+ const struct iproc_clk_ctrl *clk_ctrl,
+ unsigned int num_clks);
+void iproc_asiu_setup(struct device_node *node,
+ const struct iproc_asiu_div *div,
+ const struct iproc_asiu_gate *gate,
+ unsigned int num_clks);
#endif /* _CLK_IPROC_H */
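
The read-modify-write that pll_set_rate() performs on the MACRO_SELECT field reduces to the sketch below; shift and width come from the new macro_mode register descriptor, and 7 is PLL_USER_MODE:

    static inline u32 set_user_mode(u32 val, unsigned int shift,
                                    unsigned int width)
    {
            u32 mask = ((1U << width) - 1) << shift;

            return (val & ~mask) | (7U << shift);   /* PLL_USER_MODE */
    }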
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 3bcd42fbb55e..3294db3b4e4e 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -16,19 +16,8 @@
#include <linux/module.h>
#include <linux/err.h>
-#define AXI_CLKGEN_V1_REG_UPDATE_ENABLE 0x04
-#define AXI_CLKGEN_V1_REG_CLK_OUT1 0x08
-#define AXI_CLKGEN_V1_REG_CLK_OUT2 0x0c
-#define AXI_CLKGEN_V1_REG_CLK_DIV 0x10
-#define AXI_CLKGEN_V1_REG_CLK_FB1 0x14
-#define AXI_CLKGEN_V1_REG_CLK_FB2 0x18
-#define AXI_CLKGEN_V1_REG_LOCK1 0x1c
-#define AXI_CLKGEN_V1_REG_LOCK2 0x20
-#define AXI_CLKGEN_V1_REG_LOCK3 0x24
-#define AXI_CLKGEN_V1_REG_FILTER1 0x28
-#define AXI_CLKGEN_V1_REG_FILTER2 0x2c
-
#define AXI_CLKGEN_V2_REG_RESET 0x40
+#define AXI_CLKGEN_V2_REG_CLKSEL 0x44
#define AXI_CLKGEN_V2_REG_DRP_CNTRL 0x70
#define AXI_CLKGEN_V2_REG_DRP_STATUS 0x74
@@ -51,40 +40,11 @@
#define MMCM_REG_FILTER1 0x4e
#define MMCM_REG_FILTER2 0x4f
-struct axi_clkgen;
-
-struct axi_clkgen_mmcm_ops {
- void (*enable)(struct axi_clkgen *axi_clkgen, bool enable);
- int (*write)(struct axi_clkgen *axi_clkgen, unsigned int reg,
- unsigned int val, unsigned int mask);
- int (*read)(struct axi_clkgen *axi_clkgen, unsigned int reg,
- unsigned int *val);
-};
-
struct axi_clkgen {
void __iomem *base;
- const struct axi_clkgen_mmcm_ops *mmcm_ops;
struct clk_hw clk_hw;
};
-static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
- bool enable)
-{
- axi_clkgen->mmcm_ops->enable(axi_clkgen, enable);
-}
-
-static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val, unsigned int mask)
-{
- return axi_clkgen->mmcm_ops->write(axi_clkgen, reg, val, mask);
-}
-
-static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
-{
- return axi_clkgen->mmcm_ops->read(axi_clkgen, reg, val);
-}
-
static uint32_t axi_clkgen_lookup_filter(unsigned int m)
{
switch (m) {
@@ -207,70 +167,6 @@ static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
*val = readl(axi_clkgen->base + reg);
}
-static unsigned int axi_clkgen_v1_map_mmcm_reg(unsigned int reg)
-{
- switch (reg) {
- case MMCM_REG_CLKOUT0_1:
- return AXI_CLKGEN_V1_REG_CLK_OUT1;
- case MMCM_REG_CLKOUT0_2:
- return AXI_CLKGEN_V1_REG_CLK_OUT2;
- case MMCM_REG_CLK_FB1:
- return AXI_CLKGEN_V1_REG_CLK_FB1;
- case MMCM_REG_CLK_FB2:
- return AXI_CLKGEN_V1_REG_CLK_FB2;
- case MMCM_REG_CLK_DIV:
- return AXI_CLKGEN_V1_REG_CLK_DIV;
- case MMCM_REG_LOCK1:
- return AXI_CLKGEN_V1_REG_LOCK1;
- case MMCM_REG_LOCK2:
- return AXI_CLKGEN_V1_REG_LOCK2;
- case MMCM_REG_LOCK3:
- return AXI_CLKGEN_V1_REG_LOCK3;
- case MMCM_REG_FILTER1:
- return AXI_CLKGEN_V1_REG_FILTER1;
- case MMCM_REG_FILTER2:
- return AXI_CLKGEN_V1_REG_FILTER2;
- default:
- return 0;
- }
-}
-
-static int axi_clkgen_v1_mmcm_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val, unsigned int mask)
-{
- reg = axi_clkgen_v1_map_mmcm_reg(reg);
- if (reg == 0)
- return -EINVAL;
-
- axi_clkgen_write(axi_clkgen, reg, val);
-
- return 0;
-}
-
-static int axi_clkgen_v1_mmcm_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
-{
- reg = axi_clkgen_v1_map_mmcm_reg(reg);
- if (reg == 0)
- return -EINVAL;
-
- axi_clkgen_read(axi_clkgen, reg, val);
-
- return 0;
-}
-
-static void axi_clkgen_v1_mmcm_enable(struct axi_clkgen *axi_clkgen,
- bool enable)
-{
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V1_REG_UPDATE_ENABLE, enable);
-}
-
-static const struct axi_clkgen_mmcm_ops axi_clkgen_v1_mmcm_ops = {
- .write = axi_clkgen_v1_mmcm_write,
- .read = axi_clkgen_v1_mmcm_read,
- .enable = axi_clkgen_v1_mmcm_enable,
-};
-
static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
{
unsigned int timeout = 10000;
@@ -286,7 +182,7 @@ static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
return val & 0xffff;
}
-static int axi_clkgen_v2_mmcm_read(struct axi_clkgen *axi_clkgen,
+static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int *val)
{
unsigned int reg_val;
@@ -310,7 +206,7 @@ static int axi_clkgen_v2_mmcm_read(struct axi_clkgen *axi_clkgen,
return 0;
}
-static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen,
+static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int val, unsigned int mask)
{
unsigned int reg_val = 0;
@@ -321,7 +217,7 @@ static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen,
return ret;
if (mask != 0xffff) {
- axi_clkgen_v2_mmcm_read(axi_clkgen, reg, &reg_val);
+ axi_clkgen_mmcm_read(axi_clkgen, reg, &reg_val);
reg_val &= ~mask;
}
@@ -332,7 +228,7 @@ static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen,
return 0;
}
-static void axi_clkgen_v2_mmcm_enable(struct axi_clkgen *axi_clkgen,
+static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
bool enable)
{
unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
@@ -343,12 +239,6 @@ static void axi_clkgen_v2_mmcm_enable(struct axi_clkgen *axi_clkgen,
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_RESET, val);
}
-static const struct axi_clkgen_mmcm_ops axi_clkgen_v2_mmcm_ops = {
- .write = axi_clkgen_v2_mmcm_write,
- .read = axi_clkgen_v2_mmcm_read,
- .enable = axi_clkgen_v2_mmcm_enable,
-};
-
static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
{
return container_of(clk_hw, struct axi_clkgen, clk_hw);
@@ -438,10 +328,7 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
tmp = (unsigned long long)(parent_rate / d) * m;
do_div(tmp, dout);
- if (tmp > ULONG_MAX)
- return ULONG_MAX;
-
- return tmp;
+ return min_t(unsigned long long, tmp, ULONG_MAX);
}
static int axi_clkgen_enable(struct clk_hw *clk_hw)
@@ -460,21 +347,38 @@ static void axi_clkgen_disable(struct clk_hw *clk_hw)
axi_clkgen_mmcm_enable(axi_clkgen, false);
}
+static int axi_clkgen_set_parent(struct clk_hw *clk_hw, u8 index)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_CLKSEL, index);
+
+ return 0;
+}
+
+static u8 axi_clkgen_get_parent(struct clk_hw *clk_hw)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+ unsigned int parent;
+
+ axi_clkgen_read(axi_clkgen, AXI_CLKGEN_V2_REG_CLKSEL, &parent);
+
+ return parent;
+}
+
static const struct clk_ops axi_clkgen_ops = {
.recalc_rate = axi_clkgen_recalc_rate,
.round_rate = axi_clkgen_round_rate,
.set_rate = axi_clkgen_set_rate,
.enable = axi_clkgen_enable,
.disable = axi_clkgen_disable,
+ .set_parent = axi_clkgen_set_parent,
+ .get_parent = axi_clkgen_get_parent,
};
static const struct of_device_id axi_clkgen_ids[] = {
{
- .compatible = "adi,axi-clkgen-1.00.a",
- .data = &axi_clkgen_v1_mmcm_ops
- }, {
.compatible = "adi,axi-clkgen-2.00.a",
- .data = &axi_clkgen_v2_mmcm_ops,
},
{ },
};
@@ -485,10 +389,11 @@ static int axi_clkgen_probe(struct platform_device *pdev)
const struct of_device_id *id;
struct axi_clkgen *axi_clkgen;
struct clk_init_data init;
- const char *parent_name;
+ const char *parent_names[2];
const char *clk_name;
struct resource *mem;
struct clk *clk;
+ unsigned int i;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -501,26 +406,29 @@ static int axi_clkgen_probe(struct platform_device *pdev)
if (!axi_clkgen)
return -ENOMEM;
- axi_clkgen->mmcm_ops = id->data;
-
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(axi_clkgen->base))
return PTR_ERR(axi_clkgen->base);
- parent_name = of_clk_get_parent_name(pdev->dev.of_node, 0);
- if (!parent_name)
+ init.num_parents = of_clk_get_parent_count(pdev->dev.of_node);
+ if (init.num_parents < 1 || init.num_parents > 2)
return -EINVAL;
+ for (i = 0; i < init.num_parents; i++) {
+ parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i);
+ if (!parent_names[i])
+ return -EINVAL;
+ }
+
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
&clk_name);
init.name = clk_name;
init.ops = &axi_clkgen_ops;
- init.flags = CLK_SET_RATE_GATE;
- init.parent_names = &parent_name;
- init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+ init.parent_names = parent_names;
axi_clkgen_mmcm_enable(axi_clkgen, false);
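
With two parents accepted, a consumer can now retarget the MMCM input at runtime. A hedged sketch, with hypothetical consumer clock names:

    static int select_external_ref(struct device *dev)
    {
            struct clk *gen = devm_clk_get(dev, "clkgen");   /* hypothetical */
            struct clk *ref = devm_clk_get(dev, "ext-ref");  /* hypothetical */

            if (IS_ERR(gen) || IS_ERR(ref))
                    return -ENODEV;
            /* Ends up in axi_clkgen_set_parent(), which writes the parent
             * index to AXI_CLKGEN_V2_REG_CLKSEL. CLK_SET_PARENT_GATE means
             * the clock must be gated while switching. */
            return clk_set_parent(gen, ref);
    }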
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 4735de0660cc..1f903e1f86a2 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -19,8 +19,6 @@
#include <linux/err.h>
#include <linux/slab.h>
-#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
-
static u8 clk_composite_get_parent(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index ded3ff4b91b9..00e035b51c69 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -28,8 +28,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-
#define div_mask(width) ((1 << (width)) - 1)
static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
@@ -305,9 +303,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
*/
maxdiv = min(ULONG_MAX / rate, maxdiv);
- for (i = 1; i <= maxdiv; i = _next_div(table, i, flags)) {
- if (!_is_valid_div(table, i, flags))
- continue;
+ for (i = _next_div(table, 0, flags); i <= maxdiv;
+ i = _next_div(table, i, flags)) {
if (rate * i == parent_rate_saved) {
/*
* It's the most ideal case if the requested rate can be
@@ -423,6 +420,12 @@ const struct clk_ops clk_divider_ops = {
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
+const struct clk_ops clk_divider_ro_ops = {
+ .recalc_rate = clk_divider_recalc_rate,
+ .round_rate = clk_divider_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
+
static struct clk *_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -446,7 +449,10 @@ static struct clk *_register_divider(struct device *dev, const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &clk_divider_ops;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_divider_ops;
init.flags = flags | CLK_IS_BASIC;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
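
With CLK_DIVIDER_READ_ONLY set in clk_divider_flags, _register_divider() now picks clk_divider_ro_ops, so the framework can report but never change the divider. A hedged registration sketch (the register offset, shift and width are made up; base is the ioremapped register block):

    static DEFINE_SPINLOCK(lock);

    clk = clk_register_divider(NULL, "sys_div", "pll_out", 0,
                               base + 0x10, 4, 3,
                               CLK_DIVIDER_READ_ONLY, &lock);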
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index bac4553f04b8..22e4c659704e 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -36,7 +36,7 @@ static void __init efm32gg_cmu_init(struct device_node *np)
}
clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL,
- CLK_IS_ROOT, 48000000);
+ 0, 48000000);
clk[clk_HFPERCLKUSART0] = clk_register_gate(NULL, "HFPERCLK.USART0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 0, 0, NULL);
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 83de57aeceea..053448e2453d 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -23,8 +23,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
-
static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -102,6 +100,19 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
}
EXPORT_SYMBOL_GPL(clk_register_fixed_factor);
+void clk_unregister_fixed_factor(struct clk *clk)
+{
+ struct clk_hw *hw;
+
+ hw = __clk_get_hw(clk);
+ if (!hw)
+ return;
+
+ clk_unregister(clk);
+ kfree(to_clk_fixed_factor(hw));
+}
+EXPORT_SYMBOL_GPL(clk_unregister_fixed_factor);
+
#ifdef CONFIG_OF
/**
* of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
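
The new helper pairs with clk_register_fixed_factor() for drivers that create and destroy such clocks dynamically; a minimal sketch:

    struct clk *clk;

    clk = clk_register_fixed_factor(NULL, "div2", "parent", 0, 1, 2);
    if (!IS_ERR(clk)) {
            /* ... use the clock ... */
            clk_unregister_fixed_factor(clk);  /* unregisters and frees */
    }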
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index f85ec8d1711f..cd9dc925b3f8 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -26,8 +26,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
-
static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -106,6 +104,19 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
}
EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
+void clk_unregister_fixed_rate(struct clk *clk)
+{
+ struct clk_hw *hw;
+
+ hw = __clk_get_hw(clk);
+ if (!hw)
+ return;
+
+ clk_unregister(clk);
+ kfree(to_clk_fixed_rate(hw));
+}
+EXPORT_SYMBOL_GPL(clk_unregister_fixed_rate);
+
#ifdef CONFIG_OF
/**
* of_fixed_clk_setup() - Setup function for simple fixed rate clock
@@ -125,8 +136,7 @@ void of_fixed_clk_setup(struct device_node *node)
of_property_read_string(node, "clock-output-names", &clk_name);
clk = clk_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
- CLK_IS_ROOT, rate,
- accuracy);
+ 0, rate, accuracy);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 5c4955e33f7a..1abcd76b4993 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -16,8 +16,6 @@
#include <linux/slab.h>
#include <linux/rational.h>
-#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
-
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index de0b322f5f58..d0d8ec8e1f1b 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -26,8 +26,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
-
/*
* It works on following logic:
*
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 7b09a265d79f..08f65acc5d57 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -20,6 +20,8 @@
#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
/**
* DOC: basic gpio gated clock which can be enabled and disabled
@@ -31,8 +33,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
-
static int clk_gpio_gate_enable(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
@@ -201,134 +201,69 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
}
EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
-#ifdef CONFIG_OF
-/**
- * clk_register_get() has to be delayed, because -EPROBE_DEFER
- * can not be handled properly at of_clk_init() call time.
- */
-
-struct clk_gpio_delayed_register_data {
- const char *gpio_name;
- int num_parents;
- const char **parent_names;
- struct device_node *node;
- struct mutex lock;
- struct clk *clk;
- struct clk *(*clk_register_get)(const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned gpio, bool active_low);
-};
-
-static struct clk *of_clk_gpio_delayed_register_get(
- struct of_phandle_args *clkspec, void *_data)
+static int gpio_clk_driver_probe(struct platform_device *pdev)
{
- struct clk_gpio_delayed_register_data *data = _data;
- struct clk *clk;
+ struct device_node *node = pdev->dev.of_node;
+ const char **parent_names, *gpio_name;
+ unsigned int num_parents;
int gpio;
enum of_gpio_flags of_flags;
+ struct clk *clk;
+ bool active_low, is_mux;
- mutex_lock(&data->lock);
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents) {
+ parent_names = devm_kcalloc(&pdev->dev, num_parents,
+ sizeof(char *), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
- if (data->clk) {
- mutex_unlock(&data->lock);
- return data->clk;
+ of_clk_parent_fill(node, parent_names, num_parents);
+ } else {
+ parent_names = NULL;
}
- gpio = of_get_named_gpio_flags(data->node, data->gpio_name, 0,
- &of_flags);
+ is_mux = of_device_is_compatible(node, "gpio-mux-clock");
+
+ gpio_name = is_mux ? "select-gpios" : "enable-gpios";
+ gpio = of_get_named_gpio_flags(node, gpio_name, 0, &of_flags);
if (gpio < 0) {
- mutex_unlock(&data->lock);
if (gpio == -EPROBE_DEFER)
pr_debug("%s: %s: GPIOs not yet available, retry later\n",
- data->node->name, __func__);
+ node->name, __func__);
else
pr_err("%s: %s: Can't get '%s' DT property\n",
- data->node->name, __func__,
- data->gpio_name);
- return ERR_PTR(gpio);
+ node->name, __func__,
+ gpio_name);
+ return gpio;
}
- clk = data->clk_register_get(data->node->name, data->parent_names,
- data->num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
- if (IS_ERR(clk))
- goto out;
-
- data->clk = clk;
-out:
- mutex_unlock(&data->lock);
-
- return clk;
-}
-
-static struct clk *of_clk_gpio_gate_delayed_register_get(const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned gpio, bool active_low)
-{
- return clk_register_gpio_gate(NULL, name, parent_names ?
- parent_names[0] : NULL, gpio, active_low, 0);
-}
-
-static struct clk *of_clk_gpio_mux_delayed_register_get(const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low)
-{
- return clk_register_gpio_mux(NULL, name, parent_names, num_parents,
- gpio, active_low, 0);
-}
-
-static void __init of_gpio_clk_setup(struct device_node *node,
- const char *gpio_name,
- struct clk *(*clk_register_get)(const char *name,
- const char * const *parent_names,
- u8 num_parents,
- unsigned gpio, bool active_low))
-{
- struct clk_gpio_delayed_register_data *data;
- const char **parent_names;
- int i, num_parents;
-
- num_parents = of_clk_get_parent_count(node);
- if (num_parents < 0)
- num_parents = 0;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return;
-
- if (num_parents) {
- parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
- if (!parent_names) {
- kfree(data);
- return;
- }
-
- for (i = 0; i < num_parents; i++)
- parent_names[i] = of_clk_get_parent_name(node, i);
- } else {
- parent_names = NULL;
- }
+ active_low = of_flags & OF_GPIO_ACTIVE_LOW;
- data->num_parents = num_parents;
- data->parent_names = parent_names;
- data->node = node;
- data->gpio_name = gpio_name;
- data->clk_register_get = clk_register_get;
- mutex_init(&data->lock);
+ if (is_mux)
+ clk = clk_register_gpio_mux(&pdev->dev, node->name,
+ parent_names, num_parents, gpio, active_low, 0);
+ else
+ clk = clk_register_gpio_gate(&pdev->dev, node->name,
+ parent_names ? parent_names[0] : NULL, gpio,
+ active_low, 0);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
- of_clk_add_provider(node, of_clk_gpio_delayed_register_get, data);
+ return of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
-static void __init of_gpio_gate_clk_setup(struct device_node *node)
-{
- of_gpio_clk_setup(node, "enable-gpios",
- of_clk_gpio_gate_delayed_register_get);
-}
-CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup);
+static const struct of_device_id gpio_clk_match_table[] = {
+ { .compatible = "gpio-mux-clock" },
+ { .compatible = "gpio-gate-clock" },
+ { }
+};
-void __init of_gpio_mux_clk_setup(struct device_node *node)
-{
- of_gpio_clk_setup(node, "select-gpios",
- of_clk_gpio_mux_delayed_register_get);
-}
-CLK_OF_DECLARE(gpio_mux_clk, "gpio-mux-clock", of_gpio_mux_clk_setup);
-#endif
+static struct platform_driver gpio_clk_driver = {
+ .probe = gpio_clk_driver_probe,
+ .driver = {
+ .name = "gpio-clk",
+ .of_match_table = gpio_clk_match_table,
+ },
+};
+builtin_platform_driver(gpio_clk_driver);
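
builtin_platform_driver() registers the driver from a plain initcall; the hand-rolled equivalent it replaces is roughly:

    static int __init gpio_clk_driver_init(void)
    {
            return platform_driver_register(&gpio_clk_driver);
    }
    device_initcall(gpio_clk_driver_init);

Unlike the old CLK_OF_DECLARE() path, a platform driver's probe can return -EPROBE_DEFER and be retried once the GPIO controller appears, which is why the delayed-registration machinery above could be deleted.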
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 446c2fe76dc2..9b6f2772e948 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -38,17 +38,14 @@ static struct clk_init_data max77686_clks_init[MAX77686_CLKS_NUM] = {
[MAX77686_CLK_AP] = {
.name = "32khz_ap",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
[MAX77686_CLK_CP] = {
.name = "32khz_cp",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
[MAX77686_CLK_PMIC] = {
.name = "32khz_pmic",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
};
diff --git a/drivers/clk/clk-max77802.c b/drivers/clk/clk-max77802.c
index 4a89f7979ba0..355dd2e522c3 100644
--- a/drivers/clk/clk-max77802.c
+++ b/drivers/clk/clk-max77802.c
@@ -39,12 +39,10 @@ static struct clk_init_data max77802_clks_init[MAX77802_CLKS_NUM] = {
[MAX77802_CLK_32K_AP] = {
.name = "32khz_ap",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
[MAX77802_CLK_32K_CP] = {
.name = "32khz_cp",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
};
diff --git a/drivers/clk/clk-mb86s7x.c b/drivers/clk/clk-mb86s7x.c
index f39c25a22f43..e0817754ca3e 100644
--- a/drivers/clk/clk-mb86s7x.c
+++ b/drivers/clk/clk-mb86s7x.c
@@ -217,7 +217,7 @@ static struct clk *crg11_get(struct of_phandle_args *clkspec, void *data)
init.name = clkp;
init.num_parents = 0;
init.ops = &crg_port_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
crgclk->hw.init = &init;
crgclk->cntrlr = cntrlr;
crgclk->domain = domain;
@@ -341,7 +341,7 @@ struct clk *mb86s7x_clclk_register(struct device *cpu_dev)
init.name = dev_name(cpu_dev);
init.ops = &clk_clc_ops;
- init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.flags = CLK_GET_RATE_NOCACHE;
init.num_parents = 0;
return devm_clk_register(cpu_dev, &clc->hw);
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index fe7806506bf3..9e449c7b751c 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -14,8 +14,6 @@
#include <linux/of.h>
#include <linux/slab.h>
-#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
-
static unsigned long __get_mult(struct clk_multiplier *mult,
unsigned long rate,
unsigned long parent_rate)
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 5ed03c8a8df9..252188fd8bcd 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -26,8 +26,6 @@
* parent - parent is adjustable through clk_set_parent
*/
-#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 8e3039f0c3f9..9c0b8e6b1ab3 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -44,7 +44,7 @@ struct palmas_clock_info {
struct clk *clk;
struct clk_hw hw;
struct palmas *palmas;
- struct palmas_clk32k_desc *clk_desc;
+ const struct palmas_clk32k_desc *clk_desc;
int ext_control_pin;
};
@@ -125,10 +125,10 @@ static struct clk_ops palmas_clks_ops = {
struct palmas_clks_of_match_data {
struct clk_init_data init;
- struct palmas_clk32k_desc desc;
+ const struct palmas_clk32k_desc desc;
};
-static struct palmas_clks_of_match_data palmas_of_clk32kg = {
+static const struct palmas_clks_of_match_data palmas_of_clk32kg = {
.init = {
.name = "clk32kg",
.ops = &palmas_clks_ops,
@@ -144,7 +144,7 @@ static struct palmas_clks_of_match_data palmas_of_clk32kg = {
},
};
-static struct palmas_clks_of_match_data palmas_of_clk32kgaudio = {
+static const struct palmas_clks_of_match_data palmas_of_clk32kgaudio = {
.init = {
.name = "clk32kgaudio",
.ops = &palmas_clks_ops,
@@ -240,14 +240,14 @@ static int palmas_clks_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct device_node *node = pdev->dev.of_node;
- struct palmas_clks_of_match_data *match_data;
- const struct of_device_id *match;
+ const struct palmas_clks_of_match_data *match_data;
struct palmas_clock_info *cinfo;
struct clk *clk;
int ret;
- match = of_match_device(palmas_clks_of_match, &pdev->dev);
- match_data = (struct palmas_clks_of_match_data *)match->data;
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
+ return 1;
cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 328fcfcefd8c..883045814dac 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -95,7 +95,7 @@ static int clk_pwm_probe(struct platform_device *pdev)
init.name = clk_name;
init.ops = &clk_pwm_ops;
- init.flags = CLK_IS_BASIC | CLK_IS_ROOT;
+ init.flags = CLK_IS_BASIC;
init.num_parents = 0;
clk_pwm->pwm = pwm;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index d266299dfdb1..f8c83977c7fa 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -28,11 +28,6 @@
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/mfd/samsung/core.h>
-#define s2mps11_name(a) (a->hw.init->name)
-
-static struct clk **clk_table;
-static struct clk_onecell_data clk_data;
-
enum {
S2MPS11_CLK_AP = 0,
S2MPS11_CLK_CP,
@@ -99,52 +94,19 @@ static struct clk_ops s2mps11_clk_ops = {
.recalc_rate = s2mps11_clk_recalc_rate,
};
+/* This s2mps11_clks_init structure is common to s2mps11, s2mps13 and s2mps14 */
static struct clk_init_data s2mps11_clks_init[S2MPS11_CLKS_NUM] = {
[S2MPS11_CLK_AP] = {
.name = "s2mps11_ap",
.ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
},
[S2MPS11_CLK_CP] = {
.name = "s2mps11_cp",
.ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
},
[S2MPS11_CLK_BT] = {
.name = "s2mps11_bt",
.ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
-};
-
-static struct clk_init_data s2mps13_clks_init[S2MPS11_CLKS_NUM] = {
- [S2MPS11_CLK_AP] = {
- .name = "s2mps13_ap",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
- [S2MPS11_CLK_CP] = {
- .name = "s2mps13_cp",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
- [S2MPS11_CLK_BT] = {
- .name = "s2mps13_bt",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
-};
-
-static struct clk_init_data s2mps14_clks_init[S2MPS11_CLKS_NUM] = {
- [S2MPS11_CLK_AP] = {
- .name = "s2mps14_ap",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
- [S2MPS11_CLK_BT] = {
- .name = "s2mps14_bt",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
},
};
@@ -164,12 +126,9 @@ static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev,
return ERR_PTR(-EINVAL);
}
- for (i = 0; i < S2MPS11_CLKS_NUM; i++) {
- if (!clks_init[i].name)
- continue; /* Skip clocks not present in some devices */
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++)
of_property_read_string_index(clk_np, "clock-output-names", i,
&clks_init[i].name);
- }
return clk_np;
}
@@ -177,39 +136,38 @@ static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev,
static int s2mps11_clk_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct s2mps11_clk *s2mps11_clks, *s2mps11_clk;
+ struct s2mps11_clk *s2mps11_clks;
+ struct clk_onecell_data *clk_data;
unsigned int s2mps11_reg;
- struct clk_init_data *clks_init;
int i, ret = 0;
+ enum sec_device_type hwid = platform_get_device_id(pdev)->driver_data;
s2mps11_clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
- sizeof(*s2mps11_clk), GFP_KERNEL);
+ sizeof(*s2mps11_clks), GFP_KERNEL);
if (!s2mps11_clks)
return -ENOMEM;
- s2mps11_clk = s2mps11_clks;
+ clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
- clk_table = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
+ clk_data->clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
sizeof(struct clk *), GFP_KERNEL);
- if (!clk_table)
+ if (!clk_data->clks)
return -ENOMEM;
- switch(platform_get_device_id(pdev)->driver_data) {
+ switch (hwid) {
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
- clks_init = s2mps11_clks_init;
break;
case S2MPS13X:
s2mps11_reg = S2MPS13_REG_RTCCTRL;
- clks_init = s2mps13_clks_init;
break;
case S2MPS14X:
s2mps11_reg = S2MPS14_REG_RTCCTRL;
- clks_init = s2mps14_clks_init;
break;
case S5M8767X:
s2mps11_reg = S5M8767_REG_CTRL1;
- clks_init = s2mps11_clks_init;
break;
default:
dev_err(&pdev->dev, "Invalid device type\n");
@@ -217,46 +175,39 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
}
/* Store clocks of_node in first element of s2mps11_clks array */
- s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, clks_init);
+ s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, s2mps11_clks_init);
if (IS_ERR(s2mps11_clks->clk_np))
return PTR_ERR(s2mps11_clks->clk_np);
- for (i = 0; i < S2MPS11_CLKS_NUM; i++, s2mps11_clk++) {
- if (!clks_init[i].name)
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++) {
+ if (i == S2MPS11_CLK_CP && hwid == S2MPS14X)
continue; /* Skip clocks not present in some devices */
- s2mps11_clk->iodev = iodev;
- s2mps11_clk->hw.init = &clks_init[i];
- s2mps11_clk->mask = 1 << i;
- s2mps11_clk->reg = s2mps11_reg;
-
- s2mps11_clk->clk = devm_clk_register(&pdev->dev,
- &s2mps11_clk->hw);
- if (IS_ERR(s2mps11_clk->clk)) {
+ s2mps11_clks[i].iodev = iodev;
+ s2mps11_clks[i].hw.init = &s2mps11_clks_init[i];
+ s2mps11_clks[i].mask = 1 << i;
+ s2mps11_clks[i].reg = s2mps11_reg;
+
+ s2mps11_clks[i].clk = devm_clk_register(&pdev->dev,
+ &s2mps11_clks[i].hw);
+ if (IS_ERR(s2mps11_clks[i].clk)) {
dev_err(&pdev->dev, "Fail to register : %s\n",
- s2mps11_name(s2mps11_clk));
- ret = PTR_ERR(s2mps11_clk->clk);
+ s2mps11_clks_init[i].name);
+ ret = PTR_ERR(s2mps11_clks[i].clk);
goto err_reg;
}
- s2mps11_clk->lookup = clkdev_create(s2mps11_clk->clk,
- s2mps11_name(s2mps11_clk), NULL);
- if (!s2mps11_clk->lookup) {
+ s2mps11_clks[i].lookup = clkdev_create(s2mps11_clks[i].clk,
+ s2mps11_clks_init[i].name, NULL);
+ if (!s2mps11_clks[i].lookup) {
ret = -ENOMEM;
goto err_reg;
}
+ clk_data->clks[i] = s2mps11_clks[i].clk;
}
- for (i = 0; i < S2MPS11_CLKS_NUM; i++) {
- /* Skip clocks not present on S2MPS14 */
- if (!clks_init[i].name)
- continue;
- clk_table[i] = s2mps11_clks[i].clk;
- }
-
- clk_data.clks = clk_table;
- clk_data.clk_num = S2MPS11_CLKS_NUM;
+ clk_data->clk_num = S2MPS11_CLKS_NUM;
of_clk_add_provider(s2mps11_clks->clk_np, of_clk_src_onecell_get,
- &clk_data);
+ clk_data);
platform_set_drvdata(pdev, s2mps11_clks);
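
of_clk_src_onecell_get() resolves a consumer's clock specifier by indexing the registered array; a simplified sketch of its behaviour (the real helper lives in drivers/clk/clk.c):

    struct clk *onecell_get(struct of_phandle_args *clkspec, void *data)
    {
            struct clk_onecell_data *clk_data = data;
            unsigned int idx = clkspec->args[0];

            if (idx >= clk_data->clk_num)
                    return ERR_PTR(-EINVAL);
            return clk_data->clks[idx];
    }

Note that clk_data->clks[S2MPS11_CLK_CP] is left NULL on S2MPS14, so device trees for that PMIC are expected not to reference that index.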
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 89e9ca78bb94..6962ee5d1e9a 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -155,7 +155,7 @@ scpi_clk_ops_init(struct device *dev, const struct of_device_id *match,
unsigned long min = 0, max = 0;
init.name = name;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.num_parents = 0;
init.ops = match->data;
sclk->hw.init = &init;
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 6af7dce54241..ceef25b0990b 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -313,7 +313,7 @@ static int si514_probe(struct i2c_client *client,
return -ENOMEM;
init.ops = &si514_clk_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.num_parents = 0;
data->hw.init = &init;
data->i2c_client = client;
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 850316ac8831..b1bc12c045d3 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1495,7 +1495,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
if (drvdata->variant == SI5351_VARIANT_B) {
init.name = si5351_pll_names[2];
init.ops = &si5351_vxco_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
} else {
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index cf478aa9fa5d..d56648521a95 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -418,7 +418,7 @@ static int si570_probe(struct i2c_client *client,
return -ENOMEM;
init.ops = &si570_clk_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.num_parents = 0;
data->hw.init = &init;
data->i2c_client = client;
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 37e928846ec5..b0f76a84f1e9 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -355,7 +355,7 @@ CLK_OF_DECLARE(vt8500_device, "via,vt8500-device-clock", vtwm_device_clk_init);
#define WM8850_BITS_TO_VAL(m, d1, d2) \
((((m / 2) - 1) << 16) | ((d1 - 1) << 8) | d2)
-static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+static int vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *prediv)
{
unsigned long tclk;
@@ -365,7 +365,7 @@ static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
pr_err("%s: requested rate out of range\n", __func__);
*multiplier = 0;
*prediv = 1;
- return;
+ return -EINVAL;
}
if (rate <= parent_rate * 31)
/* use the prediv to double the resolution */
@@ -379,12 +379,15 @@ static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
if (tclk != rate)
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__,
rate, tclk);
+
+ return 0;
}
-static void wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+static int wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *divisor1, u32 *divisor2)
{
- u32 mul, div1, div2;
+ u32 mul, div1;
+ int div2;
u32 best_mul, best_div1, best_div2;
unsigned long tclk, rate_err, best_err;
@@ -403,7 +406,7 @@ static void wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
- return;
+ return 0;
}
if (rate_err < best_err) {
@@ -414,12 +417,19 @@ static void wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
}
}
+ if (best_err == (unsigned long)-1) {
+ pr_warn("%s: impossible rate %lu\n", __func__, rate);
+ return -EINVAL;
+ }
+
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
*multiplier = best_mul;
*divisor1 = best_div1;
*divisor2 = best_div2;
+
+ return 0;
}
static u32 wm8750_get_filter(u32 parent_rate, u32 divisor1)
@@ -449,10 +459,11 @@ static u32 wm8750_get_filter(u32 parent_rate, u32 divisor1)
return 0;
}
-static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+static int wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *filter, u32 *multiplier, u32 *divisor1, u32 *divisor2)
{
- u32 mul, div1, div2;
+ u32 mul;
+ int div1, div2;
u32 best_mul, best_div1, best_div2;
unsigned long tclk, rate_err, best_err;
@@ -472,7 +483,7 @@ static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
- return;
+ return 0;
}
if (rate_err < best_err) {
@@ -483,6 +494,11 @@ static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
}
}
+ if (best_err == (unsigned long)-1) {
+ pr_warn("%s: impossible rate %lu\n", __func__, rate);
+ return -EINVAL;
+ }
+
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
@@ -491,12 +507,15 @@ static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = best_mul;
*divisor1 = best_div1;
*divisor2 = best_div2;
+
+ return 0;
}
-static void wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+static int wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *divisor1, u32 *divisor2)
{
- u32 mul, div1, div2;
+ u32 mul;
+ int div1, div2;
u32 best_mul, best_div1, best_div2;
unsigned long tclk, rate_err, best_err;
@@ -516,7 +535,7 @@ static void wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
- return;
+ return 0;
}
if (rate_err < best_err) {
@@ -527,6 +546,11 @@ static void wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
}
}
+ if (best_err == (unsigned long)-1) {
+ pr_warn("%s: impossible rate %lu\n", __func__, rate);
+ return -EINVAL;
+ }
+
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
@@ -534,6 +558,8 @@ static void wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = best_mul;
*divisor1 = best_div1;
*divisor2 = best_div2;
+
+ return 0;
}
static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -543,31 +569,39 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
u32 filter, mul, div1, div2;
u32 pll_val;
unsigned long flags = 0;
+ int ret;
/* sanity check */
switch (pll->type) {
case PLL_TYPE_VT8500:
- vt8500_find_pll_bits(rate, parent_rate, &mul, &div1);
- pll_val = VT8500_BITS_TO_VAL(mul, div1);
+ ret = vt8500_find_pll_bits(rate, parent_rate, &mul, &div1);
+ if (!ret)
+ pll_val = VT8500_BITS_TO_VAL(mul, div1);
break;
case PLL_TYPE_WM8650:
- wm8650_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
- pll_val = WM8650_BITS_TO_VAL(mul, div1, div2);
+ ret = wm8650_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
+ if (!ret)
+ pll_val = WM8650_BITS_TO_VAL(mul, div1, div2);
break;
case PLL_TYPE_WM8750:
- wm8750_find_pll_bits(rate, parent_rate, &filter, &mul, &div1, &div2);
- pll_val = WM8750_BITS_TO_VAL(filter, mul, div1, div2);
+ ret = wm8750_find_pll_bits(rate, parent_rate, &filter, &mul, &div1, &div2);
+ if (!ret)
+ pll_val = WM8750_BITS_TO_VAL(filter, mul, div1, div2);
break;
case PLL_TYPE_WM8850:
- wm8850_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
- pll_val = WM8850_BITS_TO_VAL(mul, div1, div2);
+ ret = wm8850_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
+ if (!ret)
+ pll_val = WM8850_BITS_TO_VAL(mul, div1, div2);
break;
default:
pr_err("%s: invalid pll type\n", __func__);
- return 0;
+ ret = -EINVAL;
}
+ if (ret)
+ return ret;
+
spin_lock_irqsave(pll->lock, flags);
vt8500_pmc_wait_busy();
@@ -585,28 +619,36 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
struct clk_pll *pll = to_clk_pll(hw);
u32 filter, mul, div1, div2;
long round_rate;
+ int ret;
switch (pll->type) {
case PLL_TYPE_VT8500:
- vt8500_find_pll_bits(rate, *prate, &mul, &div1);
- round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
+ ret = vt8500_find_pll_bits(rate, *prate, &mul, &div1);
+ if (!ret)
+ round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
break;
case PLL_TYPE_WM8650:
- wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
- round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
+ ret = wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ if (!ret)
+ round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
case PLL_TYPE_WM8750:
- wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
- round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+ ret = wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
+ if (!ret)
+ round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
case PLL_TYPE_WM8850:
- wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
- round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
+ ret = wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ if (!ret)
+ round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
default:
- round_rate = 0;
+ ret = -EINVAL;
}
+ if (ret)
+ return ret;
+
return round_rate;
}
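
The conversion pattern used throughout this file, in miniature: the finders now return 0 or -EINVAL instead of void, the outputs (pll_val, round_rate) are written only on success, and the default: arms set ret = -EINVAL so the common if (ret) guard also rejects unknown PLL types. A hedged caller sketch:

    static int apply_rate(unsigned long rate, unsigned long parent_rate)
    {
            u32 mul, prediv, pll_val;
            int ret = vt8500_find_pll_bits(rate, parent_rate, &mul, &prediv);

            if (!ret)
                    pll_val = VT8500_BITS_TO_VAL(mul, prediv);
            if (ret)
                    return ret;     /* pll_val is only read on success */
            /* ... program pll_val into the PMC ... */
            return 0;
    }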
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 10224b01b97c..d73450b60b28 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -29,7 +29,9 @@
#include <linux/of_address.h>
/* Register SCU_PCPPLL bit fields */
-#define N_DIV_RD(src) (((src) & 0x000001ff))
+#define N_DIV_RD(src) ((src) & 0x000001ff)
+#define SC_N_DIV_RD(src) ((src) & 0x0000007f)
+#define SC_OUTDIV2(src) (((src) & 0x00000100) >> 8)
/* Register SCU_SOCPLL bit fields */
#define CLKR_RD(src) (((src) & 0x07000000)>>24)
@@ -48,7 +50,7 @@ static inline u32 xgene_clk_read(void __iomem *csr)
static inline void xgene_clk_write(u32 data, void __iomem *csr)
{
- return writel_relaxed(data, csr);
+ writel_relaxed(data, csr);
}
/* PLL Clock */
@@ -63,6 +65,7 @@ struct xgene_clk_pll {
spinlock_t *lock;
u32 pll_offset;
enum xgene_pll_type type;
+ int version;
};
#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)
@@ -92,27 +95,37 @@ static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
- if (pllclk->type == PLL_TYPE_PCP) {
- /*
- * PLL VCO = Reference clock * NF
- * PCP PLL = PLL_VCO / 2
- */
- nout = 2;
- fvco = parent_rate * (N_DIV_RD(pll) + 4);
+ if (pllclk->version <= 1) {
+ if (pllclk->type == PLL_TYPE_PCP) {
+ /*
+ * PLL VCO = Reference clock * NF
+ * PCP PLL = PLL_VCO / 2
+ */
+ nout = 2;
+ fvco = parent_rate * (N_DIV_RD(pll) + 4);
+ } else {
+ /*
+ * Fref = Reference Clock / NREF;
+ * Fvco = Fref * NFB;
+ * Fout = Fvco / NOUT;
+ */
+ nref = CLKR_RD(pll) + 1;
+ nout = CLKOD_RD(pll) + 1;
+ nfb = CLKF_RD(pll);
+ fref = parent_rate / nref;
+ fvco = fref * nfb;
+ }
} else {
/*
- * Fref = Reference Clock / NREF;
- * Fvco = Fref * NFB;
- * Fout = Fvco / NOUT;
+ * fvco = Reference clock * FBDIVC
+ * PLL freq = fvco / NOUT
*/
- nref = CLKR_RD(pll) + 1;
- nout = CLKOD_RD(pll) + 1;
- nfb = CLKF_RD(pll);
- fref = parent_rate / nref;
- fvco = fref * nfb;
+ nout = SC_OUTDIV2(pll) ? 2 : 3;
+ fvco = parent_rate * SC_N_DIV_RD(pll);
}
- pr_debug("%s pll recalc rate %ld parent %ld\n", clk_hw_get_name(hw),
- fvco / nout, parent_rate);
+ pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
+ clk_hw_get_name(hw), fvco / nout, parent_rate,
+ pllclk->version);
return fvco / nout;
}
@@ -125,7 +138,7 @@ static const struct clk_ops xgene_clk_pll_ops = {
static struct clk *xgene_register_clk_pll(struct device *dev,
const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u32 pll_offset,
- u32 type, spinlock_t *lock)
+ u32 type, spinlock_t *lock, int version)
{
struct xgene_clk_pll *apmclk;
struct clk *clk;
@@ -144,6 +157,7 @@ static struct clk *xgene_register_clk_pll(struct device *dev,
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
+ apmclk->version = version;
apmclk->reg = reg;
apmclk->lock = lock;
apmclk->pll_offset = pll_offset;
@@ -160,26 +174,37 @@ static struct clk *xgene_register_clk_pll(struct device *dev,
return clk;
}
+static int xgene_pllclk_version(struct device_node *np)
+{
+ if (of_device_is_compatible(np, "apm,xgene-socpll-clock"))
+ return 1;
+ if (of_device_is_compatible(np, "apm,xgene-pcppll-clock"))
+ return 1;
+ return 2;
+}
+
static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
{
- const char *clk_name = np->full_name;
- struct clk *clk;
- void __iomem *reg;
+ const char *clk_name = np->full_name;
+ struct clk *clk;
+ void __iomem *reg;
+ int version = xgene_pllclk_version(np);
- reg = of_iomap(np, 0);
- if (reg == NULL) {
- pr_err("Unable to map CSR register for %s\n", np->full_name);
- return;
- }
- of_property_read_string(np, "clock-output-names", &clk_name);
- clk = xgene_register_clk_pll(NULL,
- clk_name, of_clk_get_parent_name(np, 0),
- CLK_IS_ROOT, reg, 0, pll_type, &clk_lock);
- if (!IS_ERR(clk)) {
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
- pr_debug("Add %s clock PLL\n", clk_name);
- }
+ reg = of_iomap(np, 0);
+ if (reg == NULL) {
+ pr_err("Unable to map CSR register for %s\n", np->full_name);
+ return;
+ }
+ of_property_read_string(np, "clock-output-names", &clk_name);
+ clk = xgene_register_clk_pll(NULL,
+ clk_name, of_clk_get_parent_name(np, 0),
+ CLK_IS_ROOT, reg, 0, pll_type, &clk_lock,
+ version);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+ pr_debug("Add %s clock PLL\n", clk_name);
+ }
}
static void xgene_socpllclk_init(struct device_node *np)
@@ -351,8 +376,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
/* Set new divider */
data = xgene_clk_read(pclk->param.divider_reg +
pclk->param.reg_divider_offset);
- data &= ~((1 << pclk->param.reg_divider_width) - 1)
- << pclk->param.reg_divider_shift;
+ data &= ~(((1 << pclk->param.reg_divider_width) - 1)
+ << pclk->param.reg_divider_shift);
data |= divider;
xgene_clk_write(data, pclk->param.divider_reg +
pclk->param.reg_divider_offset);
@@ -460,7 +485,7 @@ static void __init xgene_devclk_init(struct device_node *np)
rc = of_address_to_resource(np, i, &res);
if (rc != 0) {
if (i == 0) {
- pr_err("no DTS register for %s\n",
+ pr_err("no DTS register for %s\n",
np->full_name);
return;
}
@@ -518,4 +543,8 @@ err:
CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
+CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
+ xgene_socpllclk_init);
+CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
+ xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);
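
A worked check of the v2 math added above, assuming a hypothetical 100 MHz reference:

    /* register value with feedback divider (SC_N_DIV_RD) = 16, OUTDIV2 set */
    unsigned long fvco = 100000000UL * 16;  /* 1.6 GHz VCO */
    unsigned long out  = fvco / 2;          /* 800 MHz PLL output */

With OUTDIV2 clear, nout is 3 and the output would be ~533 MHz instead.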
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index b4db67a446c8..fb74dc1f7520 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -350,13 +350,12 @@ static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
{
if (!core || index >= core->num_parents)
return NULL;
- else if (!core->parents)
- return clk_core_lookup(core->parent_names[index]);
- else if (!core->parents[index])
- return core->parents[index] =
- clk_core_lookup(core->parent_names[index]);
- else
- return core->parents[index];
+
+ if (!core->parents[index])
+ core->parents[index] =
+ clk_core_lookup(core->parent_names[index]);
+
+ return core->parents[index];
}
struct clk_hw *
@@ -386,7 +385,7 @@ static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
ret = core->rate;
- if (core->flags & CLK_IS_ROOT)
+ if (!core->num_parents)
goto out;
if (!core->parent)
@@ -1067,30 +1066,12 @@ static int clk_fetch_parent_index(struct clk_core *core,
{
int i;
- if (!core->parents) {
- core->parents = kcalloc(core->num_parents,
- sizeof(struct clk *), GFP_KERNEL);
- if (!core->parents)
- return -ENOMEM;
- }
-
- /*
- * find index of new parent clock using cached parent ptrs,
- * or if not yet cached, use string name comparison and cache
- * them now to avoid future calls to clk_core_lookup.
- */
- for (i = 0; i < core->num_parents; i++) {
- if (core->parents[i] == parent)
- return i;
-
- if (core->parents[i])
- continue;
+ if (!parent)
+ return -EINVAL;
- if (!strcmp(core->parent_names[i], parent->name)) {
- core->parents[i] = clk_core_lookup(parent->name);
+ for (i = 0; i < core->num_parents; i++)
+ if (clk_core_get_parent_by_index(core, i) == parent)
return i;
- }
- }
return -EINVAL;
}
@@ -1677,56 +1658,14 @@ struct clk *clk_get_parent(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_parent);
-/*
- * .get_parent is mandatory for clocks with multiple possible parents. It is
- * optional for single-parent clocks. Always call .get_parent if it is
- * available and WARN if it is missing for multi-parent clocks.
- *
- * For single-parent clocks without .get_parent, first check to see if the
- * .parents array exists, and if so use it to avoid an expensive tree
- * traversal. If .parents does not exist then walk the tree.
- */
static struct clk_core *__clk_init_parent(struct clk_core *core)
{
- struct clk_core *ret = NULL;
- u8 index;
+ u8 index = 0;
- /* handle the trivial cases */
+ if (core->num_parents > 1 && core->ops->get_parent)
+ index = core->ops->get_parent(core->hw);
- if (!core->num_parents)
- goto out;
-
- if (core->num_parents == 1) {
- if (IS_ERR_OR_NULL(core->parent))
- core->parent = clk_core_lookup(core->parent_names[0]);
- ret = core->parent;
- goto out;
- }
-
- if (!core->ops->get_parent) {
- WARN(!core->ops->get_parent,
- "%s: multi-parent clocks must implement .get_parent\n",
- __func__);
- goto out;
- }
-
- /*
- * Do our best to cache parent clocks in core->parents. This prevents
- * unnecessary and expensive lookups. We don't set core->parent here;
- * that is done by the calling function.
- */
-
- index = core->ops->get_parent(core->hw);
-
- if (!core->parents)
- core->parents =
- kcalloc(core->num_parents, sizeof(struct clk *),
- GFP_KERNEL);
-
- ret = clk_core_get_parent_by_index(core, index);
-
-out:
- return ret;
+ return clk_core_get_parent_by_index(core, index);
}
static void clk_core_reparent(struct clk_core *core,
@@ -1809,13 +1748,13 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
/* try finding the new parent index */
if (parent) {
p_index = clk_fetch_parent_index(core, parent);
- p_rate = parent->rate;
if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, core->name);
ret = p_index;
goto out;
}
+ p_rate = parent->rate;
}
/* propagate PRE_RATE_CHANGE notifications */
@@ -1902,6 +1841,10 @@ int clk_set_phase(struct clk *clk, int degrees)
clk_prepare_lock();
+ /* bail early if nothing to do */
+ if (degrees == clk->core->phase)
+ goto out;
+
trace_clk_set_phase(clk->core, degrees);
if (clk->core->ops->set_phase)
@@ -1912,6 +1855,7 @@ int clk_set_phase(struct clk *clk, int degrees)
if (!ret)
clk->core->phase = degrees;
+out:
clk_prepare_unlock();
return ret;
@@ -2218,7 +2162,7 @@ unlock:
*
* Dynamically removes a clk and all its child nodes from the
* debugfs clk directory if clk->dentry points to debugfs created by
- * clk_debug_register in __clk_init.
+ * clk_debug_register in __clk_core_init.
*/
static void clk_debug_unregister(struct clk_core *core)
{
@@ -2303,26 +2247,22 @@ static inline void clk_debug_unregister(struct clk_core *core)
#endif
/**
- * __clk_init - initialize the data structures in a struct clk
- * @dev: device initializing this clk, placeholder for now
- * @clk: clk being initialized
+ * __clk_core_init - initialize the data structures in a struct clk_core
+ * @core: clk_core being initialized
*
* Initializes the lists in struct clk_core, queries the hardware for the
* parent and rate and sets them both.
*/
-static int __clk_init(struct device *dev, struct clk *clk_user)
+static int __clk_core_init(struct clk_core *core)
{
int i, ret = 0;
struct clk_core *orphan;
struct hlist_node *tmp2;
- struct clk_core *core;
unsigned long rate;
- if (!clk_user)
+ if (!core)
return -EINVAL;
- core = clk_user->core;
-
clk_prepare_lock();
/* check to see if a clock with this name is already registered */
@@ -2337,22 +2277,29 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
if (core->ops->set_rate &&
!((core->ops->round_rate || core->ops->determine_rate) &&
core->ops->recalc_rate)) {
- pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
- __func__, core->name);
+ pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
+ __func__, core->name);
ret = -EINVAL;
goto out;
}
if (core->ops->set_parent && !core->ops->get_parent) {
- pr_warning("%s: %s must implement .get_parent & .set_parent\n",
- __func__, core->name);
+ pr_err("%s: %s must implement .get_parent & .set_parent\n",
+ __func__, core->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (core->num_parents > 1 && !core->ops->get_parent) {
+ pr_err("%s: %s must implement .get_parent as it has multi parents\n",
+ __func__, core->name);
ret = -EINVAL;
goto out;
}
if (core->ops->set_rate_and_parent &&
!(core->ops->set_parent && core->ops->set_rate)) {
- pr_warn("%s: %s must implement .set_parent & .set_rate\n",
+ pr_err("%s: %s must implement .set_parent & .set_rate\n",
__func__, core->name);
ret = -EINVAL;
goto out;
@@ -2364,37 +2311,12 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
"%s: invalid NULL in %s's .parent_names\n",
__func__, core->name);
- /*
- * Allocate an array of struct clk *'s to avoid unnecessary string
- * look-ups of clk's possible parents. This can fail for clocks passed
- * in to clk_init during early boot; thus any access to core->parents[]
- * must always check for a NULL pointer and try to populate it if
- * necessary.
- *
- * If core->parents is not NULL we skip this entire block. This allows
- * for clock drivers to statically initialize core->parents.
- */
- if (core->num_parents > 1 && !core->parents) {
- core->parents = kcalloc(core->num_parents, sizeof(struct clk *),
- GFP_KERNEL);
- /*
- * clk_core_lookup returns NULL for parents that have not been
- * clk_init'd; thus any access to clk->parents[] must check
- * for a NULL pointer. We can always perform lazy lookups for
- * missing parents later on.
- */
- if (core->parents)
- for (i = 0; i < core->num_parents; i++)
- core->parents[i] =
- clk_core_lookup(core->parent_names[i]);
- }
-
core->parent = __clk_init_parent(core);
/*
- * Populate core->parent if parent has already been __clk_init'd. If
- * parent has not yet been __clk_init'd then place clk in the orphan
- * list. If clk has set the CLK_IS_ROOT flag then place it in the root
+ * Populate core->parent if parent has already been clk_core_init'd. If
+ * parent has not yet been clk_core_init'd then place clk in the orphan
+ * list. If clk doesn't have any parents then place it in the root
* clk list.
*
* Every time a new clk is clk_init'd then we walk the list of orphan
@@ -2405,7 +2327,7 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
hlist_add_head(&core->child_node,
&core->parent->children);
core->orphan = core->parent->orphan;
- } else if (core->flags & CLK_IS_ROOT) {
+ } else if (!core->num_parents) {
hlist_add_head(&core->child_node, &clk_root_list);
core->orphan = false;
} else {
@@ -2454,24 +2376,15 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
core->rate = core->req_rate = rate;
/*
- * walk the list of orphan clocks and reparent any that are children of
- * this clock
+ * walk the list of orphan clocks and reparent any that have newly found
+ * a parent.
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
- if (orphan->num_parents && orphan->ops->get_parent) {
- i = orphan->ops->get_parent(orphan->hw);
- if (i >= 0 && i < orphan->num_parents &&
- !strcmp(core->name, orphan->parent_names[i]))
- clk_core_reparent(orphan, core);
- continue;
- }
+ struct clk_core *parent = __clk_init_parent(orphan);
- for (i = 0; i < orphan->num_parents; i++)
- if (!strcmp(core->name, orphan->parent_names[i])) {
- clk_core_reparent(orphan, core);
- break;
- }
- }
+ if (parent)
+ clk_core_reparent(orphan, parent);
+ }
/*
* optional platform-specific magic
@@ -2585,21 +2498,31 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
}
}
+ /* avoid unnecessary string look-ups of clk_core's possible parents. */
+ core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
+ GFP_KERNEL);
+ if (!core->parents) {
+ ret = -ENOMEM;
+ goto fail_parents;
+ }
+
INIT_HLIST_HEAD(&core->clks);
hw->clk = __clk_create_clk(hw, NULL, NULL);
if (IS_ERR(hw->clk)) {
ret = PTR_ERR(hw->clk);
- goto fail_parent_names_copy;
+ goto fail_parents;
}
- ret = __clk_init(dev, hw->clk);
+ ret = __clk_core_init(core);
if (!ret)
return hw->clk;
__clk_free_clk(hw->clk);
hw->clk = NULL;
+fail_parents:
+ kfree(core->parents);
fail_parent_names_copy:
while (--i >= 0)
kfree_const(core->parent_names[i]);
@@ -2683,7 +2606,7 @@ void clk_unregister(struct clk *clk)
if (clk->core->ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
- return;
+ goto unlock;
}
/*
* Assign empty clock ops for consumers that might still hold
@@ -2709,7 +2632,7 @@ void clk_unregister(struct clk *clk)
pr_warn("%s: unregistering prepared clock: %s\n",
__func__, clk->core->name);
kref_put(&clk->core->ref, __clk_release);
-
+unlock:
clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
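(Editorial aside: the clk_unregister() hunk above is a locking fix -- the early return on the nodrv check used to leave clk_prepare_lock() held, so the function now funnels through a single unlock label. A minimal kernel-style sketch of the pattern; registry_lock and struct widget are illustrative stand-ins, not from this patch.)

#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/types.h>

struct widget { bool gone; };

static DEFINE_MUTEX(registry_lock);	/* stand-in for the prepare lock */

static void widget_unregister(struct widget *w)
{
	mutex_lock(&registry_lock);

	if (w->gone) {
		pr_err("%s: already unregistered\n", __func__);
		goto unlock;	/* a bare "return" here would leak the lock */
	}
	w->gone = true;

unlock:
	mutex_unlock(&registry_lock);
}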
@@ -3061,10 +2984,23 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
+EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
-int of_clk_get_parent_count(struct device_node *np)
+/**
+ * of_clk_get_parent_count() - Count the number of clocks a device node has
+ * @np: device node to count
+ *
+ * Returns: The number of clocks that are possible parents of this node
+ */
+unsigned int of_clk_get_parent_count(struct device_node *np)
{
- return of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ int count;
+
+ count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (count < 0)
+ return 0;
+
+ return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
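(Editorial aside: with of_clk_get_parent_count() now returning unsigned int and folding DT lookup errors into 0, callers can drop the signed comparison; the h8300 hunks further down are the in-tree conversions. A schematic caller follows -- my_clk_setup() is hypothetical, the error message mirrors those drivers.)

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/printk.h>

static void __init my_clk_setup(struct device_node *np)	/* hypothetical */
{
	unsigned int num_parents = of_clk_get_parent_count(np);

	if (!num_parents) {	/* was: if (num_parents < 1) on a signed int */
		pr_err("%s: no parent found\n", np->name);
		return;
	}
	/* ... allocate parent_names[num_parents] and register the clk ... */
}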
@@ -3214,6 +3150,9 @@ void __init of_clk_init(const struct of_device_id *matches)
for_each_matching_node_and_match(np, matches, &match) {
struct clock_provider *parent;
+ if (!of_device_is_available(np))
+ continue;
+
parent = kzalloc(sizeof(*parent), GFP_KERNEL);
if (!parent) {
list_for_each_entry_safe(clk_provider, next,
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 779b6ff0c7ad..eb20b941154b 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -353,11 +353,25 @@ void clkdev_drop(struct clk_lookup *cl)
}
EXPORT_SYMBOL(clkdev_drop);
+static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
+ const char *con_id,
+ const char *dev_id, ...)
+{
+ struct clk_lookup *cl;
+ va_list ap;
+
+ va_start(ap, dev_id);
+ cl = vclkdev_create(hw, con_id, dev_id, ap);
+ va_end(ap);
+
+ return cl;
+}
+
/**
* clk_register_clkdev - register one clock lookup for a struct clk
* @clk: struct clk to associate with all clk_lookups
* @con_id: connection ID string on device
- * @dev_id: format string describing device name
+ * @dev_id: string describing device name
*
* con_id or dev_id may be NULL as a wildcard, just as in the rest of
* clkdev.
@@ -368,17 +382,22 @@ EXPORT_SYMBOL(clkdev_drop);
* after clk_register().
*/
int clk_register_clkdev(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...)
+ const char *dev_id)
{
struct clk_lookup *cl;
- va_list ap;
if (IS_ERR(clk))
return PTR_ERR(clk);
- va_start(ap, dev_fmt);
- cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap);
- va_end(ap);
+ /*
+ * Since dev_id can be NULL, and NULL is handled specially, we must
+ * pass it as either a NULL format string, or with "%s".
+ */
+ if (dev_id)
+ cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, "%s",
+ dev_id);
+ else
+ cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, NULL);
return cl ? 0 : -ENOMEM;
}
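(Editorial aside: the clkdev change closes a format-string hazard -- dev_fmt used to be forwarded verbatim into vclkdev_create(), so a '%' in a device name was parsed as a conversion specification. The plain-C analogy below is illustrative, not kernel code.)

#include <stdio.h>

int main(void)
{
	const char *dev_id = "weird%sdevice";	/* hypothetical device name */

	/* printf(dev_id); - the old pattern: "%s" inside the name would
	 * consume a nonexistent argument, which is undefined behaviour */

	printf("%s\n", dev_id);	/* the new pattern: the name is pure data */
	return 0;
}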
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index d71d01157dbb..4bf44a25d950 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -13,7 +13,7 @@ static DEFINE_SPINLOCK(clklock);
static void __init h8300_div_clk_setup(struct device_node *node)
{
- int num_parents;
+ unsigned int num_parents;
struct clk *clk;
const char *clk_name = node->name;
const char *parent_name;
@@ -22,7 +22,7 @@ static void __init h8300_div_clk_setup(struct device_node *node)
int offset;
num_parents = of_clk_get_parent_count(node);
- if (num_parents < 1) {
+ if (!num_parents) {
pr_err("%s: no parent found", clk_name);
return;
}
@@ -34,7 +34,7 @@ static void __init h8300_div_clk_setup(struct device_node *node)
}
offset = (unsigned long)divcr & 3;
offset = (3 - offset) * 8;
- divcr = (void *)((unsigned long)divcr & ~3);
+ divcr = (void __iomem *)((unsigned long)divcr & ~3);
parent_name = of_clk_get_parent_name(node, 0);
of_property_read_u32(node, "renesas,width", &width);
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index 6cf38dc1c929..c9c2fd575ef7 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -83,7 +83,7 @@ static const struct clk_ops pll_ops = {
static void __init h8s2678_pll_clk_setup(struct device_node *node)
{
- int num_parents;
+ unsigned int num_parents;
struct clk *clk;
const char *clk_name = node->name;
const char *parent_name;
@@ -91,7 +91,7 @@ static void __init h8s2678_pll_clk_setup(struct device_node *node)
struct clk_init_data init;
num_parents = of_clk_get_parent_count(node);
- if (num_parents < 1) {
+ if (!num_parents) {
pr_err("%s: no parent found", clk_name);
return;
}
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 7d03fe17d66f..d04a104ce1b4 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -78,15 +78,15 @@ static const char *const mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
/* fixed rate clocks */
static struct hisi_fixed_rate_clock hi3620_fixed_rate_clks[] __initdata = {
- { HI3620_OSC32K, "osc32k", NULL, CLK_IS_ROOT, 32768, },
- { HI3620_OSC26M, "osc26m", NULL, CLK_IS_ROOT, 26000000, },
- { HI3620_PCLK, "pclk", NULL, CLK_IS_ROOT, 26000000, },
- { HI3620_PLL_ARM0, "armpll0", NULL, CLK_IS_ROOT, 1600000000, },
- { HI3620_PLL_ARM1, "armpll1", NULL, CLK_IS_ROOT, 1600000000, },
- { HI3620_PLL_PERI, "armpll2", NULL, CLK_IS_ROOT, 1440000000, },
- { HI3620_PLL_USB, "armpll3", NULL, CLK_IS_ROOT, 1440000000, },
- { HI3620_PLL_HDMI, "armpll4", NULL, CLK_IS_ROOT, 1188000000, },
- { HI3620_PLL_GPU, "armpll5", NULL, CLK_IS_ROOT, 1300000000, },
+ { HI3620_OSC32K, "osc32k", NULL, 0, 32768, },
+ { HI3620_OSC26M, "osc26m", NULL, 0, 26000000, },
+ { HI3620_PCLK, "pclk", NULL, 0, 26000000, },
+ { HI3620_PLL_ARM0, "armpll0", NULL, 0, 1600000000, },
+ { HI3620_PLL_ARM1, "armpll1", NULL, 0, 1600000000, },
+ { HI3620_PLL_PERI, "armpll2", NULL, 0, 1440000000, },
+ { HI3620_PLL_USB, "armpll3", NULL, 0, 1440000000, },
+ { HI3620_PLL_HDMI, "armpll4", NULL, 0, 1188000000, },
+ { HI3620_PLL_GPU, "armpll5", NULL, 0, 1300000000, },
};
/* fixed factor clocks */
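(Editorial aside: these tables swap CLK_IS_ROOT for a plain 0. As the clk.c hunk earlier shows, the core now roots any clock whose num_parents is zero, so the flag carries no information. A schematic registration of a parentless clock under that rule; "osc24m" and the rate are illustrative.)

#include <linux/clk-provider.h>

static struct clk *register_board_osc(void)
{
	/* no parent and flags 0: the core puts this on the root clk list */
	return clk_register_fixed_rate(NULL, "osc24m", NULL, 0, 24000000);
}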
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
index 8afb40ef40ce..329a09214d12 100644
--- a/drivers/clk/hisilicon/clk-hi6220-stub.c
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -235,7 +235,7 @@ static int hi6220_stub_clk_probe(struct platform_device *pdev)
init.name = "acpu0";
init.ops = &hi6220_stub_clk_ops;
init.num_parents = 0;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
clk = devm_clk_register(dev, &stub_clk->hw);
if (IS_ERR(clk))
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index 4563343b6420..f02cb41d40a4 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -26,19 +26,19 @@
/* clocks in AO (always on) controller */
static struct hisi_fixed_rate_clock hi6220_fixed_rate_clks[] __initdata = {
- { HI6220_REF32K, "ref32k", NULL, CLK_IS_ROOT, 32764, },
- { HI6220_CLK_TCXO, "clk_tcxo", NULL, CLK_IS_ROOT, 19200000, },
- { HI6220_MMC1_PAD, "mmc1_pad", NULL, CLK_IS_ROOT, 100000000, },
- { HI6220_MMC2_PAD, "mmc2_pad", NULL, CLK_IS_ROOT, 100000000, },
- { HI6220_MMC0_PAD, "mmc0_pad", NULL, CLK_IS_ROOT, 200000000, },
- { HI6220_PLL_BBP, "bbppll0", NULL, CLK_IS_ROOT, 245760000, },
- { HI6220_PLL_GPU, "gpupll", NULL, CLK_IS_ROOT, 1000000000,},
- { HI6220_PLL1_DDR, "ddrpll1", NULL, CLK_IS_ROOT, 1066000000,},
- { HI6220_PLL_SYS, "syspll", NULL, CLK_IS_ROOT, 1200000000,},
- { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, CLK_IS_ROOT, 1200000000,},
- { HI6220_DDR_SRC, "ddr_sel_src", NULL, CLK_IS_ROOT, 1200000000,},
- { HI6220_PLL_MEDIA, "media_pll", NULL, CLK_IS_ROOT, 1440000000,},
- { HI6220_PLL_DDR, "ddrpll0", NULL, CLK_IS_ROOT, 1600000000,},
+ { HI6220_REF32K, "ref32k", NULL, 0, 32764, },
+ { HI6220_CLK_TCXO, "clk_tcxo", NULL, 0, 19200000, },
+ { HI6220_MMC1_PAD, "mmc1_pad", NULL, 0, 100000000, },
+ { HI6220_MMC2_PAD, "mmc2_pad", NULL, 0, 100000000, },
+ { HI6220_MMC0_PAD, "mmc0_pad", NULL, 0, 200000000, },
+ { HI6220_PLL_BBP, "bbppll0", NULL, 0, 245760000, },
+ { HI6220_PLL_GPU, "gpupll", NULL, 0, 1000000000,},
+ { HI6220_PLL1_DDR, "ddrpll1", NULL, 0, 1066000000,},
+ { HI6220_PLL_SYS, "syspll", NULL, 0, 1200000000,},
+ { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, 0, 1200000000,},
+ { HI6220_DDR_SRC, "ddr_sel_src", NULL, 0, 1200000000,},
+ { HI6220_PLL_MEDIA, "media_pll", NULL, 0, 1440000000,},
+ { HI6220_PLL_DDR, "ddrpll0", NULL, 0, 1600000000,},
};
static struct hisi_fixed_factor_clock hi6220_fixed_factor_clks[] __initdata = {
diff --git a/drivers/clk/hisilicon/clk-hip04.c b/drivers/clk/hisilicon/clk-hip04.c
index 8ca967308343..b38e03da1d02 100644
--- a/drivers/clk/hisilicon/clk-hip04.c
+++ b/drivers/clk/hisilicon/clk-hip04.c
@@ -36,9 +36,9 @@
/* fixed rate clocks */
static struct hisi_fixed_rate_clock hip04_fixed_rate_clks[] __initdata = {
- { HIP04_OSC50M, "osc50m", NULL, CLK_IS_ROOT, 50000000, },
- { HIP04_CLK_50M, "clk50m", NULL, CLK_IS_ROOT, 50000000, },
- { HIP04_CLK_168M, "clk168m", NULL, CLK_IS_ROOT, 168750000, },
+ { HIP04_OSC50M, "osc50m", NULL, 0, 50000000, },
+ { HIP04_CLK_50M, "clk50m", NULL, 0, 50000000, },
+ { HIP04_CLK_168M, "clk168m", NULL, 0, 168750000, },
};
static void __init hip04_clk_init(struct device_node *np)
diff --git a/drivers/clk/hisilicon/clk-hix5hd2.c b/drivers/clk/hisilicon/clk-hix5hd2.c
index 0aaf29da8491..14b05efa3c2a 100644
--- a/drivers/clk/hisilicon/clk-hix5hd2.c
+++ b/drivers/clk/hisilicon/clk-hix5hd2.c
@@ -14,36 +14,36 @@
#include "clk.h"
static struct hisi_fixed_rate_clock hix5hd2_fixed_rate_clks[] __initdata = {
- { HIX5HD2_FIXED_1200M, "1200m", NULL, CLK_IS_ROOT, 1200000000, },
- { HIX5HD2_FIXED_400M, "400m", NULL, CLK_IS_ROOT, 400000000, },
- { HIX5HD2_FIXED_48M, "48m", NULL, CLK_IS_ROOT, 48000000, },
- { HIX5HD2_FIXED_24M, "24m", NULL, CLK_IS_ROOT, 24000000, },
- { HIX5HD2_FIXED_600M, "600m", NULL, CLK_IS_ROOT, 600000000, },
- { HIX5HD2_FIXED_300M, "300m", NULL, CLK_IS_ROOT, 300000000, },
- { HIX5HD2_FIXED_75M, "75m", NULL, CLK_IS_ROOT, 75000000, },
- { HIX5HD2_FIXED_200M, "200m", NULL, CLK_IS_ROOT, 200000000, },
- { HIX5HD2_FIXED_100M, "100m", NULL, CLK_IS_ROOT, 100000000, },
- { HIX5HD2_FIXED_40M, "40m", NULL, CLK_IS_ROOT, 40000000, },
- { HIX5HD2_FIXED_150M, "150m", NULL, CLK_IS_ROOT, 150000000, },
- { HIX5HD2_FIXED_1728M, "1728m", NULL, CLK_IS_ROOT, 1728000000, },
- { HIX5HD2_FIXED_28P8M, "28p8m", NULL, CLK_IS_ROOT, 28000000, },
- { HIX5HD2_FIXED_432M, "432m", NULL, CLK_IS_ROOT, 432000000, },
- { HIX5HD2_FIXED_345P6M, "345p6m", NULL, CLK_IS_ROOT, 345000000, },
- { HIX5HD2_FIXED_288M, "288m", NULL, CLK_IS_ROOT, 288000000, },
- { HIX5HD2_FIXED_60M, "60m", NULL, CLK_IS_ROOT, 60000000, },
- { HIX5HD2_FIXED_750M, "750m", NULL, CLK_IS_ROOT, 750000000, },
- { HIX5HD2_FIXED_500M, "500m", NULL, CLK_IS_ROOT, 500000000, },
- { HIX5HD2_FIXED_54M, "54m", NULL, CLK_IS_ROOT, 54000000, },
- { HIX5HD2_FIXED_27M, "27m", NULL, CLK_IS_ROOT, 27000000, },
- { HIX5HD2_FIXED_1500M, "1500m", NULL, CLK_IS_ROOT, 1500000000, },
- { HIX5HD2_FIXED_375M, "375m", NULL, CLK_IS_ROOT, 375000000, },
- { HIX5HD2_FIXED_187M, "187m", NULL, CLK_IS_ROOT, 187000000, },
- { HIX5HD2_FIXED_250M, "250m", NULL, CLK_IS_ROOT, 250000000, },
- { HIX5HD2_FIXED_125M, "125m", NULL, CLK_IS_ROOT, 125000000, },
- { HIX5HD2_FIXED_2P02M, "2m", NULL, CLK_IS_ROOT, 2000000, },
- { HIX5HD2_FIXED_50M, "50m", NULL, CLK_IS_ROOT, 50000000, },
- { HIX5HD2_FIXED_25M, "25m", NULL, CLK_IS_ROOT, 25000000, },
- { HIX5HD2_FIXED_83M, "83m", NULL, CLK_IS_ROOT, 83333333, },
+ { HIX5HD2_FIXED_1200M, "1200m", NULL, 0, 1200000000, },
+ { HIX5HD2_FIXED_400M, "400m", NULL, 0, 400000000, },
+ { HIX5HD2_FIXED_48M, "48m", NULL, 0, 48000000, },
+ { HIX5HD2_FIXED_24M, "24m", NULL, 0, 24000000, },
+ { HIX5HD2_FIXED_600M, "600m", NULL, 0, 600000000, },
+ { HIX5HD2_FIXED_300M, "300m", NULL, 0, 300000000, },
+ { HIX5HD2_FIXED_75M, "75m", NULL, 0, 75000000, },
+ { HIX5HD2_FIXED_200M, "200m", NULL, 0, 200000000, },
+ { HIX5HD2_FIXED_100M, "100m", NULL, 0, 100000000, },
+ { HIX5HD2_FIXED_40M, "40m", NULL, 0, 40000000, },
+ { HIX5HD2_FIXED_150M, "150m", NULL, 0, 150000000, },
+ { HIX5HD2_FIXED_1728M, "1728m", NULL, 0, 1728000000, },
+ { HIX5HD2_FIXED_28P8M, "28p8m", NULL, 0, 28000000, },
+ { HIX5HD2_FIXED_432M, "432m", NULL, 0, 432000000, },
+ { HIX5HD2_FIXED_345P6M, "345p6m", NULL, 0, 345000000, },
+ { HIX5HD2_FIXED_288M, "288m", NULL, 0, 288000000, },
+ { HIX5HD2_FIXED_60M, "60m", NULL, 0, 60000000, },
+ { HIX5HD2_FIXED_750M, "750m", NULL, 0, 750000000, },
+ { HIX5HD2_FIXED_500M, "500m", NULL, 0, 500000000, },
+ { HIX5HD2_FIXED_54M, "54m", NULL, 0, 54000000, },
+ { HIX5HD2_FIXED_27M, "27m", NULL, 0, 27000000, },
+ { HIX5HD2_FIXED_1500M, "1500m", NULL, 0, 1500000000, },
+ { HIX5HD2_FIXED_375M, "375m", NULL, 0, 375000000, },
+ { HIX5HD2_FIXED_187M, "187m", NULL, 0, 187000000, },
+ { HIX5HD2_FIXED_250M, "250m", NULL, 0, 250000000, },
+ { HIX5HD2_FIXED_125M, "125m", NULL, 0, 125000000, },
+ { HIX5HD2_FIXED_2P02M, "2m", NULL, 0, 2000000, },
+ { HIX5HD2_FIXED_50M, "50m", NULL, 0, 50000000, },
+ { HIX5HD2_FIXED_25M, "25m", NULL, 0, 25000000, },
+ { HIX5HD2_FIXED_83M, "83m", NULL, 0, 83333333, },
};
static const char *const sfc_mux_p[] __initconst = {
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 4bb1bc419b79..5cc99590f9a3 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -38,7 +38,7 @@ struct clk_busy_divider {
static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
{
- struct clk_divider *div = container_of(hw, struct clk_divider, hw);
+ struct clk_divider *div = to_clk_divider(hw);
return container_of(div, struct clk_busy_divider, div);
}
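(Editorial aside: to_clk_busy_divider() now reuses the generic to_clk_divider() helper for the first hop; the cast is still two container_of() steps because the busy divider embeds a clk_divider, which embeds the clk_hw. A self-contained sketch of that nesting, with illustrative struct names.)

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hw { int id; };				/* like struct clk_hw */
struct divider { struct hw hw; };		/* like struct clk_divider */
struct busy_divider {				/* like struct clk_busy_divider */
	struct divider div;
	int busy_bit;
};

static struct divider *to_divider(struct hw *hw)	/* the generic hop */
{
	return container_of(hw, struct divider, hw);
}

static struct busy_divider *to_busy_divider(struct hw *hw)
{
	return container_of(to_divider(hw), struct busy_divider, div);
}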
@@ -123,7 +123,7 @@ struct clk_busy_mux {
static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
{
- struct clk_mux *mux = container_of(hw, struct clk_mux, hw);
+ struct clk_mux *mux = to_clk_mux(hw);
return container_of(mux, struct clk_busy_mux, mux);
}
diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c
index 21db020b1f2d..ce5722732715 100644
--- a/drivers/clk/imx/clk-fixup-div.c
+++ b/drivers/clk/imx/clk-fixup-div.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include "clk.h"
-#define to_clk_div(_hw) container_of(_hw, struct clk_divider, hw)
#define div_mask(d) ((1 << (d->width)) - 1)
/**
@@ -35,7 +34,7 @@ struct clk_fixup_div {
static inline struct clk_fixup_div *to_clk_fixup_div(struct clk_hw *hw)
{
- struct clk_divider *divider = to_clk_div(hw);
+ struct clk_divider *divider = to_clk_divider(hw);
return container_of(divider, struct clk_fixup_div, divider);
}
@@ -60,7 +59,7 @@ static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_fixup_div *fixup_div = to_clk_fixup_div(hw);
- struct clk_divider *div = to_clk_div(hw);
+ struct clk_divider *div = to_clk_divider(hw);
unsigned int divider, value;
unsigned long flags = 0;
u32 val;
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index 0d40b35c557c..c9b327e0a8dd 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -15,8 +15,6 @@
#include <linux/slab.h>
#include "clk.h"
-#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-
/**
* struct clk_fixup_mux - imx integer fixup multiplexer clock
* @mux: the parent class
diff --git a/drivers/clk/imx/clk-gate-exclusive.c b/drivers/clk/imx/clk-gate-exclusive.c
index c12f5f2e04dc..3bd9dee618b2 100644
--- a/drivers/clk/imx/clk-gate-exclusive.c
+++ b/drivers/clk/imx/clk-gate-exclusive.c
@@ -31,7 +31,7 @@ struct clk_gate_exclusive {
static int clk_gate_exclusive_enable(struct clk_hw *hw)
{
- struct clk_gate *gate = container_of(hw, struct clk_gate, hw);
+ struct clk_gate *gate = to_clk_gate(hw);
struct clk_gate_exclusive *exgate = container_of(gate,
struct clk_gate_exclusive, gate);
u32 val = readl(gate->reg);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index f0efc6feeec2..2beb396fe652 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -34,7 +34,9 @@ static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
static const char *audio_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
static const char *gpu_axi_sels[] = { "axi", "ahb", };
+static const char *pre_axi_sels[] = { "axi", "ahb", };
static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
+static const char *gpu2d_core_sels_2[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
@@ -44,15 +46,24 @@ static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di
static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu2_di0_sels[] = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu2_di1_sels[] = { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu1_di0_sels_2[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0_podf", "ldb_di1_podf", };
+static const char *ipu1_di1_sels_2[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0_podf", "ldb_di1_podf", };
+static const char *ipu2_di0_sels_2[] = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0_podf", "ldb_di1_podf", };
+static const char *ipu2_di1_sels_2[] = { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0_podf", "ldb_di1_podf", };
static const char *hsi_tx_sels[] = { "pll3_120m", "pll2_pfd2_396m", };
static const char *pcie_axi_sels[] = { "axi", "ahb", };
static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio_div", };
static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
+static const char *enfc_sels_2[] = {"pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", "pll3_pfd3_454m", "dummy", };
static const char *eim_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
static const char *eim_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *vdo_axi_sels[] = { "axi", "ahb", };
static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *uart_sels[] = { "pll3_80m", "osc", };
+static const char *ipg_per_sels[] = { "ipg", "osc", };
+static const char *ecspi_sels[] = { "pll3_60m", "osc", };
+static const char *can_sels[] = { "pll3_60m", "osc", "pll3_80m", };
static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
"dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
"ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio_div", };
@@ -121,12 +132,19 @@ static unsigned int share_count_ssi2;
static unsigned int share_count_ssi3;
static unsigned int share_count_mipi_core_cfg;
static unsigned int share_count_spdif;
+static unsigned int share_count_prg0;
+static unsigned int share_count_prg1;
static inline int clk_on_imx6q(void)
{
return of_machine_is_compatible("fsl,imx6q");
}
+static inline int clk_on_imx6qp(void)
+{
+ return of_machine_is_compatible("fsl,imx6qp");
+}
+
static inline int clk_on_imx6dl(void)
{
return of_machine_is_compatible("fsl,imx6dl");
@@ -265,7 +283,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2);
clk[IMX6QDL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
clk[IMX6QDL_CLK_VIDEO_27M] = imx_clk_fixed_factor("video_27m", "pll3_pfd1_540m", 1, 20);
- if (clk_on_imx6dl()) {
+ if (clk_on_imx6dl() || clk_on_imx6qp()) {
clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1);
clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1);
}
@@ -294,7 +312,15 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
}
- clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
+ clk[IMX6QDL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
+ clk[IMX6QDL_CLK_IPG_PER_SEL] = imx_clk_mux("ipg_per_sel", base + 0x1c, 6, 1, ipg_per_sels, ARRAY_SIZE(ipg_per_sels));
+ clk[IMX6QDL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
+ clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels_2, ARRAY_SIZE(gpu2d_core_sels_2));
+ } else {
+ clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
+ }
clk[IMX6QDL_CLK_GPU3D_CORE_SEL] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels));
clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
clk[IMX6QDL_CLK_IPU1_SEL] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
@@ -305,22 +331,40 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_IPU2_DI1_PRE_SEL] = imx_clk_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_HSI_TX_SEL] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels));
clk[IMX6QDL_CLK_PCIE_AXI_SEL] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels));
- clk[IMX6QDL_CLK_SSI1_SEL] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_SSI2_SEL] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_SSI3_SEL] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
- clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_fixup_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_fixup_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels), imx_cscmr1_fixup);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels_2, ARRAY_SIZE(ipu1_di0_sels_2), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels_2, ARRAY_SIZE(ipu1_di1_sels_2), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels_2, ARRAY_SIZE(ipu2_di0_sels_2), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels_2, ARRAY_SIZE(ipu2_di1_sels_2), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[IMX6QDL_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[IMX6QDL_CLK_SSI3_SEL] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels_2, ARRAY_SIZE(enfc_sels_2));
+ clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels));
+ clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels));
+ clk[IMX6QDL_CLK_PRE_AXI] = imx_clk_mux("pre_axi", base + 0x18, 1, 1, pre_axi_sels, ARRAY_SIZE(pre_axi_sels));
+ } else {
+ clk[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_SSI1_SEL] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_SSI2_SEL] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_SSI3_SEL] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
+ clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_fixup_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_fixup_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels), imx_cscmr1_fixup);
+ }
clk[IMX6QDL_CLK_VDO_AXI_SEL] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
clk[IMX6QDL_CLK_VPU_AXI_SEL] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
clk[IMX6QDL_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
@@ -335,23 +379,33 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
clk[IMX6QDL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
clk[IMX6QDL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
- clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
clk[IMX6QDL_CLK_ESAI_PRED] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3);
clk[IMX6QDL_CLK_ESAI_PODF] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3);
clk[IMX6QDL_CLK_ASRC_PRED] = imx_clk_divider("asrc_pred", "asrc_sel", base + 0x30, 12, 3);
clk[IMX6QDL_CLK_ASRC_PODF] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3);
clk[IMX6QDL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
clk[IMX6QDL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
- clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
- clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_IPG_PER] = imx_clk_divider("ipg_per", "ipg_per_sel", base + 0x1c, 0, 6);
+ clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "ecspi_sel", base + 0x38, 19, 6);
+ clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "can_sel", base + 0x20, 2, 6);
+ clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "uart_sel", base + 0x24, 0, 6);
+ clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0", 2, 7);
+ clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
+ } else {
+ clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
+ clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
+ clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
+ clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ }
clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
clk[IMX6QDL_CLK_GPU3D_CORE_PODF] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
clk[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
clk[IMX6QDL_CLK_IPU1_PODF] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
clk[IMX6QDL_CLK_IPU2_PODF] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
- clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
clk[IMX6QDL_CLK_LDB_DI0_PODF] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0);
- clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
clk[IMX6QDL_CLK_LDB_DI1_PODF] = imx_clk_divider_flags("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1, 0);
clk[IMX6QDL_CLK_IPU1_DI0_PRE] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
clk[IMX6QDL_CLK_IPU1_DI1_PRE] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
@@ -364,15 +418,19 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6);
clk[IMX6QDL_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3);
clk[IMX6QDL_CLK_SSI3_PODF] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6);
- clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
clk[IMX6QDL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
clk[IMX6QDL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
clk[IMX6QDL_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3);
clk[IMX6QDL_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
clk[IMX6QDL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3);
clk[IMX6QDL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6);
- clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_fixup_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_fixup_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3);
+ clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3);
+ } else {
+ clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_fixup_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_fixup_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup);
+ }
clk[IMX6QDL_CLK_VPU_AXI_PODF] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3);
clk[IMX6QDL_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3);
clk[IMX6QDL_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3);
@@ -380,7 +438,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
/* name parent_name reg shift width busy: reg, shift */
clk[IMX6QDL_CLK_AXI] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
clk[IMX6QDL_CLK_MMDC_CH0_AXI_PODF] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph", base + 0x14, 19, 3, base + 0x48, 4);
- clk[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_MMDC_CH1_AXI_CG] = imx_clk_gate("mmdc_ch1_axi_cg", "periph2", base + 0x4, 18);
+ clk[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "mmdc_ch1_axi_cg", base + 0x14, 3, 3, base + 0x48, 2);
+ } else {
+ clk[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
+ }
clk[IMX6QDL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
clk[IMX6QDL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
@@ -432,8 +495,13 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_IPU1_DI1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4);
clk[IMX6QDL_CLK_IPU2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6);
clk[IMX6QDL_CLK_IPU2_DI0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8);
- clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
- clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_sel", base + 0x74, 12);
+ clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_sel", base + 0x74, 14);
+ } else {
+ clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
+ clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
+ }
clk[IMX6QDL_CLK_IPU2_DI1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
clk[IMX6QDL_CLK_HSI_TX] = imx_clk_gate2_shared("hsi_tx", "hsi_tx_podf", base + 0x74, 16, &share_count_mipi_core_cfg);
clk[IMX6QDL_CLK_MIPI_CORE_CFG] = imx_clk_gate2_shared("mipi_core_cfg", "video_27m", base + 0x74, 16, &share_count_mipi_core_cfg);
@@ -482,6 +550,16 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_EIM_SLOW] = imx_clk_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10);
clk[IMX6QDL_CLK_VDO_AXI] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12);
clk[IMX6QDL_CLK_VPU_AXI] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_PRE0] = imx_clk_gate2("pre0", "pre_axi", base + 0x80, 16);
+ clk[IMX6QDL_CLK_PRE1] = imx_clk_gate2("pre1", "pre_axi", base + 0x80, 18);
+ clk[IMX6QDL_CLK_PRE2] = imx_clk_gate2("pre2", "pre_axi", base + 0x80, 20);
+ clk[IMX6QDL_CLK_PRE3] = imx_clk_gate2("pre3", "pre_axi", base + 0x80, 22);
+ clk[IMX6QDL_CLK_PRG0_AXI] = imx_clk_gate2_shared("prg0_axi", "ipu1_podf", base + 0x80, 24, &share_count_prg0);
+ clk[IMX6QDL_CLK_PRG1_AXI] = imx_clk_gate2_shared("prg1_axi", "ipu2_podf", base + 0x80, 26, &share_count_prg1);
+ clk[IMX6QDL_CLK_PRG0_APB] = imx_clk_gate2_shared("prg0_apb", "ipg", base + 0x80, 24, &share_count_prg0);
+ clk[IMX6QDL_CLK_PRG1_APB] = imx_clk_gate2_shared("prg1_apb", "ipg", base + 0x80, 26, &share_count_prg1);
+ }
clk[IMX6QDL_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7);
clk[IMX6QDL_CLK_CKO2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24);
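(Editorial aside: the i.MX6QP support above reuses the i.MX6Q init path and branches on the DT compatible at runtime rather than adding a second driver. A reduced sketch of that gating; board_clocks_init() is a stand-in, the compatible string is the one the patch tests.)

#include <linux/init.h>
#include <linux/of.h>
#include <linux/types.h>

static inline bool on_quadplus(void)	/* like clk_on_imx6qp() */
{
	return of_machine_is_compatible("fsl,imx6qp");
}

static void __init board_clocks_init(void)
{
	if (on_quadplus()) {
		/* register the QP-only selects/dividers */
	} else {
		/* register the classic Q/DL variants */
	}
}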
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 08692d74b884..0f1f17a8f3ed 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -157,9 +157,9 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_parent(clks[IMX6UL_PLL7_BYPASS], clks[IMX6UL_CLK_PLL7]);
clks[IMX6UL_CLK_PLL1_SYS] = imx_clk_fixed_factor("pll1_sys", "pll1_bypass", 1, 1);
- clks[IMX6UL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
- clks[IMX6UL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
- clks[IMX6UL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
+ clks[IMX6UL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
+ clks[IMX6UL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
+ clks[IMX6UL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
clks[IMX6UL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13);
clks[IMX6UL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13);
clks[IMX6UL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13);
@@ -196,8 +196,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
base + 0xe0, 2, 2, 0, clk_enet_ref_table, &imx_ccm_lock);
clks[IMX6UL_CLK_ENET2_REF_125M] = imx_clk_gate("enet_ref_125m", "enet2_ref", base + 0xe0, 20);
- clks[IMX6UL_CLK_ENET_PTP_REF] = imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20);
- clks[IMX6UL_CLK_ENET_PTP] = imx_clk_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21);
+ clks[IMX6UL_CLK_ENET_PTP_REF] = imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20);
+ clks[IMX6UL_CLK_ENET_PTP] = imx_clk_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21);
clks[IMX6UL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
@@ -210,8 +210,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
/* name parent_name mult div */
clks[IMX6UL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
- clks[IMX6UL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
- clks[IMX6UL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
+ clks[IMX6UL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+ clks[IMX6UL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
clks[IMX6UL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
np = ccm_node;
@@ -219,34 +219,34 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
WARN_ON(!base);
clks[IMX6UL_CA7_SECONDARY_SEL] = imx_clk_mux("ca7_secondary_sel", base + 0xc, 3, 1, ca7_secondary_sels, ARRAY_SIZE(ca7_secondary_sels));
- clks[IMX6UL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels));
- clks[IMX6UL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0);
+ clks[IMX6UL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels));
+ clks[IMX6UL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0);
clks[IMX6UL_CLK_AXI_ALT_SEL] = imx_clk_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels));
- clks[IMX6UL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0);
- clks[IMX6UL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
- clks[IMX6UL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
+ clks[IMX6UL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0);
+ clks[IMX6UL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+ clks[IMX6UL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
clks[IMX6UL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
clks[IMX6UL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
- clks[IMX6UL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels));
+ clks[IMX6UL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels));
clks[IMX6UL_CLK_GPMI_SEL] = imx_clk_mux("gpmi_sel", base + 0x1c, 19, 1, gpmi_sels, ARRAY_SIZE(gpmi_sels));
- clks[IMX6UL_CLK_BCH_SEL] = imx_clk_mux("bch_sel", base + 0x1c, 18, 1, bch_sels, ARRAY_SIZE(bch_sels));
+ clks[IMX6UL_CLK_BCH_SEL] = imx_clk_mux("bch_sel", base + 0x1c, 18, 1, bch_sels, ARRAY_SIZE(bch_sels));
clks[IMX6UL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
clks[IMX6UL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
- clks[IMX6UL_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", base + 0x1c, 14, 2, sai_sels, ARRAY_SIZE(sai_sels));
+ clks[IMX6UL_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", base + 0x1c, 14, 2, sai_sels, ARRAY_SIZE(sai_sels));
clks[IMX6UL_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", base + 0x1c, 12, 2, sai_sels, ARRAY_SIZE(sai_sels));
- clks[IMX6UL_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", base + 0x1c, 10, 2, sai_sels, ARRAY_SIZE(sai_sels));
- clks[IMX6UL_CLK_QSPI1_SEL] = imx_clk_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
- clks[IMX6UL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
- clks[IMX6UL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
+ clks[IMX6UL_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", base + 0x1c, 10, 2, sai_sels, ARRAY_SIZE(sai_sels));
+ clks[IMX6UL_CLK_QSPI1_SEL] = imx_clk_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
+ clks[IMX6UL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
+ clks[IMX6UL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
clks[IMX6UL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
clks[IMX6UL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels, ARRAY_SIZE(enfc_sels));
clks[IMX6UL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
clks[IMX6UL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
- clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
- clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
+ clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
+ clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
clks[IMX6UL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
- clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
+ clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
clks[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
clks[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
@@ -259,11 +259,11 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
clks[IMX6UL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
- clks[IMX6UL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
- clks[IMX6UL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
+ clks[IMX6UL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
+ clks[IMX6UL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
clks[IMX6UL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
clks[IMX6UL_CLK_LCDIF_PODF] = imx_clk_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3);
- clks[IMX6UL_CLK_QSPI1_PDOF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3);
+ clks[IMX6UL_CLK_QSPI1_PDOF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3);
clks[IMX6UL_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3);
clks[IMX6UL_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6);
clks[IMX6UL_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6);
@@ -287,14 +287,14 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3);
clks[IMX6UL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3);
- clks[IMX6UL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
+ clks[IMX6UL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
clks[IMX6UL_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
clks[IMX6UL_CLK_AXI_PODF] = imx_clk_busy_divider("axi_podf", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
clks[IMX6UL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
/* CCGR0 */
- clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
- clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
+ clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
+ clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4);
clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
@@ -302,7 +302,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
clks[IMX6UL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
clks[IMX6UL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
- clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16);
+ clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16);
clks[IMX6UL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
clks[IMX6UL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20);
clks[IMX6UL_CLK_GPT2_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x68, 24);
@@ -331,7 +331,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x70, 2);
clks[IMX6UL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
clks[IMX6UL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
- clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
+ clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6UL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
clks[IMX6UL_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif_podf", base + 0x70, 14);
clks[IMX6UL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
@@ -365,6 +365,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
/* CCGR5 */
clks[IMX6UL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
clks[IMX6UL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
+ clks[IMX6UL_CLK_KPP] = imx_clk_gate2("kpp", "ipg", base + 0x7c, 8);
clks[IMX6UL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10);
clks[IMX6UL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
clks[IMX6UL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio);
@@ -391,10 +392,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART8_IPG] = imx_clk_gate2("uart8_ipg", "ipg", base + 0x80, 14);
clks[IMX6UL_CLK_UART8_SERIAL] = imx_clk_gate2("uart8_serial", "uart_podf", base + 0x80, 14);
clks[IMX6UL_CLK_WDOG3] = imx_clk_gate2("wdog3", "ipg", base + 0x80, 20);
- clks[IMX6UL_CLK_I2C4] = imx_clk_gate2("i2c4", "perclk", base + 0x80, 24);
+ clks[IMX6UL_CLK_I2C4] = imx_clk_gate2("i2c4", "perclk", base + 0x80, 24);
clks[IMX6UL_CLK_PWM5] = imx_clk_gate2("pwm5", "perclk", base + 0x80, 26);
clks[IMX6UL_CLK_PWM6] = imx_clk_gate2("pwm6", "perclk", base + 0x80, 28);
- clks[IMX6UL_CLK_PWM7] = imx_clk_gate2("Pwm7", "perclk", base + 0x80, 30);
+ clks[IMX6UL_CLK_PWM7] = imx_clk_gate2("pwm7", "perclk", base + 0x80, 30);
/* mask handshake of mmdc */
writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index c94ac5c26226..d942f5748d08 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -87,7 +87,7 @@ struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
static inline struct clk *imx_clk_fixed(const char *name, int rate)
{
- return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+ return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static inline struct clk *imx_clk_divider(const char *name, const char *parent,
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 576bdb7c98b8..2a76901bf04b 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -25,7 +25,7 @@
static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_clk_gate(hw);
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
regmap_read(cg->regmap, cg->sta_ofs, &val);
@@ -37,7 +37,7 @@ static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
static int mtk_cg_bit_is_set(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_clk_gate(hw);
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
regmap_read(cg->regmap, cg->sta_ofs, &val);
@@ -49,14 +49,14 @@ static int mtk_cg_bit_is_set(struct clk_hw *hw)
static void mtk_cg_set_bit(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_clk_gate(hw);
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
regmap_write(cg->regmap, cg->set_ofs, BIT(cg->bit));
}
static void mtk_cg_clr_bit(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_clk_gate(hw);
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
}
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 11e25c992948..b1821603b887 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -29,7 +29,7 @@ struct mtk_clk_gate {
u8 bit;
};
-static inline struct mtk_clk_gate *to_clk_gate(struct clk_hw *hw)
+static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
{
return container_of(hw, struct mtk_clk_gate, hw);
}
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index cf08db6c130c..5ada644e6200 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -58,8 +58,8 @@ void __init mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
for (i = 0; i < num; i++) {
const struct mtk_fixed_clk *rc = &clks[i];
- clk = clk_register_fixed_rate(NULL, rc->name, rc->parent,
- rc->parent ? 0 : CLK_IS_ROOT, rc->rate);
+ clk = clk_register_fixed_rate(NULL, rc->name, rc->parent, 0,
+ rc->rate);
if (IS_ERR(clk)) {
pr_err("Failed to register clk %s: %ld\n",
@@ -209,12 +209,14 @@ struct clk * __init mtk_clk_register_composite(const struct mtk_composite *mc,
mc->flags);
if (IS_ERR(clk)) {
- kfree(gate);
- kfree(mux);
+ ret = PTR_ERR(clk);
+ goto err_out;
}
return clk;
err_out:
+ kfree(div);
+ kfree(gate);
kfree(mux);
return ERR_PTR(ret);
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 9e9fe4b19ac4..309049d41f1b 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -57,7 +57,7 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
return mtk_reset_deassert(rcdev, id);
}
-static struct reset_control_ops mtk_reset_ops = {
+static const struct reset_control_ops mtk_reset_ops = {
.assert = mtk_reset_assert,
.deassert = mtk_reset_deassert,
.reset = mtk_reset,
diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
index c83ae1367abc..d920d410b51d 100644
--- a/drivers/clk/meson/clkc.c
+++ b/drivers/clk/meson/clkc.c
@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
}
void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
- size_t nr_confs,
+ unsigned int nr_confs,
void __iomem *clk_base)
{
unsigned int i;
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
index b54da1fe73f0..b4e4d6aa2631 100644
--- a/drivers/clk/mmp/reset.c
+++ b/drivers/clk/mmp/reset.c
@@ -74,7 +74,7 @@ static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops mmp_clk_reset_ops = {
+static const struct reset_control_ops mmp_clk_reset_ops = {
.assert = mmp_clk_reset_assert,
.deassert = mmp_clk_reset_deassert,
};
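(Editorial aside: constifying the reset_control_ops tables -- here and in the mediatek hunk above -- lets them live in read-only data, so an errant write faults instead of silently redirecting a function pointer. A schematic ops table; struct my_ops is an illustrative stand-in.)

struct my_ops {
	int (*assert_reset)(int id);
	int (*deassert_reset)(int id);
};

static int do_assert(int id)   { (void)id; return 0; }
static int do_deassert(int id) { (void)id; return 0; }

/* "const" moves the table into .rodata */
static const struct my_ops reset_ops = {
	.assert_reset	= do_assert,
	.deassert_reset	= do_deassert,
};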
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index 27696255486d..eaee8f099c8c 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -11,7 +11,6 @@ config ARMADA_370_CLK
bool
select MVEBU_CLK_COMMON
select MVEBU_CLK_CPU
- select MVEBU_CLK_COREDIV
config ARMADA_375_CLK
bool
@@ -29,7 +28,6 @@ config ARMADA_XP_CLK
bool
select MVEBU_CLK_COMMON
select MVEBU_CLK_CPU
- select MVEBU_CLK_COREDIV
config DOVE_CLK
bool
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 28aac67e7b92..66be2e0c82b4 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -137,8 +137,8 @@ void __init mvebu_coreclk_setup(struct device_node *np,
of_property_read_string_index(np, "clock-output-names", 0,
&tclk_name);
rate = desc->get_tclk_freq(base);
- clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
- CLK_IS_ROOT, rate);
+ clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL, 0,
+ rate);
WARN_ON(IS_ERR(clk_data.clks[0]));
/* Register CPU clock */
@@ -150,8 +150,8 @@ void __init mvebu_coreclk_setup(struct device_node *np,
&& desc->is_sscg_enabled(base))
rate = desc->fix_sscg_deviation(rate);
- clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
- CLK_IS_ROOT, rate);
+ clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL, 0,
+ rate);
WARN_ON(IS_ERR(clk_data.clks[1]));
/* Register fixed-factor clocks derived from CPU clock */
@@ -174,8 +174,7 @@ void __init mvebu_coreclk_setup(struct device_node *np,
2 + desc->num_ratios, &name);
rate = desc->get_refclk_freq(base);
clk_data.clks[2 + desc->num_ratios] =
- clk_register_fixed_rate(NULL, name, NULL,
- CLK_IS_ROOT, rate);
+ clk_register_fixed_rate(NULL, name, NULL, 0, rate);
WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios]));
}
@@ -199,8 +198,6 @@ struct clk_gating_ctrl {
u32 saved_reg;
};
-#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
-
static struct clk_gating_ctrl *ctrl;
static struct clk *clk_gating_get_src(
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index 3e0b52daa35f..4091f3cfee19 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -225,8 +225,7 @@ static int dove_divider_init(struct device *dev, void __iomem *base,
* Create the core PLL clock. We treat this as a fixed rate
* clock as we don't know any better, and documentation is sparse.
*/
- clk = clk_register_fixed_rate(dev, core_pll[0], NULL, CLK_IS_ROOT,
- 2000000000UL);
+ clk = clk_register_fixed_rate(dev, core_pll[0], NULL, 0, 2000000000UL);
if (IS_ERR(clk))
return PTR_ERR(clk);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 99550f25975e..a2a8d614039d 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -256,8 +256,6 @@ static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = {
11, 1, 0 },
};
-#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-
static struct clk *clk_muxing_get_src(
struct of_phandle_args *clkspec, void *data)
{
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index 049ee27d5a22..f75e989c578f 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -33,7 +33,7 @@ struct clk_div {
static inline struct clk_div *to_clk_div(struct clk_hw *hw)
{
- struct clk_divider *divider = container_of(hw, struct clk_divider, hw);
+ struct clk_divider *divider = to_clk_divider(hw);
return container_of(divider, struct clk_div, divider);
}
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
index a4590956d2a2..5a264a486ad9 100644
--- a/drivers/clk/mxs/clk.h
+++ b/drivers/clk/mxs/clk.h
@@ -38,7 +38,7 @@ struct clk *mxs_clk_frac(const char *name, const char *parent_name,
static inline struct clk *mxs_clk_fixed(const char *name, int rate)
{
- return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+ return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static inline struct clk *mxs_clk_gate(const char *name,
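
All the CLK_IS_ROOT removals in this series follow the same logic: the flag had become a no-op, since the clk core already treats a clock registered with a NULL parent as a root, so every call site now just passes 0 for flags, as mxs_clk_fixed() does above. A sketch of the resulting idiom, assuming a hypothetical 24 MHz crystal:

    #include <linux/clk-provider.h>

    static struct clk *register_osc24m_sketch(void)
    {
        /* the NULL parent_name alone marks this as a root clock */
        return clk_register_fixed_rate(NULL /* dev */, "osc24m",
                                       NULL /* parent */, 0 /* flags */,
                                       24000000);
    }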
diff --git a/drivers/clk/nxp/Makefile b/drivers/clk/nxp/Makefile
index 607bd48c6563..d456ee6cc3d3 100644
--- a/drivers/clk/nxp/Makefile
+++ b/drivers/clk/nxp/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_ARCH_LPC18XX) += clk-lpc18xx-cgu.o
obj-$(CONFIG_ARCH_LPC18XX) += clk-lpc18xx-ccu.o
+obj-$(CONFIG_ARCH_LPC18XX) += clk-lpc18xx-creg.o
obj-$(CONFIG_ARCH_LPC32XX) += clk-lpc32xx.o
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index 13aabbb3acbe..f7136b94fd0e 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -28,8 +28,6 @@
#define CCU_BRANCH_IS_BUS BIT(0)
#define CCU_BRANCH_HAVE_DIV2 BIT(1)
-#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
-
struct lpc18xx_branch_clk_data {
const char **name;
int num;
@@ -222,7 +220,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran
div->width = 1;
div_hw = &div->hw;
- div_ops = &clk_divider_ops;
+ div_ops = &clk_divider_ro_ops;
}
branch->gate.reg = branch->offset + reg_base;
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index c924572fc9bc..2531174b399e 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -605,7 +605,7 @@ static void __init lpc18xx_cgu_register_source_clks(struct device_node *np,
/* Register the internal 12 MHz RC oscillator (IRC) */
clk = clk_register_fixed_rate(NULL, clk_src_names[CLK_SRC_IRC],
- NULL, CLK_IS_ROOT, 12000000);
+ NULL, 0, 12000000);
if (IS_ERR(clk))
pr_warn("%s: failed to register irc clk\n", __func__);
diff --git a/drivers/clk/nxp/clk-lpc18xx-creg.c b/drivers/clk/nxp/clk-lpc18xx-creg.c
new file mode 100644
index 000000000000..d44b61afa2dc
--- /dev/null
+++ b/drivers/clk/nxp/clk-lpc18xx-creg.c
@@ -0,0 +1,226 @@
+/*
+ * Clk driver for NXP LPC18xx/43xx Configuration Registers (CREG)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define LPC18XX_CREG_CREG0 0x004
+#define LPC18XX_CREG_CREG0_EN1KHZ BIT(0)
+#define LPC18XX_CREG_CREG0_EN32KHZ BIT(1)
+#define LPC18XX_CREG_CREG0_RESET32KHZ BIT(2)
+#define LPC18XX_CREG_CREG0_PD32KHZ BIT(3)
+
+#define to_clk_creg(_hw) container_of(_hw, struct clk_creg_data, hw)
+
+enum {
+ CREG_CLK_1KHZ,
+ CREG_CLK_32KHZ,
+ CREG_CLK_MAX,
+};
+
+struct clk_creg_data {
+ struct clk_hw hw;
+ const char *name;
+ struct regmap *reg;
+ unsigned int en_mask;
+ const struct clk_ops *ops;
+};
+
+#define CREG_CLK(_name, _emask, _ops) \
+{ \
+ .name = _name, \
+ .en_mask = LPC18XX_CREG_CREG0_##_emask, \
+ .ops = &_ops, \
+}
+
+static int clk_creg_32k_prepare(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+ int ret;
+
+ ret = regmap_update_bits(creg->reg, LPC18XX_CREG_CREG0,
+ LPC18XX_CREG_CREG0_PD32KHZ |
+ LPC18XX_CREG_CREG0_RESET32KHZ, 0);
+
+ /*
+ * Powering up the 32k oscillator takes a long while,
+ * and sadly there is no status bit to poll.
+ */
+ msleep(2500);
+
+ return ret;
+}
+
+static void clk_creg_32k_unprepare(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+
+ regmap_update_bits(creg->reg, LPC18XX_CREG_CREG0,
+ LPC18XX_CREG_CREG0_PD32KHZ,
+ LPC18XX_CREG_CREG0_PD32KHZ);
+}
+
+static int clk_creg_32k_is_prepared(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+ u32 reg;
+
+ regmap_read(creg->reg, LPC18XX_CREG_CREG0, &reg);
+
+ return !(reg & LPC18XX_CREG_CREG0_PD32KHZ) &&
+ !(reg & LPC18XX_CREG_CREG0_RESET32KHZ);
+}
+
+static unsigned long clk_creg_1k_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate / 32;
+}
+
+static int clk_creg_enable(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+
+ return regmap_update_bits(creg->reg, LPC18XX_CREG_CREG0,
+ creg->en_mask, creg->en_mask);
+}
+
+static void clk_creg_disable(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+
+ regmap_update_bits(creg->reg, LPC18XX_CREG_CREG0,
+ creg->en_mask, 0);
+}
+
+static int clk_creg_is_enabled(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+ u32 reg;
+
+ regmap_read(creg->reg, LPC18XX_CREG_CREG0, &reg);
+
+ return !!(reg & creg->en_mask);
+}
+
+static const struct clk_ops clk_creg_32k = {
+ .enable = clk_creg_enable,
+ .disable = clk_creg_disable,
+ .is_enabled = clk_creg_is_enabled,
+ .prepare = clk_creg_32k_prepare,
+ .unprepare = clk_creg_32k_unprepare,
+ .is_prepared = clk_creg_32k_is_prepared,
+};
+
+static const struct clk_ops clk_creg_1k = {
+ .enable = clk_creg_enable,
+ .disable = clk_creg_disable,
+ .is_enabled = clk_creg_is_enabled,
+ .recalc_rate = clk_creg_1k_recalc_rate,
+};
+
+static struct clk_creg_data clk_creg_clocks[] = {
+ [CREG_CLK_1KHZ] = CREG_CLK("1khz_clk", EN1KHZ, clk_creg_1k),
+ [CREG_CLK_32KHZ] = CREG_CLK("32khz_clk", EN32KHZ, clk_creg_32k),
+};
+
+static struct clk *clk_register_creg_clk(struct device *dev,
+ struct clk_creg_data *creg_clk,
+ const char **parent_name,
+ struct regmap *syscon)
+{
+ struct clk_init_data init;
+
+ init.ops = creg_clk->ops;
+ init.name = creg_clk->name;
+ init.parent_names = parent_name;
+ init.num_parents = 1;
+
+ creg_clk->reg = syscon;
+ creg_clk->hw.init = &init;
+
+ if (dev)
+ return devm_clk_register(dev, &creg_clk->hw);
+
+ return clk_register(NULL, &creg_clk->hw);
+}
+
+static struct clk *clk_creg_early[CREG_CLK_MAX];
+static struct clk_onecell_data clk_creg_early_data = {
+ .clks = clk_creg_early,
+ .clk_num = CREG_CLK_MAX,
+};
+
+static void __init lpc18xx_creg_clk_init(struct device_node *np)
+{
+ const char *clk_32khz_parent;
+ struct regmap *syscon;
+
+ syscon = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(syscon)) {
+ pr_err("%s: syscon lookup failed\n", __func__);
+ return;
+ }
+
+ clk_32khz_parent = of_clk_get_parent_name(np, 0);
+
+ clk_creg_early[CREG_CLK_32KHZ] =
+ clk_register_creg_clk(NULL, &clk_creg_clocks[CREG_CLK_32KHZ],
+ &clk_32khz_parent, syscon);
+ clk_creg_early[CREG_CLK_1KHZ] = ERR_PTR(-EPROBE_DEFER);
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_creg_early_data);
+}
+CLK_OF_DECLARE(lpc18xx_creg_clk, "nxp,lpc1850-creg-clk", lpc18xx_creg_clk_init);
+
+static struct clk *clk_creg[CREG_CLK_MAX];
+static struct clk_onecell_data clk_creg_data = {
+ .clks = clk_creg,
+ .clk_num = CREG_CLK_MAX,
+};
+
+static int lpc18xx_creg_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct regmap *syscon;
+
+ syscon = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(syscon)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ return PTR_ERR(syscon);
+ }
+
+ clk_creg[CREG_CLK_32KHZ] = clk_creg_early[CREG_CLK_32KHZ];
+ clk_creg[CREG_CLK_1KHZ] =
+ clk_register_creg_clk(NULL, &clk_creg_clocks[CREG_CLK_1KHZ],
+ &clk_creg_clocks[CREG_CLK_32KHZ].name,
+ syscon);
+
+ return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_creg_data);
+}
+
+static const struct of_device_id lpc18xx_creg_clk_of_match[] = {
+ { .compatible = "nxp,lpc1850-creg-clk" },
+ {},
+};
+
+static struct platform_driver lpc18xx_creg_clk_driver = {
+ .probe = lpc18xx_creg_clk_probe,
+ .driver = {
+ .name = "lpc18xx-creg-clk",
+ .of_match_table = lpc18xx_creg_clk_of_match,
+ },
+};
+builtin_platform_driver(lpc18xx_creg_clk_driver);
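
The new CREG driver registers its clocks in two stages. CLK_OF_DECLARE runs at of_clk_init() time, early enough for the 32 kHz oscillator to feed boot-time timer setup, while the 1 kHz slot is filled with ERR_PTR(-EPROBE_DEFER) so consumers retry until the platform driver probes and registers that clock with the 32 kHz one as its parent. From a consumer's side the deferral looks roughly like this (a sketch, assuming the consumer's first clocks entry points at this provider with cell value 1, i.e. CREG_CLK_1KHZ, resolved through of_clk_src_onecell_get):

    struct clk *clk = of_clk_get(np, 0);
    if (IS_ERR(clk)) {
        if (PTR_ERR(clk) == -EPROBE_DEFER)
            return -EPROBE_DEFER;   /* provider not ready, probe again later */
        return PTR_ERR(clk);
    }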
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 10dd0fdaa474..481b2646b496 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -87,7 +87,7 @@ enum {
enum {
/* Start from the last defined clock in dt bindings */
- LPC32XX_CLK_ADC_DIV = LPC32XX_CLK_ADC + 1,
+ LPC32XX_CLK_ADC_DIV = LPC32XX_CLK_HCLK_PLL + 1,
LPC32XX_CLK_ADC_RTC,
LPC32XX_CLK_TEST1,
LPC32XX_CLK_TEST2,
@@ -96,7 +96,6 @@ enum {
LPC32XX_CLK_OSC,
LPC32XX_CLK_SYS,
LPC32XX_CLK_PLL397X,
- LPC32XX_CLK_HCLK_PLL,
LPC32XX_CLK_HCLK_DIV_PERIPH,
LPC32XX_CLK_HCLK_DIV,
LPC32XX_CLK_HCLK,
@@ -589,7 +588,8 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
- u64 m_i, m, n, p, o = rate, i = *parent_rate, d = (u64)rate << 6;
+ u64 m_i, o = rate, i = *parent_rate, d = (u64)rate << 6;
+ u64 m = 0, n = 0, p = 0;
int p_i, n_i;
pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
@@ -1429,6 +1429,8 @@ static struct clk * __init lpc32xx_clk_register(u32 id)
hw = &clk_hw->hw0.div.hw;
else if (clk_hw->type == CLK_GATE)
hw = &clk_hw->hw0.gate.hw;
+ else
+ return ERR_PTR(-EINVAL);
hw->init = &clk_init;
clk = clk_register(NULL, hw);
@@ -1515,7 +1517,7 @@ static void __init lpc32xx_clk_init(struct device_node *np)
return;
}
- for (i = 0; i < LPC32XX_CLK_MAX; i++) {
+ for (i = 1; i < LPC32XX_CLK_MAX; i++) {
clk[i] = lpc32xx_clk_register(i);
if (IS_ERR(clk[i])) {
pr_err("failed to register %s clock: %ld\n",
@@ -1526,9 +1528,6 @@ static void __init lpc32xx_clk_init(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- /* For 13MHz osc valid output range of PLL is from 156MHz to 266.5MHz */
- clk_set_rate(clk[LPC32XX_CLK_HCLK_PLL], 208000000);
-
/* Set 48MHz rate of USB PLL clock */
clk_set_rate(clk[LPC32XX_CLK_USB_PLL], 48000000);
@@ -1555,7 +1554,7 @@ static void __init lpc32xx_usb_clk_init(struct device_node *np)
return;
}
- for (i = 0; i < LPC32XX_USB_CLK_MAX; i++) {
+ for (i = 1; i < LPC32XX_USB_CLK_MAX; i++) {
usb_clk[i] = lpc32xx_clk_register(i + LPC32XX_CLK_USB_OFFSET);
if (IS_ERR(usb_clk[i])) {
pr_err("failed to register %s clock: %ld\n",
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index b7747229db9a..a98b98e2a9e4 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -84,7 +84,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info)
static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long cccr = CCCR;
+ unsigned long cccr = readl(CCCR);
unsigned int m = M_clk_mult[(cccr >> 5) & 0x03];
return parent_rate / m;
@@ -99,7 +99,7 @@ PARENTS(pxa25x_osc3) = { "osc_3_6864mhz", "osc_3_6864mhz" };
#define PXA25X_CKEN(dev_id, con_id, parents, mult, div, \
bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult, div, mult, div, \
- is_lp, &CKEN, CKEN_ ## bit, flags)
+ is_lp, CKEN, CKEN_ ## bit, flags)
#define PXA25X_PBUS95_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay) \
PXA25X_CKEN(dev_id, con_id, pxa25x_pbus95_parents, mult_hp, \
div_hp, bit, NULL, 0)
@@ -112,10 +112,10 @@ PARENTS(pxa25x_osc3) = { "osc_3_6864mhz", "osc_3_6864mhz" };
#define PXA25X_CKEN_1RATE(dev_id, con_id, bit, parents, delay) \
PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
- &CKEN, CKEN_ ## bit, 0)
+ CKEN, CKEN_ ## bit, 0)
#define PXA25X_CKEN_1RATE_AO(dev_id, con_id, bit, parents, delay) \
PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
- &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
+ CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
static struct desc_clk_cken pxa25x_clocks[] __initdata = {
PXA25X_PBUS95_CKEN("pxa2xx-mci.0", NULL, MMC, 1, 5, 0),
@@ -162,7 +162,7 @@ MUX_RO_RATE_RO_OPS(clk_pxa25x_core, "core");
static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long cccr = CCCR;
+ unsigned long cccr = readl(CCCR);
unsigned int n2 = N2_clk_mult[(cccr >> 7) & 0x07];
return (parent_rate / n2) * 2;
@@ -173,7 +173,7 @@ RATE_RO_OPS(clk_pxa25x_run, "run");
static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long clkcfg, cccr = CCCR;
+ unsigned long clkcfg, cccr = readl(CCCR);
unsigned int l, m, n2, t;
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -200,12 +200,10 @@ static void __init pxa25x_register_core(void)
static void __init pxa25x_register_plls(void)
{
clk_register_fixed_rate(NULL, "osc_3_6864mhz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
- 3686400);
+ CLK_GET_RATE_NOCACHE, 3686400);
clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
- 32768);
- clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+ CLK_GET_RATE_NOCACHE, 32768);
+ clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_95_85mhz", "osc_3_6864mhz",
0, 26, 1);
clk_register_fixed_factor(NULL, "ppll_147_46mhz", "osc_3_6864mhz",
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 5b82d30baf9f..c40b1804f58c 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -85,7 +85,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info)
bool pxa27x_is_ppll_disabled(void)
{
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
return ccsr & (1 << CCCR_PPDIS_BIT);
}
@@ -93,7 +93,7 @@ bool pxa27x_is_ppll_disabled(void)
#define PXA27X_CKEN(dev_id, con_id, parents, mult_hp, div_hp, \
bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, 1, 1, mult_hp, div_hp, \
- is_lp, &CKEN, CKEN_ ## bit, flags)
+ is_lp, CKEN, CKEN_ ## bit, flags)
#define PXA27X_PBUS_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay) \
PXA27X_CKEN(dev_id, con_id, pxa27x_pbus_parents, mult_hp, \
div_hp, bit, pxa27x_is_ppll_disabled, 0)
@@ -106,10 +106,10 @@ PARENTS(pxa27x_membus) = { "lcd_base", "lcd_base" };
#define PXA27X_CKEN_1RATE(dev_id, con_id, bit, parents, delay) \
PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
- &CKEN, CKEN_ ## bit, 0)
+ CKEN, CKEN_ ## bit, 0)
#define PXA27X_CKEN_1RATE_AO(dev_id, con_id, bit, parents, delay) \
PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
- &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
+ CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
static struct desc_clk_cken pxa27x_clocks[] __initdata = {
PXA27X_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 2, 42, 1),
@@ -151,7 +151,7 @@ static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
unsigned long clkcfg;
unsigned int t, ht;
unsigned int l, L, n2, N;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
t = clkcfg & (1 << 0);
@@ -171,8 +171,8 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned int l, osc_forced;
- unsigned long ccsr = CCSR;
- unsigned long cccr = CCCR;
+ unsigned long ccsr = readl(CCSR);
+ unsigned long cccr = readl(CCCR);
l = ccsr & CCSR_L_MASK;
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
@@ -193,7 +193,7 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_lcd_base_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -208,12 +208,12 @@ MUX_RO_RATE_RO_OPS(clk_pxa27x_lcd_base, "lcd_base");
static void __init pxa27x_register_plls(void)
{
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
13 * MHz);
clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
32768 * KHz);
- clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+ clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_312mhz", "osc_13mhz", 0, 24, 1);
}
@@ -222,7 +222,7 @@ static unsigned long clk_pxa27x_core_get_rate(struct clk_hw *hw,
{
unsigned long clkcfg;
unsigned int t, ht, b, osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -242,7 +242,7 @@ static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
{
unsigned long clkcfg;
unsigned int t, ht, b, osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -263,7 +263,7 @@ MUX_RO_RATE_RO_OPS(clk_pxa27x_core, "core");
static unsigned long clk_pxa27x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
unsigned int n2 = (ccsr & CCSR_N2_MASK) >> CCSR_N2_SHIFT;
return (parent_rate / n2) * 2;
@@ -285,7 +285,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
{
unsigned long clkcfg;
unsigned int b, osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -302,7 +302,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_system_bus_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -318,8 +318,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned int a, l, osc_forced;
- unsigned long cccr = CCCR;
- unsigned long ccsr = CCSR;
+ unsigned long cccr = readl(CCCR);
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
a = cccr & (1 << CCCR_A_BIT);
@@ -337,8 +337,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced, a;
- unsigned long cccr = CCCR;
- unsigned long ccsr = CCSR;
+ unsigned long cccr = readl(CCCR);
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
a = cccr & (1 << CCCR_A_BIT);
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4af4eed5f89f..42bdaa772be0 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -284,15 +284,15 @@ static void __init pxa3xx_register_core(void)
static void __init pxa3xx_register_plls(void)
{
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
13 * MHz);
clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
32768);
clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
120 * MHz);
- clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+ clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "spll_624mhz", "osc_13mhz", 0, 48, 1);
clk_register_fixed_factor(NULL, "ring_osc_60mhz", "ring_osc_120mhz",
0, 1, 2);
@@ -334,8 +334,7 @@ static void __init pxa3xx_base_clocks_init(void)
clk_register_clk_pxa3xx_system_bus();
clk_register_clk_pxa3xx_ac97();
clk_register_clk_pxa3xx_smemc();
- clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0,
- (void __iomem *)&OSCC, 11, 0, NULL);
+ clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0, OSCC, 11, 0, NULL);
clkdev_pxa_register(CLK_OSTIMER, "OSTIMER0", NULL,
clk_register_fixed_factor(NULL, "os-timer0",
"osc_13mhz", 0, 1, 4));
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index b552eceec2be..95e3b3e0fa1c 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -28,6 +28,14 @@ config APQ_MMCC_8084
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config IPQ_GCC_4019
+ tristate "IPQ4019 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq4019 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
config IPQ_GCC_806X
tristate "IPQ806x Global Clock Controller"
depends on COMMON_CLK_QCOM
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index dc4280b85db1..2a25f4e75f49 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -14,6 +14,7 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index bfbb28f450c2..67ce7c146a6a 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -638,7 +638,6 @@ static int clk_rcg_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
src = ns_to_src(&rcg->s, ns);
- f.pre_div = ns_to_pre_div(&rcg->p, ns) + 1;
for (i = 0; i < num_parents; i++) {
if (src == rcg->s.parent_map[i].cfg) {
@@ -647,6 +646,9 @@ static int clk_rcg_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
}
}
+ /* bypass the pre divider */
+ f.pre_div = 1;
+
/* let us find appropriate m/n values for this */
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
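
The pixel-clock fix above stops reading the pre-divider back from hardware and pins it to 1, so the output rate is shaped entirely by the M/N fractional counter that the following loop selects. With pre_div == 1 the arithmetic reduces to rate = parent * m / n; a tiny sketch with illustrative numbers (an 810 MHz parent and m/n = 1/30 giving the common 27 MHz pixel rate):

    static unsigned long long mn_rate(unsigned long long parent,
                                      unsigned int m, unsigned int n)
    {
        return parent * m / n;   /* pre_div == 1, only M/N scales the rate */
    }
    /* mn_rate(810000000ULL, 1, 30) == 27000000 (illustrative numbers) */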
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index c112ebaba70d..f7c226ab4307 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -119,7 +119,6 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
fixed->hw.init = &init_data;
init_data.name = path;
- init_data.flags = CLK_IS_ROOT;
init_data.ops = &clk_fixed_rate_ops;
clk = devm_clk_register(dev, &fixed->hw);
@@ -185,6 +184,7 @@ int qcom_cc_really_probe(struct platform_device *pdev,
struct clk **clks;
struct qcom_reset_controller *reset;
struct qcom_cc *cc;
+ struct gdsc_desc *scd;
size_t num_clks = desc->num_clks;
struct clk_regmap **rclks = desc->clks;
@@ -213,7 +213,11 @@ int qcom_cc_really_probe(struct platform_device *pdev,
if (ret)
return ret;
- devm_add_action(dev, qcom_cc_del_clk_provider, pdev->dev.of_node);
+ ret = devm_add_action_or_reset(dev, qcom_cc_del_clk_provider,
+ pdev->dev.of_node);
+
+ if (ret)
+ return ret;
reset = &cc->reset;
reset->rcdev.of_node = dev->of_node;
@@ -227,18 +231,28 @@ int qcom_cc_really_probe(struct platform_device *pdev,
if (ret)
return ret;
- devm_add_action(dev, qcom_cc_reset_unregister, &reset->rcdev);
+ ret = devm_add_action_or_reset(dev, qcom_cc_reset_unregister,
+ &reset->rcdev);
+
+ if (ret)
+ return ret;
if (desc->gdscs && desc->num_gdscs) {
- ret = gdsc_register(dev, desc->gdscs, desc->num_gdscs,
- &reset->rcdev, regmap);
+ scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
+ if (!scd)
+ return -ENOMEM;
+ scd->dev = dev;
+ scd->scs = desc->gdscs;
+ scd->num = desc->num_gdscs;
+ ret = gdsc_register(scd, &reset->rcdev, regmap);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
+ scd);
if (ret)
return ret;
}
- devm_add_action(dev, qcom_cc_gdsc_unregister, dev);
-
-
return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
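
The probe rework above hinges on one property of devm_add_action_or_reset(): unlike plain devm_add_action(), it runs the cleanup action immediately if it fails to register it, so the caller only has to propagate the error code and can never leave a resource without a teardown hook. The resulting pattern, sketched with illustrative names:

    static void my_teardown(void *data)
    {
        /* undo whatever was set up for @data */
    }

    static int my_probe_step(struct device *dev, void *res)
    {
        int ret = devm_add_action_or_reset(dev, my_teardown, res);
        if (ret)
            return ret;   /* my_teardown(res) has already run */
        return 0;
    }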
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
new file mode 100644
index 000000000000..3cd1af0af0d9
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -0,0 +1,1354 @@
+/*
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-ipq4019.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+enum {
+ P_XO,
+ P_FEPLL200,
+ P_FEPLL500,
+ P_DDRPLL,
+ P_FEPLLWCSS2G,
+ P_FEPLLWCSS5G,
+ P_FEPLL125DLY,
+ P_DDRPLLAPSS,
+};
+
+static struct parent_map gcc_xo_200_500_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 1 },
+ { P_FEPLL500, 2 },
+};
+
+static const char * const gcc_xo_200_500[] = {
+ "xo",
+ "fepll200",
+ "fepll500",
+};
+
+static struct parent_map gcc_xo_200_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 1 },
+};
+
+static const char * const gcc_xo_200[] = {
+ "xo",
+ "fepll200",
+};
+
+static struct parent_map gcc_xo_200_spi_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 2 },
+};
+
+static const char * const gcc_xo_200_spi[] = {
+ "xo",
+ "fepll200",
+};
+
+static struct parent_map gcc_xo_sdcc1_500_map[] = {
+ { P_XO, 0 },
+ { P_DDRPLL, 1 },
+ { P_FEPLL500, 2 },
+};
+
+static const char * const gcc_xo_sdcc1_500[] = {
+ "xo",
+ "ddrpll",
+ "fepll500",
+};
+
+static struct parent_map gcc_xo_wcss2g_map[] = {
+ { P_XO, 0 },
+ { P_FEPLLWCSS2G, 1 },
+};
+
+static const char * const gcc_xo_wcss2g[] = {
+ "xo",
+ "fepllwcss2g",
+};
+
+static struct parent_map gcc_xo_wcss5g_map[] = {
+ { P_XO, 0 },
+ { P_FEPLLWCSS5G, 1 },
+};
+
+static const char * const gcc_xo_wcss5g[] = {
+ "xo",
+ "fepllwcss5g",
+};
+
+static struct parent_map gcc_xo_125_dly_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL125DLY, 1 },
+};
+
+static const char * const gcc_xo_125_dly[] = {
+ "xo",
+ "fepll125dly",
+};
+
+static struct parent_map gcc_xo_ddr_500_200_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 3 },
+ { P_FEPLL500, 2 },
+ { P_DDRPLLAPSS, 1 },
+};
+
+static const char * const gcc_xo_ddr_500_200[] = {
+ "xo",
+ "fepll200",
+ "fepll500",
+ "ddrpllapss",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(200000000, P_FEPLL200, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 audio_clk_src = {
+ .cmd_rcgr = 0x1b000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_map,
+ .freq_tbl = ftbl_gcc_audio_pwm_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "audio_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+
+ },
+};
+
+static struct clk_branch gcc_audio_ahb_clk = {
+ .halt_reg = 0x1b010,
+ .clkr = {
+ .enable_reg = 0x1b010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_audio_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_audio_pwm_clk = {
+ .halt_reg = 0x1b00C,
+ .clkr = {
+ .enable_reg = 0x1b00C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_audio_pwm_clk",
+ .parent_names = (const char *[]){
+ "audio_clk_src",
+ },
+ .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_2_i2c_apps_clk[] = {
+ F(19200000, P_XO, 1, 2, 5),
+ F(24000000, P_XO, 1, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x200c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_2_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x2008,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_2_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x3010,
+ .clkr = {
+ .enable_reg = 0x3010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_2_spi_apps_clk[] = {
+ F(960000, P_XO, 12, 1, 4),
+ F(4800000, P_XO, 1, 1, 10),
+ F(9600000, P_XO, 1, 1, 5),
+ F(15000000, P_XO, 1, 1, 3),
+ F(19200000, P_XO, 1, 2, 5),
+ F(24000000, P_XO, 1, 1, 2),
+ F(48000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_spi_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_2_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_200_spi,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x2004,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x3014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_2_spi_apps_clk,
+ .parent_map = gcc_xo_200_spi_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_200_spi,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x300c,
+ .clkr = {
+ .enable_reg = 0x300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = {
+ F(1843200, P_FEPLL200, 1, 144, 15625),
+ F(3686400, P_FEPLL200, 1, 288, 15625),
+ F(7372800, P_FEPLL200, 1, 576, 15625),
+ F(14745600, P_FEPLL200, 1, 1152, 15625),
+ F(16000000, P_FEPLL200, 1, 2, 25),
+ F(24000000, P_XO, 1, 1, 2),
+ F(32000000, P_FEPLL200, 1, 4, 25),
+ F(40000000, P_FEPLL200, 1, 1, 5),
+ F(46400000, P_FEPLL200, 1, 29, 125),
+ F(48000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_2_apps_clk,
+ .parent_map = gcc_xo_200_spi_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_xo_200_spi,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x203c,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x3034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_2_apps_clk,
+ .parent_map = gcc_xo_200_spi_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_xo_200_spi,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x302c,
+ .clkr = {
+ .enable_reg = 0x302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp_clk[] = {
+ F(1250000, P_FEPLL200, 1, 16, 0),
+ F(2500000, P_FEPLL200, 1, 8, 0),
+ F(5000000, P_FEPLL200, 1, 4, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x8004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .parent_map = gcc_xo_200_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x8000,
+ .clkr = {
+ .enable_reg = 0x8000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x9004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .parent_map = gcc_xo_200_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x9000,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0xa004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .parent_map = gcc_xo_200_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0xa000,
+ .clkr = {
+ .enable_reg = 0xa000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
+ F(144000, P_XO, 1, 3, 240),
+ F(400000, P_XO, 1, 1, 0),
+ F(20000000, P_FEPLL500, 1, 1, 25),
+ F(25000000, P_FEPLL500, 1, 1, 20),
+ F(50000000, P_FEPLL500, 1, 1, 10),
+ F(100000000, P_FEPLL500, 1, 1, 5),
+ F(193000000, P_DDRPLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x18004,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk,
+ .parent_map = gcc_xo_sdcc1_500_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_xo_sdcc1_500,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_apps_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(200000000, P_FEPLL200, 1, 0, 0),
+ F(500000000, P_FEPLL500, 1, 0, 0),
+ F(626000000, P_DDRPLLAPSS, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apps_clk_src = {
+ .cmd_rcgr = 0x1900c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_apps_clk,
+ .parent_map = gcc_xo_ddr_500_200_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apps_clk_src",
+ .parent_names = gcc_xo_ddr_500_200,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(100000000, P_FEPLL200, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apps_ahb_clk_src = {
+ .cmd_rcgr = 0x19014,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_500_map,
+ .freq_tbl = ftbl_gcc_apps_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apps_ahb_clk_src",
+ .parent_names = gcc_xo_200_500,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_apss_ahb_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_ahb_clk",
+ .parent_names = (const char *[]){
+ "apps_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x1008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcd_xo_clk = {
+ .halt_reg = 0x2103c,
+ .clkr = {
+ .enable_reg = 0x2103c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcd_xo_clk",
+ .parent_names = (const char *[]){
+ "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .clkr = {
+ .enable_reg = 0x1300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_axi_clk",
+ .parent_names = (const char *[]){
+ "fepll125",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_clk",
+ .parent_names = (const char *[]){
+ "fepll125",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ess_clk = {
+ .halt_reg = 0x12010,
+ .clkr = {
+ .enable_reg = 0x12010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ess_clk",
+ .parent_names = (const char *[]){
+ "fephy_125m_dly_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_imem_axi_clk = {
+ .halt_reg = 0xe004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_imem_axi_clk",
+ .parent_names = (const char *[]){
+ "fepll200",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_imem_cfg_ahb_clk = {
+ .halt_reg = 0xe008,
+ .clkr = {
+ .enable_reg = 0xe008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_imem_cfg_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_ahb_clk = {
+ .halt_reg = 0x1d00c,
+ .clkr = {
+ .enable_reg = 0x1d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_axi_m_clk = {
+ .halt_reg = 0x1d004,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_axi_m_clk",
+ .parent_names = (const char *[]){
+ "fepll200",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_axi_s_clk = {
+ .halt_reg = 0x1d008,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_axi_s_clk",
+ .parent_names = (const char *[]){
+ "fepll200",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+ .halt_reg = 0x1c008,
+ .clkr = {
+ .enable_reg = 0x1c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qpic_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_clk = {
+ .halt_reg = 0x1c004,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qpic_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x18010,
+ .clkr = {
+ .enable_reg = 0x18010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x1800c,
+ .clkr = {
+ .enable_reg = 0x1800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_tlmm_ahb_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tlmm_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_master_clk = {
+ .halt_reg = 0x1e00c,
+ .clkr = {
+ .enable_reg = 0x1e00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2_master_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sleep_clk = {
+ .halt_reg = 0x1e010,
+ .clkr = {
+ .enable_reg = 0x1e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2_sleep_clk",
+ .parent_names = (const char *[]){
+ "gcc_sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_mock_utmi_clk = {
+ .halt_reg = 0x1e014,
+ .clkr = {
+ .enable_reg = 0x1e014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
+ F(2000000, P_FEPLL200, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1e000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_map,
+ .freq_tbl = ftbl_gcc_usb30_mock_utmi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_usb3_master_clk = {
+ .halt_reg = 0x1e028,
+ .clkr = {
+ .enable_reg = 0x1e028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_master_clk",
+ .parent_names = (const char *[]){
+ "fepll125",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sleep_clk = {
+ .halt_reg = 0x1e02C,
+ .clkr = {
+ .enable_reg = 0x1e02C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sleep_clk",
+ .parent_names = (const char *[]){
+ "gcc_sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mock_utmi_clk = {
+ .halt_reg = 0x1e030,
+ .clkr = {
+ .enable_reg = 0x1e030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
+ F(125000000, P_FEPLL125DLY, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 fephy_125m_dly_clk_src = {
+ .cmd_rcgr = 0x12000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_125_dly_map,
+ .freq_tbl = ftbl_gcc_fephy_dly_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "fephy_125m_dly_clk_src",
+ .parent_names = gcc_xo_125_dly,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(250000000, P_FEPLLWCSS2G, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 wcss2g_clk_src = {
+ .cmd_rcgr = 0x1f000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_wcss2g_clk,
+ .parent_map = gcc_xo_wcss2g_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "wcss2g_clk_src",
+ .parent_names = gcc_xo_wcss2g,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch gcc_wcss2g_clk = {
+ .halt_reg = 0x1f00C,
+ .clkr = {
+ .enable_reg = 0x1f00C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss2g_clk",
+ .parent_names = (const char *[]){
+ "wcss2g_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss2g_ref_clk = {
+ .halt_reg = 0x1f00C,
+ .clkr = {
+ .enable_reg = 0x1f00C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss2g_ref_clk",
+ .parent_names = (const char *[]){
+ "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss2g_rtc_clk = {
+ .halt_reg = 0x1f010,
+ .clkr = {
+ .enable_reg = 0x1f010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss2g_rtc_clk",
+ .parent_names = (const char *[]){
+ "gcc_sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(250000000, P_FEPLLWCSS5G, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 wcss5g_clk_src = {
+ .cmd_rcgr = 0x20000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_wcss5g_map,
+ .freq_tbl = ftbl_gcc_wcss5g_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "wcss5g_clk_src",
+ .parent_names = gcc_xo_wcss5g,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_wcss5g_clk = {
+ .halt_reg = 0x2000c,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss5g_clk",
+ .parent_names = (const char *[]){
+ "wcss5g_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss5g_ref_clk = {
+ .halt_reg = 0x2000c,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss5g_ref_clk",
+ .parent_names = (const char *[]){
+ "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss5g_rtc_clk = {
+ .halt_reg = 0x20010,
+ .clkr = {
+ .enable_reg = 0x20010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss5g_rtc_clk",
+ .parent_names = (const char *[]){
+ "gcc_sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_ipq4019_clocks[] = {
+ [AUDIO_CLK_SRC] = &audio_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [GCC_USB3_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [GCC_APPS_CLK_SRC] = &apps_clk_src.clkr,
+ [GCC_APPS_AHB_CLK_SRC] = &apps_ahb_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [FEPHY_125M_DLY_CLK_SRC] = &fephy_125m_dly_clk_src.clkr,
+ [WCSS2G_CLK_SRC] = &wcss2g_clk_src.clkr,
+ [WCSS5G_CLK_SRC] = &wcss5g_clk_src.clkr,
+ [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+ [GCC_AUDIO_AHB_CLK] = &gcc_audio_ahb_clk.clkr,
+ [GCC_AUDIO_PWM_CLK] = &gcc_audio_pwm_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_DCD_XO_CLK] = &gcc_dcd_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_ESS_CLK] = &gcc_ess_clk.clkr,
+ [GCC_IMEM_AXI_CLK] = &gcc_imem_axi_clk.clkr,
+ [GCC_IMEM_CFG_AHB_CLK] = &gcc_imem_cfg_ahb_clk.clkr,
+ [GCC_PCIE_AHB_CLK] = &gcc_pcie_ahb_clk.clkr,
+ [GCC_PCIE_AXI_M_CLK] = &gcc_pcie_axi_m_clk.clkr,
+ [GCC_PCIE_AXI_S_CLK] = &gcc_pcie_axi_s_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+ [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_TLMM_AHB_CLK] = &gcc_tlmm_ahb_clk.clkr,
+ [GCC_USB2_MASTER_CLK] = &gcc_usb2_master_clk.clkr,
+ [GCC_USB2_SLEEP_CLK] = &gcc_usb2_sleep_clk.clkr,
+ [GCC_USB2_MOCK_UTMI_CLK] = &gcc_usb2_mock_utmi_clk.clkr,
+ [GCC_USB3_MASTER_CLK] = &gcc_usb3_master_clk.clkr,
+ [GCC_USB3_SLEEP_CLK] = &gcc_usb3_sleep_clk.clkr,
+ [GCC_USB3_MOCK_UTMI_CLK] = &gcc_usb3_mock_utmi_clk.clkr,
+ [GCC_WCSS2G_CLK] = &gcc_wcss2g_clk.clkr,
+ [GCC_WCSS2G_REF_CLK] = &gcc_wcss2g_ref_clk.clkr,
+ [GCC_WCSS2G_RTC_CLK] = &gcc_wcss2g_rtc_clk.clkr,
+ [GCC_WCSS5G_CLK] = &gcc_wcss5g_clk.clkr,
+ [GCC_WCSS5G_REF_CLK] = &gcc_wcss5g_ref_clk.clkr,
+ [GCC_WCSS5G_RTC_CLK] = &gcc_wcss5g_rtc_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_ipq4019_resets[] = {
+ [WIFI0_CPU_INIT_RESET] = { 0x1f008, 5 },
+ [WIFI0_RADIO_SRIF_RESET] = { 0x1f008, 4 },
+ [WIFI0_RADIO_WARM_RESET] = { 0x1f008, 3 },
+ [WIFI0_RADIO_COLD_RESET] = { 0x1f008, 2 },
+ [WIFI0_CORE_WARM_RESET] = { 0x1f008, 1 },
+ [WIFI0_CORE_COLD_RESET] = { 0x1f008, 0 },
+ [WIFI1_CPU_INIT_RESET] = { 0x20008, 5 },
+ [WIFI1_RADIO_SRIF_RESET] = { 0x20008, 4 },
+ [WIFI1_RADIO_WARM_RESET] = { 0x20008, 3 },
+ [WIFI1_RADIO_COLD_RESET] = { 0x20008, 2 },
+ [WIFI1_CORE_WARM_RESET] = { 0x20008, 1 },
+ [WIFI1_CORE_COLD_RESET] = { 0x20008, 0 },
+ [USB3_UNIPHY_PHY_ARES] = { 0x1e038, 5 },
+ [USB3_HSPHY_POR_ARES] = { 0x1e038, 4 },
+ [USB3_HSPHY_S_ARES] = { 0x1e038, 2 },
+ [USB2_HSPHY_POR_ARES] = { 0x1e01c, 4 },
+ [USB2_HSPHY_S_ARES] = { 0x1e01c, 2 },
+ [PCIE_PHY_AHB_ARES] = { 0x1d010, 11 },
+ [PCIE_AHB_ARES] = { 0x1d010, 10 },
+ [PCIE_PWR_ARES] = { 0x1d010, 9 },
+ [PCIE_PIPE_STICKY_ARES] = { 0x1d010, 8 },
+ [PCIE_AXI_M_STICKY_ARES] = { 0x1d010, 7 },
+ [PCIE_PHY_ARES] = { 0x1d010, 6 },
+ [PCIE_PARF_XPU_ARES] = { 0x1d010, 5 },
+ [PCIE_AXI_S_XPU_ARES] = { 0x1d010, 4 },
+ [PCIE_AXI_M_VMIDMT_ARES] = { 0x1d010, 3 },
+ [PCIE_PIPE_ARES] = { 0x1d010, 2 },
+ [PCIE_AXI_S_ARES] = { 0x1d010, 1 },
+ [PCIE_AXI_M_ARES] = { 0x1d010, 0 },
+ [ESS_RESET] = { 0x12008, 0},
+ [GCC_BLSP1_BCR] = {0x01000, 0},
+ [GCC_BLSP1_QUP1_BCR] = {0x02000, 0},
+ [GCC_BLSP1_UART1_BCR] = {0x02038, 0},
+ [GCC_BLSP1_QUP2_BCR] = {0x03008, 0},
+ [GCC_BLSP1_UART2_BCR] = {0x03028, 0},
+ [GCC_BIMC_BCR] = {0x04000, 0},
+ [GCC_TLMM_BCR] = {0x05000, 0},
+ [GCC_IMEM_BCR] = {0x0E000, 0},
+ [GCC_ESS_BCR] = {0x12008, 0},
+ [GCC_PRNG_BCR] = {0x13000, 0},
+ [GCC_BOOT_ROM_BCR] = {0x13008, 0},
+ [GCC_CRYPTO_BCR] = {0x16000, 0},
+ [GCC_SDCC1_BCR] = {0x18000, 0},
+ [GCC_SEC_CTRL_BCR] = {0x1A000, 0},
+ [GCC_AUDIO_BCR] = {0x1B008, 0},
+ [GCC_QPIC_BCR] = {0x1C000, 0},
+ [GCC_PCIE_BCR] = {0x1D000, 0},
+ [GCC_USB2_BCR] = {0x1E008, 0},
+ [GCC_USB2_PHY_BCR] = {0x1E018, 0},
+ [GCC_USB3_BCR] = {0x1E024, 0},
+ [GCC_USB3_PHY_BCR] = {0x1E034, 0},
+ [GCC_SYSTEM_NOC_BCR] = {0x21000, 0},
+ [GCC_PCNOC_BCR] = {0x2102C, 0},
+ [GCC_DCD_BCR] = {0x21038, 0},
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = {0x21064, 0},
+ [GCC_SNOC_BUS_TIMEOUT1_BCR] = {0x2106C, 0},
+ [GCC_SNOC_BUS_TIMEOUT2_BCR] = {0x21074, 0},
+ [GCC_SNOC_BUS_TIMEOUT3_BCR] = {0x2107C, 0},
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = {0x21084, 0},
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = {0x2108C, 0},
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = {0x21094, 0},
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = {0x2109C, 0},
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = {0x210A4, 0},
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = {0x210AC, 0},
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = {0x210B4, 0},
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = {0x210BC, 0},
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = {0x210C4, 0},
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = {0x210CC, 0},
+ [GCC_TCSR_BCR] = {0x22000, 0},
+ [GCC_MPM_BCR] = {0x24000, 0},
+ [GCC_SPDM_BCR] = {0x25000, 0},
+};
+
+static const struct regmap_config gcc_ipq4019_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x2dfff,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_ipq4019_desc = {
+ .config = &gcc_ipq4019_regmap_config,
+ .clks = gcc_ipq4019_clocks,
+ .num_clks = ARRAY_SIZE(gcc_ipq4019_clocks),
+ .resets = gcc_ipq4019_resets,
+ .num_resets = ARRAY_SIZE(gcc_ipq4019_resets),
+};
+
+static const struct of_device_id gcc_ipq4019_match_table[] = {
+ { .compatible = "qcom,gcc-ipq4019" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table);
+
+static int gcc_ipq4019_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000);
+
+ return qcom_cc_probe(pdev, &gcc_ipq4019_desc);
+}
+
+static struct platform_driver gcc_ipq4019_driver = {
+ .probe = gcc_ipq4019_probe,
+ .driver = {
+ .name = "qcom,gcc-ipq4019",
+ .owner = THIS_MODULE,
+ .of_match_table = gcc_ipq4019_match_table,
+ },
+};
+
+static int __init gcc_ipq4019_init(void)
+{
+ return platform_driver_register(&gcc_ipq4019_driver);
+}
+core_initcall(gcc_ipq4019_init);
+
+static void __exit gcc_ipq4019_exit(void)
+{
+ platform_driver_unregister(&gcc_ipq4019_driver);
+}
+module_exit(gcc_ipq4019_exit);
+
+MODULE_ALIAS("platform:gcc-ipq4019");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM GCC IPQ4019 driver");
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index dd5402bac620..52a7d3959875 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -890,7 +890,6 @@ static struct clk_branch gsbi1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -906,7 +905,6 @@ static struct clk_branch gsbi2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -922,7 +920,6 @@ static struct clk_branch gsbi4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -938,7 +935,6 @@ static struct clk_branch gsbi5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -954,7 +950,6 @@ static struct clk_branch gsbi6_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi6_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -970,7 +965,6 @@ static struct clk_branch gsbi7_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi7_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1144,7 +1138,6 @@ static struct clk_branch pmem_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmem_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1308,7 +1301,6 @@ static struct clk_branch sdc1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1324,7 +1316,6 @@ static struct clk_branch sdc3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1394,7 +1385,6 @@ static struct clk_branch tsif_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "tsif_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1410,7 +1400,6 @@ static struct clk_branch dma_bam_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "dma_bam_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1425,7 +1414,6 @@ static struct clk_branch adm0_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1442,7 +1430,6 @@ static struct clk_branch adm0_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1457,7 +1444,6 @@ static struct clk_branch pmic_arb0_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb0_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1472,7 +1458,6 @@ static struct clk_branch pmic_arb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1487,7 +1472,6 @@ static struct clk_branch pmic_ssbi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_ssbi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1504,7 +1488,6 @@ static struct clk_branch rpm_msg_ram_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "rpm_msg_ram_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1563,7 +1546,6 @@ static struct clk_branch pcie_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1577,7 +1559,6 @@ static struct clk_branch pcie_aux_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_aux_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1591,7 +1572,6 @@ static struct clk_branch pcie_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1605,7 +1585,6 @@ static struct clk_branch pcie_phy_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_phy_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1659,7 +1638,6 @@ static struct clk_branch pcie1_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1673,7 +1651,6 @@ static struct clk_branch pcie1_aux_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_aux_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1687,7 +1664,6 @@ static struct clk_branch pcie1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1701,7 +1677,6 @@ static struct clk_branch pcie1_phy_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_phy_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1755,7 +1730,6 @@ static struct clk_branch pcie2_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1769,7 +1743,6 @@ static struct clk_branch pcie2_aux_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_aux_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1783,7 +1756,6 @@ static struct clk_branch pcie2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1797,7 +1769,6 @@ static struct clk_branch pcie2_phy_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_phy_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1887,7 +1858,6 @@ static struct clk_branch sata_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1901,7 +1871,6 @@ static struct clk_branch sata_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1915,7 +1884,6 @@ static struct clk_branch sfab_sata_s_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sfab_sata_s_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1929,7 +1897,6 @@ static struct clk_branch sata_phy_cfg_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_phy_cfg_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2139,7 +2106,6 @@ static struct clk_branch usb_hs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2218,7 +2184,6 @@ static struct clk_branch usb_fs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2234,7 +2199,6 @@ static struct clk_branch ebi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "ebi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2248,7 +2212,6 @@ static struct clk_branch ebi2_aon_clk = {
.hw.init = &(struct clk_init_data){
.name = "ebi2_always_on_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
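
This hunk, like the matching hunks in the other qcom drivers below, retires CLK_IS_ROOT: the common clock framework already treats a clock that registers no parents as a root, so the flag carried no information. A sketch of an equivalent root branch clock without the flag, with illustrative offsets:

/* Sketch: with no parent_names, num_parents stays 0 and the framework
 * treats the clock as a root -- no flag required. */
static struct clk_branch demo_h_clk = {
    .halt_reg = 0x2fd8,
    .clkr = {
        .enable_reg = 0x3c40,
        .enable_mask = BIT(14),
        .hw.init = &(struct clk_init_data){
            .name = "demo_h_clk",
            .ops = &clk_branch_ops,
        },
    },
};
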
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index ad413036f7c7..6dc55864979c 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -1479,7 +1479,6 @@ static struct clk_branch pmem_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmem_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2027,7 +2026,6 @@ static struct clk_branch gsbi1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2041,7 +2039,6 @@ static struct clk_branch gsbi2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2055,7 +2052,6 @@ static struct clk_branch gsbi3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2069,7 +2065,6 @@ static struct clk_branch gsbi4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2083,7 +2078,6 @@ static struct clk_branch gsbi5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2097,7 +2091,6 @@ static struct clk_branch gsbi6_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi6_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2111,7 +2104,6 @@ static struct clk_branch gsbi7_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi7_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2125,7 +2117,6 @@ static struct clk_branch gsbi8_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi8_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2139,7 +2130,6 @@ static struct clk_branch gsbi9_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi9_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2153,7 +2143,6 @@ static struct clk_branch gsbi10_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi10_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2167,7 +2156,6 @@ static struct clk_branch gsbi11_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi11_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2181,7 +2169,6 @@ static struct clk_branch gsbi12_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi12_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2195,7 +2182,6 @@ static struct clk_branch tsif_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "tsif_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2209,7 +2195,6 @@ static struct clk_branch usb_fs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2223,7 +2208,6 @@ static struct clk_branch usb_fs2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2237,7 +2221,6 @@ static struct clk_branch usb_hs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2251,7 +2234,6 @@ static struct clk_branch sdc1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2265,7 +2247,6 @@ static struct clk_branch sdc2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2279,7 +2260,6 @@ static struct clk_branch sdc3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2293,7 +2273,6 @@ static struct clk_branch sdc4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2307,7 +2286,6 @@ static struct clk_branch sdc5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2322,7 +2300,6 @@ static struct clk_branch adm0_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2337,7 +2314,6 @@ static struct clk_branch adm0_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2352,7 +2328,6 @@ static struct clk_branch adm1_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm1_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2367,7 +2342,6 @@ static struct clk_branch adm1_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm1_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2382,7 +2356,6 @@ static struct clk_branch modem_ahb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "modem_ahb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2397,7 +2370,6 @@ static struct clk_branch modem_ahb2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "modem_ahb2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2412,7 +2384,6 @@ static struct clk_branch pmic_arb0_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb0_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2427,7 +2398,6 @@ static struct clk_branch pmic_arb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2442,7 +2412,6 @@ static struct clk_branch pmic_ssbi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_ssbi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2459,7 +2428,6 @@ static struct clk_branch rpm_msg_ram_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "rpm_msg_ram_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 8cc9b2868b41..9c29080a84d8 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -2590,6 +2590,23 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
},
};
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_names = (const char *[]){
+ "bimc_ddr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_oxili_ahb_clk = {
.halt_reg = 0x59028,
.clkr = {
@@ -3227,6 +3244,7 @@ static struct clk_regmap *gcc_msm8916_clocks[] = {
[GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK] = &gcc_ultaudio_lpaif_sec_i2s_clk.clkr,
[GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK] = &gcc_ultaudio_lpaif_aux_i2s_clk.clkr,
[GCC_CODEC_DIGCODEC_CLK] = &gcc_codec_digcodec_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
};
static struct gdsc *gcc_msm8916_gdscs[] = {
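
For the new gcc_mss_q6_bimc_axi_clk above, enable_reg and halt_reg share offset 0x49004. Going by how clk_branch2_ops behaves elsewhere in these drivers (an assumption -- the ops body is not part of this patch), enabling means setting BIT(0) and then polling the branch status bit until the clock reports running. A rough sketch:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/regmap.h>

/* Sketch, not the driver's code: set the enable bit, then poll the
 * status bit (bit 31 typically reads 1 while a branch is off). The
 * offset, bit positions and timeout are illustrative. */
static int demo_branch_enable(struct regmap *regmap)
{
    unsigned int val, i;

    regmap_update_bits(regmap, 0x49004, BIT(0), BIT(0));

    for (i = 0; i < 200; i++) {
        regmap_read(regmap, 0x49004, &val);
        if (!(val & BIT(31)))
            return 0; /* branch is running */
        udelay(1);
    }
    return -ETIMEDOUT;
}
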
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 983dd7dc89a7..eb551c75fba6 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -1546,7 +1546,6 @@ static struct clk_branch pmem_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmem_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2143,7 +2142,6 @@ static struct clk_branch usb_hsic_hsio_cal_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_hsio_cal_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2293,7 +2291,6 @@ static struct clk_branch ce1_core_clk = {
.hw.init = &(struct clk_init_data){
.name = "ce1_core_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2307,7 +2304,6 @@ static struct clk_branch ce1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "ce1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2323,7 +2319,6 @@ static struct clk_branch dma_bam_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "dma_bam_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2339,7 +2334,6 @@ static struct clk_branch gsbi1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2355,7 +2349,6 @@ static struct clk_branch gsbi2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2371,7 +2364,6 @@ static struct clk_branch gsbi3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2387,7 +2379,6 @@ static struct clk_branch gsbi4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2403,7 +2394,6 @@ static struct clk_branch gsbi5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2419,7 +2409,6 @@ static struct clk_branch gsbi6_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi6_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2435,7 +2424,6 @@ static struct clk_branch gsbi7_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi7_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2451,7 +2439,6 @@ static struct clk_branch gsbi8_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi8_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2467,7 +2454,6 @@ static struct clk_branch gsbi9_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi9_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2483,7 +2469,6 @@ static struct clk_branch gsbi10_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi10_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2499,7 +2484,6 @@ static struct clk_branch gsbi11_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi11_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2515,7 +2499,6 @@ static struct clk_branch gsbi12_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi12_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2531,7 +2514,6 @@ static struct clk_branch tsif_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "tsif_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2545,7 +2527,6 @@ static struct clk_branch usb_fs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2559,7 +2540,6 @@ static struct clk_branch usb_fs2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2575,7 +2555,6 @@ static struct clk_branch usb_hs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2589,7 +2568,6 @@ static struct clk_branch usb_hs3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2603,7 +2581,6 @@ static struct clk_branch usb_hs4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2617,7 +2594,6 @@ static struct clk_branch usb_hsic_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2633,7 +2609,6 @@ static struct clk_branch sdc1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2649,7 +2624,6 @@ static struct clk_branch sdc2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2665,7 +2639,6 @@ static struct clk_branch sdc3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2681,7 +2654,6 @@ static struct clk_branch sdc4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2697,7 +2669,6 @@ static struct clk_branch sdc5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2712,7 +2683,6 @@ static struct clk_branch adm0_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2729,7 +2699,6 @@ static struct clk_branch adm0_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2753,7 +2722,7 @@ static struct clk_rcg ce3_src = {
},
.freq_tbl = clk_tbl_ce3,
.clkr = {
- .enable_reg = 0x2c08,
+ .enable_reg = 0x36c0,
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "ce3_src",
@@ -2769,7 +2738,7 @@ static struct clk_branch ce3_core_clk = {
.halt_reg = 0x2fdc,
.halt_bit = 5,
.clkr = {
- .enable_reg = 0x36c4,
+ .enable_reg = 0x36cc,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "ce3_core_clk",
@@ -2883,7 +2852,6 @@ static struct clk_branch sata_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2897,7 +2865,6 @@ static struct clk_branch sata_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2911,7 +2878,6 @@ static struct clk_branch sfab_sata_s_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sfab_sata_s_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2925,7 +2891,6 @@ static struct clk_branch sata_phy_cfg_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_phy_cfg_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2939,7 +2904,6 @@ static struct clk_branch pcie_phy_ref_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_phy_ref_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2953,7 +2917,6 @@ static struct clk_branch pcie_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2967,7 +2930,6 @@ static struct clk_branch pcie_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2982,7 +2944,6 @@ static struct clk_branch pmic_arb0_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb0_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2997,7 +2958,6 @@ static struct clk_branch pmic_arb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -3012,7 +2972,6 @@ static struct clk_branch pmic_ssbi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_ssbi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -3029,7 +2988,6 @@ static struct clk_branch rpm_msg_ram_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "rpm_msg_ram_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 335952db309b..00915209e7c5 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -1965,7 +1965,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
- .flags = CLK_IS_ROOT,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 16d7c323db49..c9b96f318d9c 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -30,6 +30,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
@@ -1320,7 +1321,7 @@ static struct clk_branch gcc_mmss_bimc_gfx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_bimc_gfx_clk",
- .flags = CLK_SET_RATE_PARENT | CLK_IS_ROOT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2314,7 +2315,7 @@ static struct clk_branch gcc_bimc_gfx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gfx_clk",
- .flags = CLK_SET_RATE_PARENT | CLK_IS_ROOT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2814,7 +2815,6 @@ static struct clk_branch gcc_ufs_sys_clk_core_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_sys_clk_core_clk",
.ops = &clk_branch2_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2827,7 +2827,6 @@ static struct clk_branch gcc_ufs_tx_symbol_clk_core_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_clk_core_clk",
.ops = &clk_branch2_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -3059,6 +3058,83 @@ static struct clk_hw *gcc_msm8996_hws[] = {
&ufs_ice_core_postdiv_clk_src.hw,
};
+static struct gdsc aggre0_noc_gdsc = {
+ .gdscr = 0x81004,
+ .gds_hw_ctrl = 0x81028,
+ .pd = {
+ .name = "aggre0_noc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre0_noc_gdsc = {
+ .gdscr = 0x7d024,
+ .pd = {
+ .name = "hlos1_vote_aggre0_noc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_adsp_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_lpass_adsp",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_core_gdsc = {
+ .gdscr = 0x7d038,
+ .pd = {
+ .name = "hlos1_vote_lpass_core",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie1_gdsc = {
+ .gdscr = 0x6d004,
+ .pd = {
+ .name = "pcie1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie2_gdsc = {
+ .gdscr = 0x6e004,
+ .pd = {
+ .name = "pcie2",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *gcc_msm8996_clocks[] = {
[GPLL0_EARLY] = &gpll0_early.clkr,
[GPLL0] = &gpll0.clkr,
@@ -3245,6 +3321,18 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
};
+static struct gdsc *gcc_msm8996_gdscs[] = {
+ [AGGRE0_NOC_GDSC] = &aggre0_noc_gdsc,
+ [HLOS1_VOTE_AGGRE0_NOC_GDSC] = &hlos1_vote_aggre0_noc_gdsc,
+ [HLOS1_VOTE_LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp_gdsc,
+ [HLOS1_VOTE_LPASS_CORE_GDSC] = &hlos1_vote_lpass_core_gdsc,
+ [USB30_GDSC] = &usb30_gdsc,
+ [PCIE0_GDSC] = &pcie0_gdsc,
+ [PCIE1_GDSC] = &pcie1_gdsc,
+ [PCIE2_GDSC] = &pcie2_gdsc,
+ [UFS_GDSC] = &ufs_gdsc,
+};
+
static const struct qcom_reset_map gcc_msm8996_resets[] = {
[GCC_SYSTEM_NOC_BCR] = { 0x4000 },
[GCC_CONFIG_NOC_BCR] = { 0x5000 },
@@ -3363,6 +3451,8 @@ static const struct qcom_cc_desc gcc_msm8996_desc = {
.num_clks = ARRAY_SIZE(gcc_msm8996_clocks),
.resets = gcc_msm8996_resets,
.num_resets = ARRAY_SIZE(gcc_msm8996_resets),
+ .gdscs = gcc_msm8996_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs),
};
static const struct of_device_id gcc_msm8996_match_table[] = {
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index da9fad8b642b..f12d7b2bddd7 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
@@ -42,12 +43,12 @@
#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
-static int gdsc_is_enabled(struct gdsc *sc)
+static int gdsc_is_enabled(struct gdsc *sc, unsigned int reg)
{
u32 val;
int ret;
- ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ ret = regmap_read(sc->regmap, reg, &val);
if (ret)
return ret;
@@ -58,28 +59,46 @@ static int gdsc_toggle_logic(struct gdsc *sc, bool en)
{
int ret;
u32 val = en ? 0 : SW_COLLAPSE_MASK;
- u32 check = en ? PWR_ON_MASK : 0;
- unsigned long timeout;
+ ktime_t start;
+ unsigned int status_reg = sc->gdscr;
ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
if (ret)
return ret;
- timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
- do {
- ret = regmap_read(sc->regmap, sc->gdscr, &val);
- if (ret)
- return ret;
+ /* If disabling votable gdscs, don't poll on status */
+ if ((sc->flags & VOTABLE) && !en) {
+ /*
+ * Add a short delay here to ensure that an enable
+ * right after it was disabled does not put it in an
+ * unknown state
+ */
+ udelay(TIMEOUT_US);
+ return 0;
+ }
- if ((val & PWR_ON_MASK) == check)
- return 0;
- } while (time_before(jiffies, timeout));
+ if (sc->gds_hw_ctrl) {
+ status_reg = sc->gds_hw_ctrl;
+ /*
+ * The gds hw controller asserts/de-asserts the status bit soon
+ * after it receives a power on/off request from a master.
+ * The controller then takes around 8 xo cycles to start its
+ * internal state machine and update the status bit. During
+ * this time, the status bit does not reflect the true status
+ * of the core.
+ * Add a delay of 1 us between writing to the SW_COLLAPSE bit
+ * and polling the status bit.
+ */
+ udelay(1);
+ }
- ret = regmap_read(sc->regmap, sc->gdscr, &val);
- if (ret)
- return ret;
+ start = ktime_get();
+ do {
+ if (gdsc_is_enabled(sc, status_reg) == en)
+ return 0;
+ } while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);
- if ((val & PWR_ON_MASK) == check)
+ if (gdsc_is_enabled(sc, status_reg) == en)
return 0;
return -ETIMEDOUT;
@@ -165,6 +184,7 @@ static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
int on, ret;
+ unsigned int reg;
/*
* Disable HW trigger: collapse/restore occur based on registers writes.
@@ -185,10 +205,18 @@ static int gdsc_init(struct gdsc *sc)
return ret;
}
- on = gdsc_is_enabled(sc);
+ reg = sc->gds_hw_ctrl ? sc->gds_hw_ctrl : sc->gdscr;
+ on = gdsc_is_enabled(sc, reg);
if (on < 0)
return on;
+ /*
+ * Votable GDSCs can be ON due to Vote from other masters.
+ * If a Votable GDSC is ON, make sure we have a Vote.
+ */
+ if ((sc->flags & VOTABLE) && on)
+ gdsc_enable(&sc->pd);
+
if (on || (sc->pwrsts & PWRSTS_RET))
gdsc_force_mem_on(sc);
else
@@ -201,11 +229,14 @@ static int gdsc_init(struct gdsc *sc)
return 0;
}
-int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
+int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev, struct regmap *regmap)
{
int i, ret;
struct genpd_onecell_data *data;
+ struct device *dev = desc->dev;
+ struct gdsc **scs = desc->scs;
+ size_t num = desc->num;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -228,10 +259,30 @@ int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
data->domains[i] = &scs[i]->pd;
}
+ /* Add subdomains */
+ for (i = 0; i < num; i++) {
+ if (!scs[i])
+ continue;
+ if (scs[i]->parent)
+ pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
+ }
+
return of_genpd_add_provider_onecell(dev->of_node, data);
}
-void gdsc_unregister(struct device *dev)
+void gdsc_unregister(struct gdsc_desc *desc)
{
+ int i;
+ struct device *dev = desc->dev;
+ struct gdsc **scs = desc->scs;
+ size_t num = desc->num;
+
+ /* Remove subdomains */
+ for (i = 0; i < num; i++) {
+ if (!scs[i])
+ continue;
+ if (scs[i]->parent)
+ pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
+ }
of_genpd_del_provider(dev->of_node);
}
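
Two things change in the gdsc.c hunk above: disabling a votable GDSC no longer polls status (another master's vote may legitimately keep it on), and the timeout loop moves from jiffies to ktime. The latter matters because TIMEOUT_US is on the order of 100 us (going by the file's existing define, which this hunk does not show), far below one jiffy, so usecs_to_jiffies() rounded the old deadline up to a whole tick. A generalized sketch of the new pattern, including the final re-check that guards against being preempted past the deadline:

#include <linux/errno.h>
#include <linux/ktime.h>

/* Sketch of the poll above; the predicate and timeout are illustrative. */
static int demo_poll(bool (*done)(void *ctx), void *ctx, s64 timeout_us)
{
    ktime_t start = ktime_get();

    do {
        if (done(ctx))
            return 0;
    } while (ktime_us_delta(ktime_get(), start) < timeout_us);

    /* re-check once: we may have been preempted across the deadline */
    return done(ctx) ? 0 : -ETIMEDOUT;
}
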
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 5ded26884f08..3bf497c36bdf 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -20,18 +20,12 @@
struct regmap;
struct reset_controller_dev;
-/* Powerdomain allowable state bitfields */
-#define PWRSTS_OFF BIT(0)
-#define PWRSTS_RET BIT(1)
-#define PWRSTS_ON BIT(2)
-#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
-#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
-
/**
* struct gdsc - Globally Distributed Switch Controller
* @pd: generic power domain
* @regmap: regmap for MMIO accesses
* @gdscr: gsdc control register
+ * @gds_hw_ctrl: gds_hw_ctrl register
* @cxcs: offsets of branch registers to toggle mem/periph bits in
* @cxc_count: number of @cxcs
* @pwrsts: Possible powerdomain power states
@@ -41,28 +35,44 @@ struct reset_controller_dev;
*/
struct gdsc {
struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
struct regmap *regmap;
unsigned int gdscr;
+ unsigned int gds_hw_ctrl;
unsigned int *cxcs;
unsigned int cxc_count;
const u8 pwrsts;
+/* Powerdomain allowable state bitfields */
+#define PWRSTS_OFF BIT(0)
+#define PWRSTS_RET BIT(1)
+#define PWRSTS_ON BIT(2)
+#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
+#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
+ const u8 flags;
+#define VOTABLE BIT(0)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
};
+struct gdsc_desc {
+ struct device *dev;
+ struct gdsc **scs;
+ size_t num;
+};
+
#ifdef CONFIG_QCOM_GDSC
-int gdsc_register(struct device *, struct gdsc **, size_t n,
- struct reset_controller_dev *, struct regmap *);
-void gdsc_unregister(struct device *);
+int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *,
+ struct regmap *);
+void gdsc_unregister(struct gdsc_desc *desc);
#else
-static inline int gdsc_register(struct device *d, struct gdsc **g, size_t n,
+static inline int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev,
struct regmap *r)
{
return -ENOSYS;
}
-static inline void gdsc_unregister(struct device *d) {};
+static inline void gdsc_unregister(struct gdsc_desc *desc) {};
#endif /* CONFIG_QCOM_GDSC */
#endif /* __QCOM_GDSC_H__ */
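
With the descriptor, callers hand gdsc_register() one object instead of three loose arguments, and the same object is what gdsc_unregister() consumes later -- so it must outlive registration. A sketch of the expected usage with illustrative names; in a real driver the descriptor would live in the controller's own state:

/* Sketch: packaging GDSCs for the new gdsc_register(). demo_gdscs would
 * be filled with real struct gdsc pointers elsewhere. */
static struct gdsc *demo_gdscs[2];
static struct gdsc_desc demo_desc;

static int demo_register(struct device *dev,
             struct reset_controller_dev *rcdev,
             struct regmap *regmap)
{
    demo_desc.dev = dev;
    demo_desc.scs = demo_gdscs;
    demo_desc.num = ARRAY_SIZE(demo_gdscs);

    return gdsc_register(&demo_desc, rcdev, regmap);
}
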
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 00e36192a1de..7f21421c87d6 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -1789,7 +1789,6 @@ static struct clk_branch gmem_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gmem_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1805,7 +1804,6 @@ static struct clk_branch ijpeg_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "ijpeg_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1821,7 +1819,6 @@ static struct clk_branch mmss_imem_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "mmss_imem_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1835,7 +1832,6 @@ static struct clk_branch jpegd_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "jpegd_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1851,7 +1847,6 @@ static struct clk_branch vcodec_axi_b_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_axi_b_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1867,7 +1862,6 @@ static struct clk_branch vcodec_axi_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_axi_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1883,7 +1877,6 @@ static struct clk_branch vcodec_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1897,7 +1890,6 @@ static struct clk_branch vfe_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vfe_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1913,7 +1905,6 @@ static struct clk_branch mdp_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "mdp_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1929,7 +1920,6 @@ static struct clk_branch rot_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "rot_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1945,7 +1935,6 @@ static struct clk_branch vcap_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcap_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1961,7 +1950,6 @@ static struct clk_branch vpe_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vpe_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1977,7 +1965,6 @@ static struct clk_branch gfx3d_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx3d_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1991,7 +1978,6 @@ static struct clk_branch amp_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "amp_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2005,7 +1991,6 @@ static struct clk_branch csi_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "csi_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2019,7 +2004,6 @@ static struct clk_branch dsi_m_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi_m_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2035,7 +2019,6 @@ static struct clk_branch dsi_s_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi_s_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2049,7 +2032,6 @@ static struct clk_branch dsi2_m_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi2_m_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2065,7 +2047,6 @@ static struct clk_branch dsi2_s_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi2_s_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2425,7 +2406,6 @@ static struct clk_branch gfx2d0_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx2d0_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2441,7 +2421,6 @@ static struct clk_branch gfx2d1_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx2d1_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2457,7 +2436,6 @@ static struct clk_branch gfx3d_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx3d_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2473,7 +2451,6 @@ static struct clk_branch hdmi_m_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_m_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2489,7 +2466,6 @@ static struct clk_branch hdmi_s_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_s_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2503,7 +2479,6 @@ static struct clk_branch ijpeg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "ijpeg_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2519,7 +2494,6 @@ static struct clk_branch mmss_imem_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "mmss_imem_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2533,7 +2507,6 @@ static struct clk_branch jpegd_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "jpegd_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2547,7 +2520,6 @@ static struct clk_branch mdp_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "mdp_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2561,7 +2533,6 @@ static struct clk_branch rot_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "rot_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2577,7 +2548,6 @@ static struct clk_branch smmu_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "smmu_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2591,7 +2561,6 @@ static struct clk_branch tv_enc_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "tv_enc_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2605,7 +2574,6 @@ static struct clk_branch vcap_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcap_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2621,7 +2589,6 @@ static struct clk_branch vcodec_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2635,7 +2602,6 @@ static struct clk_branch vfe_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vfe_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2649,7 +2615,6 @@ static struct clk_branch vpe_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vpe_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 9d790bcadf25..715e7cd94125 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2400,6 +2400,7 @@ static struct gdsc oxilicx_gdsc = {
.pd = {
.name = "oxilicx",
},
+ .parent = &oxili_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2615,7 +2616,6 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8974_match_table);
static int mmcc_msm8974_probe(struct platform_device *pdev)
{
struct regmap *regmap;
- int ret;
regmap = qcom_cc_map(pdev, &mmcc_msm8974_desc);
if (IS_ERR(regmap))
@@ -2624,22 +2624,11 @@ static int mmcc_msm8974_probe(struct platform_device *pdev)
clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
- ret = qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
- if (ret)
- return ret;
-
- return pm_genpd_add_subdomain(&oxili_gdsc.pd, &oxilicx_gdsc.pd);
-}
-
-static int mmcc_msm8974_remove(struct platform_device *pdev)
-{
- pm_genpd_remove_subdomain(&oxili_gdsc.pd, &oxilicx_gdsc.pd);
- return 0;
+ return qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
}
static struct platform_driver mmcc_msm8974_driver = {
.probe = mmcc_msm8974_probe,
- .remove = mmcc_msm8974_remove,
.driver = {
.name = "mmcc-msm8974",
.of_match_table = mmcc_msm8974_match_table,
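
Because oxilicx now names oxili as its .parent, the subdomain link is created inside gdsc_register() (see the gdsc.c hunk above), and this driver-local add/remove pairing becomes redundant. The relationship itself is plain genpd; a sketch of what the registration loop effectively does for each parented GDSC:

#include <linux/pm_domain.h>

/* Sketch: a subdomain's parent powers on before the child and stays on
 * until every subdomain is off. Arguments are illustrative. */
static int demo_link(struct generic_pm_domain *parent,
             struct generic_pm_domain *child)
{
    return pm_genpd_add_subdomain(parent, child);
}
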
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 064f3eaa39d0..6df7ff36b416 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -32,6 +32,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
@@ -2917,6 +2918,144 @@ static struct clk_hw *mmcc_msm8996_hws[] = {
&gpll0_div.hw,
};
+static struct gdsc mmagic_video_gdsc = {
+ .gdscr = 0x119c,
+ .gds_hw_ctrl = 0x120c,
+ .pd = {
+ .name = "mmagic_video",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc mmagic_mdss_gdsc = {
+ .gdscr = 0x247c,
+ .gds_hw_ctrl = 0x2480,
+ .pd = {
+ .name = "mmagic_mdss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc mmagic_camss_gdsc = {
+ .gdscr = 0x3c4c,
+ .gds_hw_ctrl = 0x3c50,
+ .pd = {
+ .name = "mmagic_camss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x1024,
+ .cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
+ .cxc_count = 3,
+ .pd = {
+ .name = "venus",
+ },
+ .parent = &mmagic_video_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x1040,
+ .cxcs = (unsigned int []){ 0x1048 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core1_gdsc = {
+ .gdscr = 0x1044,
+ .cxcs = (unsigned int []){ 0x104c },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_gdsc = {
+ .gdscr = 0x34a0,
+ .cxcs = (unsigned int []){ 0x36bc, 0x36c4 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "camss",
+ },
+ .parent = &mmagic_camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe0_gdsc = {
+ .gdscr = 0x3664,
+ .cxcs = (unsigned int []){ 0x36a8 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "vfe0",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe1_gdsc = {
+ .gdscr = 0x3674,
+ .cxcs = (unsigned int []){ 0x36ac },
+ .cxc_count = 1,
+ .pd = {
+ .name = "vfe0",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x35a4,
+ .cxcs = (unsigned int []){ 0x35a8, 0x35b0, 0x35c0, 0x35b8 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "jpeg",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc cpp_gdsc = {
+ .gdscr = 0x36d4,
+ .cxcs = (unsigned int []){ 0x36b0 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "cpp",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc fd_gdsc = {
+ .gdscr = 0x3b64,
+ .cxcs = (unsigned int []){ 0x3b68, 0x3b6c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "fd",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .cxcs = (unsigned int []){ 0x2310, 0x231c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss",
+ },
+ .parent = &mmagic_mdss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *mmcc_msm8996_clocks[] = {
[MMPLL0_EARLY] = &mmpll0_early.clkr,
[MMPLL0_PLL] = &mmpll0.clkr,
@@ -3093,6 +3232,22 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
[FD_AHB_CLK] = &fd_ahb_clk.clkr,
};
+static struct gdsc *mmcc_msm8996_gdscs[] = {
+ [MMAGIC_VIDEO_GDSC] = &mmagic_video_gdsc,
+ [MMAGIC_MDSS_GDSC] = &mmagic_mdss_gdsc,
+ [MMAGIC_CAMSS_GDSC] = &mmagic_camss_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_CORE1_GDSC] = &venus_core1_gdsc,
+ [CAMSS_GDSC] = &camss_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [CPP_GDSC] = &cpp_gdsc,
+ [FD_GDSC] = &fd_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
static const struct qcom_reset_map mmcc_msm8996_resets[] = {
[MMAGICAHB_BCR] = { 0x5020 },
[MMAGIC_CFG_BCR] = { 0x5050 },
@@ -3170,6 +3325,8 @@ static const struct qcom_cc_desc mmcc_msm8996_desc = {
.num_clks = ARRAY_SIZE(mmcc_msm8996_clocks),
.resets = mmcc_msm8996_resets,
.num_resets = ARRAY_SIZE(mmcc_msm8996_resets),
+ .gdscs = mmcc_msm8996_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs),
};
static const struct of_device_id mmcc_msm8996_match_table[] = {
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 6c977d3a8590..0324d8daab9b 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -55,7 +55,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
return regmap_update_bits(rst->regmap, map->reg, mask, 0);
}
-struct reset_control_ops qcom_reset_ops = {
+const struct reset_control_ops qcom_reset_ops = {
.reset = qcom_reset,
.assert = qcom_reset_assert,
.deassert = qcom_reset_deassert,
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 0e11e2130f97..cda877927d43 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -32,6 +32,6 @@ struct qcom_reset_controller {
#define to_qcom_reset_controller(r) \
container_of(r, struct qcom_reset_controller, rcdev);
-extern struct reset_control_ops qcom_reset_ops;
+extern const struct reset_control_ops qcom_reset_ops;
#endif
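
Constifying qcom_reset_ops works because the reset core only ever reads the ops table; marking it const moves it into .rodata and turns accidental writes into build errors. This presumably depends on a tree-wide change letting the core accept a const ops pointer (an assumption from context). The pattern, with stub callbacks:

#include <linux/reset-controller.h>

static int demo_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
    return 0; /* stub */
}

static int demo_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
    return 0; /* stub */
}

/* A table of pure function pointers can be const and live in .rodata. */
static const struct reset_control_ops demo_reset_ops = {
    .assert = demo_assert,
    .deassert = demo_deassert,
};
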
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/renesas/Makefile
index 7e2579b30326..7e2579b30326 100644
--- a/drivers/clk/shmobile/Makefile
+++ b/drivers/clk/renesas/Makefile
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 999994769450..0627860233cb 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -82,9 +82,8 @@ static unsigned long cpg_div6_clock_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct div6_clock *clock = to_div6_clock(hw);
- unsigned int div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
- return parent_rate / div;
+ return parent_rate / clock->div;
}
static unsigned int cpg_div6_clock_calc_div(unsigned long rate,
diff --git a/drivers/clk/shmobile/clk-div6.h b/drivers/clk/renesas/clk-div6.h
index 9a85a95188da..567b31d2bfa5 100644
--- a/drivers/clk/shmobile/clk-div6.h
+++ b/drivers/clk/renesas/clk-div6.h
@@ -1,5 +1,5 @@
-#ifndef __SHMOBILE_CLK_DIV6_H__
-#define __SHMOBILE_CLK_DIV6_H__
+#ifndef __RENESAS_CLK_DIV6_H__
+#define __RENESAS_CLK_DIV6_H__
struct clk *cpg_div6_register(const char *name, unsigned int num_parents,
const char **parent_names, void __iomem *reg);
diff --git a/drivers/clk/shmobile/clk-emev2.c b/drivers/clk/renesas/clk-emev2.c
index a91825471c79..a91825471c79 100644
--- a/drivers/clk/shmobile/clk-emev2.c
+++ b/drivers/clk/renesas/clk-emev2.c
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 3b09716ebda2..3d44e183aedd 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -14,7 +14,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 9326204bed9d..28d204bb659e 100644
--- a/drivers/clk/shmobile/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/clk/shmobile/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index 1e6b1da58065..2f7ce6696b6c 100644
--- a/drivers/clk/shmobile/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/clk/shmobile/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index b1741551fff2..40e3a501a50e 100644
--- a/drivers/clk/shmobile/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/shmobile/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c
index 92275c5f2c60..cf2a37df03b1 100644
--- a/drivers/clk/shmobile/clk-r8a7779.c
+++ b/drivers/clk/renesas/clk-r8a7779.c
@@ -11,7 +11,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 841977240305..00e6aba4b9c0 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -11,7 +11,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/math64.h>
diff --git a/drivers/clk/shmobile/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 9766e3cb595f..f6312c62f16b 100644
--- a/drivers/clk/shmobile/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -10,7 +10,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
diff --git a/drivers/clk/shmobile/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index 8966f8bbfd72..eea38f6ea77e 100644
--- a/drivers/clk/shmobile/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
diff --git a/drivers/clk/shmobile/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 13e994772dfd..b2198aef5ed4 100644
--- a/drivers/clk/shmobile/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/r8a7795-cpg-mssr.h>
@@ -61,6 +62,7 @@ enum r8a7795_clk_types {
CLK_TYPE_GEN3_PLL2,
CLK_TYPE_GEN3_PLL3,
CLK_TYPE_GEN3_PLL4,
+ CLK_TYPE_GEN3_SD,
};
static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
@@ -99,11 +101,18 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_FIXED("s3d1", R8A7795_CLK_S3D1, CLK_S3, 1, 1),
DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_SD("sd0", R8A7795_CLK_SD0, CLK_PLL1_DIV2, 0x0074),
+ DEF_SD("sd1", R8A7795_CLK_SD1, CLK_PLL1_DIV2, 0x0078),
+ DEF_SD("sd2", R8A7795_CLK_SD2, CLK_PLL1_DIV2, 0x0268),
+ DEF_SD("sd3", R8A7795_CLK_SD3, CLK_PLL1_DIV2, 0x026c),
+
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014),
DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV2, 0x250),
+ DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
};
static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
@@ -120,8 +129,17 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S3D1),
DEF_MOD("scif2", 310, R8A7795_CLK_S3D4),
+ DEF_MOD("sdif3", 311, R8A7795_CLK_SD3),
+ DEF_MOD("sdif2", 312, R8A7795_CLK_SD2),
+ DEF_MOD("sdif1", 313, R8A7795_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A7795_CLK_SD0),
DEF_MOD("pcie1", 318, R8A7795_CLK_S3D1),
DEF_MOD("pcie0", 319, R8A7795_CLK_S3D1),
+ DEF_MOD("usb3-if1", 327, R8A7795_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A7795_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A7795_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A7795_CLK_S3D1),
+ DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
DEF_MOD("intc-ap", 408, R8A7795_CLK_S3D1),
DEF_MOD("audmac0", 502, R8A7795_CLK_S3D4),
DEF_MOD("audmac1", 501, R8A7795_CLK_S3D4),
@@ -130,6 +148,21 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("hscif2", 518, R8A7795_CLK_S3D1),
DEF_MOD("hscif1", 519, R8A7795_CLK_S3D1),
DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1),
+ DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvd2", 601, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvd1", 602, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvd0", 603, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvb1", 606, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvb0", 607, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvi2", 609, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvi1", 610, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvi0", 611, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpf2", 613, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpf1", 614, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpf0", 615, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpci1", 616, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpci0", 617, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpcs", 619, R8A7795_CLK_S2D1),
DEF_MOD("vspd3", 620, R8A7795_CLK_S2D1),
DEF_MOD("vspd2", 621, R8A7795_CLK_S2D1),
DEF_MOD("vspd1", 622, R8A7795_CLK_S2D1),
@@ -147,6 +180,7 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("du2", 722, R8A7795_CLK_S2D1),
DEF_MOD("du1", 723, R8A7795_CLK_S2D1),
DEF_MOD("du0", 724, R8A7795_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A7795_CLK_S2D1),
DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI),
DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI),
DEF_MOD("etheravb", 812, R8A7795_CLK_S3D2),
@@ -159,6 +193,9 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("gpio2", 910, R8A7795_CLK_CP),
DEF_MOD("gpio1", 911, R8A7795_CLK_CP),
DEF_MOD("gpio0", 912, R8A7795_CLK_CP),
+ DEF_MOD("can-fd", 914, R8A7795_CLK_S3D2),
+ DEF_MOD("can-if1", 915, R8A7795_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A7795_CLK_S3D4),
DEF_MOD("i2c6", 918, R8A7795_CLK_S3D2),
DEF_MOD("i2c5", 919, R8A7795_CLK_S3D2),
DEF_MOD("i2c4", 927, R8A7795_CLK_S3D2),
@@ -198,6 +235,221 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = {
MOD_CLK_ID(408), /* INTC-AP (GIC) */
};
+/* -----------------------------------------------------------------------------
+ * SDn Clock
+ */
+#define CPG_SD_STP_HCK BIT(9)
+#define CPG_SD_STP_CK BIT(8)
+
+#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
+#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
+
+#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \
+{ \
+ .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
+ ((stp_ck) ? CPG_SD_STP_CK : 0) | \
+ ((sd_srcfc) << 2) | \
+ ((sd_fc) << 0), \
+ .div = (sd_div), \
+}
+
+struct sd_div_table {
+ u32 val;
+ unsigned int div;
+};
+
+struct sd_clock {
+ struct clk_hw hw;
+ void __iomem *reg;
+ const struct sd_div_table *div_table;
+ unsigned int div_num;
+ unsigned int div_min;
+ unsigned int div_max;
+};
+
+/* SDn divider
+ *                     sd_srcfc   sd_fc   div
+ * stp_hck   stp_ck    (div)      (div)   = sd_srcfc x sd_fc
+ *---------------------------------------------------------------
+ *    0         0        0 (1)    1 (4)        4
+ *    0         0        1 (2)    1 (4)        8
+ *    1         0        2 (4)    1 (4)       16
+ *    1         0        3 (8)    1 (4)       32
+ *    1         0        4 (16)   1 (4)       64
+ *    0         0        0 (1)    0 (2)        2
+ *    0         0        1 (2)    0 (2)        4
+ *    1         0        2 (4)    0 (2)        8
+ *    1         0        3 (8)    0 (2)       16
+ *    1         0        4 (16)   0 (2)       32
+ */
+static const struct sd_div_table cpg_sd_div_table[] = {
+/* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 1, 4),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 2, 1, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 3, 1, 32),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 4, 1, 64),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 0, 2),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 0, 4),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 2, 0, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 3, 0, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 4, 0, 32),
+};
+
+#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
+
+static int cpg_sd_clock_enable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ u32 val, sd_fc;
+ unsigned int i;
+
+ val = clk_readl(clock->reg);
+
+ sd_fc = val & CPG_SD_FC_MASK;
+ for (i = 0; i < clock->div_num; i++)
+ if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ val &= ~(CPG_SD_STP_MASK);
+ val |= clock->div_table[i].val & CPG_SD_STP_MASK;
+
+ clk_writel(val, clock->reg);
+
+ return 0;
+}
+
+static void cpg_sd_clock_disable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ clk_writel(clk_readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
+}
+
+static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ return !(clk_readl(clock->reg) & CPG_SD_STP_MASK);
+}
+
+static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned long rate = parent_rate;
+ u32 val, sd_fc;
+ unsigned int i;
+
+ val = clk_readl(clock->reg);
+
+ sd_fc = val & CPG_SD_FC_MASK;
+ for (i = 0; i < clock->div_num; i++)
+ if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ return DIV_ROUND_CLOSEST(rate, clock->div_table[i].div);
+}
+
+static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ unsigned int div;
+
+ if (!rate)
+ rate = 1;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+ return clamp_t(unsigned int, div, clock->div_min, clock->div_max);
+}
+
+static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate);
+
+ return DIV_ROUND_CLOSEST(*parent_rate, div);
+}
+
+static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
+ u32 val;
+ unsigned int i;
+
+ for (i = 0; i < clock->div_num; i++)
+ if (div == clock->div_table[i].div)
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ val = clk_readl(clock->reg);
+ val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
+ val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
+ clk_writel(val, clock->reg);
+
+ return 0;
+}
+
+static const struct clk_ops cpg_sd_clock_ops = {
+ .enable = cpg_sd_clock_enable,
+ .disable = cpg_sd_clock_disable,
+ .is_enabled = cpg_sd_clock_is_enabled,
+ .recalc_rate = cpg_sd_clock_recalc_rate,
+ .round_rate = cpg_sd_clock_round_rate,
+ .set_rate = cpg_sd_clock_set_rate,
+};
+
+static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
+ void __iomem *base,
+ const char *parent_name)
+{
+ struct clk_init_data init;
+ struct sd_clock *clock;
+ struct clk *clk;
+ unsigned int i;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = core->name;
+ init.ops = &cpg_sd_clock_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->reg = base + core->offset;
+ clock->hw.init = &init;
+ clock->div_table = cpg_sd_div_table;
+ clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
+
+ clock->div_max = clock->div_table[0].div;
+ clock->div_min = clock->div_max;
+ for (i = 1; i < clock->div_num; i++) {
+ clock->div_max = max(clock->div_max, clock->div_table[i].div);
+ clock->div_min = min(clock->div_min, clock->div_table[i].div);
+ }
+
+ clk = clk_register(NULL, &clock->hw);
+ if (IS_ERR(clk))
+ kfree(clock);
+
+ return clk;
+}
#define CPG_PLL0CR 0x00d8
#define CPG_PLL2CR 0x002c
@@ -323,6 +575,9 @@ struct clk * __init r8a7795_cpg_clk_register(struct device *dev,
mult = (((value >> 24) & 0x7f) + 1) * 2;
break;
+ case CLK_TYPE_GEN3_SD:
+ return cpg_sd_clk_register(core, base, __clk_get_name(parent));
+
default:
return ERR_PTR(-EINVAL);
}
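The SD clock never programs an arbitrary divider: cpg_sd_clock_calc_div() rounds parent_rate/rate to the closest integer and clamps it into the [div_min, div_max] bounds computed at registration time, and cpg_sd_clock_set_rate() then requires an exact hit in cpg_sd_div_table. A minimal user-space sketch of that selection (sd_pick_div() is a hypothetical name; the divider values are copied from the table above):

/*
 * Standalone sketch, not driver code: round, clamp, then demand an
 * exact table match, as the SDn ops above do.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static const unsigned int sd_div[] = { 4, 8, 16, 32, 64, 2 };

static int sd_pick_div(unsigned long parent, unsigned long rate)
{
	unsigned long div;
	unsigned int i;

	if (!rate)
		rate = 1;
	div = DIV_ROUND_CLOSEST(parent, rate);
	if (div < 2)		/* div_min over the table above */
		div = 2;
	if (div > 64)		/* div_max over the table above */
		div = 64;
	for (i = 0; i < sizeof(sd_div) / sizeof(sd_div[0]); i++)
		if (sd_div[i] == div)
			return (int)div;
	return -1;		/* the driver returns -EINVAL here */
}

int main(void)
{
	/* 800 MHz parent, 100 MHz target -> divider 8 */
	printf("div = %d\n", sd_pick_div(800000000UL, 100000000UL));
	return 0;
}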
diff --git a/drivers/clk/shmobile/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 9a4d888164bb..58e24b326a48 100644
--- a/drivers/clk/shmobile/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -348,6 +348,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
#else
dev_dbg(dev, "Ignoring MSTP %s to prevent disabling\n",
mod->name);
+ kfree(clock);
return;
#endif
}
@@ -568,7 +569,11 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
if (error)
return error;
- devm_add_action(dev, cpg_mssr_del_clk_provider, np);
+ error = devm_add_action_or_reset(dev,
+ cpg_mssr_del_clk_provider,
+ np);
+ if (error)
+ return error;
error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
info->num_core_pm_clks);
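The probe fix above matters because devm_add_action() can itself fail with -ENOMEM, leaving the clock provider registered but never removed. devm_add_action_or_reset() runs the action immediately when it cannot register it, so one error check covers both outcomes. A minimal sketch of the pattern (the sketch_* names are hypothetical):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/clk-provider.h>

static void sketch_del_provider(void *data)
{
	of_clk_del_provider(data);	/* same role as cpg_mssr_del_clk_provider() */
}

static int sketch_probe_tail(struct device *dev, struct device_node *np)
{
	int error;

	/*
	 * Unlike plain devm_add_action(), the _or_reset variant calls the
	 * action itself if the devres node cannot be allocated, so this
	 * single check covers both "registered" and "already cleaned up".
	 */
	error = devm_add_action_or_reset(dev, sketch_del_provider, np);
	if (error)
		return error;

	return 0;
}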
diff --git a/drivers/clk/shmobile/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index e09f03cbf086..952b6957233b 100644
--- a/drivers/clk/shmobile/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -53,6 +53,8 @@ enum clk_types {
DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
#define DEF_DIV6P1(_name, _id, _parent, _offset) \
DEF_BASE(_name, _id, CLK_TYPE_DIV6P1, _parent, .offset = _offset)
+#define DEF_SD(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
/*
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index d07374f48caf..4e73ed5cab58 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -116,7 +116,7 @@ static void rockchip_cpuclk_set_dividers(struct rockchip_cpuclk *cpuclk,
pr_debug("%s: setting reg 0x%x to 0x%x\n",
__func__, clksel->reg, clksel->val);
- writel(clksel->val , cpuclk->reg_base + clksel->reg);
+ writel(clksel->val, cpuclk->reg_base + clksel->reg);
}
}
@@ -290,14 +290,14 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
pr_err("%s: could not lookup parent clock %s\n",
__func__, parent_names[0]);
ret = -EINVAL;
- goto free_cpuclk;
+ goto free_alt_parent;
}
ret = clk_notifier_register(clk, &cpuclk->clk_nb);
if (ret) {
pr_err("%s: failed to register clock notifier for %s\n",
__func__, name);
- goto free_cpuclk;
+ goto free_alt_parent;
}
if (nrates > 0) {
@@ -326,6 +326,8 @@ free_rate_table:
kfree(cpuclk->rate_table);
unregister_notifier:
clk_notifier_unregister(clk, &cpuclk->clk_nb);
+free_alt_parent:
+ clk_disable_unprepare(cpuclk->alt_parent);
free_cpuclk:
kfree(cpuclk);
return ERR_PTR(ret);
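The clk-cpu.c hunks re-route two error paths through a new free_alt_parent label: the alternate parent was clk_prepare_enable()d earlier in the function, so any later failure must disable it again before freeing. Condensed to its shape (setup_rest() and make_clk() are hypothetical stand-ins for the notifier and rate-table steps):

#include <linux/clk.h>
#include <linux/err.h>

extern int setup_rest(void);		/* hypothetical */
extern struct clk *make_clk(void);	/* hypothetical */

static struct clk *cpuclk_sketch(struct clk *alt_parent)
{
	int ret;

	ret = clk_prepare_enable(alt_parent);	/* must stay on during switches */
	if (ret)
		return ERR_PTR(ret);

	ret = setup_rest();
	if (ret)
		goto err_alt_parent;		/* was leaked before the fix */

	return make_clk();

err_alt_parent:
	clk_disable_unprepare(alt_parent);
	return ERR_PTR(ret);
}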
diff --git a/drivers/clk/rockchip/clk-inverter.c b/drivers/clk/rockchip/clk-inverter.c
index 7cbf43beb3c6..dcb6e37f3da1 100644
--- a/drivers/clk/rockchip/clk-inverter.c
+++ b/drivers/clk/rockchip/clk-inverter.c
@@ -90,7 +90,7 @@ struct clk *rockchip_clk_register_inverter(const char *name,
inv_clock = kmalloc(sizeof(*inv_clock), GFP_KERNEL);
if (!inv_clock)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init.name = name;
init.num_parents = num_parents;
@@ -106,11 +106,7 @@ struct clk *rockchip_clk_register_inverter(const char *name,
clk = clk_register(NULL, &inv_clock->hw);
if (IS_ERR(clk))
- goto err_free;
+ kfree(inv_clock);
return clk;
-
-err_free:
- kfree(inv_clock);
- return NULL;
}
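The inverter and MMC-phase fixes share one point: callers of these helpers check the result with IS_ERR(), and a NULL return passes that check. NULL is then a perfectly legal "optional clock" to the framework, so the allocation failure would be silently swallowed. A hypothetical caller showing why ERR_PTR(-ENOMEM) is the right return:

#include <linux/clk.h>
#include <linux/err.h>

static int use_inverter(struct clk *clk)	/* hypothetical caller */
{
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* now sees the -ENOMEM */

	/*
	 * A NULL here would have passed the IS_ERR() check and been
	 * treated as a valid optional clock; clk_prepare_enable(NULL)
	 * simply returns 0, hiding the failure.
	 */
	return clk_prepare_enable(clk);
}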
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 2685644826a0..e0dc7e83403a 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -150,7 +150,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
if (!mmc_clock)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init.name = name;
init.num_parents = num_parents;
@@ -172,11 +172,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
clk = clk_register(NULL, &mmc_clock->hw);
if (IS_ERR(clk))
- goto err_free;
+ kfree(mmc_clock);
return clk;
-
-err_free:
- kfree(mmc_clock);
- return NULL;
}
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index b7e66c9dd9f2..5de797e34d54 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -94,6 +94,11 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
unsigned int val;
int delay = 24000000, ret;
+ if (IS_ERR(grf)) {
+ pr_err("%s: grf regmap not available\n", __func__);
+ return PTR_ERR(grf);
+ }
+
while (delay > 0) {
ret = regmap_read(grf, pll->lock_offset, &val);
if (ret) {
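rockchip_clk_get_grf() returns an ERR_PTR when the GRF syscon is unavailable (for example on a CRU node without a GRF phandle), and regmap_read() must never be handed such a pointer; the added check fails the PLL lock wait cleanly instead of crashing. The guard in isolation (read_grf_sketch() is a hypothetical wrapper):

#include <linux/err.h>
#include <linux/regmap.h>
#include "clk.h"	/* for rockchip_clk_get_grf(), declared below */

static int read_grf_sketch(unsigned int offset, unsigned int *val)
{
	struct regmap *grf = rockchip_clk_get_grf();

	if (IS_ERR(grf))		/* e.g. missing GRF syscon node */
		return PTR_ERR(grf);

	return regmap_read(grf, offset, val);
}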
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index bc7fbac83ab7..7cdb2d61f3e0 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -177,6 +177,8 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(0, "gpll_armclk", "gpll", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(0), 6, GFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
/*
* Clock-Architecture Diagram 2
*/
@@ -187,6 +189,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 8, GFLAGS),
COMPOSITE_NOGATE(0, "ddrphy2x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+ FACTOR(0, "ddrphy", "ddrphy2x", 0, 1, 2),
COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
RK2928_CLKSEL_CON(1), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
@@ -263,6 +266,8 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
COMPOSITE(0, "aclk_vcodec", mux_pll_src_3plls_p, 0,
RK2928_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 11, GFLAGS),
+ FACTOR_GATE(HCLK_VCODEC, "hclk_vcodec", "aclk_vcodec", 0, 1, 4,
+ RK2928_CLKGATE_CON(3), 12, GFLAGS),
COMPOSITE(0, "aclk_hvec", mux_pll_src_3plls_p, 0,
RK2928_CLKSEL_CON(20), 0, 2, MFLAGS, 2, 5, DFLAGS,
@@ -343,7 +348,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(16), 0, 2, MFLAGS, 2, 5, DFLAGS,
RK2928_CLKGATE_CON(10), 5, GFLAGS),
- COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0,
+ COMPOSITE_NOGATE(SCLK_MACPLL, "mac_pll_src", mux_pll_src_3plls_p, CLK_SET_RATE_NO_REPARENT,
RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS),
MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(21), 3, 1, MFLAGS),
@@ -351,6 +356,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
RK2928_CLKSEL_CON(21), 4, 5, DFLAGS,
RK2928_CLKGATE_CON(2), 6, GFLAGS),
+ FACTOR(0, "sclk_macref_out", "hclk_peri_src", 0, 1, 2),
MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
RK2928_CLKSEL_CON(31), 0, 1, MFLAGS),
@@ -376,11 +382,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(ACLK_VIO, "aclk_vio", "aclk_disp1_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 13, GFLAGS),
GATE(ACLK_LCDC, "aclk_lcdc", "aclk_disp1_pre", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
- GATE(HCLK_VIO_BUS, "hclk_vio_bus", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
+ GATE(HCLK_VIO_BUS, "hclk_vio_bus", "hclk_disp_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 12, GFLAGS),
GATE(HCLK_LCDC, "hclk_lcdc", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(9), 5, GFLAGS),
- /* hclk_video gates */
- GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(3), 12, GFLAGS),
/* xin24m gates */
GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK2928_CLKGATE_CON(10), 0, GFLAGS),
@@ -404,7 +408,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(HCLK_OTG1, "hclk_otg1", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_I2S, "hclk_i2s", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(0, "hclk_sfc", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 14, GFLAGS),
- GATE(0, "hclk_mac", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 15, GFLAGS),
+ GATE(HCLK_MAC, "hclk_mac", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS),
/* pclk_peri gates */
GATE(0, "pclk_peri_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 1, GFLAGS),
@@ -444,34 +448,11 @@ static void __init rk3036_clk_init(struct device_node *np)
rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
- /* xin12m is created by an cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
if (IS_ERR(clk))
pr_warn("%s: could not register clock usb480m: %ld\n",
__func__, PTR_ERR(clk));
- clk = clk_register_fixed_factor(NULL, "ddrphy", "ddrphy2x", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock ddrphy: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_vcodec_pre",
- "aclk_vcodec", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "sclk_macref_out",
- "hclk_peri_src", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock sclk_macref_out: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_plls(rk3036_pll_clks,
ARRAY_SIZE(rk3036_pll_clks),
RK3036_GRF_SOC_STATUS0);
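The rk3036 init-function cleanups are all the same move: internal fixed-factor clocks migrate from open-coded clk_register_fixed_factor() calls into the branch table, using the new FACTOR()/FACTOR_GATE() entries defined later in this series. Both forms below are taken from this hunk:

/* Before: registered imperatively at init time, with a hand-rolled
 * warning on failure. */
clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);

/* After: one declarative entry in rk3036_clk_branches[], handled by
 * the common rockchip_clk_register_branches() loop. */
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),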
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 7f7444cbf6fc..40bab3901491 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -339,13 +339,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
INVERTER(0, "pclk_cif0", "pclkin_cif0",
RK2928_CLKSEL_CON(30), 8, IFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
/*
* the 480m are generated inside the usb block from these clocks,
* but they are also a source for the hsicphy clock.
*/
- GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(1), 5, GFLAGS),
- GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(1), 6, GFLAGS),
COMPOSITE(0, "mac_src", mux_mac_p, 0,
@@ -605,7 +607,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(SCLK_TIMER2, "timer2", "xin24m", 0,
RK2928_CLKGATE_CON(3), 2, GFLAGS),
- COMPOSITE_NOMUX(0, "sclk_tsadc", "xin24m", 0,
+ COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin24m", 0,
RK2928_CLKSEL_CON(34), 0, 16, DFLAGS,
RK2928_CLKGATE_CON(2), 15, GFLAGS),
@@ -662,11 +664,11 @@ static struct clk_div_table div_rk3188_aclk_core_t[] = {
{ /* sentinel */ },
};
-PNAME(mux_hsicphy_p) = { "sclk_otgphy0", "sclk_otgphy1",
+PNAME(mux_hsicphy_p) = { "sclk_otgphy0_480m", "sclk_otgphy1_480m",
"gpll", "cpll" };
static struct rockchip_clk_branch rk3188_i2s0_fracmux __initdata =
- MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+ MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(3), 8, 2, MFLAGS);
static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
@@ -722,7 +724,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 9, GFLAGS),
- COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", 0,
+ COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(7), 0,
RK2928_CLKGATE_CON(0), 10, GFLAGS,
&rk3188_i2s0_fracmux),
@@ -748,12 +750,12 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_cpu",
"pclk_peri",
+ "hclk_cpubus"
};
static void __init rk3188_common_clk_init(struct device_node *np)
{
void __iomem *reg_base;
- struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -763,17 +765,6 @@ static void __init rk3188_common_clk_init(struct device_node *np)
rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
- /* xin12m is created by an cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock usb480m: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_branches(common_clk_branches,
ARRAY_SIZE(common_clk_branches));
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 981a50205339..7702d2855e9c 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -187,7 +187,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(7), 1, GFLAGS),
GATE(0, "ddrc", "ddrphy_pre", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(8), 5, GFLAGS),
- GATE(0, "ddrphy", "ddrphy_pre", CLK_IGNORE_UNUSED,
+ FACTOR_GATE(0, "ddrphy", "ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
RK2928_CLKGATE_CON(7), 0, GFLAGS),
/* PD_CORE */
@@ -240,13 +240,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
COMPOSITE(0, "aclk_vpu_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(32), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 11, GFLAGS),
- GATE(0, "hclk_vpu_src", "aclk_vpu_pre", 0,
+ FACTOR_GATE(0, "hclk_vpu_pre", "aclk_vpu_pre", 0, 1, 4,
RK2928_CLKGATE_CON(4), 4, GFLAGS),
COMPOSITE(0, "aclk_rkvdec_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 2, GFLAGS),
- GATE(0, "hclk_rkvdec_src", "aclk_rkvdec_pre", 0,
+ FACTOR_GATE(0, "hclk_rkvdec_pre", "aclk_rkvdec_pre", 0, 1, 4,
RK2928_CLKGATE_CON(4), 5, GFLAGS),
COMPOSITE(0, "sclk_vdec_cabac", mux_pll_src_4plls_p, 0,
@@ -285,7 +285,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(23), 14, 2, MFLAGS, 8, 6, DFLAGS,
RK2928_CLKGATE_CON(3), 5, GFLAGS),
- GATE(0, "sclk_hdmi_hdcp", "xin24m", 0,
+ GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
RK2928_CLKGATE_CON(3), 7, GFLAGS),
COMPOSITE(0, "sclk_hdmi_cec", mux_sclk_hdmi_cec_p, 0,
@@ -364,13 +364,15 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(3), 1, GFLAGS),
MUX(0, "sclk_vop_src", mux_sclk_vop_src_p, 0,
RK2928_CLKSEL_CON(27), 0, 1, MFLAGS),
- DIV(0, "dclk_hdmiphy", "sclk_vop_src", 0,
+ DIV(DCLK_HDMI_PHY, "dclk_hdmiphy", "sclk_vop_src", 0,
RK2928_CLKSEL_CON(29), 0, 3, DFLAGS),
DIV(0, "sclk_vop_pre", "sclk_vop_src", 0,
RK2928_CLKSEL_CON(27), 8, 8, DFLAGS),
- MUX(0, "dclk_vop", mux_dclk_vop_p, 0,
+ MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0,
RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(9), 15, 1, MFLAGS, 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 3, GFLAGS),
@@ -422,7 +424,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "sclk_otgphy1", "xin24m", 0,
RK2928_CLKGATE_CON(1), 6, GFLAGS),
- COMPOSITE_NOMUX(0, "sclk_tsadc", "xin24m", 0,
+ COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin24m", 0,
RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
RK2928_CLKGATE_CON(2), 8, GFLAGS),
@@ -503,7 +505,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "aclk_iep", "aclk_iep_pre", 0, RK2928_CLKGATE_CON(13), 2, GFLAGS),
GATE(0, "aclk_iep_noc", "aclk_iep_pre", 0, RK2928_CLKGATE_CON(13), 9, GFLAGS),
- GATE(0, "aclk_vop", "aclk_vop_pre", 0, RK2928_CLKGATE_CON(13), 5, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vop_pre", 0, RK2928_CLKGATE_CON(13), 5, GFLAGS),
GATE(0, "aclk_vop_noc", "aclk_vop_pre", 0, RK2928_CLKGATE_CON(13), 12, GFLAGS),
GATE(0, "aclk_hdcp", "aclk_hdcp_pre", 0, RK2928_CLKGATE_CON(14), 10, GFLAGS),
@@ -511,13 +513,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "hclk_rga", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 1, GFLAGS),
GATE(0, "hclk_iep", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 3, GFLAGS),
- GATE(0, "hclk_vop", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 6, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 6, GFLAGS),
GATE(0, "hclk_vio_ahb_arbi", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 7, GFLAGS),
GATE(0, "hclk_vio_noc", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 8, GFLAGS),
GATE(0, "hclk_vop_noc", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 13, GFLAGS),
GATE(0, "hclk_vio_h2p", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 7, GFLAGS),
GATE(0, "hclk_hdcp_mmu", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 12, GFLAGS),
- GATE(0, "pclk_hdmi_ctrl", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 6, GFLAGS),
GATE(0, "pclk_vio_h2p", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 8, GFLAGS),
GATE(0, "pclk_hdcp", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 11, GFLAGS),
@@ -582,7 +584,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(PCLK_UART0, "pclk_uart0", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 12, GFLAGS),
GATE(PCLK_UART1, "pclk_uart1", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 13, GFLAGS),
GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 14, GFLAGS),
- GATE(0, "pclk_tsadc", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 15, GFLAGS),
GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(10), 0, GFLAGS),
GATE(0, "pclk_cru", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(10), 1, GFLAGS),
GATE(0, "pclk_sgrf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(10), 2, GFLAGS),
@@ -590,7 +592,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "pclk_ddrphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 3, GFLAGS),
GATE(0, "pclk_acodecphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 5, GFLAGS),
- GATE(0, "pclk_hdmiphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(PCLK_HDMI_PHY, "pclk_hdmiphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 7, GFLAGS),
GATE(0, "pclk_vdacphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 8, GFLAGS),
GATE(0, "pclk_phy_noc", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
@@ -605,13 +607,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
/* PD_MMC */
MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
- MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK3228_SDIO_CON0, 1),
- MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3228_SDIO_CON1, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3228_SDIO_CON1, 0),
MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3228_EMMC_CON0, 1),
- MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 0),
};
static const char *const rk3228_critical_clocks[] __initconst = {
@@ -624,7 +626,6 @@ static const char *const rk3228_critical_clocks[] __initconst = {
static void __init rk3228_clk_init(struct device_node *np)
{
void __iomem *reg_base;
- struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -634,29 +635,6 @@ static void __init rk3228_clk_init(struct device_node *np)
rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
- /* xin12m is created by an cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "ddrphy_pre", "ddrphy4x", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock ddrphy_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_vpu_pre",
- "hclk_vpu_src", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_vpu_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_rkvdec_pre",
- "hclk_rkvdec_src", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_rkvdec_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_plls(rk3228_pll_clks,
ARRAY_SIZE(rk3228_pll_clks),
RK3228_GRF_SOC_STATUS0);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 984fc187d12e..3cb72163a512 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -195,8 +195,8 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
-PNAME(mux_usbphy480m_p) = { "sclk_otgphy1", "sclk_otgphy2",
- "sclk_otgphy0" };
+PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
+ "sclk_otgphy0_480m" };
PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
PNAME(mux_hsicphy12m_p) = { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
@@ -333,6 +333,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(0), 7, GFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
RK3288_CLKSEL_CON(4), 15, 1, MFLAGS, 0, 7, DFLAGS,
RK3288_CLKGATE_CON(4), 1, GFLAGS),
@@ -399,12 +401,10 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
*/
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vdpu", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
- /*
- * We introduce a virtul node of hclk_vodec_pre_v to split one clock
- * struct with a gate and a fix divider into two node in software.
- */
- GATE(0, "hclk_vcodec_pre_v", "aclk_vdpu", 0,
+
+ FACTOR_GATE(0, "hclk_vcodec_pre", "aclk_vdpu", 0, 1, 4,
RK3288_CLKGATE_CON(3), 10, GFLAGS),
+
GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 1, GFLAGS),
@@ -537,11 +537,11 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3288_CLKGATE_CON(4), 10, GFLAGS),
- GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 4, GFLAGS),
- GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 5, GFLAGS),
- GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY2, "sclk_otgphy2", "xin24m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 6, GFLAGS),
GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 7, GFLAGS),
@@ -888,24 +888,6 @@ static void __init rk3288_clk_init(struct device_node *np)
rockchip_clk_init(np, rk3288_cru_base, CLK_NR_CLKS);
- /* xin12m is created by an cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
-
- clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock usb480m: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_vcodec_pre",
- "hclk_vcodec_pre_v", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
/* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
if (IS_ERR(clk))
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 21f3ea909fab..a2bb12200465 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -121,7 +121,7 @@ PNAME(mux_i2s_2ch_p) = { "i2s_2ch_src", "i2s_2ch_frac",
"dummy", "xin12m" };
PNAME(mux_spdif_8ch_p) = { "spdif_8ch_pre", "spdif_8ch_frac",
"ext_i2s", "xin12m" };
-PNAME(mux_edp_24m_p) = { "dummy", "xin24m" };
+PNAME(mux_edp_24m_p) = { "xin24m", "dummy" };
PNAME(mux_vip_out_p) = { "vip_src", "xin24m" };
PNAME(mux_usbphy480m_p) = { "usbotg_out", "xin24m" };
PNAME(mux_hsic_usbphy480m_p) = { "usbotg_out", "dummy" };
@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
.core_reg = RK3368_CLKSEL_CON(0),
.div_core_shift = 0,
.div_core_mask = 0x1f,
- .mux_core_shift = 15,
+ .mux_core_shift = 7,
};
static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
@@ -218,36 +218,66 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
}
static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
- RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
- RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
+ RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
};
static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
- RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
- RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
- RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
+ RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
+ RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
};
+static struct rockchip_clk_branch rk3368_i2s_8ch_fracmux __initdata =
+ MUX(0, "i2s_8ch_pre", mux_i2s_8ch_pre_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(27), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_spdif_8ch_fracmux __initdata =
+ MUX(0, "spdif_8ch_pre", mux_spdif_8ch_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(31), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_i2s_2ch_fracmux __initdata =
+ MUX(0, "i2s_2ch_pre", mux_i2s_2ch_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(53), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_uart0_fracmux __initdata =
+ MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(33), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_uart1_fracmux __initdata =
+ MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(35), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_uart3_fracmux __initdata =
+ MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(39), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_uart4_fracmux __initdata =
+ MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(41), 8, 2, MFLAGS);
+
static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
/*
* Clock-Architecture Diagram 2
*/
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
MUX(SCLK_USBPHY480M, "usbphy_480m", mux_usbphy480m_p, CLK_SET_RATE_PARENT,
RK3368_CLKSEL_CON(13), 8, 1, MFLAGS),
@@ -299,7 +329,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE_NOGATE_DIVTBL(0, "ddrphy_src", mux_ddrphy_p, CLK_IGNORE_UNUSED,
RK3368_CLKSEL_CON(13), 4, 1, MFLAGS, 0, 2, DFLAGS, div_ddrphy_t),
- GATE(0, "sclk_ddr", "ddrphy_div4", CLK_IGNORE_UNUSED,
+ FACTOR_GATE(0, "sclk_ddr", "ddrphy_src", CLK_IGNORE_UNUSED, 1, 4,
RK3368_CLKGATE_CON(6), 14, GFLAGS),
GATE(0, "sclk_ddr4x", "ddrphy_src", CLK_IGNORE_UNUSED,
RK3368_CLKGATE_CON(6), 15, GFLAGS),
@@ -337,11 +367,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE(0, "i2s_8ch_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(27), 12, 1, MFLAGS, 0, 7, DFLAGS,
RK3368_CLKGATE_CON(6), 1, GFLAGS),
- COMPOSITE_FRAC(0, "i2s_8ch_frac", "i2s_8ch_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(28), 0,
- RK3368_CLKGATE_CON(6), 2, GFLAGS),
- MUX(0, "i2s_8ch_pre", mux_i2s_8ch_pre_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(27), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s_8ch_frac", "i2s_8ch_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(28), 0,
+ RK3368_CLKGATE_CON(6), 2, GFLAGS,
+ &rk3368_i2s_8ch_fracmux),
COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "i2s_8ch_clkout", mux_i2s_8ch_clkout_p, 0,
RK3368_CLKSEL_CON(27), 15, 1, MFLAGS,
RK3368_CLKGATE_CON(6), 0, GFLAGS),
@@ -350,21 +379,21 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE(0, "spdif_8ch_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(31), 12, 1, MFLAGS, 0, 7, DFLAGS,
RK3368_CLKGATE_CON(6), 4, GFLAGS),
- COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(32), 0,
- RK3368_CLKGATE_CON(6), 5, GFLAGS),
- COMPOSITE_NODIV(SCLK_SPDIF_8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
- RK3368_CLKSEL_CON(31), 8, 2, MFLAGS,
- RK3368_CLKGATE_CON(6), 6, GFLAGS),
+ COMPOSITE_FRACMUX(0, "spdif_8ch_frac", "spdif_8ch_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(32), 0,
+ RK3368_CLKGATE_CON(6), 5, GFLAGS,
+ &rk3368_spdif_8ch_fracmux),
+ GATE(SCLK_SPDIF_8CH, "sclk_spdif_8ch", "spdif_8ch_pre", CLK_SET_RATE_PARENT,
+ RK3368_CLKGATE_CON(6), 6, GFLAGS),
COMPOSITE(0, "i2s_2ch_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(53), 12, 1, MFLAGS, 0, 7, DFLAGS,
RK3368_CLKGATE_CON(5), 13, GFLAGS),
- COMPOSITE_FRAC(0, "i2s_2ch_frac", "i2s_2ch_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(54), 0,
- RK3368_CLKGATE_CON(5), 14, GFLAGS),
- COMPOSITE_NODIV(SCLK_I2S_2CH, "sclk_i2s_2ch", mux_i2s_2ch_p, 0,
- RK3368_CLKSEL_CON(53), 8, 2, MFLAGS,
- RK3368_CLKGATE_CON(5), 15, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s_2ch_frac", "i2s_2ch_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(54), 0,
+ RK3368_CLKGATE_CON(5), 14, GFLAGS,
+ &rk3368_i2s_2ch_fracmux),
+ GATE(SCLK_I2S_2CH, "sclk_i2s_2ch", "i2s_2ch_pre", CLK_SET_RATE_PARENT,
+ RK3368_CLKGATE_CON(5), 15, GFLAGS),
COMPOSITE(0, "sclk_tsp", mux_pll_src_cpll_gpll_npll_p, 0,
RK3368_CLKSEL_CON(46), 6, 2, MFLAGS, 0, 5, DFLAGS,
@@ -384,18 +413,18 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
* Clock-Architecture Diagram 3
*/
- COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 6, GFLAGS),
- COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 7, GFLAGS),
/*
- * We introduce a virtual node of hclk_vodec_pre_v to split one clock
- * struct with a gate and a fix divider into two node in software.
+ * aclk_vdpu is used by default (GRF_SOC_CON0[7] selects it in the
+ * system), so we ignore the mux and model the clock nodes as follows:
*/
- GATE(0, "hclk_video_pre_v", "aclk_vdpu", 0,
+ FACTOR_GATE(0, "hclk_video_pre", "aclk_vdpu", 0, 1, 4,
RK3368_CLKGATE_CON(4), 8, GFLAGS),
COMPOSITE(0, "sclk_hevc_cabac_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
@@ -442,7 +471,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
RK3368_CLKGATE_CON(4), 13, GFLAGS),
GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
- RK3368_CLKGATE_CON(5), 12, GFLAGS),
+ RK3368_CLKGATE_CON(4), 12, GFLAGS),
COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
@@ -560,38 +589,34 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gpll_usb_usb_p, 0,
RK3368_CLKSEL_CON(33), 12, 2, MFLAGS, 0, 7, DFLAGS,
RK3368_CLKGATE_CON(2), 0, GFLAGS),
- COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(34), 0,
- RK3368_CLKGATE_CON(2), 1, GFLAGS),
- MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(33), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(34), 0,
+ RK3368_CLKGATE_CON(2), 1, GFLAGS,
+ &rk3368_uart0_fracmux),
COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
RK3368_CLKSEL_CON(35), 0, 7, DFLAGS,
RK3368_CLKGATE_CON(2), 2, GFLAGS),
- COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(36), 0,
- RK3368_CLKGATE_CON(2), 3, GFLAGS),
- MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(35), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(36), 0,
+ RK3368_CLKGATE_CON(2), 3, GFLAGS,
+ &rk3368_uart1_fracmux),
COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
RK3368_CLKSEL_CON(39), 0, 7, DFLAGS,
RK3368_CLKGATE_CON(2), 6, GFLAGS),
- COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(40), 0,
- RK3368_CLKGATE_CON(2), 7, GFLAGS),
- MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(39), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(40), 0,
+ RK3368_CLKGATE_CON(2), 7, GFLAGS,
+ &rk3368_uart3_fracmux),
COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
RK3368_CLKSEL_CON(41), 0, 7, DFLAGS,
RK3368_CLKGATE_CON(2), 8, GFLAGS),
- COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(42), 0,
- RK3368_CLKGATE_CON(2), 9, GFLAGS),
- MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(41), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(42), 0,
+ RK3368_CLKGATE_CON(2), 9, GFLAGS,
+ &rk3368_uart4_fracmux),
COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(43), 6, 2, MFLAGS, 0, 5, DFLAGS,
@@ -842,24 +867,6 @@ static void __init rk3368_clk_init(struct device_node *np)
rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
- /* xin12m is created by a cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
- /* ddrphy_div4 is created by a cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "ddrphy_div4", "ddrphy_src", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_video_pre",
- "hclk_video_pre_v", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
/* Watchdog pclk is controlled by sgrf_soc_con3[7]. */
clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
if (IS_ERR(clk))
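The rk3368 COMPOSITE_FRAC plus standalone MUX pairs collapse into COMPOSITE_FRACMUX entries, which hand the downstream mux to the fractional divider at registration time; that way a rate request on the leaf clock can propagate through the mux into the fractional divider (both carry CLK_SET_RATE_PARENT). The shape of the pattern, using hypothetical "foo" names but the uart0 register fields from above:

static struct rockchip_clk_branch foo_fracmux __initdata =
	MUX(SCLK_FOO, "sclk_foo", mux_foo_p, CLK_SET_RATE_PARENT,
	    RK3368_CLKSEL_CON(33), 8, 2, MFLAGS);

static struct rockchip_clk_branch foo_branches[] __initdata = {
	/* the fracmux is registered as part of the frac branch */
	COMPOSITE_FRACMUX(0, "foo_frac", "foo_src", CLK_SET_RATE_PARENT,
			  RK3368_CLKSEL_CON(34), 0,
			  RK3368_CLKGATE_CON(2), 1, GFLAGS,
			  &foo_fracmux),
};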
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index d9a0b5d4d47f..ec06350c78c4 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (gate_offset >= 0) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
- return ERR_PTR(-ENOMEM);
+ goto err_gate;
gate->flags = gate_flags;
gate->reg = base + gate_offset;
@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (div_width > 0) {
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
- return ERR_PTR(-ENOMEM);
+ goto err_div;
div->flags = div_flags;
div->reg = base + muxdiv_offset;
@@ -90,7 +90,9 @@ static struct clk *rockchip_clk_register_branch(const char *name,
div->width = div_width;
div->lock = lock;
div->table = div_table;
- div_ops = &clk_divider_ops;
+ div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
+ ? &clk_divider_ro_ops
+ : &clk_divider_ops;
}
clk = clk_register_composite(NULL, name, parent_names, num_parents,
@@ -100,6 +102,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
flags);
return clk;
+err_div:
+ kfree(gate);
+err_gate:
+ kfree(mux);
+ return ERR_PTR(-ENOMEM);
}
struct rockchip_clk_frac {
@@ -260,6 +267,53 @@ static struct clk *rockchip_clk_register_frac_branch(const char *name,
return clk;
}
+static struct clk *rockchip_clk_register_factor_branch(const char *name,
+ const char *const *parent_names, u8 num_parents,
+ void __iomem *base, unsigned int mult, unsigned int div,
+ int gate_offset, u8 gate_shift, u8 gate_flags,
+ unsigned long flags, spinlock_t *lock)
+{
+ struct clk *clk;
+ struct clk_gate *gate = NULL;
+ struct clk_fixed_factor *fix = NULL;
+
+ /* without gate, register a simple factor clock */
+ if (gate_offset == 0) {
+ return clk_register_fixed_factor(NULL, name,
+ parent_names[0], flags, mult,
+ div);
+ }
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->flags = gate_flags;
+ gate->reg = base + gate_offset;
+ gate->bit_idx = gate_shift;
+ gate->lock = lock;
+
+ fix = kzalloc(sizeof(*fix), GFP_KERNEL);
+ if (!fix) {
+ kfree(gate);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fix->mult = mult;
+ fix->div = div;
+
+ clk = clk_register_composite(NULL, name, parent_names, num_parents,
+ NULL, NULL,
+ &fix->hw, &clk_fixed_factor_ops,
+ &gate->hw, &clk_gate_ops, flags);
+ if (IS_ERR(clk)) {
+ kfree(fix);
+ kfree(gate);
+ }
+
+ return clk;
+}
+
static DEFINE_SPINLOCK(clk_lock);
static struct clk **clk_table;
static void __iomem *reg_base;
@@ -395,6 +449,14 @@ void __init rockchip_clk_register_branches(
reg_base + list->muxdiv_offset,
list->div_shift, list->div_flags, &clk_lock);
break;
+ case branch_factor:
+ clk = rockchip_clk_register_factor_branch(
+ list->name, list->parent_names,
+ list->num_parents, reg_base,
+ list->div_shift, list->div_width,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags, flags, &clk_lock);
+ break;
}
/* none of the cases above matched */
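One subtlety in the dispatch above: factor branches have no divider register, so the div_shift/div_width table fields are reused to carry the fixed mult/div ratio into rockchip_clk_register_factor_branch(). Spelled out:

/*
 * Field reuse for branch_factor, read off the hunks above:
 *
 *   FACTOR(_id, cname, pname, f, fm, fd)
 *     .div_shift = fm  ->  "mult" argument
 *     .div_width = fd  ->  "div"  argument
 *   of rockchip_clk_register_factor_branch()
 */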
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index ff8bd23a93ec..39c198bbcbee 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -254,6 +254,7 @@ enum rockchip_clk_branch_type {
branch_gate,
branch_mmc,
branch_inverter,
+ branch_factor,
};
struct rockchip_clk_branch {
@@ -508,6 +509,33 @@ struct rockchip_clk_branch {
.div_flags = if, \
}
+#define FACTOR(_id, cname, pname, f, fm, fd) \
+ { \
+ .id = _id, \
+ .branch_type = branch_factor, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .div_shift = fm, \
+ .div_width = fd, \
+ }
+
+#define FACTOR_GATE(_id, cname, pname, f, fm, fd, go, gb, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_factor, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .div_shift = fm, \
+ .div_width = fd, \
+ .gate_offset = go, \
+ .gate_shift = gb, \
+ .gate_flags = gf, \
+ }
+
void rockchip_clk_init(struct device_node *np, void __iomem *base,
unsigned long nr_clks);
struct regmap *rockchip_clk_get_grf(void);
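For reference, a FACTOR_GATE() entry as used in the rk3036 hunk earlier: a fixed 1/4 post-divider that also owns a gate bit, replacing a plain GATE plus a separately registered fixed-factor clock:

FACTOR_GATE(HCLK_VCODEC, "hclk_vcodec", "aclk_vcodec", 0, 1, 4,
	    RK2928_CLKGATE_CON(3), 12, GFLAGS),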
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
index 552f7bb15bc5..21218987bbc3 100644
--- a/drivers/clk/rockchip/softrst.c
+++ b/drivers/clk/rockchip/softrst.c
@@ -81,7 +81,7 @@ static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops rockchip_softrst_ops = {
+static const struct reset_control_ops rockchip_softrst_ops = {
.assert = rockchip_softrst_assert,
.deassert = rockchip_softrst_deassert,
};
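Constifying the ops table lets the compiler place it in .rodata, so the function pointers cannot be overwritten at runtime; reset_control_ops instances are only ever read through a pointer, so this is safe anywhere. A sketch (my_assert/my_deassert are hypothetical):

#include <linux/reset-controller.h>

extern int my_assert(struct reset_controller_dev *rcdev, unsigned long id);
extern int my_deassert(struct reset_controller_dev *rcdev, unsigned long id);

/* const: lives in .rodata, immutable after boot */
static const struct reset_control_ops sketch_ops = {
	.assert   = my_assert,
	.deassert = my_deassert,
};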
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 84196ecdaa12..20c5fe92ab4a 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -1,9 +1,17 @@
+# Recent Exynos platforms should just select COMMON_CLK_SAMSUNG:
config COMMON_CLK_SAMSUNG
- bool
- select COMMON_CLK
+ bool "Samsung Exynos clock controller support" if COMPILE_TEST
+ # Clocks on ARM64 SoCs (e.g. Exynos5433, Exynos7) are chosen by
+ # EXYNOS_ARM64_COMMON_CLK to avoid building them on ARMv7:
+ select EXYNOS_ARM64_COMMON_CLK if ARM64 && ARCH_EXYNOS
+
+config EXYNOS_ARM64_COMMON_CLK
+ bool "Samsung Exynos ARMv8-family clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+# For S3C24XX platforms, select the following symbols:
config S3C2410_COMMON_CLK
- bool
+ bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
help
Build the s3c2410 clock driver based on the common clock framework.
@@ -17,10 +25,9 @@ config S3C2410_COMMON_DCLK
framework.
config S3C2412_COMMON_CLK
- bool
+ bool "Samsung S3C2412 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
config S3C2443_COMMON_CLK
- bool
+ bool "Samsung S3C2443 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
-
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 5f6833ea355d..fc367d4b2902 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -10,11 +10,11 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
-obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos5433.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
-obj-$(CONFIG_ARCH_EXYNOS7) += clk-exynos7.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index ac03e4fe2871..7b3d0f975987 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -500,19 +500,19 @@ PNAME(clkout_cpu_p4x12) = { "fout_apll_div_2", "none", "none", "none",
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
- FRATE(CLK_XXTI, "xxti", NULL, CLK_IS_ROOT, 0),
- FRATE(CLK_XUSBXTI, "xusbxti", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_XXTI, "xxti", NULL, 0, 0),
+ FRATE(CLK_XUSBXTI, "xusbxti", NULL, 0, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
- FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_hdmi24m", NULL, 0, 24000000),
FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", "hdmi", 0, 27000000),
- FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_usbphy0", NULL, 0, 48000000),
};
static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
- FRATE(0, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_usbphy1", NULL, 0, 48000000),
};
static struct samsung_fixed_factor_clock exynos4_fixed_factor_clks[] __initdata = {
@@ -1251,7 +1251,7 @@ static void __init exynos4_clk_register_finpll(struct samsung_clk_provider *ctx)
fclk.id = CLK_FIN_PLL;
fclk.name = "fin_pll";
fclk.parent_name = NULL;
- fclk.flags = CLK_IS_ROOT;
+ fclk.flags = 0;
fclk.fixed_rate = finpll_f;
samsung_clk_register_fixed_rate(ctx, &fclk, 1);
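All of the CLK_IS_ROOT removals in this and the following Samsung hunks are mechanical: the common clock framework identifies a root clock by its lack of parents, so the flag had become a no-op and was dropped from the framework. A root fixed-rate clock therefore needs nothing beyond a NULL parent, as in the hunk above:

/* NULL parent_name already marks the clock as a root; flags stay 0. */
FRATE(CLK_XXTI, "xxti", NULL, 0, 0),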
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
index 92c39f6efec8..86ee06b226bd 100644
--- a/drivers/clk/samsung/clk-exynos4415.c
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -274,7 +274,7 @@ static struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initda
};
static struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initdata = {
- FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 27000000),
};
static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = {
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 5bebf8cb0d70..837197db4ffb 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -262,15 +262,15 @@ PNAME(mout_spdif_p) = { "sclk_audio0", "sclk_audio1", "sclk_audio2",
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
- FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_FIN_PLL, "fin_pll", NULL, 0, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
- FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(0, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
- FRATE(0, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(0, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 24000000),
+ FRATE(0, "sclk_hdmi27m", NULL, 0, 27000000),
+ FRATE(0, "sclk_dptxphy", NULL, 0, 24000000),
+ FRATE(0, "sclk_uhostphy", NULL, 0, 48000000),
};
static struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index d1a29f6c1084..7a7ed075a573 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -1432,42 +1432,38 @@ static unsigned long top_clk_regs[] __initdata = {
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = {
FRATE(PHYCLK_DPTX_PHY_CH3_TXD_CLK, "phyclk_dptx_phy_ch3_txd_clk", NULL,
- CLK_IS_ROOT, 270000000),
+ 0, 270000000),
FRATE(PHYCLK_DPTX_PHY_CH2_TXD_CLK, "phyclk_dptx_phy_ch2_txd_clk", NULL,
- CLK_IS_ROOT, 270000000),
+ 0, 270000000),
FRATE(PHYCLK_DPTX_PHY_CH1_TXD_CLK, "phyclk_dptx_phy_ch1_txd_clk", NULL,
- CLK_IS_ROOT, 270000000),
+ 0, 270000000),
FRATE(PHYCLK_DPTX_PHY_CH0_TXD_CLK, "phyclk_dptx_phy_ch0_txd_clk", NULL,
- CLK_IS_ROOT, 270000000),
+ 0, 270000000),
FRATE(phyclk_hdmi_phy_tmds_clko, "phyclk_hdmi_phy_tmds_clko", NULL,
- CLK_IS_ROOT, 250000000),
+ 0, 250000000),
FRATE(PHYCLK_HDMI_PHY_PIXEL_CLKO, "phyclk_hdmi_phy_pixel_clko", NULL,
- CLK_IS_ROOT, 1660000000),
+ 0, 1660000000),
FRATE(PHYCLK_HDMI_LINK_O_TMDS_CLKHI, "phyclk_hdmi_link_o_tmds_clkhi",
- NULL, CLK_IS_ROOT, 125000000),
+ NULL, 0, 125000000),
FRATE(PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS,
"phyclk_mipi_dphy_4l_m_txbyte_clkhs" , NULL,
- CLK_IS_ROOT, 187500000),
+ 0, 187500000),
FRATE(PHYCLK_DPTX_PHY_O_REF_CLK_24M, "phyclk_dptx_phy_o_ref_clk_24m",
- NULL, CLK_IS_ROOT, 24000000),
+ NULL, 0, 24000000),
FRATE(PHYCLK_DPTX_PHY_CLK_DIV2, "phyclk_dptx_phy_clk_div2", NULL,
- CLK_IS_ROOT, 135000000),
+ 0, 135000000),
FRATE(PHYCLK_MIPI_DPHY_4L_M_RXCLKESC0,
- "phyclk_mipi_dphy_4l_m_rxclkesc0", NULL,
- CLK_IS_ROOT, 20000000),
+ "phyclk_mipi_dphy_4l_m_rxclkesc0", NULL, 0, 20000000),
FRATE(PHYCLK_USBHOST20_PHY_PHYCLOCK, "phyclk_usbhost20_phy_phyclock",
- NULL, CLK_IS_ROOT, 60000000),
+ NULL, 0, 60000000),
FRATE(PHYCLK_USBHOST20_PHY_FREECLK, "phyclk_usbhost20_phy_freeclk",
- NULL, CLK_IS_ROOT, 60000000),
+ NULL, 0, 60000000),
FRATE(PHYCLK_USBHOST20_PHY_CLK48MOHCI,
- "phyclk_usbhost20_phy_clk48mohci",
- NULL, CLK_IS_ROOT, 48000000),
+ "phyclk_usbhost20_phy_clk48mohci", NULL, 0, 48000000),
FRATE(PHYCLK_USBDRD30_UDRD30_PIPE_PCLK,
- "phyclk_usbdrd30_udrd30_pipe_pclk", NULL,
- CLK_IS_ROOT, 125000000),
+ "phyclk_usbdrd30_udrd30_pipe_pclk", NULL, 0, 125000000),
FRATE(PHYCLK_USBDRD30_UDRD30_PHYCLOCK,
- "phyclk_usbdrd30_udrd30_phyclock", NULL,
- CLK_IS_ROOT, 60000000),
+ "phyclk_usbdrd30_udrd30_phyclock", NULL, 0, 60000000),
};
PNAME(mout_memtop_pll_user_p) = {"fin_pll", "dout_mem_pll"};
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index d048dedd8b72..be03ed0fcb6b 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -480,16 +480,16 @@ PNAME(mout_group15_5800_p) = { "dout_osc_div", "mout_sw_aclk550_cam" };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock
exynos5x_fixed_rate_ext_clks[] __initdata = {
- FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_FIN_PLL, "fin_pll", NULL, 0, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initdata = {
- FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(0, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
- FRATE(0, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
- FRATE(0, "mphy_refclk_ixtal24", NULL, CLK_IS_ROOT, 48000000),
- FRATE(0, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 24000000),
+ FRATE(0, "sclk_pwi", NULL, 0, 24000000),
+ FRATE(0, "sclk_usbh20", NULL, 0, 48000000),
+ FRATE(0, "mphy_refclk_ixtal24", NULL, 0, 48000000),
+ FRATE(0, "sclk_usbh20_scan_clk", NULL, 0, 480000000),
};
static struct samsung_fixed_factor_clock
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index cee062c588de..128527b8fbeb 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -142,17 +142,6 @@ static unsigned long top_clk_regs[] __initdata = {
MUX_ENABLE_TOP_FSYS1,
MUX_ENABLE_TOP_PERIC0,
MUX_ENABLE_TOP_PERIC1,
- MUX_STAT_TOP0,
- MUX_STAT_TOP1,
- MUX_STAT_TOP2,
- MUX_STAT_TOP3,
- MUX_STAT_TOP4,
- MUX_STAT_TOP_MSCL,
- MUX_STAT_TOP_CAM1,
- MUX_STAT_TOP_FSYS0,
- MUX_STAT_TOP_FSYS1,
- MUX_STAT_TOP_PERIC0,
- MUX_STAT_TOP_PERIC1,
DIV_TOP0,
DIV_TOP1,
DIV_TOP2,
@@ -170,22 +159,6 @@ static unsigned long top_clk_regs[] __initdata = {
DIV_TOP_PERIC3,
DIV_TOP_PERIC4,
DIV_TOP_PLL_FREQ_DET,
- DIV_STAT_TOP0,
- DIV_STAT_TOP1,
- DIV_STAT_TOP2,
- DIV_STAT_TOP3,
- DIV_STAT_TOP4,
- DIV_STAT_TOP_MSCL,
- DIV_STAT_TOP_CAM10,
- DIV_STAT_TOP_CAM11,
- DIV_STAT_TOP_FSYS0,
- DIV_STAT_TOP_FSYS1,
- DIV_STAT_TOP_FSYS2,
- DIV_STAT_TOP_PERIC0,
- DIV_STAT_TOP_PERIC1,
- DIV_STAT_TOP_PERIC2,
- DIV_STAT_TOP_PERIC3,
- DIV_STAT_TOP_PLL_FREQ_DET,
ENABLE_ACLK_TOP,
ENABLE_SCLK_TOP,
ENABLE_SCLK_TOP_MSCL,
@@ -251,18 +224,18 @@ static struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initdata = {
static struct samsung_fixed_rate_clock top_fixed_clks[] __initdata = {
/* Xi2s{0|1}CDCLK input clock for I2S/PCM */
- FRATE(0, "ioclk_audiocdclk1", NULL, CLK_IS_ROOT, 100000000),
- FRATE(0, "ioclk_audiocdclk0", NULL, CLK_IS_ROOT, 100000000),
+ FRATE(0, "ioclk_audiocdclk1", NULL, 0, 100000000),
+ FRATE(0, "ioclk_audiocdclk0", NULL, 0, 100000000),
/* Xi2s1SDI input clock for SPDIF */
- FRATE(0, "ioclk_spdif_extclk", NULL, CLK_IS_ROOT, 100000000),
+ FRATE(0, "ioclk_spdif_extclk", NULL, 0, 100000000),
/* XspiCLK[4:0] input clock for SPI */
- FRATE(0, "ioclk_spi4_clk_in", NULL, CLK_IS_ROOT, 50000000),
- FRATE(0, "ioclk_spi3_clk_in", NULL, CLK_IS_ROOT, 50000000),
- FRATE(0, "ioclk_spi2_clk_in", NULL, CLK_IS_ROOT, 50000000),
- FRATE(0, "ioclk_spi1_clk_in", NULL, CLK_IS_ROOT, 50000000),
- FRATE(0, "ioclk_spi0_clk_in", NULL, CLK_IS_ROOT, 50000000),
+ FRATE(0, "ioclk_spi4_clk_in", NULL, 0, 50000000),
+ FRATE(0, "ioclk_spi3_clk_in", NULL, 0, 50000000),
+ FRATE(0, "ioclk_spi2_clk_in", NULL, 0, 50000000),
+ FRATE(0, "ioclk_spi1_clk_in", NULL, 0, 50000000),
+ FRATE(0, "ioclk_spi0_clk_in", NULL, 0, 50000000),
/* Xi2s1SCLK input clock for I2S1_BCLK */
- FRATE(0, "ioclk_i2s1_bclk_in", NULL, CLK_IS_ROOT, 12288000),
+ FRATE(0, "ioclk_i2s1_bclk_in", NULL, 0, 12288000),
};
static struct samsung_mux_clock top_mux_clks[] __initdata = {
@@ -490,9 +463,9 @@ static struct samsung_div_clock top_div_clks[] __initdata = {
DIV(CLK_DIV_SCLK_ISP_SENSOR1_A, "div_sclk_isp_sensor1_a",
"mout_sclk_isp_sensor1", DIV_TOP_CAM11, 8, 4),
DIV(CLK_DIV_SCLK_ISP_SENSOR0_B, "div_sclk_isp_sensor0_b",
- "div_sclk_isp_sensor0_a", DIV_TOP_CAM11, 12, 4),
+ "div_sclk_isp_sensor0_a", DIV_TOP_CAM11, 4, 4),
DIV(CLK_DIV_SCLK_ISP_SENSOR0_A, "div_sclk_isp_sensor0_a",
- "mout_sclk_isp_sensor0", DIV_TOP_CAM11, 8, 4),
+ "mout_sclk_isp_sensor0", DIV_TOP_CAM11, 0, 4),
/* DIV_TOP_FSYS0 */
DIV(CLK_DIV_SCLK_MMC1_B, "div_sclk_mmc1_b", "div_sclk_mmc1_a",
@@ -999,26 +972,12 @@ static unsigned long mif_clk_regs[] __initdata = {
MUX_ENABLE_MIF5,
MUX_ENABLE_MIF6,
MUX_ENABLE_MIF7,
- MUX_STAT_MIF0,
- MUX_STAT_MIF1,
- MUX_STAT_MIF2,
- MUX_STAT_MIF3,
- MUX_STAT_MIF4,
- MUX_STAT_MIF5,
- MUX_STAT_MIF6,
- MUX_STAT_MIF7,
DIV_MIF1,
DIV_MIF2,
DIV_MIF3,
DIV_MIF4,
DIV_MIF5,
DIV_MIF_PLL_FREQ_DET,
- DIV_STAT_MIF1,
- DIV_STAT_MIF2,
- DIV_STAT_MIF3,
- DIV_STAT_MIF4,
- DIV_STAT_MIF5,
- DIV_STAT_MIF_PLL_FREQ_DET,
ENABLE_ACLK_MIF0,
ENABLE_ACLK_MIF1,
ENABLE_ACLK_MIF2,
@@ -1565,7 +1524,6 @@ CLK_OF_DECLARE(exynos5433_cmu_mif, "samsung,exynos5433-cmu-mif",
static unsigned long peric_clk_regs[] __initdata = {
DIV_PERIC,
- DIV_STAT_PERIC,
ENABLE_ACLK_PERIC,
ENABLE_PCLK_PERIC0,
ENABLE_PCLK_PERIC1,
@@ -2012,11 +1970,6 @@ static unsigned long fsys_clk_regs[] __initdata = {
MUX_ENABLE_FSYS2,
MUX_ENABLE_FSYS3,
MUX_ENABLE_FSYS4,
- MUX_STAT_FSYS0,
- MUX_STAT_FSYS1,
- MUX_STAT_FSYS2,
- MUX_STAT_FSYS3,
- MUX_STAT_FSYS4,
MUX_IGNORE_FSYS2,
MUX_IGNORE_FSYS3,
ENABLE_ACLK_FSYS0,
@@ -2031,42 +1984,40 @@ static struct samsung_fixed_rate_clock fsys_fixed_clks[] __initdata = {
/* PHY clocks from USBDRD30_PHY */
FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY,
"phyclk_usbdrd30_udrd30_phyclock_phy", NULL,
- CLK_IS_ROOT, 60000000),
+ 0, 60000000),
FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_PHY,
"phyclk_usbdrd30_udrd30_pipe_pclk_phy", NULL,
- CLK_IS_ROOT, 125000000),
+ 0, 125000000),
/* PHY clocks from USBHOST30_PHY */
FRATE(CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_PHY,
"phyclk_usbhost30_uhost30_phyclock_phy", NULL,
- CLK_IS_ROOT, 60000000),
+ 0, 60000000),
FRATE(CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_PHY,
"phyclk_usbhost30_uhost30_pipe_pclk_phy", NULL,
- CLK_IS_ROOT, 125000000),
+ 0, 125000000),
/* PHY clocks from USBHOST20_PHY */
FRATE(CLK_PHYCLK_USBHOST20_PHY_FREECLK_PHY,
- "phyclk_usbhost20_phy_freeclk_phy", NULL, CLK_IS_ROOT,
- 60000000),
+ "phyclk_usbhost20_phy_freeclk_phy", NULL, 0, 60000000),
FRATE(CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK_PHY,
- "phyclk_usbhost20_phy_phyclock_phy", NULL, CLK_IS_ROOT,
- 60000000),
+ "phyclk_usbhost20_phy_phyclock_phy", NULL, 0, 60000000),
FRATE(CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI_PHY,
"phyclk_usbhost20_phy_clk48mohci_phy", NULL,
- CLK_IS_ROOT, 48000000),
+ 0, 48000000),
FRATE(CLK_PHYCLK_USBHOST20_PHY_HSIC1_PHY,
- "phyclk_usbhost20_phy_hsic1_phy", NULL, CLK_IS_ROOT,
+ "phyclk_usbhost20_phy_hsic1_phy", NULL, 0,
60000000),
/* PHY clocks from UFS_PHY */
FRATE(CLK_PHYCLK_UFS_TX0_SYMBOL_PHY, "phyclk_ufs_tx0_symbol_phy",
- NULL, CLK_IS_ROOT, 300000000),
+ NULL, 0, 300000000),
FRATE(CLK_PHYCLK_UFS_RX0_SYMBOL_PHY, "phyclk_ufs_rx0_symbol_phy",
- NULL, CLK_IS_ROOT, 300000000),
+ NULL, 0, 300000000),
FRATE(CLK_PHYCLK_UFS_TX1_SYMBOL_PHY, "phyclk_ufs_tx1_symbol_phy",
- NULL, CLK_IS_ROOT, 300000000),
+ NULL, 0, 300000000),
FRATE(CLK_PHYCLK_UFS_RX1_SYMBOL_PHY, "phyclk_ufs_rx1_symbol_phy",
- NULL, CLK_IS_ROOT, 300000000),
+ NULL, 0, 300000000),
/* PHY clocks from LLI_PHY */
FRATE(CLK_PHYCLK_LLI_MPHY_TO_UFS_PHY, "phyclk_lli_mphy_to_ufs_phy",
- NULL, CLK_IS_ROOT, 26000000),
+ NULL, 0, 26000000),
};
static struct samsung_mux_clock fsys_mux_clks[] __initdata = {
@@ -2362,9 +2313,7 @@ CLK_OF_DECLARE(exynos5433_cmu_fsys, "samsung,exynos5433-cmu-fsys",
static unsigned long g2d_clk_regs[] __initdata = {
MUX_SEL_G2D0,
MUX_SEL_ENABLE_G2D0,
- MUX_SEL_STAT_G2D0,
DIV_G2D,
- DIV_STAT_G2D,
DIV_ENABLE_ACLK_G2D,
DIV_ENABLE_ACLK_G2D_SECURE_SMMU_G2D,
DIV_ENABLE_PCLK_G2D,
@@ -2520,16 +2469,9 @@ static unsigned long disp_clk_regs[] __initdata = {
MUX_ENABLE_DISP2,
MUX_ENABLE_DISP3,
MUX_ENABLE_DISP4,
- MUX_STAT_DISP0,
- MUX_STAT_DISP1,
- MUX_STAT_DISP2,
- MUX_STAT_DISP3,
- MUX_STAT_DISP4,
MUX_IGNORE_DISP2,
DIV_DISP,
DIV_DISP_PLL_FREQ_DET,
- DIV_STAT_DISP,
- DIV_STAT_DISP_PLL_FREQ_DET,
ENABLE_ACLK_DISP0,
ENABLE_ACLK_DISP1,
ENABLE_PCLK_DISP,
@@ -2604,18 +2546,16 @@ static struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initdata = {
static struct samsung_fixed_rate_clock disp_fixed_clks[] __initdata = {
/* PHY clocks from MIPI_DPHY1 */
- FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, CLK_IS_ROOT,
- 188000000),
- FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, CLK_IS_ROOT,
- 100000000),
+ FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, 0, 188000000),
+ FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, 0, 100000000),
/* PHY clocks from MIPI_DPHY0 */
- FRATE(0, "phyclk_mipidphy0_bitclkdiv8_phy", NULL, CLK_IS_ROOT,
- 188000000),
- FRATE(0, "phyclk_mipidphy0_rxclkesc0_phy", NULL, CLK_IS_ROOT,
- 100000000),
+ FRATE(0, "phyclk_mipidphy0_bitclkdiv8_phy", NULL, 0, 188000000),
+ FRATE(0, "phyclk_mipidphy0_rxclkesc0_phy", NULL, 0, 100000000),
/* PHY clocks from HDMI_PHY */
- FRATE(0, "phyclk_hdmiphy_tmds_clko_phy", NULL, CLK_IS_ROOT, 300000000),
- FRATE(0, "phyclk_hdmiphy_pixel_clko_phy", NULL, CLK_IS_ROOT, 166000000),
+ FRATE(CLK_PHYCLK_HDMIPHY_TMDS_CLKO_PHY, "phyclk_hdmiphy_tmds_clko_phy",
+ NULL, 0, 300000000),
+ FRATE(CLK_PHYCLK_HDMIPHY_PIXEL_CLKO_PHY, "phyclk_hdmiphy_pixel_clko_phy",
+ NULL, 0, 166000000),
};
static struct samsung_mux_clock disp_mux_clks[] __initdata = {
@@ -2820,6 +2760,8 @@ static struct samsung_gate_clock disp_gate_clks[] __initdata = {
ENABLE_PCLK_DISP, 2, 0, 0),
GATE(CLK_PCLK_DECON_TV, "pclk_decon_tv", "div_pclk_disp",
ENABLE_PCLK_DISP, 1, 0, 0),
+ GATE(CLK_PCLK_DECON, "pclk_decon", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 0, 0, 0),
/* ENABLE_SCLK_DISP */
GATE(CLK_PHYCLK_MIPIDPHY1_BITCLKDIV8, "phyclk_mipidphy1_bitclkdiv8",
@@ -2919,11 +2861,8 @@ static unsigned long aud_clk_regs[] __initdata = {
MUX_SEL_AUD1,
MUX_ENABLE_AUD0,
MUX_ENABLE_AUD1,
- MUX_STAT_AUD0,
DIV_AUD0,
DIV_AUD1,
- DIV_STAT_AUD0,
- DIV_STAT_AUD1,
ENABLE_ACLK_AUD,
ENABLE_PCLK_AUD,
ENABLE_SCLK_AUD0,
@@ -2937,9 +2876,9 @@ PNAME(mout_aud_pll_user_aud_p) = { "oscclk", "fout_aud_pll", };
PNAME(mout_sclk_aud_pcm_p) = { "mout_aud_pll_user", "ioclk_audiocdclk0",};
static struct samsung_fixed_rate_clock aud_fixed_clks[] __initdata = {
- FRATE(0, "ioclk_jtag_tclk", NULL, CLK_IS_ROOT, 33000000),
- FRATE(0, "ioclk_slimbus_clk", NULL, CLK_IS_ROOT, 25000000),
- FRATE(0, "ioclk_i2s_bclk", NULL, CLK_IS_ROOT, 50000000),
+ FRATE(0, "ioclk_jtag_tclk", NULL, 0, 33000000),
+ FRATE(0, "ioclk_slimbus_clk", NULL, 0, 25000000),
+ FRATE(0, "ioclk_i2s_bclk", NULL, 0, 50000000),
};
static struct samsung_mux_clock aud_mux_clks[] __initdata = {
@@ -3087,7 +3026,6 @@ PNAME(mout_aclk_bus2_400_p) = { "oscclk", "aclk_bus2_400", };
#define CMU_BUS_COMMON_CLK_REGS \
DIV_BUS, \
- DIV_STAT_BUS, \
ENABLE_ACLK_BUS, \
ENABLE_PCLK_BUS, \
ENABLE_IP_BUS0, \
@@ -3100,7 +3038,6 @@ static unsigned long bus01_clk_regs[] __initdata = {
static unsigned long bus2_clk_regs[] __initdata = {
MUX_SEL_BUS2,
MUX_ENABLE_BUS2,
- MUX_STAT_BUS2,
CMU_BUS_COMMON_CLK_REGS,
};
@@ -3259,11 +3196,8 @@ static unsigned long g3d_clk_regs[] __initdata = {
G3D_PLL_FREQ_DET,
MUX_SEL_G3D,
MUX_ENABLE_G3D,
- MUX_STAT_G3D,
DIV_G3D,
DIV_G3D_PLL_FREQ_DET,
- DIV_STAT_G3D,
- DIV_STAT_G3D_PLL_FREQ_DET,
ENABLE_ACLK_G3D,
ENABLE_PCLK_G3D,
ENABLE_SCLK_G3D,
@@ -3379,7 +3313,6 @@ CLK_OF_DECLARE(exynos5433_cmu_g3d, "samsung,exynos5433-cmu-g3d",
static unsigned long gscl_clk_regs[] __initdata = {
MUX_SEL_GSCL,
MUX_ENABLE_GSCL,
- MUX_STAT_GSCL,
ENABLE_ACLK_GSCL,
ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL0,
ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL1,
@@ -3472,11 +3405,11 @@ static struct samsung_gate_clock gscl_gate_clks[] __initdata = {
/* ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL1 */
GATE(CLK_PCLK_SMMU_GSCL1, "pclk_smmu_gscl1", "mout_aclk_gscl_111_user",
- ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL0, 0, 0, 0),
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL1, 0, 0, 0),
/* ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2 */
GATE(CLK_PCLK_SMMU_GSCL2, "pclk_smmu_gscl2", "mout_aclk_gscl_111_user",
- ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL0, 0, 0, 0),
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2, 0, 0, 0),
};
static struct samsung_cmu_info gscl_cmu_info __initdata = {
@@ -3543,15 +3476,9 @@ static unsigned long apollo_clk_regs[] __initdata = {
MUX_ENABLE_APOLLO0,
MUX_ENABLE_APOLLO1,
MUX_ENABLE_APOLLO2,
- MUX_STAT_APOLLO0,
- MUX_STAT_APOLLO1,
- MUX_STAT_APOLLO2,
DIV_APOLLO0,
DIV_APOLLO1,
DIV_APOLLO_PLL_FREQ_DET,
- DIV_STAT_APOLLO0,
- DIV_STAT_APOLLO1,
- DIV_STAT_APOLLO_PLL_FREQ_DET,
ENABLE_ACLK_APOLLO,
ENABLE_PCLK_APOLLO,
ENABLE_SCLK_APOLLO,
@@ -3735,15 +3662,9 @@ static unsigned long atlas_clk_regs[] __initdata = {
MUX_ENABLE_ATLAS0,
MUX_ENABLE_ATLAS1,
MUX_ENABLE_ATLAS2,
- MUX_STAT_ATLAS0,
- MUX_STAT_ATLAS1,
- MUX_STAT_ATLAS2,
DIV_ATLAS0,
DIV_ATLAS1,
DIV_ATLAS_PLL_FREQ_DET,
- DIV_STAT_ATLAS0,
- DIV_STAT_ATLAS1,
- DIV_STAT_ATLAS_PLL_FREQ_DET,
ENABLE_ACLK_ATLAS,
ENABLE_PCLK_ATLAS,
ENABLE_SCLK_ATLAS,
@@ -3937,10 +3858,7 @@ static unsigned long mscl_clk_regs[] __initdata = {
MUX_SEL_MSCL1,
MUX_ENABLE_MSCL0,
MUX_ENABLE_MSCL1,
- MUX_STAT_MSCL0,
- MUX_STAT_MSCL1,
DIV_MSCL,
- DIV_STAT_MSCL,
ENABLE_ACLK_MSCL,
ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER0,
ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER1,
@@ -4097,9 +4015,7 @@ CLK_OF_DECLARE(exynos5433_cmu_mscl, "samsung,exynos5433-cmu-mscl",
static unsigned long mfc_clk_regs[] __initdata = {
MUX_SEL_MFC,
MUX_ENABLE_MFC,
- MUX_STAT_MFC,
DIV_MFC,
- DIV_STAT_MFC,
ENABLE_ACLK_MFC,
ENABLE_ACLK_MFC_SECURE_SMMU_MFC,
ENABLE_PCLK_MFC,
@@ -4207,9 +4123,7 @@ CLK_OF_DECLARE(exynos5433_cmu_mfc, "samsung,exynos5433-cmu-mfc",
static unsigned long hevc_clk_regs[] __initdata = {
MUX_SEL_HEVC,
MUX_ENABLE_HEVC,
- MUX_STAT_HEVC,
DIV_HEVC,
- DIV_STAT_HEVC,
ENABLE_ACLK_HEVC,
ENABLE_ACLK_HEVC_SECURE_SMMU_HEVC,
ENABLE_PCLK_HEVC,
@@ -4321,9 +4235,7 @@ CLK_OF_DECLARE(exynos5433_cmu_hevc, "samsung,exynos5433-cmu-hevc",
static unsigned long isp_clk_regs[] __initdata = {
MUX_SEL_ISP,
MUX_ENABLE_ISP,
- MUX_STAT_ISP,
DIV_ISP,
- DIV_STAT_ISP,
ENABLE_ACLK_ISP0,
ENABLE_ACLK_ISP1,
ENABLE_ACLK_ISP2,
@@ -4603,20 +4515,11 @@ static unsigned long cam0_clk_regs[] __initdata = {
MUX_ENABLE_CAM02,
MUX_ENABLE_CAM03,
MUX_ENABLE_CAM04,
- MUX_STAT_CAM00,
- MUX_STAT_CAM01,
- MUX_STAT_CAM02,
- MUX_STAT_CAM03,
- MUX_STAT_CAM04,
MUX_IGNORE_CAM01,
DIV_CAM00,
DIV_CAM01,
DIV_CAM02,
DIV_CAM03,
- DIV_STAT_CAM00,
- DIV_STAT_CAM01,
- DIV_STAT_CAM02,
- DIV_STAT_CAM03,
ENABLE_ACLK_CAM00,
ENABLE_ACLK_CAM01,
ENABLE_ACLK_CAM02,
@@ -4687,9 +4590,9 @@ PNAME(mout_sclk_pixelasync_lite_c_init_a_p) = {
static struct samsung_fixed_rate_clock cam0_fixed_clks[] __initdata = {
FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY, "phyclk_rxbyteclkhs0_s4_phy",
- NULL, CLK_IS_ROOT, 100000000),
+ NULL, 0, 100000000),
FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY, "phyclk_rxbyteclkhs0_s2a_phy",
- NULL, CLK_IS_ROOT, 100000000),
+ NULL, 0, 100000000),
};
static struct samsung_mux_clock cam0_mux_clks[] __initdata = {
@@ -4749,21 +4652,21 @@ static struct samsung_mux_clock cam0_mux_clks[] __initdata = {
MUX(CLK_MOUT_SCLK_LITE_FREECNT_C, "mout_sclk_lite_freecnt_c",
mout_sclk_lite_freecnt_c_p, MUX_SEL_CAM04, 24, 1),
MUX(CLK_MOUT_SCLK_LITE_FREECNT_B, "mout_sclk_lite_freecnt_b",
- mout_sclk_lite_freecnt_b_p, MUX_SEL_CAM04, 24, 1),
+ mout_sclk_lite_freecnt_b_p, MUX_SEL_CAM04, 20, 1),
MUX(CLK_MOUT_SCLK_LITE_FREECNT_A, "mout_sclk_lite_freecnt_a",
- mout_sclk_lite_freecnt_a_p, MUX_SEL_CAM04, 24, 1),
+ mout_sclk_lite_freecnt_a_p, MUX_SEL_CAM04, 16, 1),
MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_B, "mout_sclk_pixelasync_lite_c_b",
- mout_sclk_pixelasync_lite_c_b_p, MUX_SEL_CAM04, 24, 1),
+ mout_sclk_pixelasync_lite_c_b_p, MUX_SEL_CAM04, 12, 1),
MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_A, "mout_sclk_pixelasync_lite_c_a",
- mout_sclk_pixelasync_lite_c_a_p, MUX_SEL_CAM04, 24, 1),
+ mout_sclk_pixelasync_lite_c_a_p, MUX_SEL_CAM04, 8, 1),
MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_B,
"mout_sclk_pixelasync_lite_c_init_b",
mout_sclk_pixelasync_lite_c_init_b_p,
- MUX_SEL_CAM04, 24, 1),
+ MUX_SEL_CAM04, 4, 1),
MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_A,
"mout_sclk_pixelasync_lite_c_init_a",
mout_sclk_pixelasync_lite_c_init_a_p,
- MUX_SEL_CAM04, 24, 1),
+ MUX_SEL_CAM04, 0, 1),
};
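
The seven MUX_SEL_CAM04 fixes above correct a copy-paste: every selector pointed at bit 24, so all seven muxes read and wrote the same field. In the Samsung MUX(id, name, parents, reg, shift, width) macro each selector owns its own bit field; a sketch of the layout the corrected shifts imply (variable names are illustrative, not from the driver):

	/* MUX_SEL_CAM04 after the fix: one 1-bit selector per 4-bit slot */
	val = readl(reg_base + MUX_SEL_CAM04);
	sel_lite_freecnt_c           = (val >> 24) & 0x1;
	sel_lite_freecnt_b           = (val >> 20) & 0x1;
	sel_lite_freecnt_a           = (val >> 16) & 0x1;
	sel_pixelasync_lite_c_b      = (val >> 12) & 0x1;
	sel_pixelasync_lite_c_a      = (val >>  8) & 0x1;
	sel_pixelasync_lite_c_init_b = (val >>  4) & 0x1;
	sel_pixelasync_lite_c_init_a = (val >>  0) & 0x1;
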
static struct samsung_div_clock cam0_div_clks[] __initdata = {
@@ -5074,14 +4977,9 @@ static unsigned long cam1_clk_regs[] __initdata = {
MUX_ENABLE_CAM10,
MUX_ENABLE_CAM11,
MUX_ENABLE_CAM12,
- MUX_STAT_CAM10,
- MUX_STAT_CAM11,
- MUX_STAT_CAM12,
MUX_IGNORE_CAM11,
DIV_CAM10,
DIV_CAM11,
- DIV_STAT_CAM10,
- DIV_STAT_CAM11,
ENABLE_ACLK_CAM10,
ENABLE_ACLK_CAM11,
ENABLE_ACLK_CAM12,
@@ -5120,7 +5018,7 @@ PNAME(mout_aclk_lite_c_a_p) = { "mout_aclk_cam1_552_user",
static struct samsung_fixed_rate_clock cam1_fixed_clks[] __initdata = {
FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2B, "phyclk_rxbyteclkhs0_s2b_phy", NULL,
- CLK_IS_ROOT, 100000000),
+ 0, 100000000),
};
static struct samsung_mux_clock cam1_mux_clks[] __initdata = {
@@ -5134,9 +5032,9 @@ static struct samsung_mux_clock cam1_mux_clks[] __initdata = {
MUX(CLK_MOUT_ACLK_CAM1_333_USER, "mout_aclk_cam1_333_user",
mout_aclk_cam1_333_user_p, MUX_SEL_CAM10, 8, 1),
MUX(CLK_MOUT_ACLK_CAM1_400_USER, "mout_aclk_cam1_400_user",
- mout_aclk_cam1_400_user_p, MUX_SEL_CAM01, 4, 1),
+ mout_aclk_cam1_400_user_p, MUX_SEL_CAM10, 4, 1),
MUX(CLK_MOUT_ACLK_CAM1_552_USER, "mout_aclk_cam1_552_user",
- mout_aclk_cam1_552_user_p, MUX_SEL_CAM01, 0, 1),
+ mout_aclk_cam1_552_user_p, MUX_SEL_CAM10, 0, 1),
/* MUX_SEL_CAM11 */
MUX(CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2B_USER,
@@ -5161,7 +5059,7 @@ static struct samsung_mux_clock cam1_mux_clks[] __initdata = {
static struct samsung_div_clock cam1_div_clks[] __initdata = {
/* DIV_CAM10 */
- DIV(CLK_DIV_SCLK_ISP_WPWM, "div_sclk_isp_wpwm",
+ DIV(CLK_DIV_SCLK_ISP_MPWM, "div_sclk_isp_mpwm",
"div_pclk_cam1_83", DIV_CAM10, 16, 2),
DIV(CLK_DIV_PCLK_CAM1_83, "div_pclk_cam1_83",
"mout_aclk_cam1_333_user", DIV_CAM10, 12, 2),
@@ -5355,7 +5253,7 @@ static struct samsung_gate_clock cam1_gate_clks[] __initdata = {
ENABLE_PCLK_CAM1, 5, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_ISP_I2C0, "pclk_isp_i2c0", "div_pclk_cam1_83",
ENABLE_PCLK_CAM1, 4, CLK_IGNORE_UNUSED, 0),
- GATE(CLK_PCLK_ISP_MPWM, "pclk_isp_wpwm", "div_pclk_cam1_83",
+ GATE(CLK_PCLK_ISP_MPWM, "pclk_isp_mpwm", "div_pclk_cam1_83",
ENABLE_PCLK_CAM1, 3, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_FD, "pclk_fd", "div_pclk_fd",
ENABLE_PCLK_CAM1, 3, CLK_IGNORE_UNUSED, 0),
@@ -5388,7 +5286,7 @@ static struct samsung_gate_clock cam1_gate_clks[] __initdata = {
ENABLE_SCLK_CAM1, 5, 0, 0),
GATE(CLK_SCLK_ISP_SPI0, "sclk_isp_spi0", "mout_sclk_isp_spi0_user",
ENABLE_SCLK_CAM1, 4, 0, 0),
- GATE(CLK_SCLK_ISP_MPWM, "sclk_isp_wpwm", "div_sclk_isp_wpwm",
+ GATE(CLK_SCLK_ISP_MPWM, "sclk_isp_mpwm", "div_sclk_isp_mpwm",
ENABLE_SCLK_CAM1, 3, 0, 0),
GATE(CLK_PCLK_DBG_ISP, "sclk_dbg_isp", "div_pclk_dbg_cam1",
ENABLE_SCLK_CAM1, 2, 0, 0),
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 590813871ffe..c57cff1e1798 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -31,16 +31,16 @@ PNAME(mout_spi_p) = { "div125", "div200" };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
- FRATE(0, "xtal", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "xtal", NULL, 0, 0),
};
/* fixed rate clocks */
static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
- FRATE(0, "ppll", NULL, CLK_IS_ROOT, 1000000000),
- FRATE(0, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
- FRATE(0, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
- FRATE(0, "usb_ohci12", NULL, CLK_IS_ROOT, 12000000),
- FRATE(0, "usb_ohci48", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "ppll", NULL, 0, 1000000000),
+ FRATE(0, "usb_phy0", NULL, 0, 60000000),
+ FRATE(0, "usb_phy1", NULL, 0, 60000000),
+ FRATE(0, "usb_ohci12", NULL, 0, 12000000),
+ FRATE(0, "usb_ohci48", NULL, 0, 48000000),
};
/* fixed factor clocks */
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 55f8e2e24ab8..ad68d463b12c 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -894,10 +894,8 @@ PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_user_p) = { "fin_pll",
/* fixed rate clocks used in the FSYS0 block */
static struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
- FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL,
- CLK_IS_ROOT, 60000000),
- FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL,
- CLK_IS_ROOT, 125000000),
+ FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL, 0, 60000000),
+ FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL, 0, 125000000),
};
static unsigned long fsys0_clk_regs[] __initdata = {
@@ -1009,11 +1007,11 @@ PNAME(mout_phyclk_ufs20_rx1_user_p) = { "fin_pll", "phyclk_ufs20_rx1_symbol" };
/* fixed rate clocks used in the FSYS1 block */
static struct samsung_fixed_rate_clock fixed_rate_clks_fsys1[] __initdata = {
FRATE(PHYCLK_UFS20_TX0_SYMBOL, "phyclk_ufs20_tx0_symbol", NULL,
- CLK_IS_ROOT, 300000000),
+ 0, 300000000),
FRATE(PHYCLK_UFS20_RX0_SYMBOL, "phyclk_ufs20_rx0_symbol", NULL,
- CLK_IS_ROOT, 300000000),
+ 0, 300000000),
FRATE(PHYCLK_UFS20_RX1_SYMBOL, "phyclk_ufs20_rx1_symbol", NULL,
- CLK_IS_ROOT, 300000000),
+ 0, 300000000),
};
static unsigned long fsys1_clk_regs[] __initdata = {
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index 0945a8852299..d7b011c1fcf8 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -344,7 +344,7 @@ struct samsung_mux_clock s3c2442_muxes[] __initdata = {
*/
#define XTI 1
struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = {
- FRATE(XTI, "xti", NULL, CLK_IS_ROOT, 0),
+ FRATE(XTI, "xti", NULL, 0, 0),
};
static void __init s3c2410_common_clk_register_fixed_ext(
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index 44d6a9f4f5b2..effe3736ec6b 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -232,8 +232,8 @@ static struct notifier_block s3c2412_restart_handler = {
*/
#define XTI 1
struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = {
- FRATE(XTI, "xti", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "ext", NULL, CLK_IS_ROOT, 0),
+ FRATE(XTI, "xti", NULL, 0, 0),
+ FRATE(0, "ext", NULL, 0, 0),
};
static void __init s3c2412_common_clk_register_fixed_ext(
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 2c0a1ea3c80c..37562783b25e 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -371,10 +371,10 @@ static struct notifier_block s3c2443_restart_handler = {
* Only necessary until the devicetree-move is complete
*/
struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = {
- FRATE(0, "xti", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "ext", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "ext_i2s", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "ext_uart", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "xti", NULL, 0, 0),
+ FRATE(0, "ext", NULL, 0, 0),
+ FRATE(0, "ext_i2s", NULL, 0, 0),
+ FRATE(0, "ext_uart", NULL, 0, 0),
};
static void __init s3c2443_common_clk_register_fixed_ext(
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index d325ed1e196b..60aa775bd374 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -176,14 +176,14 @@ PNAME(audio2_p6410) = { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk2",
/* Fixed rate clocks generated outside the SoC. */
FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_ext_clks) __initdata = {
- FRATE(0, "fin_pll", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "xusbxti", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "fin_pll", NULL, 0, 0),
+ FRATE(0, "xusbxti", NULL, 0, 0),
};
/* Fixed rate clocks generated inside the SoC. */
FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_clks) __initdata = {
- FRATE(CLK27M, "clk27m", NULL, CLK_IS_ROOT, 27000000),
- FRATE(CLK48M, "clk48m", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(CLK27M, "clk27m", NULL, 0, 27000000),
+ FRATE(CLK48M, "clk48m", NULL, 0, 48000000),
};
/* List of clock muxes present on all S3C64xx SoCs. */
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index 759aaf342bea..52302262045d 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -503,15 +503,15 @@ static const struct samsung_mux_clock s5p6442_mux_clks[] __initconst = {
/* S5PV210-specific fixed rate clocks generated inside the SoC. */
static const struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initconst = {
- FRATE(SCLK_HDMI27M, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
- FRATE(SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
- FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
- FRATE(SCLK_USBPHY1, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(SCLK_HDMI27M, "sclk_hdmi27m", NULL, 0, 27000000),
+ FRATE(SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 27000000),
+ FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, 0, 48000000),
+ FRATE(SCLK_USBPHY1, "sclk_usbphy1", NULL, 0, 48000000),
};
/* S5P6442-specific fixed rate clocks generated inside the SoC. */
static const struct samsung_fixed_rate_clock s5p6442_frate_clks[] __initconst = {
- FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 30000000),
+ FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, 0, 30000000),
};
/* Common clock dividers. */
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index 957aae63e7cc..d0c6c9a2d06a 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -1423,7 +1423,7 @@ static int atlas7_reset_module(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops atlas7_rst_ops = {
+static const struct reset_control_ops atlas7_rst_ops = {
.reset = atlas7_reset_module,
};
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
index 1cebf253e8fd..c2d572748167 100644
--- a/drivers/clk/socfpga/clk-gate-a10.c
+++ b/drivers/clk/socfpga/clk-gate-a10.c
@@ -115,7 +115,6 @@ static void __init __socfpga_gate_init(struct device_node *node,
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
int rc;
- int i = 0;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (WARN_ON(!socfpga_clk))
@@ -167,12 +166,9 @@ static void __init __socfpga_gate_init(struct device_node *node,
init.name = clk_name;
init.ops = ops;
init.flags = 0;
- while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] =
- of_clk_get_parent_name(node, i)) != NULL)
- i++;
+ init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
init.parent_names = parent_name;
- init.num_parents = i;
socfpga_clk->hw.hw.init = &init;
clk = clk_register(NULL, &socfpga_clk->hw.hw);
diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c
index 1f397cb72e89..70993f1e88bc 100644
--- a/drivers/clk/socfpga/clk-periph-a10.c
+++ b/drivers/clk/socfpga/clk-periph-a10.c
@@ -74,7 +74,7 @@ static __init void __socfpga_periph_init(struct device_node *node,
struct clk *clk;
struct socfpga_periph_clk *periph_clk;
const char *clk_name = node->name;
- const char *parent_name;
+ const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
int rc;
u32 fixed_div;
@@ -109,9 +109,8 @@ static __init void __socfpga_periph_init(struct device_node *node,
init.ops = ops;
init.flags = 0;
- parent_name = of_clk_get_parent_name(node, 0);
- init.num_parents = 1;
- init.parent_names = &parent_name;
+ init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
+ init.parent_names = parent_name;
periph_clk->hw.hw.init = &init;
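
Both SoCFPGA hunks swap an open-coded parent walk for of_clk_parent_fill(), which fills the name array and returns how many parents it found; the periph clock additionally gains multi-parent support instead of hard-coding parent 0. The helper is equivalent to the loop the gate hunk deletes:

	/* Open-coded equivalent of
	 * of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS) */
	int i = 0;

	while (i < SOCFPGA_MAX_PARENTS &&
	       (parent_name[i] = of_clk_get_parent_name(node, i)) != NULL)
		i++;
	init.num_parents = i;
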
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 402d630bd531..35fabe1a32c3 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -74,7 +74,7 @@ static struct clk_ops clk_pll_ops = {
.get_parent = clk_pll_get_parent,
};
-static struct __init clk * __socfpga_pll_init(struct device_node *node,
+static struct clk * __init __socfpga_pll_init(struct device_node *node,
const struct clk_ops *ops)
{
u32 reg;
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 009bd1410cfa..2f86e3f94efa 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -386,24 +386,20 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
{
struct clk *clk, *clk1;
- clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
- 32000);
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, 0, 32000);
clk_register_clkdev(clk, "osc_32k_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
- 24000000);
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, 0, 24000000);
clk_register_clkdev(clk, "osc_24m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
- 25000000);
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, 0, 25000000);
clk_register_clkdev(clk, "osc_25m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
- 125000000);
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, 0, 125000000);
clk_register_clkdev(clk, "gmii_pad_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
- CLK_IS_ROOT, 12288000);
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL, 0,
+ 12288000);
clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
/* clock derived from 32 KHz osc clk */
@@ -897,11 +893,10 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
&_lock);
clk_register_clkdev(clk, "ras_apb_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, CLK_IS_ROOT,
+ clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, 0,
50000000);
- clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, CLK_IS_ROOT,
- 50000000);
+ clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, 0, 50000000);
clk = clk_register_gate(NULL, "can0_clk", "apb_clk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN0_CLK_ENB, 0,
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 9c7abfd951ba..cbb19a90f2d6 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -443,24 +443,20 @@ void __init spear1340_clk_init(void __iomem *misc_base)
{
struct clk *clk, *clk1;
- clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
- 32000);
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, 0, 32000);
clk_register_clkdev(clk, "osc_32k_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
- 24000000);
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, 0, 24000000);
clk_register_clkdev(clk, "osc_24m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
- 25000000);
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, 0, 25000000);
clk_register_clkdev(clk, "osc_25m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
- 125000000);
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, 0, 125000000);
clk_register_clkdev(clk, "gmii_pad_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
- CLK_IS_ROOT, 12288000);
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL, 0,
+ 12288000);
clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
/* clock derived from 32 KHz osc clk */
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 404a55edd613..c403c66b6583 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -251,7 +251,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base,
struct clk *clk;
clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL,
- CLK_IS_ROOT, 125000000);
+ 0, 125000000);
clk_register_clkdev(clk, "smii_125m_pad", NULL);
clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
@@ -391,12 +391,10 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
{
struct clk *clk, *clk1, *ras_apb_clk;
- clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
- 32000);
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, 0, 32000);
clk_register_clkdev(clk, "osc_32k_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
- 24000000);
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, 0, 24000000);
clk_register_clkdev(clk, "osc_24m_clk", NULL);
/* clock derived from 32 KHz osc clk */
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index e24f85cd4300..7c9383c3c2c6 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -117,12 +117,10 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
{
struct clk *clk, *clk1;
- clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
- 32000);
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, 0, 32000);
clk_register_clkdev(clk, "osc_32k_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, CLK_IS_ROOT,
- 30000000);
+ clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, 0, 30000000);
clk_register_clkdev(clk, "osc_30m_clk", NULL);
/* clock derived from 32 KHz osc clk */
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 24d99594c0b3..627267c7ec5c 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -244,10 +244,10 @@ static const char ** __init flexgen_get_parents(struct device_node *np,
int *num_parents)
{
const char **parents;
- int nparents;
+ unsigned int nparents;
nparents = of_clk_get_parent_count(np);
- if (WARN_ON(nparents <= 0))
+ if (WARN_ON(!nparents))
return NULL;
parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index ccb324d97160..dec4eaaecc00 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -574,12 +574,16 @@ static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
struct stm_fs params;
long hwrate = 0;
unsigned long flags = 0;
+ int ret;
if (!rate || !parent_rate)
return -EINVAL;
- if (!clk_fs660c32_vco_get_params(parent_rate, rate, &params))
- clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
+ ret = clk_fs660c32_vco_get_params(parent_rate, rate, &params);
+ if (ret)
+ return ret;
+
+ clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n",
__func__, clk_hw_get_name(hw),
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 5dc5ce217960..b1e10ffe7a44 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -26,10 +26,10 @@ static const char ** __init clkgen_mux_get_parents(struct device_node *np,
int *num_parents)
{
const char **parents;
- int nparents;
+ unsigned int nparents;
nparents = of_clk_get_parent_count(np);
- if (WARN_ON(nparents <= 0))
+ if (WARN_ON(!nparents))
return ERR_PTR(-EINVAL);
parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
@@ -822,11 +822,10 @@ err:
if (!clk_data->clks[i])
continue;
- composite = container_of(__clk_get_hw(clk_data->clks[i]),
- struct clk_composite, hw);
- kfree(container_of(composite->gate_hw, struct clk_gate, hw));
- kfree(container_of(composite->rate_hw, struct clk_divider, hw));
- kfree(container_of(composite->mux_hw, struct clk_mux, hw));
+ composite = to_clk_composite(__clk_get_hw(clk_data->clks[i]));
+ kfree(to_clk_gate(composite->gate_hw));
+ kfree(to_clk_divider(composite->rate_hw));
+ kfree(to_clk_mux(composite->mux_hw));
}
kfree(clk_data->clks);
diff --git a/drivers/clk/sunxi/clk-a10-hosc.c b/drivers/clk/sunxi/clk-a10-hosc.c
index 0481d5d673d6..6b598c6a0213 100644
--- a/drivers/clk/sunxi/clk-a10-hosc.c
+++ b/drivers/clk/sunxi/clk-a10-hosc.c
@@ -15,9 +15,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
#define SUNXI_OSC24M_GATE 0
@@ -61,7 +61,6 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
goto err_free_gate;
of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
return;
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index 044c1717b762..d9ea22ec4e25 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -85,7 +85,7 @@ static int sunxi_ve_of_xlate(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sunxi_ve_reset_ops = {
+static const struct reset_control_ops sunxi_ve_reset_ops = {
.assert = sunxi_ve_reset_assert,
.deassert = sunxi_ve_reset_deassert,
};
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 1611b036421c..3437f734c9bf 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -17,7 +17,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
@@ -107,7 +106,6 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
goto iounmap_reg;
of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
return;
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 59428dbd607a..ddefe9668863 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -48,7 +48,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
u32 reg;
unsigned long rate;
struct clk_factors *factors = to_clk_factors(hw);
- struct clk_factors_config *config = factors->config;
+ const struct clk_factors_config *config = factors->config;
/* Fetch the register value */
reg = readl(factors->reg);
@@ -63,18 +63,28 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
if (config->pwidth != SUNXI_FACTORS_NOT_APPLICABLE)
p = FACTOR_GET(config->pshift, config->pwidth, reg);
- /* Calculate the rate */
- rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
+ if (factors->recalc) {
+ struct factors_request factors_req = {
+ .parent_rate = parent_rate,
+ .n = n,
+ .k = k,
+ .m = m,
+ .p = p,
+ };
- return rate;
-}
+ /* get mux details from mux clk structure */
+ if (factors->mux)
+ factors_req.parent_index =
+ (reg >> factors->mux->shift) &
+ factors->mux->mask;
-static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct clk_factors *factors = to_clk_factors(hw);
- factors->get_factors((u32 *)&rate, (u32)*parent_rate,
- NULL, NULL, NULL, NULL);
+ factors->recalc(&factors_req);
+
+ return factors_req.rate;
+ }
+
+ /* Calculate the rate */
+ rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
return rate;
}
@@ -82,6 +92,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
static int clk_factors_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
+ struct clk_factors *factors = to_clk_factors(hw);
struct clk_hw *parent, *best_parent = NULL;
int i, num_parents;
unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
@@ -89,6 +100,10 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
/* find the parent that can help provide the fastest rate <= rate */
num_parents = clk_hw_get_num_parents(hw);
for (i = 0; i < num_parents; i++) {
+ struct factors_request factors_req = {
+ .rate = req->rate,
+ .parent_index = i,
+ };
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
@@ -97,8 +112,9 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
else
parent_rate = clk_hw_get_rate(parent);
- child_rate = clk_factors_round_rate(hw, req->rate,
- &parent_rate);
+ factors_req.parent_rate = parent_rate;
+ factors->get_factors(&factors_req);
+ child_rate = factors_req.rate;
if (child_rate <= req->rate && child_rate > best_child_rate) {
best_parent = parent;
@@ -120,13 +136,16 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u8 n = 0, k = 0, m = 0, p = 0;
+ struct factors_request req = {
+ .rate = rate,
+ .parent_rate = parent_rate,
+ };
u32 reg;
struct clk_factors *factors = to_clk_factors(hw);
- struct clk_factors_config *config = factors->config;
+ const struct clk_factors_config *config = factors->config;
unsigned long flags = 0;
- factors->get_factors((u32 *)&rate, (u32)parent_rate, &n, &k, &m, &p);
+ factors->get_factors(&req);
if (factors->lock)
spin_lock_irqsave(factors->lock, flags);
@@ -135,10 +154,10 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
reg = readl(factors->reg);
/* Set up the new factors - macros do not do anything if width is 0 */
- reg = FACTOR_SET(config->nshift, config->nwidth, reg, n);
- reg = FACTOR_SET(config->kshift, config->kwidth, reg, k);
- reg = FACTOR_SET(config->mshift, config->mwidth, reg, m);
- reg = FACTOR_SET(config->pshift, config->pwidth, reg, p);
+ reg = FACTOR_SET(config->nshift, config->nwidth, reg, req.n);
+ reg = FACTOR_SET(config->kshift, config->kwidth, reg, req.k);
+ reg = FACTOR_SET(config->mshift, config->mwidth, reg, req.m);
+ reg = FACTOR_SET(config->pshift, config->pwidth, reg, req.p);
/* Apply them now */
writel(reg, factors->reg);
@@ -155,7 +174,6 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_factors_ops = {
.determine_rate = clk_factors_determine_rate,
.recalc_rate = clk_factors_recalc_rate,
- .round_rate = clk_factors_round_rate,
.set_rate = clk_factors_set_rate,
};
@@ -172,7 +190,7 @@ struct clk *sunxi_factors_register(struct device_node *node,
struct clk_hw *mux_hw = NULL;
const char *clk_name = node->name;
const char *parents[FACTORS_MAX_PARENTS];
- int i = 0;
+ int ret, i = 0;
/* if we have a mux, we will have >1 parents */
i = of_clk_parent_fill(node, parents, FACTORS_MAX_PARENTS);
@@ -188,21 +206,22 @@ struct clk *sunxi_factors_register(struct device_node *node,
factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
if (!factors)
- return NULL;
+ goto err_factors;
/* set up factors properties */
factors->reg = reg;
factors->config = data->table;
factors->get_factors = data->getter;
+ factors->recalc = data->recalc;
factors->lock = lock;
/* Add a gate if this factor clock can be gated */
if (data->enable) {
gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
- if (!gate) {
- kfree(factors);
- return NULL;
- }
+ if (!gate)
+ goto err_gate;
+
+ factors->gate = gate;
/* set up gate properties */
gate->reg = reg;
@@ -214,11 +233,10 @@ struct clk *sunxi_factors_register(struct device_node *node,
/* Add a mux if this factor clock can be muxed */
if (data->mux) {
mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
- if (!mux) {
- kfree(factors);
- kfree(gate);
- return NULL;
- }
+ if (!mux)
+ goto err_mux;
+
+ factors->mux = mux;
		/* set up mux properties */
mux->reg = reg;
@@ -233,11 +251,44 @@ struct clk *sunxi_factors_register(struct device_node *node,
mux_hw, &clk_mux_ops,
&factors->hw, &clk_factors_ops,
gate_hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk))
+ goto err_register;
- if (!IS_ERR(clk)) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
- }
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret)
+ goto err_provider;
return clk;
+
+err_provider:
+ /* TODO: The composite clock stuff will leak a bit here. */
+ clk_unregister(clk);
+err_register:
+ kfree(mux);
+err_mux:
+ kfree(gate);
+err_gate:
+ kfree(factors);
+err_factors:
+ return NULL;
+}
+
+void sunxi_factors_unregister(struct device_node *node, struct clk *clk)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct clk_factors *factors;
+ const char *name;
+
+ if (!hw)
+ return;
+
+ factors = to_clk_factors(hw);
+ name = clk_hw_get_name(hw);
+
+ of_clk_del_provider(node);
+ /* TODO: The composite clock stuff will leak a bit here. */
+ clk_unregister(clk);
+ kfree(factors->mux);
+ kfree(factors->gate);
+ kfree(factors);
}
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 171085ab5513..1e63c5b2d5f4 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -2,7 +2,6 @@
#define __MACH_SUNXI_CLK_FACTORS_H
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/spinlock.h>
#define SUNXI_FACTORS_NOT_APPLICABLE (0)
@@ -19,21 +18,36 @@ struct clk_factors_config {
u8 n_start;
};
+struct factors_request {
+ unsigned long rate;
+ unsigned long parent_rate;
+ u8 parent_index;
+ u8 n;
+ u8 k;
+ u8 m;
+ u8 p;
+};
+
struct factors_data {
int enable;
int mux;
int muxmask;
- struct clk_factors_config *table;
- void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
+ const struct clk_factors_config *table;
+ void (*getter)(struct factors_request *req);
+ void (*recalc)(struct factors_request *req);
const char *name;
};
struct clk_factors {
struct clk_hw hw;
void __iomem *reg;
- struct clk_factors_config *config;
- void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
+ const struct clk_factors_config *config;
+ void (*get_factors)(struct factors_request *req);
+ void (*recalc)(struct factors_request *req);
spinlock_t *lock;
+ /* for cleanup */
+ struct clk_mux *mux;
+ struct clk_gate *gate;
};
struct clk *sunxi_factors_register(struct device_node *node,
@@ -41,4 +55,6 @@ struct clk *sunxi_factors_register(struct device_node *node,
spinlock_t *lock,
void __iomem *reg);
+void sunxi_factors_unregister(struct device_node *node, struct clk *clk);
+
#endif
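
The clk-factors.h rework replaces the old six-pointer getter signature with a single struct factors_request that carries the requested and achieved rate, the parent rate and index, and the n/k/m/p factors in both directions. A minimal sketch of a getter in the new style, for a hypothetical divider where rate = parent_rate / (m + 1) (the name and the divider are illustrative only):

	static void demo_get_factors(struct factors_request *req)
	{
		unsigned long div;

		/* This clock can only divide, never multiply */
		if (req->rate > req->parent_rate)
			req->rate = req->parent_rate;

		div = DIV_ROUND_UP(req->parent_rate, req->rate);

		/* Report both the factor and the rate it actually yields */
		req->m = div - 1;
		req->rate = req->parent_rate / div;
	}
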
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index d167e1efb927..b38d71cec74c 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -28,17 +29,16 @@
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sun4i_a10_get_mod0_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_a10_get_mod0_factors(struct factors_request *req)
{
u8 div, calcm, calcp;
/* These clocks can only divide, so we will never be able to achieve
* frequencies higher than the parent frequency */
- if (*freq > parent_rate)
- *freq = parent_rate;
+ if (req->rate > req->parent_rate)
+ req->rate = req->parent_rate;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
if (div < 16)
calcp = 0;
@@ -51,18 +51,13 @@ static void sun4i_a10_get_mod0_factors(u32 *freq, u32 parent_rate,
calcm = DIV_ROUND_UP(div, 1 << calcp);
- *freq = (parent_rate >> calcp) / calcm;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
-
- *m = calcm - 1;
- *p = calcp;
+ req->rate = (req->parent_rate >> calcp) / calcm;
+ req->m = calcm - 1;
+ req->p = calcp;
}
/* user manual says "n" but it's really "p" */
-static struct clk_factors_config sun4i_a10_mod0_config = {
+static const struct clk_factors_config sun4i_a10_mod0_config = {
.mshift = 0,
.mwidth = 4,
.pshift = 16,
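
A worked pass through the mod0 rounding above (numbers are illustrative): a 24 MHz parent with a 1 MHz request gives div = DIV_ROUND_UP(24000000, 1000000) = 24; 24 is not below 16 but, assuming the usual power-of-two ladder continues with calcp = 1 when div / 2 < 16, we get calcp = 1 and calcm = DIV_ROUND_UP(24, 2) = 12, so the achieved rate is (24000000 >> 1) / 12 = 1 MHz exactly, stored as m = 11, p = 1.
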
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index f4da52b5ca0e..a085c3bc127c 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -98,6 +98,8 @@ static void __init sunxi_simple_gates_init(struct device_node *node)
sunxi_simple_gates_setup(node, NULL, 0);
}
+CLK_OF_DECLARE(sun4i_a10_gates, "allwinner,sun4i-a10-gates-clk",
+ sunxi_simple_gates_init);
CLK_OF_DECLARE(sun4i_a10_apb0, "allwinner,sun4i-a10-apb0-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun4i_a10_apb1, "allwinner,sun4i-a10-apb1-gates-clk",
@@ -130,6 +132,8 @@ CLK_OF_DECLARE(sun8i_a23_apb2, "allwinner,sun8i-a23-apb2-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun8i_a33_ahb1, "allwinner,sun8i-a33-ahb1-gates-clk",
sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a83t_apb0, "allwinner,sun8i-a83t-apb0-gates-clk",
+ sunxi_simple_gates_init);
CLK_OF_DECLARE(sun9i_a80_ahb0, "allwinner,sun9i-a80-ahb0-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun9i_a80_ahb1, "allwinner,sun9i-a80-ahb1-gates-clk",
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 23d042aabb4f..68021fa5ecd9 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -9,7 +9,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -87,7 +86,6 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
clk_parent, 0, reg, i,
0, NULL);
WARN_ON(IS_ERR(clk_data->clks[i]));
- clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
j++;
}
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 20887686bdbe..84a187e55360 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -8,211 +8,97 @@
*
*/
+#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
-#define SUN6I_AR100_MAX_PARENTS 4
-#define SUN6I_AR100_SHIFT_MASK 0x3
-#define SUN6I_AR100_SHIFT_MAX SUN6I_AR100_SHIFT_MASK
-#define SUN6I_AR100_SHIFT_SHIFT 4
-#define SUN6I_AR100_DIV_MASK 0x1f
-#define SUN6I_AR100_DIV_MAX (SUN6I_AR100_DIV_MASK + 1)
-#define SUN6I_AR100_DIV_SHIFT 8
-#define SUN6I_AR100_MUX_MASK 0x3
-#define SUN6I_AR100_MUX_SHIFT 16
-
-struct ar100_clk {
- struct clk_hw hw;
- void __iomem *reg;
-};
-
-static inline struct ar100_clk *to_ar100_clk(struct clk_hw *hw)
-{
- return container_of(hw, struct ar100_clk, hw);
-}
-
-static unsigned long ar100_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct ar100_clk *clk = to_ar100_clk(hw);
- u32 val = readl(clk->reg);
- int shift = (val >> SUN6I_AR100_SHIFT_SHIFT) & SUN6I_AR100_SHIFT_MASK;
- int div = (val >> SUN6I_AR100_DIV_SHIFT) & SUN6I_AR100_DIV_MASK;
-
- return (parent_rate >> shift) / (div + 1);
-}
-
-static int ar100_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- int nparents = clk_hw_get_num_parents(hw);
- long best_rate = -EINVAL;
- int i;
-
- req->best_parent_hw = NULL;
-
- for (i = 0; i < nparents; i++) {
- unsigned long parent_rate;
- unsigned long tmp_rate;
- struct clk_hw *parent;
- unsigned long div;
- int shift;
-
- parent = clk_hw_get_parent_by_index(hw, i);
- parent_rate = clk_hw_get_rate(parent);
- div = DIV_ROUND_UP(parent_rate, req->rate);
-
- /*
- * The AR100 clk contains 2 divisors:
- * - one power of 2 divisor
- * - one regular divisor
- *
- * First check if we can safely shift (or divide by a power
- * of 2) without losing precision on the requested rate.
- */
- shift = ffs(div) - 1;
- if (shift > SUN6I_AR100_SHIFT_MAX)
- shift = SUN6I_AR100_SHIFT_MAX;
-
- div >>= shift;
-
- /*
- * Then if the divisor is still bigger than what the HW
- * actually supports, use a bigger shift (or power of 2
- * divider) value and accept to lose some precision.
- */
- while (div > SUN6I_AR100_DIV_MAX) {
- shift++;
- div >>= 1;
- if (shift > SUN6I_AR100_SHIFT_MAX)
- break;
- }
-
- /*
- * If the shift value (or power of 2 divider) is bigger
- * than what the HW actually support, skip this parent.
- */
- if (shift > SUN6I_AR100_SHIFT_MAX)
- continue;
-
- tmp_rate = (parent_rate >> shift) / div;
- if (!req->best_parent_hw || tmp_rate > best_rate) {
- req->best_parent_hw = parent;
- req->best_parent_rate = parent_rate;
- best_rate = tmp_rate;
- }
- }
-
- if (best_rate < 0)
- return best_rate;
-
- req->rate = best_rate;
-
- return 0;
-}
-
-static int ar100_set_parent(struct clk_hw *hw, u8 index)
-{
- struct ar100_clk *clk = to_ar100_clk(hw);
- u32 val = readl(clk->reg);
-
- if (index >= SUN6I_AR100_MAX_PARENTS)
- return -EINVAL;
-
- val &= ~(SUN6I_AR100_MUX_MASK << SUN6I_AR100_MUX_SHIFT);
- val |= (index << SUN6I_AR100_MUX_SHIFT);
- writel(val, clk->reg);
-
- return 0;
-}
+#include "clk-factors.h"
-static u8 ar100_get_parent(struct clk_hw *hw)
-{
- struct ar100_clk *clk = to_ar100_clk(hw);
- return (readl(clk->reg) >> SUN6I_AR100_MUX_SHIFT) &
- SUN6I_AR100_MUX_MASK;
-}
-
-static int ar100_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+/**
+ * sun6i_get_ar100_factors - Calculates factors p, m for AR100
+ *
+ * AR100 rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+static void sun6i_get_ar100_factors(struct factors_request *req)
{
- unsigned long div = parent_rate / rate;
- struct ar100_clk *clk = to_ar100_clk(hw);
- u32 val = readl(clk->reg);
+ unsigned long div;
int shift;
- if (parent_rate % rate)
- return -EINVAL;
+ /* clock only divides */
+ if (req->rate > req->parent_rate)
+ req->rate = req->parent_rate;
- shift = ffs(div) - 1;
- if (shift > SUN6I_AR100_SHIFT_MAX)
- shift = SUN6I_AR100_SHIFT_MAX;
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
- div >>= shift;
+ if (div < 32)
+ shift = 0;
+ else if (div >> 1 < 32)
+ shift = 1;
+ else if (div >> 2 < 32)
+ shift = 2;
+ else
+ shift = 3;
- if (div > SUN6I_AR100_DIV_MAX)
- return -EINVAL;
+ div >>= shift;
- val &= ~((SUN6I_AR100_SHIFT_MASK << SUN6I_AR100_SHIFT_SHIFT) |
- (SUN6I_AR100_DIV_MASK << SUN6I_AR100_DIV_SHIFT));
- val |= (shift << SUN6I_AR100_SHIFT_SHIFT) |
- (div << SUN6I_AR100_DIV_SHIFT);
- writel(val, clk->reg);
+ if (div > 32)
+ div = 32;
- return 0;
+ req->rate = (req->parent_rate >> shift) / div;
+ req->m = div - 1;
+ req->p = shift;
}
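
A quick numeric check of the shift ladder above (illustrative numbers): a 600 MHz parent with a 5 MHz request gives div = 120; 120 >> 2 = 30 is the first value under 32, so shift = 2 and div becomes 30, and the achieved rate is (600000000 >> 2) / 30 = 5 MHz exactly, registered as p = 2, m = 29.
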
-static struct clk_ops ar100_ops = {
- .recalc_rate = ar100_recalc_rate,
- .determine_rate = ar100_determine_rate,
- .set_parent = ar100_set_parent,
- .get_parent = ar100_get_parent,
- .set_rate = ar100_set_rate,
+static const struct clk_factors_config sun6i_ar100_config = {
+ .mwidth = 5,
+ .mshift = 8,
+ .pwidth = 2,
+ .pshift = 4,
};
+static const struct factors_data sun6i_ar100_data = {
+ .mux = 16,
+ .muxmask = GENMASK(1, 0),
+ .table = &sun6i_ar100_config,
+ .getter = sun6i_get_ar100_factors,
+};
+
+static DEFINE_SPINLOCK(sun6i_ar100_lock);
+
static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
{
- const char *parents[SUN6I_AR100_MAX_PARENTS];
struct device_node *np = pdev->dev.of_node;
- const char *clk_name = np->name;
- struct clk_init_data init;
- struct ar100_clk *ar100;
struct resource *r;
+ void __iomem *reg;
struct clk *clk;
- int nparents;
-
- ar100 = devm_kzalloc(&pdev->dev, sizeof(*ar100), GFP_KERNEL);
- if (!ar100)
- return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ar100->reg = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(ar100->reg))
- return PTR_ERR(ar100->reg);
+ reg = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
- nparents = of_clk_get_parent_count(np);
- if (nparents > SUN6I_AR100_MAX_PARENTS)
- nparents = SUN6I_AR100_MAX_PARENTS;
-
- of_clk_parent_fill(np, parents, nparents);
+ clk = sunxi_factors_register(np, &sun6i_ar100_data, &sun6i_ar100_lock,
+ reg);
+ if (!clk)
+ return -ENOMEM;
- of_property_read_string(np, "clock-output-names", &clk_name);
+ platform_set_drvdata(pdev, clk);
- init.name = clk_name;
- init.ops = &ar100_ops;
- init.parent_names = parents;
- init.num_parents = nparents;
- init.flags = 0;
+ return 0;
+}
- ar100->hw.init = &init;
+static int sun6i_a31_ar100_clk_remove(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk *clk = platform_get_drvdata(pdev);
- clk = clk_register(&pdev->dev, &ar100->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ sunxi_factors_unregister(np, clk);
- return of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return 0;
}
static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
@@ -227,6 +113,7 @@ static struct platform_driver sun6i_a31_ar100_clk_driver = {
.of_match_table = sun6i_a31_ar100_clk_dt_ids,
},
.probe = sun6i_a31_ar100_clk_probe,
+ .remove = sun6i_a31_ar100_clk_remove,
};
module_platform_driver(sun6i_a31_ar100_clk_driver);
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index 7ba61103a6f5..2ea61debffc1 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -36,7 +36,7 @@ static struct clk *sun8i_a23_apb0_register(struct device_node *node,
/* The A23 APB0 clock is a standard 2 bit wide divider clock */
clk = clk_register_divider(NULL, clk_name, clk_parent, 0, reg,
- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ 0, 2, 0, NULL);
if (IS_ERR(clk))
return clk;
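
Dropping CLK_DIVIDER_POWER_OF_TWO changes how the 2-bit field is decoded: as a power-of-two divider it selected divide-by-1/2/4/8, while a plain clk_divider interprets the field as divide-by-(value + 1), i.e. 1 to 4, which is what the A23 APB0 hardware actually implements according to this fix.
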
diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
index e32d18ba252b..63fdb790df29 100644
--- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c
+++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
@@ -17,7 +17,6 @@
* GNU General Public License for more details.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -110,3 +109,5 @@ err_unmap:
CLK_OF_DECLARE(sun8i_h3_bus_gates, "allwinner,sun8i-h3-bus-gates-clk",
sun8i_h3_bus_gates_init);
+CLK_OF_DECLARE(sun8i_a83t_bus_gates, "allwinner,sun8i-a83t-bus-gates-clk",
+ sun8i_h3_bus_gates_init);
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index bf117a636d23..411d3033a96e 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -15,74 +15,106 @@
*/
#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/of_address.h>
-#include "clk-factors.h"
+#define SUN8I_MBUS_ENABLE 31
+#define SUN8I_MBUS_MUX_SHIFT 24
+#define SUN8I_MBUS_MUX_MASK 0x3
+#define SUN8I_MBUS_DIV_SHIFT 0
+#define SUN8I_MBUS_DIV_WIDTH 3
+#define SUN8I_MBUS_MAX_PARENTS 4
-/**
- * sun8i_a23_get_mbus_factors() - calculates m factor for MBUS clocks
- * MBUS rate is calculated as follows
- * rate = parent_rate / (m + 1);
- */
+static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
-static void sun8i_a23_get_mbus_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void __init sun8i_a23_mbus_setup(struct device_node *node)
{
- u8 div;
+ int num_parents = of_clk_get_parent_count(node);
+ const char **parents;
+ const char *clk_name = node->name;
+ struct resource res;
+ struct clk_divider *div;
+ struct clk_gate *gate;
+ struct clk_mux *mux;
+ struct clk *clk;
+ void __iomem *reg;
+ int err;
- /*
- * These clocks can only divide, so we will never be able to
- * achieve frequencies higher than the parent frequency
- */
- if (*freq > parent_rate)
- *freq = parent_rate;
+ parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
+ if (!parents)
+ return;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("Could not get registers for sun8i-mbus-clk\n");
+ goto err_free_parents;
+ }
- if (div > 8)
- div = 8;
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ goto err_unmap;
- *freq = parent_rate / div;
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ goto err_free_div;
- /* we were called to round the frequency, we can now return */
- if (m == NULL)
- return;
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto err_free_mux;
- *m = div - 1;
-}
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ of_clk_parent_fill(node, parents, num_parents);
-static struct clk_factors_config sun8i_a23_mbus_config = {
- .mshift = 0,
- .mwidth = 3,
-};
+ gate->reg = reg;
+ gate->bit_idx = SUN8I_MBUS_ENABLE;
+ gate->lock = &sun8i_a23_mbus_lock;
-static const struct factors_data sun8i_a23_mbus_data __initconst = {
- .enable = 31,
- .mux = 24,
- .muxmask = BIT(1) | BIT(0),
- .table = &sun8i_a23_mbus_config,
- .getter = sun8i_a23_get_mbus_factors,
-};
+ div->reg = reg;
+ div->shift = SUN8I_MBUS_DIV_SHIFT;
+ div->width = SUN8I_MBUS_DIV_WIDTH;
+ div->lock = &sun8i_a23_mbus_lock;
-static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
-
-static void __init sun8i_a23_mbus_setup(struct device_node *node)
-{
- struct clk *mbus;
- void __iomem *reg;
+ mux->reg = reg;
+ mux->shift = SUN8I_MBUS_MUX_SHIFT;
+ mux->mask = SUN8I_MBUS_MUX_MASK;
+ mux->lock = &sun8i_a23_mbus_lock;
- reg = of_iomap(node, 0);
- if (!reg) {
- pr_err("Could not get registers for a23-mbus-clk\n");
- return;
- }
+ clk = clk_register_composite(NULL, clk_name, parents, num_parents,
+ &mux->hw, &clk_mux_ops,
+ &div->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops,
+ 0);
+ if (IS_ERR(clk))
+ goto err_free_gate;
- mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
- &sun8i_a23_mbus_lock, reg);
+ err = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (err)
+ goto err_unregister_clk;
+ kfree(parents); /* parents is deep copied */
/* The MBUS clock needs to be always enabled */
- __clk_get(mbus);
- clk_prepare_enable(mbus);
+ __clk_get(clk);
+ clk_prepare_enable(clk);
+
+ return;
+
+err_unregister_clk:
+ /* TODO: The composite clock stuff will leak a bit here. */
+ clk_unregister(clk);
+err_free_gate:
+ kfree(gate);
+err_free_mux:
+ kfree(mux);
+err_free_div:
+ kfree(div);
+err_unmap:
+ iounmap(reg);
+ of_address_to_resource(node, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+err_free_parents:
+ kfree(parents);
}
CLK_OF_DECLARE(sun8i_a23_mbus, "allwinner,sun8i-a23-mbus-clk", sun8i_a23_mbus_setup);
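For reference, the composite registered above just chains the generic mux, divider and gate primitives over a single register. A minimal standalone decode sketch (plain C, hypothetical register value and parent rates; it follows the bit layout in the defines above, where the flag-less generic divider treats a field value d as divide-by-(d + 1)):

#include <stdint.h>

/*
 * Sketch only: decodes an MBUS register value per the defines above
 * (gate at bit 31, 2-bit mux at bits 25:24, 3-bit divider at bits 2:0).
 */
static unsigned long mbus_rate(uint32_t reg, const unsigned long parent_rates[4])
{
	unsigned int mux = (reg >> 24) & 0x3;
	unsigned int div = reg & 0x7;

	if (!(reg & (1u << 31)))
		return 0;		/* gate closed, clock off */

	return parent_rates[mux] / (div + 1);
}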
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
index 6c4c98324d3c..43f014f85803 100644
--- a/drivers/clk/sunxi/clk-sun9i-core.c
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
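The getters converted in this and the following files all take a single struct factors_request in place of the old pointer arguments. Its definition lives in clk-factors.h, which is not part of this hunk; the sketch below is an assumption consistent with how the fields are used here (rate goes in as the request and comes back as the achievable rate, parent_rate and parent_index are inputs, n/k/m/p are outputs):

/* Sketch of the request structure (see drivers/clk/sunxi/clk-factors.h). */
struct factors_request {
	unsigned long rate;		/* in: requested; out: achievable */
	unsigned long parent_rate;	/* in: rate of the selected parent */
	u8 parent_index;		/* in: mux index, for getters that care */
	u8 n, k, m, p;			/* out: computed factors */
};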
@@ -32,15 +33,14 @@
* p and m are named div1 and div2 in Allwinner's SDK
*/
-static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
- u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
+static void sun9i_a80_get_pll4_factors(struct factors_request *req)
{
int n;
int m = 1;
int p = 1;
/* Normalize value to a 6 MHz multiple (24 MHz / 4) */
- n = DIV_ROUND_UP(*freq, 6000000);
+ n = DIV_ROUND_UP(req->rate, 6000000);
/* If n is too large switch to steps of 12 MHz */
if (n > 255) {
@@ -60,18 +60,13 @@ static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
else if (n < 12)
n = 12;
- *freq = ((24000000 * n) >> p) / (m + 1);
-
- /* we were called to round the frequency, we can now return */
- if (n_ret == NULL)
- return;
-
- *n_ret = n;
- *m_ret = m;
- *p_ret = p;
+ req->rate = ((24000000 * n) >> p) / (m + 1);
+ req->n = n;
+ req->m = m;
+ req->p = p;
}
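Worked example for the PLL4 getter above, assuming the usual 24 MHz oscillator: a 960 MHz request gives n = DIV_ROUND_UP(960000000, 6000000) = 160, which is within [12, 255], so the defaults m = 1 and p = 1 stand and the reported rate is ((24 MHz * 160) >> 1) / (1 + 1) = 960 MHz, meeting the request exactly.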
-static struct clk_factors_config sun9i_a80_pll4_config = {
+static const struct clk_factors_config sun9i_a80_pll4_config = {
.mshift = 18,
.mwidth = 1,
.nshift = 8,
@@ -111,30 +106,24 @@ CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_se
* rate = parent_rate / (m + 1);
*/
-static void sun9i_a80_get_gt_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_gt_factors(struct factors_request *req)
{
u32 div;
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
/* maximum divider is 4 */
if (div > 4)
div = 4;
- *freq = parent_rate / div;
-
- /* we were called to round the frequency, we can now return */
- if (!m)
- return;
-
- *m = div;
+ req->rate = req->parent_rate / div;
+ req->m = div;
}
-static struct clk_factors_config sun9i_a80_gt_config = {
+static const struct clk_factors_config sun9i_a80_gt_config = {
.mshift = 0,
.mwidth = 2,
};
@@ -176,30 +165,24 @@ CLK_OF_DECLARE(sun9i_a80_gt, "allwinner,sun9i-a80-gt-clk", sun9i_a80_gt_setup);
* rate = parent_rate >> p;
*/
-static void sun9i_a80_get_ahb_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_ahb_factors(struct factors_request *req)
{
u32 _p;
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
- _p = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+ _p = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
/* maximum p is 3 */
if (_p > 3)
_p = 3;
- *freq = parent_rate >> _p;
-
- /* we were called to round the frequency, we can now return */
- if (!p)
- return;
-
- *p = _p;
+ req->rate = req->parent_rate >> _p;
+ req->p = _p;
}
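Note that this getter can undershoot, since p is a power-of-two shift: with a 240 MHz parent and a 100 MHz request, _p = order_base_2(DIV_ROUND_UP(240, 100)) = order_base_2(3) = 2, so the achieved rate is 240 MHz >> 2 = 60 MHz, not the 80 MHz a plain divide-by-3 would give.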
-static struct clk_factors_config sun9i_a80_ahb_config = {
+static const struct clk_factors_config sun9i_a80_ahb_config = {
.pshift = 0,
.pwidth = 2,
};
@@ -262,34 +245,25 @@ CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_se
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sun9i_a80_get_apb1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_apb1_factors(struct factors_request *req)
{
u32 div;
- u8 calcm, calcp;
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
/* Highest possible divider is 256 (p = 3, m = 31) */
if (div > 256)
div = 256;
- calcp = order_base_2(div);
- calcm = (parent_rate >> calcp) - 1;
- *freq = (parent_rate >> calcp) / (calcm + 1);
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
-
- *m = calcm;
- *p = calcp;
+ req->p = order_base_2(div);
+ req->m = (req->parent_rate >> req->p) - 1;
+ req->rate = (req->parent_rate >> req->p) / (req->m + 1);
}
-static struct clk_factors_config sun9i_a80_apb1_config = {
+static const struct clk_factors_config sun9i_a80_apb1_config = {
.mshift = 0,
.mwidth = 5,
.pshift = 16,
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a9b176139aca..028dd832a39f 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -83,7 +83,7 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sun9i_mmc_reset_ops = {
+static const struct reset_control_ops sun9i_mmc_reset_ops = {
.assert = sun9i_mmc_reset_assert,
.deassert = sun9i_mmc_reset_deassert,
};
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 5ba2188ee99c..91de0a006773 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -28,214 +28,6 @@
static DEFINE_SPINLOCK(clk_lock);
-/**
- * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
- */
-
-#define SUN6I_AHB1_MAX_PARENTS 4
-#define SUN6I_AHB1_MUX_PARENT_PLL6 3
-#define SUN6I_AHB1_MUX_SHIFT 12
-/* un-shifted mask is what mux_clk expects */
-#define SUN6I_AHB1_MUX_MASK 0x3
-#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \
- SUN6I_AHB1_MUX_MASK)
-
-#define SUN6I_AHB1_DIV_SHIFT 4
-#define SUN6I_AHB1_DIV_MASK (0x3 << SUN6I_AHB1_DIV_SHIFT)
-#define SUN6I_AHB1_DIV_GET(reg) ((reg & SUN6I_AHB1_DIV_MASK) >> \
- SUN6I_AHB1_DIV_SHIFT)
-#define SUN6I_AHB1_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_DIV_MASK) | \
- (div << SUN6I_AHB1_DIV_SHIFT))
-#define SUN6I_AHB1_PLL6_DIV_SHIFT 6
-#define SUN6I_AHB1_PLL6_DIV_MASK (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
-#define SUN6I_AHB1_PLL6_DIV_GET(reg) ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
- SUN6I_AHB1_PLL6_DIV_SHIFT)
-#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
- (div << SUN6I_AHB1_PLL6_DIV_SHIFT))
-
-struct sun6i_ahb1_clk {
- struct clk_hw hw;
- void __iomem *reg;
-};
-
-#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
-
-static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
- unsigned long rate;
- u32 reg;
-
- /* Fetch the register value */
- reg = readl(ahb1->reg);
-
- /* apply pre-divider first if parent is pll6 */
- if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
- parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
-
- /* clk divider */
- rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
-
- return rate;
-}
-
-static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
- u8 parent, unsigned long parent_rate)
-{
- u8 div, calcp, calcm = 1;
-
- /*
- * clock can only divide, so we will never be able to achieve
- * frequencies higher than the parent frequency
- */
- if (parent_rate && rate > parent_rate)
- rate = parent_rate;
-
- div = DIV_ROUND_UP(parent_rate, rate);
-
- /* calculate pre-divider if parent is pll6 */
- if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
- if (div < 4)
- calcp = 0;
- else if (div / 2 < 4)
- calcp = 1;
- else if (div / 4 < 4)
- calcp = 2;
- else
- calcp = 3;
-
- calcm = DIV_ROUND_UP(div, 1 << calcp);
- } else {
- calcp = __roundup_pow_of_two(div);
- calcp = calcp > 3 ? 3 : calcp;
- }
-
- /* we were asked to pass back divider values */
- if (divp) {
- *divp = calcp;
- *pre_divp = calcm - 1;
- }
-
- return (parent_rate / calcm) >> calcp;
-}
-
-static int sun6i_ahb1_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- struct clk_hw *parent, *best_parent = NULL;
- int i, num_parents;
- unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
-
- /* find the parent that can help provide the fastest rate <= rate */
- num_parents = clk_hw_get_num_parents(hw);
- for (i = 0; i < num_parents; i++) {
- parent = clk_hw_get_parent_by_index(hw, i);
- if (!parent)
- continue;
- if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
- parent_rate = clk_hw_round_rate(parent, req->rate);
- else
- parent_rate = clk_hw_get_rate(parent);
-
- child_rate = sun6i_ahb1_clk_round(req->rate, NULL, NULL, i,
- parent_rate);
-
- if (child_rate <= req->rate && child_rate > best_child_rate) {
- best_parent = parent;
- best = parent_rate;
- best_child_rate = child_rate;
- }
- }
-
- if (!best_parent)
- return -EINVAL;
-
- req->best_parent_hw = best_parent;
- req->best_parent_rate = best;
- req->rate = best_child_rate;
-
- return 0;
-}
-
-static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
- unsigned long flags;
- u8 div, pre_div, parent;
- u32 reg;
-
- spin_lock_irqsave(&clk_lock, flags);
-
- reg = readl(ahb1->reg);
-
- /* need to know which parent is used to apply pre-divider */
- parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
- sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
-
- reg = SUN6I_AHB1_DIV_SET(reg, div);
- reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
- writel(reg, ahb1->reg);
-
- spin_unlock_irqrestore(&clk_lock, flags);
-
- return 0;
-}
-
-static const struct clk_ops sun6i_ahb1_clk_ops = {
- .determine_rate = sun6i_ahb1_clk_determine_rate,
- .recalc_rate = sun6i_ahb1_clk_recalc_rate,
- .set_rate = sun6i_ahb1_clk_set_rate,
-};
-
-static void __init sun6i_ahb1_clk_setup(struct device_node *node)
-{
- struct clk *clk;
- struct sun6i_ahb1_clk *ahb1;
- struct clk_mux *mux;
- const char *clk_name = node->name;
- const char *parents[SUN6I_AHB1_MAX_PARENTS];
- void __iomem *reg;
- int i;
-
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg))
- return;
-
- /* we have a mux, we will have >1 parents */
- i = of_clk_parent_fill(node, parents, SUN6I_AHB1_MAX_PARENTS);
- of_property_read_string(node, "clock-output-names", &clk_name);
-
- ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
- if (!ahb1)
- return;
-
- mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
- if (!mux) {
- kfree(ahb1);
- return;
- }
-
- /* set up clock properties */
- mux->reg = reg;
- mux->shift = SUN6I_AHB1_MUX_SHIFT;
- mux->mask = SUN6I_AHB1_MUX_MASK;
- mux->lock = &clk_lock;
- ahb1->reg = reg;
-
- clk = clk_register_composite(NULL, clk_name, parents, i,
- &mux->hw, &clk_mux_ops,
- &ahb1->hw, &sun6i_ahb1_clk_ops,
- NULL, NULL, 0);
-
- if (!IS_ERR(clk)) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
- }
-}
-CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
-
/* Maximum number of parents our clocks have */
#define SUNXI_MAX_PARENTS 5
@@ -246,49 +38,45 @@ CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_se
* parent_rate is always 24MHz
*/
-static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_pll1_factors(struct factors_request *req)
{
u8 div;
/* Normalize value to a 6M multiple */
- div = *freq / 6000000;
- *freq = 6000000 * div;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
+ div = req->rate / 6000000;
+ req->rate = 6000000 * div;
/* m is always zero for pll1 */
- *m = 0;
+ req->m = 0;
/* k is 1 only on these cases */
- if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
- *k = 1;
+ if (req->rate >= 768000000 || req->rate == 42000000 ||
+ req->rate == 54000000)
+ req->k = 1;
else
- *k = 0;
+ req->k = 0;
/* p will be 3 for divs under 10 */
if (div < 10)
- *p = 3;
+ req->p = 3;
/* p will be 2 for divs between 10 - 20 and odd divs under 32 */
else if (div < 20 || (div < 32 && (div & 1)))
- *p = 2;
+ req->p = 2;
/* p will be 1 for even divs under 32, divs under 40 and odd pairs
* of divs between 40-62 */
else if (div < 40 || (div < 64 && (div & 2)))
- *p = 1;
+ req->p = 1;
/* any other entries have p = 0 */
else
- *p = 0;
+ req->p = 0;
/* calculate a suitable n based on k and p */
- div <<= *p;
- div /= (*k + 1);
- *n = div / 4;
+ div <<= req->p;
+ div /= (req->k + 1);
+ req->n = div / 4;
}
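Desk-checking the selection above with the 24 MHz parent: a 1008 MHz request normalizes to div = 168, so m = 0, k = 1 (1008 MHz >= 768 MHz) and p = 0 (168 is neither under 40 nor has bit 1 set while under 64); then n = (168 / (k + 1)) / 4 = 21, and the programmed factors reproduce 24 MHz * 21 * (k + 1) = 1008 MHz exactly.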
/**
@@ -297,15 +85,14 @@ static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
* rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
* parent_rate should always be 24MHz
*/
-static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun6i_a31_get_pll1_factors(struct factors_request *req)
{
/*
* We can operate only on MHz, this will make our life easier
* later.
*/
- u32 freq_mhz = *freq / 1000000;
- u32 parent_freq_mhz = parent_rate / 1000000;
+ u32 freq_mhz = req->rate / 1000000;
+ u32 parent_freq_mhz = req->parent_rate / 1000000;
/*
* Round down the frequency to the closest multiple of either
@@ -319,28 +106,20 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
else
freq_mhz = round_freq_16;
- *freq = freq_mhz * 1000000;
-
- /*
- * If the factors pointer are null, we were just called to
- * round down the frequency.
- * Exit.
- */
- if (n == NULL)
- return;
+ req->rate = freq_mhz * 1000000;
/* If the frequency is a multiple of 32 MHz, k is always 3 */
if (!(freq_mhz % 32))
- *k = 3;
+ req->k = 3;
/* If the frequency is a multiple of 9 MHz, k is always 2 */
else if (!(freq_mhz % 9))
- *k = 2;
+ req->k = 2;
/* If the frequency is a multiple of 8 MHz, k is always 1 */
else if (!(freq_mhz % 8))
- *k = 1;
+ req->k = 1;
/* Otherwise, we don't use the k factor */
else
- *k = 0;
+ req->k = 0;
/*
* If the frequency is a multiple of 2 but not a multiple of
@@ -351,27 +130,28 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
* somehow relates to this frequency.
*/
if ((freq_mhz % 6) == 2 || (freq_mhz % 6) == 4)
- *m = 2;
+ req->m = 2;
/*
* If the frequency is a multiple of 6MHz, but the factor is
* odd, m will be 3
*/
else if ((freq_mhz / 6) & 1)
- *m = 3;
+ req->m = 3;
/* Otherwise, we end up with m = 1 */
else
- *m = 1;
+ req->m = 1;
/* Calculate n thanks to the above factors we already got */
- *n = freq_mhz * (*m + 1) / ((*k + 1) * parent_freq_mhz) - 1;
+ req->n = freq_mhz * (req->m + 1) / ((req->k + 1) * parent_freq_mhz)
+ - 1;
/*
* If n ends up out of bounds and we can still decrease
* m, do it.
*/
- if ((*n + 1) > 31 && (*m + 1) > 1) {
- *n = (*n + 1) / 2 - 1;
- *m = (*m + 1) / 2 - 1;
+ if ((req->n + 1) > 31 && (req->m + 1) > 1) {
+ req->n = (req->n + 1) / 2 - 1;
+ req->m = (req->m + 1) / 2 - 1;
}
}
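Worked example for the A31 variant (24 MHz parent): a 768 MHz request is already a multiple of 24 MHz, so it survives the rounding; 768 % 32 == 0 gives k = 3, 768 / 6 = 128 is even so m = 1, and n = 768 * 2 / (4 * 24) - 1 = 15. Cross-check with the formula above: 24 MHz * (15 + 1) * (3 + 1) / (1 + 1) = 768 MHz.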
@@ -382,45 +162,41 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
* parent_rate is always 24MHz
*/
-static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun8i_a23_get_pll1_factors(struct factors_request *req)
{
u8 div;
/* Normalize value to a 6M multiple */
- div = *freq / 6000000;
- *freq = 6000000 * div;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
+ div = req->rate / 6000000;
+ req->rate = 6000000 * div;
/* m is always zero for pll1 */
- *m = 0;
+ req->m = 0;
/* k is 1 only on these cases */
- if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
- *k = 1;
+ if (req->rate >= 768000000 || req->rate == 42000000 ||
+ req->rate == 54000000)
+ req->k = 1;
else
- *k = 0;
+ req->k = 0;
/* p will be 2 for divs under 20 and odd divs under 32 */
if (div < 20 || (div < 32 && (div & 1)))
- *p = 2;
+ req->p = 2;
/* p will be 1 for even divs under 32, divs under 40 and odd pairs
* of divs between 40-62 */
else if (div < 40 || (div < 64 && (div & 2)))
- *p = 1;
+ req->p = 1;
/* any other entries have p = 0 */
else
- *p = 0;
+ req->p = 0;
/* calculate a suitable n based on k and p */
- div <<= *p;
- div /= (*k + 1);
- *n = div / 4 - 1;
+ div <<= req->p;
+ div /= (req->k + 1);
+ req->n = div / 4 - 1;
}
/**
@@ -430,29 +206,24 @@ static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
* parent_rate is always 24MHz
*/
-static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_pll5_factors(struct factors_request *req)
{
u8 div;
/* Normalize value to a parent_rate multiple (24M) */
- div = *freq / parent_rate;
- *freq = parent_rate * div;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
+ div = req->rate / req->parent_rate;
+ req->rate = req->parent_rate * div;
if (div < 31)
- *k = 0;
+ req->k = 0;
else if (div / 2 < 31)
- *k = 1;
+ req->k = 1;
else if (div / 3 < 31)
- *k = 2;
+ req->k = 2;
else
- *k = 3;
+ req->k = 3;
- *n = DIV_ROUND_UP(div, (*k+1));
+ req->n = DIV_ROUND_UP(div, (req->k + 1));
}
/**
@@ -462,24 +233,19 @@ static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
* parent_rate is always 24MHz
*/
-static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun6i_a31_get_pll6_factors(struct factors_request *req)
{
u8 div;
/* Normalize value to a parent_rate multiple (24M) */
- div = *freq / parent_rate;
- *freq = parent_rate * div;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
+ div = req->rate / req->parent_rate;
+ req->rate = req->parent_rate * div;
- *k = div / 32;
- if (*k > 3)
- *k = 3;
+ req->k = div / 32;
+ if (req->k > 3)
+ req->k = 3;
- *n = DIV_ROUND_UP(div, (*k+1)) - 1;
+ req->n = DIV_ROUND_UP(div, (req->k + 1)) - 1;
}
/**
@@ -488,37 +254,94 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
* rate = parent_rate >> p
*/
-static void sun5i_a13_get_ahb_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun5i_a13_get_ahb_factors(struct factors_request *req)
{
u32 div;
/* divide only */
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
/*
* user manual says valid speed is 8k ~ 276M, but tests show it
* can work at speeds up to 300M, just after reparenting to pll6
*/
- if (*freq < 8000)
- *freq = 8000;
- if (*freq > 300000000)
- *freq = 300000000;
+ if (req->rate < 8000)
+ req->rate = 8000;
+ if (req->rate > 300000000)
+ req->rate = 300000000;
- div = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+ div = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
/* p = 0 ~ 3 */
if (div > 3)
div = 3;
- *freq = parent_rate >> div;
+ req->rate = req->parent_rate >> div;
- /* we were called to round the frequency, we can now return */
- if (p == NULL)
- return;
+ req->p = div;
+}
+
+#define SUN6I_AHB1_PARENT_PLL6 3
+
+/**
+ * sun6i_get_ahb1_factors() - calculates m, p factors for AHB1
+ * AHB1 rate is calculated as follows
+ * rate = parent_rate >> p
+ *
+ * if parent is pll6, then
+ * parent_rate = pll6 rate / (m + 1)
+ */
+
+static void sun6i_get_ahb1_factors(struct factors_request *req)
+{
+ u8 div, calcp, calcm = 1;
+
+ /*
+ * clock can only divide, so we will never be able to achieve
+ * frequencies higher than the parent frequency
+ */
+ if (req->parent_rate && req->rate > req->parent_rate)
+ req->rate = req->parent_rate;
+
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
+
+ /* calculate pre-divider if parent is pll6 */
+ if (req->parent_index == SUN6I_AHB1_PARENT_PLL6) {
+ if (div < 4)
+ calcp = 0;
+ else if (div / 2 < 4)
+ calcp = 1;
+ else if (div / 4 < 4)
+ calcp = 2;
+ else
+ calcp = 3;
+
+ calcm = DIV_ROUND_UP(div, 1 << calcp);
+ } else {
+ calcp = __roundup_pow_of_two(div);
+ calcp = calcp > 3 ? 3 : calcp;
+ }
- *p = div;
+ req->rate = (req->parent_rate / calcm) >> calcp;
+ req->p = calcp;
+ req->m = calcm - 1;
+}
+
+/**
+ * sun6i_ahb1_recalc() - calculates AHB1 clock rate from m, p factors and
+ * parent index
+ */
+static void sun6i_ahb1_recalc(struct factors_request *req)
+{
+ req->rate = req->parent_rate;
+
+ /* apply pre-divider first if parent is pll6 */
+ if (req->parent_index == SUN6I_AHB1_PARENT_PLL6)
+ req->rate /= req->m + 1;
+
+ /* clk divider */
+ req->rate >>= req->p;
}
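Since the PLL6 pre-divider makes this getter the least obvious of the bunch, here is a standalone mirror of the same math (plain C with DIV_ROUND_UP spelled out; a sketch for desk-checking, not the kernel code itself). With PLL6 at 600 MHz and a 200 MHz request, div = 3, so calcp = 0 and calcm = 3, i.e. m = 2, p = 0 and exactly 200 MHz:

#include <stdint.h>

/* Standalone mirror of sun6i_get_ahb1_factors() for the PLL6 parent case. */
static unsigned long ahb1_from_pll6(unsigned long parent, unsigned long rate,
				    uint8_t *m, uint8_t *p)
{
	unsigned long div, calcp, calcm;

	if (rate > parent)
		rate = parent;
	div = (parent + rate - 1) / rate;	/* DIV_ROUND_UP */

	if (div < 4)
		calcp = 0;
	else if (div / 2 < 4)
		calcp = 1;
	else if (div / 4 < 4)
		calcp = 2;
	else
		calcp = 3;
	calcm = (div + (1ul << calcp) - 1) >> calcp; /* DIV_ROUND_UP(div, 1 << calcp) */

	*m = calcm - 1;
	*p = calcp;
	return (parent / calcm) >> calcp;	/* 600 MHz, 200 MHz -> m = 2, p = 0 */
}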
/**
@@ -527,39 +350,34 @@ static void sun5i_a13_get_ahb_factors(u32 *freq, u32 parent_rate,
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_apb1_factors(struct factors_request *req)
{
u8 calcm, calcp;
+ int div;
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
- parent_rate = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
/* Invalid rate! */
- if (parent_rate > 32)
+ if (div > 32)
return;
- if (parent_rate <= 4)
+ if (div <= 4)
calcp = 0;
- else if (parent_rate <= 8)
+ else if (div <= 8)
calcp = 1;
- else if (parent_rate <= 16)
+ else if (div <= 16)
calcp = 2;
else
calcp = 3;
- calcm = (parent_rate >> calcp) - 1;
+ calcm = (div >> calcp) - 1;
- *freq = (parent_rate >> calcp) / (calcm + 1);
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
-
- *m = calcm;
- *p = calcp;
+ req->rate = (req->parent_rate >> calcp) / (calcm + 1);
+ req->m = calcm;
+ req->p = calcp;
}
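A quick check of the math above, assuming a 24 MHz parent and a 2 MHz request: div = DIV_ROUND_UP(24 MHz, 2 MHz) = 12, so calcp = 2 (12 <= 16) and calcm = (12 >> 2) - 1 = 2; the reported rate is (24 MHz >> 2) / (2 + 1) = 2 MHz, with m = 2 and p = 2 programmed.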
@@ -571,17 +389,16 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun7i_a20_get_out_factors(struct factors_request *req)
{
u8 div, calcm, calcp;
/* These clocks can only divide, so we will never be able to achieve
* frequencies higher than the parent frequency */
- if (*freq > parent_rate)
- *freq = parent_rate;
+ if (req->rate > req->parent_rate)
+ req->rate = req->parent_rate;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
if (div < 32)
calcp = 0;
@@ -594,21 +411,16 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
calcm = DIV_ROUND_UP(div, 1 << calcp);
- *freq = (parent_rate >> calcp) / calcm;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
-
- *m = calcm - 1;
- *p = calcp;
+ req->rate = (req->parent_rate >> calcp) / calcm;
+ req->m = calcm - 1;
+ req->p = calcp;
}
/**
* sunxi_factors_clk_setup() - Setup function for factor clocks
*/
-static struct clk_factors_config sun4i_pll1_config = {
+static const struct clk_factors_config sun4i_pll1_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -619,7 +431,7 @@ static struct clk_factors_config sun4i_pll1_config = {
.pwidth = 2,
};
-static struct clk_factors_config sun6i_a31_pll1_config = {
+static const struct clk_factors_config sun6i_a31_pll1_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -629,7 +441,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
.n_start = 1,
};
-static struct clk_factors_config sun8i_a23_pll1_config = {
+static const struct clk_factors_config sun8i_a23_pll1_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -641,14 +453,14 @@ static struct clk_factors_config sun8i_a23_pll1_config = {
.n_start = 1,
};
-static struct clk_factors_config sun4i_pll5_config = {
+static const struct clk_factors_config sun4i_pll5_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
.kwidth = 2,
};
-static struct clk_factors_config sun6i_a31_pll6_config = {
+static const struct clk_factors_config sun6i_a31_pll6_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -656,12 +468,19 @@ static struct clk_factors_config sun6i_a31_pll6_config = {
.n_start = 1,
};
-static struct clk_factors_config sun5i_a13_ahb_config = {
+static const struct clk_factors_config sun5i_a13_ahb_config = {
.pshift = 4,
.pwidth = 2,
};
-static struct clk_factors_config sun4i_apb1_config = {
+static const struct clk_factors_config sun6i_ahb1_config = {
+ .mshift = 6,
+ .mwidth = 2,
+ .pshift = 4,
+ .pwidth = 2,
+};
+
+static const struct clk_factors_config sun4i_apb1_config = {
.mshift = 0,
.mwidth = 5,
.pshift = 16,
@@ -669,7 +488,7 @@ static struct clk_factors_config sun4i_apb1_config = {
};
/* user manual says "n" but it's really "p" */
-static struct clk_factors_config sun7i_a20_out_config = {
+static const struct clk_factors_config sun7i_a20_out_config = {
.mshift = 8,
.mwidth = 5,
.pshift = 20,
@@ -728,6 +547,14 @@ static const struct factors_data sun5i_a13_ahb_data __initconst = {
.getter = sun5i_a13_get_ahb_factors,
};
+static const struct factors_data sun6i_ahb1_data __initconst = {
+ .mux = 12,
+ .muxmask = BIT(1) | BIT(0),
+ .table = &sun6i_ahb1_config,
+ .getter = sun6i_get_ahb1_factors,
+ .recalc = sun6i_ahb1_recalc,
+};
+
static const struct factors_data sun4i_apb1_data __initconst = {
.mux = 24,
.muxmask = BIT(1) | BIT(0),
@@ -758,6 +585,61 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
return sunxi_factors_register(node, data, &clk_lock, reg);
}
+static void __init sun4i_pll1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun4i_pll1_data);
+}
+CLK_OF_DECLARE(sun4i_pll1, "allwinner,sun4i-a10-pll1-clk",
+ sun4i_pll1_clk_setup);
+
+static void __init sun6i_pll1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun6i_a31_pll1_data);
+}
+CLK_OF_DECLARE(sun6i_pll1, "allwinner,sun6i-a31-pll1-clk",
+ sun6i_pll1_clk_setup);
+
+static void __init sun8i_pll1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun8i_a23_pll1_data);
+}
+CLK_OF_DECLARE(sun8i_pll1, "allwinner,sun8i-a23-pll1-clk",
+ sun8i_pll1_clk_setup);
+
+static void __init sun7i_pll4_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun7i_a20_pll4_data);
+}
+CLK_OF_DECLARE(sun7i_pll4, "allwinner,sun7i-a20-pll4-clk",
+ sun7i_pll4_clk_setup);
+
+static void __init sun5i_ahb_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun5i_a13_ahb_data);
+}
+CLK_OF_DECLARE(sun5i_ahb, "allwinner,sun5i-a13-ahb-clk",
+ sun5i_ahb_clk_setup);
+
+static void __init sun6i_ahb1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun6i_ahb1_data);
+}
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk",
+ sun6i_ahb1_clk_setup);
+
+static void __init sun4i_apb1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun4i_apb1_data);
+}
+CLK_OF_DECLARE(sun4i_apb1, "allwinner,sun4i-a10-apb1-clk",
+ sun4i_apb1_clk_setup);
+
+static void __init sun7i_out_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun7i_a20_out_data);
+}
+CLK_OF_DECLARE(sun7i_out, "allwinner,sun7i-a20-out-clk",
+ sun7i_out_clk_setup);
/**
@@ -782,8 +664,8 @@ static const struct mux_data sun8i_h3_ahb2_mux_data __initconst = {
.shift = 0,
};
-static void __init sunxi_mux_clk_setup(struct device_node *node,
- struct mux_data *data)
+static struct clk * __init sunxi_mux_clk_setup(struct device_node *node,
+ const struct mux_data *data)
{
struct clk *clk;
const char *clk_name = node->name;
@@ -792,21 +674,71 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
int i;
reg = of_iomap(node, 0);
+ if (!reg) {
+ pr_err("Could not map registers for mux-clk: %s\n",
+ of_node_full_name(node));
+ return NULL;
+ }
i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS);
- of_property_read_string(node, "clock-output-names", &clk_name);
+ if (of_property_read_string(node, "clock-output-names", &clk_name)) {
+ pr_err("%s: could not read clock-output-names from \"%s\"\n",
+ __func__, of_node_full_name(node));
+ goto out_unmap;
+ }
clk = clk_register_mux(NULL, clk_name, parents, i,
CLK_SET_RATE_PARENT, reg,
data->shift, SUNXI_MUX_GATE_WIDTH,
0, &clk_lock);
- if (clk) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register mux clock %s: %ld\n", __func__,
+ clk_name, PTR_ERR(clk));
+ goto out_unmap;
+ }
+
+ if (of_clk_add_provider(node, of_clk_src_simple_get, clk)) {
+ pr_err("%s: failed to add clock provider for %s\n",
+ __func__, clk_name);
+ clk_unregister_mux(clk);
+ goto out_unmap;
}
+
+ return clk;
+out_unmap:
+ iounmap(reg);
+ return NULL;
+}
+
+static void __init sun4i_cpu_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+
+ clk = sunxi_mux_clk_setup(node, &sun4i_cpu_mux_data);
+ if (!clk)
+ return;
+
+ /* Protect CPU clock */
+ __clk_get(clk);
+ clk_prepare_enable(clk);
+}
+CLK_OF_DECLARE(sun4i_cpu, "allwinner,sun4i-a10-cpu-clk",
+ sun4i_cpu_clk_setup);
+
+static void __init sun6i_ahb1_mux_clk_setup(struct device_node *node)
+{
+ sunxi_mux_clk_setup(node, &sun6i_a31_ahb1_mux_data);
}
+CLK_OF_DECLARE(sun6i_ahb1_mux, "allwinner,sun6i-a31-ahb1-mux-clk",
+ sun6i_ahb1_mux_clk_setup);
+static void __init sun8i_ahb2_clk_setup(struct device_node *node)
+{
+ sunxi_mux_clk_setup(node, &sun8i_h3_ahb2_mux_data);
+}
+CLK_OF_DECLARE(sun8i_ahb2, "allwinner,sun8i-h3-ahb2-clk",
+ sun8i_ahb2_clk_setup);
/**
@@ -865,7 +797,7 @@ static const struct div_data sun4i_apb0_data __initconst = {
};
static void __init sunxi_divider_clk_setup(struct device_node *node,
- struct div_data *data)
+ const struct div_data *data)
{
struct clk *clk;
const char *clk_name = node->name;
@@ -873,21 +805,77 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
void __iomem *reg;
reg = of_iomap(node, 0);
+ if (!reg) {
+ pr_err("Could not map registers for mux-clk: %s\n",
+ of_node_full_name(node));
+ return;
+ }
clk_parent = of_clk_get_parent_name(node, 0);
- of_property_read_string(node, "clock-output-names", &clk_name);
+ if (of_property_read_string(node, "clock-output-names", &clk_name)) {
+ pr_err("%s: could not read clock-output-names from \"%s\"\n",
+ __func__, of_node_full_name(node));
+ goto out_unmap;
+ }
clk = clk_register_divider_table(NULL, clk_name, clk_parent, 0,
reg, data->shift, data->width,
data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
data->table, &clk_lock);
- if (clk) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register divider clock %s: %ld\n",
+ __func__, clk_name, PTR_ERR(clk));
+ goto out_unmap;
}
+
+ if (of_clk_add_provider(node, of_clk_src_simple_get, clk)) {
+ pr_err("%s: failed to add clock provider for %s\n",
+ __func__, clk_name);
+ goto out_unregister;
+ }
+
+ if (clk_register_clkdev(clk, clk_name, NULL)) {
+ of_clk_del_provider(node);
+ goto out_unregister;
+ }
+
+ return;
+out_unregister:
+ clk_unregister_divider(clk);
+
+out_unmap:
+ iounmap(reg);
}
+static void __init sun4i_ahb_clk_setup(struct device_node *node)
+{
+ sunxi_divider_clk_setup(node, &sun4i_ahb_data);
+}
+CLK_OF_DECLARE(sun4i_ahb, "allwinner,sun4i-a10-ahb-clk",
+ sun4i_ahb_clk_setup);
+
+static void __init sun4i_apb0_clk_setup(struct device_node *node)
+{
+ sunxi_divider_clk_setup(node, &sun4i_apb0_data);
+}
+CLK_OF_DECLARE(sun4i_apb0, "allwinner,sun4i-a10-apb0-clk",
+ sun4i_apb0_clk_setup);
+
+static void __init sun4i_axi_clk_setup(struct device_node *node)
+{
+ sunxi_divider_clk_setup(node, &sun4i_axi_data);
+}
+CLK_OF_DECLARE(sun4i_axi, "allwinner,sun4i-a10-axi-clk",
+ sun4i_axi_clk_setup);
+
+static void __init sun8i_axi_clk_setup(struct device_node *node)
+{
+ sunxi_divider_clk_setup(node, &sun8i_a23_axi_data);
+}
+CLK_OF_DECLARE(sun8i_axi, "allwinner,sun8i-a23-axi-clk",
+ sun8i_axi_clk_setup);
+
/**
@@ -975,8 +963,8 @@ static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
* |________________________|
*/
-static void __init sunxi_divs_clk_setup(struct device_node *node,
- struct divs_data *data)
+static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
+ const struct divs_data *data)
{
struct clk_onecell_data *clk_data;
const char *parent;
@@ -997,13 +985,20 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
/* Set up factor clock that we will be dividing */
pclk = sunxi_factors_clk_setup(node, data->factors);
+ if (!pclk)
+ return NULL;
parent = __clk_get_name(pclk);
reg = of_iomap(node, 0);
+ if (!reg) {
+ pr_err("Could not map registers for divs-clk: %s\n",
+ of_node_full_name(node));
+ return NULL;
+ }
clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
if (!clk_data)
- return;
+ goto out_unmap;
clks = kcalloc(ndivs, sizeof(*clks), GFP_KERNEL);
if (!clks)
@@ -1081,146 +1076,54 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
clkflags);
WARN_ON(IS_ERR(clk_data->clks[i]));
- clk_register_clkdev(clks[i], clk_name, NULL);
}
/* Adjust to the real max */
clk_data->clk_num = i;
- of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
-
- return;
+ if (of_clk_add_provider(node, of_clk_src_onecell_get, clk_data)) {
+ pr_err("%s: failed to add clock provider for %s\n",
+ __func__, clk_name);
+ goto free_gate;
+ }
+ return clks;
free_gate:
kfree(gate);
free_clks:
kfree(clks);
free_clkdata:
kfree(clk_data);
+out_unmap:
+ iounmap(reg);
+ return NULL;
}
-
-
-/* Matches for factors clocks */
-static const struct of_device_id clk_factors_match[] __initconst = {
- {.compatible = "allwinner,sun4i-a10-pll1-clk", .data = &sun4i_pll1_data,},
- {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
- {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
- {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
- {.compatible = "allwinner,sun5i-a13-ahb-clk", .data = &sun5i_a13_ahb_data,},
- {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
- {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
- {}
-};
-
-/* Matches for divider clocks */
-static const struct of_device_id clk_div_match[] __initconst = {
- {.compatible = "allwinner,sun4i-a10-axi-clk", .data = &sun4i_axi_data,},
- {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
- {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
- {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
- {}
-};
-
-/* Matches for divided outputs */
-static const struct of_device_id clk_divs_match[] __initconst = {
- {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
- {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
- {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_divs_data,},
- {}
-};
-
-/* Matches for mux clocks */
-static const struct of_device_id clk_mux_match[] __initconst = {
- {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
- {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
- {.compatible = "allwinner,sun8i-h3-ahb2-clk", .data = &sun8i_h3_ahb2_mux_data,},
- {}
-};
-
-
-static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_match,
- void *function)
-{
- struct device_node *np;
- const struct div_data *data;
- const struct of_device_id *match;
- void (*setup_function)(struct device_node *, const void *) = function;
-
- for_each_matching_node_and_match(np, clk_match, &match) {
- data = match->data;
- setup_function(np, data);
- }
-}
-
-static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
-{
- unsigned int i;
-
- /* Register divided output clocks */
- of_sunxi_table_clock_setup(clk_divs_match, sunxi_divs_clk_setup);
-
- /* Register factor clocks */
- of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
-
- /* Register divider clocks */
- of_sunxi_table_clock_setup(clk_div_match, sunxi_divider_clk_setup);
-
- /* Register mux clocks */
- of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
-
- /* Protect the clocks that needs to stay on */
- for (i = 0; i < nclocks; i++) {
- struct clk *clk = clk_get(NULL, clocks[i]);
-
- if (!IS_ERR(clk))
- clk_prepare_enable(clk);
- }
-}
-
-static const char *sun4i_a10_critical_clocks[] __initdata = {
- "pll5_ddr",
-};
-
-static void __init sun4i_a10_init_clocks(struct device_node *node)
+static void __init sun4i_pll5_clk_setup(struct device_node *node)
{
- sunxi_init_clocks(sun4i_a10_critical_clocks,
- ARRAY_SIZE(sun4i_a10_critical_clocks));
-}
-CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks);
+ struct clk **clks;
-static const char *sun5i_critical_clocks[] __initdata = {
- "cpu",
- "pll5_ddr",
-};
+ clks = sunxi_divs_clk_setup(node, &pll5_divs_data);
+ if (!clks)
+ return;
-static void __init sun5i_init_clocks(struct device_node *node)
-{
- sunxi_init_clocks(sun5i_critical_clocks,
- ARRAY_SIZE(sun5i_critical_clocks));
+ /* Protect PLL5_DDR */
+ __clk_get(clks[0]);
+ clk_prepare_enable(clks[0]);
}
-CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sun5i_init_clocks);
-CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sun5i_init_clocks);
-CLK_OF_DECLARE(sun5i_r8_clk_init, "allwinner,sun5i-r8", sun5i_init_clocks);
-CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
-
-static const char *sun6i_critical_clocks[] __initdata = {
- "cpu",
-};
+CLK_OF_DECLARE(sun4i_pll5, "allwinner,sun4i-a10-pll5-clk",
+ sun4i_pll5_clk_setup);
-static void __init sun6i_init_clocks(struct device_node *node)
+static void __init sun4i_pll6_clk_setup(struct device_node *node)
{
- sunxi_init_clocks(sun6i_critical_clocks,
- ARRAY_SIZE(sun6i_critical_clocks));
+ sunxi_divs_clk_setup(node, &pll6_divs_data);
}
-CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
-CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_h3_clk_init, "allwinner,sun8i-h3", sun6i_init_clocks);
+CLK_OF_DECLARE(sun4i_pll6, "allwinner,sun4i-a10-pll6-clk",
+ sun4i_pll6_clk_setup);
-static void __init sun9i_init_clocks(struct device_node *node)
+static void __init sun6i_pll6_clk_setup(struct device_node *node)
{
- sunxi_init_clocks(NULL, 0);
+ sunxi_divs_clk_setup(node, &sun6i_a31_pll6_divs_data);
}
-CLK_OF_DECLARE(sun9i_a80_clk_init, "allwinner,sun9i-a80", sun9i_init_clocks);
+CLK_OF_DECLARE(sun6i_pll6, "allwinner,sun6i-a31-pll6-clk",
+ sun6i_pll6_clk_setup);
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 67b8e38f4ee9..fe0c3d169377 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -76,7 +76,7 @@ static int sunxi_usb_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sunxi_usb_reset_ops = {
+static const struct reset_control_ops sunxi_usb_reset_ops = {
.assert = sunxi_usb_reset_assert,
.deassert = sunxi_usb_reset_deassert,
};
@@ -216,6 +216,18 @@ static void __init sun8i_a23_usb_setup(struct device_node *node)
}
CLK_OF_DECLARE(sun8i_a23_usb, "allwinner,sun8i-a23-usb-clk", sun8i_a23_usb_setup);
+static const struct usb_clk_data sun8i_h3_usb_clk_data __initconst = {
+ .clk_mask = BIT(19) | BIT(18) | BIT(17) | BIT(16) |
+ BIT(11) | BIT(10) | BIT(9) | BIT(8),
+ .reset_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+};
+
+static void __init sun8i_h3_usb_setup(struct device_node *node)
+{
+ sunxi_usb_clk_setup(node, &sun8i_h3_usb_clk_data, &sun4i_a10_usb_lock);
+}
+CLK_OF_DECLARE(sun8i_h3_usb, "allwinner,sun8i-h3-usb-clk", sun8i_h3_usb_setup);
+
static const struct usb_clk_data sun9i_a80_usb_mod_data __initconst = {
.clk_mask = BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1),
.reset_mask = BIT(19) | BIT(18) | BIT(17),
@@ -243,15 +255,3 @@ static void __init sun9i_a80_usb_phy_setup(struct device_node *node)
sunxi_usb_clk_setup(node, &sun9i_a80_usb_phy_data, &a80_usb_phy_lock);
}
CLK_OF_DECLARE(sun9i_a80_usb_phy, "allwinner,sun9i-a80-usb-phy-clk", sun9i_a80_usb_phy_setup);
-
-static const struct usb_clk_data sun8i_h3_usb_clk_data __initconst = {
- .clk_mask = BIT(19) | BIT(18) | BIT(17) | BIT(16) |
- BIT(11) | BIT(10) | BIT(9) | BIT(8),
- .reset_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
-};
-
-static void __init sun8i_h3_usb_setup(struct device_node *node)
-{
- sunxi_usb_clk_setup(node, &sun8i_h3_usb_clk_data, &sun4i_a10_usb_lock);
-}
-CLK_OF_DECLARE(sun8i_h3_usb, "allwinner,sun8i-h3-usb-clk", sun8i_h3_usb_setup);
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
index c0f7843e80e6..92d04ce2dee6 100644
--- a/drivers/clk/tegra/clk-audio-sync.c
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -72,7 +72,7 @@ struct clk *tegra_clk_register_sync_source(const char *name,
init.ops = &tegra_clk_sync_source_ops;
init.name = name;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 86a307b17eb0..19bfa07e24b1 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -995,7 +995,6 @@ static const struct clk_ops dfll_clk_ops = {
};
static struct clk_init_data dfll_clk_init_data = {
- .flags = CLK_IS_ROOT,
.ops = &dfll_clk_ops,
.num_parents = 0,
};
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index da0b5941c89f..d64ec7a1b976 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -52,8 +52,7 @@ int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
return -EINVAL;
}
- osc = clk_register_fixed_rate(NULL, "osc", NULL, CLK_IS_ROOT,
- *osc_freq);
+ osc = clk_register_fixed_rate(NULL, "osc", NULL, 0, *osc_freq);
dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, clks);
if (!dt_clk)
@@ -88,8 +87,7 @@ void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
/* clk_32k */
dt_clk = tegra_lookup_dt_id(tegra_clk_clk_32k, tegra_clks);
if (dt_clk) {
- clk = clk_register_fixed_rate(NULL, "clk_32k", NULL,
- CLK_IS_ROOT, 32768);
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768);
*dt_clk = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 4a24aa4bbdea..df47ec3169c3 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -972,8 +972,7 @@ static void __init tegra114_fixed_clk_init(void __iomem *clk_base)
struct clk *clk;
/* clk_32k */
- clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
- 32768);
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768);
clks[TEGRA114_CLK_CLK_32K] = clk;
/* clk_m_div2 */
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 7a48e986c4c9..7ad63837694f 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -837,15 +837,13 @@ static void __init tegra20_periph_clk_init(void)
clks[TEGRA20_CLK_PEX] = clk;
/* cdev1 */
- clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, CLK_IS_ROOT,
- 26000000);
+ clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, 0, 26000000);
clk = tegra_clk_register_periph_gate("cdev1", "cdev1_fixed", 0,
clk_base, 0, 94, periph_clk_enb_refcnt);
clks[TEGRA20_CLK_CDEV1] = clk;
/* cdev2 */
- clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, CLK_IS_ROOT,
- 26000000);
+ clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, 0, 26000000);
clk = tegra_clk_register_periph_gate("cdev2", "cdev2_fixed", 0,
clk_base, 0, 93, periph_clk_enb_refcnt);
clks[TEGRA20_CLK_CDEV2] = clk;
@@ -879,8 +877,8 @@ static void __init tegra20_osc_clk_init(void)
input_freq = tegra20_clk_measure_input_freq();
/* clk_m */
- clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT |
- CLK_IGNORE_UNUSED, input_freq);
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IGNORE_UNUSED,
+ input_freq);
clks[TEGRA20_CLK_CLK_M] = clk;
/* pll_ref */
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 2a3a4fe803d6..f60fe2e344ca 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -271,7 +271,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
}
}
-static struct reset_control_ops rst_ops = {
+static const struct reset_control_ops rst_ops = {
.assert = tegra_clk_rst_assert,
.deassert = tegra_clk_rst_deassert,
};
diff --git a/drivers/clk/ti/Kconfig b/drivers/clk/ti/Kconfig
new file mode 100644
index 000000000000..271341787e67
--- /dev/null
+++ b/drivers/clk/ti/Kconfig
@@ -0,0 +1,6 @@
+config COMMON_CLK_TI_ADPLL
+ tristate "Clock driver for dm814x ADPLL"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ default y if SOC_TI81XX
+ ---help---
+ ADPLL clock driver for the dm814x SoC using common clock framework.
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index d4ac96087ccd..0deac9821039 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -1,3 +1,5 @@
+ifeq ($(CONFIG_ARCH_OMAP2PLUS), y)
+
obj-y += clk.o autoidle.o clockdomain.o
clk-common = dpll.o composite.o divider.o gate.o \
fixed-factor.o mux.o apll.o \
@@ -18,3 +20,7 @@ obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o
ifdef CONFIG_ATAGS
obj-$(CONFIG_ARCH_OMAP3) += clk-3xxx-legacy.o
endif
+
+endif # CONFIG_ARCH_OMAP2PLUS
+
+obj-$(CONFIG_COMMON_CLK_TI_ADPLL) += adpll.o
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
new file mode 100644
index 000000000000..255cafb18336
--- /dev/null
+++ b/drivers/clk/ti/adpll.c
@@ -0,0 +1,983 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+
+#define ADPLL_PLLSS_MMR_LOCK_OFFSET 0x00 /* Managed by MPUPLL */
+#define ADPLL_PLLSS_MMR_LOCK_ENABLED 0x1f125B64
+#define ADPLL_PLLSS_MMR_UNLOCK_MAGIC 0x1eda4c3d
+
+#define ADPLL_PWRCTRL_OFFSET 0x00
+#define ADPLL_PWRCTRL_PONIN 5
+#define ADPLL_PWRCTRL_PGOODIN 4
+#define ADPLL_PWRCTRL_RET 3
+#define ADPLL_PWRCTRL_ISORET 2
+#define ADPLL_PWRCTRL_ISOSCAN 1
+#define ADPLL_PWRCTRL_OFFMODE 0
+
+#define ADPLL_CLKCTRL_OFFSET 0x04
+#define ADPLL_CLKCTRL_CLKDCOLDOEN 29
+#define ADPLL_CLKCTRL_IDLE 23
+#define ADPLL_CLKCTRL_CLKOUTEN 20
+#define ADPLL_CLKINPHIFSEL_ADPLL_S 19 /* REVISIT: which bit? */
+#define ADPLL_CLKCTRL_CLKOUTLDOEN_ADPLL_LJ 19
+#define ADPLL_CLKCTRL_ULOWCLKEN 18
+#define ADPLL_CLKCTRL_CLKDCOLDOPWDNZ 17
+#define ADPLL_CLKCTRL_M2PWDNZ 16
+#define ADPLL_CLKCTRL_M3PWDNZ_ADPLL_S 15
+#define ADPLL_CLKCTRL_LOWCURRSTDBY_ADPLL_S 13
+#define ADPLL_CLKCTRL_LPMODE_ADPLL_S 12
+#define ADPLL_CLKCTRL_REGM4XEN_ADPLL_S 10
+#define ADPLL_CLKCTRL_SELFREQDCO_ADPLL_LJ 10
+#define ADPLL_CLKCTRL_TINITZ 0
+
+#define ADPLL_TENABLE_OFFSET 0x08
+#define ADPLL_TENABLEDIV_OFFSET 0x8c
+
+#define ADPLL_M2NDIV_OFFSET 0x10
+#define ADPLL_M2NDIV_M2 16
+#define ADPLL_M2NDIV_M2_ADPLL_S_WIDTH 5
+#define ADPLL_M2NDIV_M2_ADPLL_LJ_WIDTH 7
+
+#define ADPLL_MN2DIV_OFFSET 0x14
+#define ADPLL_MN2DIV_N2 16
+
+#define ADPLL_FRACDIV_OFFSET 0x18
+#define ADPLL_FRACDIV_REGSD 24
+#define ADPLL_FRACDIV_FRACTIONALM 0
+#define ADPLL_FRACDIV_FRACTIONALM_MASK 0x3ffff
+
+#define ADPLL_BWCTRL_OFFSET 0x1c
+#define ADPLL_BWCTRL_BWCONTROL 1
+#define ADPLL_BWCTRL_BW_INCR_DECRZ 0
+
+#define ADPLL_RESERVED_OFFSET 0x20
+
+#define ADPLL_STATUS_OFFSET 0x24
+#define ADPLL_STATUS_PONOUT 31
+#define ADPLL_STATUS_PGOODOUT 30
+#define ADPLL_STATUS_LDOPWDN 29
+#define ADPLL_STATUS_RECAL_BSTATUS3 28
+#define ADPLL_STATUS_RECAL_OPPIN 27
+#define ADPLL_STATUS_PHASELOCK 10
+#define ADPLL_STATUS_FREQLOCK 9
+#define ADPLL_STATUS_BYPASSACK 8
+#define ADPLL_STATUS_LOSSREF 6
+#define ADPLL_STATUS_CLKOUTENACK 5
+#define ADPLL_STATUS_LOCK2 4
+#define ADPLL_STATUS_M2CHANGEACK 3
+#define ADPLL_STATUS_HIGHJITTER 1
+#define ADPLL_STATUS_BYPASS 0
+#define ADPLL_STATUS_PREPARED_MASK (BIT(ADPLL_STATUS_PHASELOCK) | \
+ BIT(ADPLL_STATUS_FREQLOCK))
+
+#define ADPLL_M3DIV_OFFSET 0x28 /* Only on MPUPLL */
+#define ADPLL_M3DIV_M3 0
+#define ADPLL_M3DIV_M3_WIDTH 5
+#define ADPLL_M3DIV_M3_MASK 0x1f
+
+#define ADPLL_RAMPCTRL_OFFSET 0x2c /* Only on MPUPLL */
+#define ADPLL_RAMPCTRL_CLKRAMPLEVEL 19
+#define ADPLL_RAMPCTRL_CLKRAMPRATE 16
+#define ADPLL_RAMPCTRL_RELOCK_RAMP_EN 0
+
+#define MAX_ADPLL_INPUTS 3
+#define MAX_ADPLL_OUTPUTS 4
+#define ADPLL_MAX_RETRIES 5
+
+#define to_dco(_hw) container_of(_hw, struct ti_adpll_dco_data, hw)
+#define to_adpll(_hw) container_of(_hw, struct ti_adpll_data, dco)
+#define to_clkout(_hw) container_of(_hw, struct ti_adpll_clkout_data, hw)
+
+enum ti_adpll_clocks {
+ TI_ADPLL_DCO,
+ TI_ADPLL_DCO_GATE,
+ TI_ADPLL_N2,
+ TI_ADPLL_M2,
+ TI_ADPLL_M2_GATE,
+ TI_ADPLL_BYPASS,
+ TI_ADPLL_HIF,
+ TI_ADPLL_DIV2,
+ TI_ADPLL_CLKOUT,
+ TI_ADPLL_CLKOUT2,
+ TI_ADPLL_M3,
+};
+
+#define TI_ADPLL_NR_CLOCKS (TI_ADPLL_M3 + 1)
+
+enum ti_adpll_inputs {
+ TI_ADPLL_CLKINP,
+ TI_ADPLL_CLKINPULOW,
+ TI_ADPLL_CLKINPHIF,
+};
+
+enum ti_adpll_s_outputs {
+ TI_ADPLL_S_DCOCLKLDO,
+ TI_ADPLL_S_CLKOUT,
+ TI_ADPLL_S_CLKOUTX2,
+ TI_ADPLL_S_CLKOUTHIF,
+};
+
+enum ti_adpll_lj_outputs {
+ TI_ADPLL_LJ_CLKDCOLDO,
+ TI_ADPLL_LJ_CLKOUT,
+ TI_ADPLL_LJ_CLKOUTLDO,
+};
+
+struct ti_adpll_platform_data {
+ const bool is_type_s;
+ const int nr_max_inputs;
+ const int nr_max_outputs;
+ const int output_index;
+};
+
+struct ti_adpll_clock {
+ struct clk *clk;
+ struct clk_lookup *cl;
+ void (*unregister)(struct clk *clk);
+};
+
+struct ti_adpll_dco_data {
+ struct clk_hw hw;
+};
+
+struct ti_adpll_clkout_data {
+ struct ti_adpll_data *adpll;
+ struct clk_gate gate;
+ struct clk_hw hw;
+};
+
+struct ti_adpll_data {
+ struct device *dev;
+ const struct ti_adpll_platform_data *c;
+ struct device_node *np;
+ unsigned long pa;
+ void __iomem *iobase;
+ void __iomem *regs;
+ spinlock_t lock; /* For ADPLL shared register access */
+ const char *parent_names[MAX_ADPLL_INPUTS];
+ struct clk *parent_clocks[MAX_ADPLL_INPUTS];
+ struct ti_adpll_clock *clocks;
+ struct clk_onecell_data outputs;
+ struct ti_adpll_dco_data dco;
+};
+
+static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
+ int output_index,
+ const char *postfix)
+{
+ const char *name;
+ int err;
+
+ if (output_index >= 0) {
+ err = of_property_read_string_index(d->np,
+ "clock-output-names",
+ output_index,
+ &name);
+ if (err)
+ return NULL;
+ } else {
+ const char *base_name = "adpll";
+ char *buf;
+
+ buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
+ strlen(postfix) + 1, GFP_KERNEL); /* +1 for the NUL */
+ if (!buf)
+ return NULL;
+ sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
+ name = buf;
+ }
+
+ return name;
+}
+
+#define ADPLL_MAX_CON_ID 16 /* See MAX_CON_ID */
+
+static int ti_adpll_setup_clock(struct ti_adpll_data *d, struct clk *clock,
+ int index, int output_index, const char *name,
+ void (*unregister)(struct clk *clk))
+{
+ struct clk_lookup *cl;
+ const char *postfix = NULL;
+ char con_id[ADPLL_MAX_CON_ID];
+
+ d->clocks[index].clk = clock;
+ d->clocks[index].unregister = unregister;
+
+ /* Separate con_id in format "pll040dcoclkldo" to fit MAX_CON_ID */
+ postfix = strrchr(name, '.');
+ if (postfix && strlen(postfix) > 1) {
+ if (strlen(postfix) > ADPLL_MAX_CON_ID)
+ dev_warn(d->dev, "clock %s con_id lookup may fail\n",
+ name);
+ snprintf(con_id, sizeof(con_id), "pll%03lx%s", d->pa & 0xfff, postfix + 1);
+ cl = clkdev_create(clock, con_id, NULL);
+ if (!cl)
+ return -ENOMEM;
+ d->clocks[index].cl = cl;
+ } else {
+ dev_warn(d->dev, "no con_id for clock %s\n", name);
+ }
+
+ if (output_index < 0)
+ return 0;
+
+ d->outputs.clks[output_index] = clock;
+ d->outputs.clk_num++;
+
+ return 0;
+}
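As an example of the naming scheme (the base address here is hypothetical, chosen for illustration): an ADPLL at physical address 0x481c5040 gets generated child names such as "481c5040.adpll.dcoclkldo", from which the con_id becomes "pll040dcoclkldo", i.e. the low 12 bits of the address plus the postfix, trimmed to fit MAX_CON_ID.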
+
+static int ti_adpll_init_divider(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ int output_index, char *name,
+ struct clk *parent_clock,
+ void __iomem *reg,
+ u8 shift, u8 width,
+ u8 clk_divider_flags)
+{
+ const char *child_name;
+ const char *parent_name;
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, output_index, name);
+ if (!child_name)
+ return -EINVAL;
+
+ parent_name = __clk_get_name(parent_clock);
+ clock = clk_register_divider(d->dev, child_name, parent_name, 0,
+ reg, shift, width, clk_divider_flags,
+ &d->lock);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register divider %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
+ clk_unregister_divider);
+}
+
+static int ti_adpll_init_mux(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ char *name, struct clk *clk0,
+ struct clk *clk1,
+ void __iomem *reg,
+ u8 shift)
+{
+ const char *child_name;
+ const char *parents[2];
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, -ENODEV, name);
+ if (!child_name)
+ return -ENOMEM;
+ parents[0] = __clk_get_name(clk0);
+ parents[1] = __clk_get_name(clk1);
+ clock = clk_register_mux(d->dev, child_name, parents, 2, 0,
+ reg, shift, 1, 0, &d->lock);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register mux %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, -ENODEV, child_name,
+ clk_unregister_mux);
+}
+
+static int ti_adpll_init_gate(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ int output_index, char *name,
+ struct clk *parent_clock,
+ void __iomem *reg,
+ u8 bit_idx,
+ u8 clk_gate_flags)
+{
+ const char *child_name;
+ const char *parent_name;
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, output_index, name);
+ if (!child_name)
+ return -EINVAL;
+
+ parent_name = __clk_get_name(parent_clock);
+ clock = clk_register_gate(d->dev, child_name, parent_name, 0,
+ reg, bit_idx, clk_gate_flags,
+ &d->lock);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register gate %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
+ clk_unregister_gate);
+}
+
+static int ti_adpll_init_fixed_factor(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ char *name,
+ struct clk *parent_clock,
+ unsigned int mult,
+ unsigned int div)
+{
+ const char *child_name;
+ const char *parent_name;
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, -ENODEV, name);
+ if (!child_name)
+ return -ENOMEM;
+
+ parent_name = __clk_get_name(parent_clock);
+ clock = clk_register_fixed_factor(d->dev, child_name, parent_name,
+ 0, mult, div);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ return ti_adpll_setup_clock(d, clock, index, -ENODEV, child_name,
+ clk_unregister);
+}
+
+static void ti_adpll_set_idle_bypass(struct ti_adpll_data *d)
+{
+ unsigned long flags;
+ u32 v;
+
+ spin_lock_irqsave(&d->lock, flags);
+ v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
+ v |= BIT(ADPLL_CLKCTRL_IDLE);
+ writel_relaxed(v, d->regs + ADPLL_CLKCTRL_OFFSET);
+ spin_unlock_irqrestore(&d->lock, flags);
+}
+
+static void ti_adpll_clear_idle_bypass(struct ti_adpll_data *d)
+{
+ unsigned long flags;
+ u32 v;
+
+ spin_lock_irqsave(&d->lock, flags);
+ v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
+ v &= ~BIT(ADPLL_CLKCTRL_IDLE);
+ writel_relaxed(v, d->regs + ADPLL_CLKCTRL_OFFSET);
+ spin_unlock_irqrestore(&d->lock, flags);
+}
+
+static bool ti_adpll_clock_is_bypass(struct ti_adpll_data *d)
+{
+ u32 v;
+
+ v = readl_relaxed(d->regs + ADPLL_STATUS_OFFSET);
+
+ return v & BIT(ADPLL_STATUS_BYPASS);
+}
+
+/*
+ * Locked and bypass are not actually mutually exclusive: if you only care
+ * about the DCO clock and not CLKOUT you can clear M2PWDNZ before enabling
+ * the PLL, resulting in status (FREQLOCK | PHASELOCK | BYPASS) after lock.
+ */
+static bool ti_adpll_is_locked(struct ti_adpll_data *d)
+{
+ u32 v = readl_relaxed(d->regs + ADPLL_STATUS_OFFSET);
+
+ return (v & ADPLL_STATUS_PREPARED_MASK) == ADPLL_STATUS_PREPARED_MASK;
+}
+
+static int ti_adpll_wait_lock(struct ti_adpll_data *d)
+{
+ int retries = ADPLL_MAX_RETRIES;
+
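+ /* Poll every 200-300 us until the PLL reports locked status */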
+ do {
+ if (ti_adpll_is_locked(d))
+ return 0;
+ usleep_range(200, 300);
+ } while (retries--);
+
+ dev_err(d->dev, "pll failed to lock\n");
+ return -ETIMEDOUT;
+}
+
+static int ti_adpll_prepare(struct clk_hw *hw)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+
+ ti_adpll_clear_idle_bypass(d);
+ ti_adpll_wait_lock(d);
+
+ return 0;
+}
+
+static void ti_adpll_unprepare(struct clk_hw *hw)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+
+ ti_adpll_set_idle_bypass(d);
+}
+
+static int ti_adpll_is_prepared(struct clk_hw *hw)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+
+ return ti_adpll_is_locked(d);
+}
+
+/*
+ * Note that the DCO clock is never subject to bypass: if the PLL is off,
+ * dcoclk is low.
+ */
+static unsigned long ti_adpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+ u32 frac_m, divider, v;
+ u64 rate;
+ unsigned long flags;
+
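+ /*
+ * rate = parent_rate * (mult + frac / 2^18) / (div + 1), computed in
+ * 18.18 fixed point so the fractional multiplier is not lost. Type S
+ * PLLs double this rate, or multiply it by eight when REGM4XEN is set.
+ */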
+ if (ti_adpll_clock_is_bypass(d))
+ return 0;
+
+ spin_lock_irqsave(&d->lock, flags);
+ frac_m = readl_relaxed(d->regs + ADPLL_FRACDIV_OFFSET);
+ frac_m &= ADPLL_FRACDIV_FRACTIONALM_MASK;
+ rate = (u64)readw_relaxed(d->regs + ADPLL_MN2DIV_OFFSET) << 18;
+ rate += frac_m;
+ rate *= parent_rate;
+ divider = (readw_relaxed(d->regs + ADPLL_M2NDIV_OFFSET) + 1) << 18;
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ do_div(rate, divider);
+
+ if (d->c->is_type_s) {
+ v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
+ if (v & BIT(ADPLL_CLKCTRL_REGM4XEN_ADPLL_S))
+ rate *= 4;
+ rate *= 2;
+ }
+
+ return rate;
+}
+
+/* PLL parent is always clkinp, bypass only affects the children */
+static u8 ti_adpll_get_parent(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static struct clk_ops ti_adpll_ops = {
+ .prepare = ti_adpll_prepare,
+ .unprepare = ti_adpll_unprepare,
+ .is_prepared = ti_adpll_is_prepared,
+ .recalc_rate = ti_adpll_recalc_rate,
+ .get_parent = ti_adpll_get_parent,
+};
+
+static int ti_adpll_init_dco(struct ti_adpll_data *d)
+{
+ struct clk_init_data init;
+ struct clk *clock;
+ const char *postfix;
+ int width, err;
+
+ d->outputs.clks = devm_kcalloc(d->dev, MAX_ADPLL_OUTPUTS,
+ sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!d->outputs.clks)
+ return -ENOMEM;
+
+ if (d->c->output_index < 0)
+ postfix = "dco";
+ else
+ postfix = NULL;
+
+ init.name = ti_adpll_clk_get_name(d, d->c->output_index, postfix);
+ if (!init.name)
+ return -EINVAL;
+
+ init.parent_names = d->parent_names;
+ init.num_parents = d->c->nr_max_inputs;
+ init.ops = &ti_adpll_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ d->dco.hw.init = &init;
+
+ if (d->c->is_type_s)
+ width = 5;
+ else
+ width = 4;
+
+ /* Internal input clock divider N2 */
+ err = ti_adpll_init_divider(d, TI_ADPLL_N2, -ENODEV, "n2",
+ d->parent_clocks[TI_ADPLL_CLKINP],
+ d->regs + ADPLL_MN2DIV_OFFSET,
+ ADPLL_MN2DIV_N2, width, 0);
+ if (err)
+ return err;
+
+ clock = devm_clk_register(d->dev, &d->dco.hw);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ return ti_adpll_setup_clock(d, clock, TI_ADPLL_DCO, d->c->output_index,
+ init.name, NULL);
+}
+
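+/*
+ * The clkout clocks borrow clk_gate_ops for gating, so the embedded
+ * gate's clk_hw must be synced to the composite clock before the gate
+ * operations are called.
+ */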
+static int ti_adpll_clkout_enable(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct clk_hw *gate_hw = &co->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return clk_gate_ops.enable(gate_hw);
+}
+
+static void ti_adpll_clkout_disable(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct clk_hw *gate_hw = &co->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+ clk_gate_ops.disable(gate_hw);
+}
+
+static int ti_adpll_clkout_is_enabled(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct clk_hw *gate_hw = &co->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return clk_gate_ops.is_enabled(gate_hw);
+}
+
+/* Setting PLL bypass puts clkout and clkoutx2 into bypass */
+static u8 ti_adpll_clkout_get_parent(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct ti_adpll_data *d = co->adpll;
+
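+ /* Parent index 1 selects the bypass clock, index 0 the PLL output */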
+ return ti_adpll_clock_is_bypass(d);
+}
+
+static int ti_adpll_init_clkout(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ int output_index, int gate_bit,
+ char *name, struct clk *clk0,
+ struct clk *clk1)
+{
+ struct ti_adpll_clkout_data *co;
+ struct clk_init_data init;
+ struct clk_ops *ops;
+ const char *parent_names[2];
+ const char *child_name;
+ struct clk *clock;
+ int err;
+
+ co = devm_kzalloc(d->dev, sizeof(*co), GFP_KERNEL);
+ if (!co)
+ return -ENOMEM;
+ co->adpll = d;
+
+ err = of_property_read_string_index(d->np,
+ "clock-output-names",
+ output_index,
+ &child_name);
+ if (err)
+ return err;
+
+ ops = devm_kzalloc(d->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ init.name = child_name;
+ init.ops = ops;
+ init.flags = CLK_IS_BASIC;
+ co->hw.init = &init;
+ parent_names[0] = __clk_get_name(clk0);
+ parent_names[1] = __clk_get_name(clk1);
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+
+ ops->get_parent = ti_adpll_clkout_get_parent;
+ ops->determine_rate = __clk_mux_determine_rate;
+ if (gate_bit) {
+ co->gate.lock = &d->lock;
+ co->gate.reg = d->regs + ADPLL_CLKCTRL_OFFSET;
+ co->gate.bit_idx = gate_bit;
+ ops->enable = ti_adpll_clkout_enable;
+ ops->disable = ti_adpll_clkout_disable;
+ ops->is_enabled = ti_adpll_clkout_is_enabled;
+ }
+
+ clock = devm_clk_register(d->dev, &co->hw);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register output %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
+ NULL);
+}
+
+static int ti_adpll_init_children_adpll_s(struct ti_adpll_data *d)
+{
+ int err;
+
+ if (!d->c->is_type_s)
+ return 0;
+
+ /* Internal mux, sources from divider N2 or clkinpulow */
+ err = ti_adpll_init_mux(d, TI_ADPLL_BYPASS, "bypass",
+ d->clocks[TI_ADPLL_N2].clk,
+ d->parent_clocks[TI_ADPLL_CLKINPULOW],
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_ULOWCLKEN);
+ if (err)
+ return err;
+
+ /* Internal divider M2, sources DCO */
+ err = ti_adpll_init_divider(d, TI_ADPLL_M2, -ENODEV, "m2",
+ d->clocks[TI_ADPLL_DCO].clk,
+ d->regs + ADPLL_M2NDIV_OFFSET,
+ ADPLL_M2NDIV_M2,
+ ADPLL_M2NDIV_M2_ADPLL_S_WIDTH,
+ CLK_DIVIDER_ONE_BASED);
+ if (err)
+ return err;
+
+ /* Internal fixed divider, after M2 before clkout */
+ err = ti_adpll_init_fixed_factor(d, TI_ADPLL_DIV2, "div2",
+ d->clocks[TI_ADPLL_M2].clk,
+ 1, 2);
+ if (err)
+ return err;
+
+ /* Output clkout with a mux and gate, sources from div2 or bypass */
+ err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT, TI_ADPLL_S_CLKOUT,
+ ADPLL_CLKCTRL_CLKOUTEN, "clkout",
+ d->clocks[TI_ADPLL_DIV2].clk,
+ d->clocks[TI_ADPLL_BYPASS].clk);
+ if (err)
+ return err;
+
+ /* Output clkoutx2 with a mux but no gate, sources from M2 or bypass */
+ err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT2, TI_ADPLL_S_CLKOUTX2, 0,
+ "clkout2", d->clocks[TI_ADPLL_M2].clk,
+ d->clocks[TI_ADPLL_BYPASS].clk);
+ if (err)
+ return err;
+
+ /* Internal mux, sources from DCO or clkinphif */
+ if (d->parent_clocks[TI_ADPLL_CLKINPHIF]) {
+ err = ti_adpll_init_mux(d, TI_ADPLL_HIF, "hif",
+ d->clocks[TI_ADPLL_DCO].clk,
+ d->parent_clocks[TI_ADPLL_CLKINPHIF],
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKINPHIFSEL_ADPLL_S);
+ if (err)
+ return err;
+ }
+
+ /* Output clkouthif with a divider M3, sources from hif */
+ err = ti_adpll_init_divider(d, TI_ADPLL_M3, TI_ADPLL_S_CLKOUTHIF, "m3",
+ d->clocks[TI_ADPLL_HIF].clk,
+ d->regs + ADPLL_M3DIV_OFFSET,
+ ADPLL_M3DIV_M3,
+ ADPLL_M3DIV_M3_WIDTH,
+ CLK_DIVIDER_ONE_BASED);
+ if (err)
+ return err;
+
+ /* Output clock dcoclkldo is the DCO */
+
+ return 0;
+}
+
+static int ti_adpll_init_children_adpll_lj(struct ti_adpll_data *d)
+{
+ int err;
+
+ if (d->c->is_type_s)
+ return 0;
+
+ /* Output clkdcoldo, gated output of DCO */
+ err = ti_adpll_init_gate(d, TI_ADPLL_DCO_GATE, TI_ADPLL_LJ_CLKDCOLDO,
+ "clkdcoldo", d->clocks[TI_ADPLL_DCO].clk,
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_CLKDCOLDOEN, 0);
+ if (err)
+ return err;
+
+ /* Internal divider M2, sources from DCO */
+ err = ti_adpll_init_divider(d, TI_ADPLL_M2, -ENODEV,
+ "m2", d->clocks[TI_ADPLL_DCO].clk,
+ d->regs + ADPLL_M2NDIV_OFFSET,
+ ADPLL_M2NDIV_M2,
+ ADPLL_M2NDIV_M2_ADPLL_LJ_WIDTH,
+ CLK_DIVIDER_ONE_BASED);
+ if (err)
+ return err;
+
+ /* Output clkoutldo, gated output of M2 */
+ err = ti_adpll_init_gate(d, TI_ADPLL_M2_GATE, TI_ADPLL_LJ_CLKOUTLDO,
+ "clkoutldo", d->clocks[TI_ADPLL_M2].clk,
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_CLKOUTLDOEN_ADPLL_LJ,
+ 0);
+ if (err)
+ return err;
+
+ /* Internal mux, sources from divider N2 or clkinpulow */
+ err = ti_adpll_init_mux(d, TI_ADPLL_BYPASS, "bypass",
+ d->clocks[TI_ADPLL_N2].clk,
+ d->parent_clocks[TI_ADPLL_CLKINPULOW],
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_ULOWCLKEN);
+ if (err)
+ return err;
+
+ /* Output clkout, sources M2 or bypass */
+ err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT, TI_ADPLL_S_CLKOUT,
+ ADPLL_CLKCTRL_CLKOUTEN, "clkout",
+ d->clocks[TI_ADPLL_M2].clk,
+ d->clocks[TI_ADPLL_BYPASS].clk);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void ti_adpll_free_resources(struct ti_adpll_data *d)
+{
+ int i;
+
+ for (i = TI_ADPLL_M3; i >= 0; i--) {
+ struct ti_adpll_clock *ac = &d->clocks[i];
+
+ if (!ac || IS_ERR_OR_NULL(ac->clk))
+ continue;
+ if (ac->cl)
+ clkdev_drop(ac->cl);
+ if (ac->unregister)
+ ac->unregister(ac->clk);
+ }
+}
+
+/* MPU PLL manages the lock register for all PLLs */
+static void ti_adpll_unlock_all(void __iomem *reg)
+{
+ u32 v;
+
+ v = readl_relaxed(reg);
+ if (v == ADPLL_PLLSS_MMR_LOCK_ENABLED)
+ writel_relaxed(ADPLL_PLLSS_MMR_UNLOCK_MAGIC, reg);
+}
+
+static int ti_adpll_init_registers(struct ti_adpll_data *d)
+{
+ int register_offset = 0;
+
+ if (d->c->is_type_s) {
+ register_offset = 8;
+ ti_adpll_unlock_all(d->iobase + ADPLL_PLLSS_MMR_LOCK_OFFSET);
+ }
+
+ d->regs = d->iobase + register_offset + ADPLL_PWRCTRL_OFFSET;
+
+ return 0;
+}
+
+static int ti_adpll_init_inputs(struct ti_adpll_data *d)
+{
+ const char *error = "need at least %i inputs";
+ struct clk *clock;
+ int nr_inputs;
+
+ nr_inputs = of_clk_get_parent_count(d->np);
+ if (nr_inputs < d->c->nr_max_inputs) {
+ dev_err(d->dev, error, d->c->nr_max_inputs);
+ return -EINVAL;
+ }
+ of_clk_parent_fill(d->np, d->parent_names, nr_inputs);
+
+ clock = devm_clk_get(d->dev, d->parent_names[0]);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "could not get clkinp\n");
+ return PTR_ERR(clock);
+ }
+ d->parent_clocks[TI_ADPLL_CLKINP] = clock;
+
+ clock = devm_clk_get(d->dev, d->parent_names[1]);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "could not get clkinpulow clock\n");
+ return PTR_ERR(clock);
+ }
+ d->parent_clocks[TI_ADPLL_CLKINPULOW] = clock;
+
+ if (d->c->is_type_s) {
+ clock = devm_clk_get(d->dev, d->parent_names[2]);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "could not get clkinphif clock\n");
+ return PTR_ERR(clock);
+ }
+ d->parent_clocks[TI_ADPLL_CLKINPHIF] = clock;
+ }
+
+ return 0;
+}
+
+static const struct ti_adpll_platform_data ti_adpll_type_s = {
+ .is_type_s = true,
+ .nr_max_inputs = MAX_ADPLL_INPUTS,
+ .nr_max_outputs = MAX_ADPLL_OUTPUTS,
+ .output_index = TI_ADPLL_S_DCOCLKLDO,
+};
+
+static const struct ti_adpll_platform_data ti_adpll_type_lj = {
+ .is_type_s = false,
+ .nr_max_inputs = MAX_ADPLL_INPUTS - 1,
+ .nr_max_outputs = MAX_ADPLL_OUTPUTS - 1,
+ .output_index = -EINVAL,
+};
+
+static const struct of_device_id ti_adpll_match[] = {
+ { .compatible = "ti,dm814-adpll-s-clock", &ti_adpll_type_s },
+ { .compatible = "ti,dm814-adpll-lj-clock", &ti_adpll_type_lj },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_adpll_match);
+
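+/*
+ * An instance is expected to look roughly like the following in the DT
+ * (illustrative only; the address, input clocks and output names are
+ * examples for the type S MPU PLL):
+ *
+ * adpll_mpu_ck: adpll@40 {
+ *	#clock-cells = <1>;
+ *	compatible = "ti,dm814-adpll-s-clock";
+ *	reg = <0x40 0x40>;
+ *	clocks = <&devosc_ck &devosc_ck &devosc_ck>;
+ *	clock-names = "clkinp", "clkinpulow", "clkinphif";
+ *	clock-output-names = "481c5040.adpll.dcoclkldo",
+ *			     "481c5040.adpll.clkout",
+ *			     "481c5040.adpll.clkoutx2",
+ *			     "481c5040.adpll.clkouthif";
+ * };
+ */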
+static int ti_adpll_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ const struct ti_adpll_platform_data *pdata;
+ struct ti_adpll_data *d;
+ struct resource *res;
+ int err;
+
+ match = of_match_device(ti_adpll_match, dev);
+ if (match)
+ pdata = match->data;
+ else
+ return -ENODEV;
+
+ d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ d->dev = dev;
+ d->np = node;
+ d->c = pdata;
+ dev_set_drvdata(d->dev, d);
+ spin_lock_init(&d->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ d->pa = res->start;
+
+ d->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(d->iobase)) {
+ dev_err(dev, "could not get IO base: %li\n",
+ PTR_ERR(d->iobase));
+ return PTR_ERR(d->iobase);
+ }
+
+ err = ti_adpll_init_registers(d);
+ if (err)
+ return err;
+
+ err = ti_adpll_init_inputs(d);
+ if (err)
+ return err;
+
+ d->clocks = devm_kcalloc(d->dev, TI_ADPLL_NR_CLOCKS,
+ sizeof(struct ti_adpll_clock),
+ GFP_KERNEL);
+ if (!d->clocks)
+ return -ENOMEM;
+
+ err = ti_adpll_init_dco(d);
+ if (err) {
+ dev_err(dev, "could not register dco: %i\n", err);
+ goto free;
+ }
+
+ err = ti_adpll_init_children_adpll_s(d);
+ if (err)
+ goto free;
+ err = ti_adpll_init_children_adpll_lj(d);
+ if (err)
+ goto free;
+
+ err = of_clk_add_provider(d->np, of_clk_src_onecell_get, &d->outputs);
+ if (err)
+ goto free;
+
+ return 0;
+
+free:
+ WARN_ON(1);
+ ti_adpll_free_resources(d);
+
+ return err;
+}
+
+static int ti_adpll_remove(struct platform_device *pdev)
+{
+ struct ti_adpll_data *d = dev_get_drvdata(&pdev->dev);
+
+ ti_adpll_free_resources(d);
+
+ return 0;
+}
+
+static struct platform_driver ti_adpll_driver = {
+ .driver = {
+ .name = "ti-adpll",
+ .of_match_table = ti_adpll_match,
+ },
+ .probe = ti_adpll_probe,
+ .remove = ti_adpll_remove,
+};
+
+static int __init ti_adpll_init(void)
+{
+ return platform_driver_register(&ti_adpll_driver);
+}
+core_initcall(ti_adpll_init);
+
+static void __exit ti_adpll_exit(void)
+{
+ platform_driver_unregister(&ti_adpll_driver);
+}
+module_exit(ti_adpll_exit);
+
+MODULE_DESCRIPTION("Clock driver for dm814x ADPLL");
+MODULE_ALIAS("platform:dm814-adpll-clock");
+MODULE_AUTHOR("Tony LIndgren <tony@atomide.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index b336a8c11e2a..6411e132faa2 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -140,11 +140,21 @@ static void __init omap_clk_register_apll(struct clk_hw *hw,
struct dpll_data *ad = clk_hw->dpll_data;
struct clk *clk;
- ad->clk_ref = of_clk_get(node, 0);
- ad->clk_bypass = of_clk_get(node, 1);
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_debug("clk-ref for %s not ready, retry\n",
+ node->name);
+ if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
+ return;
+
+ goto cleanup;
+ }
- if (IS_ERR(ad->clk_ref) || IS_ERR(ad->clk_bypass)) {
- pr_debug("clk-ref or clk-bypass for %s not ready, retry\n",
+ ad->clk_ref = __clk_get_hw(clk);
+
+ clk = of_clk_get(node, 1);
+ if (IS_ERR(clk)) {
+ pr_debug("clk-bypass for %s not ready, retry\n",
node->name);
if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
return;
@@ -152,6 +162,8 @@ static void __init omap_clk_register_apll(struct clk_hw *hw,
goto cleanup;
}
+ ad->clk_bypass = __clk_get_hw(clk);
+
clk = clk_register(NULL, &clk_hw->hw);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/ti/clk-814x.c b/drivers/clk/ti/clk-814x.c
index 9e85fcc74cc9..52c6efc53731 100644
--- a/drivers/clk/ti/clk-814x.c
+++ b/drivers/clk/ti/clk-814x.c
@@ -5,8 +5,10 @@
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>
+#include <linux/of_platform.h>
#include "clock.h"
@@ -27,11 +29,62 @@ static struct ti_dt_clk dm814_clks[] = {
{ .node_name = NULL },
};
+static bool timer_clocks_initialized;
+
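+/*
+ * The ADPLL devices are populated early so the PLL output clocks are
+ * available before the rest of the devices probe.
+ */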
+static int __init dm814x_adpll_early_init(void)
+{
+ struct device_node *np;
+
+ if (!timer_clocks_initialized)
+ return -ENODEV;
+
+ np = of_find_node_by_name(NULL, "pllss");
+ if (!np) {
+ pr_err("Could not find node for plls\n");
+ return -ENODEV;
+ }
+
+ of_platform_populate(np, NULL, NULL, NULL);
+
+ return 0;
+}
+core_initcall(dm814x_adpll_early_init);
+
+static const char * const init_clocks[] = {
+ "pll040clkout", /* MPU 481c5040.adpll.clkout */
+ "pll290clkout", /* DDR 481c5290.adpll.clkout */
+};
+
+static int __init dm814x_adpll_enable_init_clocks(void)
+{
+ int i, err;
+
+ if (!timer_clocks_initialized)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(init_clocks); i++) {
+ struct clk *clock;
+
+ clock = clk_get(NULL, init_clocks[i]);
+ if (WARN(IS_ERR(clock), "could not find init clock %s\n",
+ init_clocks[i]))
+ continue;
+ err = clk_prepare_enable(clock);
+ if (WARN(err, "could not enable init clock %s\n",
+ init_clocks[i]))
+ continue;
+ }
+
+ return 0;
+}
+postcore_initcall(dm814x_adpll_enable_init_clocks);
+
int __init dm814x_dt_clk_init(void)
{
ti_dt_clocks_register(dm814_clks);
omap2_clk_disable_autoidle_all();
omap2_clk_enable_init_clocks(NULL, 0);
+ timer_clocks_initialized = true;
return 0;
}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index b5bcd77e8d0f..5fcf247759ac 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -305,8 +305,8 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
case TI_CLK_FIXED:
fixed = setup->data;
- clk = clk_register_fixed_rate(NULL, setup->name, NULL,
- CLK_IS_ROOT, fixed->frequency);
+ clk = clk_register_fixed_rate(NULL, setup->name, NULL, 0,
+ fixed->frequency);
break;
case TI_CLK_MUX:
clk = ti_clk_register_mux(setup);
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index b5cc6f66ae5d..032c658a5f5e 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -254,7 +254,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
v >>= __ffs(dd->enable_mask);
if (_omap2_dpll_is_in_bypass(v))
- return clk_get_rate(dd->clk_bypass);
+ return clk_hw_get_rate(dd->clk_bypass);
v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg);
dpll_mult = v & dd->mult_mask;
@@ -262,7 +262,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
dpll_div = v & dd->div1_mask;
dpll_div >>= __ffs(dd->div1_mask);
- dpll_clk = (u64)clk_get_rate(dd->clk_ref) * dpll_mult;
+ dpll_clk = (u64)clk_hw_get_rate(dd->clk_ref) * dpll_mult;
do_div(dpll_clk, dpll_div + 1);
return dpll_clk;
@@ -301,7 +301,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
dd = clk->dpll_data;
- ref_rate = clk_get_rate(dd->clk_ref);
+ ref_rate = clk_hw_get_rate(dd->clk_ref);
clk_name = clk_hw_get_name(hw);
pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
clk_name, target_rate);
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index b9bc3b8df659..6cf9dd189a92 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -109,7 +109,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
struct clk_hw *clk_hw;
const char *clkdm_name = node->name;
int i;
- int num_clks;
+ unsigned int num_clks;
num_clks = of_clk_get_parent_count(node);
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index dbef218fe5ec..1cf70f452e1e 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -28,8 +28,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-
static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -236,14 +234,14 @@ cleanup:
static void __init of_ti_composite_clk_setup(struct device_node *node)
{
- int num_clks;
+ unsigned int num_clks;
int i;
struct clk_hw_omap_comp *cclk;
/* Number of component clocks to be put inside this clock */
num_clks = of_clk_get_parent_count(node);
- if (num_clks < 1) {
+ if (!num_clks) {
pr_err("composite clk %s must have component(s)\n", node->name);
return;
}
@@ -273,13 +271,13 @@ CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
int type)
{
- int num_parents;
+ unsigned int num_parents;
const char **parent_names;
struct component_clk *clk;
num_parents = of_clk_get_parent_count(node);
- if (num_parents < 1) {
+ if (!num_parents) {
pr_err("component-clock %s must have parent(s)\n", node->name);
return -EINVAL;
}
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index df2558350fc1..b4e5de16e561 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -26,8 +26,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-
#define div_mask(d) ((1 << ((d)->width)) - 1)
static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 5519b386edc0..3bc9959f71c3 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -147,11 +147,22 @@ static void __init _register_dpll(struct clk_hw *hw,
struct dpll_data *dd = clk_hw->dpll_data;
struct clk *clk;
- dd->clk_ref = of_clk_get(node, 0);
- dd->clk_bypass = of_clk_get(node, 1);
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_debug("clk-ref missing for %s, retry later\n",
+ node->name);
+ if (!ti_clk_retry_init(node, hw, _register_dpll))
+ return;
- if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
- pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
+ goto cleanup;
+ }
+
+ dd->clk_ref = __clk_get_hw(clk);
+
+ clk = of_clk_get(node, 1);
+
+ if (IS_ERR(clk)) {
+ pr_debug("clk-bypass missing for %s, retry later\n",
node->name);
if (!ti_clk_retry_init(node, hw, _register_dpll))
return;
@@ -159,6 +170,8 @@ static void __init _register_dpll(struct clk_hw *hw,
goto cleanup;
}
+ dd->clk_bypass = __clk_get_hw(clk);
+
/* register the clock */
clk = clk_register(NULL, &clk_hw->hw);
@@ -251,8 +264,8 @@ struct clk *ti_clk_register_dpll(struct ti_clk *setup)
dd->recal_en_bit = dpll->recal_en_bit;
dd->recal_st_bit = dpll->recal_st_bit;
- dd->clk_ref = clk_ref;
- dd->clk_bypass = clk_bypass;
+ dd->clk_ref = __clk_get_hw(clk_ref);
+ dd->clk_bypass = __clk_get_hw(clk_bypass);
if (dpll->flags & CLKF_CORE)
ops = &omap3_dpll_core_ck_ops;
@@ -361,7 +374,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
init->ops = ops;
init->num_parents = of_clk_get_parent_count(node);
- if (init->num_parents < 1) {
+ if (!init->num_parents) {
pr_err("%s must have parent(s)\n", node->name);
goto cleanup;
}
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index cc739291a3ce..88f2ce81ba55 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -98,7 +98,7 @@ static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
unsigned long fint;
u16 f = 0;
- fint = clk_get_rate(clk->dpll_data->clk_ref) / n;
+ fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;
pr_debug("clock: fint is %lu\n", fint);
@@ -460,12 +460,11 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
parent = clk_hw_get_parent(hw);
- if (clk_hw_get_rate(hw) ==
- clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
- WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
+ if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
+ WARN_ON(parent != dd->clk_bypass);
r = _omap3_noncore_dpll_bypass(clk);
} else {
- WARN_ON(parent != __clk_get_hw(dd->clk_ref));
+ WARN_ON(parent != dd->clk_ref);
r = _omap3_noncore_dpll_lock(clk);
}
@@ -513,13 +512,13 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
if (!dd)
return -EINVAL;
- if (clk_get_rate(dd->clk_bypass) == req->rate &&
+ if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
- req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
+ req->best_parent_hw = dd->clk_bypass;
} else {
req->rate = omap2_dpll_round_rate(hw, req->rate,
&req->best_parent_rate);
- req->best_parent_hw = __clk_get_hw(dd->clk_ref);
+ req->best_parent_hw = dd->clk_ref;
}
req->best_parent_rate = req->rate;
@@ -577,7 +576,7 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
if (!dd)
return -EINVAL;
- if (clk_hw_get_parent(hw) != __clk_get_hw(dd->clk_ref))
+ if (clk_hw_get_parent(hw) != dd->clk_ref)
return -EINVAL;
if (dd->last_rounded_rate == 0)
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
index 660d7436ac24..82c05b55a7be 100644
--- a/drivers/clk/ti/dpll44xx.c
+++ b/drivers/clk/ti/dpll44xx.c
@@ -94,7 +94,7 @@ static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
{
long fint, fout;
- fint = clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
+ fint = clk_hw_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
fout = fint * dd->last_rounded_m;
if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
@@ -212,13 +212,13 @@ int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
if (!dd)
return -EINVAL;
- if (clk_get_rate(dd->clk_bypass) == req->rate &&
+ if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
- req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
+ req->best_parent_hw = dd->clk_bypass;
} else {
req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
&req->best_parent_rate);
- req->best_parent_hw = __clk_get_hw(dd->clk_ref);
+ req->best_parent_hw = dd->clk_ref;
}
req->best_parent_rate = req->rate;
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 5429d3534363..bc05f276f32b 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -24,8 +24,6 @@
#include "clock.h"
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index dab9ba88b9d6..44777ab6fdeb 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -26,8 +26,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
-#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-
static u8 ti_clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
@@ -180,7 +178,7 @@ static void of_mux_clk_setup(struct device_node *node)
{
struct clk *clk;
void __iomem *reg;
- int num_parents;
+ unsigned int num_parents;
const char **parent_names;
u8 clk_mux_flags = 0;
u32 mask = 0;
@@ -263,7 +261,7 @@ struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
{
struct clk_mux *mux;
- int num_parents;
+ unsigned int num_parents;
u32 val;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index 222425d08ab6..a07c31e6f26d 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -40,8 +40,7 @@ static int ab8500_reg_clks(struct device *dev)
return ret;
/* ab8500_sysclk */
- clk = clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK, 0);
clk_register_clkdev(clk, "sysclk", "ab8500-usb.0");
clk_register_clkdev(clk, "sysclk", "ab-iddet.0");
clk_register_clkdev(clk, "sysclk", "snd-soc-mop500.0");
@@ -68,7 +67,7 @@ static int ab8500_reg_clks(struct device *dev)
clk = clk_reg_sysctrl_gate_fixed_rate(dev, "ulpclk", NULL,
AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_ULPCLKREQ,
AB8500_SYSULPCLKCTRL1_ULPCLKREQ,
- 38400000, 9000, CLK_IS_ROOT);
+ 38400000, 9000, 0);
clk_register_clkdev(clk, "ulpclk", "snd-soc-mop500.0");
/* ab8500_intclk */
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index 271c09644652..9a736d939806 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -91,21 +91,21 @@ void u8500_clk_init(void)
/* Clock sources */
clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
prcmu_clk[PRCMU_PLLSOC0] = clk;
clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
prcmu_clk[PRCMU_PLLSOC1] = clk;
clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
prcmu_clk[PRCMU_PLLDDR] = clk;
/* FIXME: Add sys, ulp and int clocks here. */
rtc_clk = clk_register_fixed_rate(NULL, "rtc32k", "NULL",
- CLK_IS_ROOT|CLK_IGNORE_UNUSED,
+ CLK_IGNORE_UNUSED,
32768);
/* PRCMU clocks */
@@ -126,105 +126,101 @@ void u8500_clk_init(void)
clk = clk_reg_prcmu_gate("sgclk", sgaclk_parent,
PRCMU_SGACLK, 0);
else
- clk = clk_reg_prcmu_gate("sgclk", NULL,
- PRCMU_SGACLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("sgclk", NULL, PRCMU_SGACLK, 0);
prcmu_clk[PRCMU_SGACLK] = clk;
- clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0);
prcmu_clk[PRCMU_UARTCLK] = clk;
- clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, 0);
prcmu_clk[PRCMU_MSP02CLK] = clk;
- clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0);
prcmu_clk[PRCMU_MSP1CLK] = clk;
- clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0);
prcmu_clk[PRCMU_I2CCLK] = clk;
- clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0);
prcmu_clk[PRCMU_SLIMCLK] = clk;
- clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0);
prcmu_clk[PRCMU_PER1CLK] = clk;
- clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0);
prcmu_clk[PRCMU_PER2CLK] = clk;
- clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0);
prcmu_clk[PRCMU_PER3CLK] = clk;
- clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0);
prcmu_clk[PRCMU_PER5CLK] = clk;
- clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0);
prcmu_clk[PRCMU_PER6CLK] = clk;
- clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0);
prcmu_clk[PRCMU_PER7CLK] = clk;
clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_LCDCLK] = clk;
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0);
prcmu_clk[PRCMU_BMLCLK] = clk;
clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_HSITXCLK] = clk;
clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_HSIRXCLK] = clk;
clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_HDMICLK] = clk;
- clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0);
prcmu_clk[PRCMU_APEATCLK] = clk;
clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_APETRACECLK] = clk;
- clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0);
prcmu_clk[PRCMU_MCDECLK] = clk;
- clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0);
prcmu_clk[PRCMU_IPI2CCLK] = clk;
- clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0);
prcmu_clk[PRCMU_DSIALTCLK] = clk;
- clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0);
prcmu_clk[PRCMU_DMACLK] = clk;
- clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0);
prcmu_clk[PRCMU_B2R2CLK] = clk;
clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_TVCLK] = clk;
- clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0);
prcmu_clk[PRCMU_SSPCLK] = clk;
- clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0);
prcmu_clk[PRCMU_RNGCLK] = clk;
- clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0);
prcmu_clk[PRCMU_UICCCLK] = clk;
- clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0);
prcmu_clk[PRCMU_TIMCLK] = clk;
clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK,
- 100000000,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ 100000000, CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_SDMMCCLK] = clk;
clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
@@ -252,7 +248,7 @@ void u8500_clk_init(void)
prcmu_clk[PRCMU_DSI2ESCCLK] = clk;
clk = clk_reg_prcmu_scalable_rate("armss", NULL,
- PRCMU_ARMSS, 0, CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED);
prcmu_clk[PRCMU_ARMSS] = clk;
twd_clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
index d7bcb7a86615..86549e59fb42 100644
--- a/drivers/clk/ux500/u8540_clk.c
+++ b/drivers/clk/ux500/u8540_clk.c
@@ -56,28 +56,28 @@ void u8540_clk_init(void)
/* Clock sources. */
/* Fixed ClockGen */
clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
clk_register_clkdev(clk, "soc0_pll", NULL);
clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
clk_register_clkdev(clk, "soc1_pll", NULL);
clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
clk_register_clkdev(clk, "ddr_pll", NULL);
clk = clk_register_fixed_rate(NULL, "rtc32k", NULL,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED,
+ CLK_IGNORE_UNUSED,
32768);
clk_register_clkdev(clk, "clk32k", NULL);
clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
clk = clk_register_fixed_rate(NULL, "ulp38m4", NULL,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED,
+ CLK_IGNORE_UNUSED,
38400000);
- clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0);
clk_register_clkdev(clk, NULL, "UART");
/* msp02clk needs a abx500 clk as parent. Handle by abx500 clk driver */
@@ -85,120 +85,116 @@ void u8540_clk_init(void)
PRCMU_MSP02CLK, 0);
clk_register_clkdev(clk, NULL, "MSP02");
- clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0);
clk_register_clkdev(clk, NULL, "MSP1");
- clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0);
clk_register_clkdev(clk, NULL, "I2C");
- clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0);
clk_register_clkdev(clk, NULL, "slim");
- clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH1");
- clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH2");
- clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH3");
- clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH5");
- clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH6");
- clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH7");
clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "lcd");
clk_register_clkdev(clk, "lcd", "mcde");
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0);
clk_register_clkdev(clk, NULL, "bml");
clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "hdmi");
clk_register_clkdev(clk, "hdmi", "mcde");
- clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0);
clk_register_clkdev(clk, NULL, "apeat");
- clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK, 0);
clk_register_clkdev(clk, NULL, "apetrace");
- clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0);
clk_register_clkdev(clk, NULL, "mcde");
clk_register_clkdev(clk, "mcde", "mcde");
clk_register_clkdev(clk, NULL, "dsilink.0");
clk_register_clkdev(clk, NULL, "dsilink.1");
clk_register_clkdev(clk, NULL, "dsilink.2");
- clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0);
clk_register_clkdev(clk, NULL, "ipi2");
- clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0);
clk_register_clkdev(clk, NULL, "dsialt");
- clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0);
clk_register_clkdev(clk, NULL, "dma40.0");
- clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0);
clk_register_clkdev(clk, NULL, "b2r2");
clk_register_clkdev(clk, NULL, "b2r2_core");
clk_register_clkdev(clk, NULL, "U8500-B2R2.0");
clk_register_clkdev(clk, NULL, "b2r2_1_core");
clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "tv");
clk_register_clkdev(clk, "tv", "mcde");
- clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0);
clk_register_clkdev(clk, NULL, "SSP");
- clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0);
clk_register_clkdev(clk, NULL, "rngclk");
- clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0);
clk_register_clkdev(clk, NULL, "uicc");
- clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0);
clk_register_clkdev(clk, NULL, "mtu0");
clk_register_clkdev(clk, NULL, "mtu1");
clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL,
PRCMU_SDMMCCLK, 100000000,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdmmc");
clk = clk_reg_prcmu_opp_volt_scalable("sdmmchclk", NULL,
PRCMU_SDMMCHCLK, 400000000,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdmmchclk");
- clk = clk_reg_prcmu_gate("hvaclk", NULL, PRCMU_HVACLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("hvaclk", NULL, PRCMU_HVACLK, 0);
clk_register_clkdev(clk, NULL, "hva");
- clk = clk_reg_prcmu_gate("g1clk", NULL, PRCMU_G1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("g1clk", NULL, PRCMU_G1CLK, 0);
clk_register_clkdev(clk, NULL, "g1");
clk = clk_reg_prcmu_scalable("spare1clk", NULL, PRCMU_SPARE1CLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, "dsilcd", "mcde");
clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
@@ -244,7 +240,7 @@ void u8540_clk_init(void)
clk_register_clkdev(clk, "dsilp2", "mcde");
clk = clk_reg_prcmu_scalable_rate("armss", NULL,
- PRCMU_ARMSS, 0, CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED);
clk_register_clkdev(clk, "armss", NULL);
clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 3bca438ecd19..5e9b65278e4c 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -170,7 +170,7 @@ static struct clk *icst_clk_setup(struct device *dev,
init.name = name;
init.ops = &icst_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
icst->map = map;
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 65c842a21c62..74c3216dbb00 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -98,8 +98,7 @@ void integrator_impd1_clk_init(void __iomem *base, unsigned int id)
/* Register the fixed rate PCLK */
imc->pclkname = kasprintf(GFP_KERNEL, "lm%x-pclk", id);
- pclk = clk_register_fixed_rate(NULL, imc->pclkname, NULL,
- CLK_IS_ROOT, 0);
+ pclk = clk_register_fixed_rate(NULL, imc->pclkname, NULL, 0, 0);
imc->pclk = pclk;
imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id);
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
index bd4dd2463e23..c56efc70ac16 100644
--- a/drivers/clk/versatile/clk-realview.c
+++ b/drivers/clk/versatile/clk-realview.c
@@ -56,12 +56,11 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176)
struct clk *clk;
/* APB clock dummy */
- clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0);
clk_register_clkdev(clk, "apb_pclk", NULL);
/* 24 MHz clock */
- clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, CLK_IS_ROOT,
- 24000000);
+ clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, 0, 24000000);
clk_register_clkdev(clk, NULL, "dev:uart0");
clk_register_clkdev(clk, NULL, "dev:uart1");
clk_register_clkdev(clk, NULL, "dev:uart2");
@@ -81,8 +80,7 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176)
/* 1 MHz clock */
- clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, CLK_IS_ROOT,
- 1000000);
+ clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, 0, 1000000);
clk_register_clkdev(clk, NULL, "sp804");
/* ICST VCO clock */
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index e78755e0ef78..1fe1e8d970cf 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -92,6 +92,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
int num = ARRAY_SIZE(parent_names);
char name[12];
struct clk_init_data init;
+ static int instance;
int i;
bool deprecated;
@@ -117,7 +118,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
- snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
+ snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
sp810->timerclken[i].sp810 = sp810;
sp810->timerclken[i].channel = i;
@@ -138,5 +139,6 @@ static void __init clk_sp810_of_setup(struct device_node *node)
}
of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
+ instance++;
}
CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 89c0609e180b..7e5add7d7752 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -94,7 +94,7 @@ static int vexpress_osc_probe(struct platform_device *pdev)
init.name = dev_name(&pdev->dev);
init.ops = &vexpress_osc_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.num_parents = 0;
osc->hw.init = &init;
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
index f827083defc4..6b40eb89ae19 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpt.c
@@ -10,8 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -30,7 +28,7 @@ static int lpt_clk_probe(struct platform_device *pdev)
/* LPSS free running clock */
drvdata->name = "lpss_clk";
clk = clk_register_fixed_rate(&pdev->dev, drvdata->name, NULL,
- CLK_IS_ROOT, 100000000);
+ 0, 100000000);
if (IS_ERR(clk))
return PTR_ERR(clk);
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 38a65c3e62fc..88a2cab37f62 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -265,8 +265,7 @@ static void __init zynq_clk_setup(struct device_node *np)
pr_warn("ps_clk frequency not specified, using 33 MHz.\n");
tmp = 33333333;
}
- ps_clk = clk_register_fixed_rate(NULL, "ps_clk", NULL, CLK_IS_ROOT,
- tmp);
+ ps_clk = clk_register_fixed_rate(NULL, "ps_clk", NULL, 0, tmp);
/* PLLs */
clk = clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 33db7406c0e2..c346be650892 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -160,6 +160,7 @@ config CLKSRC_EFM32
config CLKSRC_LPC32XX
bool "Clocksource for LPC32XX" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+ depends on ARM
select CLKSRC_MMIO
select CLKSRC_OF
help
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index c64d543d64bf..5152b3898155 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -32,6 +32,14 @@
#define CNTTIDR 0x08
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
+#define CNTACR(n) (0x40 + ((n) * 4))
+#define CNTACR_RPCT BIT(0)
+#define CNTACR_RVCT BIT(1)
+#define CNTACR_RFRQ BIT(2)
+#define CNTACR_RVOFF BIT(3)
+#define CNTACR_RWVT BIT(4)
+#define CNTACR_RWPT BIT(5)
+
#define CNTVCT_LO 0x08
#define CNTVCT_HI 0x0c
#define CNTFRQ 0x10
@@ -67,7 +75,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
static struct clock_event_device __percpu *arch_timer_evt;
-static bool arch_timer_use_virtual = true;
+static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
@@ -263,14 +271,22 @@ static void __arch_timer_setup(unsigned type,
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->cpumask = cpumask_of(smp_processor_id());
- if (arch_timer_use_virtual) {
- clk->irq = arch_timer_ppi[VIRT_PPI];
+ clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
clk->set_next_event = arch_timer_set_next_event_virt;
- } else {
- clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+ break;
+ case PHYS_SECURE_PPI:
+ case PHYS_NONSECURE_PPI:
+ case HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
clk->set_next_event = arch_timer_set_next_event_phys;
+ break;
+ default:
+ BUG();
}
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
@@ -279,10 +295,12 @@ static void __arch_timer_setup(unsigned type,
clk->cpumask = cpu_all_mask;
if (arch_timer_mem_use_virtual) {
clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
clk->set_next_event =
arch_timer_set_next_event_virt_mem;
} else {
clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
clk->set_next_event =
arch_timer_set_next_event_phys_mem;
}
@@ -338,17 +356,20 @@ static void arch_counter_set_user_access(void)
arch_timer_set_cntkctl(cntkctl);
}
+static bool arch_timer_has_nonsecure_ppi(void)
+{
+ return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
+ arch_timer_ppi[PHYS_NONSECURE_PPI]);
+}
+
static int arch_timer_setup(struct clock_event_device *clk)
{
__arch_timer_setup(ARCH_CP15_TIMER, clk);
- if (arch_timer_use_virtual)
- enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
- else {
- enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
- if (arch_timer_ppi[PHYS_NONSECURE_PPI])
- enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
- }
+ enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+
+ if (arch_timer_has_nonsecure_ppi())
+ enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
arch_counter_set_user_access();
if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
@@ -390,7 +411,7 @@ static void arch_timer_banner(unsigned type)
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
type & ARCH_CP15_TIMER ?
- arch_timer_use_virtual ? "virt" : "phys" :
+ (arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
"",
type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
type & ARCH_MEM_TIMER ?
@@ -460,7 +481,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
- if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
+ if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
@@ -490,13 +511,9 @@ static void arch_timer_stop(struct clock_event_device *clk)
pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
clk->irq, smp_processor_id());
- if (arch_timer_use_virtual)
- disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
- else {
- disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
- if (arch_timer_ppi[PHYS_NONSECURE_PPI])
- disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
- }
+ disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
+ if (arch_timer_has_nonsecure_ppi())
+ disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
clk->set_state_shutdown(clk);
}
@@ -562,12 +579,14 @@ static int __init arch_timer_register(void)
goto out;
}
- if (arch_timer_use_virtual) {
- ppi = arch_timer_ppi[VIRT_PPI];
+ ppi = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case VIRT_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_virt,
"arch_timer", arch_timer_evt);
- } else {
- ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+ break;
+ case PHYS_SECURE_PPI:
+ case PHYS_NONSECURE_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
@@ -578,6 +597,13 @@ static int __init arch_timer_register(void)
free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
arch_timer_evt);
}
+ break;
+ case HYP_PPI:
+ err = request_percpu_irq(ppi, arch_timer_handler_phys,
+ "arch_timer", arch_timer_evt);
+ break;
+ default:
+ BUG();
}
if (err) {
@@ -602,15 +628,10 @@ static int __init arch_timer_register(void)
out_unreg_notify:
unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
- if (arch_timer_use_virtual)
- free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
- else {
- free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+ free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
+ if (arch_timer_has_nonsecure_ppi())
+ free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
arch_timer_evt);
- if (arch_timer_ppi[PHYS_NONSECURE_PPI])
- free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
- arch_timer_evt);
- }
out_free:
free_percpu(arch_timer_evt);
@@ -697,12 +718,25 @@ static void __init arch_timer_init(void)
*
* If no interrupt provided for virtual timer, we'll have to
* stick to the physical timer. It'd better be accessible...
+ *
+ * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
+ * accesses to CNTP_*_EL1 registers are silently redirected to
+ * their CNTHP_*_EL2 counterparts, and use a different PPI
+ * number.
*/
if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
- arch_timer_use_virtual = false;
+ bool has_ppi;
- if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
- !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+ if (is_kernel_in_hyp_mode()) {
+ arch_timer_uses_ppi = HYP_PPI;
+ has_ppi = !!arch_timer_ppi[HYP_PPI];
+ } else {
+ arch_timer_uses_ppi = PHYS_SECURE_PPI;
+ has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
+ !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
+ }
+
+ if (!has_ppi) {
pr_warn("arch_timer: No interrupt available, giving up\n");
return;
}
@@ -735,7 +769,7 @@ static void __init arch_timer_of_init(struct device_node *np)
*/
if (IS_ENABLED(CONFIG_ARM) &&
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
- arch_timer_use_virtual = false;
+ arch_timer_uses_ppi = PHYS_SECURE_PPI;
arch_timer_init();
}
@@ -757,7 +791,6 @@ static void __init arch_timer_mem_init(struct device_node *np)
}
cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
- iounmap(cntctlbase);
/*
* Try to find a virtual capable frame. Otherwise fall back to a
@@ -765,20 +798,31 @@ static void __init arch_timer_mem_init(struct device_node *np)
*/
for_each_available_child_of_node(np, frame) {
int n;
+ u32 cntacr;
if (of_property_read_u32(frame, "frame-number", &n)) {
pr_err("arch_timer: Missing frame-number\n");
- of_node_put(best_frame);
of_node_put(frame);
- return;
+ goto out;
}
- if (cnttidr & CNTTIDR_VIRT(n)) {
+ /* Try enabling everything, and see what sticks */
+ cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
+ CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
+ writel_relaxed(cntacr, cntctlbase + CNTACR(n));
+ cntacr = readl_relaxed(cntctlbase + CNTACR(n));
+
+ if ((cnttidr & CNTTIDR_VIRT(n)) &&
+ !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
of_node_put(best_frame);
best_frame = frame;
arch_timer_mem_use_virtual = true;
break;
}
+
+ if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
+ continue;
+
of_node_put(best_frame);
best_frame = of_node_get(frame);
}
@@ -786,24 +830,26 @@ static void __init arch_timer_mem_init(struct device_node *np)
base = arch_counter_base = of_iomap(best_frame, 0);
if (!base) {
pr_err("arch_timer: Can't map frame's registers\n");
- of_node_put(best_frame);
- return;
+ goto out;
}
if (arch_timer_mem_use_virtual)
irq = irq_of_parse_and_map(best_frame, 1);
else
irq = irq_of_parse_and_map(best_frame, 0);
- of_node_put(best_frame);
+
if (!irq) {
pr_err("arch_timer: Frame missing %s irq",
arch_timer_mem_use_virtual ? "virt" : "phys");
- return;
+ goto out;
}
arch_timer_detect_rate(base, np);
arch_timer_mem_register(base, irq);
arch_timer_common_init();
+out:
+ iounmap(cntctlbase);
+ of_node_put(best_frame);
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_init);
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index d189d8cb69f7..9df0d1699d22 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -16,6 +16,7 @@
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -174,6 +175,7 @@ static int gt_clockevents_init(struct clock_event_device *clk)
clk->set_state_shutdown = gt_clockevent_shutdown;
clk->set_state_periodic = gt_clockevent_set_periodic;
clk->set_state_oneshot = gt_clockevent_shutdown;
+ clk->set_state_oneshot_stopped = gt_clockevent_shutdown;
clk->set_next_event = gt_clockevent_set_next_event;
clk->cpumask = cpumask_of(cpu);
clk->rating = 300;
@@ -221,6 +223,21 @@ static u64 notrace gt_sched_clock_read(void)
}
#endif
+static unsigned long gt_read_long(void)
+{
+ return readl_relaxed(gt_base + GT_COUNTER0);
+}
+
+static struct delay_timer gt_delay_timer = {
+ .read_current_timer = gt_read_long,
+};
+
+static void __init gt_delay_timer_init(void)
+{
+ gt_delay_timer.freq = gt_clk_rate;
+ register_current_timer_delay(&gt_delay_timer);
+}
+
static void __init gt_clocksource_init(void)
{
writel(0, gt_base + GT_CONTROL);
@@ -317,6 +334,7 @@ static void __init global_timer_of_register(struct device_node *np)
/* Immediately configure the timer on the boot CPU */
gt_clocksource_init();
gt_clockevents_init(this_cpu_ptr(gt_evt));
+ gt_delay_timer_init();
return;
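The delay-timer hookup follows ARM's standard pattern: supply an up-counting read callback plus a frequency, and udelay() stops using the calibrated busy loop and polls the hardware counter instead. Schematically (timer_base and COUNTER_OFFSET are placeholders):

	static unsigned long my_read_counter(void)
	{
		return readl_relaxed(timer_base + COUNTER_OFFSET); /* must count up */
	}

	static struct delay_timer my_delay_timer = {
		.read_current_timer = my_read_counter,
	};

	/* Once the clock rate is known: */
	my_delay_timer.freq = rate;
	register_current_timer_delay(&my_delay_timer);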
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ff44082a0827..be09bc0b5e26 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -313,6 +313,7 @@ static struct clock_event_device mct_comp_device = {
.set_state_periodic = mct_set_state_periodic,
.set_state_shutdown = mct_set_state_shutdown,
.set_state_oneshot = mct_set_state_shutdown,
+ .set_state_oneshot_stopped = mct_set_state_shutdown,
.tick_resume = mct_set_state_shutdown,
};
@@ -452,6 +453,7 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
evt->set_state_periodic = set_state_periodic;
evt->set_state_shutdown = set_state_shutdown;
evt->set_state_oneshot = set_state_shutdown;
+ evt->set_state_oneshot_stopped = set_state_shutdown;
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
evt->rating = 450;
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index 8c77a529d0d4..b991b288c803 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -122,23 +122,23 @@ static void __init rk_timer_init(struct device_node *np)
pclk = of_clk_get_by_name(np, "pclk");
if (IS_ERR(pclk)) {
pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
- return;
+ goto out_unmap;
}
if (clk_prepare_enable(pclk)) {
pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
- return;
+ goto out_unmap;
}
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk)) {
pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
- return;
+ goto out_timer_clk;
}
if (clk_prepare_enable(timer_clk)) {
pr_err("Failed to enable timer clock\n");
- return;
+ goto out_timer_clk;
}
bc_timer.freq = clk_get_rate(timer_clk);
@@ -146,7 +146,7 @@ static void __init rk_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
- return;
+ goto out_irq;
}
ce->name = TIMER_NAME;
@@ -164,10 +164,19 @@ static void __init rk_timer_init(struct device_node *np)
ret = request_irq(irq, rk_timer_interrupt, IRQF_TIMER, TIMER_NAME, ce);
if (ret) {
pr_err("Failed to initialize '%s': %d\n", TIMER_NAME, ret);
- return;
+ goto out_irq;
}
clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
+
+ return;
+
+out_irq:
+ clk_disable_unprepare(timer_clk);
+out_timer_clk:
+ clk_disable_unprepare(pclk);
+out_unmap:
+ iounmap(bc_timer.base);
}
CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
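The rk3288 change is a textbook conversion to the kernel's cascading-unwind idiom: each resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. Skeleton form (acquire_*/release_* are placeholders):

	a = acquire_a();
	if (!a)
		return;		/* nothing to undo yet */
	b = acquire_b();
	if (!b)
		goto err_a;
	c = acquire_c();
	if (!c)
		goto err_b;
	return;			/* success: keep everything */
	err_b:
	release_b(b);
	err_a:
	release_a(a);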
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index 2bcecafdeaea..c407c47a3232 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
32, clocksource_mmio_readl_up);
- if (!ret) {
+ if (ret) {
pr_err("%s: registration failed\n", np->full_name);
return;
}
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
index 1316876b487a..daae61e8c820 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/time-lpc32xx.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -43,6 +44,7 @@
struct lpc32xx_clock_event_ddata {
struct clock_event_device evtdev;
void __iomem *base;
+ u32 ticks_per_jiffy;
};
/* Needed for the sched clock */
@@ -53,6 +55,15 @@ static u64 notrace lpc32xx_read_sched_clock(void)
return readl(clocksource_timer_counter);
}
+static unsigned long lpc32xx_delay_timer_read(void)
+{
+ return readl(clocksource_timer_counter);
+}
+
+static struct delay_timer lpc32xx_delay_timer = {
+ .read_current_timer = lpc32xx_delay_timer_read,
+};
+
static int lpc32xx_clkevt_next_event(unsigned long delta,
struct clock_event_device *evtdev)
{
@@ -60,14 +71,13 @@ static int lpc32xx_clkevt_next_event(unsigned long delta,
container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
/*
- * Place timer in reset and program the delta in the prescale
- * register (PR). When the prescale counter matches the value
- * in PR the counter register is incremented and the compare
- * match will trigger. After setup the timer is released from
- * reset and enabled.
+ * Place timer in reset and program the delta in the match
+ * channel 0 (MR0). When the timer counter matches the value
+ * in MR0 register the match will trigger an interrupt.
+ * After setup the timer is released from reset and enabled.
*/
writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
- writel_relaxed(delta, ddata->base + LPC32XX_TIMER_PR);
+ writel_relaxed(delta, ddata->base + LPC32XX_TIMER_MR0);
writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
return 0;
@@ -86,11 +96,39 @@ static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev)
static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev)
{
+ struct lpc32xx_clock_event_ddata *ddata =
+ container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
/*
* When using oneshot, we must also disable the timer
* to wait for the first call to set_next_event().
*/
- return lpc32xx_clkevt_shutdown(evtdev);
+ writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR);
+
+ /* Enable interrupt, reset on match and stop on match (MCR). */
+ writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
+ LPC32XX_TIMER_MCR_MR0S, ddata->base + LPC32XX_TIMER_MCR);
+ return 0;
+}
+
+static int lpc32xx_clkevt_periodic(struct clock_event_device *evtdev)
+{
+ struct lpc32xx_clock_event_ddata *ddata =
+ container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
+ /* Enable interrupt and reset on match. */
+ writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R,
+ ddata->base + LPC32XX_TIMER_MCR);
+
+ /*
+ * Place timer in reset and program the delta in the match
+ * channel 0 (MR0).
+ */
+ writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
+ writel_relaxed(ddata->ticks_per_jiffy, ddata->base + LPC32XX_TIMER_MR0);
+ writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
+
+ return 0;
}
static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
@@ -108,11 +146,13 @@ static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = {
.evtdev = {
.name = "lpc3220 clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.set_next_event = lpc32xx_clkevt_next_event,
.set_state_shutdown = lpc32xx_clkevt_shutdown,
.set_state_oneshot = lpc32xx_clkevt_oneshot,
+ .set_state_periodic = lpc32xx_clkevt_periodic,
},
};
@@ -162,6 +202,8 @@ static int __init lpc32xx_clocksource_init(struct device_node *np)
}
clocksource_timer_counter = base + LPC32XX_TIMER_TC;
+ lpc32xx_delay_timer.freq = rate;
+ register_current_timer_delay(&lpc32xx_delay_timer);
sched_clock_register(lpc32xx_read_sched_clock, 32, rate);
return 0;
@@ -210,18 +252,16 @@ static int __init lpc32xx_clockevent_init(struct device_node *np)
/*
* Disable timer and clear any pending interrupt (IR) on match
- * channel 0 (MR0). Configure a compare match value of 1 on MR0
- * and enable interrupt, reset on match and stop on match (MCR).
+ * channel 0 (MR0). Clear the prescaler as it's not used.
*/
writel_relaxed(0, base + LPC32XX_TIMER_TCR);
+ writel_relaxed(0, base + LPC32XX_TIMER_PR);
writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR);
- writel_relaxed(1, base + LPC32XX_TIMER_MR0);
- writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
- LPC32XX_TIMER_MCR_MR0S, base + LPC32XX_TIMER_MCR);
rate = clk_get_rate(clk);
lpc32xx_clk_event_ddata.base = base;
+ lpc32xx_clk_event_ddata.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev,
rate, 1, -1);
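The periodic reload value is precomputed as DIV_ROUND_CLOSEST(rate, HZ) and written to MR0 whenever the device enters periodic mode. As a worked example with a hypothetical 13 MHz timer clock and HZ=100:

	/* rate = 13000000, HZ = 100  ->  130000 counter ticks per jiffy */
	ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);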
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index 3269d9ef7a18..376e59bc5fa0 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -163,7 +163,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
if (IS_ERR(periph_regs)) {
- pr_err("cannot get peripheral regmap (%lu)\n",
+ pr_err("cannot get peripheral regmap (%ld)\n",
PTR_ERR(periph_regs));
return;
}
@@ -176,7 +176,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
sys_clk = of_clk_get_by_name(node, "sys");
if (IS_ERR(sys_clk)) {
- pr_err("clock get failed (%lu)\n", PTR_ERR(sys_clk));
+ pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
return;
}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index f93511031177..a7f45853c103 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -19,6 +19,7 @@ config CPU_FREQ
if CPU_FREQ
config CPU_FREQ_GOV_COMMON
+ select IRQ_WORK
bool
config CPU_FREQ_BOOST_SW
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 51eef87bbc37..fb5712141040 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -70,6 +70,8 @@ struct acpi_cpufreq_data {
unsigned int cpu_feature;
unsigned int acpi_perf_cpu;
cpumask_var_t freqdomain_cpus;
+ void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
+ u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};
/* acpi_perf_data is a pointer to percpu data. */
@@ -243,125 +245,119 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
}
}
-struct msr_addr {
- u32 reg;
-};
+static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
+{
+ u32 val, dummy;
-struct io_addr {
- u16 port;
- u8 bit_width;
-};
+ rdmsr(MSR_IA32_PERF_CTL, val, dummy);
+ return val;
+}
+
+static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
+{
+ u32 lo, hi;
+
+ rdmsr(MSR_IA32_PERF_CTL, lo, hi);
+ lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
+ wrmsr(MSR_IA32_PERF_CTL, lo, hi);
+}
+
+static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
+{
+ u32 val, dummy;
+
+ rdmsr(MSR_AMD_PERF_CTL, val, dummy);
+ return val;
+}
+
+static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
+{
+ wrmsr(MSR_AMD_PERF_CTL, val, 0);
+}
+
+static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
+{
+ u32 val;
+
+ acpi_os_read_port(reg->address, &val, reg->bit_width);
+ return val;
+}
+
+static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
+{
+ acpi_os_write_port(reg->address, val, reg->bit_width);
+}
struct drv_cmd {
- unsigned int type;
- const struct cpumask *mask;
- union {
- struct msr_addr msr;
- struct io_addr io;
- } addr;
+ struct acpi_pct_register *reg;
u32 val;
+ union {
+ void (*write)(struct acpi_pct_register *reg, u32 val);
+ u32 (*read)(struct acpi_pct_register *reg);
+ } func;
};
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
- u32 h;
- switch (cmd->type) {
- case SYSTEM_INTEL_MSR_CAPABLE:
- case SYSTEM_AMD_MSR_CAPABLE:
- rdmsr(cmd->addr.msr.reg, cmd->val, h);
- break;
- case SYSTEM_IO_CAPABLE:
- acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
- &cmd->val,
- (u32)cmd->addr.io.bit_width);
- break;
- default:
- break;
- }
+ cmd->val = cmd->func.read(cmd->reg);
}
-/* Called via smp_call_function_many(), on the target CPUs */
-static void do_drv_write(void *_cmd)
+static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
- struct drv_cmd *cmd = _cmd;
- u32 lo, hi;
+ struct acpi_processor_performance *perf = to_perf_data(data);
+ struct drv_cmd cmd = {
+ .reg = &perf->control_register,
+ .func.read = data->cpu_freq_read,
+ };
+ int err;
- switch (cmd->type) {
- case SYSTEM_INTEL_MSR_CAPABLE:
- rdmsr(cmd->addr.msr.reg, lo, hi);
- lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
- wrmsr(cmd->addr.msr.reg, lo, hi);
- break;
- case SYSTEM_AMD_MSR_CAPABLE:
- wrmsr(cmd->addr.msr.reg, cmd->val, 0);
- break;
- case SYSTEM_IO_CAPABLE:
- acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
- cmd->val,
- (u32)cmd->addr.io.bit_width);
- break;
- default:
- break;
- }
+ err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
+ WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
+ return cmd.val;
}
-static void drv_read(struct drv_cmd *cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
{
- int err;
- cmd->val = 0;
+ struct drv_cmd *cmd = _cmd;
- err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
- WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
+ cmd->func.write(cmd->reg, cmd->val);
}
-static void drv_write(struct drv_cmd *cmd)
+static void drv_write(struct acpi_cpufreq_data *data,
+ const struct cpumask *mask, u32 val)
{
+ struct acpi_processor_performance *perf = to_perf_data(data);
+ struct drv_cmd cmd = {
+ .reg = &perf->control_register,
+ .val = val,
+ .func.write = data->cpu_freq_write,
+ };
int this_cpu;
this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, cmd->mask))
- do_drv_write(cmd);
- smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+ if (cpumask_test_cpu(this_cpu, mask))
+ do_drv_write(&cmd);
+
+ smp_call_function_many(mask, do_drv_write, &cmd, 1);
put_cpu();
}
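One subtlety in drv_write() above: smp_call_function_many() deliberately skips the calling CPU, so the direct do_drv_write() call is what covers the local CPU when it sits in the mask. The pattern in isolation (fn/arg stand for any IPI-safe callback):

	int cpu = get_cpu();	/* pin to a CPU; disables preemption */

	if (cpumask_test_cpu(cpu, mask))
		fn(arg);				/* local CPU: call directly */
	smp_call_function_many(mask, fn, arg, 1);	/* remote CPUs via IPI, wait = 1 */
	put_cpu();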
-static u32
-get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
+static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
- struct acpi_processor_performance *perf;
- struct drv_cmd cmd;
+ u32 val;
if (unlikely(cpumask_empty(mask)))
return 0;
- switch (data->cpu_feature) {
- case SYSTEM_INTEL_MSR_CAPABLE:
- cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
- break;
- case SYSTEM_AMD_MSR_CAPABLE:
- cmd.type = SYSTEM_AMD_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
- break;
- case SYSTEM_IO_CAPABLE:
- cmd.type = SYSTEM_IO_CAPABLE;
- perf = to_perf_data(data);
- cmd.addr.io.port = perf->control_register.address;
- cmd.addr.io.bit_width = perf->control_register.bit_width;
- break;
- default:
- return 0;
- }
-
- cmd.mask = mask;
- drv_read(&cmd);
+ val = drv_read(data, mask);
- pr_debug("get_cur_val = %u\n", cmd.val);
+ pr_debug("get_cur_val = %u\n", val);
- return cmd.val;
+ return val;
}
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
@@ -416,7 +412,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
{
struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
- struct drv_cmd cmd;
+ const struct cpumask *mask;
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
@@ -434,42 +430,21 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
} else {
pr_debug("Already at target state (P%d)\n",
next_perf_state);
- goto out;
+ return 0;
}
}
- switch (data->cpu_feature) {
- case SYSTEM_INTEL_MSR_CAPABLE:
- cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
- cmd.val = (u32) perf->states[next_perf_state].control;
- break;
- case SYSTEM_AMD_MSR_CAPABLE:
- cmd.type = SYSTEM_AMD_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
- cmd.val = (u32) perf->states[next_perf_state].control;
- break;
- case SYSTEM_IO_CAPABLE:
- cmd.type = SYSTEM_IO_CAPABLE;
- cmd.addr.io.port = perf->control_register.address;
- cmd.addr.io.bit_width = perf->control_register.bit_width;
- cmd.val = (u32) perf->states[next_perf_state].control;
- break;
- default:
- result = -ENODEV;
- goto out;
- }
-
- /* cpufreq holds the hotplug lock, so we are safe from here on */
- if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
- cmd.mask = policy->cpus;
- else
- cmd.mask = cpumask_of(policy->cpu);
+ /*
+ * The core won't allow CPUs to go away until the governor has been
+ * stopped, so we can rely on the stability of policy->cpus.
+ */
+ mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
+ cpumask_of(policy->cpu) : policy->cpus;
- drv_write(&cmd);
+ drv_write(data, mask, perf->states[next_perf_state].control);
if (acpi_pstate_strict) {
- if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
+ if (!check_freqs(mask, data->freq_table[index].frequency,
data)) {
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
@@ -480,7 +455,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
if (!result)
perf->state = next_perf_state;
-out:
return result;
}
@@ -540,8 +514,10 @@ static int boost_notify(struct notifier_block *nb, unsigned long action,
*/
switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
break;
@@ -740,15 +716,21 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
+ data->cpu_freq_read = cpu_freq_read_io;
+ data->cpu_freq_write = cpu_freq_write_io;
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
pr_debug("HARDWARE addr space\n");
if (check_est_cpu(cpu)) {
data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+ data->cpu_freq_read = cpu_freq_read_intel;
+ data->cpu_freq_write = cpu_freq_write_intel;
break;
}
if (check_amd_hwpstate_cpu(cpu)) {
data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+ data->cpu_freq_read = cpu_freq_read_amd;
+ data->cpu_freq_write = cpu_freq_write_amd;
break;
}
result = -ENODEV;
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index f6b79ab0070b..404360cad25c 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -21,7 +21,7 @@
#include <asm/msr.h>
#include <asm/cpufeature.h>
-#include "cpufreq_governor.h"
+#include "cpufreq_ondemand.h"
#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080
#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081
@@ -45,10 +45,10 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
long d_actual, d_reference;
struct msr actual, reference;
struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
- struct dbs_data *od_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *od_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = od_data->tuners;
- struct od_cpu_dbs_info_s *od_info =
- od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
+ struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);
if (!od_info->freq_table)
return freq_next;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 0ca74d070058..5f8dbe640a20 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -4,9 +4,6 @@
* Copyright (C) 2014 Linaro.
* Viresh Kumar <viresh.kumar@linaro.org>
*
- * The OPP code in function set_target() is reused from
- * drivers/cpufreq/omap-cpufreq.c
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -31,9 +28,8 @@
struct private_data {
struct device *cpu_dev;
- struct regulator *cpu_reg;
struct thermal_cooling_device *cdev;
- unsigned int voltage_tolerance; /* in percentage */
+ const char *reg_name;
};
static struct freq_attr *cpufreq_dt_attr[] = {
@@ -44,175 +40,128 @@ static struct freq_attr *cpufreq_dt_attr[] = {
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct dev_pm_opp *opp;
- struct cpufreq_frequency_table *freq_table = policy->freq_table;
- struct clk *cpu_clk = policy->clk;
struct private_data *priv = policy->driver_data;
- struct device *cpu_dev = priv->cpu_dev;
- struct regulator *cpu_reg = priv->cpu_reg;
- unsigned long volt = 0, tol = 0;
- int volt_old = 0;
- unsigned int old_freq, new_freq;
- long freq_Hz, freq_exact;
- int ret;
- freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
- if (freq_Hz <= 0)
- freq_Hz = freq_table[index].frequency * 1000;
-
- freq_exact = freq_Hz;
- new_freq = freq_Hz / 1000;
- old_freq = clk_get_rate(cpu_clk) / 1000;
+ return dev_pm_opp_set_rate(priv->cpu_dev,
+ policy->freq_table[index].frequency * 1000);
+}
- if (!IS_ERR(cpu_reg)) {
- unsigned long opp_freq;
+/*
+ * An earlier version of opp-v1 bindings used to name the regulator
+ * "cpu0-supply", we still need to handle that for backwards compatibility.
+ */
+static const char *find_supply_name(struct device *dev)
+{
+ struct device_node *np;
+ struct property *pp;
+ int cpu = dev->id;
+ const char *name = NULL;
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(cpu_dev, "failed to find OPP for %ld\n",
- freq_Hz);
- return PTR_ERR(opp);
- }
- volt = dev_pm_opp_get_voltage(opp);
- opp_freq = dev_pm_opp_get_freq(opp);
- rcu_read_unlock();
- tol = volt * priv->voltage_tolerance / 100;
- volt_old = regulator_get_voltage(cpu_reg);
- dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
- opp_freq / 1000, volt);
- }
+ np = of_node_get(dev->of_node);
- dev_dbg(cpu_dev, "%u MHz, %d mV --> %u MHz, %ld mV\n",
- old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
- new_freq / 1000, volt ? volt / 1000 : -1);
+ /* This must be valid for sure */
+ if (WARN_ON(!np))
+ return NULL;
- /* scaling up? scale voltage before frequency */
- if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
- ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
- if (ret) {
- dev_err(cpu_dev, "failed to scale voltage up: %d\n",
- ret);
- return ret;
+ /* Try "cpu0" for older DTs */
+ if (!cpu) {
+ pp = of_find_property(np, "cpu0-supply", NULL);
+ if (pp) {
+ name = "cpu0";
+ goto node_put;
}
}
- ret = clk_set_rate(cpu_clk, freq_exact);
- if (ret) {
- dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
- if (!IS_ERR(cpu_reg) && volt_old > 0)
- regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- return ret;
+ pp = of_find_property(np, "cpu-supply", NULL);
+ if (pp) {
+ name = "cpu";
+ goto node_put;
}
- /* scaling down? scale voltage after frequency */
- if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
- ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
- if (ret) {
- dev_err(cpu_dev, "failed to scale voltage down: %d\n",
- ret);
- clk_set_rate(cpu_clk, old_freq * 1000);
- }
- }
-
- return ret;
+ dev_dbg(dev, "no regulator for cpu%d\n", cpu);
+node_put:
+ of_node_put(np);
+ return name;
}
-static int allocate_resources(int cpu, struct device **cdev,
- struct regulator **creg, struct clk **cclk)
+static int resources_available(void)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
struct clk *cpu_clk;
int ret = 0;
- char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
+ const char *name;
- cpu_dev = get_cpu_device(cpu);
+ cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
- pr_err("failed to get cpu%d device\n", cpu);
+ pr_err("failed to get cpu0 device\n");
return -ENODEV;
}
- /* Try "cpu0" for older DTs */
- if (!cpu)
- reg = reg_cpu0;
- else
- reg = reg_cpu;
-
-try_again:
- cpu_reg = regulator_get_optional(cpu_dev, reg);
- ret = PTR_ERR_OR_ZERO(cpu_reg);
+ cpu_clk = clk_get(cpu_dev, NULL);
+ ret = PTR_ERR_OR_ZERO(cpu_clk);
if (ret) {
/*
- * If cpu's regulator supply node is present, but regulator is
- * not yet registered, we should try defering probe.
+ * If cpu's clk node is present, but clock is not yet
+ * registered, we should try deferring probe.
*/
- if (ret == -EPROBE_DEFER) {
- dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
- cpu);
- return ret;
- }
-
- /* Try with "cpu-supply" */
- if (reg == reg_cpu0) {
- reg = reg_cpu;
- goto try_again;
- }
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(cpu_dev, "clock not ready, retry\n");
+ else
+ dev_err(cpu_dev, "failed to get clock: %d\n", ret);
- dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
+ return ret;
}
- cpu_clk = clk_get(cpu_dev, NULL);
- ret = PTR_ERR_OR_ZERO(cpu_clk);
- if (ret) {
- /* put regulator */
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
+ clk_put(cpu_clk);
+
+ name = find_supply_name(cpu_dev);
+ /* Platform doesn't require a regulator */
+ if (!name)
+ return 0;
+ cpu_reg = regulator_get_optional(cpu_dev, name);
+ ret = PTR_ERR_OR_ZERO(cpu_reg);
+ if (ret) {
/*
- * If cpu's clk node is present, but clock is not yet
- * registered, we should try defering probe.
+ * If cpu's regulator supply node is present, but regulator is
+ * not yet registered, we should try deferring probe.
*/
if (ret == -EPROBE_DEFER)
- dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
+ dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
else
- dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
- ret);
- } else {
- *cdev = cpu_dev;
- *creg = cpu_reg;
- *cclk = cpu_clk;
+ dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
+
+ return ret;
}
- return ret;
+ regulator_put(cpu_reg);
+ return 0;
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
- struct device_node *np;
struct private_data *priv;
struct device *cpu_dev;
- struct regulator *cpu_reg;
struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
- unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
- bool need_update = false;
+ bool opp_v1 = false;
+ const char *name;
int ret;
- ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
- if (ret) {
- pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
- return ret;
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
}
- np = of_node_get(cpu_dev->of_node);
- if (!np) {
- dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
- ret = -ENOENT;
- goto out_put_reg_clk;
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR(cpu_clk);
+ dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
+ return ret;
}
/* Get OPP-sharing information from "operating-points-v2" bindings */
@@ -223,9 +172,23 @@ static int cpufreq_init(struct cpufreq_policy *policy)
* finding shared-OPPs for backward compatibility.
*/
if (ret == -ENOENT)
- need_update = true;
+ opp_v1 = true;
else
- goto out_node_put;
+ goto out_put_clk;
+ }
+
+ /*
+ * OPP layer will be taking care of regulators now, but it needs to know
+ * the name of the regulator first.
+ */
+ name = find_supply_name(cpu_dev);
+ if (name) {
+ ret = dev_pm_opp_set_regulator(cpu_dev, name);
+ if (ret) {
+ dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
+ policy->cpu, ret);
+ goto out_put_clk;
+ }
}
/*
@@ -246,12 +209,12 @@ static int cpufreq_init(struct cpufreq_policy *policy)
*/
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
- pr_debug("OPP table is not ready, deferring probe\n");
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
ret = -EPROBE_DEFER;
goto out_free_opp;
}
- if (need_update) {
+ if (opp_v1) {
struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
if (!pd || !pd->independent_clocks)
@@ -265,10 +228,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
-
- of_property_read_u32(np, "clock-latency", &transition_latency);
- } else {
- transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -277,62 +236,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
-
- if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
-
- if (!IS_ERR(cpu_reg)) {
- unsigned long opp_freq = 0;
-
- /*
- * Disable any OPPs where the connected regulator isn't able to
- * provide the specified voltage and record minimum and maximum
- * voltage levels.
- */
- while (1) {
- struct dev_pm_opp *opp;
- unsigned long opp_uV, tol_uV;
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- break;
- }
- opp_uV = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- tol_uV = opp_uV * priv->voltage_tolerance / 100;
- if (regulator_is_supported_voltage(cpu_reg,
- opp_uV - tol_uV,
- opp_uV + tol_uV)) {
- if (opp_uV < min_uV)
- min_uV = opp_uV;
- if (opp_uV > max_uV)
- max_uV = opp_uV;
- } else {
- dev_pm_opp_disable(cpu_dev, opp_freq);
- }
-
- opp_freq++;
- }
-
- ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
- if (ret > 0)
- transition_latency += ret * 1000;
- }
+ priv->reg_name = name;
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
- pr_err("failed to init cpufreq table: %d\n", ret);
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_free_priv;
}
priv->cpu_dev = cpu_dev;
- priv->cpu_reg = cpu_reg;
policy->driver_data = priv;
-
policy->clk = cpu_clk;
rcu_read_lock();
@@ -357,9 +270,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
- policy->cpuinfo.transition_latency = transition_latency;
+ transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+ if (!transition_latency)
+ transition_latency = CPUFREQ_ETERNAL;
- of_node_put(np);
+ policy->cpuinfo.transition_latency = transition_latency;
return 0;
@@ -369,12 +284,10 @@ out_free_priv:
kfree(priv);
out_free_opp:
dev_pm_opp_of_cpumask_remove_table(policy->cpus);
-out_node_put:
- of_node_put(np);
-out_put_reg_clk:
+ if (name)
+ dev_pm_opp_put_regulator(cpu_dev);
+out_put_clk:
clk_put(cpu_clk);
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
return ret;
}
@@ -386,9 +299,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ if (priv->reg_name)
+ dev_pm_opp_put_regulator(priv->cpu_dev);
+
clk_put(policy->clk);
- if (!IS_ERR(priv->cpu_reg))
- regulator_put(priv->cpu_reg);
kfree(priv);
return 0;
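Taken together, init and exit now follow a symmetric contract with the OPP core. A sketch of the sequencing (every call here appears in the patch; only the ordering is being highlighted):

	/* probe/init */
	name = find_supply_name(cpu_dev);		/* NULL: no regulator needed */
	if (name)
		dev_pm_opp_set_regulator(cpu_dev, name); /* before any transition */
	dev_pm_opp_set_rate(cpu_dev, target_hz);	/* OPP core orders V/f steps */

	/* exit mirrors init */
	if (name)
		dev_pm_opp_put_regulator(cpu_dev);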
@@ -441,9 +355,6 @@ static struct cpufreq_driver dt_cpufreq_driver = {
static int dt_cpufreq_probe(struct platform_device *pdev)
{
- struct device *cpu_dev;
- struct regulator *cpu_reg;
- struct clk *cpu_clk;
int ret;
/*
@@ -453,19 +364,15 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
*
* FIXME: Is checking this only for CPU0 sufficient ?
*/
- ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+ ret = resources_available();
if (ret)
return ret;
- clk_put(cpu_clk);
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
-
dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
- dev_err(cpu_dev, "failed register driver: %d\n", ret);
+ dev_err(&pdev->dev, "failed register driver: %d\n", ret);
return ret;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e979ec78b695..c4acfc5273b3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -38,48 +38,10 @@ static inline bool policy_is_inactive(struct cpufreq_policy *policy)
return cpumask_empty(policy->cpus);
}
-static bool suitable_policy(struct cpufreq_policy *policy, bool active)
-{
- return active == !policy_is_inactive(policy);
-}
-
-/* Finds Next Acive/Inactive policy */
-static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
- bool active)
-{
- do {
- /* No more policies in the list */
- if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
- return NULL;
-
- policy = list_next_entry(policy, policy_list);
- } while (!suitable_policy(policy, active));
-
- return policy;
-}
-
-static struct cpufreq_policy *first_policy(bool active)
-{
- struct cpufreq_policy *policy;
-
- /* No policies in the list */
- if (list_empty(&cpufreq_policy_list))
- return NULL;
-
- policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
- policy_list);
-
- if (!suitable_policy(policy, active))
- policy = next_policy(policy, active);
-
- return policy;
-}
-
/* Macros to iterate over CPU policies */
-#define for_each_suitable_policy(__policy, __active) \
- for (__policy = first_policy(__active); \
- __policy; \
- __policy = next_policy(__policy, __active))
+#define for_each_suitable_policy(__policy, __active) \
+ list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
+ if ((__active) == !policy_is_inactive(__policy))
#define for_each_active_policy(__policy) \
for_each_suitable_policy(__policy, true)
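For clarity, here is the expansion of the new iterator at a call site. Note the trailing if: a bare else after the loop body would bind to the macro's condition, so callers should brace anything non-trivial:

	/* for_each_active_policy(policy) body;  expands to: */
	list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
		if (true == !policy_is_inactive(policy))
			body;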
@@ -102,7 +64,6 @@ static LIST_HEAD(cpufreq_governor_list);
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
-DEFINE_MUTEX(cpufreq_governor_lock);
/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
@@ -113,10 +74,9 @@ static inline bool has_target(void)
}
/* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy,
- unsigned int event);
+static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
-static void handle_update(struct work_struct *work);
+static int cpufreq_start_governor(struct cpufreq_policy *policy);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -818,12 +778,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
ssize_t ret;
down_read(&policy->rwsem);
-
- if (fattr->show)
- ret = fattr->show(policy, buf);
- else
- ret = -EIO;
-
+ ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
return ret;
@@ -838,18 +793,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
get_online_cpus();
- if (!cpu_online(policy->cpu))
- goto unlock;
-
- down_write(&policy->rwsem);
-
- if (fattr->store)
+ if (cpu_online(policy->cpu)) {
+ down_write(&policy->rwsem);
ret = fattr->store(policy, buf, count);
- else
- ret = -EIO;
+ up_write(&policy->rwsem);
+ }
- up_write(&policy->rwsem);
-unlock:
put_online_cpus();
return ret;
@@ -959,6 +908,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return cpufreq_add_dev_symlink(policy);
}
+__weak struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return NULL;
+}
+
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
struct cpufreq_governor *gov = NULL;
@@ -968,11 +922,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
/* Update governor of new_policy to the governor used before hotplug */
gov = find_governor(policy->last_governor);
- if (gov)
+ if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
- else
- gov = CPUFREQ_DEFAULT_GOVERNOR;
+ } else {
+ gov = cpufreq_default_governor();
+ if (!gov)
+ return -ENODATA;
+ }
new_policy.governor = gov;
@@ -996,36 +953,42 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (cpumask_test_cpu(cpu, policy->cpus))
return 0;
+ down_write(&policy->rwsem);
if (has_target()) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
pr_err("%s: Failed to stop governor\n", __func__);
- return ret;
+ goto unlock;
}
}
- down_write(&policy->rwsem);
cpumask_set_cpu(cpu, policy->cpus);
- up_write(&policy->rwsem);
if (has_target()) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
- if (!ret)
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
- if (ret) {
+ ret = cpufreq_start_governor(policy);
+ if (ret)
pr_err("%s: Failed to start governor\n", __func__);
- return ret;
- }
}
- return 0;
+unlock:
+ up_write(&policy->rwsem);
+ return ret;
+}
+
+static void handle_update(struct work_struct *work)
+{
+ struct cpufreq_policy *policy =
+ container_of(work, struct cpufreq_policy, update);
+ unsigned int cpu = policy->cpu;
+ pr_debug("handle_update for cpu %u called\n", cpu);
+ cpufreq_update_policy(cpu);
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
struct cpufreq_policy *policy;
+ int ret;
if (WARN_ON(!dev))
return NULL;
@@ -1043,7 +1006,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
goto err_free_rcpumask;
- kobject_init(&policy->kobj, &ktype_cpufreq);
+ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+ cpufreq_global_kobject, "policy%u", cpu);
+ if (ret) {
+ pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+ goto err_free_real_cpus;
+ }
+
INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
@@ -1054,6 +1023,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
policy->cpu = cpu;
return policy;
+err_free_real_cpus:
+ free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
@@ -1158,16 +1129,6 @@ static int cpufreq_online(unsigned int cpu)
cpumask_copy(policy->related_cpus, policy->cpus);
/* Remember CPUs present at the policy creation time. */
cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
-
- /* Name and add the kobject */
- ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
- "policy%u",
- cpumask_first(policy->related_cpus));
- if (ret) {
- pr_err("%s: failed to add policy->kobj: %d\n", __func__,
- ret);
- goto out_exit_policy;
- }
}
/*
@@ -1309,9 +1270,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return ret;
}
-static void cpufreq_offline_prepare(unsigned int cpu)
+static void cpufreq_offline(unsigned int cpu)
{
struct cpufreq_policy *policy;
+ int ret;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1321,13 +1283,13 @@ static void cpufreq_offline_prepare(unsigned int cpu)
return;
}
+ down_write(&policy->rwsem);
if (has_target()) {
- int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret)
pr_err("%s: Failed to stop governor\n", __func__);
}
- down_write(&policy->rwsem);
cpumask_clear_cpu(cpu, policy->cpus);
if (policy_is_inactive(policy)) {
@@ -1340,39 +1302,24 @@ static void cpufreq_offline_prepare(unsigned int cpu)
/* Nominate new CPU */
policy->cpu = cpumask_any(policy->cpus);
}
- up_write(&policy->rwsem);
/* Start governor again for active policy */
if (!policy_is_inactive(policy)) {
if (has_target()) {
- int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
- if (!ret)
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
+ ret = cpufreq_start_governor(policy);
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
- } else if (cpufreq_driver->stop_cpu) {
- cpufreq_driver->stop_cpu(policy);
- }
-}
-static void cpufreq_offline_finish(unsigned int cpu)
-{
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-
- if (!policy) {
- pr_debug("%s: No cpu_data found\n", __func__);
- return;
+ goto unlock;
}
- /* Only proceed for inactive policies */
- if (!policy_is_inactive(policy))
- return;
+ if (cpufreq_driver->stop_cpu)
+ cpufreq_driver->stop_cpu(policy);
/* If cpu is last user of policy, free policy */
if (has_target()) {
- int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
if (ret)
pr_err("%s: Failed to exit governor\n", __func__);
}
@@ -1386,6 +1333,9 @@ static void cpufreq_offline_finish(unsigned int cpu)
cpufreq_driver->exit(policy);
policy->freq_table = NULL;
}
+
+unlock:
+ up_write(&policy->rwsem);
}
/**
@@ -1401,10 +1351,8 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy)
return;
- if (cpu_online(cpu)) {
- cpufreq_offline_prepare(cpu);
- cpufreq_offline_finish(cpu);
- }
+ if (cpu_online(cpu))
+ cpufreq_offline(cpu);
cpumask_clear_cpu(cpu, policy->real_cpus);
remove_cpu_dev_symlink(policy, cpu);
@@ -1413,15 +1361,6 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
cpufreq_policy_free(policy, true);
}
-static void handle_update(struct work_struct *work)
-{
- struct cpufreq_policy *policy =
- container_of(work, struct cpufreq_policy, update);
- unsigned int cpu = policy->cpu;
- pr_debug("handle_update for cpu %u called\n", cpu);
- cpufreq_update_policy(cpu);
-}
-
/**
* cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
* in deep trouble.
@@ -1457,9 +1396,17 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
{
struct cpufreq_policy *policy;
unsigned int ret_freq = 0;
+ unsigned long flags;
- if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
- return cpufreq_driver->get(cpu);
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
+ ret_freq = cpufreq_driver->get(cpu);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return ret_freq;
+ }
+
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
policy = cpufreq_cpu_get(cpu);
if (policy) {
@@ -1540,6 +1487,27 @@ unsigned int cpufreq_get(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get);
+static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
+{
+ unsigned int new_freq;
+
+ if (cpufreq_suspended)
+ return 0;
+
+ new_freq = cpufreq_driver->get(policy->cpu);
+ if (!new_freq)
+ return 0;
+
+ if (!policy->cur) {
+ pr_debug("cpufreq: Driver did not initialize current freq\n");
+ policy->cur = new_freq;
+ } else if (policy->cur != new_freq && has_target()) {
+ cpufreq_out_of_sync(policy, new_freq);
+ }
+
+ return new_freq;
+}
+
static struct subsys_interface cpufreq_interface = {
.name = "cpufreq",
.subsys = &cpu_subsys,
@@ -1584,21 +1552,30 @@ EXPORT_SYMBOL(cpufreq_generic_suspend);
void cpufreq_suspend(void)
{
struct cpufreq_policy *policy;
+ int ret;
if (!cpufreq_driver)
return;
- if (!has_target())
+ if (!has_target() && !cpufreq_driver->suspend)
goto suspend;
pr_debug("%s: Suspending Governors\n", __func__);
for_each_active_policy(policy) {
- if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
- pr_err("%s: Failed to stop governor for policy: %p\n",
- __func__, policy);
- else if (cpufreq_driver->suspend
- && cpufreq_driver->suspend(policy))
+ if (has_target()) {
+ down_write(&policy->rwsem);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ up_write(&policy->rwsem);
+
+ if (ret) {
+ pr_err("%s: Failed to stop governor for policy: %p\n",
+ __func__, policy);
+ continue;
+ }
+ }
+
+ if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
pr_err("%s: Failed to suspend driver: %p\n", __func__,
policy);
}
@@ -1616,37 +1593,32 @@ suspend:
void cpufreq_resume(void)
{
struct cpufreq_policy *policy;
+ int ret;
if (!cpufreq_driver)
return;
cpufreq_suspended = false;
- if (!has_target())
+ if (!has_target() && !cpufreq_driver->resume)
return;
pr_debug("%s: Resuming Governors\n", __func__);
for_each_active_policy(policy) {
- if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+ if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
pr_err("%s: Failed to resume driver: %p\n", __func__,
policy);
- else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
- || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
- pr_err("%s: Failed to start governor for policy: %p\n",
- __func__, policy);
- }
+ } else if (has_target()) {
+ down_write(&policy->rwsem);
+ ret = cpufreq_start_governor(policy);
+ up_write(&policy->rwsem);
- /*
- * schedule call cpufreq_update_policy() for first-online CPU, as that
- * wouldn't be hotplugged-out on suspend. It will verify that the
- * current freq is in sync with what we believe it to be.
- */
- policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
- if (WARN_ON(!policy))
- return;
-
- schedule_work(&policy->update);
+ if (ret)
+ pr_err("%s: Failed to start governor for policy: %p\n",
+ __func__, policy);
+ }
+ }
}
/**
@@ -1846,7 +1818,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int old_target_freq = target_freq;
- int retval = -EINVAL;
+ struct cpufreq_frequency_table *freq_table;
+ int index, retval;
if (cpufreq_disabled())
return -ENODEV;
@@ -1873,34 +1846,28 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
policy->restore_freq = policy->cur;
if (cpufreq_driver->target)
- retval = cpufreq_driver->target(policy, target_freq, relation);
- else if (cpufreq_driver->target_index) {
- struct cpufreq_frequency_table *freq_table;
- int index;
-
- freq_table = cpufreq_frequency_get_table(policy->cpu);
- if (unlikely(!freq_table)) {
- pr_err("%s: Unable to find freq_table\n", __func__);
- goto out;
- }
+ return cpufreq_driver->target(policy, target_freq, relation);
- retval = cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index);
- if (unlikely(retval)) {
- pr_err("%s: Unable to find matching freq\n", __func__);
- goto out;
- }
+ if (!cpufreq_driver->target_index)
+ return -EINVAL;
- if (freq_table[index].frequency == policy->cur) {
- retval = 0;
- goto out;
- }
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!freq_table)) {
+ pr_err("%s: Unable to find freq_table\n", __func__);
+ return -EINVAL;
+ }
- retval = __target_index(policy, freq_table, index);
+ retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &index);
+ if (unlikely(retval)) {
+ pr_err("%s: Unable to find matching freq\n", __func__);
+ return retval;
}
-out:
- return retval;
+ if (freq_table[index].frequency == policy->cur)
+ return 0;
+
+ return __target_index(policy, freq_table, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1920,20 +1887,14 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
-static int __cpufreq_governor(struct cpufreq_policy *policy,
- unsigned int event)
+__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
- int ret;
+ return NULL;
+}
- /* Only must be defined when default governor is known to have latency
- restrictions, like e.g. conservative or ondemand.
- That this is the case is already ensured in Kconfig
- */
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
- struct cpufreq_governor *gov = &cpufreq_gov_performance;
-#else
- struct cpufreq_governor *gov = NULL;
-#endif
+static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
+{
+ int ret;
/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
@@ -1948,12 +1909,14 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
policy->governor->max_transition_latency) {
- if (!gov)
- return -EINVAL;
- else {
+ struct cpufreq_governor *gov = cpufreq_fallback_governor();
+
+ if (gov) {
pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
policy->governor->name, gov->name);
policy->governor = gov;
+ } else {
+ return -EINVAL;
}
}
@@ -1963,21 +1926,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
- mutex_lock(&cpufreq_governor_lock);
- if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
- || (!policy->governor_enabled
- && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
- mutex_unlock(&cpufreq_governor_lock);
- return -EBUSY;
- }
-
- if (event == CPUFREQ_GOV_STOP)
- policy->governor_enabled = false;
- else if (event == CPUFREQ_GOV_START)
- policy->governor_enabled = true;
-
- mutex_unlock(&cpufreq_governor_lock);
-
ret = policy->governor->governor(policy, event);
if (!ret) {
@@ -1985,14 +1933,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->governor->initialized++;
else if (event == CPUFREQ_GOV_POLICY_EXIT)
policy->governor->initialized--;
- } else {
- /* Restore original values */
- mutex_lock(&cpufreq_governor_lock);
- if (event == CPUFREQ_GOV_STOP)
- policy->governor_enabled = true;
- else if (event == CPUFREQ_GOV_START)
- policy->governor_enabled = false;
- mutex_unlock(&cpufreq_governor_lock);
}
if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
@@ -2002,6 +1942,17 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
return ret;
}
+static int cpufreq_start_governor(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
+ cpufreq_update_current_freq(policy);
+
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
+ return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+}
+
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
int err;
@@ -2138,8 +2089,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return cpufreq_driver->setpolicy(new_policy);
}
- if (new_policy->governor == policy->governor)
- goto out;
+ if (new_policy->governor == policy->governor) {
+ pr_debug("cpufreq: governor limits update\n");
+ return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ }
pr_debug("governor switch\n");
@@ -2147,7 +2100,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
old_gov = policy->governor;
/* end old governor */
if (old_gov) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
/* This can happen due to race with other operations */
pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
@@ -2155,10 +2108,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return ret;
}
- up_write(&policy->rwsem);
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
- down_write(&policy->rwsem);
-
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
pr_err("%s: Failed to Exit Governor: %s (%d)\n",
__func__, old_gov->name, ret);
@@ -2168,32 +2118,27 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
/* start new governor */
policy->governor = new_policy->governor;
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
if (!ret) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
- if (!ret)
- goto out;
-
- up_write(&policy->rwsem);
- __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
- down_write(&policy->rwsem);
+ ret = cpufreq_start_governor(policy);
+ if (!ret) {
+ pr_debug("cpufreq: governor change\n");
+ return 0;
+ }
+ cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
}
/* new governor failed, so re-start old one */
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
- if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+ if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
policy->governor = NULL;
else
- __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ cpufreq_start_governor(policy);
}
return ret;
-
- out:
- pr_debug("governor: change or update limits\n");
- return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
/**
@@ -2224,19 +2169,11 @@ int cpufreq_update_policy(unsigned int cpu)
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
- new_policy.cur = cpufreq_driver->get(cpu);
+ new_policy.cur = cpufreq_update_current_freq(policy);
if (WARN_ON(!new_policy.cur)) {
ret = -EIO;
goto unlock;
}
-
- if (!policy->cur) {
- pr_debug("Driver did not initialize current freq\n");
- policy->cur = new_policy.cur;
- } else {
- if (policy->cur != new_policy.cur && has_target())
- cpufreq_out_of_sync(policy, new_policy.cur);
- }
}
ret = cpufreq_set_policy(policy, &new_policy);
@@ -2260,11 +2197,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
- cpufreq_offline_prepare(cpu);
- break;
-
- case CPU_POST_DEAD:
- cpufreq_offline_finish(cpu);
+ cpufreq_offline(cpu);
break;
case CPU_DOWN_FAILED:
@@ -2297,8 +2230,11 @@ static int cpufreq_boost_set_sw(int state)
__func__);
break;
}
+
+ down_write(&policy->rwsem);
policy->user_policy.max = policy->max;
- __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ up_write(&policy->rwsem);
}
}
@@ -2384,7 +2320,7 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
* submitted by the CPU Frequency driver.
*
* Registers a CPU Frequency driver to this core code. This code
- * returns zero on success, -EBUSY when another driver got here first
+ * returns zero on success, -EEXIST when another driver got here first
* (and isn't unregistered in the meantime).
*
*/
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 606ad74abe6e..bf4913f6453b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -14,6 +14,22 @@
#include <linux/slab.h>
#include "cpufreq_governor.h"
+struct cs_policy_dbs_info {
+ struct policy_dbs_info policy_dbs;
+ unsigned int down_skip;
+ unsigned int requested_freq;
+};
+
+static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
+{
+ return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
+}
+
+struct cs_dbs_tuners {
+ unsigned int down_threshold;
+ unsigned int freq_step;
+};
+
/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
@@ -21,21 +37,6 @@
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
-static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
-
-static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event);
-
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_conservative = {
- .name = "conservative",
- .governor = cs_cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
-};
-
static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
struct cpufreq_policy *policy)
{
@@ -57,27 +58,28 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
* Any frequency increase takes it to the maximum frequency. Frequency reduction
* happens at minimum steps of 5% (default) of maximum frequency
*/
-static void cs_check_cpu(int cpu, unsigned int load)
+static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
- struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
- struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
- struct dbs_data *dbs_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int load = dbs_update(policy);
/*
* break out if we 'cannot' reduce the speed as the user might
* want freq_step to be zero
*/
if (cs_tuners->freq_step == 0)
- return;
+ goto out;
/* Check for frequency increase */
- if (load > cs_tuners->up_threshold) {
+ if (load > dbs_data->up_threshold) {
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
if (dbs_info->requested_freq == policy->max)
- return;
+ goto out;
dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
@@ -86,12 +88,12 @@ static void cs_check_cpu(int cpu, unsigned int load)
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_H);
- return;
+ goto out;
}
/* if sampling_down_factor is active break out early */
- if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
- return;
+ if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
+ goto out;
dbs_info->down_skip = 0;
/* Check for frequency decrease */
@@ -101,7 +103,7 @@ static void cs_check_cpu(int cpu, unsigned int load)
* if we cannot reduce the frequency anymore, break out early
*/
if (policy->cur == policy->min)
- return;
+ goto out;
freq_target = get_freq_target(cs_tuners, policy);
if (dbs_info->requested_freq > freq_target)
@@ -111,58 +113,25 @@ static void cs_check_cpu(int cpu, unsigned int load)
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_L);
- return;
}
-}
-
-static unsigned int cs_dbs_timer(struct cpufreq_policy *policy, bool modify_all)
-{
- struct dbs_data *dbs_data = policy->governor_data;
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-
- if (modify_all)
- dbs_check_cpu(dbs_data, policy->cpu);
- return delay_for_sampling_rate(cs_tuners->sampling_rate);
+ out:
+ return dbs_data->sampling_rate;
}
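
Reduced to its core, cs_dbs_timer() implements: jump up by freq_step while load exceeds the up threshold, and step down only after sampling_down_factor consecutive samples below the down threshold. A hedged userspace sketch of that decision rule, with made-up tunable values rather than the kernel's:

#include <stdio.h>

struct cs_state {
	unsigned int down_skip;
	unsigned int requested_freq;
};

/* Illustrative tunables; the kernel reads these from dbs_data/cs_tuners. */
#define UP_THRESHOLD		80
#define DOWN_THRESHOLD		20
#define SAMPLING_DOWN_FACTOR	1
#define FREQ_STEP		100000	/* roughly 5% of a 2 GHz policy */
#define FREQ_MIN		800000
#define FREQ_MAX		2000000

static void cs_sample(struct cs_state *s, unsigned int load)
{
	if (load > UP_THRESHOLD) {
		s->down_skip = 0;
		if (s->requested_freq < FREQ_MAX)
			s->requested_freq += FREQ_STEP;
		if (s->requested_freq > FREQ_MAX)
			s->requested_freq = FREQ_MAX;
		return;
	}
	/* Honor sampling_down_factor before considering a decrease. */
	if (++s->down_skip < SAMPLING_DOWN_FACTOR)
		return;
	s->down_skip = 0;
	if (load < DOWN_THRESHOLD && s->requested_freq > FREQ_MIN) {
		s->requested_freq -= FREQ_STEP;
		if (s->requested_freq < FREQ_MIN)
			s->requested_freq = FREQ_MIN;
	}
}

int main(void)
{
	struct cs_state s = { .requested_freq = FREQ_MIN };
	unsigned int loads[] = { 90, 95, 50, 10, 10 };

	for (unsigned int i = 0; i < sizeof(loads) / sizeof(loads[0]); i++) {
		cs_sample(&s, loads[i]);
		printf("load=%u -> requested_freq=%u\n", loads[i], s.requested_freq);
	}
	return 0;
}
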
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct cpufreq_freqs *freq = data;
- struct cs_cpu_dbs_info_s *dbs_info =
- &per_cpu(cs_cpu_dbs_info, freq->cpu);
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
-
- if (!policy)
- return 0;
-
- /* policy isn't governed by conservative governor */
- if (policy->governor != &cpufreq_gov_conservative)
- return 0;
-
- /*
- * we only care if our internally tracked freq moves outside the 'valid'
- * ranges of frequency available to us otherwise we do not change it
- */
- if (dbs_info->requested_freq > policy->max
- || dbs_info->requested_freq < policy->min)
- dbs_info->requested_freq = freq->new;
-
- return 0;
-}
+ void *data);
static struct notifier_block cs_cpufreq_notifier_block = {
.notifier_call = dbs_cpufreq_notifier,
};
/************************** sysfs interface ************************/
-static struct common_dbs_data cs_dbs_cdata;
+static struct dbs_governor cs_dbs_gov;
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -170,22 +139,7 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- cs_tuners->sampling_down_factor = input;
- return count;
-}
-
-static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
- size_t count)
-{
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- unsigned int input;
- int ret;
- ret = sscanf(buf, "%u", &input);
-
- if (ret != 1)
- return -EINVAL;
-
- cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
+ dbs_data->sampling_down_factor = input;
return count;
}
@@ -200,7 +154,7 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
- cs_tuners->up_threshold = input;
+ dbs_data->up_threshold = input;
return count;
}
@@ -214,7 +168,7 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= cs_tuners->up_threshold)
+ input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@@ -224,8 +178,7 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- unsigned int input, j;
+ unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -235,21 +188,14 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
if (input > 1)
input = 1;
- if (input == cs_tuners->ignore_nice_load) /* nothing to do */
+ if (input == dbs_data->ignore_nice_load) /* nothing to do */
return count;
- cs_tuners->ignore_nice_load = input;
+ dbs_data->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
- for_each_online_cpu(j) {
- struct cs_cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(cs_cpu_dbs_info, j);
- dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->cdbs.prev_cpu_wall, 0);
- if (cs_tuners->ignore_nice_load)
- dbs_info->cdbs.prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
+ gov_update_cpu_data(dbs_data);
+
return count;
}
@@ -275,55 +221,47 @@ static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
return count;
}
-show_store_one(cs, sampling_rate);
-show_store_one(cs, sampling_down_factor);
-show_store_one(cs, up_threshold);
-show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice_load);
-show_store_one(cs, freq_step);
-declare_show_sampling_rate_min(cs);
-
-gov_sys_pol_attr_rw(sampling_rate);
-gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(up_threshold);
-gov_sys_pol_attr_rw(down_threshold);
-gov_sys_pol_attr_rw(ignore_nice_load);
-gov_sys_pol_attr_rw(freq_step);
-gov_sys_pol_attr_ro(sampling_rate_min);
-
-static struct attribute *dbs_attributes_gov_sys[] = {
- &sampling_rate_min_gov_sys.attr,
- &sampling_rate_gov_sys.attr,
- &sampling_down_factor_gov_sys.attr,
- &up_threshold_gov_sys.attr,
- &down_threshold_gov_sys.attr,
- &ignore_nice_load_gov_sys.attr,
- &freq_step_gov_sys.attr,
+gov_show_one_common(sampling_rate);
+gov_show_one_common(sampling_down_factor);
+gov_show_one_common(up_threshold);
+gov_show_one_common(ignore_nice_load);
+gov_show_one_common(min_sampling_rate);
+gov_show_one(cs, down_threshold);
+gov_show_one(cs, freq_step);
+
+gov_attr_rw(sampling_rate);
+gov_attr_rw(sampling_down_factor);
+gov_attr_rw(up_threshold);
+gov_attr_rw(ignore_nice_load);
+gov_attr_ro(min_sampling_rate);
+gov_attr_rw(down_threshold);
+gov_attr_rw(freq_step);
+
+static struct attribute *cs_attributes[] = {
+ &min_sampling_rate.attr,
+ &sampling_rate.attr,
+ &sampling_down_factor.attr,
+ &up_threshold.attr,
+ &down_threshold.attr,
+ &ignore_nice_load.attr,
+ &freq_step.attr,
NULL
};
-static struct attribute_group cs_attr_group_gov_sys = {
- .attrs = dbs_attributes_gov_sys,
- .name = "conservative",
-};
+/************************** sysfs end ************************/
-static struct attribute *dbs_attributes_gov_pol[] = {
- &sampling_rate_min_gov_pol.attr,
- &sampling_rate_gov_pol.attr,
- &sampling_down_factor_gov_pol.attr,
- &up_threshold_gov_pol.attr,
- &down_threshold_gov_pol.attr,
- &ignore_nice_load_gov_pol.attr,
- &freq_step_gov_pol.attr,
- NULL
-};
+static struct policy_dbs_info *cs_alloc(void)
+{
+ struct cs_policy_dbs_info *dbs_info;
-static struct attribute_group cs_attr_group_gov_pol = {
- .attrs = dbs_attributes_gov_pol,
- .name = "conservative",
-};
+ dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
+ return dbs_info ? &dbs_info->policy_dbs : NULL;
+}
-/************************** sysfs end ************************/
+static void cs_free(struct policy_dbs_info *policy_dbs)
+{
+ kfree(to_dbs_info(policy_dbs));
+}
static int cs_init(struct dbs_data *dbs_data, bool notify)
{
@@ -335,11 +273,11 @@ static int cs_init(struct dbs_data *dbs_data, bool notify)
return -ENOMEM;
}
- tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
- tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice_load = 0;
tuners->freq_step = DEF_FREQUENCY_STEP;
+ dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ dbs_data->ignore_nice_load = 0;
dbs_data->tuners = tuners;
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
@@ -361,35 +299,66 @@ static void cs_exit(struct dbs_data *dbs_data, bool notify)
kfree(dbs_data->tuners);
}
-define_get_cpu_dbs_routines(cs_cpu_dbs_info);
+static void cs_start(struct cpufreq_policy *policy)
+{
+ struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->down_skip = 0;
+ dbs_info->requested_freq = policy->cur;
+}
-static struct common_dbs_data cs_dbs_cdata = {
- .governor = GOV_CONSERVATIVE,
- .attr_group_gov_sys = &cs_attr_group_gov_sys,
- .attr_group_gov_pol = &cs_attr_group_gov_pol,
- .get_cpu_cdbs = get_cpu_cdbs,
- .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+static struct dbs_governor cs_dbs_gov = {
+ .gov = {
+ .name = "conservative",
+ .governor = cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+ },
+ .kobj_type = { .default_attrs = cs_attributes },
.gov_dbs_timer = cs_dbs_timer,
- .gov_check_cpu = cs_check_cpu,
+ .alloc = cs_alloc,
+ .free = cs_free,
.init = cs_init,
.exit = cs_exit,
- .mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
+ .start = cs_start,
};
-static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event)
+#define CPU_FREQ_GOV_CONSERVATIVE (&cs_dbs_gov.gov)
+
+static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
{
- return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
+ struct cpufreq_freqs *freq = data;
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
+ struct cs_policy_dbs_info *dbs_info;
+
+ if (!policy)
+ return 0;
+
+ /* policy isn't governed by conservative governor */
+ if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
+ return 0;
+
+ dbs_info = to_dbs_info(policy->governor_data);
+ /*
+ * We only care if our internally tracked freq moves outside the 'valid'
+ * range of frequencies available to us; otherwise we do not change it.
+ */
+ if (dbs_info->requested_freq > policy->max
+ || dbs_info->requested_freq < policy->min)
+ dbs_info->requested_freq = freq->new;
+
+ return 0;
}
static int __init cpufreq_gov_dbs_init(void)
{
- return cpufreq_register_governor(&cpufreq_gov_conservative);
+ return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
- cpufreq_unregister_governor(&cpufreq_gov_conservative);
+ cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
}
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
@@ -399,6 +368,11 @@ MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return CPU_FREQ_GOV_CONSERVATIVE;
+}
+
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e0d111024d48..5f1147fa9239 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -18,95 +18,189 @@
#include <linux/export.h>
#include <linux/kernel_stat.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include "cpufreq_governor.h"
-static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
-{
- if (have_governor_per_policy())
- return dbs_data->cdata->attr_group_gov_pol;
- else
- return dbs_data->cdata->attr_group_gov_sys;
-}
+static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
+
+static DEFINE_MUTEX(gov_dbs_data_mutex);
-void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
+/* Common sysfs tunables */
+/**
+ * store_sampling_rate - update sampling rate effective immediately if needed.
+ *
+ * If the new rate is smaller than the old one, simply updating
+ * dbs_data->sampling_rate might not be appropriate. For example, if the
+ * original sampling_rate was 1 second and the requested new sampling rate
+ * is 10 ms because the user needs an immediate reaction from the ondemand
+ * governor, but is not sure whether a higher frequency will be required,
+ * the governor may change the sampling rate too late, up to 1 second
+ * later. Thus, if we are reducing the sampling rate, we need to make the
+ * new value effective immediately.
+ *
+ * This must be called with dbs_data->mutex held, otherwise traversing
+ * policy_dbs_list isn't safe.
+ */
+ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
- struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- struct cpufreq_policy *policy = cdbs->shared->policy;
- unsigned int sampling_rate;
- unsigned int max_load = 0;
- unsigned int ignore_nice;
- unsigned int j;
+ struct policy_dbs_info *policy_dbs;
+ unsigned int rate;
+ int ret;
+ ret = sscanf(buf, "%u", &rate);
+ if (ret != 1)
+ return -EINVAL;
- if (dbs_data->cdata->governor == GOV_ONDEMAND) {
- struct od_cpu_dbs_info_s *od_dbs_info =
- dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+ dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);
+ /*
+ * We are operating under dbs_data->mutex and so the list and its
+ * entries can't be freed concurrently.
+ */
+ list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ mutex_lock(&policy_dbs->timer_mutex);
/*
- * Sometimes, the ondemand governor uses an additional
- * multiplier to give long delays. So apply this multiplier to
- * the 'sampling_rate', so as to keep the wake-up-from-idle
- * detection logic a bit conservative.
+ * On 32-bit architectures this may race with the
+ * sample_delay_ns read in dbs_update_util_handler(), but that
+ * really doesn't matter. If the read returns a value that's
+ * too big, the sample will be skipped, but the next invocation
+ * of dbs_update_util_handler() (when the update has been
+ * completed) will take a sample.
+ *
+ * If this runs in parallel with dbs_work_handler(), we may end
+ * up overwriting the sample_delay_ns value that it has just
+ * written, but it will be corrected next time a sample is
+ * taken, so it shouldn't be significant.
*/
- sampling_rate = od_tuners->sampling_rate;
- sampling_rate *= od_dbs_info->rate_mult;
+ gov_update_sample_delay(policy_dbs, 0);
+ mutex_unlock(&policy_dbs->timer_mutex);
+ }
- ignore_nice = od_tuners->ignore_nice_load;
- } else {
- sampling_rate = cs_tuners->sampling_rate;
- ignore_nice = cs_tuners->ignore_nice_load;
+ return count;
+}
+EXPORT_SYMBOL_GPL(store_sampling_rate);
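
The essential behavior of store_sampling_rate() is: parse, clamp to the minimum, then zero the pending sample delay so a shorter rate takes effect immediately. A simplified single-policy sketch without locking (names and types here are illustrative, and -1 stands in for the kernel's -EINVAL):

#include <stdio.h>
#include <stdlib.h>

struct dbs { unsigned int sampling_rate, min_sampling_rate; };
struct pol { long long sample_delay_ns; };

static int store_rate(struct dbs *d, struct pol *p, const char *buf)
{
	char *end;
	unsigned long rate = strtoul(buf, &end, 10);

	if (end == buf)
		return -1;	/* parse failure */
	d->sampling_rate = rate > d->min_sampling_rate ? rate : d->min_sampling_rate;
	/* Force the next sample to be taken right away. */
	p->sample_delay_ns = 0;
	return 0;
}

int main(void)
{
	struct dbs d = { .sampling_rate = 1000000, .min_sampling_rate = 10000 };
	struct pol p = { .sample_delay_ns = 1000000000LL };

	if (!store_rate(&d, &p, "10000"))
		printf("rate=%u delay_ns=%lld\n", d.sampling_rate, p.sample_delay_ns);
	return 0;
}
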
+
+/**
+ * gov_update_cpu_data - Update CPU load data.
+ * @dbs_data: Top-level governor data pointer.
+ *
+ * Update CPU load data for all CPUs in the domain governed by @dbs_data
+ * (that may be a single policy or a bunch of them if governor tunables are
+ * system-wide).
+ *
+ * Call under the @dbs_data mutex.
+ */
+void gov_update_cpu_data(struct dbs_data *dbs_data)
+{
+ struct policy_dbs_info *policy_dbs;
+
+ list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ unsigned int j;
+
+ for_each_cpu(j, policy_dbs->policy->cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
+
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
+ dbs_data->io_is_busy);
+ if (dbs_data->ignore_nice_load)
+ j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ }
}
+}
+EXPORT_SYMBOL_GPL(gov_update_cpu_data);
+
+static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
+{
+ return container_of(kobj, struct dbs_data, kobj);
+}
+
+static inline struct governor_attr *to_gov_attr(struct attribute *attr)
+{
+ return container_of(attr, struct governor_attr, attr);
+}
+
+static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dbs_data *dbs_data = to_dbs_data(kobj);
+ struct governor_attr *gattr = to_gov_attr(attr);
+
+ return gattr->show(dbs_data, buf);
+}
+
+static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(kobj);
+ struct governor_attr *gattr = to_gov_attr(attr);
+ int ret = -EBUSY;
+
+ mutex_lock(&dbs_data->mutex);
+
+ if (dbs_data->usage_count)
+ ret = gattr->store(dbs_data, buf, count);
+
+ mutex_unlock(&dbs_data->mutex);
+
+ return ret;
+}
+
+/*
+ * Sysfs Ops for accessing governor attributes.
+ *
+ * All show/store invocations for governor-specific sysfs attributes go
+ * through the show/store callbacks below, which in turn invoke the
+ * attribute-specific callback.
+ */
+static const struct sysfs_ops governor_sysfs_ops = {
+ .show = governor_show,
+ .store = governor_store,
+};
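
This is the standard kobject attribute-dispatch idiom: one generic sysfs_ops entry point recovers both containing objects with container_of() and forwards to the per-attribute callback. The same shape in a self-contained userspace sketch (the kobject/attribute stand-ins are deliberately simplified):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative stand-ins for struct kobject and struct attribute. */
struct kobject { const char *name; };
struct attribute { const char *name; };

struct dbs_data {
	struct kobject kobj;
	unsigned int up_threshold;
};

struct governor_attr {
	struct attribute attr;
	int (*show)(struct dbs_data *dbs_data, char *buf, size_t len);
};

/* Generic entry point: recover both containers, then dispatch. */
static int governor_show(struct kobject *kobj, struct attribute *attr,
			 char *buf, size_t len)
{
	struct dbs_data *dbs_data = container_of(kobj, struct dbs_data, kobj);
	struct governor_attr *gattr = container_of(attr, struct governor_attr, attr);

	return gattr->show(dbs_data, buf, len);
}

static int show_up_threshold(struct dbs_data *dbs_data, char *buf, size_t len)
{
	return snprintf(buf, len, "%u\n", dbs_data->up_threshold);
}

static struct governor_attr up_threshold = {
	.attr = { .name = "up_threshold" },
	.show = show_up_threshold,
};

int main(void)
{
	struct dbs_data d = { .kobj = { "conservative" }, .up_threshold = 80 };
	char buf[16];

	governor_show(&d.kobj, &up_threshold.attr, buf, sizeof(buf));
	fputs(buf, stdout);	/* prints "80" */
	return 0;
}
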
+
+unsigned int dbs_update(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ unsigned int ignore_nice = dbs_data->ignore_nice_load;
+ unsigned int max_load = 0;
+ unsigned int sampling_rate, io_busy, j;
+
+ /*
+ * Sometimes governors may use an additional multiplier to increase
+ * sample delays temporarily. Apply that multiplier to sampling_rate
+ * so as to keep the wake-up-from-idle detection logic a bit
+ * conservative.
+ */
+ sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
+ /*
+ * For the purpose of ondemand, waiting for disk IO is an indication
+ * that the CPU is performance-critical, not that the system is actually
+ * idle, so in that case do not add the iowait time to the CPU idle time.
+ */
+ io_busy = dbs_data->io_is_busy;
/* Get Absolute Load */
for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info *j_cdbs;
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
u64 cur_wall_time, cur_idle_time;
unsigned int idle_time, wall_time;
unsigned int load;
- int io_busy = 0;
- j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);
-
- /*
- * For the purpose of ondemand, waiting for disk IO is
- * an indication that you're performance critical, and
- * not that the system is actually idle. So do not add
- * the iowait time to the cpu idle time.
- */
- if (dbs_data->cdata->governor == GOV_ONDEMAND)
- io_busy = od_tuners->io_is_busy;
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
- wall_time = (unsigned int)
- (cur_wall_time - j_cdbs->prev_cpu_wall);
+ wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
j_cdbs->prev_cpu_wall = cur_wall_time;
- if (cur_idle_time < j_cdbs->prev_cpu_idle)
- cur_idle_time = j_cdbs->prev_cpu_idle;
-
- idle_time = (unsigned int)
- (cur_idle_time - j_cdbs->prev_cpu_idle);
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
- u64 cur_nice;
- unsigned long cur_nice_jiffies;
-
- cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
- cdbs->prev_cpu_nice;
- /*
- * Assumption: nice time between sampling periods will
- * be less than 2^32 jiffies for 32 bit sys
- */
- cur_nice_jiffies = (unsigned long)
- cputime64_to_jiffies64(cur_nice);
+ u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- cdbs->prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- idle_time += jiffies_to_usecs(cur_nice_jiffies);
+ idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
+ j_cdbs->prev_cpu_nice = cur_nice;
}
if (unlikely(!wall_time || wall_time < idle_time))
@@ -128,10 +222,10 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
* dropped down. So we perform the copy only once, upon the
* first wake-up from idle.)
*
- * Detecting this situation is easy: the governor's deferrable
- * timer would not have fired during CPU-idle periods. Hence
- * an unusually large 'wall_time' (as compared to the sampling
- * rate) indicates this scenario.
+ * Detecting this situation is easy: the governor's utilization
+ * update handler would not have run during CPU-idle periods.
+ * Hence, an unusually large 'wall_time' (as compared to the
+ * sampling rate) indicates this scenario.
*
* prev_load can be zero in two cases and we must recalculate it
* for both cases:
@@ -156,222 +250,224 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
if (load > max_load)
max_load = load;
}
-
- dbs_data->cdata->gov_check_cpu(cpu, max_load);
+ return max_load;
}
-EXPORT_SYMBOL_GPL(dbs_check_cpu);
+EXPORT_SYMBOL_GPL(dbs_update);
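
Stripped of the bookkeeping, dbs_update() computes per-CPU load as the busy fraction of the wall time elapsed since the previous sample. The core arithmetic in isolation (microsecond values chosen purely for illustration):

#include <stdio.h>

/* Compute load%% from wall/idle counters sampled twice, dbs_update() style. */
static unsigned int load_percent(unsigned long long prev_wall,
				 unsigned long long cur_wall,
				 unsigned long long prev_idle,
				 unsigned long long cur_idle)
{
	unsigned int wall_time = (unsigned int)(cur_wall - prev_wall);
	unsigned int idle_time = (unsigned int)(cur_idle - prev_idle);

	/* Unusable sample; the kernel skips it and keeps the previous load. */
	if (!wall_time || wall_time < idle_time)
		return 0;

	return 100 * (wall_time - idle_time) / wall_time;
}

int main(void)
{
	/* 10 ms window, 2.5 ms idle -> 75% load. */
	printf("load=%u%%\n", load_percent(0, 10000, 0, 2500));
	return 0;
}
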
-void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay)
+static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
+ unsigned int delay_us)
{
- struct dbs_data *dbs_data = policy->governor_data;
- struct cpu_dbs_info *cdbs;
+ struct cpufreq_policy *policy = policy_dbs->policy;
int cpu;
+ gov_update_sample_delay(policy_dbs, delay_us);
+ policy_dbs->last_sample_time = 0;
+
for_each_cpu(cpu, policy->cpus) {
- cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
- cdbs->timer.expires = jiffies + delay;
- add_timer_on(&cdbs->timer, cpu);
+ struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
+
+ cpufreq_set_update_util_data(cpu, &cdbs->update_util);
}
}
-EXPORT_SYMBOL_GPL(gov_add_timers);
-static inline void gov_cancel_timers(struct cpufreq_policy *policy)
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
- struct dbs_data *dbs_data = policy->governor_data;
- struct cpu_dbs_info *cdbs;
int i;
- for_each_cpu(i, policy->cpus) {
- cdbs = dbs_data->cdata->get_cpu_cdbs(i);
- del_timer_sync(&cdbs->timer);
- }
-}
+ for_each_cpu(i, policy->cpus)
+ cpufreq_set_update_util_data(i, NULL);
-void gov_cancel_work(struct cpu_common_dbs_info *shared)
-{
- /* Tell dbs_timer_handler() to skip queuing up work items. */
- atomic_inc(&shared->skip_work);
- /*
- * If dbs_timer_handler() is already running, it may not notice the
- * incremented skip_work, so wait for it to complete to prevent its work
- * item from being queued up after the cancel_work_sync() below.
- */
- gov_cancel_timers(shared->policy);
- /*
- * In case dbs_timer_handler() managed to run and spawn a work item
- * before the timers have been canceled, wait for that work item to
- * complete and then cancel all of the timers set up by it. If
- * dbs_timer_handler() runs again at that point, it will see the
- * positive value of skip_work and won't spawn any more work items.
- */
- cancel_work_sync(&shared->work);
- gov_cancel_timers(shared->policy);
- atomic_set(&shared->skip_work, 0);
+ synchronize_sched();
}
-EXPORT_SYMBOL_GPL(gov_cancel_work);
-/* Will return if we need to evaluate cpu load again or not */
-static bool need_load_eval(struct cpu_common_dbs_info *shared,
- unsigned int sampling_rate)
+static void gov_cancel_work(struct cpufreq_policy *policy)
{
- if (policy_is_shared(shared->policy)) {
- ktime_t time_now = ktime_get();
- s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
-
- /* Do nothing if we recently have sampled */
- if (delta_us < (s64)(sampling_rate / 2))
- return false;
- else
- shared->time_stamp = time_now;
- }
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
- return true;
+ gov_clear_update_util(policy_dbs->policy);
+ irq_work_sync(&policy_dbs->irq_work);
+ cancel_work_sync(&policy_dbs->work);
+ atomic_set(&policy_dbs->work_count, 0);
+ policy_dbs->work_in_progress = false;
}
static void dbs_work_handler(struct work_struct *work)
{
- struct cpu_common_dbs_info *shared = container_of(work, struct
- cpu_common_dbs_info, work);
+ struct policy_dbs_info *policy_dbs;
struct cpufreq_policy *policy;
- struct dbs_data *dbs_data;
- unsigned int sampling_rate, delay;
- bool eval_load;
-
- policy = shared->policy;
- dbs_data = policy->governor_data;
-
- /* Kill all timers */
- gov_cancel_timers(policy);
+ struct dbs_governor *gov;
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ policy_dbs = container_of(work, struct policy_dbs_info, work);
+ policy = policy_dbs->policy;
+ gov = dbs_governor_of(policy);
- sampling_rate = cs_tuners->sampling_rate;
- } else {
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-
- sampling_rate = od_tuners->sampling_rate;
- }
-
- eval_load = need_load_eval(shared, sampling_rate);
+ /*
+ * Make sure that neither cpufreq_governor_limits() is evaluating load
+ * nor the ondemand governor is updating the sampling rate in parallel.
+ */
+ mutex_lock(&policy_dbs->timer_mutex);
+ gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
+ mutex_unlock(&policy_dbs->timer_mutex);
+ /* Allow the utilization update handler to queue up more work. */
+ atomic_set(&policy_dbs->work_count, 0);
/*
- * Make sure cpufreq_governor_limits() isn't evaluating load in
- * parallel.
+ * If the update below is reordered with respect to the sample delay
+ * modification, the utilization update handler may end up using a stale
+ * sample delay value.
*/
- mutex_lock(&shared->timer_mutex);
- delay = dbs_data->cdata->gov_dbs_timer(policy, eval_load);
- mutex_unlock(&shared->timer_mutex);
+ smp_wmb();
+ policy_dbs->work_in_progress = false;
+}
- atomic_dec(&shared->skip_work);
+static void dbs_irq_work(struct irq_work *irq_work)
+{
+ struct policy_dbs_info *policy_dbs;
- gov_add_timers(policy, delay);
+ policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
+ schedule_work_on(smp_processor_id(), &policy_dbs->work);
}
-static void dbs_timer_handler(unsigned long data)
+static void dbs_update_util_handler(struct update_util_data *data, u64 time,
+ unsigned long util, unsigned long max)
{
- struct cpu_dbs_info *cdbs = (struct cpu_dbs_info *)data;
- struct cpu_common_dbs_info *shared = cdbs->shared;
+ struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
+ struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
+ u64 delta_ns, lst;
/*
- * Timer handler may not be allowed to queue the work at the moment,
- * because:
- * - Another timer handler has done that
- * - We are stopping the governor
- * - Or we are updating the sampling rate of the ondemand governor
+ * The work may not be allowed to be queued up right now.
+ * Possible reasons:
+ * - Work has already been queued up or is in progress.
+ * - It is too early (too little time from the previous sample).
*/
- if (atomic_inc_return(&shared->skip_work) > 1)
- atomic_dec(&shared->skip_work);
- else
- queue_work(system_wq, &shared->work);
-}
+ if (policy_dbs->work_in_progress)
+ return;
-static void set_sampling_rate(struct dbs_data *dbs_data,
- unsigned int sampling_rate)
-{
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- cs_tuners->sampling_rate = sampling_rate;
- } else {
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- od_tuners->sampling_rate = sampling_rate;
+ /*
+ * If the reads below are reordered before the check above, the value
+ * of sample_delay_ns used in the computation may be stale.
+ */
+ smp_rmb();
+ lst = READ_ONCE(policy_dbs->last_sample_time);
+ delta_ns = time - lst;
+ if ((s64)delta_ns < policy_dbs->sample_delay_ns)
+ return;
+
+ /*
+ * If the policy is not shared, the irq_work may be queued up right away
+ * at this point. Otherwise, we need to ensure that only one of the
+ * CPUs sharing the policy will do that.
+ */
+ if (policy_dbs->is_shared) {
+ if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
+ return;
+
+ /*
+ * If another CPU updated last_sample_time in the meantime, we
+ * shouldn't be here, so clear the work counter and bail out.
+ */
+ if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
+ atomic_set(&policy_dbs->work_count, 0);
+ return;
+ }
}
+
+ policy_dbs->last_sample_time = time;
+ policy_dbs->work_in_progress = true;
+ irq_work_queue(&policy_dbs->irq_work);
}
-static int alloc_common_dbs_info(struct cpufreq_policy *policy,
- struct common_dbs_data *cdata)
+static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
+ struct dbs_governor *gov)
{
- struct cpu_common_dbs_info *shared;
+ struct policy_dbs_info *policy_dbs;
int j;
- /* Allocate memory for the common information for policy->cpus */
- shared = kzalloc(sizeof(*shared), GFP_KERNEL);
- if (!shared)
- return -ENOMEM;
+ /* Allocate memory for per-policy governor data. */
+ policy_dbs = gov->alloc();
+ if (!policy_dbs)
+ return NULL;
- /* Set shared for all CPUs, online+offline */
- for_each_cpu(j, policy->related_cpus)
- cdata->get_cpu_cdbs(j)->shared = shared;
+ policy_dbs->policy = policy;
+ mutex_init(&policy_dbs->timer_mutex);
+ atomic_set(&policy_dbs->work_count, 0);
+ init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
+ INIT_WORK(&policy_dbs->work, dbs_work_handler);
- mutex_init(&shared->timer_mutex);
- atomic_set(&shared->skip_work, 0);
- INIT_WORK(&shared->work, dbs_work_handler);
- return 0;
+ /* Set policy_dbs for all CPUs, online+offline */
+ for_each_cpu(j, policy->related_cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
+
+ j_cdbs->policy_dbs = policy_dbs;
+ j_cdbs->update_util.func = dbs_update_util_handler;
+ }
+ return policy_dbs;
}
-static void free_common_dbs_info(struct cpufreq_policy *policy,
- struct common_dbs_data *cdata)
+static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
+ struct dbs_governor *gov)
{
- struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
- struct cpu_common_dbs_info *shared = cdbs->shared;
int j;
- mutex_destroy(&shared->timer_mutex);
+ mutex_destroy(&policy_dbs->timer_mutex);
- for_each_cpu(j, policy->cpus)
- cdata->get_cpu_cdbs(j)->shared = NULL;
+ for_each_cpu(j, policy_dbs->policy->related_cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
- kfree(shared);
+ j_cdbs->policy_dbs = NULL;
+ j_cdbs->update_util.func = NULL;
+ }
+ gov->free(policy_dbs);
}
-static int cpufreq_governor_init(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data,
- struct common_dbs_data *cdata)
+static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
+ struct dbs_governor *gov = dbs_governor_of(policy);
+ struct dbs_data *dbs_data;
+ struct policy_dbs_info *policy_dbs;
unsigned int latency;
- int ret;
+ int ret = 0;
/* State should be equivalent to EXIT */
if (policy->governor_data)
return -EBUSY;
- if (dbs_data) {
- if (WARN_ON(have_governor_per_policy()))
- return -EINVAL;
+ policy_dbs = alloc_policy_dbs_info(policy, gov);
+ if (!policy_dbs)
+ return -ENOMEM;
- ret = alloc_common_dbs_info(policy, cdata);
- if (ret)
- return ret;
+ /* Protect gov->gdbs_data against concurrent updates. */
+ mutex_lock(&gov_dbs_data_mutex);
+ dbs_data = gov->gdbs_data;
+ if (dbs_data) {
+ if (WARN_ON(have_governor_per_policy())) {
+ ret = -EINVAL;
+ goto free_policy_dbs_info;
+ }
+ policy_dbs->dbs_data = dbs_data;
+ policy->governor_data = policy_dbs;
+
+ mutex_lock(&dbs_data->mutex);
dbs_data->usage_count++;
- policy->governor_data = dbs_data;
- return 0;
+ list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
+ mutex_unlock(&dbs_data->mutex);
+ goto out;
}
dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
- if (!dbs_data)
- return -ENOMEM;
-
- ret = alloc_common_dbs_info(policy, cdata);
- if (ret)
- goto free_dbs_data;
+ if (!dbs_data) {
+ ret = -ENOMEM;
+ goto free_policy_dbs_info;
+ }
- dbs_data->cdata = cdata;
- dbs_data->usage_count = 1;
+ INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
+ mutex_init(&dbs_data->mutex);
- ret = cdata->init(dbs_data, !policy->governor->initialized);
+ ret = gov->init(dbs_data, !policy->governor->initialized);
if (ret)
- goto free_common_dbs_info;
+ goto free_policy_dbs_info;
/* policy latency is in ns. Convert it to us first */
latency = policy->cpuinfo.transition_latency / 1000;
@@ -381,216 +477,156 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
/* Bring kernel and HW constraints together */
dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
MIN_LATENCY_MULTIPLIER * latency);
- set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
- latency * LATENCY_MULTIPLIER));
+ dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
+ LATENCY_MULTIPLIER * latency);
if (!have_governor_per_policy())
- cdata->gdbs_data = dbs_data;
+ gov->gdbs_data = dbs_data;
- policy->governor_data = dbs_data;
+ policy->governor_data = policy_dbs;
- ret = sysfs_create_group(get_governor_parent_kobj(policy),
- get_sysfs_attr(dbs_data));
- if (ret)
- goto reset_gdbs_data;
+ policy_dbs->dbs_data = dbs_data;
+ dbs_data->usage_count = 1;
+ list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
- return 0;
+ gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
+ ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
+ get_governor_parent_kobj(policy),
+ "%s", gov->gov.name);
+ if (!ret)
+ goto out;
+
+ /* Failure, so roll back. */
+ pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);
-reset_gdbs_data:
policy->governor_data = NULL;
if (!have_governor_per_policy())
- cdata->gdbs_data = NULL;
- cdata->exit(dbs_data, !policy->governor->initialized);
-free_common_dbs_info:
- free_common_dbs_info(policy, cdata);
-free_dbs_data:
+ gov->gdbs_data = NULL;
+ gov->exit(dbs_data, !policy->governor->initialized);
kfree(dbs_data);
+
+free_policy_dbs_info:
+ free_policy_dbs_info(policy_dbs, gov);
+
+out:
+ mutex_unlock(&gov_dbs_data_mutex);
return ret;
}
-static int cpufreq_governor_exit(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
- struct common_dbs_data *cdata = dbs_data->cdata;
- struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+ struct dbs_governor *gov = dbs_governor_of(policy);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ int count;
- /* State should be equivalent to INIT */
- if (!cdbs->shared || cdbs->shared->policy)
- return -EBUSY;
+ /* Protect gov->gdbs_data against concurrent updates. */
+ mutex_lock(&gov_dbs_data_mutex);
+
+ mutex_lock(&dbs_data->mutex);
+ list_del(&policy_dbs->list);
+ count = --dbs_data->usage_count;
+ mutex_unlock(&dbs_data->mutex);
- if (!--dbs_data->usage_count) {
- sysfs_remove_group(get_governor_parent_kobj(policy),
- get_sysfs_attr(dbs_data));
+ if (!count) {
+ kobject_put(&dbs_data->kobj);
policy->governor_data = NULL;
if (!have_governor_per_policy())
- cdata->gdbs_data = NULL;
+ gov->gdbs_data = NULL;
- cdata->exit(dbs_data, policy->governor->initialized == 1);
+ gov->exit(dbs_data, policy->governor->initialized == 1);
+ mutex_destroy(&dbs_data->mutex);
kfree(dbs_data);
} else {
policy->governor_data = NULL;
}
- free_common_dbs_info(policy, cdata);
+ free_policy_dbs_info(policy_dbs, gov);
+
+ mutex_unlock(&gov_dbs_data_mutex);
return 0;
}
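
The exit path is a classic last-user-frees pattern: the usage count is decremented under the mutex and only the caller that observes zero tears the object down. Sketched with pthreads (error handling trimmed, names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct shared {
	pthread_mutex_t lock;
	int usage_count;
};

/* Returns the object if the caller was the last user and must free it. */
static struct shared *put_shared(struct shared *s)
{
	int count;

	pthread_mutex_lock(&s->lock);
	count = --s->usage_count;
	pthread_mutex_unlock(&s->lock);

	return count ? NULL : s;
}

int main(void)
{
	struct shared *s = malloc(sizeof(*s));

	pthread_mutex_init(&s->lock, NULL);
	s->usage_count = 2;

	printf("first put frees: %s\n", put_shared(s) ? "yes" : "no");
	struct shared *last = put_shared(s);
	printf("second put frees: %s\n", last ? "yes" : "no");
	if (last) {
		pthread_mutex_destroy(&last->lock);
		free(last);
	}
	return 0;
}
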
-static int cpufreq_governor_start(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
- struct common_dbs_data *cdata = dbs_data->cdata;
- unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
- struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
- struct cpu_common_dbs_info *shared = cdbs->shared;
- int io_busy = 0;
+ struct dbs_governor *gov = dbs_governor_of(policy);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ unsigned int sampling_rate, ignore_nice, j;
+ unsigned int io_busy;
if (!policy->cur)
return -EINVAL;
- /* State should be equivalent to INIT */
- if (!shared || shared->policy)
- return -EBUSY;
+ policy_dbs->is_shared = policy_is_shared(policy);
+ policy_dbs->rate_mult = 1;
- if (cdata->governor == GOV_CONSERVATIVE) {
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-
- sampling_rate = cs_tuners->sampling_rate;
- ignore_nice = cs_tuners->ignore_nice_load;
- } else {
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-
- sampling_rate = od_tuners->sampling_rate;
- ignore_nice = od_tuners->ignore_nice_load;
- io_busy = od_tuners->io_is_busy;
- }
-
- shared->policy = policy;
- shared->time_stamp = ktime_get();
+ sampling_rate = dbs_data->sampling_rate;
+ ignore_nice = dbs_data->ignore_nice_load;
+ io_busy = dbs_data->io_is_busy;
for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
unsigned int prev_load;
- j_cdbs->prev_cpu_idle =
- get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
- prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
- j_cdbs->prev_cpu_idle);
- j_cdbs->prev_load = 100 * prev_load /
- (unsigned int)j_cdbs->prev_cpu_wall;
+ prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
+ j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;
if (ignore_nice)
j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
-
- __setup_timer(&j_cdbs->timer, dbs_timer_handler,
- (unsigned long)j_cdbs,
- TIMER_DEFERRABLE | TIMER_IRQSAFE);
}
- if (cdata->governor == GOV_CONSERVATIVE) {
- struct cs_cpu_dbs_info_s *cs_dbs_info =
- cdata->get_cpu_dbs_info_s(cpu);
-
- cs_dbs_info->down_skip = 0;
- cs_dbs_info->requested_freq = policy->cur;
- } else {
- struct od_ops *od_ops = cdata->gov_ops;
- struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);
-
- od_dbs_info->rate_mult = 1;
- od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
- od_ops->powersave_bias_init_cpu(cpu);
- }
+ gov->start(policy);
- gov_add_timers(policy, delay_for_sampling_rate(sampling_rate));
+ gov_set_update_util(policy_dbs, sampling_rate);
return 0;
}
-static int cpufreq_governor_stop(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
- struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
- struct cpu_common_dbs_info *shared = cdbs->shared;
-
- /* State should be equivalent to START */
- if (!shared || !shared->policy)
- return -EBUSY;
-
- gov_cancel_work(shared);
- shared->policy = NULL;
-
+ gov_cancel_work(policy);
return 0;
}
-static int cpufreq_governor_limits(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
- struct common_dbs_data *cdata = dbs_data->cdata;
- unsigned int cpu = policy->cpu;
- struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
- /* State should be equivalent to START */
- if (!cdbs->shared || !cdbs->shared->policy)
- return -EBUSY;
+ mutex_lock(&policy_dbs->timer_mutex);
+
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+
+ gov_update_sample_delay(policy_dbs, 0);
- mutex_lock(&cdbs->shared->timer_mutex);
- if (policy->max < cdbs->shared->policy->cur)
- __cpufreq_driver_target(cdbs->shared->policy, policy->max,
- CPUFREQ_RELATION_H);
- else if (policy->min > cdbs->shared->policy->cur)
- __cpufreq_driver_target(cdbs->shared->policy, policy->min,
- CPUFREQ_RELATION_L);
- dbs_check_cpu(dbs_data, cpu);
- mutex_unlock(&cdbs->shared->timer_mutex);
+ mutex_unlock(&policy_dbs->timer_mutex);
return 0;
}
-int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- struct common_dbs_data *cdata, unsigned int event)
+int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
- struct dbs_data *dbs_data;
- int ret;
-
- /* Lock governor to block concurrent initialization of governor */
- mutex_lock(&cdata->mutex);
-
- if (have_governor_per_policy())
- dbs_data = policy->governor_data;
- else
- dbs_data = cdata->gdbs_data;
-
- if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
- ret = -EINVAL;
- goto unlock;
- }
-
- switch (event) {
- case CPUFREQ_GOV_POLICY_INIT:
- ret = cpufreq_governor_init(policy, dbs_data, cdata);
- break;
- case CPUFREQ_GOV_POLICY_EXIT:
- ret = cpufreq_governor_exit(policy, dbs_data);
- break;
- case CPUFREQ_GOV_START:
- ret = cpufreq_governor_start(policy, dbs_data);
- break;
- case CPUFREQ_GOV_STOP:
- ret = cpufreq_governor_stop(policy, dbs_data);
- break;
- case CPUFREQ_GOV_LIMITS:
- ret = cpufreq_governor_limits(policy, dbs_data);
- break;
- default:
- ret = -EINVAL;
+ if (event == CPUFREQ_GOV_POLICY_INIT) {
+ return cpufreq_governor_init(policy);
+ } else if (policy->governor_data) {
+ switch (event) {
+ case CPUFREQ_GOV_POLICY_EXIT:
+ return cpufreq_governor_exit(policy);
+ case CPUFREQ_GOV_START:
+ return cpufreq_governor_start(policy);
+ case CPUFREQ_GOV_STOP:
+ return cpufreq_governor_stop(policy);
+ case CPUFREQ_GOV_LIMITS:
+ return cpufreq_governor_limits(policy);
+ }
}
-
-unlock:
- mutex_unlock(&cdata->mutex);
-
- return ret;
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 91e767a058a7..61ff82fe0613 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -18,6 +18,7 @@
#define _CPUFREQ_GOVERNOR_H
#include <linux/atomic.h>
+#include <linux/irq_work.h>
#include <linux/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
@@ -41,96 +42,68 @@
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
/*
- * Macro for creating governors sysfs routines
- *
- * - gov_sys: One governor instance per whole system
- * - gov_pol: One governor instance per policy
+ * Abbreviations:
+ * dbs: used as a short form for demand-based switching. It helps to keep
+ * variable names smaller and simpler.
+ * cdbs: common dbs
+ * od_*: On-demand governor
+ * cs_*: Conservative governor
*/
-/* Create attributes */
-#define gov_sys_attr_ro(_name) \
-static struct global_attr _name##_gov_sys = \
-__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
-
-#define gov_sys_attr_rw(_name) \
-static struct global_attr _name##_gov_sys = \
-__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
-
-#define gov_pol_attr_ro(_name) \
-static struct freq_attr _name##_gov_pol = \
-__ATTR(_name, 0444, show_##_name##_gov_pol, NULL)
-
-#define gov_pol_attr_rw(_name) \
-static struct freq_attr _name##_gov_pol = \
-__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+/* Governor demand based switching data (per-policy or global). */
+struct dbs_data {
+ int usage_count;
+ void *tuners;
+ unsigned int min_sampling_rate;
+ unsigned int ignore_nice_load;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int io_is_busy;
-#define gov_sys_pol_attr_rw(_name) \
- gov_sys_attr_rw(_name); \
- gov_pol_attr_rw(_name)
+ struct kobject kobj;
+ struct list_head policy_dbs_list;
+ /*
+ * Protects governor tunables against concurrent updates from sysfs,
+ * and also protects policy_dbs_list and usage_count.
+ */
+ struct mutex mutex;
+};
-#define gov_sys_pol_attr_ro(_name) \
- gov_sys_attr_ro(_name); \
- gov_pol_attr_ro(_name)
+/* Governor's specific attributes */
+struct dbs_data;
+struct governor_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
+ ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
+ size_t count);
+};
-/* Create show/store routines */
-#define show_one(_gov, file_name) \
-static ssize_t show_##file_name##_gov_sys \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
+#define gov_show_one(_gov, file_name) \
+static ssize_t show_##file_name \
+(struct dbs_data *dbs_data, char *buf) \
{ \
- struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
- return sprintf(buf, "%u\n", tuners->file_name); \
-} \
- \
-static ssize_t show_##file_name##_gov_pol \
-(struct cpufreq_policy *policy, char *buf) \
-{ \
- struct dbs_data *dbs_data = policy->governor_data; \
struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
return sprintf(buf, "%u\n", tuners->file_name); \
}
-#define store_one(_gov, file_name) \
-static ssize_t store_##file_name##_gov_sys \
-(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
-{ \
- struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
- return store_##file_name(dbs_data, buf, count); \
-} \
- \
-static ssize_t store_##file_name##_gov_pol \
-(struct cpufreq_policy *policy, const char *buf, size_t count) \
+#define gov_show_one_common(file_name) \
+static ssize_t show_##file_name \
+(struct dbs_data *dbs_data, char *buf) \
{ \
- struct dbs_data *dbs_data = policy->governor_data; \
- return store_##file_name(dbs_data, buf, count); \
+ return sprintf(buf, "%u\n", dbs_data->file_name); \
}
-#define show_store_one(_gov, file_name) \
-show_one(_gov, file_name); \
-store_one(_gov, file_name)
+#define gov_attr_ro(_name) \
+static struct governor_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
-/* create helper routines */
-#define define_get_cpu_dbs_routines(_dbs_info) \
-static struct cpu_dbs_info *get_cpu_cdbs(int cpu) \
-{ \
- return &per_cpu(_dbs_info, cpu).cdbs; \
-} \
- \
-static void *get_cpu_dbs_info_s(int cpu) \
-{ \
- return &per_cpu(_dbs_info, cpu); \
-}
-
-/*
- * Abbreviations:
- * dbs: used as a shortform for demand based switching It helps to keep variable
- * names smaller, simpler
- * cdbs: common dbs
- * od_*: On-demand governor
- * cs_*: Conservative governor
- */
+#define gov_attr_rw(_name) \
+static struct governor_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
/* Common to all CPUs of a policy */
-struct cpu_common_dbs_info {
+struct policy_dbs_info {
struct cpufreq_policy *policy;
/*
* Per policy mutex that serializes load evaluation from limit-change
@@ -138,11 +111,27 @@ struct cpu_common_dbs_info {
*/
struct mutex timer_mutex;
- ktime_t time_stamp;
- atomic_t skip_work;
+ u64 last_sample_time;
+ s64 sample_delay_ns;
+ atomic_t work_count;
+ struct irq_work irq_work;
struct work_struct work;
+ /* dbs_data may be shared between multiple policy objects */
+ struct dbs_data *dbs_data;
+ struct list_head list;
+ /* Multiplier for increasing sample delay temporarily. */
+ unsigned int rate_mult;
+ /* Status indicators */
+ bool is_shared; /* This object is used by multiple CPUs */
+ bool work_in_progress; /* Work is being queued up or in progress */
};
+static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
+ unsigned int delay_us)
+{
+ policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
+}
+
/* Per cpu structures */
struct cpu_dbs_info {
u64 prev_cpu_idle;
@@ -155,54 +144,14 @@ struct cpu_dbs_info {
* wake-up from idle.
*/
unsigned int prev_load;
- struct timer_list timer;
- struct cpu_common_dbs_info *shared;
-};
-
-struct od_cpu_dbs_info_s {
- struct cpu_dbs_info cdbs;
- struct cpufreq_frequency_table *freq_table;
- unsigned int freq_lo;
- unsigned int freq_lo_jiffies;
- unsigned int freq_hi_jiffies;
- unsigned int rate_mult;
- unsigned int sample_type:1;
-};
-
-struct cs_cpu_dbs_info_s {
- struct cpu_dbs_info cdbs;
- unsigned int down_skip;
- unsigned int requested_freq;
-};
-
-/* Per policy Governors sysfs tunables */
-struct od_dbs_tuners {
- unsigned int ignore_nice_load;
- unsigned int sampling_rate;
- unsigned int sampling_down_factor;
- unsigned int up_threshold;
- unsigned int powersave_bias;
- unsigned int io_is_busy;
-};
-
-struct cs_dbs_tuners {
- unsigned int ignore_nice_load;
- unsigned int sampling_rate;
- unsigned int sampling_down_factor;
- unsigned int up_threshold;
- unsigned int down_threshold;
- unsigned int freq_step;
+ struct update_util_data update_util;
+ struct policy_dbs_info *policy_dbs;
};
/* Common Governor data across policies */
-struct dbs_data;
-struct common_dbs_data {
- /* Common across governors */
- #define GOV_ONDEMAND 0
- #define GOV_CONSERVATIVE 1
- int governor;
- struct attribute_group *attr_group_gov_sys; /* one governor - system */
- struct attribute_group *attr_group_gov_pol; /* one governor - policy */
+struct dbs_governor {
+ struct cpufreq_governor gov;
+ struct kobj_type kobj_type;
/*
* Common data for platforms that don't set
@@ -210,74 +159,32 @@ struct common_dbs_data {
*/
struct dbs_data *gdbs_data;
- struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
- void *(*get_cpu_dbs_info_s)(int cpu);
- unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy,
- bool modify_all);
- void (*gov_check_cpu)(int cpu, unsigned int load);
+ unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
+ struct policy_dbs_info *(*alloc)(void);
+ void (*free)(struct policy_dbs_info *policy_dbs);
int (*init)(struct dbs_data *dbs_data, bool notify);
void (*exit)(struct dbs_data *dbs_data, bool notify);
-
- /* Governor specific ops, see below */
- void *gov_ops;
-
- /*
- * Protects governor's data (struct dbs_data and struct common_dbs_data)
- */
- struct mutex mutex;
+ void (*start)(struct cpufreq_policy *policy);
};
-/* Governor Per policy data */
-struct dbs_data {
- struct common_dbs_data *cdata;
- unsigned int min_sampling_rate;
- int usage_count;
- void *tuners;
-};
+static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
+{
+ return container_of(policy->governor, struct dbs_governor, gov);
+}
-/* Governor specific ops, will be passed to dbs_data->gov_ops */
+/* Governor specific operations */
struct od_ops {
- void (*powersave_bias_init_cpu)(int cpu);
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation);
- void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
};
-static inline int delay_for_sampling_rate(unsigned int sampling_rate)
-{
- int delay = usecs_to_jiffies(sampling_rate);
-
- /* We want all CPUs to do sampling nearly on same jiffy */
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
-
- return delay;
-}
-
-#define declare_show_sampling_rate_min(_gov) \
-static ssize_t show_sampling_rate_min_gov_sys \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
-{ \
- struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
- return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
-} \
- \
-static ssize_t show_sampling_rate_min_gov_pol \
-(struct cpufreq_policy *policy, char *buf) \
-{ \
- struct dbs_data *dbs_data = policy->governor_data; \
- return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
-}
-
-extern struct mutex cpufreq_governor_lock;
-
-void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay);
-void gov_cancel_work(struct cpu_common_dbs_info *shared);
-void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
-int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- struct common_dbs_data *cdata, unsigned int event);
+unsigned int dbs_update(struct cpufreq_policy *policy);
+int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
+ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ size_t count);
+void gov_update_cpu_data(struct dbs_data *dbs_data);
#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index eae51070c034..acd80272ded6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -16,7 +16,8 @@
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
-#include "cpufreq_governor.h"
+
+#include "cpufreq_ondemand.h"
/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
@@ -27,24 +28,10 @@
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
-static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
-
static struct od_ops od_ops;
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static struct cpufreq_governor cpufreq_gov_ondemand;
-#endif
-
static unsigned int default_powersave_bias;
-static void ondemand_powersave_bias_init_cpu(int cpu)
-{
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-
- dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
- dbs_info->freq_lo = 0;
-}
-
/*
* Not all CPUs want IO time to be accounted as busy; this depends on how
* efficient idling at a higher frequency/voltage is.
@@ -70,8 +57,8 @@ static int should_io_be_busy(void)
/*
* Find right freq to be set now with powersave_bias on.
- * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
- * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
+ * Returns the freq_hi to be used right now and will set freq_hi_delay_us,
+ * freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
*/
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation)
@@ -79,15 +66,15 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_req, freq_reduc, freq_avg;
unsigned int freq_hi, freq_lo;
unsigned int index = 0;
- unsigned int jiffies_total, jiffies_hi, jiffies_lo;
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
- policy->cpu);
- struct dbs_data *dbs_data = policy->governor_data;
+ unsigned int delay_hi_us;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
if (!dbs_info->freq_table) {
dbs_info->freq_lo = 0;
- dbs_info->freq_lo_jiffies = 0;
+ dbs_info->freq_lo_delay_us = 0;
return freq_next;
}
@@ -110,31 +97,30 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
/* Find out how long we have to be in hi and lo freqs */
if (freq_hi == freq_lo) {
dbs_info->freq_lo = 0;
- dbs_info->freq_lo_jiffies = 0;
+ dbs_info->freq_lo_delay_us = 0;
return freq_lo;
}
- jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
- jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
- jiffies_hi += ((freq_hi - freq_lo) / 2);
- jiffies_hi /= (freq_hi - freq_lo);
- jiffies_lo = jiffies_total - jiffies_hi;
+ delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
+ delay_hi_us += (freq_hi - freq_lo) / 2;
+ delay_hi_us /= freq_hi - freq_lo;
+ dbs_info->freq_hi_delay_us = delay_hi_us;
dbs_info->freq_lo = freq_lo;
- dbs_info->freq_lo_jiffies = jiffies_lo;
- dbs_info->freq_hi_jiffies = jiffies_hi;
+ dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
return freq_hi;
}
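
The time split computed above is a linear interpolation: the fraction of the sampling period spent at freq_hi is (freq_avg - freq_lo) / (freq_hi - freq_lo), with half the divisor added first so the integer division rounds to nearest. Worked through with example numbers (kHz frequencies and a microsecond period, chosen for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int freq_lo = 1000000, freq_hi = 1400000, freq_avg = 1300000;
	unsigned int sampling_rate = 10000;	/* us */
	unsigned int delay_hi_us;

	/* Fraction of the period to spend at freq_hi, rounded to nearest. */
	delay_hi_us = (freq_avg - freq_lo) * sampling_rate;
	delay_hi_us += (freq_hi - freq_lo) / 2;
	delay_hi_us /= freq_hi - freq_lo;

	/* (1300000-1000000)/(1400000-1000000) = 75% -> 7500 us at freq_hi. */
	printf("hi for %u us, lo for %u us\n",
	       delay_hi_us, sampling_rate - delay_hi_us);
	return 0;
}
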
-static void ondemand_powersave_bias_init(void)
+static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
- int i;
- for_each_online_cpu(i) {
- ondemand_powersave_bias_init_cpu(i);
- }
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->freq_table = cpufreq_frequency_get_table(policy->cpu);
+ dbs_info->freq_lo = 0;
}
static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
- struct dbs_data *dbs_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
if (od_tuners->powersave_bias)
@@ -152,21 +138,21 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
* (default), then we try to increase frequency. Else, we adjust the frequency
* proportional to load.
*/
-static void od_check_cpu(int cpu, unsigned int load)
+static void od_update(struct cpufreq_policy *policy)
{
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
- struct dbs_data *dbs_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int load = dbs_update(policy);
dbs_info->freq_lo = 0;
/* Check for frequency increase */
- if (load > od_tuners->up_threshold) {
+ if (load > dbs_data->up_threshold) {
/* If switching to max speed, apply sampling_down_factor */
if (policy->cur < policy->max)
- dbs_info->rate_mult =
- od_tuners->sampling_down_factor;
+ policy_dbs->rate_mult = dbs_data->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
} else {
/* Calculate the next frequency proportional to load */
@@ -177,177 +163,70 @@ static void od_check_cpu(int cpu, unsigned int load)
freq_next = min_f + load * (max_f - min_f) / 100;
/* No longer fully busy, reset rate_mult */
- dbs_info->rate_mult = 1;
+ policy_dbs->rate_mult = 1;
- if (!od_tuners->powersave_bias) {
- __cpufreq_driver_target(policy, freq_next,
- CPUFREQ_RELATION_C);
- return;
- }
+ if (od_tuners->powersave_bias)
+ freq_next = od_ops.powersave_bias_target(policy,
+ freq_next,
+ CPUFREQ_RELATION_L);
- freq_next = od_ops.powersave_bias_target(policy, freq_next,
- CPUFREQ_RELATION_L);
__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
}
}
-static unsigned int od_dbs_timer(struct cpufreq_policy *policy, bool modify_all)
+static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
{
- struct dbs_data *dbs_data = policy->governor_data;
- unsigned int cpu = policy->cpu;
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
- cpu);
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- int delay = 0, sample_type = dbs_info->sample_type;
-
- if (!modify_all)
- goto max_delay;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ int sample_type = dbs_info->sample_type;
/* Common NORMAL_SAMPLE setup */
dbs_info->sample_type = OD_NORMAL_SAMPLE;
- if (sample_type == OD_SUB_SAMPLE) {
- delay = dbs_info->freq_lo_jiffies;
+ /*
+ * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
+ * it in that case.
+ */
+ if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
__cpufreq_driver_target(policy, dbs_info->freq_lo,
CPUFREQ_RELATION_H);
- } else {
- dbs_check_cpu(dbs_data, cpu);
- if (dbs_info->freq_lo) {
- /* Setup timer for SUB_SAMPLE */
- dbs_info->sample_type = OD_SUB_SAMPLE;
- delay = dbs_info->freq_hi_jiffies;
- }
+ return dbs_info->freq_lo_delay_us;
}
-max_delay:
- if (!delay)
- delay = delay_for_sampling_rate(od_tuners->sampling_rate
- * dbs_info->rate_mult);
-
- return delay;
-}
-
-/************************** sysfs interface ************************/
-static struct common_dbs_data od_dbs_cdata;
-
-/**
- * update_sampling_rate - update sampling rate effective immediately if needed.
- * @new_rate: new sampling rate
- *
- * If new rate is smaller than the old, simply updating
- * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
- * original sampling_rate was 1 second and the requested new sampling rate is 10
- * ms because the user needs immediate reaction from ondemand governor, but not
- * sure if higher frequency will be required or not, then, the governor may
- * change the sampling rate too late; up to 1 second later. Thus, if we are
- * reducing the sampling rate, we need to make the new value effective
- * immediately.
- */
-static void update_sampling_rate(struct dbs_data *dbs_data,
- unsigned int new_rate)
-{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- struct cpumask cpumask;
- int cpu;
-
- od_tuners->sampling_rate = new_rate = max(new_rate,
- dbs_data->min_sampling_rate);
-
- /*
- * Lock governor so that governor start/stop can't execute in parallel.
- */
- mutex_lock(&od_dbs_cdata.mutex);
-
- cpumask_copy(&cpumask, cpu_online_mask);
-
- for_each_cpu(cpu, &cpumask) {
- struct cpufreq_policy *policy;
- struct od_cpu_dbs_info_s *dbs_info;
- struct cpu_dbs_info *cdbs;
- struct cpu_common_dbs_info *shared;
- unsigned long next_sampling, appointed_at;
-
- dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- cdbs = &dbs_info->cdbs;
- shared = cdbs->shared;
-
- /*
- * A valid shared and shared->policy means governor hasn't
- * stopped or exited yet.
- */
- if (!shared || !shared->policy)
- continue;
-
- policy = shared->policy;
-
- /* clear all CPUs of this policy */
- cpumask_andnot(&cpumask, &cpumask, policy->cpus);
+ od_update(policy);
- /*
- * Update sampling rate for CPUs whose policy is governed by
- * dbs_data. In case of governor_per_policy, only a single
- * policy will be governed by dbs_data, otherwise there can be
- * multiple policies that are governed by the same dbs_data.
- */
- if (dbs_data != policy->governor_data)
- continue;
-
- /*
- * Checking this for any CPU should be fine, timers for all of
- * them are scheduled together.
- */
- next_sampling = jiffies + usecs_to_jiffies(new_rate);
- appointed_at = dbs_info->cdbs.timer.expires;
-
- if (time_before(next_sampling, appointed_at)) {
- gov_cancel_work(shared);
- gov_add_timers(policy, usecs_to_jiffies(new_rate));
-
- }
+ if (dbs_info->freq_lo) {
+ /* Setup timer for SUB_SAMPLE */
+ dbs_info->sample_type = OD_SUB_SAMPLE;
+ return dbs_info->freq_hi_delay_us;
}
- mutex_unlock(&od_dbs_cdata.mutex);
+ return dbs_data->sampling_rate * policy_dbs->rate_mult;
}
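With the return value now a delay in microseconds rather than jiffies, one two-phase cycle looks like this (reusing the hypothetical 6000/4000 us split from the example above):

	NORMAL sample:	od_update() picks a two-phase target, records freq_lo,
			sample_type becomes OD_SUB_SAMPLE, and the function
			returns freq_hi_delay_us (6000 us at freq_hi);
	SUB sample:	the CPU is dropped to freq_lo and the function returns
			freq_lo_delay_us (4000 us), after which the cycle
			restarts with a NORMAL sample.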
-static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
- size_t count)
-{
- unsigned int input;
- int ret;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
- return -EINVAL;
-
- update_sampling_rate(dbs_data, input);
- return count;
-}
+/************************** sysfs interface ************************/
+static struct dbs_governor od_dbs_gov;
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
size_t count)
{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- unsigned int j;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- od_tuners->io_is_busy = !!input;
+ dbs_data->io_is_busy = !!input;
/* we need to re-evaluate prev_cpu_idle */
- for_each_online_cpu(j) {
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
- j);
- dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
- }
+ gov_update_cpu_data(dbs_data);
+
return count;
}
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
size_t count)
{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -357,40 +236,43 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
return -EINVAL;
}
- od_tuners->up_threshold = input;
+ dbs_data->up_threshold = input;
return count;
}
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- unsigned int input, j;
+ struct policy_dbs_info *policy_dbs;
+ unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- od_tuners->sampling_down_factor = input;
+
+ dbs_data->sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
- for_each_online_cpu(j) {
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
- j);
- dbs_info->rate_mult = 1;
+ list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ /*
+ * Doing this without locking might lead to using different
+ * rate_mult values in od_update() and od_dbs_timer().
+ */
+ mutex_lock(&policy_dbs->timer_mutex);
+ policy_dbs->rate_mult = 1;
+ mutex_unlock(&policy_dbs->timer_mutex);
}
+
return count;
}
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- unsigned int j;
-
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -398,22 +280,14 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
if (input > 1)
input = 1;
- if (input == od_tuners->ignore_nice_load) { /* nothing to do */
+ if (input == dbs_data->ignore_nice_load) { /* nothing to do */
return count;
}
- od_tuners->ignore_nice_load = input;
+ dbs_data->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
- for_each_online_cpu(j) {
- struct od_cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(od_cpu_dbs_info, j);
- dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
- if (od_tuners->ignore_nice_load)
- dbs_info->cdbs.prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ gov_update_cpu_data(dbs_data);
- }
return count;
}
@@ -421,6 +295,7 @@ static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
size_t count)
{
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct policy_dbs_info *policy_dbs;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -432,59 +307,54 @@ static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
input = 1000;
od_tuners->powersave_bias = input;
- ondemand_powersave_bias_init();
+
+ list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list)
+ ondemand_powersave_bias_init(policy_dbs->policy);
+
return count;
}
-show_store_one(od, sampling_rate);
-show_store_one(od, io_is_busy);
-show_store_one(od, up_threshold);
-show_store_one(od, sampling_down_factor);
-show_store_one(od, ignore_nice_load);
-show_store_one(od, powersave_bias);
-declare_show_sampling_rate_min(od);
-
-gov_sys_pol_attr_rw(sampling_rate);
-gov_sys_pol_attr_rw(io_is_busy);
-gov_sys_pol_attr_rw(up_threshold);
-gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(ignore_nice_load);
-gov_sys_pol_attr_rw(powersave_bias);
-gov_sys_pol_attr_ro(sampling_rate_min);
-
-static struct attribute *dbs_attributes_gov_sys[] = {
- &sampling_rate_min_gov_sys.attr,
- &sampling_rate_gov_sys.attr,
- &up_threshold_gov_sys.attr,
- &sampling_down_factor_gov_sys.attr,
- &ignore_nice_load_gov_sys.attr,
- &powersave_bias_gov_sys.attr,
- &io_is_busy_gov_sys.attr,
+gov_show_one_common(sampling_rate);
+gov_show_one_common(up_threshold);
+gov_show_one_common(sampling_down_factor);
+gov_show_one_common(ignore_nice_load);
+gov_show_one_common(min_sampling_rate);
+gov_show_one_common(io_is_busy);
+gov_show_one(od, powersave_bias);
+
+gov_attr_rw(sampling_rate);
+gov_attr_rw(io_is_busy);
+gov_attr_rw(up_threshold);
+gov_attr_rw(sampling_down_factor);
+gov_attr_rw(ignore_nice_load);
+gov_attr_rw(powersave_bias);
+gov_attr_ro(min_sampling_rate);
+
+static struct attribute *od_attributes[] = {
+ &min_sampling_rate.attr,
+ &sampling_rate.attr,
+ &up_threshold.attr,
+ &sampling_down_factor.attr,
+ &ignore_nice_load.attr,
+ &powersave_bias.attr,
+ &io_is_busy.attr,
NULL
};
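The gov_show_one*/gov_attr_* helpers used above come from cpufreq_governor.h in this series; as a rough sketch (not the verbatim header), the attribute macros expand along these lines:

#define gov_attr_ro(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0444, show_##_name, NULL)

#define gov_attr_rw(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)

so each gov_attr_rw(up_threshold) above wires store_up_threshold() and a generated show routine into a single sysfs attribute.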
-static struct attribute_group od_attr_group_gov_sys = {
- .attrs = dbs_attributes_gov_sys,
- .name = "ondemand",
-};
+/************************** sysfs end ************************/
-static struct attribute *dbs_attributes_gov_pol[] = {
- &sampling_rate_min_gov_pol.attr,
- &sampling_rate_gov_pol.attr,
- &up_threshold_gov_pol.attr,
- &sampling_down_factor_gov_pol.attr,
- &ignore_nice_load_gov_pol.attr,
- &powersave_bias_gov_pol.attr,
- &io_is_busy_gov_pol.attr,
- NULL
-};
+static struct policy_dbs_info *od_alloc(void)
+{
+ struct od_policy_dbs_info *dbs_info;
-static struct attribute_group od_attr_group_gov_pol = {
- .attrs = dbs_attributes_gov_pol,
- .name = "ondemand",
-};
+ dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
+ return dbs_info ? &dbs_info->policy_dbs : NULL;
+}
-/************************** sysfs end ************************/
+static void od_free(struct policy_dbs_info *policy_dbs)
+{
+ kfree(to_dbs_info(policy_dbs));
+}
static int od_init(struct dbs_data *dbs_data, bool notify)
{
@@ -503,7 +373,7 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
put_cpu();
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
- tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+ dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
@@ -511,17 +381,17 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
*/
dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
- tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
/* For correct statistics, we need 10 ticks for each measure */
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
jiffies_to_usecs(10);
}
- tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice_load = 0;
+ dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- tuners->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
@@ -532,33 +402,38 @@ static void od_exit(struct dbs_data *dbs_data, bool notify)
kfree(dbs_data->tuners);
}
-define_get_cpu_dbs_routines(od_cpu_dbs_info);
+static void od_start(struct cpufreq_policy *policy)
+{
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ ondemand_powersave_bias_init(policy);
+}
static struct od_ops od_ops = {
- .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
.powersave_bias_target = generic_powersave_bias_target,
- .freq_increase = dbs_freq_increase,
};
-static struct common_dbs_data od_dbs_cdata = {
- .governor = GOV_ONDEMAND,
- .attr_group_gov_sys = &od_attr_group_gov_sys,
- .attr_group_gov_pol = &od_attr_group_gov_pol,
- .get_cpu_cdbs = get_cpu_cdbs,
- .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+static struct dbs_governor od_dbs_gov = {
+ .gov = {
+ .name = "ondemand",
+ .governor = cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+ },
+ .kobj_type = { .default_attrs = od_attributes },
.gov_dbs_timer = od_dbs_timer,
- .gov_check_cpu = od_check_cpu,
- .gov_ops = &od_ops,
+ .alloc = od_alloc,
+ .free = od_free,
.init = od_init,
.exit = od_exit,
- .mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
+ .start = od_start,
};
+#define CPU_FREQ_GOV_ONDEMAND (&od_dbs_gov.gov)
+
static void od_set_powersave_bias(unsigned int powersave_bias)
{
- struct cpufreq_policy *policy;
- struct dbs_data *dbs_data;
- struct od_dbs_tuners *od_tuners;
unsigned int cpu;
cpumask_t done;
@@ -567,22 +442,25 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
get_online_cpus();
for_each_online_cpu(cpu) {
- struct cpu_common_dbs_info *shared;
+ struct cpufreq_policy *policy;
+ struct policy_dbs_info *policy_dbs;
+ struct dbs_data *dbs_data;
+ struct od_dbs_tuners *od_tuners;
if (cpumask_test_cpu(cpu, &done))
continue;
- shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
- if (!shared)
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy || policy->governor != CPU_FREQ_GOV_ONDEMAND)
continue;
- policy = shared->policy;
- cpumask_or(&done, &done, policy->cpus);
-
- if (policy->governor != &cpufreq_gov_ondemand)
+ policy_dbs = policy->governor_data;
+ if (!policy_dbs)
continue;
- dbs_data = policy->governor_data;
+ cpumask_or(&done, &done, policy->cpus);
+
+ dbs_data = policy_dbs->dbs_data;
od_tuners = dbs_data->tuners;
od_tuners->powersave_bias = default_powersave_bias;
}
@@ -605,30 +483,14 @@ void od_unregister_powersave_bias_handler(void)
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
-static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event)
-{
- return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
-}
-
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
-#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
- .name = "ondemand",
- .governor = od_cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
-};
-
static int __init cpufreq_gov_dbs_init(void)
{
- return cpufreq_register_governor(&cpufreq_gov_ondemand);
+ return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
- cpufreq_unregister_governor(&cpufreq_gov_ondemand);
+ cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
@@ -638,6 +500,11 @@ MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return CPU_FREQ_GOV_ONDEMAND;
+}
+
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
new file mode 100644
index 000000000000..f0121db3cd9e
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -0,0 +1,30 @@
+/*
+ * Header file for CPUFreq ondemand governor and related code.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cpufreq_governor.h"
+
+struct od_policy_dbs_info {
+ struct policy_dbs_info policy_dbs;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int freq_lo;
+ unsigned int freq_lo_delay_us;
+ unsigned int freq_hi_delay_us;
+ unsigned int sample_type:1;
+};
+
+static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
+{
+ return container_of(policy_dbs, struct od_policy_dbs_info, policy_dbs);
+}
+
+struct od_dbs_tuners {
+ unsigned int powersave_bias;
+};
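to_dbs_info() depends on policy_dbs being embedded inside od_policy_dbs_info, so container_of() can step back from a pointer to the member to the enclosing object. A minimal standalone illustration of the pattern (hypothetical names, compiles outside the kernel):

#include <stddef.h>

struct base { int x; };

struct wrapper {
	struct base base;	/* embedded member, as policy_dbs above */
	int extra;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct wrapper *to_wrapper(struct base *b)
{
	return container_of(b, struct wrapper, base);
}

This is why od_alloc() can hand the governor core a struct policy_dbs_info * while od_free() recovers and frees the full od_policy_dbs_info.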
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index cf117deb39b1..af9f4b96f5a8 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -33,10 +33,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
return 0;
}
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_performance = {
+static struct cpufreq_governor cpufreq_gov_performance = {
.name = "performance",
.governor = cpufreq_governor_performance,
.owner = THIS_MODULE,
@@ -52,6 +49,19 @@ static void __exit cpufreq_gov_performance_exit(void)
cpufreq_unregister_governor(&cpufreq_gov_performance);
}
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &cpufreq_gov_performance;
+}
+#endif
+#ifndef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+struct cpufreq_governor *cpufreq_fallback_governor(void)
+{
+ return &cpufreq_gov_performance;
+}
+#endif
+
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index e3b874c235ea..b8b400232a74 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -33,10 +33,7 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
return 0;
}
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_powersave = {
+static struct cpufreq_governor cpufreq_gov_powersave = {
.name = "powersave",
.governor = cpufreq_governor_powersave,
.owner = THIS_MODULE,
@@ -57,6 +54,11 @@ MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &cpufreq_gov_powersave;
+}
+
fs_initcall(cpufreq_gov_powersave_init);
#else
module_init(cpufreq_gov_powersave_init);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 4dbf1db16aca..4d16f45ee1da 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -89,10 +89,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
return rc;
}
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_userspace = {
+static struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
.governor = cpufreq_governor_userspace,
.store_setspeed = cpufreq_set,
@@ -116,6 +113,11 @@ MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &cpufreq_gov_userspace;
+}
+
fs_initcall(cpufreq_gov_userspace_init);
#else
module_init(cpufreq_gov_userspace_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cd83d477e32d..b230ebaae66c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -64,6 +64,25 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
+/**
+ * struct sample - Store performance sample
+ * @core_pct_busy: Ratio of APERF/MPERF in percent, which is the actual
+ * performance during the last sample period
+ * @busy_scaled: Scaled busy value which is used to calculate the next
+ * P state. This can differ from core_pct_busy to
+ * account for CPU idle periods
+ * @aperf: Difference of actual performance frequency clock count
+ * read from APERF MSR between last and current sample
+ * @mperf: Difference of maximum performance frequency clock count
+ * read from MPERF MSR between last and current sample
+ * @tsc: Difference of time stamp counter between last and
+ * current sample
+ * @freq: Effective frequency calculated from APERF/MPERF
+ * @time: Current time from scheduler
+ *
+ * This structure is used in the cpudata structure to store performance sample
+ * data for choosing the next P state.
+ */
struct sample {
int32_t core_pct_busy;
int32_t busy_scaled;
@@ -71,9 +90,23 @@ struct sample {
u64 mperf;
u64 tsc;
int freq;
- ktime_t time;
+ u64 time;
};
+/**
+ * struct pstate_data - Store P state data
+ * @current_pstate: Current requested P state
+ * @min_pstate: Min P state possible for this platform
+ * @max_pstate: Max P state possible for this platform
+ * @max_pstate_physical: This is the physical max P state for a processor,
+ * which can be higher than max_pstate when the latter
+ * is limited by platform thermal design power limits
+ * @scaling: Scaling factor to convert frequency to cpufreq
+ * frequency units
+ * @turbo_pstate: Max Turbo P state possible for this platform
+ *
+ * Stores the per CPU model P state limits and the current P state.
+ */
struct pstate_data {
int current_pstate;
int min_pstate;
@@ -83,6 +116,19 @@ struct pstate_data {
int turbo_pstate;
};
+/**
+ * struct vid_data - Stores voltage information data
+ * @min: VID data for this platform corresponding to
+ * the lowest P state
+ * @max: VID data corresponding to the highest P State.
+ * @turbo: VID data for turbo P state
+ * @ratio: Ratio of (vid max - vid min) /
+ * (max P state - Min P State)
+ *
+ * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
+ * This data is used on Atom platforms, where, in addition to the target P
+ * state, the voltage data needs to be specified to select the next P state.
+ */
struct vid_data {
int min;
int max;
@@ -90,6 +136,18 @@ struct vid_data {
int32_t ratio;
};
+/**
+ * struct _pid - Stores PID data
+ * @setpoint: Target set point for busyness or performance
+ * @integral: Storage for accumulated error values
+ * @p_gain: PID proportional gain
+ * @i_gain: PID integral gain
+ * @d_gain: PID derivative gain
+ * @deadband: PID deadband
+ * @last_err: Last error storage for derivative part of PID calculation
+ *
+ * Stores PID coefficients and last error for PID controller.
+ */
struct _pid {
int setpoint;
int32_t integral;
@@ -100,16 +158,33 @@ struct _pid {
int32_t last_err;
};
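pid_calc() (modified further down) implements a standard discrete PID step over these fields. In floating point, and ignoring the integral clamping the driver also applies, the update is roughly (an illustrative sketch, not driver code):

#include <math.h>

struct pid_fp {
	double setpoint, deadband;
	double p_gain, i_gain, d_gain;
	double integral, last_err;
};

static double pid_step(struct pid_fp *pid, double busy)
{
	double err = pid->setpoint - busy;
	double dterm;

	if (fabs(err) <= pid->deadband)
		return 0.0;		/* inside deadband: no adjustment */

	pid->integral += err;		/* accumulated error */
	dterm = pid->d_gain * (err - pid->last_err);
	pid->last_err = err;

	return pid->p_gain * err + pid->i_gain * pid->integral + dterm;
}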
+/**
+ * struct cpudata - Per CPU instance data storage
+ * @cpu: CPU number for this instance data
+ * @update_util: CPUFreq utility callback information
+ * @pstate: Stores P state limits for this CPU
+ * @vid: Stores VID limits for this CPU
+ * @pid: Stores PID parameters for this CPU
+ * @last_sample_time: Last Sample time
+ * @prev_aperf: Last APERF value read from APERF MSR
+ * @prev_mperf: Last MPERF value read from MPERF MSR
+ * @prev_tsc: Last timestamp counter (TSC) value
+ * @prev_cummulative_iowait: IO wait time difference between the last and
+ * current sample
+ * @sample: Storage for the last sample data
+ *
+ * This structure stores per CPU instance data for all CPUs.
+ */
struct cpudata {
int cpu;
- struct timer_list timer;
+ struct update_util_data update_util;
struct pstate_data pstate;
struct vid_data vid;
struct _pid pid;
- ktime_t last_sample_time;
+ u64 last_sample_time;
u64 prev_aperf;
u64 prev_mperf;
u64 prev_tsc;
@@ -118,8 +193,22 @@ struct cpudata {
};
static struct cpudata **all_cpu_data;
+
+/**
+ * struct pstate_adjust_policy - Stores static PID configuration data
+ * @sample_rate_ms: PID calculation sample rate in ms
+ * @sample_rate_ns: Sample rate in ns, derived from sample_rate_ms
+ * @deadband: PID deadband
+ * @setpoint: PID Setpoint
+ * @p_gain_pct: PID proportional gain
+ * @i_gain_pct: PID integral gain
+ * @d_gain_pct: PID derivative gain
+ *
+ * Stores per CPU model static PID configuration data.
+ */
struct pstate_adjust_policy {
int sample_rate_ms;
+ s64 sample_rate_ns;
int deadband;
int setpoint;
int p_gain_pct;
@@ -127,17 +216,36 @@ struct pstate_adjust_policy {
int i_gain_pct;
};
+/**
+ * struct pstate_funcs - Per CPU model specific callbacks
+ * @get_max: Callback to get maximum non turbo effective P state
+ * @get_max_physical: Callback to get maximum non turbo physical P state
+ * @get_min: Callback to get minimum P state
+ * @get_turbo: Callback to get turbo P state
+ * @get_scaling: Callback to get frequency scaling factor
+ * @get_val: Callback to convert P state to actual MSR write value
+ * @get_vid: Callback to get VID data for Atom platforms
+ * @get_target_pstate: Callback to a function to calculate next P state to use
+ *
+ * Core and Atom CPU models have different ways to get P state limits. This
+ * structure is used to store those callbacks.
+ */
struct pstate_funcs {
int (*get_max)(void);
int (*get_max_physical)(void);
int (*get_min)(void);
int (*get_turbo)(void);
int (*get_scaling)(void);
- void (*set)(struct cpudata*, int pstate);
+ u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
int32_t (*get_target_pstate)(struct cpudata *);
};
+/**
+ * struct cpu_defaults - Per CPU model default config data
+ * @pid_policy: PID config data
+ * @funcs: Callback function data
+ */
struct cpu_defaults {
struct pstate_adjust_policy pid_policy;
struct pstate_funcs funcs;
@@ -150,6 +258,34 @@ static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
+
+/**
+ * struct perf_limits - Store user and policy limits
+ * @no_turbo: User requested turbo state from intel_pstate sysfs
+ * @turbo_disabled: Platform turbo status, either from the
+ * MSR_IA32_MISC_ENABLE MSR or set when the maximum
+ * available pstate matches the maximum turbo pstate
+ * @max_perf_pct: Effective maximum performance limit in percentage; the
+ * minimum of the limit enforced by the cpufreq policy
+ * and the user-set limit from intel_pstate sysfs
+ * @min_perf_pct: Effective minimum performance limit in percentage; the
+ * maximum of the limit enforced by the cpufreq policy
+ * and the user-set limit from intel_pstate sysfs
+ * @max_perf: Scaled value between 0 and 255 for max_perf_pct,
+ * used to limit the max pstate
+ * @min_perf: Scaled value between 0 and 255 for min_perf_pct,
+ * used to limit the min pstate
+ * @max_policy_pct: The maximum performance in percentage enforced by
+ * the cpufreq setpolicy interface
+ * @max_sysfs_pct: The maximum performance in percentage enforced by
+ * the intel_pstate sysfs interface
+ * @min_policy_pct: The minimum performance in percentage enforced by
+ * the cpufreq setpolicy interface
+ * @min_sysfs_pct: The minimum performance in percentage enforced by
+ * the intel_pstate sysfs interface
+ *
+ * Storage for user and policy defined limits.
+ */
struct perf_limits {
int no_turbo;
int turbo_disabled;
@@ -197,8 +333,8 @@ static struct perf_limits *limits = &powersave_limits;
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
int deadband, int integral) {
- pid->setpoint = setpoint;
- pid->deadband = deadband;
+ pid->setpoint = int_tofp(setpoint);
+ pid->deadband = int_tofp(deadband);
pid->integral = int_tofp(integral);
pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
@@ -224,9 +360,9 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
int32_t pterm, dterm, fp_error;
int32_t integral_limit;
- fp_error = int_tofp(pid->setpoint) - busy;
+ fp_error = pid->setpoint - busy;
- if (abs(fp_error) <= int_tofp(pid->deadband))
+ if (abs(fp_error) <= pid->deadband)
return 0;
pterm = mul_fp(pid->p_gain, fp_error);
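The pid_reset() change above moves the int_tofp() conversions of setpoint and deadband out of the per-sample pid_calc() path and into the one-time reset. For orientation, the driver's fixed-point helpers are defined along these lines (FRAC_BITS is 8 in this file; reproduced as a sketch, not part of the diff):

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}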
@@ -286,7 +422,7 @@ static inline void update_turbo_state(void)
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
-static void intel_pstate_hwp_set(void)
+static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
int min, hw_min, max, hw_max, cpu, range, adj_range;
u64 value, cap;
@@ -296,9 +432,7 @@ static void intel_pstate_hwp_set(void)
hw_max = HWP_HIGHEST_PERF(cap);
range = hw_max - hw_min;
- get_online_cpus();
-
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, cpumask) {
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
adj_range = limits->min_perf_pct * range / 100;
min = hw_min + adj_range;
@@ -317,7 +451,20 @@ static void intel_pstate_hwp_set(void)
value |= HWP_MAX_PERF(max);
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
+}
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+ if (hwp_active)
+ intel_pstate_hwp_set(policy->cpus);
+
+ return 0;
+}
+
+static void intel_pstate_hwp_set_online_cpus(void)
+{
+ get_online_cpus();
+ intel_pstate_hwp_set(cpu_online_mask);
put_online_cpus();
}
@@ -439,7 +586,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
limits->no_turbo = clamp_t(int, input, 0, 1);
if (hwp_active)
- intel_pstate_hwp_set();
+ intel_pstate_hwp_set_online_cpus();
return count;
}
@@ -465,7 +612,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
int_tofp(100));
if (hwp_active)
- intel_pstate_hwp_set();
+ intel_pstate_hwp_set_online_cpus();
return count;
}
@@ -490,7 +637,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
int_tofp(100));
if (hwp_active)
- intel_pstate_hwp_set();
+ intel_pstate_hwp_set_online_cpus();
return count;
}
@@ -531,6 +678,9 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
+ /* First disable HWP notification interrupts as we don't process them */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
@@ -558,7 +708,7 @@ static int atom_get_turbo_pstate(void)
return value & 0x7F;
}
-static void atom_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
u64 val;
int32_t vid_fp;
@@ -578,9 +728,7 @@ static void atom_set_pstate(struct cpudata *cpudata, int pstate)
if (pstate > cpudata->pstate.max_pstate)
vid = cpudata->vid.turbo;
- val |= vid;
-
- wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+ return val | vid;
}
static int silvermont_get_scaling(void)
@@ -673,6 +821,11 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;
+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
@@ -704,7 +857,7 @@ static inline int core_get_scaling(void)
return 100000;
}
-static void core_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
u64 val;
@@ -712,7 +865,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
if (limits->no_turbo && !limits->turbo_disabled)
val |= (u64)1 << 32;
- wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+ return val;
}
static int knl_get_turbo_pstate(void)
@@ -743,7 +896,7 @@ static struct cpu_defaults core_params = {
.get_min = core_get_min_pstate,
.get_turbo = core_get_turbo_pstate,
.get_scaling = core_get_scaling,
- .set = core_set_pstate,
+ .get_val = core_get_val,
.get_target_pstate = get_target_pstate_use_performance,
},
};
@@ -762,7 +915,7 @@ static struct cpu_defaults silvermont_params = {
.get_max_physical = atom_get_max_pstate,
.get_min = atom_get_min_pstate,
.get_turbo = atom_get_turbo_pstate,
- .set = atom_set_pstate,
+ .get_val = atom_get_val,
.get_scaling = silvermont_get_scaling,
.get_vid = atom_get_vid,
.get_target_pstate = get_target_pstate_use_cpu_load,
@@ -783,7 +936,7 @@ static struct cpu_defaults airmont_params = {
.get_max_physical = atom_get_max_pstate,
.get_min = atom_get_min_pstate,
.get_turbo = atom_get_turbo_pstate,
- .set = atom_set_pstate,
+ .get_val = atom_get_val,
.get_scaling = airmont_get_scaling,
.get_vid = atom_get_vid,
.get_target_pstate = get_target_pstate_use_cpu_load,
@@ -805,7 +958,7 @@ static struct cpu_defaults knl_params = {
.get_min = core_get_min_pstate,
.get_turbo = knl_get_turbo_pstate,
.get_scaling = core_get_scaling,
- .set = core_set_pstate,
+ .get_val = core_get_val,
.get_target_pstate = get_target_pstate_use_performance,
},
};
@@ -824,33 +977,32 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
* policy, or by cpu specific default values determined through
* experimentation.
*/
- max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
+ max_perf_adj = fp_toint(max_perf * limits->max_perf);
*max = clamp_t(int, max_perf_adj,
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
- min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
+ min_perf = fp_toint(max_perf * limits->min_perf);
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
+static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
{
- int max_perf, min_perf;
-
- if (force) {
- update_turbo_state();
-
- intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
-
- pstate = clamp_t(int, pstate, min_perf, max_perf);
-
- if (pstate == cpu->pstate.current_pstate)
- return;
- }
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
-
cpu->pstate.current_pstate = pstate;
+}
+
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+ int pstate = cpu->pstate.min_pstate;
- pstate_funcs.set(cpu, pstate);
+ intel_pstate_record_pstate(cpu, pstate);
+ /*
+ * Generally, there is no guarantee that this code will always run on
+ * the CPU being updated, so force the register update to run on the
+ * right CPU.
+ */
+ wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
@@ -863,7 +1015,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
- intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+
+ intel_pstate_set_min_pstate(cpu);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
@@ -874,16 +1027,10 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
core_pct = int_tofp(sample->aperf) * int_tofp(100);
core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
- sample->freq = fp_toint(
- mul_fp(int_tofp(
- cpu->pstate.max_pstate_physical *
- cpu->pstate.scaling / 100),
- core_pct));
-
sample->core_pct_busy = (int32_t)core_pct;
}
-static inline void intel_pstate_sample(struct cpudata *cpu)
+static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
u64 aperf, mperf;
unsigned long flags;
@@ -893,14 +1040,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
- if ((cpu->prev_mperf == mperf) || (cpu->prev_tsc == tsc)) {
+ if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
local_irq_restore(flags);
- return;
+ return false;
}
local_irq_restore(flags);
cpu->last_sample_time = cpu->sample.time;
- cpu->sample.time = ktime_get();
+ cpu->sample.time = time;
cpu->sample.aperf = aperf;
cpu->sample.mperf = mperf;
cpu->sample.tsc = tsc;
@@ -908,27 +1055,24 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
cpu->sample.mperf -= cpu->prev_mperf;
cpu->sample.tsc -= cpu->prev_tsc;
- intel_pstate_calc_busy(cpu);
-
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
cpu->prev_tsc = tsc;
+ /*
+ * The first time this function is invoked in a given cycle, all of the
+ * previous sample data fields are equal to zero or stale and they must
+ * be populated with meaningful numbers for things to work, so assume
+ * that sample.time will always be reset before the utilization update
+ * hook is set and make the caller skip the sample in that case.
+ */
+ return !!cpu->last_sample_time;
}
-static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
-{
- int delay;
-
- delay = msecs_to_jiffies(50);
- mod_timer_pinned(&cpu->timer, jiffies + delay);
-}
-
-static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
+static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
- int delay;
-
- delay = msecs_to_jiffies(pid_params.sample_rate_ms);
- mod_timer_pinned(&cpu->timer, jiffies + delay);
+ return fp_toint(mul_fp(cpu->sample.core_pct_busy,
+ int_tofp(cpu->pstate.max_pstate_physical *
+ cpu->pstate.scaling / 100)));
}
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -954,7 +1098,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
mperf = cpu->sample.mperf + delta_iowait_mperf;
cpu->prev_cummulative_iowait = cummulative_iowait;
-
/*
* The load can be estimated as the ratio of the mperf counter
* running at a constant frequency during active periods
@@ -970,8 +1113,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
int32_t core_busy, max_pstate, current_pstate, sample_ratio;
- s64 duration_us;
- u32 sample_time;
+ u64 duration_ns;
/*
* core_busy is the ratio of actual performance to max
@@ -990,25 +1132,41 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
/*
- * Since we have a deferred timer, it will not fire unless
- * we are in C0. So, determine if the actual elapsed time
- * is significantly greater (3x) than our sample interval. If it
- * is, then we were idle for a long enough period of time
- * to adjust our busyness.
+ * Since our utilization update callback will not run unless we are
+ * in C0, check if the actual elapsed time is significantly greater (3x)
+ * than our sample interval. If it is, then we were idle for a long
+ * enough period of time to adjust our busyness.
*/
- sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
- duration_us = ktime_us_delta(cpu->sample.time,
- cpu->last_sample_time);
- if (duration_us > sample_time * 3) {
- sample_ratio = div_fp(int_tofp(sample_time),
- int_tofp(duration_us));
+ duration_ns = cpu->sample.time - cpu->last_sample_time;
+ if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
+ sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
+ int_tofp(duration_ns));
core_busy = mul_fp(core_busy, sample_ratio);
+ } else {
+ sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+ if (sample_ratio < int_tofp(1))
+ core_busy = 0;
}
cpu->sample.busy_scaled = core_busy;
return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
}
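A quick numeric check of the idle correction above, with hypothetical values:

	/* sample_rate_ns = 10,000,000 (10 ms), duration_ns = 40,000,000 */
	/* 40 ms > 3 * 10 ms, so sample_ratio = 10/40 = 0.25 in fixed point */
	/* core_busy is scaled to a quarter, steering the next P state lower */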
+static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+ int max_perf, min_perf;
+
+ update_turbo_state();
+
+ intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+ pstate = clamp_t(int, pstate, min_perf, max_perf);
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+ intel_pstate_record_pstate(cpu, pstate);
+ wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+}
+
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
int from, target_pstate;
@@ -1018,7 +1176,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
target_pstate = pstate_funcs.get_target_pstate(cpu);
- intel_pstate_set_pstate(cpu, target_pstate, true);
+ intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
trace_pstate_sample(fp_toint(sample->core_pct_busy),
@@ -1028,26 +1186,24 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
sample->mperf,
sample->aperf,
sample->tsc,
- sample->freq);
-}
-
-static void intel_hwp_timer_func(unsigned long __data)
-{
- struct cpudata *cpu = (struct cpudata *) __data;
-
- intel_pstate_sample(cpu);
- intel_hwp_set_sample_time(cpu);
+ get_avg_frequency(cpu));
}
-static void intel_pstate_timer_func(unsigned long __data)
+static void intel_pstate_update_util(struct update_util_data *data, u64 time,
+ unsigned long util, unsigned long max)
{
- struct cpudata *cpu = (struct cpudata *) __data;
+ struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+ u64 delta_ns = time - cpu->sample.time;
- intel_pstate_sample(cpu);
+ if ((s64)delta_ns >= pid_params.sample_rate_ns) {
+ bool sample_taken = intel_pstate_sample(cpu, time);
- intel_pstate_adjust_busy_pstate(cpu);
-
- intel_pstate_set_sample_time(cpu);
+ if (sample_taken) {
+ intel_pstate_calc_busy(cpu);
+ if (!hwp_active)
+ intel_pstate_adjust_busy_pstate(cpu);
+ }
+ }
}
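For orientation, the registration side of the utilization-update hook used above follows this shape (a sketch built from the calls visible in this patch; not new API):

static void my_update_util(struct update_util_data *data, u64 time,
			   unsigned long util, unsigned long max)
{
	/* runs from scheduler hot paths; must not sleep */
}

static struct update_util_data my_data = { .func = my_update_util };

	/* install on a CPU */
	cpufreq_set_update_util_data(cpu, &my_data);

	/* remove, then wait for in-flight callbacks to complete */
	cpufreq_set_update_util_data(cpu, NULL);
	synchronize_sched();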
#define ICPU(model, policy) \
@@ -1095,24 +1251,17 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu->cpu = cpunum;
- if (hwp_active)
+ if (hwp_active) {
intel_pstate_hwp_enable(cpu);
+ pid_params.sample_rate_ms = 50;
+ pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
+ }
intel_pstate_get_cpu_pstates(cpu);
- init_timer_deferrable(&cpu->timer);
- cpu->timer.data = (unsigned long)cpu;
- cpu->timer.expires = jiffies + HZ/100;
-
- if (!hwp_active)
- cpu->timer.function = intel_pstate_timer_func;
- else
- cpu->timer.function = intel_hwp_timer_func;
-
intel_pstate_busy_pid_reset(cpu);
- intel_pstate_sample(cpu);
- add_timer_on(&cpu->timer, cpunum);
+ cpu->update_util.func = intel_pstate_update_util;
pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
@@ -1128,7 +1277,36 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
if (!cpu)
return 0;
sample = &cpu->sample;
- return sample->freq;
+ return get_avg_frequency(cpu);
+}
+
+static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
+{
+ struct cpudata *cpu = all_cpu_data[cpu_num];
+
+ /* Prevent intel_pstate_update_util() from using stale data. */
+ cpu->sample.time = 0;
+ cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
+}
+
+static void intel_pstate_clear_update_util_hook(unsigned int cpu)
+{
+ cpufreq_set_update_util_data(cpu, NULL);
+ synchronize_sched();
+}
+
+static void intel_pstate_set_performance_limits(struct perf_limits *limits)
+{
+ limits->no_turbo = 0;
+ limits->turbo_disabled = 0;
+ limits->max_perf_pct = 100;
+ limits->max_perf = int_tofp(1);
+ limits->min_perf_pct = 100;
+ limits->min_perf = int_tofp(1);
+ limits->max_policy_pct = 100;
+ limits->max_sysfs_pct = 100;
+ limits->min_policy_pct = 0;
+ limits->min_sysfs_pct = 0;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
@@ -1136,17 +1314,20 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
- policy->max >= policy->cpuinfo.max_freq) {
- pr_debug("intel_pstate: set performance\n");
+ intel_pstate_clear_update_util_hook(policy->cpu);
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
- if (hwp_active)
- intel_pstate_hwp_set();
- return 0;
+ if (policy->max >= policy->cpuinfo.max_freq) {
+ pr_debug("intel_pstate: set performance\n");
+ intel_pstate_set_performance_limits(limits);
+ goto out;
+ }
+ } else {
+ pr_debug("intel_pstate: set powersave\n");
+ limits = &powersave_limits;
}
- pr_debug("intel_pstate: set powersave\n");
- limits = &powersave_limits;
limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
@@ -1172,8 +1353,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
int_tofp(100));
- if (hwp_active)
- intel_pstate_hwp_set();
+ out:
+ intel_pstate_set_update_util_hook(policy->cpu);
+
+ intel_pstate_hwp_set_policy(policy);
return 0;
}
@@ -1196,11 +1379,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
- del_timer_sync(&all_cpu_data[cpu_num]->timer);
+ intel_pstate_clear_update_util_hook(cpu_num);
+
if (hwp_active)
return;
- intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+ intel_pstate_set_min_pstate(cpu);
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -1236,6 +1420,7 @@ static struct cpufreq_driver intel_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
.setpolicy = intel_pstate_set_policy,
+ .resume = intel_pstate_hwp_set_policy,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
.stop_cpu = intel_pstate_stop_cpu,
@@ -1260,6 +1445,7 @@ static int intel_pstate_msrs_not_valid(void)
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
+ pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
pid_params.p_gain_pct = policy->p_gain_pct;
pid_params.i_gain_pct = policy->i_gain_pct;
pid_params.d_gain_pct = policy->d_gain_pct;
@@ -1274,7 +1460,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_min = funcs->get_min;
pstate_funcs.get_turbo = funcs->get_turbo;
pstate_funcs.get_scaling = funcs->get_scaling;
- pstate_funcs.set = funcs->set;
+ pstate_funcs.get_val = funcs->get_val;
pstate_funcs.get_vid = funcs->get_vid;
pstate_funcs.get_target_pstate = funcs->get_target_pstate;
@@ -1397,6 +1583,11 @@ static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */
+static const struct x86_cpu_id hwp_support_ids[] __initconst = {
+ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+ {}
+};
+
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
@@ -1406,17 +1597,16 @@ static int __init intel_pstate_init(void)
if (no_load)
return -ENODEV;
+ if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
+ copy_cpu_funcs(&core_params.funcs);
+ hwp_active++;
+ goto hwp_cpu_matched;
+ }
+
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id)
return -ENODEV;
- /*
- * The Intel pstate driver will be ignored if the platform
- * firmware has its own power management modes.
- */
- if (intel_pstate_platform_pwr_mgmt_exists())
- return -ENODEV;
-
cpu_def = (struct cpu_defaults *)id->driver_data;
copy_pid_params(&cpu_def->pid_policy);
@@ -1425,17 +1615,20 @@ static int __init intel_pstate_init(void)
if (intel_pstate_msrs_not_valid())
return -ENODEV;
+hwp_cpu_matched:
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists())
+ return -ENODEV;
+
pr_info("Intel P-state driver initializing.\n");
all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
return -ENOMEM;
- if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
- pr_info("intel_pstate: HWP enabled\n");
- hwp_active++;
- }
-
if (!hwp_active && hwp_only)
goto out;
@@ -1446,12 +1639,15 @@ static int __init intel_pstate_init(void)
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
+ if (hwp_active)
+ pr_info("intel_pstate: HWP enabled\n");
+
return rc;
out:
get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
- del_timer_sync(&all_cpu_data[cpu]->timer);
+ intel_pstate_clear_update_util_hook(cpu);
kfree(all_cpu_data[cpu]);
}
}
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 547890fd9572..39ac78c94be0 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -28,6 +28,8 @@
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <trace/events/power.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
@@ -43,15 +45,39 @@
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static bool rebooting, throttled, occ_reset;
+static const char * const throttle_reason[] = {
+ "No throttling",
+ "Power Cap",
+ "Processor Over Temperature",
+ "Power Supply Failure",
+ "Over Current",
+ "OCC Reset"
+};
+
+enum throttle_reason_type {
+ NO_THROTTLE = 0,
+ POWERCAP,
+ CPU_OVERTEMP,
+ POWER_SUPPLY_FAILURE,
+ OVERCURRENT,
+ OCC_RESET_THROTTLE,
+ OCC_MAX_REASON
+};
+
static struct chip {
unsigned int id;
bool throttled;
+ bool restore;
+ u8 throttle_reason;
cpumask_t mask;
struct work_struct throttle;
- bool restore;
+ int throttle_turbo;
+ int throttle_sub_turbo;
+ int reason[OCC_MAX_REASON];
} *chips;
static int nr_chips;
+static DEFINE_PER_CPU(struct chip *, chip_info);
/*
* Note: The set of pstates consists of contiguous integers, the
@@ -183,6 +209,42 @@ static struct freq_attr *powernv_cpu_freq_attr[] = {
NULL,
};
+#define throttle_attr(name, member) \
+static ssize_t name##_show(struct cpufreq_policy *policy, char *buf) \
+{ \
+ struct chip *chip = per_cpu(chip_info, policy->cpu); \
+ \
+ return sprintf(buf, "%u\n", chip->member); \
+} \
+ \
+static struct freq_attr throttle_attr_##name = __ATTR_RO(name) \
+
+throttle_attr(unthrottle, reason[NO_THROTTLE]);
+throttle_attr(powercap, reason[POWERCAP]);
+throttle_attr(overtemp, reason[CPU_OVERTEMP]);
+throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
+throttle_attr(overcurrent, reason[OVERCURRENT]);
+throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
+throttle_attr(turbo_stat, throttle_turbo);
+throttle_attr(sub_turbo_stat, throttle_sub_turbo);
+
+static struct attribute *throttle_attrs[] = {
+ &throttle_attr_unthrottle.attr,
+ &throttle_attr_powercap.attr,
+ &throttle_attr_overtemp.attr,
+ &throttle_attr_supply_fault.attr,
+ &throttle_attr_overcurrent.attr,
+ &throttle_attr_occ_reset.attr,
+ &throttle_attr_turbo_stat.attr,
+ &throttle_attr_sub_turbo_stat.attr,
+ NULL,
+};
+
+static const struct attribute_group throttle_attr_grp = {
+ .name = "throttle_stats",
+ .attrs = throttle_attrs,
+};
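For reference, expanding one invocation above, throttle_attr(powercap, reason[POWERCAP]) yields:

static ssize_t powercap_show(struct cpufreq_policy *policy, char *buf)
{
	struct chip *chip = per_cpu(chip_info, policy->cpu);

	return sprintf(buf, "%u\n", chip->reason[POWERCAP]);
}

static struct freq_attr throttle_attr_powercap = __ATTR_RO(powercap);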
+
/* Helper routines */
/* Access helpers to power mgt SPR */
@@ -311,34 +373,36 @@ static inline unsigned int get_nominal_index(void)
static void powernv_cpufreq_throttle_check(void *data)
{
+ struct chip *chip;
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
- int pmsr_pmax, i;
+ int pmsr_pmax;
pmsr = get_pmspr(SPRN_PMSR);
-
- for (i = 0; i < nr_chips; i++)
- if (chips[i].id == cpu_to_chip_id(cpu))
- break;
+ chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
pmsr_pmax = (s8)PMSR_MAX(pmsr);
if (pmsr_pmax != powernv_pstate_info.max) {
- if (chips[i].throttled)
+ if (chip->throttled)
goto next;
- chips[i].throttled = true;
- if (pmsr_pmax < powernv_pstate_info.nominal)
- pr_crit("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
- cpu, chips[i].id, pmsr_pmax,
- powernv_pstate_info.nominal);
- else
- pr_info("CPU %d on Chip %u has Pmax reduced below turbo frequency (%d < %d)\n",
- cpu, chips[i].id, pmsr_pmax,
- powernv_pstate_info.max);
- } else if (chips[i].throttled) {
- chips[i].throttled = false;
- pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
- chips[i].id, pmsr_pmax);
+ chip->throttled = true;
+ if (pmsr_pmax < powernv_pstate_info.nominal) {
+ pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+ cpu, chip->id, pmsr_pmax,
+ powernv_pstate_info.nominal);
+ chip->throttle_sub_turbo++;
+ } else {
+ chip->throttle_turbo++;
+ }
+ trace_powernv_throttle(chip->id,
+ throttle_reason[chip->throttle_reason],
+ pmsr_pmax);
+ } else if (chip->throttled) {
+ chip->throttled = false;
+ trace_powernv_throttle(chip->id,
+ throttle_reason[chip->throttle_reason],
+ pmsr_pmax);
}
/* Check if Psafe_mode_active is set in PMSR. */
@@ -356,7 +420,7 @@ next:
if (throttled) {
pr_info("PMSR = %16lx\n", pmsr);
- pr_crit("CPU Frequency could be throttled\n");
+ pr_warn("CPU Frequency could be throttled\n");
}
}
@@ -397,6 +461,21 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
for (i = 0; i < threads_per_core; i++)
cpumask_set_cpu(base + i, policy->cpus);
+ if (!policy->driver_data) {
+ int ret;
+
+ ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
+ if (ret) {
+ pr_info("Failed to create throttle stats directory for cpu %d\n",
+ policy->cpu);
+ return ret;
+ }
+ /*
+ * policy->driver_data is used as a flag for one-time
+ * creation of throttle sysfs files.
+ */
+ policy->driver_data = policy;
+ }
return cpufreq_table_validate_and_show(policy, powernv_freqs);
}
@@ -423,18 +502,19 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
{
struct chip *chip = container_of(work, struct chip, throttle);
unsigned int cpu;
- cpumask_var_t mask;
+ cpumask_t mask;
- smp_call_function_any(&chip->mask,
+ get_online_cpus();
+ cpumask_and(&mask, &chip->mask, cpu_online_mask);
+ smp_call_function_any(&mask,
powernv_cpufreq_throttle_check, NULL, 0);
if (!chip->restore)
- return;
+ goto out;
chip->restore = false;
- cpumask_copy(mask, &chip->mask);
- for_each_cpu_and(cpu, mask, cpu_online_mask) {
- int index, tcpu;
+ for_each_cpu(cpu, &mask) {
+ int index;
struct cpufreq_policy policy;
cpufreq_get_policy(&policy, cpu);
@@ -442,20 +522,12 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
policy.cur,
CPUFREQ_RELATION_C, &index);
powernv_cpufreq_target_index(&policy, index);
- for_each_cpu(tcpu, policy.cpus)
- cpumask_clear_cpu(tcpu, mask);
+ cpumask_andnot(&mask, &mask, policy.cpus);
}
+out:
+ put_online_cpus();
}
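The mask trimming above visits each policy exactly once; for example (hypothetical topology):

	/* mask = {0,1,2,3}, policy of CPU 0 spans CPUs {0,1} */
	/* handle CPU 0, cpumask_andnot() leaves mask = {2,3} */
	/* next iteration starts at CPU 2 with its own policy */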
-static char throttle_reason[][30] = {
- "No throttling",
- "Power Cap",
- "Processor Over Temperature",
- "Power Supply Failure",
- "Over Current",
- "OCC Reset"
- };
-
static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
unsigned long msg_type, void *_msg)
{
@@ -481,7 +553,7 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
*/
if (!throttled) {
throttled = true;
- pr_crit("CPU frequency is throttled for duration\n");
+ pr_warn("CPU frequency is throttled for duration\n");
}
break;
@@ -505,23 +577,20 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
return 0;
}
- if (omsg.throttle_status &&
- omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
- pr_info("OCC: Chip %u Pmax reduced due to %s\n",
- (unsigned int)omsg.chip,
- throttle_reason[omsg.throttle_status]);
- else if (!omsg.throttle_status)
- pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
- throttle_reason[omsg.throttle_status]);
- else
- return 0;
-
for (i = 0; i < nr_chips; i++)
- if (chips[i].id == omsg.chip) {
- if (!omsg.throttle_status)
- chips[i].restore = true;
- schedule_work(&chips[i].throttle);
- }
+ if (chips[i].id == omsg.chip)
+ break;
+
+ if (omsg.throttle_status >= 0 &&
+ omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
+ chips[i].throttle_reason = omsg.throttle_status;
+ chips[i].reason[omsg.throttle_status]++;
+ }
+
+ if (!omsg.throttle_status)
+ chips[i].restore = true;
+
+ schedule_work(&chips[i].throttle);
}
return 0;
}
@@ -566,21 +635,33 @@ static int init_chip_info(void)
}
}
- chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
+ chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
if (!chips)
return -ENOMEM;
for (i = 0; i < nr_chips; i++) {
chips[i].id = chip[i];
- chips[i].throttled = false;
cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
- chips[i].restore = false;
+ for_each_cpu(cpu, &chips[i].mask)
+ per_cpu(chip_info, cpu) = &chips[i];
}
return 0;
}
+static inline void clean_chip_info(void)
+{
+ kfree(chips);
+}
+
+static inline void unregister_all_notifiers(void)
+{
+ opal_message_notifier_unregister(OPAL_MSG_OCC,
+ &powernv_cpufreq_opal_nb);
+ unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
+}
+
static int __init powernv_cpufreq_init(void)
{
int rc = 0;
@@ -591,28 +672,35 @@ static int __init powernv_cpufreq_init(void)
/* Discover pstates from device tree and init */
rc = init_powernv_pstates();
- if (rc) {
- pr_info("powernv-cpufreq disabled. System does not support PState control\n");
- return rc;
- }
+ if (rc)
+ goto out;
/* Populate chip info */
rc = init_chip_info();
if (rc)
- return rc;
+ goto out;
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
- return cpufreq_register_driver(&powernv_cpufreq_driver);
+
+ rc = cpufreq_register_driver(&powernv_cpufreq_driver);
+ if (!rc)
+ return 0;
+
+ pr_info("Failed to register the cpufreq driver (%d)\n", rc);
+ unregister_all_notifiers();
+ clean_chip_info();
+out:
+ pr_info("Platform driver disabled. System does not support PState control\n");
+ return rc;
}
module_init(powernv_cpufreq_init);
static void __exit powernv_cpufreq_exit(void)
{
- unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
- opal_message_notifier_unregister(OPAL_MSG_OCC,
- &powernv_cpufreq_opal_nb);
cpufreq_unregister_driver(&powernv_cpufreq_driver);
+ unregister_all_notifiers();
+ clean_chip_info();
}
module_exit(powernv_cpufreq_exit);
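The hunk above replaces a per-CPU cpumask_clear_cpu() loop with one cpumask_andnot() per policy, so all CPUs covered by a policy leave the pending set in a single step while get/put_online_cpus() pins the hotplug state. A minimal sketch of that iteration pattern, with requeue_chip_cpus() and chip_mask as illustrative stand-ins for the driver's real work-function state:

	static void requeue_chip_cpus(struct cpumask *chip_mask)
	{
		int cpu;

		get_online_cpus();
		for_each_cpu(cpu, chip_mask) {
			struct cpufreq_policy policy;

			if (cpufreq_get_policy(&policy, cpu))
				continue;

			/* ... re-evaluate and set the target index here ... */

			/* remove every CPU this policy covers in one step */
			cpumask_andnot(chip_mask, chip_mask, policy.cpus);
		}
		put_online_cpus();
	}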
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 096377232747..46fee1539cc8 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -319,7 +319,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
local_irq_save(flags);
/* Set new the CCCR and prepare CCLKCFG */
- CCCR = pxa_freq_settings[idx].cccr;
+ writel(pxa_freq_settings[idx].cccr, CCCR);
cclkcfg = pxa_freq_settings[idx].cclkcfg;
asm volatile(" \n\
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 051a8a8224cd..a145b319d171 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -576,10 +576,8 @@ static struct cpufreq_driver s5pv210_driver = {
.get = cpufreq_generic_get,
.init = s5pv210_cpu_init,
.name = "s5pv210",
-#ifdef CONFIG_PM
.suspend = cpufreq_generic_suspend,
.resume = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
-#endif
};
static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a9c659f58974..04042038ec4b 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
{
int ret;
+ if ((!of_machine_is_compatible("st,stih407")) &&
+ (!of_machine_is_compatible("st,stih410")))
+ return -ENODEV;
+
ddata.cpu = get_cpu_device(0);
if (!ddata.cpu) {
dev_err(ddata.cpu, "Failed to get device for CPU0\n");
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 545069d5fdfb..e342565e8715 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
* call the CPU ops suspend protocol with idle index as a
* parameter.
*/
- arm_cpuidle_suspend(idx);
+ ret = arm_cpuidle_suspend(idx);
cpu_pm_exit();
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 0742b3296673..03d38c291de6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -196,11 +196,11 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static void get_typical_interval(struct menu_device *data)
+static unsigned int get_typical_interval(struct menu_device *data)
{
int i, divisor;
- unsigned int max, thresh;
- uint64_t avg, stddev;
+ unsigned int max, thresh, avg;
+ uint64_t sum, variance;
thresh = UINT_MAX; /* Discard outliers above this value */
@@ -208,55 +208,52 @@ again:
/* First calculate the average of past intervals */
max = 0;
- avg = 0;
+ sum = 0;
divisor = 0;
for (i = 0; i < INTERVALS; i++) {
unsigned int value = data->intervals[i];
if (value <= thresh) {
- avg += value;
+ sum += value;
divisor++;
if (value > max)
max = value;
}
}
if (divisor == INTERVALS)
- avg >>= INTERVAL_SHIFT;
+ avg = sum >> INTERVAL_SHIFT;
else
- do_div(avg, divisor);
+ avg = div_u64(sum, divisor);
- /* Then try to determine standard deviation */
- stddev = 0;
+ /* Then try to determine variance */
+ variance = 0;
for (i = 0; i < INTERVALS; i++) {
unsigned int value = data->intervals[i];
if (value <= thresh) {
- int64_t diff = value - avg;
- stddev += diff * diff;
+ int64_t diff = (int64_t)value - avg;
+ variance += diff * diff;
}
}
if (divisor == INTERVALS)
- stddev >>= INTERVAL_SHIFT;
+ variance >>= INTERVAL_SHIFT;
else
- do_div(stddev, divisor);
+ do_div(variance, divisor);
/*
- * The typical interval is obtained when standard deviation is small
- * or standard deviation is small compared to the average interval.
- *
- * int_sqrt() formal parameter type is unsigned long. When the
- * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
- * the resulting squared standard deviation exceeds the input domain
- * of int_sqrt on platforms where unsigned long is 32 bits in size.
- * In such case reject the candidate average.
+ * The typical interval is obtained when standard deviation is
+ * small (stddev <= 20 us, variance <= 400 us^2) or standard
+ * deviation is small compared to the average interval (avg >
+ * 6*stddev, avg^2 > 36*variance). The average is smaller than
+ * UINT_MAX aka U32_MAX, so computing its square does not
+ * overflow a u64. We simply reject this candidate average if
+ * the standard deviation is greater than 715 s (which is
+ * rather unlikely).
*
* Use this result only if there is no timer to wake us up sooner.
*/
- if (likely(stddev <= ULONG_MAX)) {
- stddev = int_sqrt(stddev);
- if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
- || stddev <= 20) {
- if (data->next_timer_us > avg)
- data->predicted_us = avg;
- return;
+ if (likely(variance <= U64_MAX/36)) {
+ if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
+ || variance <= 400) {
+ return avg;
}
}
@@ -270,7 +267,7 @@ again:
* with sporadic activity with a bunch of short pauses.
*/
if ((divisor * 4) <= INTERVALS * 3)
- return;
+ return UINT_MAX;
thresh = max - 1;
goto again;
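The squared comparison above is safe because avg is at most U32_MAX (about 4.29e9 us), so (u64)avg * avg stays below U64_MAX, and variance * 36 cannot wrap once variance <= U64_MAX/36 has been checked. The same acceptance test in isolation (an illustrative helper, not the governor's code; the divisor check is omitted):

	static bool typical_interval_ok(unsigned int avg, u64 variance)
	{
		if (variance > U64_MAX / 36)
			return false;	/* stddev above ~715 s: reject */

		/* avg > 6 * stddev  is equivalent to  avg^2 > 36 * variance */
		return ((u64)avg * avg > variance * 36) || variance <= 400;
	}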
@@ -287,6 +284,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
unsigned int interactivity_req;
+ unsigned int expected_interval;
unsigned long nr_iowaiters, cpu_load;
if (data->needs_update) {
@@ -313,32 +311,43 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket],
RESOLUTION * DECAY);
- get_typical_interval(data);
-
- /*
- * Performance multiplier defines a minimum predicted idle
- * duration / latency ratio. Adjust the latency limit if
- * necessary.
- */
- interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
- if (latency_req > interactivity_req)
- latency_req = interactivity_req;
+ expected_interval = get_typical_interval(data);
+ expected_interval = min(expected_interval, data->next_timer_us);
if (CPUIDLE_DRIVER_STATE_START > 0) {
- data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
+ struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START];
+ unsigned int polling_threshold;
+
/*
* We want to default to C1 (hlt), not to busy polling
- * unless the timer is happening really really soon.
+ * unless the timer is happening really really soon, or
+ * C1's exit latency exceeds the user configured limit.
*/
- if (interactivity_req > 20 &&
- !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
- dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
+ polling_threshold = max_t(unsigned int, 20, s->target_residency);
+ if (data->next_timer_us > polling_threshold &&
+ latency_req > s->exit_latency && !s->disabled &&
+ !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+ else
+ data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
} else {
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
}
/*
+ * Use the lowest expected idle interval to pick the idle state.
+ */
+ data->predicted_us = min(data->predicted_us, expected_interval);
+
+ /*
+ * Use the performance multiplier and the user-configurable
+ * latency_req to determine the maximum exit latency.
+ */
+ interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+ if (latency_req > interactivity_req)
+ latency_req = interactivity_req;
+
+ /*
* Find the idle state with the lowest power while satisfying
* our constraints.
*/
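As a worked illustration of the reordering above (numbers invented): if get_typical_interval() returns 150 and next_timer_us is 500, expected_interval is 150 us; with performance_multiplier() evaluating to 10, interactivity_req is 150 / 10 = 15 us, so a user latency_req of 100 us is tightened to 15 us before the lowest-power state satisfying both limits is chosen.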
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 07d494276aad..477fffdb4f49 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -296,6 +296,7 @@ config CRYPTO_DEV_OMAP_AES
depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
select CRYPTO_AES
select CRYPTO_BLKCIPHER
+ select CRYPTO_ENGINE
help
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
@@ -487,7 +488,7 @@ config CRYPTO_DEV_IMGTEC_HASH
config CRYPTO_DEV_SUN4I_SS
tristate "Support for Allwinner Security System cryptographic accelerator"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI && !64BIT
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_AES
@@ -507,6 +508,10 @@ config CRYPTO_DEV_ROCKCHIP
depends on OF && ARCH_ROCKCHIP
select CRYPTO_AES
select CRYPTO_DES
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
select CRYPTO_BLKCIPHER
help
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 3eb3f1279fb7..e3d40a8dfffb 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -369,12 +369,6 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
return len ? block_size - len : 0;
}
-static inline struct aead_request *
-aead_request_cast(struct crypto_async_request *req)
-{
- return container_of(req, struct aead_request, base);
-}
-
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
struct atmel_aes_dev *aes_dd = NULL;
@@ -2085,9 +2079,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
- if (!aes_dd->io_base) {
+ if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(aes_dd->io_base);
goto res_err;
}
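The corrected check above matters because devm_ioremap_resource() never returns NULL: it returns either a valid __iomem pointer or an ERR_PTR()-encoded error, so probing code must use IS_ERR()/PTR_ERR(). The canonical pattern, sketched with illustrative names:

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);	/* never NULL on failure */

		/* ... program the mapped registers via base ... */
		return 0;
	}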
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
index 83b2d7425666..e08897109cab 100644
--- a/drivers/crypto/atmel-sha-regs.h
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -8,6 +8,8 @@
#define SHA_CR_START (1 << 0)
#define SHA_CR_FIRST (1 << 4)
#define SHA_CR_SWRST (1 << 8)
+#define SHA_CR_WUIHV (1 << 12)
+#define SHA_CR_WUIEHV (1 << 13)
#define SHA_MR 0x04
#define SHA_MR_MODE_MASK (0x3 << 0)
@@ -15,6 +17,8 @@
#define SHA_MR_MODE_AUTO 0x1
#define SHA_MR_MODE_PDC 0x2
#define SHA_MR_PROCDLY (1 << 4)
+#define SHA_MR_UIHV (1 << 5)
+#define SHA_MR_UIEHV (1 << 6)
#define SHA_MR_ALGO_SHA1 (0 << 8)
#define SHA_MR_ALGO_SHA256 (1 << 8)
#define SHA_MR_ALGO_SHA384 (2 << 8)
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8bf9914d4d15..97e34799e077 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -53,6 +53,7 @@
#define SHA_FLAGS_FINUP BIT(16)
#define SHA_FLAGS_SG BIT(17)
+#define SHA_FLAGS_ALGO_MASK GENMASK(22, 18)
#define SHA_FLAGS_SHA1 BIT(18)
#define SHA_FLAGS_SHA224 BIT(19)
#define SHA_FLAGS_SHA256 BIT(20)
@@ -60,11 +61,12 @@
#define SHA_FLAGS_SHA512 BIT(22)
#define SHA_FLAGS_ERROR BIT(23)
#define SHA_FLAGS_PAD BIT(24)
+#define SHA_FLAGS_RESTORE BIT(25)
#define SHA_OP_UPDATE 1
#define SHA_OP_FINAL 2
-#define SHA_BUFFER_LEN PAGE_SIZE
+#define SHA_BUFFER_LEN (PAGE_SIZE / 16)
#define ATMEL_SHA_DMA_THRESHOLD 56
@@ -73,10 +75,15 @@ struct atmel_sha_caps {
bool has_dualbuff;
bool has_sha224;
bool has_sha_384_512;
+ bool has_uihv;
};
struct atmel_sha_dev;
+/*
+ * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
struct atmel_sha_reqctx {
struct atmel_sha_dev *dd;
unsigned long flags;
@@ -95,7 +102,7 @@ struct atmel_sha_reqctx {
size_t block_size;
- u8 buffer[0] __aligned(sizeof(u32));
+ u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
struct atmel_sha_ctx {
@@ -122,6 +129,7 @@ struct atmel_sha_dev {
spinlock_t lock;
int err;
struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
unsigned long flags;
struct crypto_queue queue;
@@ -317,7 +325,8 @@ static int atmel_sha_init(struct ahash_request *req)
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
- u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
+ u32 valmr = SHA_MR_MODE_AUTO;
+ unsigned int i, hashsize = 0;
if (likely(dma)) {
if (!dd->caps.has_dma)
@@ -329,22 +338,62 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
- if (ctx->flags & SHA_FLAGS_SHA1)
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_SHA1:
valmr |= SHA_MR_ALGO_SHA1;
- else if (ctx->flags & SHA_FLAGS_SHA224)
+ hashsize = SHA1_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA224:
valmr |= SHA_MR_ALGO_SHA224;
- else if (ctx->flags & SHA_FLAGS_SHA256)
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA256:
valmr |= SHA_MR_ALGO_SHA256;
- else if (ctx->flags & SHA_FLAGS_SHA384)
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA384:
valmr |= SHA_MR_ALGO_SHA384;
- else if (ctx->flags & SHA_FLAGS_SHA512)
+ hashsize = SHA512_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA512:
valmr |= SHA_MR_ALGO_SHA512;
+ hashsize = SHA512_DIGEST_SIZE;
+ break;
+
+ default:
+ break;
+ }
/* Setting CR_FIRST only for the first iteration */
- if (!(ctx->digcnt[0] || ctx->digcnt[1]))
- valcr = SHA_CR_FIRST;
+ if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
+ atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+ } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
+ const u32 *hash = (const u32 *)ctx->digest;
+
+ /*
+ * Restore the hardware context: update the User Initialize
+ * Hash Value (UIHV) with the value saved when the latest
+ * 'update' operation completed on this very same crypto
+ * request.
+ */
+ ctx->flags &= ~SHA_FLAGS_RESTORE;
+ atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
+ for (i = 0; i < hashsize / sizeof(u32); ++i)
+ atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
+ atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+ valmr |= SHA_MR_UIHV;
+ }
+ /*
+ * WARNING: If the UIHV feature is not available, the hardware CANNOT
+ * process concurrent requests: the internal registers used to store
+ * the hash/digest are still set to the partial digest output values
+ * computed during the latest round.
+ */
- atmel_sha_write(dd, SHA_CR, valcr);
atmel_sha_write(dd, SHA_MR, valmr);
}
@@ -713,23 +762,31 @@ static void atmel_sha_copy_hash(struct ahash_request *req)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
u32 *hash = (u32 *)ctx->digest;
- int i;
+ unsigned int i, hashsize;
- if (ctx->flags & SHA_FLAGS_SHA1)
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else if (ctx->flags & SHA_FLAGS_SHA224)
- for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else if (ctx->flags & SHA_FLAGS_SHA256)
- for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else if (ctx->flags & SHA_FLAGS_SHA384)
- for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else
- for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_SHA1:
+ hashsize = SHA1_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA224:
+ case SHA_FLAGS_SHA256:
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA384:
+ case SHA_FLAGS_SHA512:
+ hashsize = SHA512_DIGEST_SIZE;
+ break;
+
+ default:
+ /* Should not happen... */
+ return;
+ }
+
+ for (i = 0; i < hashsize / sizeof(u32); ++i)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ ctx->flags |= SHA_FLAGS_RESTORE;
}
static void atmel_sha_copy_ready_hash(struct ahash_request *req)
@@ -788,7 +845,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
req->base.complete(&req->base, err);
/* handle new request */
- tasklet_schedule(&dd->done_task);
+ tasklet_schedule(&dd->queue_task);
}
static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
@@ -922,36 +979,17 @@ static int atmel_sha_update(struct ahash_request *req)
static int atmel_sha_final(struct ahash_request *req)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
- struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct atmel_sha_dev *dd = tctx->dd;
-
- int err = 0;
ctx->flags |= SHA_FLAGS_FINUP;
if (ctx->flags & SHA_FLAGS_ERROR)
return 0; /* uncompleted hash is not needed */
- if (ctx->bufcnt) {
- return atmel_sha_enqueue(req, SHA_OP_FINAL);
- } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
- err = atmel_sha_hw_init(dd);
- if (err)
- goto err1;
-
- dd->flags |= SHA_FLAGS_BUSY;
- err = atmel_sha_final_req(dd);
- } else {
+ if (ctx->flags & SHA_FLAGS_PAD)
/* copy ready hash (+ finalize hmac) */
return atmel_sha_finish(req);
- }
-
-err1:
- if (err != -EINPROGRESS)
- /* done_task will not finish it, so do it here */
- atmel_sha_finish_req(req, err);
- return err;
+ return atmel_sha_enqueue(req, SHA_OP_FINAL);
}
static int atmel_sha_finup(struct ahash_request *req)
@@ -979,11 +1017,27 @@ static int atmel_sha_digest(struct ahash_request *req)
return atmel_sha_init(req) ?: atmel_sha_finup(req);
}
+
+static int atmel_sha_export(struct ahash_request *req, void *out)
+{
+ const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int atmel_sha_import(struct ahash_request *req, const void *in)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct atmel_sha_reqctx) +
- SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
+ sizeof(struct atmel_sha_reqctx));
return 0;
}
@@ -995,8 +1049,11 @@ static struct ahash_alg sha_1_256_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha1",
.cra_driver_name = "atmel-sha1",
@@ -1016,8 +1073,11 @@ static struct ahash_alg sha_1_256_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha256",
.cra_driver_name = "atmel-sha256",
@@ -1039,8 +1099,11 @@ static struct ahash_alg sha_224_alg = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha224",
.cra_driver_name = "atmel-sha224",
@@ -1062,8 +1125,11 @@ static struct ahash_alg sha_384_512_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha384",
.cra_driver_name = "atmel-sha384",
@@ -1083,8 +1149,11 @@ static struct ahash_alg sha_384_512_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha512",
.cra_driver_name = "atmel-sha512",
@@ -1100,16 +1169,18 @@ static struct ahash_alg sha_384_512_algs[] = {
},
};
+static void atmel_sha_queue_task(unsigned long data)
+{
+ struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
+
+ atmel_sha_handle_queue(dd, NULL);
+}
+
static void atmel_sha_done_task(unsigned long data)
{
struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
int err = 0;
- if (!(SHA_FLAGS_BUSY & dd->flags)) {
- atmel_sha_handle_queue(dd, NULL);
- return;
- }
-
if (SHA_FLAGS_CPU & dd->flags) {
if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
@@ -1272,14 +1343,23 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
dd->caps.has_dualbuff = 0;
dd->caps.has_sha224 = 0;
dd->caps.has_sha_384_512 = 0;
+ dd->caps.has_uihv = 0;
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x510:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ dd->caps.has_sha_384_512 = 1;
+ dd->caps.has_uihv = 1;
+ break;
case 0x420:
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
dd->caps.has_sha224 = 1;
dd->caps.has_sha_384_512 = 1;
+ dd->caps.has_uihv = 1;
break;
case 0x410:
dd->caps.has_dma = 1;
@@ -1366,6 +1446,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
(unsigned long)sha_dd);
+ tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
+ (unsigned long)sha_dd);
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
@@ -1404,9 +1486,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
- if (!sha_dd->io_base) {
+ if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(sha_dd->io_base);
goto res_err;
}
@@ -1464,6 +1546,7 @@ err_sha_dma:
iclk_unprepare:
clk_unprepare(sha_dd->iclk);
res_err:
+ tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
sha_dd_err:
dev_err(dev, "initialization failed.\n");
@@ -1484,6 +1567,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
atmel_sha_unregister_algs(sha_dd);
+ tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
if (sha_dd->caps.has_dma)
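The export()/import() pair and the explicit .statesize set above are what let the crypto layer checkpoint a partially hashed request, which is essential here since, without the UIHV feature, the hardware cannot interleave requests. A sketch of how an ahash caller would exercise that path (generic crypto API calls; handling of asynchronous -EINPROGRESS completion is omitted):

	static int sha_checkpoint_resume(struct crypto_ahash *tfm,
					 struct ahash_request *req)
	{
		void *state;
		int ret;

		state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
		if (!state)
			return -ENOMEM;

		ret = crypto_ahash_export(req, state);	/* snapshot mid-hash */
		if (!ret)
			ret = crypto_ahash_import(req, state);	/* resume later */

		kfree(state);
		return ret;
	}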
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 2c7a628d0375..bf467d7be35c 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
- if (!tdes_dd->io_base) {
+ if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(tdes_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 69d4a1326fee..44d30b45f3cc 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -534,7 +534,7 @@ static int caam_probe(struct platform_device *pdev)
* long pointers in master configuration register
*/
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
- MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE |
+ MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f7e0d8d4c3da..6fd63a600614 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -65,7 +65,7 @@ static int caam_reset_hw_jr(struct device *dev)
/*
* Shutdown JobR independent of platform property code
*/
-int caam_jr_shutdown(struct device *dev)
+static int caam_jr_shutdown(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
dma_addr_t inpbusaddr, outbusaddr;
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index a8a79975682f..0ba9c40597dc 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -455,7 +455,8 @@ struct caam_ctrl {
#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)
#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
-#define MCFGR_BURST_64 0x00000001 /* Max burst size */
+#define MCFGR_LARGE_BURST 0x00000004 /* 128/256-byte burst size */
+#define MCFGR_BURST_64 0x00000001 /* 64-byte burst size */
/* JRSTART register offsets */
#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 55a1f3951578..b750592cc936 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o ccp-platform.o
+ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o
ccp-$(CONFIG_PCI) += ccp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index d89f20c04266..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -220,6 +220,42 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
return ccp_aes_cmac_finup(req);
}
+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
+ state.null_msg = rctx->null_msg;
+ memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->null_msg = state.null_msg;
+ memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -352,10 +388,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
alg->final = ccp_aes_cmac_final;
alg->finup = ccp_aes_cmac_finup;
alg->digest = ccp_aes_cmac_digest;
+ alg->export = ccp_aes_cmac_export;
+ alg->import = ccp_aes_cmac_import;
alg->setkey = ccp_aes_cmac_setkey;
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
+ halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
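Both helpers above deliberately bounce the state through a stack-local struct: the crypto API gives no alignment guarantee for the export/import buffers, and zeroing the local first keeps stack padding from leaking into 'out'. The same pattern in isolation, with ex_state as a hypothetical state layout:

	struct ex_state {
		u64 count;
		u8 buf[16];
	};

	static void state_export(const struct ex_state *live, void *out)
	{
		struct ex_state tmp;

		memset(&tmp, 0, sizeof(tmp));	/* no padding leaks */
		tmp.count = live->count;
		memcpy(tmp.buf, live->buf, sizeof(tmp.buf));
		memcpy(out, &tmp, sizeof(tmp));	/* 'out' may be unaligned */
	}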
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 7984f910884d..89291c15015c 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -259,6 +259,7 @@ static struct crypto_alg ccp_aes_rfc3686_defaults = {
struct ccp_aes_def {
enum ccp_aes_mode mode;
+ unsigned int version;
const char *name;
const char *driver_name;
unsigned int blocksize;
@@ -269,6 +270,7 @@ struct ccp_aes_def {
static struct ccp_aes_def aes_algs[] = {
{
.mode = CCP_AES_MODE_ECB,
+ .version = CCP_VERSION(3, 0),
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
@@ -277,6 +279,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CBC,
+ .version = CCP_VERSION(3, 0),
.name = "cbc(aes)",
.driver_name = "cbc-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
@@ -285,6 +288,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CFB,
+ .version = CCP_VERSION(3, 0),
.name = "cfb(aes)",
.driver_name = "cfb-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
@@ -293,6 +297,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_OFB,
+ .version = CCP_VERSION(3, 0),
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccp",
.blocksize = 1,
@@ -301,6 +306,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CTR,
+ .version = CCP_VERSION(3, 0),
.name = "ctr(aes)",
.driver_name = "ctr-aes-ccp",
.blocksize = 1,
@@ -309,6 +315,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CTR,
+ .version = CCP_VERSION(3, 0),
.name = "rfc3686(ctr(aes))",
.driver_name = "rfc3686-ctr-aes-ccp",
.blocksize = 1,
@@ -357,8 +364,11 @@ static int ccp_register_aes_alg(struct list_head *head,
int ccp_register_aes_algs(struct list_head *head)
{
int i, ret;
+ unsigned int ccpversion = ccp_version();
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ if (aes_algs[i].version > ccpversion)
+ continue;
ret = ccp_register_aes_alg(head, &aes_algs[i]);
if (ret)
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index d14b3f28e010..8f36af62fe95 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) SHA crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -207,6 +207,46 @@ static int ccp_sha_digest(struct ahash_request *req)
return ccp_sha_finup(req);
}
+static int ccp_sha_export(struct ahash_request *req, void *out)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
+ state.type = rctx->type;
+ state.msg_bits = rctx->msg_bits;
+ state.first = rctx->first;
+ memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_sha_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->type = state.type;
+ rctx->msg_bits = state.msg_bits;
+ rctx->first = state.first;
+ memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -304,6 +344,7 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
}
struct ccp_sha_def {
+ unsigned int version;
const char *name;
const char *drv_name;
enum ccp_sha_type type;
@@ -313,6 +354,7 @@ struct ccp_sha_def {
static struct ccp_sha_def sha_algs[] = {
{
+ .version = CCP_VERSION(3, 0),
.name = "sha1",
.drv_name = "sha1-ccp",
.type = CCP_SHA_TYPE_1,
@@ -320,6 +362,7 @@ static struct ccp_sha_def sha_algs[] = {
.block_size = SHA1_BLOCK_SIZE,
},
{
+ .version = CCP_VERSION(3, 0),
.name = "sha224",
.drv_name = "sha224-ccp",
.type = CCP_SHA_TYPE_224,
@@ -327,6 +370,7 @@ static struct ccp_sha_def sha_algs[] = {
.block_size = SHA224_BLOCK_SIZE,
},
{
+ .version = CCP_VERSION(3, 0),
.name = "sha256",
.drv_name = "sha256-ccp",
.type = CCP_SHA_TYPE_256,
@@ -403,9 +447,12 @@ static int ccp_register_sha_alg(struct list_head *head,
alg->final = ccp_sha_final;
alg->finup = ccp_sha_finup;
alg->digest = ccp_sha_digest;
+ alg->export = ccp_sha_export;
+ alg->import = ccp_sha_import;
halg = &alg->halg;
halg->digestsize = def->digest_size;
+ halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
@@ -440,8 +487,11 @@ static int ccp_register_sha_alg(struct list_head *head,
int ccp_register_sha_algs(struct list_head *head)
{
int i, ret;
+ unsigned int ccpversion = ccp_version();
for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+ if (sha_algs[i].version > ccpversion)
+ continue;
ret = ccp_register_sha_alg(head, &sha_algs[i]);
if (ret)
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 76a96f0f44c6..a326ec20bfa8 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_aes_cmac_exp_ctx {
+ unsigned int null_msg;
+
+ u8 iv[AES_BLOCK_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[AES_BLOCK_SIZE];
+};
+
/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_sha_exp_ctx {
+ enum ccp_sha_type type;
+
+ u64 msg_bits;
+
+ unsigned int first;
+
+ u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
new file mode 100644
index 000000000000..7d5eab49179e
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -0,0 +1,533 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
+{
+ struct ccp_cmd_queue *cmd_q = op->cmd_q;
+ struct ccp_device *ccp = cmd_q->ccp;
+ void __iomem *cr_addr;
+ u32 cr0, cmd;
+ unsigned int i;
+ int ret = 0;
+
+ /* We could read a status register to see how many free slots
+ * are actually available, but reading that register resets it
+ * and you could lose some error information.
+ */
+ cmd_q->free_slots--;
+
+ cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
+ | (op->jobid << REQ0_JOBID_SHIFT)
+ | REQ0_WAIT_FOR_WRITE;
+
+ if (op->soc)
+ cr0 |= REQ0_STOP_ON_COMPLETE
+ | REQ0_INT_ON_COMPLETE;
+
+ if (op->ioc || !cmd_q->free_slots)
+ cr0 |= REQ0_INT_ON_COMPLETE;
+
+ /* Start at CMD_REQ1 */
+ cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
+
+ mutex_lock(&ccp->req_mutex);
+
+ /* Write CMD_REQ1 through CMD_REQx first */
+ for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
+ iowrite32(*(cr + i), cr_addr);
+
+ /* Tell the CCP to start */
+ wmb();
+ iowrite32(cr0, ccp->io_regs + CMD_REQ0);
+
+ mutex_unlock(&ccp->req_mutex);
+
+ if (cr0 & REQ0_INT_ON_COMPLETE) {
+ /* Wait for the job to complete */
+ ret = wait_event_interruptible(cmd_q->int_queue,
+ cmd_q->int_rcvd);
+ if (ret || cmd_q->cmd_error) {
+ /* On error delete all related jobs from the queue */
+ cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+
+ if (!ret)
+ ret = -EIO;
+ } else if (op->soc) {
+ /* Delete just head job from the queue on SoC */
+ cmd = DEL_Q_ACTIVE
+ | (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+ }
+
+ cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 0;
+ }
+
+ return ret;
+}
+
+static int ccp_perform_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
+ | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
+ | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
+ | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->u.aes.mode == CCP_AES_MODE_CFB)
+ cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_xts_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
+ | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
+ | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_sha(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
+ | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
+ | REQ1_INIT;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->eom) {
+ cr[0] |= REQ1_EOM;
+ cr[4] = lower_32_bits(op->u.sha.msg_bits);
+ cr[5] = upper_32_bits(op->u.sha.msg_bits);
+ } else {
+ cr[4] = 0;
+ cr[5] = 0;
+ }
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_rsa(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
+ | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->u.rsa.input_len - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_passthru(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
+ | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
+ | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM)
+ cr[1] = op->src.u.dma.length - 1;
+ else
+ cr[1] = op->dst.u.dma.length - 1;
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+ } else {
+ cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
+ cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+ }
+
+ if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+ } else {
+ cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
+ cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+ }
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_ecc(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = REQ1_ECC_AFFINE_CONVERT
+ | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
+ | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+ u32 trng_value;
+ int len = min_t(int, sizeof(trng_value), max);
+
+ /*
+ * Locking is provided by the caller so we can update device
+ * hwrng-related fields safely
+ */
+ trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+ if (!trng_value) {
+ /* Zero is returned if no data is available or if a
+ * bad-entropy error is present. Assume an error if
+ * we exceed TRNG_RETRIES reads of zero.
+ */
+ if (ccp->hwrng_retries++ > TRNG_RETRIES)
+ return -EIO;
+
+ return 0;
+ }
+
+ /* Reset the counter and save the rng value */
+ ccp->hwrng_retries = 0;
+ memcpy(data, &trng_value, len);
+
+ return len;
+}
+
+static int ccp_init(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct ccp_cmd_queue *cmd_q;
+ struct dma_pool *dma_pool;
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ unsigned int qmr, qim, i;
+ int ret;
+
+ /* Find available queues */
+ qim = 0;
+ qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+
+ /* Allocate a dma pool for this queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
+ ccp->name, i);
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ CCP_DMAPOOL_MAX_SIZE,
+ CCP_DMAPOOL_ALIGN, 0);
+ if (!dma_pool) {
+ dev_err(dev, "unable to allocate dma pool\n");
+ ret = -ENOMEM;
+ goto e_pool;
+ }
+
+ cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+ ccp->cmd_q_count++;
+
+ cmd_q->ccp = ccp;
+ cmd_q->id = i;
+ cmd_q->dma_pool = dma_pool;
+
+ /* Reserve 2 KSB regions for the queue */
+ cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
+ cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
+ ccp->ksb_count -= 2;
+
+ /* Preset some register values and masks that are queue
+ * number dependent
+ */
+ cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->int_ok = 1 << (i * 2);
+ cmd_q->int_err = 1 << ((i * 2) + 1);
+
+ cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+
+ init_waitqueue_head(&cmd_q->int_queue);
+
+ /* Build queue interrupt mask (two interrupts per queue) */
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+
+#ifdef CONFIG_ARM64
+ /* For arm64 set the recommended queue cache settings */
+ iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
+ (CMD_Q_CACHE_INC * i));
+#endif
+
+ dev_dbg(dev, "queue #%u available\n", i);
+ }
+ if (ccp->cmd_q_count == 0) {
+ dev_notice(dev, "no command queues available\n");
+ ret = -EIO;
+ goto e_pool;
+ }
+ dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+ /* Disable and clear interrupts until ready */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ /* Request an irq */
+ ret = ccp->get_irq(ccp);
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_pool;
+ }
+
+ /* Initialize the queues used to wait for KSB space and suspend */
+ init_waitqueue_head(&ccp->ksb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
+ /* Create a kthread for each queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct task_struct *kthread;
+
+ cmd_q = &ccp->cmd_q[i];
+
+ kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+ "%s-q%u", ccp->name, cmd_q->id);
+ if (IS_ERR(kthread)) {
+ dev_err(dev, "error creating queue thread (%ld)\n",
+ PTR_ERR(kthread));
+ ret = PTR_ERR(kthread);
+ goto e_kthread;
+ }
+
+ cmd_q->kthread = kthread;
+ wake_up_process(kthread);
+ }
+
+ /* Register the RNG */
+ ccp->hwrng.name = ccp->rngname;
+ ccp->hwrng.read = ccp_trng_read;
+ ret = hwrng_register(&ccp->hwrng);
+ if (ret) {
+ dev_err(dev, "error registering hwrng (%d)\n", ret);
+ goto e_kthread;
+ }
+
+ ccp_add_device(ccp);
+
+ /* Enable interrupts */
+ iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+ return 0;
+
+e_kthread:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ ccp->free_irq(ccp);
+
+e_pool:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ return ret;
+}
+
+static void ccp_destroy(struct ccp_device *ccp)
+{
+ struct ccp_cmd_queue *cmd_q;
+ struct ccp_cmd *cmd;
+ unsigned int qim, i;
+
+ /* Remove this device from the list of available units first */
+ ccp_del_device(ccp);
+
+ /* Unregister the RNG */
+ hwrng_unregister(&ccp->hwrng);
+
+ /* Stop the queue kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ /* Build queue interrupt mask (two interrupt masks per queue) */
+ qim = 0;
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+ }
+
+ /* Disable and clear interrupts */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ ccp->free_irq(ccp);
+
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ /* Flush the cmd and backlog queue */
+ while (!list_empty(&ccp->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+ while (!list_empty(&ccp->backlog)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_cmd_queue *cmd_q;
+ u32 q_int, status;
+ unsigned int i;
+
+ status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+ if (q_int) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_status);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+ /* On error, only save the first error value */
+ if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 1;
+
+ /* Acknowledge the interrupt and wake the kthread */
+ iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+ wake_up_interruptible(&cmd_q->int_queue);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ccp_actions ccp3_actions = {
+ .perform_aes = ccp_perform_aes,
+ .perform_xts_aes = ccp_perform_xts_aes,
+ .perform_sha = ccp_perform_sha,
+ .perform_rsa = ccp_perform_rsa,
+ .perform_passthru = ccp_perform_passthru,
+ .perform_ecc = ccp_perform_ecc,
+ .init = ccp_init,
+ .destroy = ccp_destroy,
+ .irqhandler = ccp_irq_handler,
+};
+
+struct ccp_vdata ccpv3 = {
+ .version = CCP_VERSION(3, 0),
+ .perform = &ccp3_actions,
+};
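The free-slot handling in ccp_do_cmd() above trades accuracy for safety: reading the hardware status register would return the true free count but also clears latched error bits, so the driver decrements a cached credit and only resynchronizes from a status snapshot captured at interrupt time. The cached-credit idea in isolation (illustrative struct; REQ0_INT_ON_COMPLETE and CMD_Q_DEPTH() are the driver's real macros):

	struct q_credit {
		unsigned int free_slots;	/* cached fast-path count */
		u32 q_status;			/* snapshot from the IRQ handler */
	};

	static u32 q_submit(struct q_credit *q, u32 cr0)
	{
		if (!--q->free_slots)
			cr0 |= REQ0_INT_ON_COMPLETE;	/* force a resync point */
		return cr0;
	}

	static void q_irq_resync(struct q_credit *q, u32 status_snapshot)
	{
		q->q_status = status_snapshot;
		q->free_slots = CMD_Q_DEPTH(q->q_status);
	}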
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 861bacc1bb94..4dbc18727235 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -16,6 +16,8 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/rwlock_types.h>
+#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
@@ -37,20 +39,107 @@ struct ccp_tasklet_data {
struct ccp_cmd *cmd;
};
-static struct ccp_device *ccp_dev;
-static inline struct ccp_device *ccp_get_device(void)
+/* List of CCPs, CCP count, read-write access lock, and access functions
+ *
+ * Lock structure: get ccp_unit_lock for reading whenever we need to
+ * examine the CCP list. While holding it for reading we can acquire
+ * the RR lock to update the round-robin next-CCP pointer. The unit lock
+ * must be acquired before the RR lock.
+ *
+ * If the unit-lock is acquired for writing, we have total control over
+ * the list, so there's no value in getting the RR lock.
+ */
+static DEFINE_RWLOCK(ccp_unit_lock);
+static LIST_HEAD(ccp_units);
+
+/* Round-robin counter */
+static DEFINE_SPINLOCK(ccp_rr_lock);
+static struct ccp_device *ccp_rr;
+
+/* Ever-increasing value to produce unique unit numbers */
+static atomic_t ccp_unit_ordinal;
+unsigned int ccp_increment_unit_ordinal(void)
{
- return ccp_dev;
+ return atomic_inc_return(&ccp_unit_ordinal);
}
-static inline void ccp_add_device(struct ccp_device *ccp)
+/**
+ * ccp_add_device - add a CCP device to the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Put this CCP on the unit list, which makes it available
+ * for use.
+ */
+void ccp_add_device(struct ccp_device *ccp)
{
- ccp_dev = ccp;
+ unsigned long flags;
+
+ write_lock_irqsave(&ccp_unit_lock, flags);
+ list_add_tail(&ccp->entry, &ccp_units);
+ if (!ccp_rr)
+ /* We already have the list lock (we're first) so this
+ * pointer can't change on us. Set its initial value.
+ */
+ ccp_rr = ccp;
+ write_unlock_irqrestore(&ccp_unit_lock, flags);
}
-static inline void ccp_del_device(struct ccp_device *ccp)
+/**
+ * ccp_del_device - remove a CCP device from the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Remove this unit from the list of devices. If the next device
+ * up for use is this one, adjust the pointer. If this is the last
+ * device, NULL the pointer.
+ */
+void ccp_del_device(struct ccp_device *ccp)
{
- ccp_dev = NULL;
+ unsigned long flags;
+
+ write_lock_irqsave(&ccp_unit_lock, flags);
+ if (ccp_rr == ccp) {
+ /* ccp_unit_lock is read/write; any read access
+ * will be suspended while we make changes to the
+ * list and RR pointer.
+ */
+ if (list_is_last(&ccp_rr->entry, &ccp_units))
+ ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+ entry);
+ else
+ ccp_rr = list_next_entry(ccp_rr, entry);
+ }
+ list_del(&ccp->entry);
+ if (list_empty(&ccp_units))
+ ccp_rr = NULL;
+ write_unlock_irqrestore(&ccp_unit_lock, flags);
+}
+
+static struct ccp_device *ccp_get_device(void)
+{
+ unsigned long flags;
+ struct ccp_device *dp = NULL;
+
+ /* We round-robin through the unit list.
+ * The (ccp_rr) pointer refers to the next unit to use.
+ */
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ if (!list_empty(&ccp_units)) {
+ spin_lock(&ccp_rr_lock);
+ dp = ccp_rr;
+ if (list_is_last(&ccp_rr->entry, &ccp_units))
+ ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+ entry);
+ else
+ ccp_rr = list_next_entry(ccp_rr, entry);
+ spin_unlock(&ccp_rr_lock);
+ }
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return dp;
}
/**
@@ -60,14 +149,41 @@ static inline void ccp_del_device(struct ccp_device *ccp)
*/
int ccp_present(void)
{
- if (ccp_get_device())
- return 0;
+ unsigned long flags;
+ int ret;
- return -ENODEV;
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ ret = list_empty(&ccp_units);
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);
/**
+ * ccp_version - get the version of the CCP device
+ *
+ * Returns the version from the first unit on the list;
+ * otherwise zero if no CCP device is present
+ */
+unsigned int ccp_version(void)
+{
+ struct ccp_device *dp;
+ unsigned long flags;
+ int ret = 0;
+
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ if (!list_empty(&ccp_units)) {
+ dp = list_first_entry(&ccp_units, struct ccp_device, entry);
+ ret = dp->vdata->version;
+ }
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ccp_version);
+
+/**
* ccp_enqueue_cmd - queue an operation for processing by the CCP
*
* @cmd: ccp_cmd struct to be processed
@@ -221,7 +337,12 @@ static void ccp_do_cmd_complete(unsigned long data)
complete(&tdata->completion);
}
-static int ccp_cmd_queue_thread(void *data)
+/**
+ * ccp_cmd_queue_thread - thread function that processes a CCP command queue
+ *
+ * @data: thread-specific data
+ */
+int ccp_cmd_queue_thread(void *data)
{
struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
struct ccp_cmd *cmd;
@@ -257,35 +378,6 @@ static int ccp_cmd_queue_thread(void *data)
return 0;
}
-static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
- struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
- u32 trng_value;
- int len = min_t(int, sizeof(trng_value), max);
-
- /*
- * Locking is provided by the caller so we can update device
- * hwrng-related fields safely
- */
- trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
- if (!trng_value) {
- /* Zero is returned if not data is available or if a
- * bad-entropy error is present. Assume an error if
- * we exceed TRNG_RETRIES reads of zero.
- */
- if (ccp->hwrng_retries++ > TRNG_RETRIES)
- return -EIO;
-
- return 0;
- }
-
- /* Reset the counter and save the rng value */
- ccp->hwrng_retries = 0;
- memcpy(data, &trng_value, len);
-
- return len;
-}
-
/**
* ccp_alloc_struct - allocate and initialize the ccp_device struct
*
@@ -309,253 +401,11 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
ccp->ksb_count = KSB_COUNT;
ccp->ksb_start = 0;
- return ccp;
-}
-
-/**
- * ccp_init - initialize the CCP device
- *
- * @ccp: ccp_device struct
- */
-int ccp_init(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct ccp_cmd_queue *cmd_q;
- struct dma_pool *dma_pool;
- char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
- unsigned int qmr, qim, i;
- int ret;
-
- /* Find available queues */
- qim = 0;
- qmr = ioread32(ccp->io_regs + Q_MASK_REG);
- for (i = 0; i < MAX_HW_QUEUES; i++) {
- if (!(qmr & (1 << i)))
- continue;
-
- /* Allocate a dma pool for this queue */
- snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
- dma_pool = dma_pool_create(dma_pool_name, dev,
- CCP_DMAPOOL_MAX_SIZE,
- CCP_DMAPOOL_ALIGN, 0);
- if (!dma_pool) {
- dev_err(dev, "unable to allocate dma pool\n");
- ret = -ENOMEM;
- goto e_pool;
- }
-
- cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
- ccp->cmd_q_count++;
-
- cmd_q->ccp = ccp;
- cmd_q->id = i;
- cmd_q->dma_pool = dma_pool;
-
- /* Reserve 2 KSB regions for the queue */
- cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
- cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
- ccp->ksb_count -= 2;
-
- /* Preset some register values and masks that are queue
- * number dependent
- */
- cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
- (CMD_Q_STATUS_INCR * i);
- cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
- (CMD_Q_STATUS_INCR * i);
- cmd_q->int_ok = 1 << (i * 2);
- cmd_q->int_err = 1 << ((i * 2) + 1);
-
- cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
-
- init_waitqueue_head(&cmd_q->int_queue);
-
- /* Build queue interrupt mask (two interrupts per queue) */
- qim |= cmd_q->int_ok | cmd_q->int_err;
+ ccp->ord = ccp_increment_unit_ordinal();
+ snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
+ snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
-#ifdef CONFIG_ARM64
- /* For arm64 set the recommended queue cache settings */
- iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
- (CMD_Q_CACHE_INC * i));
-#endif
-
- dev_dbg(dev, "queue #%u available\n", i);
- }
- if (ccp->cmd_q_count == 0) {
- dev_notice(dev, "no command queues available\n");
- ret = -EIO;
- goto e_pool;
- }
- dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
-
- /* Disable and clear interrupts until ready */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- ioread32(cmd_q->reg_int_status);
- ioread32(cmd_q->reg_status);
- }
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
- /* Request an irq */
- ret = ccp->get_irq(ccp);
- if (ret) {
- dev_err(dev, "unable to allocate an IRQ\n");
- goto e_pool;
- }
-
- /* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->ksb_queue);
- init_waitqueue_head(&ccp->suspend_queue);
-
- /* Create a kthread for each queue */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- struct task_struct *kthread;
-
- cmd_q = &ccp->cmd_q[i];
-
- kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
- "ccp-q%u", cmd_q->id);
- if (IS_ERR(kthread)) {
- dev_err(dev, "error creating queue thread (%ld)\n",
- PTR_ERR(kthread));
- ret = PTR_ERR(kthread);
- goto e_kthread;
- }
-
- cmd_q->kthread = kthread;
- wake_up_process(kthread);
- }
-
- /* Register the RNG */
- ccp->hwrng.name = "ccp-rng";
- ccp->hwrng.read = ccp_trng_read;
- ret = hwrng_register(&ccp->hwrng);
- if (ret) {
- dev_err(dev, "error registering hwrng (%d)\n", ret);
- goto e_kthread;
- }
-
- /* Make the device struct available before enabling interrupts */
- ccp_add_device(ccp);
-
- /* Enable interrupts */
- iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
-
- return 0;
-
-e_kthread:
- for (i = 0; i < ccp->cmd_q_count; i++)
- if (ccp->cmd_q[i].kthread)
- kthread_stop(ccp->cmd_q[i].kthread);
-
- ccp->free_irq(ccp);
-
-e_pool:
- for (i = 0; i < ccp->cmd_q_count; i++)
- dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
- return ret;
-}
-
-/**
- * ccp_destroy - tear down the CCP device
- *
- * @ccp: ccp_device struct
- */
-void ccp_destroy(struct ccp_device *ccp)
-{
- struct ccp_cmd_queue *cmd_q;
- struct ccp_cmd *cmd;
- unsigned int qim, i;
-
- /* Remove general access to the device struct */
- ccp_del_device(ccp);
-
- /* Unregister the RNG */
- hwrng_unregister(&ccp->hwrng);
-
- /* Stop the queue kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++)
- if (ccp->cmd_q[i].kthread)
- kthread_stop(ccp->cmd_q[i].kthread);
-
- /* Build queue interrupt mask (two interrupt masks per queue) */
- qim = 0;
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
- qim |= cmd_q->int_ok | cmd_q->int_err;
- }
-
- /* Disable and clear interrupts */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- ioread32(cmd_q->reg_int_status);
- ioread32(cmd_q->reg_status);
- }
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
- ccp->free_irq(ccp);
-
- for (i = 0; i < ccp->cmd_q_count; i++)
- dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
- /* Flush the cmd and backlog queue */
- while (!list_empty(&ccp->cmd)) {
- /* Invoke the callback directly with an error code */
- cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
- list_del(&cmd->entry);
- cmd->callback(cmd->data, -ENODEV);
- }
- while (!list_empty(&ccp->backlog)) {
- /* Invoke the callback directly with an error code */
- cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
- list_del(&cmd->entry);
- cmd->callback(cmd->data, -ENODEV);
- }
-}
-
-/**
- * ccp_irq_handler - handle interrupts generated by the CCP device
- *
- * @irq: the irq associated with the interrupt
- * @data: the data value supplied when the irq was created
- */
-irqreturn_t ccp_irq_handler(int irq, void *data)
-{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- struct ccp_cmd_queue *cmd_q;
- u32 q_int, status;
- unsigned int i;
-
- status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- q_int = status & (cmd_q->int_ok | cmd_q->int_err);
- if (q_int) {
- cmd_q->int_status = status;
- cmd_q->q_status = ioread32(cmd_q->reg_status);
- cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
- /* On error, only save the first error value */
- if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
- cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
- cmd_q->int_rcvd = 1;
-
- /* Acknowledge the interrupt and wake the kthread */
- iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
- wake_up_interruptible(&cmd_q->int_queue);
- }
- }
-
- return IRQ_HANDLED;
+ return ccp;
}
#ifdef CONFIG_PM
@@ -577,41 +427,22 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
}
#endif
-#ifdef CONFIG_X86
-static const struct x86_cpu_id ccp_support[] = {
- { X86_VENDOR_AMD, 22, },
- { },
-};
-#endif
-
static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
- struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
int ret;
- if (!x86_match_cpu(ccp_support))
- return -ENODEV;
-
- switch (cpuinfo->x86) {
- case 22:
- if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
- return -ENODEV;
-
- ret = ccp_pci_init();
- if (ret)
- return ret;
-
- /* Don't leave the driver loaded if init failed */
- if (!ccp_get_device()) {
- ccp_pci_exit();
- return -ENODEV;
- }
-
- return 0;
+ ret = ccp_pci_init();
+ if (ret)
+ return ret;
- break;
+ /* Don't leave the driver loaded if init failed */
+ if (ccp_present() != 0) {
+ ccp_pci_exit();
+ return -ENODEV;
}
+
+ return 0;
#endif
#ifdef CONFIG_ARM64
@@ -622,7 +453,7 @@ static int __init ccp_mod_init(void)
return ret;
/* Don't leave the driver loaded if init failed */
- if (!ccp_get_device()) {
+ if (ccp_present() != 0) {
ccp_platform_exit();
return -ENODEV;
}
@@ -636,13 +467,7 @@ static int __init ccp_mod_init(void)
static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
- struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
-
- switch (cpuinfo->x86) {
- case 22:
- ccp_pci_exit();
- break;
- }
+ ccp_pci_exit();
#endif
#ifdef CONFIG_ARM64
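With ccp_init()/ccp_destroy() moved behind the version abstraction, module load is now gated on ccp_present() rather than on ccp_get_device(). A minimal sketch of how ccp_present() could be backed by the unit list that ccp_add_device()/ccp_del_device() maintain; the list and lock names here are assumptions, not the driver's actual internals:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    /* Hypothetical globals for illustration. */
    static LIST_HEAD(ccp_units);
    static DEFINE_RWLOCK(ccp_unit_lock);

    int ccp_present(void)
    {
            unsigned long flags;
            int ret;

            read_lock_irqsave(&ccp_unit_lock, flags);
            /* 0 means at least one CCP initialized successfully. */
            ret = list_empty(&ccp_units) ? -ENODEV : 0;
            read_unlock_irqrestore(&ccp_unit_lock, flags);

            return ret;
    }

This preserves the "don't leave the driver loaded if init failed" behavior: if no CCP made it through init, the module backs out of the PCI/platform registration and returns -ENODEV.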
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 6ff89031fb96..7745d0be491d 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -23,6 +23,7 @@
#include <linux/hw_random.h>
#include <linux/bitops.h>
+#define MAX_CCP_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
#define MAX_HW_QUEUES 5
@@ -140,6 +141,29 @@
#define CCP_ECC_RESULT_OFFSET 60
#define CCP_ECC_RESULT_SUCCESS 0x0001
+struct ccp_op;
+
+/* Structure for computation functions that are device-specific */
+struct ccp_actions {
+ int (*perform_aes)(struct ccp_op *);
+ int (*perform_xts_aes)(struct ccp_op *);
+ int (*perform_sha)(struct ccp_op *);
+ int (*perform_rsa)(struct ccp_op *);
+ int (*perform_passthru)(struct ccp_op *);
+ int (*perform_ecc)(struct ccp_op *);
+ int (*init)(struct ccp_device *);
+ void (*destroy)(struct ccp_device *);
+ irqreturn_t (*irqhandler)(int, void *);
+};
+
+/* Structure to hold CCP version-specific values */
+struct ccp_vdata {
+ unsigned int version;
+ struct ccp_actions *perform;
+};
+
+extern struct ccp_vdata ccpv3;
+
struct ccp_device;
struct ccp_cmd;
@@ -184,6 +208,13 @@ struct ccp_cmd_queue {
} ____cacheline_aligned;
struct ccp_device {
+ struct list_head entry;
+
+ struct ccp_vdata *vdata;
+ unsigned int ord;
+ char name[MAX_CCP_NAME_LEN];
+ char rngname[MAX_CCP_NAME_LEN];
+
struct device *dev;
/*
@@ -258,18 +289,132 @@ struct ccp_device {
unsigned int axcache;
};
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_KSB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE__LAST,
+};
+
+struct ccp_dma_info {
+ dma_addr_t address;
+ unsigned int offset;
+ unsigned int length;
+ enum dma_data_direction dir;
+};
+
+struct ccp_dm_workarea {
+ struct device *dev;
+ struct dma_pool *dma_pool;
+ unsigned int length;
+
+ u8 *address;
+ struct ccp_dma_info dma;
+};
+
+struct ccp_sg_workarea {
+ struct scatterlist *sg;
+ int nents;
+
+ struct scatterlist *dma_sg;
+ struct device *dma_dev;
+ unsigned int dma_count;
+ enum dma_data_direction dma_dir;
+
+ unsigned int sg_used;
+
+ u64 bytes_left;
+};
+
+struct ccp_data {
+ struct ccp_sg_workarea sg_wa;
+ struct ccp_dm_workarea dm_wa;
+};
+
+struct ccp_mem {
+ enum ccp_memtype type;
+ union {
+ struct ccp_dma_info dma;
+ u32 ksb;
+ } u;
+};
+
+struct ccp_aes_op {
+ enum ccp_aes_type type;
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
+};
+
+struct ccp_xts_aes_op {
+ enum ccp_aes_action action;
+ enum ccp_xts_aes_unit_size unit_size;
+};
+
+struct ccp_sha_op {
+ enum ccp_sha_type type;
+ u64 msg_bits;
+};
+
+struct ccp_rsa_op {
+ u32 mod_size;
+ u32 input_len;
+};
+
+struct ccp_passthru_op {
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+};
+
+struct ccp_ecc_op {
+ enum ccp_ecc_function function;
+};
+
+struct ccp_op {
+ struct ccp_cmd_queue *cmd_q;
+
+ u32 jobid;
+ u32 ioc;
+ u32 soc;
+ u32 ksb_key;
+ u32 ksb_ctx;
+ u32 init;
+ u32 eom;
+
+ struct ccp_mem src;
+ struct ccp_mem dst;
+
+ union {
+ struct ccp_aes_op aes;
+ struct ccp_xts_aes_op xts;
+ struct ccp_sha_op sha;
+ struct ccp_rsa_op rsa;
+ struct ccp_passthru_op passthru;
+ struct ccp_ecc_op ecc;
+ } u;
+};
+
+static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
+{
+ return lower_32_bits(info->address + info->offset);
+}
+
+static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
+{
+ return upper_32_bits(info->address + info->offset) & 0x0000ffff;
+}
+
int ccp_pci_init(void);
void ccp_pci_exit(void);
int ccp_platform_init(void);
void ccp_platform_exit(void);
+void ccp_add_device(struct ccp_device *ccp);
+void ccp_del_device(struct ccp_device *ccp);
+
struct ccp_device *ccp_alloc_struct(struct device *dev);
-int ccp_init(struct ccp_device *ccp);
-void ccp_destroy(struct ccp_device *ccp);
bool ccp_queues_suspended(struct ccp_device *ccp);
-
-irqreturn_t ccp_irq_handler(int irq, void *data);
+int ccp_cmd_queue_thread(void *data);
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
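ccp_init(), ccp_destroy() and ccp_irq_handler() drop out of this header because a version-specific source file is now expected to supply them through the ccpv3 table declared above. A sketch of the assumed shape; the static function names and the version encoding are illustrative, not taken from this patch:

    static struct ccp_actions ccp3_actions = {
            .perform_aes = ccp_perform_aes,
            .perform_xts_aes = ccp_perform_xts_aes,
            .perform_sha = ccp_perform_sha,
            .perform_rsa = ccp_perform_rsa,
            .perform_passthru = ccp_perform_passthru,
            .perform_ecc = ccp_perform_ecc,
            .init = ccp_init,
            .destroy = ccp_destroy,
            .irqhandler = ccp_irq_handler,
    };

    struct ccp_vdata ccpv3 = {
            .version = 3,        /* illustrative encoding */
            .perform = &ccp3_actions,
    };

Callers then dispatch through ccp->vdata->perform->..., as the ccp-ops.c hunks below show.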
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 6613aee79b87..eefdf595f758 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -13,124 +13,12 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
+#include <linux/ccp.h>
#include "ccp-dev.h"
-enum ccp_memtype {
- CCP_MEMTYPE_SYSTEM = 0,
- CCP_MEMTYPE_KSB,
- CCP_MEMTYPE_LOCAL,
- CCP_MEMTYPE__LAST,
-};
-
-struct ccp_dma_info {
- dma_addr_t address;
- unsigned int offset;
- unsigned int length;
- enum dma_data_direction dir;
-};
-
-struct ccp_dm_workarea {
- struct device *dev;
- struct dma_pool *dma_pool;
- unsigned int length;
-
- u8 *address;
- struct ccp_dma_info dma;
-};
-
-struct ccp_sg_workarea {
- struct scatterlist *sg;
- int nents;
-
- struct scatterlist *dma_sg;
- struct device *dma_dev;
- unsigned int dma_count;
- enum dma_data_direction dma_dir;
-
- unsigned int sg_used;
-
- u64 bytes_left;
-};
-
-struct ccp_data {
- struct ccp_sg_workarea sg_wa;
- struct ccp_dm_workarea dm_wa;
-};
-
-struct ccp_mem {
- enum ccp_memtype type;
- union {
- struct ccp_dma_info dma;
- u32 ksb;
- } u;
-};
-
-struct ccp_aes_op {
- enum ccp_aes_type type;
- enum ccp_aes_mode mode;
- enum ccp_aes_action action;
-};
-
-struct ccp_xts_aes_op {
- enum ccp_aes_action action;
- enum ccp_xts_aes_unit_size unit_size;
-};
-
-struct ccp_sha_op {
- enum ccp_sha_type type;
- u64 msg_bits;
-};
-
-struct ccp_rsa_op {
- u32 mod_size;
- u32 input_len;
-};
-
-struct ccp_passthru_op {
- enum ccp_passthru_bitwise bit_mod;
- enum ccp_passthru_byteswap byte_swap;
-};
-
-struct ccp_ecc_op {
- enum ccp_ecc_function function;
-};
-
-struct ccp_op {
- struct ccp_cmd_queue *cmd_q;
-
- u32 jobid;
- u32 ioc;
- u32 soc;
- u32 ksb_key;
- u32 ksb_ctx;
- u32 init;
- u32 eom;
-
- struct ccp_mem src;
- struct ccp_mem dst;
-
- union {
- struct ccp_aes_op aes;
- struct ccp_xts_aes_op xts;
- struct ccp_sha_op sha;
- struct ccp_rsa_op rsa;
- struct ccp_passthru_op passthru;
- struct ccp_ecc_op ecc;
- } u;
-};
-
/* SHA initial context values */
static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
@@ -152,253 +40,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
-static u32 ccp_addr_lo(struct ccp_dma_info *info)
-{
- return lower_32_bits(info->address + info->offset);
-}
-
-static u32 ccp_addr_hi(struct ccp_dma_info *info)
-{
- return upper_32_bits(info->address + info->offset) & 0x0000ffff;
-}
-
-static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
-{
- struct ccp_cmd_queue *cmd_q = op->cmd_q;
- struct ccp_device *ccp = cmd_q->ccp;
- void __iomem *cr_addr;
- u32 cr0, cmd;
- unsigned int i;
- int ret = 0;
-
- /* We could read a status register to see how many free slots
- * are actually available, but reading that register resets it
- * and you could lose some error information.
- */
- cmd_q->free_slots--;
-
- cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
- | (op->jobid << REQ0_JOBID_SHIFT)
- | REQ0_WAIT_FOR_WRITE;
-
- if (op->soc)
- cr0 |= REQ0_STOP_ON_COMPLETE
- | REQ0_INT_ON_COMPLETE;
-
- if (op->ioc || !cmd_q->free_slots)
- cr0 |= REQ0_INT_ON_COMPLETE;
-
- /* Start at CMD_REQ1 */
- cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
-
- mutex_lock(&ccp->req_mutex);
-
- /* Write CMD_REQ1 through CMD_REQx first */
- for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
- iowrite32(*(cr + i), cr_addr);
-
- /* Tell the CCP to start */
- wmb();
- iowrite32(cr0, ccp->io_regs + CMD_REQ0);
-
- mutex_unlock(&ccp->req_mutex);
-
- if (cr0 & REQ0_INT_ON_COMPLETE) {
- /* Wait for the job to complete */
- ret = wait_event_interruptible(cmd_q->int_queue,
- cmd_q->int_rcvd);
- if (ret || cmd_q->cmd_error) {
- /* On error delete all related jobs from the queue */
- cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
- | op->jobid;
-
- iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
-
- if (!ret)
- ret = -EIO;
- } else if (op->soc) {
- /* Delete just head job from the queue on SoC */
- cmd = DEL_Q_ACTIVE
- | (cmd_q->id << DEL_Q_ID_SHIFT)
- | op->jobid;
-
- iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
- }
-
- cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
-
- cmd_q->int_rcvd = 0;
- }
-
- return ret;
-}
-
-static int ccp_perform_aes(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
- | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
- | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
- | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- if (op->u.aes.mode == CCP_AES_MODE_CFB)
- cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
-
- if (op->eom)
- cr[0] |= REQ1_EOM;
-
- if (op->init)
- cr[0] |= REQ1_INIT;
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_xts_aes(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
- | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
- | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- if (op->eom)
- cr[0] |= REQ1_EOM;
-
- if (op->init)
- cr[0] |= REQ1_INIT;
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_sha(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
- | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
- | REQ1_INIT;
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
-
- if (op->eom) {
- cr[0] |= REQ1_EOM;
- cr[4] = lower_32_bits(op->u.sha.msg_bits);
- cr[5] = upper_32_bits(op->u.sha.msg_bits);
- } else {
- cr[4] = 0;
- cr[5] = 0;
- }
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_rsa(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
- | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
- | REQ1_EOM;
- cr[1] = op->u.rsa.input_len - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_passthru(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
- | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
- | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
-
- if (op->src.type == CCP_MEMTYPE_SYSTEM)
- cr[1] = op->src.u.dma.length - 1;
- else
- cr[1] = op->dst.u.dma.length - 1;
-
- if (op->src.type == CCP_MEMTYPE_SYSTEM) {
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
-
- if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
- cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
- } else {
- cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
- cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
- }
-
- if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
- } else {
- cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
- cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
- }
-
- if (op->eom)
- cr[0] |= REQ1_EOM;
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_ecc(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = REQ1_ECC_AFFINE_CONVERT
- | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
- | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
- | REQ1_EOM;
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
{
int start;
@@ -837,7 +478,7 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
op.u.passthru.byte_swap = byte_swap;
- return ccp_perform_passthru(&op);
+ return cmd_q->ccp->vdata->perform->perform_passthru(&op);
}
static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
@@ -969,7 +610,7 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
}
}
- ret = ccp_perform_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_src;
@@ -1131,7 +772,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.soc = 1;
}
- ret = ccp_perform_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1296,7 +937,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
if (!src.sg_wa.bytes_left)
op.eom = 1;
- ret = ccp_perform_xts_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_xts_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1453,7 +1094,7 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (sha->final && !src.sg_wa.bytes_left)
op.eom = 1;
- ret = ccp_perform_sha(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_sha(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_data;
@@ -1633,7 +1274,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.rsa.mod_size = rsa->key_size;
op.u.rsa.input_len = i_len;
- ret = ccp_perform_rsa(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_rsa(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1758,7 +1399,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
op.dst.u.dma.offset = dst.sg_wa.sg_used;
op.dst.u.dma.length = op.src.u.dma.length;
- ret = ccp_perform_passthru(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1870,7 +1511,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.ecc.function = cmd->u.ecc.function;
- ret = ccp_perform_ecc(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -2034,7 +1675,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.ecc.function = cmd->u.ecc.function;
- ret = ccp_perform_ecc(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 7690467c42f8..0bf262e36b6b 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -59,9 +59,11 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
ccp_pci->msix_count = ret;
for (v = 0; v < ccp_pci->msix_count; v++) {
/* Set the interrupt names and request the irqs */
- snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
+ snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
+ ccp->name, v);
ccp_pci->msix[v].vector = msix_entry[v].vector;
- ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
+ ret = request_irq(ccp_pci->msix[v].vector,
+ ccp->vdata->perform->irqhandler,
0, ccp_pci->msix[v].name, dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
@@ -94,7 +96,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
return ret;
ccp->irq = pdev->irq;
- ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+ ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
+ ccp->name, dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
goto e_msi;
@@ -179,6 +182,12 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto e_err;
ccp->dev_specific = ccp_pci;
+ ccp->vdata = (struct ccp_vdata *)id->driver_data;
+ if (!ccp->vdata || !ccp->vdata->version) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
@@ -221,7 +230,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_set_drvdata(dev, ccp);
- ret = ccp_init(ccp);
+ ret = ccp->vdata->perform->init(ccp);
if (ret)
goto e_iomap;
@@ -251,7 +260,7 @@ static void ccp_pci_remove(struct pci_dev *pdev)
if (!ccp)
return;
- ccp_destroy(ccp);
+ ccp->vdata->perform->destroy(ccp);
pci_iounmap(pdev, ccp->io_map);
@@ -312,7 +321,7 @@ static int ccp_pci_resume(struct pci_dev *pdev)
#endif
static const struct pci_device_id ccp_pci_table[] = {
- { PCI_VDEVICE(AMD, 0x1537), },
+ { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 66dd7c9d08c3..351f28d8c336 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -32,6 +32,33 @@ struct ccp_platform {
int coherent;
};
+static const struct acpi_device_id ccp_acpi_match[];
+static const struct of_device_id ccp_of_match[];
+
+static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ const struct of_device_id *match;
+
+ match = of_match_node(ccp_of_match, pdev->dev.of_node);
+ if (match && match->data)
+ return (struct ccp_vdata *)match->data;
+#endif
+ return NULL;
+}
+
+static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_ACPI
+ const struct acpi_device_id *match;
+
+ match = acpi_match_device(ccp_acpi_match, &pdev->dev);
+ if (match && match->driver_data)
+ return (struct ccp_vdata *)match->driver_data;
+#endif
+ return NULL;
+}
+
static int ccp_get_irq(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
@@ -43,7 +70,8 @@ static int ccp_get_irq(struct ccp_device *ccp)
return ret;
ccp->irq = ret;
- ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+ ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
+ ccp->name, dev);
if (ret) {
dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
return ret;
@@ -106,6 +134,13 @@ static int ccp_platform_probe(struct platform_device *pdev)
goto e_err;
ccp->dev_specific = ccp_platform;
+ ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev)
+ : ccp_get_acpi_version(pdev);
+ if (!ccp->vdata || !ccp->vdata->version) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
@@ -137,7 +172,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
dev_set_drvdata(dev, ccp);
- ret = ccp_init(ccp);
+ ret = ccp->vdata->perform->init(ccp);
if (ret)
goto e_err;
@@ -155,7 +190,7 @@ static int ccp_platform_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
- ccp_destroy(ccp);
+ ccp->vdata->perform->destroy(ccp);
dev_notice(dev, "disabled\n");
@@ -214,7 +249,7 @@ static int ccp_platform_resume(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id ccp_acpi_match[] = {
- { "AMDI0C00", 0 },
+ { "AMDI0C00", (kernel_ulong_t)&ccpv3 },
{ },
};
MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
@@ -222,7 +257,8 @@ MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id ccp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a" },
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&ccpv3 },
{ },
};
MODULE_DEVICE_TABLE(of, ccp_of_match);
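Because both match tables now carry a pointer to the version data, a later CCP revision could be wired up without touching the probe path: define its own ccp_vdata and reference it from new entries. A sketch with invented names (ccpv5 and the second compatible string do not exist in this patch):

    extern struct ccp_vdata ccpv5;  /* hypothetical */

    static const struct of_device_id ccp_of_match[] = {
            { .compatible = "amd,ccp-seattle-v1a",
              .data = (const void *)&ccpv3 },
            { .compatible = "amd,ccp-example-v5",  /* hypothetical */
              .data = (const void *)&ccpv5 },
            { },
    };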
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index e52496a172d0..2296934455fc 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1031,6 +1031,18 @@ static int aead_perform(struct aead_request *req, int encrypt,
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
+ buf = chainup_buffers(dev, req->src, crypt->auth_len,
+ &src_hook, flags, src_direction);
+ req_ctx->src = src_hook.next;
+ crypt->src_buf = src_hook.phys_next;
+ if (!buf)
+ goto free_buf_src;
+
+ lastlen = buf->buf_len;
+ if (lastlen >= authsize)
+ crypt->icv_rev_aes = buf->phys_addr +
+ buf->buf_len - authsize;
+
req_ctx->dst = NULL;
if (req->src != req->dst) {
@@ -1055,20 +1067,6 @@ static int aead_perform(struct aead_request *req, int encrypt,
}
}
- buf = chainup_buffers(dev, req->src, crypt->auth_len,
- &src_hook, flags, src_direction);
- req_ctx->src = src_hook.next;
- crypt->src_buf = src_hook.phys_next;
- if (!buf)
- goto free_buf_src;
-
- if (!encrypt || !req_ctx->dst) {
- lastlen = buf->buf_len;
- if (lastlen >= authsize)
- crypt->icv_rev_aes = buf->phys_addr +
- buf->buf_len - authsize;
- }
-
if (unlikely(lastlen < authsize)) {
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index c0656e7f37b5..80239ae69527 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
cesa->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cesa->regs))
- return -ENOMEM;
+ return PTR_ERR(cesa->regs);
ret = mv_cesa_dev_dma_init(cesa);
if (ret)
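The one-line cesa.c fix matters because devm_ioremap_resource() encodes its failure reason in the returned pointer; reporting a blanket -ENOMEM discarded it. The standard idiom:

    void __iomem *regs;

    regs = devm_ioremap_resource(dev, res);
    if (IS_ERR(regs))
            return PTR_ERR(regs);  /* propagates -EBUSY, -EINVAL, ... */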
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index bd985e72520b..74071e45ada0 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -588,6 +588,7 @@ struct mv_cesa_ahash_dma_req {
struct mv_cesa_tdma_req base;
u8 *padding;
dma_addr_t padding_dma;
+ u8 *cache;
dma_addr_t cache_dma;
};
@@ -609,7 +610,7 @@ struct mv_cesa_ahash_req {
struct mv_cesa_ahash_std_req std;
} req;
struct mv_cesa_op_ctx op_tmpl;
- u8 *cache;
+ u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
unsigned int cache_ptr;
u64 len;
int src_nents;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 683cca9ac3c4..7ca2e0f9dc2e 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -45,69 +45,25 @@ mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
return mv_cesa_req_dma_iter_next_op(&iter->base);
}
-static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
- gfp_t flags)
+static inline int
+mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
- struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;
-
- creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
- &dreq->cache_dma);
- if (!creq->cache)
- return -ENOMEM;
-
- return 0;
-}
-
-static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
- gfp_t flags)
-{
- creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
- if (!creq->cache)
+ req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
+ &req->cache_dma);
+ if (!req->cache)
return -ENOMEM;
return 0;
}
-static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
-{
- struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int ret;
-
- if (creq->cache)
- return 0;
-
- if (creq->req.base.type == CESA_DMA_REQ)
- ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
- else
- ret = mv_cesa_ahash_std_alloc_cache(creq, flags);
-
- return ret;
-}
-
-static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
-{
- dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
- creq->req.dma.cache_dma);
-}
-
-static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
-{
- kfree(creq->cache);
-}
-
-static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
+static inline void
+mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
- if (!creq->cache)
+ if (!req->cache)
return;
- if (creq->req.base.type == CESA_DMA_REQ)
- mv_cesa_ahash_dma_free_cache(creq);
- else
- mv_cesa_ahash_std_free_cache(creq);
-
- creq->cache = NULL;
+ dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
+ req->cache_dma);
}
static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
@@ -146,6 +102,7 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
+ mv_cesa_ahash_dma_free_cache(&creq->req.dma);
mv_cesa_dma_cleanup(&creq->req.dma.base);
}
@@ -161,8 +118,6 @@ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- mv_cesa_ahash_free_cache(creq);
-
if (creq->req.base.type == CESA_DMA_REQ)
mv_cesa_ahash_dma_last_cleanup(req);
}
@@ -445,14 +400,6 @@ static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- int ret;
-
- if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
- !creq->last_req) {
- ret = mv_cesa_ahash_alloc_cache(req);
- if (ret)
- return ret;
- }
if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
*cached = true;
@@ -505,10 +452,17 @@ mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
gfp_t flags)
{
struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
+ int ret;
if (!creq->cache_ptr)
return 0;
+ ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
+ if (ret)
+ return ret;
+
+ memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
+
return mv_cesa_dma_add_data_transfer(chain,
CESA_SA_DATA_SRAM_OFFSET,
ahashdreq->cache_dma,
@@ -848,10 +802,6 @@ static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
if (!cache_ptr)
return 0;
- ret = mv_cesa_ahash_alloc_cache(req);
- if (ret)
- return ret;
-
memcpy(creq->cache, cache, cache_ptr);
creq->cache_ptr = cache_ptr;
@@ -860,9 +810,14 @@ static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
static int mv_cesa_md5_init(struct ahash_request *req)
{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
+ creq->state[0] = MD5_H0;
+ creq->state[1] = MD5_H1;
+ creq->state[2] = MD5_H2;
+ creq->state[3] = MD5_H3;
mv_cesa_ahash_init(req, &tmpl, true);
@@ -923,9 +878,15 @@ struct ahash_alg mv_md5_alg = {
static int mv_cesa_sha1_init(struct ahash_request *req)
{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
+ creq->state[0] = SHA1_H0;
+ creq->state[1] = SHA1_H1;
+ creq->state[2] = SHA1_H2;
+ creq->state[3] = SHA1_H3;
+ creq->state[4] = SHA1_H4;
mv_cesa_ahash_init(req, &tmpl, false);
@@ -986,9 +947,18 @@ struct ahash_alg mv_sha1_alg = {
static int mv_cesa_sha256_init(struct ahash_request *req)
{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
+ creq->state[0] = SHA256_H0;
+ creq->state[1] = SHA256_H1;
+ creq->state[2] = SHA256_H2;
+ creq->state[3] = SHA256_H3;
+ creq->state[4] = SHA256_H4;
+ creq->state[5] = SHA256_H5;
+ creq->state[6] = SHA256_H6;
+ creq->state[7] = SHA256_H7;
mv_cesa_ahash_init(req, &tmpl, false);
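The init functions above now seed creq->state in software with the standard initial values rather than relying on the engine's defaults. For reference, the MD5 seeds written above are the RFC 1321 constants exposed by <crypto/md5.h>; the helper below is illustrative only:

    #include <linux/types.h>
    #include <crypto/md5.h>

    static void md5_seed(u32 state[4])   /* hypothetical helper */
    {
            state[0] = MD5_H0;  /* 0x67452301 */
            state[1] = MD5_H1;  /* 0xefcdab89 */
            state[2] = MD5_H2;  /* 0x98badcfe */
            state[3] = MD5_H3;  /* 0x10325476 */
    }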
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 046c1c45411b..d94e25df503b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -308,7 +308,7 @@ int nx842_crypto_compress(struct crypto_tfm *tfm,
h = !n && add_header ? hdrsize : 0;
if (ignore)
- pr_warn("interal error, ignore is set %x\n", ignore);
+ pr_warn("internal error, ignore is set %x\n", ignore);
ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
if (ret)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index dd355bd19474..d420ec751c7c 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -36,6 +36,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
@@ -152,13 +153,10 @@ struct omap_aes_dev {
unsigned long flags;
int err;
- spinlock_t lock;
- struct crypto_queue queue;
-
struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
struct ablkcipher_request *req;
+ struct crypto_engine *engine;
/*
* total is used by PIO mode for book keeping so introduce
@@ -532,9 +530,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- dd->flags &= ~FLAGS_BUSY;
-
- req->base.complete(&req->base, err);
+ crypto_finalize_request(dd->engine, req, err);
}
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -604,34 +600,25 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
}
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
- struct crypto_async_request *async_req, *backlog;
- struct omap_aes_ctx *ctx;
- struct omap_aes_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0, len;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = ablkcipher_enqueue_request(&dd->queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&dd->queue);
- async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
+ return crypto_transfer_request_to_engine(dd->engine, req);
- if (!async_req)
- return ret;
+ return 0;
+}
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+static int omap_aes_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+ struct omap_aes_reqctx *rctx;
+ int len;
- req = ablkcipher_request_cast(async_req);
+ if (!dd)
+ return -ENODEV;
/* assign new request to device */
dd->req = req;
@@ -662,16 +649,20 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd->ctx = ctx;
ctx->dd = dd;
- err = omap_aes_write_ctrl(dd);
- if (!err)
- err = omap_aes_crypt_dma_start(dd);
- if (err) {
- /* aes_task will not finish it, so do it here */
- omap_aes_finish_req(dd, err);
- tasklet_schedule(&dd->queue_task);
- }
+ return omap_aes_write_ctrl(dd);
+}
- return ret; /* return ret, which is enqueue return value */
+static int omap_aes_crypt_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+
+ if (!dd)
+ return -ENODEV;
+
+ return omap_aes_crypt_dma_start(dd);
}
static void omap_aes_done_task(unsigned long data)
@@ -704,18 +695,10 @@ static void omap_aes_done_task(unsigned long data)
}
omap_aes_finish_req(dd, 0);
- omap_aes_handle_queue(dd, NULL);
pr_debug("exit\n");
}
-static void omap_aes_queue_task(unsigned long data)
-{
- struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-
- omap_aes_handle_queue(dd, NULL);
-}
-
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -1175,9 +1158,6 @@ static int omap_aes_probe(struct platform_device *pdev)
dd->dev = dev;
platform_set_drvdata(pdev, dd);
- spin_lock_init(&dd->lock);
- crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
-
err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
omap_aes_get_res_pdev(dd, pdev, &res);
if (err)
@@ -1209,7 +1189,6 @@ static int omap_aes_probe(struct platform_device *pdev)
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
- tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
err = omap_aes_dma_init(dd);
if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
@@ -1250,7 +1229,20 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
+ /* Initialize crypto engine */
+ dd->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dd->engine)
+ goto err_algs;
+
+ dd->engine->prepare_request = omap_aes_prepare_req;
+ dd->engine->crypt_one_request = omap_aes_crypt_req;
+ err = crypto_engine_start(dd->engine);
+ if (err)
+ goto err_engine;
+
return 0;
+err_engine:
+ crypto_engine_exit(dd->engine);
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
@@ -1260,7 +1252,6 @@ err_algs:
omap_aes_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
pm_runtime_disable(dev);
err_res:
dd = NULL;
@@ -1286,8 +1277,8 @@ static int omap_aes_remove(struct platform_device *pdev)
crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]);
+ crypto_engine_exit(dd->engine);
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
dd = NULL;
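The omap-aes rework above swaps the driver's private crypto_queue plus queue tasklet for the generic crypto engine, which serializes requests and completes backlogged ones itself. The wiring, condensed from the hunks above (error handling elided, not a drop-in):

    /* probe: */
    dd->engine = crypto_engine_alloc_init(dev, 1);
    if (!dd->engine)
            goto err_algs;
    dd->engine->prepare_request = omap_aes_prepare_req;
    dd->engine->crypt_one_request = omap_aes_crypt_req;
    err = crypto_engine_start(dd->engine);

    /* enqueue: replaces ablkcipher_enqueue_request() + tasklet kick */
    return crypto_transfer_request_to_engine(dd->engine, req);

    /* completion, from the done tasklet: */
    crypto_finalize_request(dd->engine, req, err);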
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index f96d427e502c..5a07208ce778 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -55,8 +55,8 @@
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
-#define ADF_C62X_DEVICE_NAME "c62x"
-#define ADF_C62XVF_DEVICE_NAME "c62xvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index e78a1d7d88fc..b40d9c8dad96 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -121,7 +121,6 @@ static void adf_device_reset_worker(struct work_struct *work)
adf_dev_restarting_notify(accel_dev);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
- adf_dev_restore(accel_dev);
if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
/* The device hanged and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
index ef5988afd4c6..b5484bfa6996 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_user.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -58,7 +58,7 @@ struct adf_user_cfg_key_val {
uint64_t padding3;
};
enum adf_cfg_val_type type;
-};
+} __packed;
struct adf_user_cfg_section {
char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
@@ -70,7 +70,7 @@ struct adf_user_cfg_section {
struct adf_user_cfg_section *next;
uint64_t padding3;
};
-};
+} __packed;
struct adf_user_cfg_ctl_data {
union {
@@ -78,5 +78,5 @@ struct adf_user_cfg_ctl_data {
uint64_t padding;
};
uint8_t device_id;
-};
+} __packed;
#endif
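These structures cross the user/kernel boundary through the control device, so their layout must be identical no matter how the two sides were compiled; __packed removes compiler-inserted padding that can otherwise differ, notably between 32-bit user space and a 64-bit kernel. A generic, stand-alone illustration (not QAT code):

    #include <stdint.h>
    #include <stdio.h>

    struct demo { uint8_t id; uint64_t val; };  /* compiler may pad */
    struct demo_packed {
            uint8_t id;
            uint64_t val;
    } __attribute__((packed));

    int main(void)
    {
            /* Typically prints "16 9" on x86-64. */
            printf("%zu %zu\n", sizeof(struct demo),
                   sizeof(struct demo_packed));
            return 0;
    }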
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 0e82ce3c383e..976b01e58afb 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
#else
static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
+
+static inline int adf_init_pf_wq(void)
+{
+ return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
#endif
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 5c897e6e7994..3c3f948290ca 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
if (adf_init_aer())
goto err_aer;
+ if (adf_init_pf_wq())
+ goto err_pf_wq;
+
if (qat_crypto_register())
goto err_crypto_register;
return 0;
err_crypto_register:
+ adf_exit_pf_wq();
+err_pf_wq:
adf_exit_aer();
err_aer:
adf_chr_drv_destroy();
@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
adf_exit_aer();
+ adf_exit_pf_wq();
qat_crypto_unregister();
adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock);
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index f267d9e42e0b..d7dd18d9bef8 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -49,7 +49,6 @@
#include "adf_transport_internal.h"
#define ADF_ARB_NUM 4
-#define ADF_ARB_REQ_RING_NUM 8
#define ADF_ARB_REG_SIZE 0x4
#define ADF_ARB_WTR_SIZE 0x20
#define ADF_ARB_OFFSET 0x30000
@@ -64,15 +63,6 @@
ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
(ADF_ARB_REG_SLOT * index), value)
-#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
- ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
- ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
- ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
- ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
- (ADF_ARB_REG_SIZE * index), value)
-
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
(ADF_ARB_REG_SIZE * index), value)
@@ -99,15 +89,6 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
for (arb = 0; arb < ADF_ARB_NUM; arb++)
WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
- /* Setup service weighting */
- for (arb = 0; arb < ADF_ARB_NUM; arb++)
- for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
- WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
-
- /* Setup ring response ordering */
- for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
- WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
-
/* Setup worker queue registers */
for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WQCFG(csr, i, i);
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 1117a8b58280..38a0415e767d 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
int i;
u32 reg;
- /* Workqueue for PF2VF responses */
- pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
- if (!pf2vf_resp_wq)
- return -ENOMEM;
-
for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
i++, vf_info++) {
/* This ptr will be populated when VFs will be created */
@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
kfree(accel_dev->pf.vf_info);
accel_dev->pf.vf_info = NULL;
-
- if (pf2vf_resp_wq) {
- destroy_workqueue(pf2vf_resp_wq);
- pf2vf_resp_wq = NULL;
- }
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+ /* Workqueue for PF2VF responses */
+ pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+
+ return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+ if (pf2vf_resp_wq) {
+ destroy_workqueue(pf2vf_resp_wq);
+ pf2vf_resp_wq = NULL;
+ }
+}
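Giving the PF2VF response workqueue module-scope lifetime means SR-IOV enable/disable cycles no longer create and destroy it each time, and an allocation failure now surfaces at module load. A sketch of an equivalent pairing using the newer alloc_workqueue() interface; this is an alternative, not what the patch uses:

    #include <linux/workqueue.h>

    static struct workqueue_struct *pf2vf_resp_wq;

    int __init adf_init_pf_wq(void)
    {
            /* behavior comparable to create_workqueue() */
            pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq",
                                            WQ_MEM_RECLAIM, 1);
            return pf2vf_resp_wq ? 0 : -ENOMEM;
    }

    void adf_exit_pf_wq(void)
    {
            if (pf2vf_resp_wq) {
                    destroy_workqueue(pf2vf_resp_wq);
                    pf2vf_resp_wq = NULL;
            }
    }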
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index d97db990955d..5d1ee7e53492 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -112,27 +112,27 @@ enum icp_qat_uof_mem_region {
};
enum icp_qat_uof_regtype {
- ICP_NO_DEST,
- ICP_GPA_REL,
- ICP_GPA_ABS,
- ICP_GPB_REL,
- ICP_GPB_ABS,
- ICP_SR_REL,
- ICP_SR_RD_REL,
- ICP_SR_WR_REL,
- ICP_SR_ABS,
- ICP_SR_RD_ABS,
- ICP_SR_WR_ABS,
- ICP_DR_REL,
- ICP_DR_RD_REL,
- ICP_DR_WR_REL,
- ICP_DR_ABS,
- ICP_DR_RD_ABS,
- ICP_DR_WR_ABS,
- ICP_LMEM,
- ICP_LMEM0,
- ICP_LMEM1,
- ICP_NEIGH_REL,
+ ICP_NO_DEST = 0,
+ ICP_GPA_REL = 1,
+ ICP_GPA_ABS = 2,
+ ICP_GPB_REL = 3,
+ ICP_GPB_ABS = 4,
+ ICP_SR_REL = 5,
+ ICP_SR_RD_REL = 6,
+ ICP_SR_WR_REL = 7,
+ ICP_SR_ABS = 8,
+ ICP_SR_RD_ABS = 9,
+ ICP_SR_WR_ABS = 10,
+ ICP_DR_REL = 19,
+ ICP_DR_RD_REL = 20,
+ ICP_DR_WR_REL = 21,
+ ICP_DR_ABS = 22,
+ ICP_DR_RD_ABS = 23,
+ ICP_DR_WR_ABS = 24,
+ ICP_LMEM = 26,
+ ICP_LMEM0 = 27,
+ ICP_LMEM1 = 28,
+ ICP_NEIGH_REL = 31,
};
enum icp_qat_css_fwtype {
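Spelling the register-type values out pins them to a non-contiguous encoding (note the gaps at 11-18, 25 and 29-30) that the earlier implicit enumeration, which assigned 0..20 sequentially, could not represent; presumably these must match the firmware's encoding. If that assumption holds, a compile-time spot check keeps them from drifting:

    #include <linux/bug.h>

    static inline void icp_qat_regtype_sanity(void)
    {
            BUILD_BUG_ON(ICP_SR_WR_ABS != 10);
            BUILD_BUG_ON(ICP_DR_REL != 19);
            BUILD_BUG_ON(ICP_LMEM != 26);
            BUILD_BUG_ON(ICP_NEIGH_REL != 31);
    }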
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59e4c3af15ed..1e8852a8a057 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1064,8 +1064,7 @@ static int qat_alg_aead_init(struct crypto_aead *tfm,
if (IS_ERR(ctx->hash_tfm))
return PTR_ERR(ctx->hash_tfm);
ctx->qat_hash_alg = hash;
- crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
- sizeof(struct qat_crypto_request));
+ crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
@@ -1114,8 +1113,7 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
spin_lock_init(&ctx->lock);
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
- sizeof(struct qat_crypto_request);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
ctx->tfm = tfm;
return 0;
}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 51c594fdacdc..e5c0727d4876 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -340,14 +340,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
if (!ret)
return -EINPROGRESS;
-unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.enc.m);
- else
- if (!dma_mapping_error(dev, qat_req->in.enc.m))
- dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
- DMA_TO_DEVICE);
+
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
@@ -356,15 +358,14 @@ unmap_dst:
if (!dma_mapping_error(dev, qat_req->out.enc.c))
dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
DMA_FROM_DEVICE);
-unmap_in_params:
- if (!dma_mapping_error(dev, qat_req->phy_in))
- dma_unmap_single(dev, qat_req->phy_in,
- sizeof(struct qat_rsa_input_params),
- DMA_TO_DEVICE);
- if (!dma_mapping_error(dev, qat_req->phy_out))
- dma_unmap_single(dev, qat_req->phy_out,
- sizeof(struct qat_rsa_output_params),
- DMA_TO_DEVICE);
+unmap_src:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.enc.m);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.enc.m))
+ dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
return ret;
}
@@ -472,14 +473,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
if (!ret)
return -EINPROGRESS;
-unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.dec.c);
- else
- if (!dma_mapping_error(dev, qat_req->in.dec.c))
- dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
- DMA_TO_DEVICE);
+
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
@@ -488,15 +491,14 @@ unmap_dst:
if (!dma_mapping_error(dev, qat_req->out.dec.m))
dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
DMA_FROM_DEVICE);
-unmap_in_params:
- if (!dma_mapping_error(dev, qat_req->phy_in))
- dma_unmap_single(dev, qat_req->phy_in,
- sizeof(struct qat_rsa_input_params),
- DMA_TO_DEVICE);
- if (!dma_mapping_error(dev, qat_req->phy_out))
- dma_unmap_single(dev, qat_req->phy_out,
- sizeof(struct qat_rsa_output_params),
- DMA_TO_DEVICE);
+unmap_src:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.dec.c);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.dec.c))
+ dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
return ret;
}
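Both RSA paths now unwind in strict reverse order of setup: the output and input parameter blocks, mapped last, are unmapped first, and the source buffer, mapped first, is released last, so a jump to any label frees exactly what was set up before the failure. The general shape of the idiom, with hypothetical setup_*/teardown_* helpers:

    int example(void)   /* schematic only; helpers are invented */
    {
            void *a, *b, *c;

            a = setup_a();
            if (!a)
                    return -ENOMEM;
            b = setup_b();
            if (!b)
                    goto unwind_a;
            c = setup_c();
            if (!c)
                    goto unwind_b;
            return 0;

    unwind_b:
            teardown_b(b);
    unwind_a:
            teardown_a(a);
            return -ENOMEM;
    }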
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 25d15f19c2b3..9b961b37a282 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -688,7 +688,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
int mflag = 0;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
- for (ae = 0; ae <= max_ae; ae++) {
+ for (ae = 0; ae < max_ae; ae++) {
if (!test_bit(ae,
(unsigned long *)&handle->hal_handle->ae_mask))
continue;
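The qat_uclo change is a plain off-by-one: max_ae is a count, so valid engine indices run 0 through max_ae - 1 and the loop bound must be '<'. Since the body already skips engines absent from ae_mask, iterating the mask directly would be an equivalent alternative (a sketch, not the patch's approach):

    #include <linux/bitops.h>

    unsigned long ae;

    for_each_set_bit(ae, (unsigned long *)&handle->hal_handle->ae_mask,
                     max_ae) {
            /* ae is a valid index of a present engine here */
    }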
diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
index 7051c6c715f3..30f91297b4b6 100644
--- a/drivers/crypto/rockchip/Makefile
+++ b/drivers/crypto/rockchip/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
rk_crypto-objs := rk3288_crypto.o \
rk3288_crypto_ablkcipher.o \
+ rk3288_crypto_ahash.o
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index da9c73dce4af..af508258d2ea 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -208,6 +208,8 @@ static void rk_crypto_tasklet_cb(unsigned long data)
if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
dev->ablk_req = ablkcipher_request_cast(async_req);
+ else
+ dev->ahash_req = ahash_request_cast(async_req);
err = dev->start(dev);
if (err)
dev->complete(dev, err);
@@ -220,6 +222,9 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
&rk_cbc_des_alg,
&rk_ecb_des3_ede_alg,
&rk_cbc_des3_ede_alg,
+ &rk_ahash_sha1,
+ &rk_ahash_sha256,
+ &rk_ahash_md5,
};
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
@@ -229,15 +234,24 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
- err = crypto_register_alg(&rk_cipher_algs[i]->alg);
+ if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ err = crypto_register_alg(
+ &rk_cipher_algs[i]->alg.crypto);
+ else
+ err = crypto_register_ahash(
+ &rk_cipher_algs[i]->alg.hash);
if (err)
goto err_cipher_algs;
}
return 0;
err_cipher_algs:
- for (k = 0; k < i; k++)
- crypto_unregister_alg(&rk_cipher_algs[k]->alg);
+ for (k = 0; k < i; k++) {
+ if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
+ crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
+ else
+ crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
+ }
return err;
}
@@ -245,8 +259,12 @@ static void rk_crypto_unregister(void)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++)
- crypto_unregister_alg(&rk_cipher_algs[i]->alg);
+ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+ if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
+ else
+ crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+ }
}
static void rk_crypto_action(void *data)
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index e499c2c6c903..d7b71fea320b 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -6,6 +6,10 @@
#include <crypto/algapi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <crypto/internal/hash.h>
+
+#include <crypto/md5.h>
+#include <crypto/sha.h>
#define _SBF(v, f) ((v) << (f))
@@ -149,6 +153,28 @@
#define RK_CRYPTO_TDES_KEY3_0 0x0130
#define RK_CRYPTO_TDES_KEY3_1 0x0134
+/* HASH */
+#define RK_CRYPTO_HASH_CTRL 0x0180
+#define RK_CRYPTO_HASH_SWAP_DO BIT(3)
+#define RK_CRYPTO_HASH_SWAP_DI BIT(2)
+#define RK_CRYPTO_HASH_SHA1 _SBF(0x00, 0)
+#define RK_CRYPTO_HASH_MD5 _SBF(0x01, 0)
+#define RK_CRYPTO_HASH_SHA256 _SBF(0x02, 0)
+#define RK_CRYPTO_HASH_PRNG _SBF(0x03, 0)
+
+#define RK_CRYPTO_HASH_STS 0x0184
+#define RK_CRYPTO_HASH_DONE BIT(0)
+
+#define RK_CRYPTO_HASH_MSG_LEN 0x0188
+#define RK_CRYPTO_HASH_DOUT_0 0x018c
+#define RK_CRYPTO_HASH_DOUT_1 0x0190
+#define RK_CRYPTO_HASH_DOUT_2 0x0194
+#define RK_CRYPTO_HASH_DOUT_3 0x0198
+#define RK_CRYPTO_HASH_DOUT_4 0x019c
+#define RK_CRYPTO_HASH_DOUT_5 0x01a0
+#define RK_CRYPTO_HASH_DOUT_6 0x01a4
+#define RK_CRYPTO_HASH_DOUT_7 0x01a8
+
#define CRYPTO_READ(dev, offset) \
readl_relaxed(((dev)->reg + (offset)))
#define CRYPTO_WRITE(dev, offset, val) \
@@ -166,6 +192,7 @@ struct rk_crypto_info {
struct crypto_queue queue;
struct tasklet_struct crypto_tasklet;
struct ablkcipher_request *ablk_req;
+ struct ahash_request *ahash_req;
/* device lock */
spinlock_t lock;
@@ -195,15 +222,36 @@ struct rk_crypto_info {
void (*unload_data)(struct rk_crypto_info *dev);
};
+/* the private variable of hash */
+struct rk_ahash_ctx {
+ struct rk_crypto_info *dev;
+ /* for fallback */
+ struct crypto_ahash *fallback_tfm;
+};
+
+/* the private variable of hash for fallback */
+struct rk_ahash_rctx {
+ struct ahash_request fallback_req;
+};
+
/* the private variable of cipher */
struct rk_cipher_ctx {
struct rk_crypto_info *dev;
unsigned int keylen;
};
+enum alg_type {
+ ALG_TYPE_HASH,
+ ALG_TYPE_CIPHER,
+};
+
struct rk_crypto_tmp {
- struct rk_crypto_info *dev;
- struct crypto_alg alg;
+ struct rk_crypto_info *dev;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg hash;
+ } alg;
+ enum alg_type type;
};
extern struct rk_crypto_tmp rk_ecb_aes_alg;
@@ -213,4 +261,8 @@ extern struct rk_crypto_tmp rk_cbc_des_alg;
extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
+extern struct rk_crypto_tmp rk_ahash_sha1;
+extern struct rk_crypto_tmp rk_ahash_sha256;
+extern struct rk_crypto_tmp rk_ahash_md5;
+
#endif
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index d98b681f6c06..b5a3afe222e4 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -336,7 +336,7 @@ static int rk_ablk_cra_init(struct crypto_tfm *tfm)
struct crypto_alg *alg = tfm->__crt_alg;
struct rk_crypto_tmp *algt;
- algt = container_of(alg, struct rk_crypto_tmp, alg);
+ algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);
ctx->dev = algt->dev;
ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
@@ -357,7 +357,8 @@ static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
}
struct rk_crypto_tmp rk_ecb_aes_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-rk",
.cra_priority = 300,
@@ -381,7 +382,8 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
};
struct rk_crypto_tmp rk_cbc_aes_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-rk",
.cra_priority = 300,
@@ -406,7 +408,8 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
};
struct rk_crypto_tmp rk_ecb_des_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-rk",
.cra_priority = 300,
@@ -430,7 +433,8 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
};
struct rk_crypto_tmp rk_cbc_des_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-rk",
.cra_priority = 300,
@@ -455,7 +459,8 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
};
struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-ede-rk",
.cra_priority = 300,
@@ -480,7 +485,8 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
};
struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-ede-rk",
.cra_priority = 300,
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
new file mode 100644
index 000000000000..718588219f75
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -0,0 +1,404 @@
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
+ */
+#include "rk3288_crypto.h"
+
+/*
+ * The IC cannot hash a zero-length message,
+ * so return the well-known digest of the empty message instead.
+ */
+
+static int zero_message_process(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int rk_digest_size = crypto_ahash_digestsize(tfm);
+
+ switch (rk_digest_size) {
+ case SHA1_DIGEST_SIZE:
+ memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
+ break;
+ case SHA256_DIGEST_SIZE:
+ memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
+ break;
+ case MD5_DIGEST_SIZE:
+ memcpy(req->result, md5_zero_message_hash, rk_digest_size);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
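+
+/*
+ * For reference, the constants copied above are the usual empty-message
+ * digests (a sanity note, not something the driver relies on beyond the
+ * memcpy above):
+ *	MD5("")     = d41d8cd98f00b204e9800998ecf8427e
+ *	SHA-1("")   = da39a3ee5e6b4b0d3255bfef95601890afd80709
+ *	SHA-256("") = e3b0c44298fc1c149afbf4c8996fb924
+ *	              27ae41e4649b934ca495991b7852b855
+ */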
+
+static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
+{
+ if (dev->ahash_req->base.complete)
+ dev->ahash_req->base.complete(&dev->ahash_req->base, err);
+}
+
+static void rk_ahash_reg_init(struct rk_crypto_info *dev)
+{
+ int reg_status = 0;
+
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
+ RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
+ reg_status &= (~RK_CRYPTO_HASH_FLUSH);
+ reg_status |= _SBF(0xffff, 16);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+ memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
+ RK_CRYPTO_HRDMA_DONE_ENA);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
+ RK_CRYPTO_HRDMA_DONE_INT);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
+ RK_CRYPTO_HASH_SWAP_DO);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
+ RK_CRYPTO_BYTESWAP_BRFIFO |
+ RK_CRYPTO_BYTESWAP_BTFIFO);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
+}
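+
+/*
+ * Note on _SBF(0xffff, 16) above: the upper half of RK_CRYPTO_CTRL seems
+ * to act as a per-bit write-enable mask (the same pattern appears in
+ * crypto_ahash_dma_start() below, where RK_CRYPTO_HASH_START is written
+ * together with itself shifted by 16), so setting all mask bits makes
+ * the whole low half writable. This is an inference from the code, not
+ * a quoted datasheet note.
+ */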
+
+static int rk_ahash_init(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int rk_ahash_update(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int rk_ahash_final(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int rk_ahash_finup(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int rk_ahash_import(struct ahash_request *req, const void *in)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int rk_ahash_export(struct ahash_request *req, void *out)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+static int rk_ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct rk_crypto_info *dev = NULL;
+ unsigned long flags;
+ int ret;
+
+ if (!req->nbytes)
+ return zero_message_process(req);
+
+ dev = tctx->dev;
+ dev->total = req->nbytes;
+ dev->left_bytes = req->nbytes;
+ dev->aligned = 0;
+ dev->mode = 0;
+ dev->align_size = 4;
+ dev->sg_dst = NULL;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->nents = sg_nents(req->src);
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ dev->mode = RK_CRYPTO_HASH_SHA1;
+ break;
+ case SHA256_DIGEST_SIZE:
+ dev->mode = RK_CRYPTO_HASH_SHA256;
+ break;
+ case MD5_DIGEST_SIZE:
+ dev->mode = RK_CRYPTO_HASH_MD5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rk_ahash_reg_init(dev);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = crypto_enqueue_request(&dev->queue, &req->base);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ tasklet_schedule(&dev->crypto_tasklet);
+
+ /*
+ * It takes the engine some time to process the data after the last
+ * DMA transfer completes.
+ *
+ * The wait depends on the length of that final chunk, so no fixed
+ * delay fits here. Sleeping 10-50 us per poll keeps the loop from
+ * wasting CPU time while still responding quickly once the DMA
+ * completes.
+ */
+ while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+ usleep_range(10, 50);
+
+ memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+ crypto_ahash_digestsize(tfm));
+
+ return 0;
+}
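+
+/*
+ * A minimal sketch of an equivalent bounded wait using
+ * readl_poll_timeout() from <linux/iopoll.h> -- an alternative, not what
+ * the driver does (the loop above polls with no timeout):
+ *
+ *	u32 sts;
+ *
+ *	if (readl_poll_timeout(dev->reg + RK_CRYPTO_HASH_STS, sts,
+ *			       sts & RK_CRYPTO_HASH_DONE, 10, 100000))
+ *		return -ETIMEDOUT;
+ */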
+
+static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
+{
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
+ (RK_CRYPTO_HASH_START << 16));
+}
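+
+/*
+ * Note: (dev->count + 3) / 4 above is DIV_ROUND_UP(dev->count, 4), i.e.
+ * the byte count rounded up to 32-bit words -- HRDMAL apparently takes
+ * the DMA length in words rather than bytes.
+ */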
+
+static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
+{
+ int err;
+
+ err = dev->load_data(dev, dev->sg_src, NULL);
+ if (!err)
+ crypto_ahash_dma_start(dev);
+ return err;
+}
+
+static int rk_ahash_start(struct rk_crypto_info *dev)
+{
+ return rk_ahash_set_data_start(dev);
+}
+
+static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
+{
+ int err = 0;
+
+ dev->unload_data(dev);
+ if (dev->left_bytes) {
+ if (dev->aligned) {
+ if (sg_is_last(dev->sg_src)) {
+ dev_warn(dev->dev, "[%s:%d], Lack of data\n",
+ __func__, __LINE__);
+ err = -ENOMEM;
+ goto out_rx;
+ }
+ dev->sg_src = sg_next(dev->sg_src);
+ }
+ err = rk_ahash_set_data_start(dev);
+ } else {
+ dev->complete(dev, 0);
+ }
+
+out_rx:
+ return err;
+}
+
+static int rk_cra_hash_init(struct crypto_tfm *tfm)
+{
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct rk_crypto_tmp *algt;
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+ tctx->dev = algt->dev;
+ tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
+ if (!tctx->dev->addr_vir) {
+ dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n");
+ return -ENOMEM;
+ }
+ tctx->dev->start = rk_ahash_start;
+ tctx->dev->update = rk_ahash_crypto_rx;
+ tctx->dev->complete = rk_ahash_crypto_complete;
+
+ /* for fallback */
+ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback_tfm)) {
+ dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+ return PTR_ERR(tctx->fallback_tfm);
+ }
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct rk_ahash_rctx) +
+ crypto_ahash_reqsize(tctx->fallback_tfm));
+
+ return tctx->dev->enable_clk(tctx->dev);
+}
+
+static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+{
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ free_page((unsigned long)tctx->dev->addr_vir);
+ return tctx->dev->disable_clk(tctx->dev);
+}
+
+struct rk_crypto_tmp rk_ahash_sha1 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "rk-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+struct rk_crypto_tmp rk_ahash_sha256 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "rk-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+struct rk_crypto_tmp rk_ahash_md5 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "rk-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index f214a8755827..5f161a9777e3 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -224,6 +224,7 @@ static inline struct samsung_aes_variant *find_s5p_sss_version
{
if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
const struct of_device_id *match;
+
match = of_match_node(s5p_sss_dt_match,
pdev->dev.of_node);
return (struct samsung_aes_variant *)match->data;
@@ -382,7 +383,7 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
void __iomem *keystart;
if (iv)
- memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
if (keylen == AES_KEYSIZE_256)
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
@@ -391,13 +392,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
else
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
- memcpy(keystart, key, keylen);
+ memcpy_toio(keystart, key, keylen);
}
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
struct ablkcipher_request *req = dev->req;
-
uint32_t aes_control;
int err;
unsigned long flags;
@@ -518,7 +518,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct s5p_aes_dev *dev = ctx->dev;
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of AES blocks\n");
+ dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
}
@@ -566,7 +566,7 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
- struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->dev = s5p_dev;
tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
@@ -701,7 +701,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_algs;
}
- pr_info("s5p-sss driver registered\n");
+ dev_info(dev, "s5p-sss driver registered\n");
return 0;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 6c4f91c5e6b3..c3f3d89e4831 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -182,7 +182,6 @@ struct sahara_sha_reqctx {
u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
u8 context[SHA256_DIGEST_SIZE + 4];
- struct mutex mutex;
unsigned int mode;
unsigned int digest_size;
unsigned int context_size;
@@ -1096,7 +1095,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
if (!req->nbytes && !last)
return 0;
- mutex_lock(&rctx->mutex);
rctx->last = last;
if (!rctx->active) {
@@ -1109,7 +1107,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
mutex_unlock(&dev->queue_mutex);
wake_up_process(dev->kthread);
- mutex_unlock(&rctx->mutex);
return ret;
}
@@ -1137,8 +1134,6 @@ static int sahara_sha_init(struct ahash_request *req)
rctx->context_size = rctx->digest_size + 4;
rctx->active = 0;
- mutex_init(&rctx->mutex);
-
return 0;
}
@@ -1167,26 +1162,18 @@ static int sahara_sha_digest(struct ahash_request *req)
static int sahara_sha_export(struct ahash_request *req, void *out)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
- memcpy(out, ctx, sizeof(struct sahara_ctx));
- memcpy(out + sizeof(struct sahara_sha_reqctx), rctx,
- sizeof(struct sahara_sha_reqctx));
+ memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
return 0;
}
static int sahara_sha_import(struct ahash_request *req, const void *in)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
- memcpy(ctx, in, sizeof(struct sahara_ctx));
- memcpy(rctx, in + sizeof(struct sahara_sha_reqctx),
- sizeof(struct sahara_sha_reqctx));
+ memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
return 0;
}
@@ -1272,6 +1259,7 @@ static struct ahash_alg sha_v3_algs[] = {
.export = sahara_sha_export,
.import = sahara_sha_import,
.halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sahara-sha1",
@@ -1299,6 +1287,7 @@ static struct ahash_alg sha_v4_algs[] = {
.export = sahara_sha_export,
.import = sahara_sha_import,
.halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sahara-sha256",
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index a19ee127edca..7be3fbcd8d78 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -251,11 +251,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
+ dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
mode,
oi, mi.length, ileft, areq->nbytes, rx_cnt,
- oo, mo.length, oleft, areq->nbytes, tx_cnt,
- todo, ob);
+ oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);
if (tx_cnt == 0)
continue;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a0d4a08313ae..aae05547b924 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
ptr->eptr = upper_32_bits(dma_addr);
}
+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+ struct talitos_ptr *src_ptr, bool is_sec1)
+{
+ dst_ptr->ptr = src_ptr->ptr;
+ if (!is_sec1)
+ dst_ptr->eptr = src_ptr->eptr;
+}
+
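+/*
+ * copy_talitos_ptr() mirrors to_talitos_ptr() above: SEC1 descriptor
+ * pointers carry no extended (eptr) field, so only ptr is copied there,
+ * while later SEC engines also copy eptr, which holds the upper 32 bits
+ * of the DMA address. This is a reading of the two helpers, not a
+ * quoted reference-manual note.
+ */
+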
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
bool is_sec1)
{
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
-
/* hmac data */
desc->ptr[1].len = cpu_to_be16(areq->assoclen);
if (sg_count > 1 &&
(ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
areq->assoclen,
&edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
-
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr), 0);
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
+
+ tbl_off += ret;
} else {
to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;
- if (sg_count > 1 &&
- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
- sg_link_tbl_len,
- &edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+ areq->assoclen, 0);
+ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+ areq->assoclen, sg_link_tbl_len,
+ &edesc->link_tbl[tbl_off])) >
+ 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
- } else
- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+ tbl_off += ret;
+ } else {
+ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+ }
/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->icv_ool = false;
- if (sg_count > 1 &&
- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+ areq->assoclen, 0);
+ } else if ((sg_count =
+ sg_to_link_tbl_offset(areq->dst, sg_count,
areq->assoclen, cryptlen,
- &edesc->link_tbl[tbl_off])) >
- 1) {
+ &edesc->link_tbl[tbl_off])) > 1) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dma_len, DMA_BIDIRECTIONAL);
edesc->icv_ool = true;
- } else
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+ } else {
+ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+ }
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
struct talitos_alg_template algt;
};
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+ struct talitos_crypto_alg *talitos_alg)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
struct talitos_private *priv;
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
-
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
+
+ return talitos_init_common(ctx, talitos_alg);
+}
+
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- talitos_cra_init(crypto_aead_tfm(tfm));
- return 0;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.aead);
+
+ return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 4c243c1ffc7f..790f7cadc1ed 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "[%s]: ioremap failed!", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index d6fdc583ce5d..574e87c7f2b8 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "%s: ioremap() failed!\n", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
spin_lock_init(&device_data->ctx_lock);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 64281bb2f650..4de78c552251 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -61,7 +61,7 @@ config DEVFREQ_GOV_USERSPACE
Sets the frequency at the user specified one.
This governor returns the user configured frequency if there
has been an input to /sys/devices/.../power/devfreq_set_freq.
- Otherwise, the governor does not change the frequnecy
+ Otherwise, the governor does not change the frequency
given at the initialization.
comment "DEVFREQ Drivers"
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 155c1464948e..4a2c07ee6677 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -34,6 +34,8 @@
#include <linux/poll.h>
#include <linux/reservation.h>
+#include <uapi/linux/dma-buf.h>
+
static inline int is_dma_buf_file(struct file *);
struct dma_buf_list {
@@ -251,11 +253,55 @@ out:
return events;
}
+static long dma_buf_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct dma_buf *dmabuf;
+ struct dma_buf_sync sync;
+ enum dma_data_direction direction;
+ int ret;
+
+ dmabuf = file->private_data;
+
+ switch (cmd) {
+ case DMA_BUF_IOCTL_SYNC:
+ if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
+ return -EFAULT;
+
+ if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
+ return -EINVAL;
+
+ switch (sync.flags & DMA_BUF_SYNC_RW) {
+ case DMA_BUF_SYNC_READ:
+ direction = DMA_FROM_DEVICE;
+ break;
+ case DMA_BUF_SYNC_WRITE:
+ direction = DMA_TO_DEVICE;
+ break;
+ case DMA_BUF_SYNC_RW:
+ direction = DMA_BIDIRECTIONAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sync.flags & DMA_BUF_SYNC_END)
+ ret = dma_buf_end_cpu_access(dmabuf, direction);
+ else
+ ret = dma_buf_begin_cpu_access(dmabuf, direction);
+
+ return ret;
+ default:
+ return -ENOTTY;
+ }
+}
+
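+/*
+ * Userspace usage sketch (struct dma_buf_sync and the flags come from
+ * <linux/dma-buf.h>; fd is assumed to be a valid dma-buf file
+ * descriptor): CPU access is bracketed by SYNC_START and SYNC_END.
+ *
+ *	struct dma_buf_sync sync = {
+ *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
+ *	};
+ *
+ *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
+ *	... touch the mmap()ed contents ...
+ *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
+ *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
+ */
+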
static const struct file_operations dma_buf_fops = {
.release = dma_buf_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
+ .unlocked_ioctl = dma_buf_ioctl,
};
/*
@@ -539,13 +585,11 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* preparations. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to prepare cpu access for.
- * @start: [in] start of range for cpu access.
- * @len: [in] length of range for cpu access.
* @direction: [in] length of range for cpu access.
*
* Can return negative error values, returns 0 on success.
*/
-int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
int ret = 0;
@@ -554,8 +598,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
return -EINVAL;
if (dmabuf->ops->begin_cpu_access)
- ret = dmabuf->ops->begin_cpu_access(dmabuf, start,
- len, direction);
+ ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
return ret;
}
@@ -567,19 +610,21 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
* actions. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to complete cpu access for.
- * @start: [in] start of range for cpu access.
- * @len: [in] length of range for cpu access.
* @direction: [in] length of range for cpu access.
*
- * This call must always succeed.
+ * Can return negative error values, returns 0 on success.
*/
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
- enum dma_data_direction direction)
+int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
{
+ int ret = 0;
+
WARN_ON(!dmabuf);
if (dmabuf->ops->end_cpu_access)
- dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+ ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 79b1390f2016..d96d87c56f2e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -341,12 +341,13 @@ config MV_XOR
config MXS_DMA
bool "MXS DMA support"
- depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
+ depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL
select STMP_DEVICE
select DMA_ENGINE
help
Support the MXS DMA engine. This engine including APBH-DMA
- and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
+ and APBX-DMA is integrated into Freescale
+ i.MX23/28/MX6Q/MX6DL/MX6UL chips.
config MX3_IPU
bool "MX3x Image Processing Unit support"
@@ -408,15 +409,6 @@ config PXA_DMA
16 to 32 channels for peripheral to memory or memory to memory
transfers.
-config QCOM_BAM_DMA
- tristate "QCOM BAM DMA support"
- depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- ---help---
- Enable support for the QCOM BAM DMA controller. This controller
- provides DMA capabilities for a variety of on-chip devices.
-
config SIRF_DMA
tristate "CSR SiRFprimaII/SiRFmarco DMA support"
depends on ARCH_SIRF
@@ -539,6 +531,8 @@ config ZX_DMA
# driver files
source "drivers/dma/bestcomm/Kconfig"
+source "drivers/dma/qcom/Kconfig"
+
source "drivers/dma/dw/Kconfig"
source "drivers/dma/hsu/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2dd0a067a0ca..6084127c1486 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
-obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
@@ -67,4 +66,5 @@ obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-y += qcom/
obj-y += xilinx/
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index eed6bda01790..4a748c3435d7 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -438,7 +438,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
return ERR_PTR(-ENODEV);
}
- dev_dbg(dev, "found DMA channel \"%s\" at index %d\n", name, index);
+ dev_dbg(dev, "Looking for DMA channel \"%s\" at index %d...\n", name, index);
return acpi_dma_request_slave_chan_by_index(dev, index);
}
EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 64f5d1bdbb48..8e304b1befc5 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -176,6 +176,7 @@
#define AT_XDMAC_MAX_CHAN 0x20
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
+#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
#define AT_XDMAC_DMA_BUSWIDTHS\
(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -1395,8 +1396,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct at_xdmac_desc *desc, *_desc;
struct list_head *descs_list;
enum dma_status ret;
- int residue;
- u32 cur_nda, mask, value;
+ int residue, retry;
+ u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
@@ -1433,7 +1434,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
cpu_relax();
}
+ /*
+ * When computing the residue we need to read two registers, and we
+ * cannot do so atomically. AT_XDMAC_CNDA tells us where we stand in
+ * the descriptor list, while AT_XDMAC_CUBC tells us how much data
+ * remains in the current descriptor.
+ * Since the DMA channel is not paused (to avoid losing data), the
+ * current descriptor may change between the AT_XDMAC_CNDA and
+ * AT_XDMAC_CUBC reads.
+ * For that reason, after reading AT_XDMAC_CUBC we read AT_XDMAC_CNDA
+ * a second time to check that we are still on the same descriptor;
+ * if AT_XDMAC_CNDA has changed, AT_XDMAC_CUBC must be read again.
+ * Memory barriers enforce the ordering of the register reads.
+ * A maximum retry count bounds the loop, which could otherwise go on
+ * practically forever when transferring a lot of data through small
+ * buffers.
+ */
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ rmb();
+ cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+ rmb();
+ check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+
+ if (likely(cur_nda == check_nda))
+ break;
+
+ cur_nda = check_nda;
+ rmb();
+ cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ }
+
+ if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
+ ret = DMA_ERROR;
+ goto spin_unlock;
+ }
+
/*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
@@ -1446,7 +1482,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
break;
}
- residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+ residue += cur_ubc << dwidth;
dma_set_residue(txstate, residue);
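
/*
 * The retry loop above is a generic "consistent snapshot" idiom; a
 * minimal sketch with hypothetical read_A()/read_B() helpers:
 *
 *	a = read_A();
 *	rmb();
 *	b = read_B();
 *	for (retry = 0; retry < MAX_RETRIES; retry++) {
 *		rmb();
 *		a2 = read_A();
 *		if (a2 == a)
 *			break;	/* A did not move, so b matches a */
 *		a = a2;
 *		rmb();
 *		b = read_B();
 *	}
 */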
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c50a247be2e0..0cb259c59916 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -496,6 +496,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
caps->src_addr_widths = device->src_addr_widths;
caps->dst_addr_widths = device->dst_addr_widths;
caps->directions = device->directions;
+ caps->max_burst = device->max_burst;
caps->residue_granularity = device->residue_granularity;
caps->descriptor_reuse = device->descriptor_reuse;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1f0e29..97199b3c25a2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
if (dwc->initialized == true)
return;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
- } else {
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
- }
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = param;
- if (!dws || dws->dma_dev != chan->device->dev)
+ if (dws->dma_dev != chan->device->dev)
return false;
/* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
+ /*
+ * We need controller-specific data to set up slave transfers.
+ */
+ if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+ return -EINVAL;
+ }
+
/* Enable controller here if needed */
if (!dw->in_use)
dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+
+ /* Clear custom channel configuration */
+ dwc->src_id = 0;
+ dwc->dst_id = 0;
+
+ dwc->src_master = 0;
+ dwc->dst_master = 0;
+
dwc->initialized = false;
/* Disable interrupts */
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 241ff2b1402b..0a50c18d85b8 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -150,7 +150,7 @@ enum dw_dma_msize {
#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
#define DWC_CTLL_DST_DEC (1<<7)
#define DWC_CTLL_DST_FIX (2<<7)
-#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */
+#define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */
#define DWC_CTLL_SRC_DEC (1<<9)
#define DWC_CTLL_SRC_FIX (2<<9)
#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e3d7fcb69b4c..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -869,6 +869,13 @@ static int edma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void edma_synchronize(struct dma_chan *chan)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+
+ vchan_synchronize(&echan->vchan);
+}
+
static int edma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
@@ -1231,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct edma_desc *edesc;
dma_addr_t src_addr, dst_addr;
enum dma_slave_buswidth dev_width;
+ bool use_intermediate = false;
u32 burst;
int i, ret, nslots;
@@ -1272,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
* but the synchronization is difficult to achieve with Cyclic and
* cannot be guaranteed, so we error out early.
*/
- if (nslots > MAX_NR_SG)
- return NULL;
+ if (nslots > MAX_NR_SG) {
+ /*
+ * If the burst and period sizes are the same, we can put
+ * the full buffer into a single period and activate
+ * intermediate interrupts. This will produce interrupts
+ * after each burst, which is also after each desired period.
+ */
+ if (burst == period_len) {
+ period_len = buf_len;
+ nslots = 2;
+ use_intermediate = true;
+ } else {
+ return NULL;
+ }
+ }
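+
+	/*
+	 * Worked example (hypothetical numbers): a 64 KiB cyclic buffer
+	 * with period_len = burst = 64 bytes would need 1024 slots, far
+	 * above MAX_NR_SG; collapsed to a single 64 KiB period with
+	 * intermediate interrupts, an interrupt still fires per 64-byte
+	 * burst, i.e. per original period.
+	 */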
edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
GFP_ATOMIC);
@@ -1351,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
/*
* Enable period interrupt only if it is requested
*/
- if (tx_flags & DMA_PREP_INTERRUPT)
+ if (tx_flags & DMA_PREP_INTERRUPT) {
edesc->pset[i].param.opt |= TCINTEN;
+
+ /* Also enable intermediate interrupts if necessary */
+ if (use_intermediate)
+ edesc->pset[i].param.opt |= ITCINTEN;
+ }
}
/* Place the cyclic channel to highest priority queue */
@@ -1365,36 +1391,36 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
static void edma_completion_handler(struct edma_chan *echan)
{
struct device *dev = echan->vchan.chan.device->dev;
- struct edma_desc *edesc = echan->edesc;
-
- if (!edesc)
- return;
+ struct edma_desc *edesc;
spin_lock(&echan->vchan.lock);
- if (edesc->cyclic) {
- vchan_cyclic_callback(&edesc->vdesc);
- spin_unlock(&echan->vchan.lock);
- return;
- } else if (edesc->processed == edesc->pset_nr) {
- edesc->residue = 0;
- edma_stop(echan);
- vchan_cookie_complete(&edesc->vdesc);
- echan->edesc = NULL;
-
- dev_dbg(dev, "Transfer completed on channel %d\n",
- echan->ch_num);
- } else {
- dev_dbg(dev, "Sub transfer completed on channel %d\n",
- echan->ch_num);
-
- edma_pause(echan);
-
- /* Update statistics for tx_status */
- edesc->residue -= edesc->sg_len;
- edesc->residue_stat = edesc->residue;
- edesc->processed_stat = edesc->processed;
+ edesc = echan->edesc;
+ if (edesc) {
+ if (edesc->cyclic) {
+ vchan_cyclic_callback(&edesc->vdesc);
+ spin_unlock(&echan->vchan.lock);
+ return;
+ } else if (edesc->processed == edesc->pset_nr) {
+ edesc->residue = 0;
+ edma_stop(echan);
+ vchan_cookie_complete(&edesc->vdesc);
+ echan->edesc = NULL;
+
+ dev_dbg(dev, "Transfer completed on channel %d\n",
+ echan->ch_num);
+ } else {
+ dev_dbg(dev, "Sub transfer completed on channel %d\n",
+ echan->ch_num);
+
+ edma_pause(echan);
+
+ /* Update statistics for tx_status */
+ edesc->residue -= edesc->sg_len;
+ edesc->residue_stat = edesc->residue;
+ edesc->processed_stat = edesc->processed;
+ }
+ edma_execute(echan);
}
- edma_execute(echan);
spin_unlock(&echan->vchan.lock);
}
@@ -1563,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
- struct platform_device *tc_pdev;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF) || !tc)
- return;
-
- tc_pdev = of_find_device_by_node(tc->node);
- if (!tc_pdev) {
- pr_err("%s: TPTC device is not found\n", __func__);
- return;
- }
- if (!pm_runtime_enabled(&tc_pdev->dev))
- pm_runtime_enable(&tc_pdev->dev);
-
- if (enable)
- ret = pm_runtime_get_sync(&tc_pdev->dev);
- else
- ret = pm_runtime_put_sync(&tc_pdev->dev);
-
- if (ret < 0)
- pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
- enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -1625,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
echan->hw_triggered ? "HW" : "SW");
- edma_tc_set_pm_state(echan->tc, true);
-
return 0;
err_slot:
@@ -1663,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
echan->alloced = false;
}
- edma_tc_set_pm_state(echan->tc, false);
echan->tc = NULL;
echan->hw_triggered = false;
@@ -1837,6 +1834,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
s_ddev->device_pause = edma_dma_pause;
s_ddev->device_resume = edma_dma_resume;
s_ddev->device_terminate_all = edma_terminate_all;
+ s_ddev->device_synchronize = edma_synchronize;
s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
@@ -1862,6 +1860,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
m_ddev->device_pause = edma_dma_pause;
m_ddev->device_resume = edma_dma_resume;
m_ddev->device_terminate_all = edma_terminate_all;
+ m_ddev->device_synchronize = edma_synchronize;
m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
@@ -2408,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
int i;
for (i = 0; i < ecc->num_channels; i++) {
- if (echan[i].alloced) {
+ if (echan[i].alloced)
edma_setup_interrupt(&echan[i], false);
- edma_tc_set_pm_state(echan[i].tc, false);
- }
}
return 0;
@@ -2441,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
/* Set up channel -> slot mapping for the entry slot */
edma_set_chmap(&echan[i], echan[i].slot[0]);
-
- edma_tc_set_pm_state(echan[i].tc, true);
}
}
@@ -2466,7 +2461,8 @@ static struct platform_driver edma_driver = {
static int edma_tptc_probe(struct platform_device *pdev)
{
- return 0;
+ pm_runtime_enable(&pdev->dev);
+ return pm_runtime_get_sync(&pdev->dev);
}
static struct platform_driver edma_tptc_driver = {
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 57ff46284f15..21f08cc3352b 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -421,23 +421,25 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
desc->size);
}
- switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
- case M2P_INTERRUPT_STALL:
- /* Disable interrupts */
- control = readl(edmac->regs + M2P_CONTROL);
- control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
- m2p_set_control(edmac, control);
-
- return INTERRUPT_DONE;
-
- case M2P_INTERRUPT_NFB:
- if (ep93xx_dma_advance_active(edmac))
- m2p_fill_desc(edmac);
+ /*
+ * Even the latest E2 silicon revision sometimes asserts the STALL
+ * interrupt instead of NFB. Therefore we treat them equally, deciding
+ * based on the amount of data we still have to transfer.
+ */
+ if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
+ return INTERRUPT_UNKNOWN;
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2p_fill_desc(edmac);
return INTERRUPT_NEXT_BUFFER;
}
- return INTERRUPT_UNKNOWN;
+ /* Disable interrupts */
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
+
+ return INTERRUPT_DONE;
}
/*
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 2209f75fdf05..aac85c30c2cf 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -522,6 +522,8 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
chan_dbg(chan, "LD %p callback\n", desc);
txd->callback(txd->callback_param);
}
+
+ dma_descriptor_unmap(txd);
}
/* Run any dependencies */
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145edb936..ee510515ce18 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
if (hsuc->direction == DMA_MEM_TO_DEV) {
bsr = config->dst_maxburst;
- mtsr = config->dst_addr_width;
+ mtsr = config->src_addr_width;
} else if (hsuc->direction == DMA_DEV_TO_MEM) {
bsr = config->src_maxburst;
- mtsr = config->src_addr_width;
+ mtsr = config->dst_addr_width;
}
hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- return sr;
+ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}
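+
+/*
+ * The DESCE/CDESC bits masked off above only track per-descriptor
+ * bookkeeping, so they are hidden from callers of this helper to keep
+ * the IRQ path from treating them as actionable channel status -- an
+ * interpretation of the mask, not a quoted datasheet note.
+ */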
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
struct hsu_dma_desc *desc = hsuc->desc;
- size_t bytes = desc->length;
+ size_t bytes = 0;
int i;
- i = desc->active % HSU_DMA_CHAN_NR_DESC;
+ for (i = desc->active; i < desc->nents; i++)
+ bytes += desc->sg[i].len;
+
+ i = HSU_DMA_CHAN_NR_DESC - 1;
do {
bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
} while (--i >= 0);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee8cd05..6b070c22b1df 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE BIT(15)
+#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 3cb7b2c78197..1953e57505f4 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -289,6 +289,9 @@ static void idma64_desc_fill(struct idma64_chan *idma64c,
/* Trigger an interrupt after the last block is transferred */
lli->ctllo |= IDMA64C_CTLL_INT_EN;
+
+ /* Disable LLP transfer in the last block */
+ lli->ctllo &= ~(IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
}
static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index 8423f13ed0da..6b816878e5e7 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -16,7 +16,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "virt-dma.h"
@@ -71,7 +71,7 @@
#define IDMA64C_CFGH_SRC_PER(x) ((x) << 0) /* src peripheral */
#define IDMA64C_CFGH_DST_PER(x) ((x) << 4) /* dst peripheral */
#define IDMA64C_CFGH_RD_ISSUE_THD(x) ((x) << 8)
-#define IDMA64C_CFGH_RW_ISSUE_THD(x) ((x) << 18)
+#define IDMA64C_CFGH_WR_ISSUE_THD(x) ((x) << 18)
/* Interrupt registers */
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 21539d5c54c3..bd09961443b1 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -31,6 +31,7 @@
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
+#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
@@ -290,24 +291,30 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
}
static struct ioat_ring_ent *
-ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *desc;
struct ioatdma_device *ioat_dma;
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+ int chunk;
dma_addr_t phys;
+ u8 *pos;
+ off_t offs;
ioat_dma = to_ioatdma_device(chan->device);
- hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
- if (!hw)
- return NULL;
+
+ chunk = idx / IOAT_DESCS_PER_2M;
+ idx &= (IOAT_DESCS_PER_2M - 1);
+ offs = idx * IOAT_DESC_SZ;
+ pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
+ phys = ioat_chan->descs[chunk].hw + offs;
+ hw = (struct ioat_dma_descriptor *)pos;
memset(hw, 0, sizeof(*hw));
desc = kmem_cache_zalloc(ioat_cache, flags);
- if (!desc) {
- pci_pool_free(ioat_dma->dma_pool, hw, phys);
+ if (!desc)
return NULL;
- }
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = ioat_tx_submit_unlock;
@@ -318,32 +325,63 @@ ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
- struct ioatdma_device *ioat_dma;
-
- ioat_dma = to_ioatdma_device(chan->device);
- pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
kmem_cache_free(ioat_cache, desc);
}
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
struct ioat_ring_ent **ring;
- int descs = 1 << order;
- int i;
-
- if (order > ioat_get_max_alloc_order())
- return NULL;
+ int total_descs = 1 << order;
+ int i, chunks;
/* allocate the array to hold the software ring */
- ring = kcalloc(descs, sizeof(*ring), flags);
+ ring = kcalloc(total_descs, sizeof(*ring), flags);
if (!ring)
return NULL;
- for (i = 0; i < descs; i++) {
- ring[i] = ioat_alloc_ring_ent(c, flags);
+
+ ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+
+ for (i = 0; i < chunks; i++) {
+ struct ioat_descs *descs = &ioat_chan->descs[i];
+
+ descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
+ SZ_2M, &descs->hw, flags);
+ if (!descs->virt && (i > 0)) {
+ int idx;
+
+ for (idx = 0; idx < i; idx++) {
+ dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ descs->virt, descs->hw);
+ descs->virt = NULL;
+ descs->hw = 0;
+ }
+
+ ioat_chan->desc_chunks = 0;
+ kfree(ring);
+ return NULL;
+ }
+ }
+
+ for (i = 0; i < total_descs; i++) {
+ ring[i] = ioat_alloc_ring_ent(c, i, flags);
if (!ring[i]) {
+ int idx;
+
while (i--)
ioat_free_ring_ent(ring[i], c);
+
+ for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
+ dma_free_coherent(to_dev(ioat_chan),
+ SZ_2M,
+ ioat_chan->descs[idx].virt,
+ ioat_chan->descs[idx].hw);
+ ioat_chan->descs[idx].virt = NULL;
+ ioat_chan->descs[idx].hw = 0;
+ }
+
+ ioat_chan->desc_chunks = 0;
kfree(ring);
return NULL;
}
@@ -351,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
}
/* link descs */
- for (i = 0; i < descs-1; i++) {
+ for (i = 0; i < total_descs-1; i++) {
struct ioat_ring_ent *next = ring[i+1];
struct ioat_dma_descriptor *hw = ring[i]->hw;
@@ -362,114 +400,6 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
return ring;
}
-static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
-{
- /* reshape differs from normal ring allocation in that we want
- * to allocate a new software ring while only
- * extending/truncating the hardware ring
- */
- struct dma_chan *c = &ioat_chan->dma_chan;
- const u32 curr_size = ioat_ring_size(ioat_chan);
- const u16 active = ioat_ring_active(ioat_chan);
- const u32 new_size = 1 << order;
- struct ioat_ring_ent **ring;
- u32 i;
-
- if (order > ioat_get_max_alloc_order())
- return false;
-
- /* double check that we have at least 1 free descriptor */
- if (active == curr_size)
- return false;
-
- /* when shrinking, verify that we can hold the current active
- * set in the new ring
- */
- if (active >= new_size)
- return false;
-
- /* allocate the array to hold the software ring */
- ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
- if (!ring)
- return false;
-
- /* allocate/trim descriptors as needed */
- if (new_size > curr_size) {
- /* copy current descriptors to the new ring */
- for (i = 0; i < curr_size; i++) {
- u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat_chan->ring[curr_idx];
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* add new descriptors to the ring */
- for (i = curr_size; i < new_size; i++) {
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
- if (!ring[new_idx]) {
- while (i--) {
- u16 new_idx = (ioat_chan->tail+i) &
- (new_size-1);
-
- ioat_free_ring_ent(ring[new_idx], c);
- }
- kfree(ring);
- return false;
- }
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* hw link new descriptors */
- for (i = curr_size-1; i < new_size; i++) {
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
- struct ioat_ring_ent *next =
- ring[(new_idx+1) & (new_size-1)];
- struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
-
- hw->next = next->txd.phys;
- }
- } else {
- struct ioat_dma_descriptor *hw;
- struct ioat_ring_ent *next;
-
- /* copy current descriptors to the new ring, dropping the
- * removed descriptors
- */
- for (i = 0; i < new_size; i++) {
- u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat_chan->ring[curr_idx];
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* free deleted descriptors */
- for (i = new_size; i < curr_size; i++) {
- struct ioat_ring_ent *ent;
-
- ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
- ioat_free_ring_ent(ent, c);
- }
-
- /* fix up hardware ring */
- hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
- next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
- hw->next = next->txd.phys;
- }
-
- dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
- __func__, new_size);
-
- kfree(ioat_chan->ring);
- ioat_chan->ring = ring;
- ioat_chan->alloc_order = order;
-
- return true;
-}
-
/**
* ioat_check_space_lock - verify space and grab ring producer lock
* @ioat: ioat,3 channel (ring) to operate on
@@ -478,9 +408,6 @@ static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
__acquires(&ioat_chan->prep_lock)
{
- bool retry;
-
- retry:
spin_lock_bh(&ioat_chan->prep_lock);
/* never allow the last descriptor to be consumed, we need at
* least one free at all times to allow for on-the-fly ring
@@ -493,24 +420,8 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
ioat_chan->produce = num_descs;
return 0; /* with ioat->prep_lock held */
}
- retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
- /* is another cpu already trying to expand the ring? */
- if (retry)
- goto retry;
-
- spin_lock_bh(&ioat_chan->cleanup_lock);
- spin_lock_bh(&ioat_chan->prep_lock);
- retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
- clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
-
- /* if we were able to expand the ring retry the allocation */
- if (retry)
- goto retry;
-
dev_dbg_ratelimited(to_dev(ioat_chan),
"%s: ring full! num_descs: %d (%x:%x:%x)\n",
__func__, num_descs, ioat_chan->head,
@@ -823,19 +734,6 @@ static void check_active(struct ioatdma_chan *ioat_chan)
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
- else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
- /* if the ring is idle, empty, and oversized try to step
- * down the size
- */
- reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
-
- /* keep shrinking until we get back to our minimum
- * default size
- */
- if (ioat_chan->alloc_order > ioat_get_alloc_order())
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
- }
-
}
void ioat_timer_event(unsigned long data)
@@ -916,40 +814,6 @@ ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
return dma_cookie_status(c, cookie, txstate);
}
-static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
-{
- struct pci_dev *pdev = ioat_dma->pdev;
- int irq = pdev->irq, i;
-
- if (!is_bwd_ioat(pdev))
- return 0;
-
- switch (ioat_dma->irq_mode) {
- case IOAT_MSIX:
- for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
- struct msix_entry *msix = &ioat_dma->msix_entries[i];
- struct ioatdma_chan *ioat_chan;
-
- ioat_chan = ioat_chan_by_index(ioat_dma, i);
- devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
- }
-
- pci_disable_msix(pdev);
- break;
- case IOAT_MSI:
- pci_disable_msi(pdev);
- /* fall through */
- case IOAT_INTX:
- devm_free_irq(&pdev->dev, irq, ioat_dma);
- break;
- default:
- return 0;
- }
- ioat_dma->irq_mode = IOAT_NOIRQ;
-
- return ioat_dma_setup_interrupts(ioat_dma);
-}
-
int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
/* throw away whatever the channel was doing and get it
@@ -989,9 +853,21 @@ int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
}
}
+ if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
+ ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
+ ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
+ ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
+ }
+
err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
- if (!err)
- err = ioat_irq_reinit(ioat_dma);
+ if (!err) {
+ if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
+ writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
+ writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
+ writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
+ }
+ }
if (err)
dev_err(&pdev->dev, "Failed to reset: %d\n", err);
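What replaces ioat_irq_reinit(): rather than freeing and re-requesting vectors after a reset, the patch shadows the three MSIX registers that the CB3.3 channel reset clobbers and writes them back once ioat_reset_sync() succeeds. One thing worth double-checking: msixpba is declared u32 in dma.h below but is read with readq(), so the upper half of that read is silently truncated. Stitched together without the diff markers, the workaround is just:

    if (is_bwd_ioat(pdev) && ioat_dma->irq_mode == IOAT_MSIX) {
        /* save MSIX table entry 0 and the pending-bit array */
        ioat_dma->msixtba0  = readq(ioat_dma->reg_base + 0x1000);
        ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
        ioat_dma->msixpba   = readq(ioat_dma->reg_base + 0x1800);
    }

    err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));

    if (!err && is_bwd_ioat(pdev) && ioat_dma->irq_mode == IOAT_MSIX) {
        /* restore the shadowed registers after the reset */
        writeq(ioat_dma->msixtba0,  ioat_dma->reg_base + 0x1000);
        writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
        writeq(ioat_dma->msixpba,   ioat_dma->reg_base + 0x1800);
    }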
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8f48074789f..a9bc1a15b0d1 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -62,7 +62,6 @@ enum ioat_irq_mode {
* struct ioatdma_device - internal representation of an IOAT device
* @pdev: PCI-Express device
* @reg_base: MMIO register space base address
- * @dma_pool: for allocating DMA descriptors
* @completion_pool: DMA buffers for completion ops
* @sed_hw_pool: DMA super descriptor pools
* @dma_dev: embedded struct dma_device
@@ -76,8 +75,7 @@ enum ioat_irq_mode {
struct ioatdma_device {
struct pci_dev *pdev;
void __iomem *reg_base;
- struct pci_pool *dma_pool;
- struct pci_pool *completion_pool;
+ struct dma_pool *completion_pool;
#define MAX_SED_POOLS 5
struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
struct dma_device dma_dev;
@@ -88,6 +86,16 @@ struct ioatdma_device {
struct dca_provider *dca;
enum ioat_irq_mode irq_mode;
u32 cap;
+
+ /* shadow version for CB3.3 chan reset errata workaround */
+ u64 msixtba0;
+ u64 msixdata0;
+ u32 msixpba;
+};
+
+struct ioat_descs {
+ void *virt;
+ dma_addr_t hw;
};
struct ioatdma_chan {
@@ -100,7 +108,6 @@ struct ioatdma_chan {
#define IOAT_COMPLETION_ACK 1
#define IOAT_RESET_PENDING 2
#define IOAT_KOBJ_INIT_FAIL 3
- #define IOAT_RESHAPE_PENDING 4
#define IOAT_RUN 5
#define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
@@ -133,6 +140,8 @@ struct ioatdma_chan {
u16 produce;
struct ioat_ring_ent **ring;
spinlock_t prep_lock;
+ struct ioat_descs descs[2];
+ int desc_chunks;
};
struct ioat_sysfs_entry {
@@ -302,10 +311,8 @@ static inline bool is_ioat_bug(unsigned long err)
}
#define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
- (min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
- (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+#define IOAT_MAX_DESCS 65536
+#define IOAT_DESCS_PER_2M 32768
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
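The new constants pin down the ring geometry: a hardware descriptor is IOAT_DESC_SZ (64) bytes, so one 2 MB coherent chunk holds 2097152 / 64 = 32768 descriptors (IOAT_DESCS_PER_2M), and the maximum ring of 2^16 = 65536 descriptors (IOAT_MAX_ORDER, IOAT_MAX_DESCS) fits in exactly two chunks, which is why ioatdma_chan grows a descs[2] array. A standalone check of the arithmetic:

    #include <assert.h>

    #define SZ_2M              (2u * 1024 * 1024)
    #define IOAT_DESC_SZ       64
    #define IOAT_MAX_ORDER     16
    #define IOAT_MAX_DESCS     65536
    #define IOAT_DESCS_PER_2M  32768

    int main(void)
    {
        assert(SZ_2M / IOAT_DESC_SZ == IOAT_DESCS_PER_2M);
        assert((1u << IOAT_MAX_ORDER) == IOAT_MAX_DESCS);
        assert(IOAT_MAX_DESCS / IOAT_DESCS_PER_2M == 2); /* descs[2] */
        return 0;
    }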
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 690e3b4f8202..8e67895bcca3 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -73,6 +73,8 @@
int system_has_dca_enabled(struct pci_dev *pdev);
+#define IOAT_DESC_SZ 64
+
struct ioat_dma_descriptor {
uint32_t size;
union {
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4ef0c5e07912..efdee1a69fc4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -28,6 +28,7 @@
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
+#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
@@ -136,14 +137,6 @@ int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
"high-water mark for pushing ioat descriptors (default: 4)");
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
- "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
-int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
- "ioat+: upper limit for ring size (default: 16)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
@@ -504,23 +497,14 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
struct pci_dev *pdev = ioat_dma->pdev;
struct device *dev = &pdev->dev;
- /* DMA coherent memory pool for DMA descriptor allocations */
- ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
- sizeof(struct ioat_dma_descriptor),
- 64, 0);
- if (!ioat_dma->dma_pool) {
- err = -ENOMEM;
- goto err_dma_pool;
- }
-
- ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+ ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
sizeof(u64),
SMP_CACHE_BYTES,
SMP_CACHE_BYTES);
if (!ioat_dma->completion_pool) {
err = -ENOMEM;
- goto err_completion_pool;
+ goto err_out;
}
ioat_enumerate_channels(ioat_dma);
@@ -546,10 +530,8 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
err_self_test:
ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
- pci_pool_destroy(ioat_dma->completion_pool);
-err_completion_pool:
- pci_pool_destroy(ioat_dma->dma_pool);
-err_dma_pool:
+ dma_pool_destroy(ioat_dma->completion_pool);
+err_out:
return err;
}
@@ -559,8 +541,7 @@ static int ioat_register(struct ioatdma_device *ioat_dma)
if (err) {
ioat_disable_interrupts(ioat_dma);
- pci_pool_destroy(ioat_dma->completion_pool);
- pci_pool_destroy(ioat_dma->dma_pool);
+ dma_pool_destroy(ioat_dma->completion_pool);
}
return err;
@@ -576,8 +557,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
dma_async_device_unregister(dma);
- pci_pool_destroy(ioat_dma->dma_pool);
- pci_pool_destroy(ioat_dma->completion_pool);
+ dma_pool_destroy(ioat_dma->completion_pool);
INIT_LIST_HEAD(&dma->channels);
}
@@ -666,10 +646,19 @@ static void ioat_free_chan_resources(struct dma_chan *c)
ioat_free_ring_ent(desc, c);
}
+ for (i = 0; i < ioat_chan->desc_chunks; i++) {
+ dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ ioat_chan->descs[i].virt,
+ ioat_chan->descs[i].hw);
+ ioat_chan->descs[i].virt = NULL;
+ ioat_chan->descs[i].hw = 0;
+ }
+ ioat_chan->desc_chunks = 0;
+
kfree(ioat_chan->ring);
ioat_chan->ring = NULL;
ioat_chan->alloc_order = 0;
- pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+ dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
ioat_chan->completion_dma);
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
@@ -701,7 +690,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
ioat_chan->completion =
- pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+ dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
GFP_KERNEL, &ioat_chan->completion_dma);
if (!ioat_chan->completion)
return -ENOMEM;
@@ -712,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
writel(((u64)ioat_chan->completion_dma) >> 32,
ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
- order = ioat_get_alloc_order();
+ order = IOAT_MAX_ORDER;
ring = ioat_alloc_ring(c, order, GFP_KERNEL);
if (!ring)
return -ENOMEM;
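With reshape_ring() gone, the ring is sized once at IOAT_MAX_ORDER. The free path above implies what ioat_alloc_ring() must now do: carve 64-byte descriptors out of up to two 2 MB dma_alloc_coherent() chunks instead of a pci_pool. A hedged sketch of that allocation shape (simplified, not the literal function body):

    /* allocate 'total' descriptors in 2 MB coherent chunks */
    int i, chunks = (total * IOAT_DESC_SZ + SZ_2M - 1) / SZ_2M;

    for (i = 0; i < chunks; i++) {
        struct ioat_descs *descs = &ioat_chan->descs[i];

        descs->virt = dma_alloc_coherent(to_dev(ioat_chan), SZ_2M,
                                         &descs->hw, GFP_KERNEL);
        if (!descs->virt) {
            while (i--)     /* unwind earlier chunks */
                dma_free_coherent(to_dev(ioat_chan), SZ_2M,
                                  ioat_chan->descs[i].virt,
                                  ioat_chan->descs[i].hw);
            return -ENOMEM;
        }
        ioat_chan->desc_chunks++;
    }
    /* descriptor j lives in chunk j / IOAT_DESCS_PER_2M at byte
     * offset (j % IOAT_DESCS_PER_2M) * IOAT_DESC_SZ
     */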
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
index 6bb4a13a8fbd..243421af888f 100644
--- a/drivers/dma/ioat/prep.c
+++ b/drivers/dma/ioat/prep.c
@@ -26,7 +26,7 @@
#include "hw.h"
#include "dma.h"
-#define MAX_SCF 1024
+#define MAX_SCF 256
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e4f43125e0fb..f039cfadf17b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1300,10 +1300,10 @@ static int iop_adma_probe(struct platform_device *pdev)
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
- adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
- plat_data->pool_size,
- &adev->dma_desc_pool,
- GFP_KERNEL);
+ adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
+ plat_data->pool_size,
+ &adev->dma_desc_pool,
+ GFP_KERNEL);
if (!adev->dma_desc_pool_virt) {
ret = -ENOMEM;
goto err_free_adev;
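The writecombine conversions in this hunk and the mv_xor/bam_dma hunks below are mechanical: dma_alloc_wc() and dma_free_wc() are the renamed spellings of dma_{alloc,free}_writecombine() with unchanged semantics. Usage is one-for-one:

    void *virt;
    dma_addr_t phys;

    virt = dma_alloc_wc(dev, pool_size, &phys, GFP_KERNEL);
    if (!virt)
        return -ENOMEM;
    /* ... descriptors live in write-combining coherent memory ... */
    dma_free_wc(dev, pool_size, virt, phys);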
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 068e920ecb68..1502b24b7c7d 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -483,7 +483,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch)
mic_dma_intr_handler, mic_dma_thread_fn,
"mic dma_channel", ch, ch->ch_num);
if (IS_ERR(ch->cookie))
- return IS_ERR(ch->cookie);
+ return PTR_ERR(ch->cookie);
return 0;
}
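The mic_x100 one-liner is a genuine bug fix: IS_ERR() evaluates to 1 for an error pointer, so the old code returned 1 rather than the encoded negative errno. PTR_ERR() recovers it. The canonical idiom:

    ptr = devm_ioremap_resource(dev, res);  /* or any ERR_PTR() API */
    if (IS_ERR(ptr))
        return PTR_ERR(ptr);    /* propagates -ENOMEM, -EINVAL, ... */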
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 14091f878f80..3922a5d56806 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -964,8 +964,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
* requires that we explicitly flush the writes
*/
mv_chan->dma_desc_pool_virt =
- dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
- &mv_chan->dma_desc_pool, GFP_KERNEL);
+ dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
+ GFP_KERNEL);
if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 9794b073d7d7..1e984e18c126 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@ struct omap_chan {
unsigned dma_sig;
bool cyclic;
bool paused;
+ bool running;
int dma_ch;
struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
/* Enable channel */
omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+ c->running = true;
}
static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val);
}
+
+ c->running = false;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
struct omap_chan *c = to_omap_dma_chan(chan);
struct virt_dma_desc *vd;
enum dma_status ret;
- uint32_t ccr;
unsigned long flags;
- ccr = omap_dma_chan_read(c, CCR);
- /* The channel is no longer active, handle the completion right away */
- if (!(ccr & CCR_ENABLE))
- omap_dma_callback(c->dma_ch, 0, c);
-
ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (!c->paused && c->running) {
+ uint32_t ccr = omap_dma_chan_read(c, CCR);
+ /*
+ * The channel is no longer active, set the return value
+ * accordingly
+ */
+ if (!(ccr & CCR_ENABLE))
+ ret = DMA_COMPLETE;
+ }
+
if (ret == DMA_COMPLETE || !txstate)
return ret;
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
- d->cicr = CICR_DROP_IE;
- if (tx_flags & DMA_PREP_INTERRUPT)
- d->cicr |= CICR_FRAME_IE;
+ d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
d->csdp = data_type;
@@ -1009,6 +1017,13 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void omap_dma_synchronize(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int omap_dma_pause(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
@@ -1112,6 +1127,7 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.device_pause = omap_dma_pause;
od->ddev.device_resume = omap_dma_resume;
od->ddev.device_terminate_all = omap_dma_terminate_all;
+ od->ddev.device_synchronize = omap_dma_synchronize;
od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
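Implementing device_synchronize (here just vchan_synchronize()) enables the two-step termination API on OMAP: a client terminates asynchronously, then synchronizes before freeing memory its callbacks may still touch. Client-side usage, for illustration:

    dmaengine_terminate_async(chan);
    /* no new descriptors may be issued from here on */
    dmaengine_synchronize(chan);    /* waits out in-flight callbacks */
    /* now safe to free buffers used by the terminated transfers */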
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 17ee758b419f..372b4359da97 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -33,6 +33,9 @@
#define PL330_MAX_CHAN 8
#define PL330_MAX_IRQS 32
#define PL330_MAX_PERI 32
+#define PL330_MAX_BURST 16
+
+#define PL330_QUIRK_BROKEN_NO_FLUSHP BIT(0)
enum pl330_cachectrl {
CCTRL0, /* Noncacheable and nonbufferable */
@@ -488,6 +491,17 @@ struct pl330_dmac {
/* Peripheral channels connected to this DMAC */
unsigned int num_peripherals;
struct dma_pl330_chan *peripherals; /* keep at end */
+ int quirks;
+};
+
+static struct pl330_of_quirks {
+ char *quirk;
+ int id;
+} of_quirks[] = {
+ {
+ .quirk = "arm,pl330-broken-no-flushp",
+ .id = PL330_QUIRK_BROKEN_NO_FLUSHP,
+ }
};
struct dma_pl330_desc {
@@ -1137,47 +1151,67 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
return off;
}
-static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
+ u8 buf[], const struct _xfer_spec *pxs,
+ int cyc)
{
int off = 0;
+ enum pl330_cond cond;
+
+ if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+ cond = BURST;
+ else
+ cond = SINGLE;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
- off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
+ off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
off += _emit_ST(dry_run, &buf[off], ALWAYS);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+
+ if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+ off += _emit_FLUSHP(dry_run, &buf[off],
+ pxs->desc->peri);
}
return off;
}
-static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_memtodev(struct pl330_dmac *pl330,
+ unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
{
int off = 0;
+ enum pl330_cond cond;
+
+ if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+ cond = BURST;
+ else
+ cond = SINGLE;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
off += _emit_LD(dry_run, &buf[off], ALWAYS);
- off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+ off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);
+
+ if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+ off += _emit_FLUSHP(dry_run, &buf[off],
+ pxs->desc->peri);
}
return off;
}
-static int _bursts(unsigned dry_run, u8 buf[],
+static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs, int cyc)
{
int off = 0;
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
+ off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
break;
case DMA_DEV_TO_MEM:
- off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
+ off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
break;
case DMA_MEM_TO_MEM:
off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
@@ -1191,7 +1225,7 @@ static int _bursts(unsigned dry_run, u8 buf[],
}
/* Returns bytes consumed and updates bursts */
-static inline int _loop(unsigned dry_run, u8 buf[],
+static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
unsigned long *bursts, const struct _xfer_spec *pxs)
{
int cyc, cycmax, szlp, szlpend, szbrst, off;
@@ -1199,7 +1233,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
struct _arg_LPEND lpend;
if (*bursts == 1)
- return _bursts(dry_run, buf, pxs, 1);
+ return _bursts(pl330, dry_run, buf, pxs, 1);
/* Max iterations possible in DMALP is 256 */
if (*bursts >= 256*256) {
@@ -1217,7 +1251,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
}
szlp = _emit_LP(1, buf, 0, 0);
- szbrst = _bursts(1, buf, pxs, 1);
+ szbrst = _bursts(pl330, 1, buf, pxs, 1);
lpend.cond = ALWAYS;
lpend.forever = false;
@@ -1249,7 +1283,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
ljmp1 = off;
- off += _bursts(dry_run, &buf[off], pxs, cyc);
+ off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);
lpend.cond = ALWAYS;
lpend.forever = false;
@@ -1272,8 +1306,9 @@ static inline int _loop(unsigned dry_run, u8 buf[],
return off;
}
-static inline int _setup_loops(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs)
+static inline int _setup_loops(struct pl330_dmac *pl330,
+ unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
{
struct pl330_xfer *x = &pxs->desc->px;
u32 ccr = pxs->ccr;
@@ -1282,15 +1317,16 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[],
while (bursts) {
c = bursts;
- off += _loop(dry_run, &buf[off], &c, pxs);
+ off += _loop(pl330, dry_run, &buf[off], &c, pxs);
bursts -= c;
}
return off;
}
-static inline int _setup_xfer(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs)
+static inline int _setup_xfer(struct pl330_dmac *pl330,
+ unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
{
struct pl330_xfer *x = &pxs->desc->px;
int off = 0;
@@ -1301,7 +1337,7 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
/* Setup Loop(s) */
- off += _setup_loops(dry_run, &buf[off], pxs);
+ off += _setup_loops(pl330, dry_run, &buf[off], pxs);
return off;
}
@@ -1310,8 +1346,9 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
* A req is a sequence of one or more xfer units.
* Returns the number of bytes taken to setup the MC for the req.
*/
-static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
- unsigned index, struct _xfer_spec *pxs)
+static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
+ struct pl330_thread *thrd, unsigned index,
+ struct _xfer_spec *pxs)
{
struct _pl330_req *req = &thrd->req[index];
struct pl330_xfer *x;
@@ -1328,7 +1365,7 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
return -EINVAL;
- off += _setup_xfer(dry_run, &buf[off], pxs);
+ off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
/* DMASEV peripheral/event */
off += _emit_SEV(dry_run, &buf[off], thrd->ev);
@@ -1422,7 +1459,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
xs.desc = desc;
/* First dry run to check if req is acceptable */
- ret = _setup_req(1, thrd, idx, &xs);
+ ret = _setup_req(pl330, 1, thrd, idx, &xs);
if (ret < 0)
goto xfer_exit;
@@ -1436,7 +1473,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
/* Hook the request */
thrd->lstenq = idx;
thrd->req[idx].desc = desc;
- _setup_req(0, thrd, idx, &xs);
+ _setup_req(pl330, 0, thrd, idx, &xs);
ret = 0;
@@ -2781,6 +2818,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
struct resource *res;
int i, ret, irq;
int num_chan;
+ struct device_node *np = adev->dev.of_node;
pdat = dev_get_platdata(&adev->dev);
@@ -2800,6 +2838,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
+ /* get quirk */
+ for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
+ if (of_property_read_bool(np, of_quirks[i].quirk))
+ pl330->quirks |= of_quirks[i].id;
+
res = &adev->res;
pl330->base = devm_ioremap_resource(&adev->dev, res);
if (IS_ERR(pl330->base))
@@ -2895,6 +2938,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
+ 1 : PL330_MAX_BURST);
ret = dma_async_device_register(pd);
if (ret) {
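With arm,pl330-broken-no-flushp set, the generated microcode waits with the BURST condition and never emits DMAFLUSHP, and the controller advertises max_burst = 1 so slave drivers fall back to single transfers. A client can honor the cap through the capability API (sketch; assumes the max_burst field exposed by dma_get_slave_caps() in this kernel generation):

    struct dma_slave_caps caps;

    if (!dma_get_slave_caps(chan, &caps) &&
        cfg.dst_maxburst > caps.max_burst)
        cfg.dst_maxburst = caps.max_burst;
    dmaengine_slave_config(chan, &cfg);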
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index debca824bed6..77c1c44009d8 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -122,6 +122,7 @@ struct pxad_chan {
struct pxad_device {
struct dma_device slave;
int nr_chans;
+ int nr_requestors;
void __iomem *base;
struct pxad_phy *phys;
spinlock_t phy_lock; /* Phy association */
@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
return;
/* clear the channel mapping in DRCMR */
- if (chan->drcmr <= DRCMR_CHLNUM) {
+ if (chan->drcmr <= pdev->nr_requestors) {
reg = pxad_drcmr(chan->drcmr);
writel_relaxed(0, chan->phy->base + reg);
}
@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
+ struct pxad_device *pdev;
u32 reg, dalgn;
if (!phy->vchan)
@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
"%s(); phy=%p(%d) misaligned=%d\n", __func__,
phy, phy->idx, misaligned);
- if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+ pdev = to_pxad_dev(phy->vchan->vc.chan.device);
+ if (phy->vchan->drcmr <= pdev->nr_requestors) {
reg = pxad_drcmr(phy->vchan->drcmr);
writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
}
@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
{
u32 maxburst = 0, dev_addr = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
*dcmd = 0;
if (dir == DMA_DEV_TO_MEM) {
@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.src_addr;
*dev_src = dev_addr;
*dcmd |= PXA_DCMD_INCTRGADDR;
- if (chan->drcmr <= DRCMR_CHLNUM)
+ if (chan->drcmr <= pdev->nr_requestors)
*dcmd |= PXA_DCMD_FLOWSRC;
}
if (dir == DMA_MEM_TO_DEV) {
@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.dst_addr;
*dev_dst = dev_addr;
*dcmd |= PXA_DCMD_INCSRCADDR;
- if (chan->drcmr <= DRCMR_CHLNUM)
+ if (chan->drcmr <= pdev->nr_requestors)
*dcmd |= PXA_DCMD_FLOWTRG;
}
if (dir == DMA_MEM_TO_MEM)
@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
static int pxad_init_dmadev(struct platform_device *op,
struct pxad_device *pdev,
- unsigned int nr_phy_chans)
+ unsigned int nr_phy_chans,
+ unsigned int nr_requestors)
{
int ret;
unsigned int i;
struct pxad_chan *c;
pdev->nr_chans = nr_phy_chans;
+ pdev->nr_requestors = nr_requestors;
INIT_LIST_HEAD(&pdev->slave.channels);
pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
- int ret, dma_channels = 0;
+ int ret, dma_channels = 0, nb_requestors = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
return PTR_ERR(pdev->base);
of_id = of_match_device(pxad_dt_ids, &op->dev);
- if (of_id)
+ if (of_id) {
of_property_read_u32(op->dev.of_node, "#dma-channels",
&dma_channels);
- else if (pdata && pdata->dma_channels)
+ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+ &nb_requestors);
+ if (ret) {
+ dev_warn(pdev->slave.dev,
+ "#dma-requests set to default 32 as missing in OF: %d",
+ ret);
+ nb_requestors = 32;
+ };
+ } else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
- else
+ nb_requestors = pdata->nb_requestors;
+ } else {
dma_channels = 32; /* default 32 channel */
+ }
dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
@@ -1423,7 +1439,7 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.descriptor_reuse = true;
pdev->slave.dev = &op->dev;
- ret = pxad_init_dmadev(op, pdev, dma_channels);
+ ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
if (ret) {
dev_err(pdev->slave.dev, "unable to register\n");
return ret;
@@ -1442,7 +1458,8 @@ static int pxad_probe(struct platform_device *op)
platform_set_drvdata(op, pdev);
pxad_init_debugfs(pdev);
- dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
+ dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
+ dma_channels, nb_requestors);
return 0;
}
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
new file mode 100644
index 000000000000..a7761c4025f4
--- /dev/null
+++ b/drivers/dma/qcom/Kconfig
@@ -0,0 +1,29 @@
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
+config QCOM_HIDMA_MGMT
+ tristate "Qualcomm Technologies HIDMA Management support"
+ select DMA_ENGINE
+ help
+ Enable support for the Qualcomm Technologies HIDMA Management.
+ Each DMA device requires one management interface driver
+ for basic initialization before QCOM_HIDMA channel driver can
+ start managing the channels. In a virtualized environment,
+ the guest OS would run QCOM_HIDMA channel driver and the
+ host would run the QCOM_HIDMA_MGMT management driver.
+
+config QCOM_HIDMA
+ tristate "Qualcomm Technologies HIDMA Channel support"
+ select DMA_ENGINE
+ help
+ Enable support for the Qualcomm Technologies HIDMA controller.
+ The HIDMA controller supports optimized buffer copies
+ (user to kernel, kernel to kernel, etc.). It only supports
+ the memcpy interface. The core is not intended for general
+ purpose slave DMA.
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
new file mode 100644
index 000000000000..bfea6990229f
--- /dev/null
+++ b/drivers/dma/qcom/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
+obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
+hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5a250cdc8376..d5e0a9c3ad5d 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -49,13 +49,13 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
-#include "dmaengine.h"
-#include "virt-dma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
struct bam_desc_hw {
- u32 addr; /* Buffer physical address */
- u16 size; /* Buffer size in bytes */
- u16 flags;
+ __le32 addr; /* Buffer physical address */
+ __le16 size; /* Buffer size in bytes */
+ __le16 flags;
};
#define DESC_FLAG_INT BIT(15)
@@ -502,8 +502,8 @@ static int bam_alloc_chan(struct dma_chan *chan)
return 0;
/* allocate FIFO descriptor space, but only if necessary */
- bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
- &bchan->fifo_phys, GFP_KERNEL);
+ bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+ &bchan->fifo_phys, GFP_KERNEL);
if (!bchan->fifo_virt) {
dev_err(bdev->dev, "Failed to allocate desc fifo\n");
@@ -538,8 +538,8 @@ static void bam_free_chan(struct dma_chan *chan)
bam_reset_channel(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
- dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
- bchan->fifo_phys);
+ dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
+ bchan->fifo_phys);
bchan->fifo_virt = NULL;
/* mask irq for pipe/channel */
@@ -632,14 +632,15 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
unsigned int curr_offset = 0;
do {
- desc->addr = sg_dma_address(sg) + curr_offset;
+ desc->addr = cpu_to_le32(sg_dma_address(sg) +
+ curr_offset);
if (remainder > BAM_MAX_DATA_SIZE) {
- desc->size = BAM_MAX_DATA_SIZE;
+ desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE);
remainder -= BAM_MAX_DATA_SIZE;
curr_offset += BAM_MAX_DATA_SIZE;
} else {
- desc->size = remainder;
+ desc->size = cpu_to_le16(remainder);
remainder = 0;
}
@@ -915,9 +916,11 @@ static void bam_start_dma(struct bam_chan *bchan)
/* set any special flags on the last descriptor */
if (async_desc->num_desc == async_desc->xfer_len)
- desc[async_desc->xfer_len - 1].flags = async_desc->flags;
+ desc[async_desc->xfer_len - 1].flags =
+ cpu_to_le16(async_desc->flags);
else
- desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+ desc[async_desc->xfer_len - 1].flags |=
+ cpu_to_le16(DESC_FLAG_INT);
if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
u32 partial = MAX_DESCRIPTORS - bchan->tail;
@@ -1231,9 +1234,9 @@ static int bam_dma_remove(struct platform_device *pdev)
bam_dma_terminate_all(&bdev->channels[i].vc.chan);
tasklet_kill(&bdev->channels[i].vc.task);
- dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
- bdev->channels[i].fifo_virt,
- bdev->channels[i].fifo_phys);
+ dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+ bdev->channels[i].fifo_virt,
+ bdev->channels[i].fifo_phys);
}
tasklet_kill(&bdev->task);
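The bam_desc_hw change is an endianness annotation pass: the descriptor FIFO is consumed by little-endian hardware, so the fields become __le32/__le16 and every store goes through cpu_to_le32()/cpu_to_le16(). On big-endian kernels that is where the byte swap happens; on little-endian builds the calls compile away, and sparse can now flag any missed conversion. The pattern in isolation:

    struct bam_desc_hw {
        __le32 addr;    /* buffer physical address */
        __le16 size;    /* buffer size in bytes */
        __le16 flags;
    };

    desc->addr  = cpu_to_le32(sg_dma_address(sg));
    desc->size  = cpu_to_le16(len);
    desc->flags |= cpu_to_le16(DESC_FLAG_INT);  /* both sides __le16 */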
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
new file mode 100644
index 000000000000..cccc78efbca9
--- /dev/null
+++ b/drivers/dma/qcom/hidma.c
@@ -0,0 +1,706 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
+ * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
+ *
+ * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
+ * (defines, structures and comments) was taken from MPC5121 DMA driver
+ * written by Hongjun Chen <hong-jun.chen@freescale.com>.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/* Linux Foundation elects GPLv2 license only. */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/property.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+
+#include "../dmaengine.h"
+#include "hidma.h"
+
+/*
+ * Default idle time is 2 seconds. This parameter can
+ * be overridden by changing the following
+ * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
+ * during kernel boot.
+ */
+#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
+#define HIDMA_ERR_INFO_SW 0xFF
+#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
+#define HIDMA_NR_DEFAULT_DESC 10
+
+static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
+{
+ return container_of(dmadev, struct hidma_dev, ddev);
+}
+
+static inline
+struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
+{
+ return container_of(_lldevp, struct hidma_dev, lldev);
+}
+
+static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
+{
+ return container_of(dmach, struct hidma_chan, chan);
+}
+
+static inline
+struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct hidma_desc, desc);
+}
+
+static void hidma_free(struct hidma_dev *dmadev)
+{
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+}
+
+static unsigned int nr_desc_prm;
+module_param(nr_desc_prm, uint, 0644);
+MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
+
+/* process completed descriptors */
+static void hidma_process_completed(struct hidma_chan *mchan)
+{
+ struct dma_device *ddev = mchan->chan.device;
+ struct hidma_dev *mdma = to_hidma_dev(ddev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t last_cookie;
+ struct hidma_desc *mdesc;
+ unsigned long irqflags;
+ struct list_head list;
+
+ INIT_LIST_HEAD(&list);
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(mdesc, &list, node) {
+ enum dma_status llstat;
+
+ desc = &mdesc->desc;
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ dma_cookie_complete(desc);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+ if (desc->callback && (llstat == DMA_COMPLETE))
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&list, &mchan->free);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+}
+
+/*
+ * Called once for each submitted descriptor.
+ * PM is locked once for each descriptor that is currently
+ * in execution.
+ */
+static void hidma_callback(void *data)
+{
+ struct hidma_desc *mdesc = data;
+ struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
+ struct dma_device *ddev = mchan->chan.device;
+ struct hidma_dev *dmadev = to_hidma_dev(ddev);
+ unsigned long irqflags;
+ bool queued = false;
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (mdesc->node.next) {
+ /* Delete from the active list, add to completed list */
+ list_move_tail(&mdesc->node, &mchan->completed);
+ queued = true;
+
+ /* calculate the next running descriptor */
+ mchan->running = list_first_entry(&mchan->active,
+ struct hidma_desc, node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ hidma_process_completed(mchan);
+
+ if (queued) {
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+}
+
+static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
+{
+ struct hidma_chan *mchan;
+ struct dma_device *ddev;
+
+ mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
+ if (!mchan)
+ return -ENOMEM;
+
+ ddev = &dmadev->ddev;
+ mchan->dma_sig = dma_sig;
+ mchan->dmadev = dmadev;
+ mchan->chan.device = ddev;
+ dma_cookie_init(&mchan->chan);
+
+ INIT_LIST_HEAD(&mchan->free);
+ INIT_LIST_HEAD(&mchan->prepared);
+ INIT_LIST_HEAD(&mchan->active);
+ INIT_LIST_HEAD(&mchan->completed);
+
+ spin_lock_init(&mchan->lock);
+ list_add_tail(&mchan->chan.device_node, &ddev->channels);
+ dmadev->ddev.chancnt++;
+ return 0;
+}
+
+static void hidma_issue_task(unsigned long arg)
+{
+ struct hidma_dev *dmadev = (struct hidma_dev *)arg;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ hidma_ll_start(dmadev->lldev);
+}
+
+static void hidma_issue_pending(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (!mchan->running) {
+ struct hidma_desc *desc = list_first_entry(&mchan->active,
+ struct hidma_desc,
+ node);
+ mchan->running = desc;
+ }
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ /* PM will be released in hidma_callback function. */
+ status = pm_runtime_get(dmadev->ddev.dev);
+ if (status < 0)
+ tasklet_schedule(&dmadev->task);
+ else
+ hidma_ll_start(dmadev->lldev);
+}
+
+static enum dma_status hidma_tx_status(struct dma_chan *dmach,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dmach, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
+ unsigned long flags;
+ dma_cookie_t runcookie;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (mchan->running)
+ runcookie = mchan->running->desc.cookie;
+ else
+ runcookie = -EINVAL;
+
+ if (runcookie == cookie)
+ ret = DMA_PAUSED;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+ }
+
+ return ret;
+}
+
+/*
+ * Submit descriptor to hardware.
+ * Lock the PM for each descriptor we are sending.
+ */
+static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct hidma_chan *mchan = to_hidma_chan(txd->chan);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ struct hidma_desc *mdesc;
+ unsigned long irqflags;
+ dma_cookie_t cookie;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ if (!hidma_ll_isenabled(dmadev->lldev)) {
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return -ENODEV;
+ }
+
+ mdesc = container_of(txd, struct hidma_desc, desc);
+ spin_lock_irqsave(&mchan->lock, irqflags);
+
+ /* Move descriptor to active */
+ list_move_tail(&mdesc->node, &mchan->active);
+
+ /* Update cookie */
+ cookie = dma_cookie_assign(txd);
+
+ hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return cookie;
+}
+
+static int hidma_alloc_chan_resources(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ struct hidma_desc *mdesc, *tmp;
+ unsigned long irqflags;
+ LIST_HEAD(descs);
+ unsigned int i;
+ int rc = 0;
+
+ if (mchan->allocated)
+ return 0;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < dmadev->nr_descriptors; i++) {
+ mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
+ if (!mdesc) {
+ rc = -ENOMEM;
+ break;
+ }
+ dma_async_tx_descriptor_init(&mdesc->desc, dmach);
+ mdesc->desc.tx_submit = hidma_tx_submit;
+
+ rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
+ "DMA engine", hidma_callback, mdesc,
+ &mdesc->tre_ch);
+ if (rc) {
+ dev_err(dmach->device->dev,
+ "channel alloc failed at %u\n", i);
+ kfree(mdesc);
+ break;
+ }
+ list_add_tail(&mdesc->node, &descs);
+ }
+
+ if (rc) {
+ /* return the allocated descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+ hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
+ kfree(mdesc);
+ }
+ return rc;
+ }
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&descs, &mchan->free);
+ mchan->allocated = true;
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+ return 1;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_desc *mdesc = NULL;
+ struct hidma_dev *mdma = mchan->dmadev;
+ unsigned long irqflags;
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (!list_empty(&mchan->free)) {
+ mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+ list_del(&mdesc->node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ if (!mdesc)
+ return NULL;
+
+ hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+ src, dest, len, flags);
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return &mdesc->desc;
+}
+
+static int hidma_terminate_channel(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan = to_hidma_chan(chan);
+ struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+ struct hidma_desc *tmp, *mdesc;
+ unsigned long irqflags;
+ LIST_HEAD(list);
+ int rc;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ /* give completed requests a chance to finish */
+ hidma_process_completed(mchan);
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_init(&mchan->active, &list);
+ list_splice_init(&mchan->prepared, &list);
+ list_splice_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ /* this suspends the existing transfer */
+ rc = hidma_ll_pause(dmadev->lldev);
+ if (rc) {
+ dev_err(dmadev->ddev.dev, "channel did not pause\n");
+ goto out;
+ }
+
+ /* return all user requests */
+ list_for_each_entry_safe(mdesc, tmp, &list, node) {
+ struct dma_async_tx_descriptor *txd = &mdesc->desc;
+ dma_async_tx_callback callback = mdesc->desc.callback;
+ void *param = mdesc->desc.callback_param;
+
+ dma_descriptor_unmap(txd);
+
+ if (callback)
+ callback(param);
+
+ dma_run_dependencies(txd);
+
+ /* move myself to free_list */
+ list_move(&mdesc->node, &mchan->free);
+ }
+
+ rc = hidma_ll_resume(dmadev->lldev);
+out:
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return rc;
+}
+
+static int hidma_terminate_all(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan = to_hidma_chan(chan);
+ struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+ int rc;
+
+ rc = hidma_terminate_channel(chan);
+ if (rc)
+ return rc;
+
+ /* reinitialize the hardware */
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ rc = hidma_ll_setup(dmadev->lldev);
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return rc;
+}
+
+static void hidma_free_chan_resources(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *mdma = mchan->dmadev;
+ struct hidma_desc *mdesc, *tmp;
+ unsigned long irqflags;
+ LIST_HEAD(descs);
+
+ /* terminate running transactions and free descriptors */
+ hidma_terminate_channel(dmach);
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+
+ /* Move data */
+ list_splice_tail_init(&mchan->free, &descs);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+ hidma_ll_free(mdma->lldev, mdesc->tre_ch);
+ list_del(&mdesc->node);
+ kfree(mdesc);
+ }
+
+ mchan->allocated = 0;
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+}
+
+static int hidma_pause(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan;
+ struct hidma_dev *dmadev;
+
+ mchan = to_hidma_chan(chan);
+ dmadev = to_hidma_dev(mchan->chan.device);
+ if (!mchan->paused) {
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ if (hidma_ll_pause(dmadev->lldev))
+ dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+ mchan->paused = true;
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+ return 0;
+}
+
+static int hidma_resume(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan;
+ struct hidma_dev *dmadev;
+ int rc = 0;
+
+ mchan = to_hidma_chan(chan);
+ dmadev = to_hidma_dev(mchan->chan.device);
+ if (mchan->paused) {
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ rc = hidma_ll_resume(dmadev->lldev);
+ if (!rc)
+ mchan->paused = false;
+ else
+ dev_err(dmadev->ddev.dev,
+ "failed to resume the channel");
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+ return rc;
+}
+
+static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+
+ /*
+ * All interrupts are request driven.
+ * HW doesn't send an interrupt by itself.
+ */
+ return hidma_ll_inthandler(chirq, lldev);
+}
+
+static int hidma_probe(struct platform_device *pdev)
+{
+ struct hidma_dev *dmadev;
+ struct resource *trca_resource;
+ struct resource *evca_resource;
+ int chirq;
+ void __iomem *evca;
+ void __iomem *trca;
+ int rc;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trca = devm_ioremap_resource(&pdev->dev, trca_resource);
+ if (IS_ERR(trca)) {
+ rc = PTR_ERR(trca);
+ goto bailout;
+ }
+
+ evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ evca = devm_ioremap_resource(&pdev->dev, evca_resource);
+ if (IS_ERR(evca)) {
+ rc = PTR_ERR(evca);
+ goto bailout;
+ }
+
+ /*
+ * This driver only handles the channel IRQs.
+ * Common IRQ is handled by the management driver.
+ */
+ chirq = platform_get_irq(pdev, 0);
+ if (chirq < 0) {
+ rc = -ENODEV;
+ goto bailout;
+ }
+
+ dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+ if (!dmadev) {
+ rc = -ENOMEM;
+ goto bailout;
+ }
+
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+ spin_lock_init(&dmadev->lock);
+ dmadev->ddev.dev = &pdev->dev;
+ pm_runtime_get_sync(dmadev->ddev.dev);
+
+ dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+ if (WARN_ON(!pdev->dev.dma_mask)) {
+ rc = -ENXIO;
+ goto dmafree;
+ }
+
+ dmadev->dev_evca = evca;
+ dmadev->evca_resource = evca_resource;
+ dmadev->dev_trca = trca;
+ dmadev->trca_resource = trca_resource;
+ dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+ dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
+ dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
+ dmadev->ddev.device_tx_status = hidma_tx_status;
+ dmadev->ddev.device_issue_pending = hidma_issue_pending;
+ dmadev->ddev.device_pause = hidma_pause;
+ dmadev->ddev.device_resume = hidma_resume;
+ dmadev->ddev.device_terminate_all = hidma_terminate_all;
+ dmadev->ddev.copy_align = 8;
+
+ device_property_read_u32(&pdev->dev, "desc-count",
+ &dmadev->nr_descriptors);
+
+ if (!dmadev->nr_descriptors && nr_desc_prm)
+ dmadev->nr_descriptors = nr_desc_prm;
+
+ if (!dmadev->nr_descriptors)
+ dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
+
+ dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+
+ /* Set DMA mask to 64 bits. */
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_warn(&pdev->dev, "unable to set coherent mask to 64");
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ goto dmafree;
+ }
+
+ dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
+ dmadev->nr_descriptors, dmadev->dev_trca,
+ dmadev->dev_evca, dmadev->chidx);
+ if (!dmadev->lldev) {
+ rc = -EPROBE_DEFER;
+ goto dmafree;
+ }
+
+ rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
+ "qcom-hidma", dmadev->lldev);
+ if (rc)
+ goto uninit;
+
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+ rc = hidma_chan_init(dmadev, 0);
+ if (rc)
+ goto uninit;
+
+ rc = dma_async_device_register(&dmadev->ddev);
+ if (rc)
+ goto uninit;
+
+ dmadev->irq = chirq;
+ tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+ dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
+ platform_set_drvdata(pdev, dmadev);
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return 0;
+
+uninit:
+ hidma_ll_uninit(dmadev->lldev);
+dmafree:
+ if (dmadev)
+ hidma_free(dmadev);
+bailout:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return rc;
+}
+
+static int hidma_remove(struct platform_device *pdev)
+{
+ struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ dma_async_device_unregister(&dmadev->ddev);
+ devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ hidma_ll_uninit(dmadev->lldev);
+ hidma_free(dmadev);
+
+ dev_info(&pdev->dev, "HI-DMA engine removed\n");
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_acpi_ids[] = {
+ {"QCOM8061"},
+ {},
+};
+#endif
+
+static const struct of_device_id hidma_match[] = {
+ {.compatible = "qcom,hidma-1.0",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hidma_match);
+
+static struct platform_driver hidma_driver = {
+ .probe = hidma_probe,
+ .remove = hidma_remove,
+ .driver = {
+ .name = "hidma",
+ .of_match_table = hidma_match,
+ .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
+ },
+};
+
+module_platform_driver(hidma_driver);
+MODULE_LICENSE("GPL v2");
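The runtime-PM discipline in hidma is per-descriptor rather than per-call: hidma_tx_submit() takes one PM reference for every descriptor handed to the hardware, and hidma_callback() drops it (via autosuspend) once that descriptor completes, so references balance across submit/complete pairs. Condensed:

    /* submit path: one reference per in-flight descriptor */
    pm_runtime_get_sync(dmadev->ddev.dev);
    /* ... queue the TRE to hardware ... */

    /* completion path (hidma_callback), once per retired descriptor */
    pm_runtime_mark_last_busy(dmadev->ddev.dev);
    pm_runtime_put_autosuspend(dmadev->ddev.dev);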
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
new file mode 100644
index 000000000000..231e306f6d87
--- /dev/null
+++ b/drivers/dma/qcom/hidma.h
@@ -0,0 +1,160 @@
+/*
+ * Qualcomm Technologies HIDMA data structures
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QCOM_HIDMA_H
+#define QCOM_HIDMA_H
+
+#include <linux/kfifo.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+
+#define TRE_SIZE 32 /* each TRE is 32 bytes */
+#define TRE_CFG_IDX 0
+#define TRE_LEN_IDX 1
+#define TRE_SRC_LOW_IDX 2
+#define TRE_SRC_HI_IDX 3
+#define TRE_DEST_LOW_IDX 4
+#define TRE_DEST_HI_IDX 5
+
+struct hidma_tx_status {
+ u8 err_info; /* error record in this transfer */
+ u8 err_code; /* completion code */
+};
+
+struct hidma_tre {
+ atomic_t allocated; /* if this channel is allocated */
+ bool queued; /* flag whether this is pending */
+ u16 status; /* status */
+ u32 chidx; /* index of the tre */
+ u32 dma_sig; /* signature of the tre */
+ const char *dev_name; /* name of the device */
+ void (*callback)(void *data); /* requester callback */
+ void *data; /* Data associated with this channel*/
+ struct hidma_lldev *lldev; /* lldma device pointer */
+ u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
+ u32 tre_index; /* the offset where this was written*/
+ u32 int_flags; /* interrupt flags */
+};
+
+struct hidma_lldev {
+ bool initialized; /* initialized flag */
+ u8 trch_state; /* trch_state of the device */
+ u8 evch_state; /* evch_state of the device */
+ u8 chidx; /* channel index in the core */
+ u32 nr_tres; /* max number of configs */
+ spinlock_t lock; /* reentrancy */
+ struct hidma_tre *trepool; /* trepool of user configs */
+ struct device *dev; /* device */
+ void __iomem *trca; /* Transfer Channel address */
+ void __iomem *evca; /* Event Channel address */
+ struct hidma_tre
+ **pending_tre_list; /* Pointers to pending TREs */
+ struct hidma_tx_status
+ *tx_status_list; /* Pointers to pending TREs status*/
+ s32 pending_tre_count; /* Number of TREs pending */
+
+ void *tre_ring; /* TRE ring */
+ dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */
+ u32 tre_ring_size; /* Byte size of the ring */
+ u32 tre_processed_off; /* last processed TRE */
+
+ void *evre_ring; /* EVRE ring */
+ dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */
+ u32 evre_ring_size; /* Byte size of the ring */
+ u32 evre_processed_off; /* last processed EVRE */
+
+ u32 tre_write_offset; /* TRE write location */
+ struct tasklet_struct task; /* task delivering notifications */
+ DECLARE_KFIFO_PTR(handoff_fifo,
+ struct hidma_tre *); /* pending TREs FIFO */
+};
+
+struct hidma_desc {
+ struct dma_async_tx_descriptor desc;
+ /* link list node for this channel*/
+ struct list_head node;
+ u32 tre_ch;
+};
+
+struct hidma_chan {
+ bool paused;
+ bool allocated;
+ char dbg_name[16];
+ u32 dma_sig;
+
+ /*
+ * Active descriptor on this channel. It is used by the
+ * DMA completion notification to locate the descriptor
+ * that initiated the transfer.
+ */
+ struct dentry *debugfs;
+ struct dentry *stats;
+ struct hidma_dev *dmadev;
+ struct hidma_desc *running;
+
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head active;
+ struct list_head completed;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+};
+
+struct hidma_dev {
+ int irq;
+ int chidx;
+ u32 nr_descriptors;
+
+ struct hidma_lldev *lldev;
+ void __iomem *dev_trca;
+ struct resource *trca_resource;
+ void __iomem *dev_evca;
+ struct resource *evca_resource;
+
+ /* used to protect the pending channel list*/
+ spinlock_t lock;
+ struct dma_device ddev;
+
+ struct dentry *debugfs;
+ struct dentry *stats;
+
+ /* Task delivering issue_pending */
+ struct tasklet_struct task;
+};
+
+int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
+ const char *dev_name,
+ void (*callback)(void *data), void *data, u32 *tre_ch);
+
+void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
+enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
+bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
+void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
+void hidma_ll_start(struct hidma_lldev *llhndl);
+int hidma_ll_pause(struct hidma_lldev *llhndl);
+int hidma_ll_resume(struct hidma_lldev *llhndl);
+void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
+ dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+int hidma_ll_setup(struct hidma_lldev *lldev);
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
+ void __iomem *trca, void __iomem *evca,
+ u8 chidx);
+int hidma_ll_uninit(struct hidma_lldev *llhndl);
+irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
+ u8 err_code);
+#endif
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
new file mode 100644
index 000000000000..ef491b893f40
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -0,0 +1,302 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine Management interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/bitops.h>
+
+#include "hidma_mgmt.h"
+
+#define HIDMA_QOS_N_OFFSET 0x300
+#define HIDMA_CFG_OFFSET 0x400
+#define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C
+#define HIDMA_MAX_XACTIONS_OFFSET 0x420
+#define HIDMA_HW_VERSION_OFFSET 0x424
+#define HIDMA_CHRESET_TIMEOUT_OFFSET 0x418
+
+#define HIDMA_MAX_WR_XACTIONS_MASK GENMASK(4, 0)
+#define HIDMA_MAX_RD_XACTIONS_MASK GENMASK(4, 0)
+#define HIDMA_WEIGHT_MASK GENMASK(6, 0)
+#define HIDMA_MAX_BUS_REQ_LEN_MASK GENMASK(15, 0)
+#define HIDMA_CHRESET_TIMEOUT_MASK GENMASK(19, 0)
+
+#define HIDMA_MAX_WR_XACTIONS_BIT_POS 16
+#define HIDMA_MAX_BUS_WR_REQ_BIT_POS 16
+#define HIDMA_WRR_BIT_POS 8
+#define HIDMA_PRIORITY_BIT_POS 15
+
+#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
+#define HIDMA_MAX_CHANNEL_WEIGHT 15
+
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
+{
+ unsigned int i;
+ u32 val;
+
+ if (!is_power_of_2(mgmtdev->max_write_request) ||
+ (mgmtdev->max_write_request < 128) ||
+ (mgmtdev->max_write_request > 1024)) {
+ dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
+ mgmtdev->max_write_request);
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(mgmtdev->max_read_request) ||
+ (mgmtdev->max_read_request < 128) ||
+ (mgmtdev->max_read_request > 1024)) {
+ dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
+ mgmtdev->max_read_request);
+ return -EINVAL;
+ }
+
+ if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max_wr_xactions cannot be bigger than %ld\n",
+ HIDMA_MAX_WR_XACTIONS_MASK);
+ return -EINVAL;
+ }
+
+ if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max_rd_xactions cannot be bigger than %ld\n",
+ HIDMA_MAX_RD_XACTIONS_MASK);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mgmtdev->dma_channels; i++) {
+ if (mgmtdev->priority[i] > 1) {
+ dev_err(&mgmtdev->pdev->dev,
+ "priority can be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max value of weight can be %d.\n",
+ HIDMA_MAX_CHANNEL_WEIGHT);
+ return -EINVAL;
+ }
+
+ /* weight needs to be at least one */
+ if (mgmtdev->weight[i] == 0)
+ mgmtdev->weight[i] = 1;
+ }
+
+ pm_runtime_get_sync(&mgmtdev->pdev->dev);
+ val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+ val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
+ val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
+ val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
+ val |= mgmtdev->max_read_request;
+ writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+
+ val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+ val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
+ val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
+ val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
+ val |= mgmtdev->max_rd_xactions;
+ writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+
+ mgmtdev->hw_version =
+ readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
+ mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
+ mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;
+
+ for (i = 0; i < mgmtdev->dma_channels; i++) {
+ u32 weight = mgmtdev->weight[i];
+ u32 priority = mgmtdev->priority[i];
+
+ val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+ val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
+ val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
+ val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
+ val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
+ writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+ }
+
+ val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+ val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
+ val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
+ writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+
+ pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
+ pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
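
hidma_mgmt_setup() repeats one read-modify-write idiom for every two-field register: clear the field's mask at its bit position, then OR in the new value. A minimal stand-alone sketch of that idiom (the helper name is hypothetical, not part of the driver):

```c
#include <linux/types.h>

/*
 * Hypothetical helper distilling the read-modify-write idiom used in
 * hidma_mgmt_setup(): clear the old field, then insert the new value.
 */
static inline u32 hidma_field_update(u32 reg, u32 mask, unsigned int shift,
				     u32 val)
{
	reg &= ~(mask << shift);	/* clear the existing field */
	reg |= (val & mask) << shift;	/* place the new value */
	return reg;
}

/*
 * The write-request half of HIDMA_MAX_BUS_REQ_LEN would then reduce to:
 *
 *   val = hidma_field_update(val, HIDMA_MAX_BUS_REQ_LEN_MASK,
 *			      HIDMA_MAX_BUS_WR_REQ_BIT_POS,
 *			      mgmtdev->max_write_request);
 */
```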
+
+static int hidma_mgmt_probe(struct platform_device *pdev)
+{
+ struct hidma_mgmt_dev *mgmtdev;
+ struct resource *res;
+ void __iomem *virtaddr;
+ int irq;
+ int rc;
+ u32 val;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ virtaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(virtaddr)) {
+ rc = PTR_ERR(virtaddr);
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "irq resources not found\n");
+ rc = irq;
+ goto out;
+ }
+
+ mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
+ if (!mgmtdev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mgmtdev->pdev = pdev;
+ mgmtdev->addrsize = resource_size(res);
+ mgmtdev->virtaddr = virtaddr;
+
+ rc = device_property_read_u32(&pdev->dev, "dma-channels",
+ &mgmtdev->dma_channels);
+ if (rc) {
+ dev_err(&pdev->dev, "number of channels missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev,
+ "channel-reset-timeout-cycles",
+ &mgmtdev->chreset_timeout_cycles);
+ if (rc) {
+ dev_err(&pdev->dev, "channel reset timeout missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
+ &mgmtdev->max_write_request);
+ if (rc) {
+ dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
+ &mgmtdev->max_read_request);
+ if (rc) {
+ dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
+ &mgmtdev->max_wr_xactions);
+ if (rc) {
+ dev_err(&pdev->dev, "max-write-transactions missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
+ &mgmtdev->max_rd_xactions);
+ if (rc) {
+ dev_err(&pdev->dev, "max-read-transactions missing\n");
+ goto out;
+ }
+
+ mgmtdev->priority = devm_kcalloc(&pdev->dev,
+ mgmtdev->dma_channels,
+ sizeof(*mgmtdev->priority),
+ GFP_KERNEL);
+ if (!mgmtdev->priority) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mgmtdev->weight = devm_kcalloc(&pdev->dev,
+ mgmtdev->dma_channels,
+ sizeof(*mgmtdev->weight), GFP_KERNEL);
+ if (!mgmtdev->weight) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = hidma_mgmt_setup(mgmtdev);
+ if (rc) {
+ dev_err(&pdev->dev, "setup failed\n");
+ goto out;
+ }
+
+ /* start the HW */
+ val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+ val |= 1;
+ writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+
+ rc = hidma_mgmt_init_sys(mgmtdev);
+ if (rc) {
+ dev_err(&pdev->dev, "sysfs setup failed\n");
+ goto out;
+ }
+
+ dev_info(&pdev->dev,
+ "HW rev: %d.%d @ %pa with %d physical channels\n",
+ mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
+ &res->start, mgmtdev->dma_channels);
+
+ platform_set_drvdata(pdev, mgmtdev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ return 0;
+out:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return rc;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
+ {"QCOM8060"},
+ {},
+};
+#endif
+
+static const struct of_device_id hidma_mgmt_match[] = {
+ {.compatible = "qcom,hidma-mgmt-1.0",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
+
+static struct platform_driver hidma_mgmt_driver = {
+ .probe = hidma_mgmt_probe,
+ .driver = {
+ .name = "hidma-mgmt",
+ .of_match_table = hidma_mgmt_match,
+ .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
+ },
+};
+
+module_platform_driver(hidma_mgmt_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma_mgmt.h b/drivers/dma/qcom/hidma_mgmt.h
new file mode 100644
index 000000000000..f7daf33769f4
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.h
@@ -0,0 +1,39 @@
+/*
+ * Qualcomm Technologies HIDMA Management common header
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+struct hidma_mgmt_dev {
+ u8 hw_version_major;
+ u8 hw_version_minor;
+
+ u32 max_wr_xactions;
+ u32 max_rd_xactions;
+ u32 max_write_request;
+ u32 max_read_request;
+ u32 dma_channels;
+ u32 chreset_timeout_cycles;
+ u32 hw_version;
+ u32 *priority;
+ u32 *weight;
+
+ /* Hardware device constants */
+ void __iomem *virtaddr;
+ resource_size_t addrsize;
+
+ struct kobject **chroots;
+ struct platform_device *pdev;
+};
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *dev);
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev);
diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c
new file mode 100644
index 000000000000..d61f1068a34b
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt_sys.c
@@ -0,0 +1,295 @@
+/*
+ * Qualcomm Technologies HIDMA Management SYS interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#include "hidma_mgmt.h"
+
+struct hidma_chan_attr {
+ struct hidma_mgmt_dev *mdev;
+ int index;
+ struct kobj_attribute attr;
+};
+
+struct hidma_mgmt_fileinfo {
+ char *name;
+ int mode;
+ int (*get)(struct hidma_mgmt_dev *mdev);
+ int (*set)(struct hidma_mgmt_dev *mdev, u64 val);
+};
+
+#define IMPLEMENT_GETSET(name) \
+static int get_##name(struct hidma_mgmt_dev *mdev) \
+{ \
+ return mdev->name; \
+} \
+static int set_##name(struct hidma_mgmt_dev *mdev, u64 val) \
+{ \
+ u64 tmp; \
+ int rc; \
+ \
+ tmp = mdev->name; \
+ mdev->name = val; \
+ rc = hidma_mgmt_setup(mdev); \
+ if (rc) \
+ mdev->name = tmp; \
+ return rc; \
+}
+
+#define DECLARE_ATTRIBUTE(name, mode) \
+ {#name, mode, get_##name, set_##name}
+
+IMPLEMENT_GETSET(hw_version_major)
+IMPLEMENT_GETSET(hw_version_minor)
+IMPLEMENT_GETSET(max_wr_xactions)
+IMPLEMENT_GETSET(max_rd_xactions)
+IMPLEMENT_GETSET(max_write_request)
+IMPLEMENT_GETSET(max_read_request)
+IMPLEMENT_GETSET(dma_channels)
+IMPLEMENT_GETSET(chreset_timeout_cycles)
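
For reference, a sketch of what one IMPLEMENT_GETSET() invocation expands to after preprocessing, shown for dma_channels (comments added; the expansion follows mechanically from the macro body above):

```c
/* IMPLEMENT_GETSET(dma_channels) expands to roughly: */
static int get_dma_channels(struct hidma_mgmt_dev *mdev)
{
	return mdev->dma_channels;
}

static int set_dma_channels(struct hidma_mgmt_dev *mdev, u64 val)
{
	u64 tmp;
	int rc;

	tmp = mdev->dma_channels;
	mdev->dma_channels = val;
	rc = hidma_mgmt_setup(mdev);	/* re-validate and reprogram HW */
	if (rc)
		mdev->dma_channels = tmp;	/* roll back on failure */
	return rc;
}
```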
+
+static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+ u64 tmp;
+ int rc;
+
+ if (i >= mdev->dma_channels)
+ return -EINVAL;
+
+ tmp = mdev->priority[i];
+ mdev->priority[i] = val;
+ rc = hidma_mgmt_setup(mdev);
+ if (rc)
+ mdev->priority[i] = tmp;
+ return rc;
+}
+
+static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+ u64 tmp;
+ int rc;
+
+ if (i >= mdev->dma_channels)
+ return -EINVAL;
+
+ tmp = mdev->weight[i];
+ mdev->weight[i] = val;
+ rc = hidma_mgmt_setup(mdev);
+ if (rc)
+ mdev->weight[i] = tmp;
+ return rc;
+}
+
+static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = {
+ DECLARE_ATTRIBUTE(hw_version_major, S_IRUGO),
+ DECLARE_ATTRIBUTE(hw_version_minor, S_IRUGO),
+ DECLARE_ATTRIBUTE(dma_channels, S_IRUGO),
+ DECLARE_ATTRIBUTE(chreset_timeout_cycles, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_wr_xactions, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_rd_xactions, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_write_request, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_read_request, S_IRUGO),
+};
+
+static ssize_t show_values(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ buf[0] = 0;
+
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+ sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev));
+ break;
+ }
+ }
+ return strlen(buf);
+}
+
+static ssize_t set_values(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ unsigned long tmp;
+ unsigned int i;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &tmp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+ rc = hidma_mgmt_files[i].set(mdev, tmp);
+ if (rc)
+ return rc;
+
+ break;
+ }
+ }
+ return count;
+}
+
+static ssize_t show_values_channel(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct hidma_chan_attr *chattr;
+ struct hidma_mgmt_dev *mdev;
+
+ buf[0] = 0;
+ chattr = container_of(attr, struct hidma_chan_attr, attr);
+ mdev = chattr->mdev;
+ if (strcmp(attr->attr.name, "priority") == 0)
+ sprintf(buf, "%d\n", mdev->priority[chattr->index]);
+ else if (strcmp(attr->attr.name, "weight") == 0)
+ sprintf(buf, "%d\n", mdev->weight[chattr->index]);
+
+ return strlen(buf);
+}
+
+static ssize_t set_values_channel(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hidma_chan_attr *chattr;
+ struct hidma_mgmt_dev *mdev;
+ unsigned long tmp;
+ int rc;
+
+ chattr = container_of(attr, struct hidma_chan_attr, attr);
+ mdev = chattr->mdev;
+
+ rc = kstrtoul(buf, 0, &tmp);
+ if (rc)
+ return rc;
+
+ if (strcmp(attr->attr.name, "priority") == 0) {
+ rc = set_priority(mdev, chattr->index, tmp);
+ if (rc)
+ return rc;
+ } else if (strcmp(attr->attr.name, "weight") == 0) {
+ rc = set_weight(mdev, chattr->index, tmp);
+ if (rc)
+ return rc;
+ }
+ return count;
+}
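
The per-channel handlers recover their driver context with container_of() on the embedded kobj_attribute rather than via drvdata, since each attribute hangs off a bare per-channel kobject. A minimal sketch of that pattern with hypothetical names:

```c
#include <linux/kernel.h>	/* container_of(), sprintf() */
#include <linux/kobject.h>

/* Hypothetical reduction of the hidma_chan_attr pattern. */
struct chan_ctx {
	int index;
	struct kobj_attribute attr;	/* embedded attribute */
};

static ssize_t chan_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	/* recover the surrounding context from the attribute pointer */
	struct chan_ctx *ctx = container_of(attr, struct chan_ctx, attr);

	return sprintf(buf, "%d\n", ctx->index);
}
```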
+
+static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode)
+{
+ struct device_attribute *attrs;
+ char *name_copy;
+
+ attrs = devm_kmalloc(&dev->pdev->dev,
+ sizeof(struct device_attribute), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ name_copy = devm_kstrdup(&dev->pdev->dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ attrs->attr.name = name_copy;
+ attrs->attr.mode = mode;
+ attrs->show = show_values;
+ attrs->store = set_values;
+ sysfs_attr_init(&attrs->attr);
+
+ return device_create_file(&dev->pdev->dev, attrs);
+}
+
+static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name,
+ int mode, int index,
+ struct kobject *parent)
+{
+ struct hidma_chan_attr *chattr;
+ char *name_copy;
+
+ chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL);
+ if (!chattr)
+ return -ENOMEM;
+
+ name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ chattr->mdev = mdev;
+ chattr->index = index;
+ chattr->attr.attr.name = name_copy;
+ chattr->attr.attr.mode = mode;
+ chattr->attr.show = show_values_channel;
+ chattr->attr.store = set_values_channel;
+ sysfs_attr_init(&chattr->attr.attr);
+
+ return sysfs_create_file(parent, &chattr->attr.attr);
+}
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev)
+{
+ unsigned int i;
+ int rc;
+ int required;
+ struct kobject *chanops;
+
+ required = sizeof(*mdev->chroots) * mdev->dma_channels;
+ mdev->chroots = devm_kmalloc(&mdev->pdev->dev, required, GFP_KERNEL);
+ if (!mdev->chroots)
+ return -ENOMEM;
+
+ chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj);
+ if (!chanops)
+ return -ENOMEM;
+
+ /* create each channel directory here */
+ for (i = 0; i < mdev->dma_channels; i++) {
+ char name[20];
+
+ snprintf(name, sizeof(name), "chan%d", i);
+ mdev->chroots[i] = kobject_create_and_add(name, chanops);
+ if (!mdev->chroots[i])
+ return -ENOMEM;
+ }
+
+ /* populate common parameters */
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name,
+ hidma_mgmt_files[i].mode);
+ if (rc)
+ return rc;
+ }
+
+ /* populate parameters that are per channel */
+ for (i = 0; i < mdev->dma_channels; i++) {
+ rc = create_sysfs_entry_channel(mdev, "priority",
+ (S_IRUGO | S_IWUGO), i,
+ mdev->chroots[i]);
+ if (rc)
+ return rc;
+
+ rc = create_sysfs_entry_channel(mdev, "weight",
+ (S_IRUGO | S_IWUGO), i,
+ mdev->chroots[i]);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_init_sys);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index f32c430eb16c..6e0685f1a838 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -12,7 +12,7 @@ config RENESAS_DMA
config SH_DMAE_BASE
bool "Renesas SuperH DMA Engine support"
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
default y
@@ -41,7 +41,7 @@ endif
config RCAR_DMAC
tristate "Renesas R-Car Gen2 DMA Controller"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select RENESAS_DMA
help
This driver supports the general purpose DMA controller found in the
@@ -49,7 +49,7 @@ config RCAR_DMAC
config RENESAS_USB_DMAC
tristate "Renesas USB-DMA Controller"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 7820d07e7bee..dfb17926297b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -413,7 +413,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
u16 dmaor;
/* Clear all channels and enable the DMAC globally. */
- rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 11707df1a689..80d86402490e 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -699,7 +699,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
if (pdev->dev.of_node)
- pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
+ pdata = of_device_get_match_data(&pdev->dev);
else
pdata = dev_get_platdata(&pdev->dev);
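
of_device_get_match_data() collapses the match-then-dereference dance into one call. Roughly, it behaves like the following sketch (hypothetical helper name; the real helper also locates the table itself via dev->driver):

```c
#include <linux/of_device.h>

/*
 * Hypothetical sketch of the lookup of_device_get_match_data() hides:
 * match the device against an OF table and hand back .data, or NULL
 * when nothing matched.
 */
static const void *example_get_match_data(struct device *dev,
					  const struct of_device_id *table)
{
	const struct of_device_id *match = of_match_device(table, dev);

	return match ? match->data : NULL;
}
```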
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 22ea2419ee56..e48350e65089 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -989,7 +989,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
return 0;
}
-static int sirfsoc_dma_runtime_suspend(struct device *dev)
+static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
@@ -997,7 +997,7 @@ static int sirfsoc_dma_runtime_suspend(struct device *dev)
return 0;
}
-static int sirfsoc_dma_runtime_resume(struct device *dev)
+static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
int ret;
@@ -1010,8 +1010,7 @@ static int sirfsoc_dma_runtime_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int sirfsoc_dma_pm_suspend(struct device *dev)
+static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
@@ -1062,7 +1061,7 @@ static int sirfsoc_dma_pm_suspend(struct device *dev)
return 0;
}
-static int sirfsoc_dma_pm_resume(struct device *dev)
+static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
@@ -1121,7 +1120,6 @@ static int sirfsoc_dma_pm_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 1661d518224a..e0df233dde92 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1271,6 +1271,7 @@ static const struct of_device_id sun4i_dma_match[] = {
{ .compatible = "allwinner,sun4i-a10-dma" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, sun4i_dma_match);
static struct platform_driver sun4i_dma_driver = {
.probe = sun4i_dma_probe,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 935da8192f59..3871f29e523d 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1292,40 +1292,19 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
.support_separate_wcount_reg = true,
};
-
-static const struct of_device_id tegra_dma_of_match[] = {
- {
- .compatible = "nvidia,tegra148-apbdma",
- .data = &tegra148_dma_chip_data,
- }, {
- .compatible = "nvidia,tegra114-apbdma",
- .data = &tegra114_dma_chip_data,
- }, {
- .compatible = "nvidia,tegra30-apbdma",
- .data = &tegra30_dma_chip_data,
- }, {
- .compatible = "nvidia,tegra20-apbdma",
- .data = &tegra20_dma_chip_data,
- }, {
- },
-};
-MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
-
static int tegra_dma_probe(struct platform_device *pdev)
{
struct resource *res;
struct tegra_dma *tdma;
int ret;
int i;
- const struct tegra_dma_chip_data *cdata = NULL;
- const struct of_device_id *match;
+ const struct tegra_dma_chip_data *cdata;
- match = of_match_device(tegra_dma_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
+ cdata = of_device_get_match_data(&pdev->dev);
+ if (!cdata) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
}
- cdata = match->data;
tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
sizeof(struct tegra_dma_channel), GFP_KERNEL);
@@ -1612,6 +1591,24 @@ static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};
+static const struct of_device_id tegra_dma_of_match[] = {
+ {
+ .compatible = "nvidia,tegra148-apbdma",
+ .data = &tegra148_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra114-apbdma",
+ .data = &tegra114_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra30-apbdma",
+ .data = &tegra30_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra20-apbdma",
+ .data = &tegra20_dma_chip_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
+
static struct platform_driver tegra_dmac_driver = {
.driver = {
.name = "tegra-apbdma",
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 6f4b5017ca3b..ef67f278e076 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
@@ -190,8 +191,7 @@ struct xilinx_vdma_tx_descriptor {
* @desc_offset: TX descriptor registers offset
* @lock: Descriptor operation lock
* @pending_list: Descriptors waiting
- * @active_desc: Active descriptor
- * @allocated_desc: Allocated descriptor
+ * @active_list: Descriptors ready to submit
* @done_list: Complete descriptors
* @common: DMA common channel
* @desc_pool: Descriptors pool
@@ -206,6 +206,7 @@ struct xilinx_vdma_tx_descriptor {
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
*/
struct xilinx_vdma_chan {
struct xilinx_vdma_device *xdev;
@@ -213,8 +214,7 @@ struct xilinx_vdma_chan {
u32 desc_offset;
spinlock_t lock;
struct list_head pending_list;
- struct xilinx_vdma_tx_descriptor *active_desc;
- struct xilinx_vdma_tx_descriptor *allocated_desc;
+ struct list_head active_list;
struct list_head done_list;
struct dma_chan common;
struct dma_pool *desc_pool;
@@ -229,6 +229,7 @@ struct xilinx_vdma_chan {
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
+ u32 desc_pendingcount;
};
/**
@@ -254,6 +255,9 @@ struct xilinx_vdma_device {
container_of(chan, struct xilinx_vdma_chan, common)
#define to_vdma_tx_descriptor(tx) \
container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
+#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+ cond, delay_us, timeout_us)
/* IO accessors */
static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
@@ -342,19 +346,11 @@ static struct xilinx_vdma_tx_descriptor *
xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
-
- if (chan->allocated_desc)
- return chan->allocated_desc;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
- spin_lock_irqsave(&chan->lock, flags);
- chan->allocated_desc = desc;
- spin_unlock_irqrestore(&chan->lock, flags);
-
INIT_LIST_HEAD(&desc->segments);
return desc;
@@ -412,9 +408,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
xilinx_vdma_free_desc_list(chan, &chan->pending_list);
xilinx_vdma_free_desc_list(chan, &chan->done_list);
-
- xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
- chan->active_desc = NULL;
+ xilinx_vdma_free_desc_list(chan, &chan->active_list);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -560,18 +554,17 @@ static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
*/
static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
{
- int loop = XILINX_VDMA_LOOP_COUNT;
+ int err;
+ u32 val;
vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
/* Wait for the hardware to halt */
- do {
- if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_HALTED)
- break;
- } while (loop--);
+ err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+ (val & XILINX_VDMA_DMASR_HALTED), 0,
+ XILINX_VDMA_LOOP_COUNT);
- if (!loop) {
+ if (err) {
dev_err(chan->dev, "Cannot stop channel %p: %x\n",
chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
chan->err = true;
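
Both busy-wait conversions above funnel through readl_poll_timeout() via the xilinx_vdma_poll_timeout() wrapper. A simplified model of what the poll helper does (the real macro in <linux/iopoll.h> additionally supports sleeping between reads and tracks the budget with ktime):

```c
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/*
 * Simplified model of readl_poll_timeout(addr, val, cond, delay_us,
 * timeout_us): re-read the register until the condition holds or the
 * timeout budget runs out, returning -ETIMEDOUT on failure.
 */
static int example_poll_timeout(void __iomem *addr, u32 *val,
				bool (*cond)(u32), unsigned long timeout_us)
{
	while (timeout_us--) {
		*val = readl(addr);
		if (cond(*val))
			return 0;
		udelay(1);
	}
	*val = readl(addr);		/* one final sample */
	return cond(*val) ? 0 : -ETIMEDOUT;
}
```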
@@ -586,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
*/
static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
{
- int loop = XILINX_VDMA_LOOP_COUNT;
+ int err;
+ u32 val;
vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
/* Wait for the hardware to start */
- do {
- if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_HALTED))
- break;
- } while (loop--);
+ err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+ !(val & XILINX_VDMA_DMASR_HALTED), 0,
+ XILINX_VDMA_LOOP_COUNT);
- if (!loop) {
+ if (err) {
dev_err(chan->dev, "Cannot start channel %p: %x\n",
chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -614,45 +606,39 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
- struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
+ struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
u32 reg;
- struct xilinx_vdma_tx_segment *head, *tail = NULL;
+ struct xilinx_vdma_tx_segment *tail_segment;
+ /* This function is called with chan->lock held */
if (chan->err)
return;
- spin_lock_irqsave(&chan->lock, flags);
-
- /* There's already an active descriptor, bail out. */
- if (chan->active_desc)
- goto out_unlock;
-
if (list_empty(&chan->pending_list))
- goto out_unlock;
+ return;
desc = list_first_entry(&chan->pending_list,
struct xilinx_vdma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment, node);
/* If it is SG mode and hardware is busy, cannot submit */
if (chan->has_sg && xilinx_vdma_is_running(chan) &&
!xilinx_vdma_is_idle(chan)) {
dev_dbg(chan->dev, "DMA controller still busy\n");
- goto out_unlock;
+ return;
}
/*
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
*/
- if (chan->has_sg) {
- head = list_first_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- tail = list_entry(desc->segments.prev,
- struct xilinx_vdma_tx_segment, node);
-
- vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
- }
+ if (chan->has_sg)
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+ desc->async_tx.phys);
/* Configure the hardware using info in the config structure */
reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
@@ -662,6 +648,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
else
reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
+ /* Configure the channel with the number of frame buffers */
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+ chan->desc_pendingcount);
+
/*
* With SG, start with circular mode, so that BDs can be fetched.
* In direct register mode, if not parking, enable circular mode
@@ -690,16 +680,19 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
xilinx_vdma_start(chan);
if (chan->err)
- goto out_unlock;
+ return;
/* Start the transfer */
if (chan->has_sg) {
- vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+ tail_segment->phys);
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
- list_for_each_entry(segment, &desc->segments, node) {
+ list_for_each_entry(desc, &chan->pending_list, node) {
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
vdma_desc_write(chan,
XILINX_VDMA_REG_START_ADDRESS(i++),
segment->hw.buf_addr);
@@ -707,7 +700,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
}
if (!last)
- goto out_unlock;
+ return;
/* HW expects these parameters to be same for one transaction */
vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
@@ -716,11 +709,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
}
- list_del(&desc->node);
- chan->active_desc = desc;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->lock, flags);
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
}
/**
@@ -730,8 +720,11 @@ out_unlock:
static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
{
struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+ spin_lock_irqsave(&chan->lock, flags);
xilinx_vdma_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
/**
@@ -742,24 +735,17 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
*/
static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
{
- struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
+ struct xilinx_vdma_tx_descriptor *desc, *next;
- spin_lock_irqsave(&chan->lock, flags);
+ /* This function is called with chan->lock held */
+ if (list_empty(&chan->active_list))
+ return;
- desc = chan->active_desc;
- if (!desc) {
- dev_dbg(chan->dev, "no running descriptors\n");
- goto out_unlock;
+ list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ list_del(&desc->node);
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
}
-
- dma_cookie_complete(&desc->async_tx);
- list_add_tail(&desc->node, &chan->done_list);
-
- chan->active_desc = NULL;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->lock, flags);
}
/**
@@ -770,21 +756,17 @@ out_unlock:
*/
static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
{
- int loop = XILINX_VDMA_LOOP_COUNT;
+ int err;
u32 tmp;
vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
- tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
- XILINX_VDMA_DMACR_RESET;
-
/* Wait for the hardware to finish reset */
- do {
- tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
- XILINX_VDMA_DMACR_RESET;
- } while (loop-- && tmp);
+ err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
+ !(tmp & XILINX_VDMA_DMACR_RESET), 0,
+ XILINX_VDMA_LOOP_COUNT);
- if (!loop) {
+ if (err) {
dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -793,7 +775,7 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
chan->err = false;
- return 0;
+ return err;
}
/**
@@ -870,8 +852,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
}
if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+ spin_lock(&chan->lock);
xilinx_vdma_complete_descriptor(chan);
xilinx_vdma_start_transfer(chan);
+ spin_unlock(&chan->lock);
}
tasklet_schedule(&chan->tasklet);
@@ -879,6 +863,44 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
}
/**
+ * append_desc_queue - Queue a descriptor on the pending list
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_vdma_chan *chan,
+ struct xilinx_vdma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *tail_segment;
+ struct xilinx_vdma_tx_descriptor *tail_desc;
+
+ if (list_empty(&chan->pending_list))
+ goto append;
+
+ /*
+ * Add the hardware descriptor to the chain of hardware descriptors
+ * that already exists in memory.
+ */
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+
+ /*
+ * Add the software descriptor and all children to the list
+ * of pending transactions
+ */
+append:
+ list_add_tail(&desc->node, &chan->pending_list);
+ chan->desc_pendingcount++;
+
+ if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+ dev_dbg(chan->dev, "desc pendingcount is too high\n");
+ chan->desc_pendingcount = chan->num_frms;
+ }
+}
+
+/**
* xilinx_vdma_tx_submit - Submit DMA transaction
* @tx: Async transaction descriptor
*
@@ -906,11 +928,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
- /* Append the transaction to the pending transactions queue. */
- list_add_tail(&desc->node, &chan->pending_list);
-
- /* Free the allocated desc */
- chan->allocated_desc = NULL;
+ /* Put this transaction onto the tail of the pending queue */
+ append_desc_queue(chan, desc);
spin_unlock_irqrestore(&chan->lock, flags);
@@ -973,13 +992,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
else
hw->buf_addr = xt->src_start;
- /* Link the previous next descriptor to current */
- if (!list_empty(&desc->segments)) {
- prev = list_last_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
- }
-
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
@@ -988,7 +1000,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
+ desc->async_tx.phys = segment->phys;
return &desc->async_tx;
@@ -1127,10 +1139,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
chan->dev = xdev->dev;
chan->xdev = xdev;
chan->has_sg = xdev->has_sg;
+ chan->desc_pendingcount = 0x0;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
INIT_LIST_HEAD(&chan->done_list);
+ INIT_LIST_HEAD(&chan->active_list);
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
@@ -1222,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
int chan_id = dma_spec->args[0];
- if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+ if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
return NULL;
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index ef25000a5bc6..37755e63cc28 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -367,14 +367,30 @@ config EDAC_OCTEON_PCI
Support for error detection and correction on the
Cavium Octeon family of SOCs.
-config EDAC_ALTERA_MC
- bool "Altera SDRAM Memory Controller EDAC"
+config EDAC_ALTERA
+ bool "Altera SOCFPGA ECC"
depends on EDAC_MM_EDAC=y && ARCH_SOCFPGA
help
Support for error detection and correction on the
- Altera SDRAM memory controller. Note that the
- preloader must initialize the SDRAM before loading
- the kernel.
+ Altera SOCs. This must be selected for SDRAM ECC.
+ Note that the preloader must initialize the SDRAM
+ before loading the kernel.
+
+config EDAC_ALTERA_L2C
+ bool "Altera L2 Cache ECC"
+ depends on EDAC_ALTERA=y
+ select CACHE_L2X0
+ help
+ Support for error detection and correction on the
+ Altera L2 cache Memory for Altera SoCs. This option
+ requires L2 cache so it will force that selection.
+
+config EDAC_ALTERA_OCRAM
+ bool "Altera On-Chip RAM ECC"
+ depends on EDAC_ALTERA=y && SRAM && GENERIC_ALLOCATOR
+ help
+ Support for error detection and correction on the
+ Altera On-Chip RAM Memory for Altera SoCs.
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index be163e20fe56..f9e4a3e0e6e9 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -67,6 +67,6 @@ obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o
obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o
obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
-obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o
+obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 929640981d8a..63e42098726d 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1,5 +1,5 @@
/*
- * Copyright Altera Corporation (C) 2014-2015. All rights reserved.
+ * Copyright Altera Corporation (C) 2014-2016. All rights reserved.
* Copyright 2011-2012 Calxeda, Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -17,8 +17,10 @@
* Adapted from the highbank_mc_edac driver.
*/
+#include <asm/cacheflush.h>
#include <linux/ctype.h>
#include <linux/edac.h>
+#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -34,6 +36,7 @@
#define EDAC_MOD_STR "altera_edac"
#define EDAC_VERSION "1"
+#define EDAC_DEVICE "Altera"
static const struct altr_sdram_prv_data c5_data = {
.ecc_ctrl_offset = CV_CTLCFG_OFST,
@@ -75,6 +78,31 @@ static const struct altr_sdram_prv_data a10_data = {
.ue_set_mask = A10_DIAGINT_TDERRA_MASK,
};
+/************************** EDAC Device Defines **************************/
+
+/* OCRAM ECC Management Group Defines */
+#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET 0x04
+#define ALTR_OCR_ECC_EN BIT(0)
+#define ALTR_OCR_ECC_INJS BIT(1)
+#define ALTR_OCR_ECC_INJD BIT(2)
+#define ALTR_OCR_ECC_SERR BIT(3)
+#define ALTR_OCR_ECC_DERR BIT(4)
+
+/* L2 ECC Management Group Defines */
+#define ALTR_MAN_GRP_L2_ECC_OFFSET 0x00
+#define ALTR_L2_ECC_EN BIT(0)
+#define ALTR_L2_ECC_INJS BIT(1)
+#define ALTR_L2_ECC_INJD BIT(2)
+
+#define ALTR_UE_TRIGGER_CHAR 'U' /* Trigger for UE */
+#define ALTR_TRIGGER_READ_WRD_CNT 32 /* Line size x 4 */
+#define ALTR_TRIG_OCRAM_BYTE_SIZE 128 /* Line size x 4 */
+#define ALTR_TRIG_L2C_BYTE_SIZE 4096 /* Full Page */
+
+/*********************** EDAC Memory Controller Functions ****************/
+
+/* The SDRAM controller uses the EDAC Memory Controller framework. */
+
static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
{
struct mem_ctl_info *mci = dev_id;
@@ -504,6 +532,466 @@ static struct platform_driver altr_sdram_edac_driver = {
module_platform_driver(altr_sdram_edac_driver);
+/************************* EDAC Parent Probe *************************/
+
+static const struct of_device_id altr_edac_device_of_match[];
+
+static const struct of_device_id altr_edac_of_match[] = {
+ { .compatible = "altr,socfpga-ecc-manager" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_of_match);
+
+static int altr_edac_probe(struct platform_device *pdev)
+{
+ of_platform_populate(pdev->dev.of_node, altr_edac_device_of_match,
+ NULL, &pdev->dev);
+ return 0;
+}
+
+static struct platform_driver altr_edac_driver = {
+ .probe = altr_edac_probe,
+ .driver = {
+ .name = "socfpga_ecc_manager",
+ .of_match_table = altr_edac_of_match,
+ },
+};
+module_platform_driver(altr_edac_driver);
+
+/************************* EDAC Device Functions *************************/
+
+/*
+ * EDAC Device Functions (shared between various IPs).
+ * The discrete memories use the EDAC Device framework. The probe
+ * and error handling functions are very similar between memories
+ * so they are shared. The memory allocation and freeing for EDAC
+ * trigger testing are different for each memory.
+ */
+
+const struct edac_device_prv_data ocramecc_data;
+const struct edac_device_prv_data l2ecc_data;
+
+struct edac_device_prv_data {
+ int (*setup)(struct platform_device *pdev, void __iomem *base);
+ int ce_clear_mask;
+ int ue_clear_mask;
+ char dbgfs_name[20];
+ void * (*alloc_mem)(size_t size, void **other);
+ void (*free_mem)(void *p, size_t size, void *other);
+ int ecc_enable_mask;
+ int ce_set_mask;
+ int ue_set_mask;
+ int trig_alloc_sz;
+};
+
+struct altr_edac_device_dev {
+ void __iomem *base;
+ int sb_irq;
+ int db_irq;
+ const struct edac_device_prv_data *data;
+ struct dentry *debugfs_dir;
+ char *edac_dev_name;
+};
+
+static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
+{
+ irqreturn_t ret_value = IRQ_NONE;
+ struct edac_device_ctl_info *dci = dev_id;
+ struct altr_edac_device_dev *drvdata = dci->pvt_info;
+ const struct edac_device_prv_data *priv = drvdata->data;
+
+ if (irq == drvdata->sb_irq) {
+ if (priv->ce_clear_mask)
+ writel(priv->ce_clear_mask, drvdata->base);
+ edac_device_handle_ce(dci, 0, 0, drvdata->edac_dev_name);
+ ret_value = IRQ_HANDLED;
+ } else if (irq == drvdata->db_irq) {
+ if (priv->ue_clear_mask)
+ writel(priv->ue_clear_mask, drvdata->base);
+ edac_device_handle_ue(dci, 0, 0, drvdata->edac_dev_name);
+ panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+ ret_value = IRQ_HANDLED;
+ } else {
+ WARN_ON(1);
+ }
+
+ return ret_value;
+}
+
+static ssize_t altr_edac_device_trig(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+
+{
+ u32 *ptemp, i, error_mask;
+ int result = 0;
+ u8 trig_type;
+ unsigned long flags;
+ struct edac_device_ctl_info *edac_dci = file->private_data;
+ struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+ const struct edac_device_prv_data *priv = drvdata->data;
+ void *generic_ptr = edac_dci->dev;
+
+ if (!user_buf || get_user(trig_type, user_buf))
+ return -EFAULT;
+
+ if (!priv->alloc_mem)
+ return -ENOMEM;
+
+ /*
+ * Note that generic_ptr is initialized to the device pointer, but
+ * some alloc_mem() implementations override it to return other data.
+ */
+ ptemp = priv->alloc_mem(priv->trig_alloc_sz, &generic_ptr);
+ if (!ptemp) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Inject: Buffer Allocation error\n");
+ return -ENOMEM;
+ }
+
+ if (trig_type == ALTR_UE_TRIGGER_CHAR)
+ error_mask = priv->ue_set_mask;
+ else
+ error_mask = priv->ce_set_mask;
+
+ edac_printk(KERN_ALERT, EDAC_DEVICE,
+ "Trigger Error Mask (0x%X)\n", error_mask);
+
+ local_irq_save(flags);
+ /* write ECC corrupted data out. */
+ for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) {
+ /* Read data so we're in the correct state */
+ rmb();
+ if (ACCESS_ONCE(ptemp[i]))
+ result = -1;
+ /* Toggle Error bit (it is latched), leave ECC enabled */
+ writel(error_mask, drvdata->base);
+ writel(priv->ecc_enable_mask, drvdata->base);
+ ptemp[i] = i;
+ }
+ /* Ensure it has been written out */
+ wmb();
+ local_irq_restore(flags);
+
+ if (result)
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Mem Not Cleared\n");
+
+ /* Read out written data. ECC error caused here */
+ for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++)
+ if (ACCESS_ONCE(ptemp[i]) != i)
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Read doesn't match written data\n");
+
+ if (priv->free_mem)
+ priv->free_mem(ptemp, priv->trig_alloc_sz, generic_ptr);
+
+ return count;
+}
+
+static const struct file_operations altr_edac_device_inject_fops = {
+ .open = simple_open,
+ .write = altr_edac_device_trig,
+ .llseek = generic_file_llseek,
+};
+
+static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
+ const struct edac_device_prv_data *priv)
+{
+ struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+
+ if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
+ return;
+
+ drvdata->debugfs_dir = edac_debugfs_create_dir(drvdata->edac_dev_name);
+ if (!drvdata->debugfs_dir)
+ return;
+
+ if (!edac_debugfs_create_file(priv->dbgfs_name, S_IWUSR,
+ drvdata->debugfs_dir, edac_dci,
+ &altr_edac_device_inject_fops))
+ debugfs_remove_recursive(drvdata->debugfs_dir);
+}
+
+static const struct of_device_id altr_edac_device_of_match[] = {
+#ifdef CONFIG_EDAC_ALTERA_L2C
+ { .compatible = "altr,socfpga-l2-ecc", .data = (void *)&l2ecc_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
+ { .compatible = "altr,socfpga-ocram-ecc",
+ .data = (void *)&ocramecc_data },
+#endif
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_device_of_match);
+
+/*
+ * altr_edac_device_probe()
+ * This is a generic EDAC device driver that will support
+ * various Altera memory devices such as the L2 cache ECC and
+ * OCRAM ECC as well as the memories for other peripherals.
+ * Module specific initialization is done by passing the
+ * function index in the device tree.
+ */
+static int altr_edac_device_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci;
+ struct altr_edac_device_dev *drvdata;
+ struct resource *r;
+ int res = 0;
+ struct device_node *np = pdev->dev.of_node;
+ char *ecc_name = (char *)np->name;
+ static int dev_instance;
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to open devm\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to get mem resource\n");
+ res = -ENODEV;
+ goto fail;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
+ dev_name(&pdev->dev))) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s:Error requesting mem region\n", ecc_name);
+ res = -EBUSY;
+ goto fail;
+ }
+
+ dci = edac_device_alloc_ctl_info(sizeof(*drvdata), ecc_name,
+ 1, ecc_name, 1, 0, NULL, 0,
+ dev_instance++);
+
+ if (!dci) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: Unable to allocate EDAC device\n", ecc_name);
+ res = -ENOMEM;
+ goto fail;
+ }
+
+ drvdata = dci->pvt_info;
+ dci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dci);
+ drvdata->edac_dev_name = ecc_name;
+
+ drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!drvdata->base)
+ goto fail1;
+
+ /* Get driver specific data for this EDAC device */
+ drvdata->data = of_match_node(altr_edac_device_of_match, np)->data;
+
+ /* Check specific dependencies for the module */
+ if (drvdata->data->setup) {
+ res = drvdata->data->setup(pdev, drvdata->base);
+ if (res)
+ goto fail1;
+ }
+
+ drvdata->sb_irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
+ altr_edac_device_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res)
+ goto fail1;
+
+ drvdata->db_irq = platform_get_irq(pdev, 1);
+ res = devm_request_irq(&pdev->dev, drvdata->db_irq,
+ altr_edac_device_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res)
+ goto fail1;
+
+ dci->mod_name = "Altera ECC Manager";
+ dci->dev_name = drvdata->edac_dev_name;
+
+ res = edac_device_add_device(dci);
+ if (res)
+ goto fail1;
+
+ altr_create_edacdev_dbgfs(dci, drvdata->data);
+
+ devres_close_group(&pdev->dev, NULL);
+
+ return 0;
+
+fail1:
+ edac_device_free_ctl_info(dci);
+fail:
+ devres_release_group(&pdev->dev, NULL);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s:Error setting up EDAC device: %d\n", ecc_name, res);
+
+ return res;
+}
+
+static int altr_edac_device_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+ struct altr_edac_device_dev *drvdata = dci->pvt_info;
+
+ debugfs_remove_recursive(drvdata->debugfs_dir);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(dci);
+
+ return 0;
+}
+
+static struct platform_driver altr_edac_device_driver = {
+ .probe = altr_edac_device_probe,
+ .remove = altr_edac_device_remove,
+ .driver = {
+ .name = "altr_edac_device",
+ .of_match_table = altr_edac_device_of_match,
+ },
+};
+module_platform_driver(altr_edac_device_driver);
+
+/*********************** OCRAM EDAC Device Functions *********************/
+
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
+
+static void *ocram_alloc_mem(size_t size, void **other)
+{
+ struct device_node *np;
+ struct gen_pool *gp;
+ void *sram_addr;
+
+ np = of_find_compatible_node(NULL, NULL, "altr,socfpga-ocram-ecc");
+ if (!np)
+ return NULL;
+
+ gp = of_gen_pool_get(np, "iram", 0);
+ of_node_put(np);
+ if (!gp)
+ return NULL;
+
+ sram_addr = (void *)gen_pool_alloc(gp, size);
+ if (!sram_addr)
+ return NULL;
+
+ memset(sram_addr, 0, size);
+ /* Ensure data is written out */
+ wmb();
+
+ /* Remember this handle for freeing later */
+ *other = gp;
+
+ return sram_addr;
+}
+
+static void ocram_free_mem(void *p, size_t size, void *other)
+{
+ gen_pool_free((struct gen_pool *)other, (unsigned long)p, size);
+}
+
+/*
+ * altr_ocram_check_deps()
+ * Test for OCRAM cache ECC dependencies upon entry because
+ * platform specific startup should have initialized the
+ * On-Chip RAM memory and enabled the ECC.
+ * Can't turn on ECC here because accessing uninitialized
+ * memory would cause CE/UE errors, possibly causing an ABORT.
+ */
+static int altr_ocram_check_deps(struct platform_device *pdev,
+ void __iomem *base)
+{
+ if (readl(base) & ALTR_OCR_ECC_EN)
+ return 0;
+
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "OCRAM: No ECC present or ECC disabled.\n");
+ return -ENODEV;
+}
+
+const struct edac_device_prv_data ocramecc_data = {
+ .setup = altr_ocram_check_deps,
+ .ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR),
+ .ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR),
+ .dbgfs_name = "altr_ocram_trigger",
+ .alloc_mem = ocram_alloc_mem,
+ .free_mem = ocram_free_mem,
+ .ecc_enable_mask = ALTR_OCR_ECC_EN,
+ .ce_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJS),
+ .ue_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJD),
+ .trig_alloc_sz = ALTR_TRIG_OCRAM_BYTE_SIZE,
+};
+
+#endif /* CONFIG_EDAC_ALTERA_OCRAM */
+
+/********************* L2 Cache EDAC Device Functions ********************/
+
+#ifdef CONFIG_EDAC_ALTERA_L2C
+
+static void *l2_alloc_mem(size_t size, void **other)
+{
+ struct device *dev = *other;
+ void *ptemp = devm_kzalloc(dev, size, GFP_KERNEL);
+
+ if (!ptemp)
+ return NULL;
+
+ /* Make sure everything is written out */
+ wmb();
+
+ /*
+ * Clean all cache levels up to LoC (includes L2)
+ * This ensures the corrupted data is written into
+ * L2 cache for readback test (which causes ECC error).
+ */
+ flush_cache_all();
+
+ return ptemp;
+}
+
+static void l2_free_mem(void *p, size_t size, void *other)
+{
+ struct device *dev = other;
+
+ if (dev && p)
+ devm_kfree(dev, p);
+}
+
+/*
+ * altr_l2_check_deps()
+ * Test for L2 cache ECC dependencies upon entry because
+ * platform specific startup should have initialized the L2
+ * memory and enabled the ECC.
+ * Bail if ECC is not enabled.
+ * Note that L2 Cache Enable is forced at build time.
+ */
+static int altr_l2_check_deps(struct platform_device *pdev,
+ void __iomem *base)
+{
+ if (readl(base) & ALTR_L2_ECC_EN)
+ return 0;
+
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "L2: No ECC present, or ECC disabled\n");
+ return -ENODEV;
+}
+
+const struct edac_device_prv_data l2ecc_data = {
+ .setup = altr_l2_check_deps,
+ .ce_clear_mask = 0,
+ .ue_clear_mask = 0,
+ .dbgfs_name = "altr_l2_trigger",
+ .alloc_mem = l2_alloc_mem,
+ .free_mem = l2_free_mem,
+ .ecc_enable_mask = ALTR_L2_ECC_EN,
+ .ce_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJS),
+ .ue_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJD),
+ .trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
+};
+
+#endif /* CONFIG_EDAC_ALTERA_L2C */
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
-MODULE_DESCRIPTION("EDAC Driver for Altera SDRAM Controller");
+MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9eee13ef83a5..d87a47547ba5 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 chan_off;
u64 dram_base = get_dram_base(pvt, range);
u64 hole_off = f10_dhar_offset(pvt);
- u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+ u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
if (hi_rng) {
/*
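
The (u64) cast matters because dct_sel_hi is 32 bits wide: without it, the expression is evaluated in 32-bit arithmetic and the top bits of the base offset are shifted out before the widening assignment. A minimal stand-alone illustration with a hypothetical register value:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dct_sel_hi = 0x00012C00;	/* hypothetical value */

	/* 32-bit shift: bits above bit 31 are lost before widening */
	uint64_t wrong = (dct_sel_hi & 0xFFFFFC00) << 16;

	/* widen first, then shift: all bits preserved */
	uint64_t right = (uint64_t)(dct_sel_hi & 0xFFFFFC00) << 16;

	/* prints wrong=0x2c000000 right=0x12c000000 */
	printf("wrong=%#llx right=%#llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
```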
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
index 54d2f668cb0a..92dbb7e2320c 100644
--- a/drivers/edac/debugfs.c
+++ b/drivers/edac/debugfs.c
@@ -53,7 +53,7 @@ int __init edac_debugfs_init(void)
void edac_debugfs_exit(void)
{
- debugfs_remove(edac_debugfs);
+ debugfs_remove_recursive(edac_debugfs);
}
int edac_create_debugfs_nodes(struct mem_ctl_info *mci)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 8adfc167c2e3..1472f48c8ac6 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -535,60 +535,21 @@ static void edac_mc_workq_function(struct work_struct *work_req)
mutex_lock(&mem_ctls_mutex);
- /* if this control struct has movd to offline state, we are done */
- if (mci->op_state == OP_OFFLINE) {
+ if (mci->op_state != OP_RUNNING_POLL) {
mutex_unlock(&mem_ctls_mutex);
return;
}
- /* Only poll controllers that are running polled and have a check */
- if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+ if (edac_mc_assert_error_check_and_clear())
mci->edac_check(mci);
mutex_unlock(&mem_ctls_mutex);
- /* Reschedule */
+ /* Queue ourselves again. */
edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
- * edac_mc_workq_setup
- * initialize a workq item for this mci
- * passing in the new delay period in msec
- *
- * locking model:
- *
- * called with the mem_ctls_mutex held
- */
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
-{
- edac_dbg(0, "\n");
-
- /* if this instance is not in the POLL state, then simply return */
- if (mci->op_state != OP_RUNNING_POLL)
- return;
-
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-
- edac_queue_work(&mci->work, msecs_to_jiffies(msec));
-}
-
-/*
- * edac_mc_workq_teardown
- * stop the workq processing on this mci
- *
- * locking model:
- *
- * called WITHOUT lock held
- */
-static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
-{
- mci->op_state = OP_OFFLINE;
-
- edac_stop_work(&mci->work);
-}
-
-/*
* edac_mc_reset_delay_period(unsigned long value)
*
* user space has updated our poll period value, need to
@@ -771,12 +732,12 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
goto fail1;
}
- /* If there IS a check routine, then we are running POLLED */
- if (mci->edac_check != NULL) {
- /* This instance is NOW RUNNING */
+ if (mci->edac_check) {
mci->op_state = OP_RUNNING_POLL;
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
+
} else {
mci->op_state = OP_RUNNING_INTERRUPT;
}
@@ -823,15 +784,16 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
return NULL;
}
+ /* mark MCI offline: */
+ mci->op_state = OP_OFFLINE;
+
if (!del_mc_from_global_list(mci))
edac_mc_owner = NULL;
- mutex_unlock(&mem_ctls_mutex);
- /* flush workq processes */
- edac_mc_workq_teardown(mci);
+ mutex_unlock(&mem_ctls_mutex);
- /* marking MCI offline */
- mci->op_state = OP_OFFLINE;
+ if (mci->edac_check)
+ edac_stop_work(&mci->work);
/* remove from sysfs */
edac_remove_sysfs_mci_device(mci);
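
The rework folds the old workq_setup/teardown helpers into the add/del paths: the poll function re-queues itself only while op_state is OP_RUNNING_POLL, and teardown reduces to flipping the state under the mutex and then stopping the work. A minimal sketch of the self-rearming pattern with hypothetical names:

```c
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>

/* Hypothetical self-rearming poller mirroring the edac_mc scheme. */
struct poller {
	struct delayed_work work;
	struct mutex lock;
	bool running;			/* OP_RUNNING_POLL analogue */
	void (*check)(struct poller *p);
};

static void poll_fn(struct work_struct *work)
{
	struct poller *p = container_of(to_delayed_work(work),
					struct poller, work);

	mutex_lock(&p->lock);
	if (!p->running) {		/* teardown raced in: don't re-arm */
		mutex_unlock(&p->lock);
		return;
	}
	p->check(p);
	mutex_unlock(&p->lock);

	/* queue ourselves again, as edac_mc_workq_function() does */
	schedule_delayed_work(&p->work, msecs_to_jiffies(1000));
}
```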
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 99685388d3fb..8f2f2899a7a2 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -195,55 +195,24 @@ static void edac_pci_workq_function(struct work_struct *work_req)
mutex_lock(&edac_pci_ctls_mutex);
- if (pci->op_state == OP_RUNNING_POLL) {
- /* we might be in POLL mode, but there may NOT be a poll func
- */
- if ((pci->edac_check != NULL) && edac_pci_get_check_errors())
- pci->edac_check(pci);
-
- /* if we are on a one second period, then use round */
- msec = edac_pci_get_poll_msec();
- if (msec == 1000)
- delay = round_jiffies_relative(msecs_to_jiffies(msec));
- else
- delay = msecs_to_jiffies(msec);
-
- /* Reschedule only if we are in POLL mode */
- edac_queue_work(&pci->work, delay);
+ if (pci->op_state != OP_RUNNING_POLL) {
+ mutex_unlock(&edac_pci_ctls_mutex);
+ return;
}
- mutex_unlock(&edac_pci_ctls_mutex);
-}
-
-/*
- * edac_pci_workq_setup()
- * initialize a workq item for this edac_pci instance
- * passing in the new delay period in msec
- *
- * locking model:
- * called when 'edac_pci_ctls_mutex' is locked
- */
-static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
- unsigned int msec)
-{
- edac_dbg(0, "\n");
+ if (edac_pci_get_check_errors())
+ pci->edac_check(pci);
- INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+ /* if we are on a one second period, then use round */
+ msec = edac_pci_get_poll_msec();
+ if (msec == 1000)
+ delay = round_jiffies_relative(msecs_to_jiffies(msec));
+ else
+ delay = msecs_to_jiffies(msec);
- edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
-}
+ edac_queue_work(&pci->work, delay);
-/*
- * edac_pci_workq_teardown()
- * stop the workq processing on this edac_pci instance
- */
-static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
-{
- edac_dbg(0, "\n");
-
- pci->op_state = OP_OFFLINE;
-
- edac_stop_work(&pci->work);
+ mutex_unlock(&edac_pci_ctls_mutex);
}
/*
@@ -289,10 +258,12 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
goto fail1;
}
- if (pci->edac_check != NULL) {
+ if (pci->edac_check) {
pci->op_state = OP_RUNNING_POLL;
- edac_pci_workq_setup(pci, 1000);
+ INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+ edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
+
} else {
pci->op_state = OP_RUNNING_INTERRUPT;
}
@@ -350,8 +321,8 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
mutex_unlock(&edac_pci_ctls_mutex);
- /* stop the workq timer */
- edac_pci_workq_teardown(pci);
+ if (pci->edac_check)
+ edac_stop_work(&pci->work);
edac_printk(KERN_INFO, EDAC_PCI,
"Removed device %d for %s %s: DEV %s\n",
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a38da22..792bdae2b91d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
mci = i7_dev->mci;
pvt = mci->pvt_info;
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index e3a945ce374b..49768c08ac07 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -147,6 +147,135 @@ static const char * const mc6_mce_desc[] = {
"Status Register File",
};
+/* Scalable MCA error strings */
+static const char * const f17h_ls_mce_desc[] = {
+ "Load queue parity",
+ "Store queue parity",
+ "Miss address buffer payload parity",
+ "L1 TLB parity",
+ "", /* reserved */
+ "DC tag error type 6",
+ "DC tag error type 1",
+ "Internal error type 1",
+ "Internal error type 2",
+ "Sys Read data error thread 0",
+ "Sys read data error thread 1",
+ "DC tag error type 2",
+ "DC data error type 1 (poison comsumption)",
+ "DC data error type 2",
+ "DC data error type 3",
+ "DC tag error type 4",
+ "L2 TLB parity",
+ "PDC parity error",
+ "DC tag error type 3",
+ "DC tag error type 5",
+ "L2 fill data error",
+};
+
+static const char * const f17h_if_mce_desc[] = {
+ "microtag probe port parity error",
+ "IC microtag or full tag multi-hit error",
+ "IC full tag parity",
+ "IC data array parity",
+ "Decoupling queue phys addr parity error",
+ "L0 ITLB parity error",
+ "L1 ITLB parity error",
+ "L2 ITLB parity error",
+ "BPQ snoop parity on Thread 0",
+ "BPQ snoop parity on Thread 1",
+ "L1 BTB multi-match error",
+ "L2 BTB multi-match error",
+};
+
+static const char * const f17h_l2_mce_desc[] = {
+ "L2M tag multi-way-hit error",
+ "L2M tag ECC error",
+ "L2M data ECC error",
+ "HW assert",
+};
+
+static const char * const f17h_de_mce_desc[] = {
+ "uop cache tag parity error",
+ "uop cache data parity error",
+ "Insn buffer parity error",
+ "Insn dispatch queue parity error",
+ "Fetch address FIFO parity",
+ "Patch RAM data parity",
+ "Patch RAM sequencer parity",
+ "uop buffer parity"
+};
+
+static const char * const f17h_ex_mce_desc[] = {
+ "Watchdog timeout error",
+ "Phy register file parity",
+ "Flag register file parity",
+ "Immediate displacement register file parity",
+ "Address generator payload parity",
+ "EX payload parity",
+ "Checkpoint queue parity",
+ "Retire dispatch queue parity",
+};
+
+static const char * const f17h_fp_mce_desc[] = {
+ "Physical register file parity",
+ "Freelist parity error",
+ "Schedule queue parity",
+ "NSQ parity error",
+ "Retire queue parity",
+ "Status register file parity",
+};
+
+static const char * const f17h_l3_mce_desc[] = {
+ "Shadow tag macro ECC error",
+ "Shadow tag macro multi-way-hit error",
+ "L3M tag ECC error",
+ "L3M tag multi-way-hit error",
+ "L3M data ECC error",
+ "XI parity, L3 fill done channel error",
+ "L3 victim queue parity",
+ "L3 HW assert",
+};
+
+static const char * const f17h_cs_mce_desc[] = {
+ "Illegal request from transport layer",
+ "Address violation",
+ "Security violation",
+ "Illegal response from transport layer",
+ "Unexpected response",
+ "Parity error on incoming request or probe response data",
+ "Parity error on incoming read response data",
+ "Atomic request parity",
+ "ECC error on probe filter access",
+};
+
+static const char * const f17h_pie_mce_desc[] = {
+ "HW assert",
+ "Internal PIE register security violation",
+ "Error on GMI link",
+ "Poison data written to internal PIE register",
+};
+
+static const char * const f17h_umc_mce_desc[] = {
+ "DRAM ECC error",
+ "Data poison error on DRAM",
+ "SDP parity error",
+ "Advanced peripheral bus error",
+ "Command/address parity error",
+ "Write data CRC error",
+};
+
+static const char * const f17h_pb_mce_desc[] = {
+ "Parameter Block RAM ECC error",
+};
+
+static const char * const f17h_psp_mce_desc[] = {
+ "PSP RAM ECC or parity error",
+};
+
+static const char * const f17h_smu_mce_desc[] = {
+ "SMU RAM ECC or parity error",
+};
+
static bool f12h_mc0_mce(u16 ec, u8 xec)
{
bool ret = false;
@@ -691,6 +820,177 @@ static void decode_mc6_mce(struct mce *m)
pr_emerg(HW_ERR "Corrupted MC6 MCE info?\n");
}
+static void decode_f17h_core_errors(const char *ip_name, u8 xec,
+ unsigned int mca_type)
+{
+ const char * const *error_desc_array;
+ size_t len;
+
+ pr_emerg(HW_ERR "%s Error: ", ip_name);
+
+ switch (mca_type) {
+ case SMCA_LS:
+ error_desc_array = f17h_ls_mce_desc;
+ len = ARRAY_SIZE(f17h_ls_mce_desc) - 1;
+
+ if (xec == 0x4) {
+ pr_cont("Unrecognized LS MCA error code.\n");
+ return;
+ }
+ break;
+
+ case SMCA_IF:
+ error_desc_array = f17h_if_mce_desc;
+ len = ARRAY_SIZE(f17h_if_mce_desc) - 1;
+ break;
+
+ case SMCA_L2_CACHE:
+ error_desc_array = f17h_l2_mce_desc;
+ len = ARRAY_SIZE(f17h_l2_mce_desc) - 1;
+ break;
+
+ case SMCA_DE:
+ error_desc_array = f17h_de_mce_desc;
+ len = ARRAY_SIZE(f17h_de_mce_desc) - 1;
+ break;
+
+ case SMCA_EX:
+ error_desc_array = f17h_ex_mce_desc;
+ len = ARRAY_SIZE(f17h_ex_mce_desc) - 1;
+ break;
+
+ case SMCA_FP:
+ error_desc_array = f17h_fp_mce_desc;
+ len = ARRAY_SIZE(f17h_fp_mce_desc) - 1;
+ break;
+
+ case SMCA_L3_CACHE:
+ error_desc_array = f17h_l3_mce_desc;
+ len = ARRAY_SIZE(f17h_l3_mce_desc) - 1;
+ break;
+
+ default:
+ pr_cont("Corrupted MCA core error info.\n");
+ return;
+ }
+
+ if (xec > len) {
+ pr_cont("Unrecognized %s MCA bank error code.\n",
+ amd_core_mcablock_names[mca_type]);
+ return;
+ }
+
+ pr_cont("%s.\n", error_desc_array[xec]);
+}
+
+static void decode_df_errors(u8 xec, unsigned int mca_type)
+{
+ const char * const *error_desc_array;
+ size_t len;
+
+ pr_emerg(HW_ERR "Data Fabric Error: ");
+
+ switch (mca_type) {
+ case SMCA_CS:
+ error_desc_array = f17h_cs_mce_desc;
+ len = ARRAY_SIZE(f17h_cs_mce_desc) - 1;
+ break;
+
+ case SMCA_PIE:
+ error_desc_array = f17h_pie_mce_desc;
+ len = ARRAY_SIZE(f17h_pie_mce_desc) - 1;
+ break;
+
+ default:
+ pr_cont("Corrupted MCA Data Fabric info.\n");
+ return;
+ }
+
+ if (xec > len) {
+ pr_cont("Unrecognized %s MCA bank error code.\n",
+ amd_df_mcablock_names[mca_type]);
+ return;
+ }
+
+ pr_cont("%s.\n", error_desc_array[xec]);
+}
+
+/* Decode errors according to Scalable MCA specification */
+static void decode_smca_errors(struct mce *m)
+{
+ u32 addr = MSR_AMD64_SMCA_MCx_IPID(m->bank);
+ unsigned int hwid, mca_type, i;
+ u8 xec = XEC(m->status, xec_mask);
+ const char * const *error_desc_array;
+ const char *ip_name;
+ u32 low, high;
+ size_t len;
+
+ if (rdmsr_safe(addr, &low, &high)) {
+ pr_emerg("Invalid IP block specified, error information is unreliable.\n");
+ return;
+ }
+
+ hwid = high & MCI_IPID_HWID;
+ mca_type = (high & MCI_IPID_MCATYPE) >> 16;
+
+ pr_emerg(HW_ERR "MC%d IPID value: 0x%08x%08x\n", m->bank, high, low);
+
+ /*
+ * Based on hwid and mca_type values, decode errors from respective IPs.
+ * Note: mca_type values make sense only in the context of an hwid.
+ */
+ for (i = 0; i < ARRAY_SIZE(amd_hwids); i++)
+ if (amd_hwids[i].hwid == hwid)
+ break;
+
+ switch (i) {
+ case SMCA_F17H_CORE:
+ ip_name = (mca_type == SMCA_L3_CACHE) ?
+ "L3 Cache" : "F17h Core";
+ decode_f17h_core_errors(ip_name, xec, mca_type);
+ return;
+
+ case SMCA_DF:
+ decode_df_errors(xec, mca_type);
+ return;
+
+ case SMCA_UMC:
+ error_desc_array = f17h_umc_mce_desc;
+ len = ARRAY_SIZE(f17h_umc_mce_desc) - 1;
+ break;
+
+ case SMCA_PB:
+ error_desc_array = f17h_pb_mce_desc;
+ len = ARRAY_SIZE(f17h_pb_mce_desc) - 1;
+ break;
+
+ case SMCA_PSP:
+ error_desc_array = f17h_psp_mce_desc;
+ len = ARRAY_SIZE(f17h_psp_mce_desc) - 1;
+ break;
+
+ case SMCA_SMU:
+ error_desc_array = f17h_smu_mce_desc;
+ len = ARRAY_SIZE(f17h_smu_mce_desc) - 1;
+ break;
+
+ default:
+ pr_emerg(HW_ERR "HWID:%d does not match any existing IPs.\n", hwid);
+ return;
+ }
+
+ ip_name = amd_hwids[i].name;
+ pr_emerg(HW_ERR "%s Error: ", ip_name);
+
+ if (xec > len) {
+ pr_cont("Unrecognized %s MCA bank error code.\n", ip_name);
+ return;
+ }
+
+ pr_cont("%s.\n", error_desc_array[xec]);
+}
+
static inline void amd_decode_err_code(u16 ec)
{
if (INT_ERROR(ec)) {
@@ -752,6 +1052,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
struct mce *m = (struct mce *)data;
struct cpuinfo_x86 *c = &cpu_data(m->extcpu);
int ecc;
+ u32 ebx = cpuid_ebx(0x80000007);
if (amd_filter_mce(m))
return NOTIFY_STOP;
@@ -769,11 +1070,20 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
- if (c->x86 == 0x15 || c->x86 == 0x16)
+ if (c->x86 >= 0x15)
pr_cont("|%s|%s",
((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
+ if (!!(ebx & BIT(3))) {
+ u32 low, high;
+ u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
+
+ if (!rdmsr_safe(addr, &low, &high) &&
+ (low & MCI_CONFIG_MCAX))
+ pr_cont("|%s", ((m->status & MCI_STATUS_TCC) ? "TCC" : "-"));
+ }
+
/* do the two bits[14:13] together */
ecc = (m->status >> 45) & 0x3;
if (ecc)
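/*
 * Editor's sketch (not part of the patch): bits 46:45 of MCi_STATUS are
 * extracted as a single two-bit field. Assuming bit 45 = UECC and
 * bit 46 = CECC:
 *
 *   u64 status = 1ULL << 46;
 *   int ecc = (status >> 45) & 0x3;   // 0x2 -> corrected ECC error
 */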
@@ -784,6 +1094,11 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (m->status & MCI_STATUS_ADDRV)
pr_emerg(HW_ERR "MC%d Error Address: 0x%016llx\n", m->bank, m->addr);
+ if (!!(ebx & BIT(3))) {
+ decode_smca_errors(m);
+ goto err_code;
+ }
+
if (!fam_ops)
goto err_code;
@@ -834,6 +1149,7 @@ static struct notifier_block amd_mce_dec_nb = {
static int __init mce_amd_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
+ u32 ebx;
if (c->x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
@@ -888,10 +1204,18 @@ static int __init mce_amd_init(void)
fam_ops->mc2_mce = f16h_mc2_mce;
break;
+ case 0x17:
+ ebx = cpuid_ebx(0x80000007);
+ xec_mask = 0x3f;
+ if (!(ebx & BIT(3))) {
+ printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
+ goto err_out;
+ }
+ break;
+
default:
printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
- kfree(fam_ops);
- fam_ops = NULL;
+ goto err_out;
}
pr_info("MCE: In-kernel MCE decoding enabled.\n");
@@ -899,6 +1223,11 @@ static int __init mce_amd_init(void)
mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
+
+err_out:
+ kfree(fam_ops);
+ fam_ops = NULL;
+ return -EINVAL;
}
early_initcall(mce_amd_init);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index b7139c160baf..ca63d0da8889 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1244,7 +1244,7 @@ static struct platform_driver * const drivers[] = {
static int __init mpc85xx_mc_init(void)
{
int res = 0;
- u32 pvr = 0;
+ u32 __maybe_unused pvr = 0;
printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
"(C) 2006 Montavista Software\n");
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index e438ee5b433f..8bf745d2da7e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -362,6 +362,7 @@ struct sbridge_pvt {
/* Memory type detection */
bool is_mirrored, is_lockstep, is_close_pg;
+ bool is_chan_hash;
/* Fifo double buffers */
struct mce mce_entry[MCE_LOG_LEN];
@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
return (pkg >> 2) & 0x1;
}
+static int haswell_chan_hash(int idx, u64 addr)
+{
+ int i;
+
+ /*
+ * XOR even bits from 12:26 to bit0 of idx,
+ * odd bits from 13:27 to bit1
+ */
+ for (i = 12; i < 28; i += 2)
+ idx ^= (addr >> i) & 3;
+
+ return idx;
+}
+
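/*
 * Editor's note (not part of the patch): the loop above folds addr bits
 * 12..27 in pairs, so bit 0 of idx accumulates the XOR of the even bits
 * 12,14,...,26 and bit 1 the XOR of the odd bits 13,15,...,27. E.g.
 * addr = 0x5000 (bits 12 and 14 set) leaves idx unchanged (1 ^ 1 = 0),
 * while addr = 0x2000 (bit 13 set) flips bit 1 of idx.
 */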
/****************************************************************************
Memory check routines
****************************************************************************/
@@ -1574,7 +1589,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
if (knl_get_mc_route(target,
mc_route_reg[cha]) == channel
- && participants[channel]) {
+ && !participants[channel]) {
participant_count++;
participants[channel] = 1;
break;
@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
KNL_MAX_CHANNELS : NUM_CHANNELS;
u64 knl_mc_sizes[KNL_MAX_CHANNELS];
+ if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+ pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
+ pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+ }
if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
pvt->info.type == KNIGHTS_LANDING)
pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
@@ -1839,8 +1858,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- (u32)TAD_SOCK(reg),
- (u32)TAD_CH(reg),
+ (u32)(1 << TAD_SOCK(reg)),
+ (u32)TAD_CH(reg) + 1,
(u32)TAD_TGT0(reg),
(u32)TAD_TGT1(reg),
(u32)TAD_TGT2(reg),
@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
- sck_way = TAD_SOCK(reg) + 1;
+ sck_way = TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
- else
+ else {
idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
+ if (pvt->is_chan_hash)
+ idx = haswell_chan_hash(idx, addr);
+ }
idx = idx % ch_way;
/*
@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
switch(ch_way) {
case 2:
case 4:
- sck_xch = 1 << sck_way * (ch_way >> 1);
+ sck_xch = (1 << sck_way) * (ch_way >> 1);
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -2175,7 +2197,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
n_tads,
addr,
limit,
- (u32)TAD_SOCK(reg),
+ sck_way,
ch_way,
offset,
idx,
@@ -2190,18 +2212,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
offset, addr);
return -EINVAL;
}
- addr -= offset;
- /* Store the low bits [0:6] of the addr */
- ch_addr = addr & 0x7f;
- /* Remove socket wayness and remove 6 bits */
- addr >>= 6;
- addr = div_u64(addr, sck_xch);
-#if 0
- /* Divide by channel way */
- addr = addr / ch_way;
-#endif
- /* Recover the last 6 bits */
- ch_addr |= addr << 6;
+
+ ch_addr = addr - offset;
+ ch_addr >>= (6 + shiftup);
+ ch_addr /= sck_xch;
+ ch_addr <<= (6 + shiftup);
+ ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
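/*
 * Editor's note — worked example (hypothetical numbers): with
 * shiftup = 0, sck_xch = 2 (2-way channel interleave, sck_way = 0) and
 * offset = 0, a system address addr = 0x1040 gives:
 *   0x1040 >> 6 = 0x41;  0x41 / 2 = 0x20;  0x20 << 6 = 0x800;
 *   ch_addr = 0x800 | (0x1040 & 0x3f) = 0x800
 * i.e. the single interleave bit above the 64-byte line is removed.
 */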
/*
* Step 3) Decode rank
@@ -3152,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
mci = get_mci_for_node_id(mce->socketid);
if (!mci)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
pvt = mci->pvt_info;
/*
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 41f876414a18..bf19b6e3bd12 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -61,6 +61,7 @@ struct xgene_edac {
struct regmap *mcba_map;
struct regmap *mcbb_map;
struct regmap *efuse_map;
+ struct regmap *rb_map;
void __iomem *pcp_csr;
spinlock_t lock;
struct dentry *dfs;
@@ -1057,7 +1058,7 @@ static bool xgene_edac_l3_promote_to_uc_err(u32 l3cesr, u32 l3celr)
case 0x041:
return true;
}
- } else if (L3C_ELR_ERRSYN(l3celr) == 9)
+ } else if (L3C_ELR_ERRWAY(l3celr) == 9)
return true;
return false;
@@ -1353,6 +1354,17 @@ static int xgene_edac_l3_remove(struct xgene_edac_dev_ctx *l3)
#define GLBL_MDED_ERRH 0x0848
#define GLBL_MDED_ERRHMASK 0x084c
+/* IO Bus Registers */
+#define RBCSR 0x0000
+#define STICKYERR_MASK BIT(0)
+#define RBEIR 0x0008
+#define AGENT_OFFLINE_ERR_MASK BIT(30)
+#define UNIMPL_RBPAGE_ERR_MASK BIT(29)
+#define WORD_ALIGNED_ERR_MASK BIT(28)
+#define PAGE_ACCESS_ERR_MASK BIT(27)
+#define WRITE_ACCESS_MASK BIT(26)
+#define RBERRADDR_RD(src) ((src) & 0x03FFFFFF)
+
static const char * const soc_mem_err_v1[] = {
"10GbE0",
"10GbE1",
@@ -1470,6 +1482,51 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
u32 err_addr_hi;
u32 reg;
+ /* If the register bus resource isn't available, just skip it */
+ if (!ctx->edac->rb_map)
+ goto rb_skip;
+
+ /*
+ * Check RB access errors
+ * 1. Out of range
+ * 2. Un-implemented page
+ * 3. Un-aligned access
+ * 4. Offline slave IP
+ */
+ if (regmap_read(ctx->edac->rb_map, RBCSR, &reg))
+ return;
+ if (reg & STICKYERR_MASK) {
+ bool write;
+ u32 address;
+
+ dev_err(edac_dev->dev, "IOB bus access error(s)\n");
+ if (regmap_read(ctx->edac->rb_map, RBEIR, &reg))
+ return;
+ write = reg & WRITE_ACCESS_MASK ? 1 : 0;
+ address = RBERRADDR_RD(reg);
+ if (reg & AGENT_OFFLINE_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s access to offline agent error\n",
+ write ? "write" : "read");
+ if (reg & UNIMPL_RBPAGE_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s access to unimplemented page error\n",
+ write ? "write" : "read");
+ if (reg & WORD_ALIGNED_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s word aligned access error\n",
+ write ? "write" : "read");
+ if (reg & PAGE_ACCESS_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s to page out of range access error\n",
+ write ? "write" : "read");
+ if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
+ return;
+ if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
+ return;
+ }
+rb_skip:
+
/* IOB Bridge agent transaction error interrupt */
reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS);
if (!reg)
@@ -1852,6 +1909,17 @@ static int xgene_edac_probe(struct platform_device *pdev)
goto out_err;
}
+ /*
+ * NOTE: The register bus resource is optional for compatibility
+ * reasons.
+ */
+ edac->rb_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "regmap-rb");
+ if (IS_ERR(edac->rb_map)) {
+ dev_warn(edac->dev, "missing syscon regmap rb\n");
+ edac->rb_map = NULL;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
edac->pcp_csr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(edac->pcp_csr)) {
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index c121d01a5cd6..1d8e0a57bd51 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -185,7 +185,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
break;
};
- mutex_lock(&arizona->dapm->card->dapm_mutex);
+ snd_soc_dapm_mutex_lock(arizona->dapm);
arizona->hpdet_clamp = clamp;
@@ -227,7 +227,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
ret);
}
- mutex_unlock(&arizona->dapm->card->dapm_mutex);
+ snd_soc_dapm_mutex_unlock(arizona->dapm);
}
static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 279ff8f6637d..d023789f0fda 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -126,7 +126,7 @@ static int gpio_extcon_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&data->work, gpio_extcon_work);
/*
- * Request the interrput of gpio to detect whether external connector
+ * Request the interrupt of gpio to detect whether external connector
* is attached or detached.
*/
ret = devm_request_any_context_irq(&pdev->dev, data->irq,
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index b30ab97ce75f..852a7112f451 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -150,6 +150,7 @@ enum max14577_muic_acc_type {
static const unsigned int max14577_extcon_cable[] = {
EXTCON_USB,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
@@ -454,6 +455,8 @@ static int max14577_muic_chg_handler(struct max14577_muic_info *info)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index fdf8f5d4d4e9..f17cb76b567c 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -204,6 +204,7 @@ enum max77693_muic_acc_type {
static const unsigned int max77693_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
@@ -512,8 +513,11 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
break;
case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
dock_id = EXTCON_DOCK;
- if (!attached)
+ if (!attached) {
extcon_set_cable_state_(info->edev, EXTCON_USB, false);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ false);
+ }
break;
default:
dev_err(info->dev, "failed to detect %s dock device\n",
@@ -601,6 +605,8 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
if (ret < 0)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
case MAX77693_MUIC_GND_MHL:
case MAX77693_MUIC_GND_MHL_VB:
@@ -830,6 +836,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
*/
extcon_set_cable_state_(info->edev, EXTCON_USB,
attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
if (!cable_attached)
extcon_set_cable_state_(info->edev, EXTCON_DOCK,
@@ -899,6 +907,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
extcon_set_cable_state_(info->edev, EXTCON_USB,
attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
/* Only TA cable */
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 74dfb7f4f277..b188bd650efa 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -122,6 +122,7 @@ enum max77843_muic_charger_type {
static const unsigned int max77843_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_CDP,
EXTCON_CHG_USB_FAST,
@@ -486,6 +487,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
case MAX77843_MUIC_CHG_DOWNSTREAM:
ret = max77843_muic_set_path(info,
@@ -803,7 +806,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
/* Clear IRQ bits before request IRQs */
ret = regmap_bulk_read(max77843->regmap_muic,
MAX77843_MUIC_REG_INT1, info->status,
- MAX77843_MUIC_IRQ_NUM);
+ MAX77843_MUIC_STATUS_NUM);
if (ret) {
dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
goto err_muic_irq;
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index b2b13b3dce14..9a89320d09a8 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -148,6 +148,7 @@ struct max8997_muic_info {
static const unsigned int max8997_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
@@ -334,6 +335,8 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
break;
case MAX8997_USB_DEVICE:
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
default:
dev_err(info->dev, "failed to detect %s usb cable\n",
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 93c30a885740..8b3226dca1d9 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -216,11 +216,23 @@ static int palmas_usb_probe(struct platform_device *pdev)
return PTR_ERR(palmas_usb->id_gpiod);
}
+ palmas_usb->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+ GPIOD_IN);
+ if (IS_ERR(palmas_usb->vbus_gpiod)) {
+ dev_err(&pdev->dev, "failed to get vbus gpio\n");
+ return PTR_ERR(palmas_usb->vbus_gpiod);
+ }
+
if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
palmas_usb->enable_id_detection = false;
palmas_usb->enable_gpio_id_detection = true;
}
+ if (palmas_usb->enable_vbus_detection && palmas_usb->vbus_gpiod) {
+ palmas_usb->enable_vbus_detection = false;
+ palmas_usb->enable_gpio_vbus_detection = true;
+ }
+
if (palmas_usb->enable_gpio_id_detection) {
u32 debounce;
@@ -266,7 +278,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_usb->id_irq,
NULL, palmas_id_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas_usb_id", palmas_usb);
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
@@ -304,13 +316,46 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_usb->vbus_irq, NULL,
palmas_vbus_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas_usb_vbus", palmas_usb);
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->vbus_irq, status);
return status;
}
+ } else if (palmas_usb->enable_gpio_vbus_detection) {
+ /* remux GPIO_1 as VBUSDET */
+ status = palmas_update_bits(palmas,
+ PALMAS_PU_PD_OD_BASE,
+ PALMAS_PRIMARY_SECONDARY_PAD1,
+ PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK,
+ (1 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT));
+ if (status < 0) {
+ dev_err(&pdev->dev, "can't remux GPIO1\n");
+ return status;
+ }
+
+ palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
+ PALMAS_VBUS_OTG_IRQ);
+ palmas_usb->gpio_vbus_irq = gpiod_to_irq(palmas_usb->vbus_gpiod);
+ if (palmas_usb->gpio_vbus_irq < 0) {
+ dev_err(&pdev->dev, "failed to get vbus irq\n");
+ return palmas_usb->gpio_vbus_irq;
+ }
+ status = devm_request_threaded_irq(&pdev->dev,
+ palmas_usb->gpio_vbus_irq,
+ NULL,
+ palmas_vbus_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ "palmas_usb_vbus",
+ palmas_usb);
+ if (status < 0) {
+ dev_err(&pdev->dev,
+ "failed to request handler for vbus irq\n");
+ return status;
+ }
}
palmas_enable_irq(palmas_usb);
@@ -337,6 +382,8 @@ static int palmas_usb_suspend(struct device *dev)
if (device_may_wakeup(dev)) {
if (palmas_usb->enable_vbus_detection)
enable_irq_wake(palmas_usb->vbus_irq);
+ if (palmas_usb->enable_gpio_vbus_detection)
+ enable_irq_wake(palmas_usb->gpio_vbus_irq);
if (palmas_usb->enable_id_detection)
enable_irq_wake(palmas_usb->id_irq);
if (palmas_usb->enable_gpio_id_detection)
@@ -352,6 +399,8 @@ static int palmas_usb_resume(struct device *dev)
if (device_may_wakeup(dev)) {
if (palmas_usb->enable_vbus_detection)
disable_irq_wake(palmas_usb->vbus_irq);
+ if (palmas_usb->enable_gpio_vbus_detection)
+ disable_irq_wake(palmas_usb->gpio_vbus_irq);
if (palmas_usb->enable_id_detection)
disable_irq_wake(palmas_usb->id_irq);
if (palmas_usb->enable_gpio_id_detection)
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index e1bb82809bef..97e074d70eca 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -93,6 +93,7 @@ static struct reg_data rt8973a_reg_data[] = {
static const unsigned int rt8973a_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_JIG,
EXTCON_NONE,
@@ -398,6 +399,9 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
/* Change the state of external accessory */
extcon_set_cable_state_(info->edev, id, attached);
+ if (id == EXTCON_USB)
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
return 0;
}
@@ -663,7 +667,7 @@ MODULE_DEVICE_TABLE(of, rt8973a_dt_match);
#ifdef CONFIG_PM_SLEEP
static int rt8973a_muic_suspend(struct device *dev)
{
- struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *i2c = to_i2c_client(dev);
struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
enable_irq_wake(info->irq);
@@ -673,7 +677,7 @@ static int rt8973a_muic_suspend(struct device *dev)
static int rt8973a_muic_resume(struct device *dev)
{
- struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *i2c = to_i2c_client(dev);
struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
disable_irq_wake(info->irq);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 7aac3cc7efd7..df769a17e736 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -95,6 +95,7 @@ static struct reg_data sm5502_reg_data[] = {
static const unsigned int sm5502_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_NONE,
};
@@ -411,6 +412,9 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
/* Change the state of external accessory */
extcon_set_cable_state_(info->edev, id, attached);
+ if (id == EXTCON_USB)
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
return 0;
}
@@ -655,7 +659,7 @@ MODULE_DEVICE_TABLE(of, sm5502_dt_match);
#ifdef CONFIG_PM_SLEEP
static int sm5502_muic_suspend(struct device *dev)
{
- struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *i2c = to_i2c_client(dev);
struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
enable_irq_wake(info->irq);
@@ -665,7 +669,7 @@ static int sm5502_muic_suspend(struct device *dev)
static int sm5502_muic_resume(struct device *dev)
{
- struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *i2c = to_i2c_client(dev);
struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
disable_irq_wake(info->irq);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 36a7c2d89a01..aee149bdf4c0 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -221,7 +221,7 @@ struct inbound_phy_packet_event {
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
- if (is_compat_task())
+ if (in_compat_syscall())
return compat_ptr(value);
else
return (void __user *)(unsigned long)value;
@@ -229,7 +229,7 @@ static void __user *u64_to_uptr(u64 value)
static u64 uptr_to_u64(void __user *ptr)
{
- if (is_compat_task())
+ if (in_compat_syscall())
return ptr_to_compat(ptr);
else
return (u64)(unsigned long)ptr;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 76b2d390f6ec..631c977b0da5 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -33,6 +33,7 @@
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
@@ -413,17 +414,18 @@ static void
packet_irq_handler(struct pcilynx *lynx)
{
struct client *client;
- u32 tcode_mask, tcode;
+ u32 tcode_mask, tcode, timestamp;
size_t length;
- struct timeval tv;
+ struct timespec64 ts64;
/* FIXME: Also report rcv_speed. */
length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;
- do_gettimeofday(&tv);
- lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec;
+ ktime_get_real_ts64(&ts64);
+ timestamp = ts64.tv_nsec / NSEC_PER_USEC;
+ lynx->rcv_buffer[0] = (__force __le32)timestamp;
if (length == PHY_PACKET_SIZE)
tcode_mask = 1 << TCODE_PHY_PACKET;
@@ -444,14 +446,16 @@ static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
struct client *client;
- struct timeval tv;
+ struct timespec64 ts64;
+ u32 timestamp;
- do_gettimeofday(&tv);
+ ktime_get_real_ts64(&ts64);
+ timestamp = ts64.tv_nsec / NSEC_PER_USEC;
spin_lock(&lynx->client_list_lock);
list_for_each_entry(client, &lynx->client_list, link)
- packet_buffer_put(&client->buffer, &tv.tv_usec, 4);
+ packet_buffer_put(&client->buffer, &timestamp, 4);
spin_unlock(&lynx->client_list_lock);
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index c2f5117fd8cb..8bf89267dc25 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2278,9 +2278,10 @@ static int ohci_enable(struct fw_card *card,
u32 lps, version, irqs;
int i, ret;
- if (software_reset(ohci)) {
+ ret = software_reset(ohci);
+ if (ret < 0) {
ohci_err(ohci, "failed to reset ohci card\n");
- return -EBUSY;
+ return ret;
}
/*
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 49a3a1185bb6..6664f1108c7c 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -161,6 +161,26 @@ config RASPBERRYPI_FIRMWARE
This option enables support for communicating with the firmware on the
Raspberry Pi.
+config FW_CFG_SYSFS
+ tristate "QEMU fw_cfg device support in sysfs"
+ depends on SYSFS && (ARM || ARM64 || PPC_PMAC || SPARC || X86)
+ depends on HAS_IOPORT_MAP
+ default n
+ help
+ Say Y or M here to enable the exporting of the QEMU firmware
+ configuration (fw_cfg) file entries via sysfs. Entries are
+ found under /sys/firmware/fw_cfg when this option is enabled
+ and loaded.
+
+config FW_CFG_SYSFS_CMDLINE
+ bool "QEMU fw_cfg device parameter parsing"
+ depends on FW_CFG_SYSFS
+ help
+ Allow the qemu_fw_cfg device to be initialized via the kernel
+ command line or using a module parameter.
+ WARNING: Using incorrect parameters (base address in particular)
+ may crash your system.
+
config QCOM_SCM
bool
depends on ARM || ARM64
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 48dd4175297e..474bada56fcd 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
+obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 6174db80c663..7e3e595c9f30 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -80,7 +80,7 @@
#define FW_REV_MINOR(x) (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
#define FW_REV_PATCH(x) ((x) & FW_REV_PATCH_MASK)
-#define MAX_RX_TIMEOUT (msecs_to_jiffies(20))
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
enum scpi_error_codes {
SCPI_SUCCESS = 0, /* Success */
@@ -231,7 +231,8 @@ struct _scpi_sensor_info {
};
struct sensor_value {
- __le32 val;
+ __le32 lo_val;
+ __le32 hi_val;
} __packed;
static struct scpi_drvinfo *scpi_info;
@@ -373,7 +374,7 @@ static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len,
ret = -ETIMEDOUT;
else
/* first status word */
- ret = le32_to_cpu(msg->status);
+ ret = msg->status;
out:
if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
scpi_process_cmd(scpi_chan, msg->cmd);
@@ -525,15 +526,17 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
return ret;
}
-int scpi_sensor_get_value(u16 sensor, u32 *val)
+int scpi_sensor_get_value(u16 sensor, u64 *val)
{
+ __le16 id = cpu_to_le16(sensor);
struct sensor_value buf;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &sensor, sizeof(sensor),
+ ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id),
&buf, sizeof(buf));
if (!ret)
- *val = le32_to_cpu(buf.val);
+ *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
+ le32_to_cpu(buf.lo_val);
return ret;
}
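/*
 * Editor's sketch (hypothetical, user-space analogue of the 64-bit
 * sensor value assembly above): combining two little-endian 32-bit
 * halves into one value.
 *
 *   uint64_t combine_u32(uint32_t lo, uint32_t hi)
 *   {
 *           return (uint64_t)hi << 32 | lo;
 *   }
 *
 * e.g. lo = 0x89abcdef, hi = 0x01234567 -> 0x0123456789abcdef.
 */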
@@ -699,7 +702,7 @@ static int scpi_probe(struct platform_device *pdev)
cl->rx_callback = scpi_handle_remote_msg;
cl->tx_prepare = scpi_tx_prepare;
cl->tx_block = true;
- cl->tx_tout = 50;
+ cl->tx_tout = 20;
cl->knows_txdone = false; /* controller can't ack */
INIT_LIST_HEAD(&pchan->rx_pending);
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 0c2f0a61b0ea..0b631e5b5b84 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -94,15 +94,14 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
found:
__ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
- header = (struct nvram_header *)nvram_buf;
- nvram_len = header->len;
+ nvram_len = ((struct nvram_header *)(nvram_buf))->len;
if (nvram_len > size) {
pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
nvram_len = size;
}
if (nvram_len >= NVRAM_SPACE) {
pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
- header->len, NVRAM_SPACE - 1);
+ nvram_len, NVRAM_SPACE - 1);
nvram_len = NVRAM_SPACE - 1;
}
/* proceed reading data after header */
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 9e15d571b53c..8714f8c271ba 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -61,8 +61,8 @@ static int __init uefi_init(void)
char vendor[100] = "unknown";
int i, retval;
- efi.systab = early_memremap(efi_system_table,
- sizeof(efi_system_table_t));
+ efi.systab = early_memremap_ro(efi_system_table,
+ sizeof(efi_system_table_t));
if (efi.systab == NULL) {
pr_warn("Unable to map EFI system table.\n");
return -ENOMEM;
@@ -86,8 +86,8 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff);
/* Show what we know for posterity */
- c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
- sizeof(vendor) * sizeof(efi_char16_t));
+ c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
+ sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = c16[i];
@@ -100,8 +100,8 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff, vendor);
table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
- config_tables = early_memremap(efi_to_phys(efi.systab->tables),
- table_size);
+ config_tables = early_memremap_ro(efi_to_phys(efi.systab->tables),
+ table_size);
if (config_tables == NULL) {
pr_warn("Unable to map EFI config table array.\n");
retval = -ENOMEM;
@@ -185,7 +185,7 @@ void __init efi_init(void)
efi_system_table = params.system_table;
memmap.phys_map = params.mmap;
- memmap.map = early_memremap(params.mmap, params.mmap_size);
+ memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
if (memmap.map == NULL) {
/*
* If we are booting via UEFI, the UEFI memory map is the only
@@ -203,7 +203,19 @@ void __init efi_init(void)
reserve_regions();
early_memunmap(memmap.map, params.mmap_size);
- memblock_mark_nomap(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
+
+ if (IS_ENABLED(CONFIG_ARM)) {
+ /*
+ * ARM currently does not allow ioremap_cache() to be called on
+ * memory regions that are covered by struct page. So remove the
+ * UEFI memory map from the linear mapping.
+ */
+ memblock_mark_nomap(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ } else {
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ }
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2cd37dad67a6..3a69ed5ecfcb 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -182,6 +182,7 @@ static int generic_ops_register(void)
{
generic_ops.get_variable = efi.get_variable;
generic_ops.set_variable = efi.set_variable;
+ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
@@ -326,38 +327,6 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
return end;
}
-/*
- * We can't ioremap data in EFI boot services RAM, because we've already mapped
- * it as RAM. So, look it up in the existing EFI memory map instead. Only
- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
- */
-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
-{
- struct efi_memory_map *map;
- void *p;
- map = efi.memmap;
- if (!map)
- return NULL;
- if (WARN_ON(!map->map))
- return NULL;
- for (p = map->map; p < map->map_end; p += map->desc_size) {
- efi_memory_desc_t *md = p;
- u64 size = md->num_pages << EFI_PAGE_SHIFT;
- u64 end = md->phys_addr + size;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
- if (!md->virt_addr)
- continue;
- if (phys_addr >= md->phys_addr && phys_addr < end) {
- phys_addr += md->virt_addr - md->phys_addr;
- return (__force void __iomem *)(unsigned long)phys_addr;
- }
- }
- return NULL;
-}
-
static __initdata efi_config_table_type_t common_tables[] = {
{ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
@@ -586,7 +555,8 @@ static __initdata char memory_type_name[][20] = {
"ACPI Memory NVS",
"Memory Mapped I/O",
"MMIO Port Space",
- "PAL Code"
+ "PAL Code",
+ "Persistent Memory",
};
char * __init efi_md_typeattr_format(char *buf, size_t size,
@@ -613,13 +583,16 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
+ EFI_MEMORY_NV |
EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
- snprintf(pos, size, "|%3s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ snprintf(pos, size,
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+ attr & EFI_MEMORY_NV ? "NV" : "",
attr & EFI_MEMORY_XP ? "XP" : "",
attr & EFI_MEMORY_RP ? "RP" : "",
attr & EFI_MEMORY_WP ? "WP" : "",
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 10e6774ab2a2..096adcbcb5a9 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -231,7 +231,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
static inline bool is_compat(void)
{
- if (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())
+ if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall())
return true;
return false;
@@ -386,7 +386,7 @@ static const struct sysfs_ops efivar_attr_ops = {
static void efivar_release(struct kobject *kobj)
{
- struct efivar_entry *var = container_of(kobj, struct efivar_entry, kobj);
+ struct efivar_entry *var = to_efivar_entry(kobj);
kfree(var);
}
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 22c5285f7705..75feb3f5829b 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -167,14 +167,11 @@ static struct kset *esrt_kset;
static int esre_create_sysfs_entry(void *esre, int entry_num)
{
struct esre_entry *entry;
- char name[20];
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- sprintf(name, "entry%d", entry_num);
-
entry->kobj.kset = esrt_kset;
if (esrt->fw_resource_version == 1) {
@@ -182,7 +179,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
entry->esre.esre1 = esre;
rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
- "%s", name);
+ "entry%d", entry_num);
if (rc) {
kfree(entry);
return rc;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index aaf9c0bab42e..da99bbb74aeb 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -23,6 +23,10 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o
@@ -36,7 +40,7 @@ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
$(patsubst %.c,lib-%.o,$(arm-deps))
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64-stub.o
+lib-$(CONFIG_ARM64) += arm64-stub.o random.o
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 3397902e4040..414deb85c2e5 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -18,6 +18,8 @@
#include "efistub.h"
+bool __nokaslr;
+
static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
{
static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
@@ -190,6 +192,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
pr_efi(sys_table, "Booting Linux Kernel...\n");
+ status = check_platform_features(sys_table);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
/*
* Get a handle to the loaded image protocol. This is used to get
* information about the running image, such as size and the command
@@ -207,14 +213,6 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
pr_efi_err(sys_table, "Failed to find DRAM base\n");
goto fail;
}
- status = handle_kernel_image(sys_table, image_addr, &image_size,
- &reserve_addr,
- &reserve_size,
- dram_base, image);
- if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel\n");
- goto fail;
- }
/*
* Get the command line from EFI, using the LOADED_IMAGE
@@ -224,7 +222,28 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
if (!cmdline_ptr) {
pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
- goto fail_free_image;
+ goto fail;
+ }
+
+ /* check whether 'nokaslr' was passed on the command line */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ static const u8 default_cmdline[] = CONFIG_CMDLINE;
+ const u8 *str, *cmdline = cmdline_ptr;
+
+ if (IS_ENABLED(CONFIG_CMDLINE_FORCE))
+ cmdline = default_cmdline;
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+ __nokaslr = true;
+ }
+
+ status = handle_kernel_image(sys_table, image_addr, &image_size,
+ &reserve_addr,
+ &reserve_size,
+ dram_base, image);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ goto fail_free_cmdline;
}
status = efi_parse_options(cmdline_ptr);
@@ -244,7 +263,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to load device tree!\n");
- goto fail_free_cmdline;
+ goto fail_free_image;
}
}
@@ -286,12 +305,11 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_free(sys_table, initrd_size, initrd_addr);
efi_free(sys_table, fdt_size, fdt_addr);
-fail_free_cmdline:
- efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
-
fail_free_image:
efi_free(sys_table, image_size, *image_addr);
efi_free(sys_table, reserve_size, reserve_addr);
+fail_free_cmdline:
+ efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
fail:
return EFI_ERROR;
}
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 495ebd657e38..6f42be4d0084 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -9,6 +9,23 @@
#include <linux/efi.h>
#include <asm/efi.h>
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+{
+ int block;
+
+ /* non-LPAE kernels can run anywhere */
+ if (!IS_ENABLED(CONFIG_ARM_LPAE))
+ return EFI_SUCCESS;
+
+ /* LPAE kernels need compatible hardware */
+ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
+ if (block < 5) {
+ pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
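/*
 * Editor's note (assumption, not confirmed by the patch): the field
 * extracted above is ID_MMFR0.VMSA (bits [3:0]); a value >= 5 denotes
 * support for the long-descriptor translation table format that an
 * LPAE kernel requires, hence the "block < 5" rejection.
 */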
+
efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
unsigned long *image_addr,
unsigned long *image_size,
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 78dfbd34b6bf..a90f6459f5c6 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -12,37 +12,87 @@
#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/sections.h>
+#include <asm/sysreg.h>
-efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
- unsigned long *image_size,
- unsigned long *reserve_addr,
- unsigned long *reserve_size,
- unsigned long dram_base,
- efi_loaded_image_t *image)
+#include "efistub.h"
+
+extern bool __nokaslr;
+
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+{
+ u64 tg;
+
+ /* UEFI mandates support for 4 KB granularity, no need to check */
+ if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+ return EFI_SUCCESS;
+
+ tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
+ if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+ pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n");
+ else
+ pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
+
+efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image)
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
- unsigned long nr_pages;
void *old_image_addr = (void *)*image_addr;
unsigned long preferred_offset;
+ u64 phys_seed = 0;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ if (!__nokaslr) {
+ status = efi_get_random_bytes(sys_table_arg,
+ sizeof(phys_seed),
+ (u8 *)&phys_seed);
+ if (status == EFI_NOT_FOUND) {
+ pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+ } else if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
+ return status;
+ }
+ } else {
+ pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
+ }
+ }
/*
* The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
* a 2 MB aligned base, which itself may be lower than dram_base, as
* long as the resulting offset equals or exceeds it.
*/
- preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+ preferred_offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;
if (preferred_offset < dram_base)
- preferred_offset += SZ_2M;
+ preferred_offset += MIN_KIMG_ALIGN;
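/*
 * Editor's note — worked example (assuming TEXT_OFFSET = 0x80000 and
 * MIN_KIMG_ALIGN = SZ_2M): dram_base = 0x80100000 rounds down to
 * 0x80000000, giving 0x80080000; that is below dram_base, so one more
 * MIN_KIMG_ALIGN step yields the preferred offset 0x80280000.
 */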
- /* Relocate the image, if required. */
kernel_size = _edata - _text;
- if (*image_addr != preferred_offset) {
- kernel_memsize = kernel_size + (_end - _edata);
+ kernel_memsize = kernel_size + (_end - _edata);
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
+ /*
+ * If KASLR is enabled, and we have some randomness available,
+ * locate the kernel at a randomized offset in physical memory.
+ */
+ *reserve_size = kernel_memsize + TEXT_OFFSET;
+ status = efi_random_alloc(sys_table_arg, *reserve_size,
+ MIN_KIMG_ALIGN, reserve_addr,
+ phys_seed);
+ *image_addr = *reserve_addr + TEXT_OFFSET;
+ } else {
/*
- * First, try a straight allocation at the preferred offset.
+ * Else, try a straight allocation at the preferred offset.
* This will work around the issue where, if dram_base == 0x0,
* efi_low_alloc() refuses to allocate at 0x0 (to prevent the
* address of the allocation from being mistaken for a FAIL return
@@ -52,27 +102,31 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
* Mustang), we can still place the kernel at the address
* 'dram_base + TEXT_OFFSET'.
*/
+ if (*image_addr == preferred_offset)
+ return EFI_SUCCESS;
+
*image_addr = *reserve_addr = preferred_offset;
- nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
- EFI_PAGE_SIZE;
+ *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
+
status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages,
+ EFI_LOADER_DATA,
+ *reserve_size / EFI_PAGE_SIZE,
(efi_physical_addr_t *)reserve_addr);
- if (status != EFI_SUCCESS) {
- kernel_memsize += TEXT_OFFSET;
- status = efi_low_alloc(sys_table_arg, kernel_memsize,
- SZ_2M, reserve_addr);
+ }
- if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
- return status;
- }
- *image_addr = *reserve_addr + TEXT_OFFSET;
+ if (status != EFI_SUCCESS) {
+ *reserve_size = kernel_memsize + TEXT_OFFSET;
+ status = efi_low_alloc(sys_table_arg, *reserve_size,
+ MIN_KIMG_ALIGN, reserve_addr);
+
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+ *reserve_size = 0;
+ return status;
}
- memcpy((void *)*image_addr, old_image_addr, kernel_size);
- *reserve_size = kernel_memsize;
+ *image_addr = *reserve_addr + TEXT_OFFSET;
}
-
+ memcpy((void *)*image_addr, old_image_addr, kernel_size);
return EFI_SUCCESS;
}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index f07d4a67fa76..29ed2f9b218c 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -649,6 +649,10 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
return dst;
}
+#ifndef MAX_CMDLINE_ADDRESS
+#define MAX_CMDLINE_ADDRESS ULONG_MAX
+#endif
+
/*
* Convert the unicode UEFI command line to ASCII to pass to kernel.
* Size of memory allocated return in *cmd_line_len.
@@ -684,7 +688,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
options_bytes++; /* NUL termination */
- status = efi_low_alloc(sys_table_arg, options_bytes, 0, &cmdline_addr);
+ status = efi_high_alloc(sys_table_arg, options_bytes, 0,
+ &cmdline_addr, MAX_CMDLINE_ADDRESS);
if (status != EFI_SUCCESS)
return NULL;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 6b6548fda089..ee49cd23ee63 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,6 +5,16 @@
/* error code which can't be mistaken for valid address */
#define EFI_ERROR (~0UL)
+/*
+ * __init annotations should not be used in the EFI stub, since the code is
+ * either included in the decompressor (x86, ARM) where they have no effect,
+ * or the whole stub is __init annotated at the section level (arm64), by
+ * renaming the sections, in which case the __init annotation will be
+ * redundant, and will result in section names like .init.init.text, and our
+ * linker script does not expect that.
+ */
+#undef __init
+
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
@@ -43,4 +53,13 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
+ unsigned long size, u8 *out);
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long random_seed);
+
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index cf7b7d46302a..6dba78aef337 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -147,6 +147,20 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (status)
goto fdt_set_fail;
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ efi_status_t efi_status;
+
+ efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
+ (u8 *)&fdt_val64);
+ if (efi_status == EFI_SUCCESS) {
+ status = fdt_setprop(fdt, node, "kaslr-seed",
+ &fdt_val64, sizeof(fdt_val64));
+ if (status)
+ goto fdt_set_fail;
+ } else if (efi_status != EFI_NOT_FOUND) {
+ return efi_status;
+ }
+ }
return EFI_SUCCESS;
fdt_set_fail:
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
new file mode 100644
index 000000000000..53f6d3fe6d86
--- /dev/null
+++ b/drivers/firmware/efi/libstub/random.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+struct efi_rng_protocol {
+ efi_status_t (*get_info)(struct efi_rng_protocol *,
+ unsigned long *, efi_guid_t *);
+ efi_status_t (*get_rng)(struct efi_rng_protocol *,
+ efi_guid_t *, unsigned long, u8 *out);
+};
+
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
+ unsigned long size, u8 *out)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_status_t status;
+ struct efi_rng_protocol *rng;
+
+ status = efi_call_early(locate_protocol, &rng_proto, NULL,
+ (void **)&rng);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return rng->get_rng(rng, NULL, size, out);
+}
+
+/*
+ * Return the number of slots covered by this entry, i.e., the number of
+ * addresses it covers that are suitably aligned and supply enough room
+ * for the allocation.
+ */
+static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ unsigned long size,
+ unsigned long align)
+{
+ u64 start, end;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ return 0;
+
+ start = round_up(md->phys_addr, align);
+ end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size,
+ align);
+
+ if (start > end)
+ return 0;
+
+ return (end - start + 1) / align;
+}
+
+/*
+ * The UEFI memory descriptors have a virtual address field that is only used
+ * when installing the virtual mapping using SetVirtualAddressMap(). Since it
+ * is unused here, we can reuse it to keep track of each descriptor's slot
+ * count.
+ */
+#define MD_NUM_SLOTS(md) ((md)->virt_addr)
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *addr,
+ unsigned long random_seed)
+{
+ unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ efi_status_t status;
+ efi_memory_desc_t *memory_map;
+ int map_offset;
+
+ status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
+ &desc_size, NULL, NULL);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ /* count the suitable slots in each memory map entry */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ unsigned long slots;
+
+ slots = get_entry_num_slots(md, size, align);
+ MD_NUM_SLOTS(md) = slots;
+ total_slots += slots;
+ }
+
+ /* pick a random slot index in the range [0, total_slots) */
+ target_slot = (total_slots * (u16)random_seed) >> 16;
+
+ /*
+ * target_slot is now a value in the range [0, total_slots), and so
+ * it corresponds with exactly one of the suitable slots we recorded
+ * when iterating over the memory map the first time around.
+ *
+ * So iterate over the memory map again, subtracting the number of
+ * slots of each entry at each iteration, until we have found the entry
+ * that covers our chosen slot. Use the residual value of target_slot
+ * to calculate the randomly chosen address, and allocate it directly
+ * using EFI_ALLOCATE_ADDRESS.
+ */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ efi_physical_addr_t target;
+ unsigned long pages;
+
+ if (target_slot >= MD_NUM_SLOTS(md)) {
+ target_slot -= MD_NUM_SLOTS(md);
+ continue;
+ }
+
+ target = round_up(md->phys_addr, align) + target_slot * align;
+ pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+
+ status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, pages, &target);
+ if (status == EFI_SUCCESS)
+ *addr = target;
+ break;
+ }
+
+ efi_call_early(free_pool, memory_map);
+
+ return status;
+}
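The slot arithmetic in efi_random_alloc() above is easy to exercise outside the kernel. Below is a minimal user-space sketch (not kernel code: the memory regions, sizes and seed value are invented for illustration) of how get_entry_num_slots() and the (total_slots * (u16)random_seed) >> 16 selection combine to pick one suitably aligned address:

#include <stdio.h>
#include <stdint.h>

#define EFI_PAGE_SIZE 4096ULL

/* mirrors get_entry_num_slots(): aligned addresses with room for `size` */
static unsigned long num_slots(uint64_t phys, uint64_t npages,
			       uint64_t size, uint64_t align)
{
	uint64_t start = (phys + align - 1) & ~(align - 1);	/* round_up */
	uint64_t end = (phys + npages * EFI_PAGE_SIZE - size) & ~(align - 1);

	return (start > end) ? 0 : (end - start + 1) / align;
}

int main(void)
{
	/* two hypothetical EFI_CONVENTIONAL_MEMORY regions */
	uint64_t base[2] = { 0x40000000, 0x80000000 };
	uint64_t pages[2] = { 0x4000, 0x8000 };		/* 64 MB and 128 MB */
	uint64_t size = 16 << 20, align = 2 << 20;	/* 16 MB, 2 MB aligned */
	unsigned long slots[2], total = 0, target;
	uint16_t seed = 0xCAFE;		/* the low 16 bits of the RNG output */

	for (int i = 0; i < 2; i++) {
		slots[i] = num_slots(base[i], pages[i], size, align);
		total += slots[i];
	}

	target = (total * (unsigned long)seed) >> 16;
	for (int i = 0; i < 2; i++) {
		if (target >= slots[i]) {
			target -= slots[i];
			continue;
		}
		printf("allocate at %#llx\n",
		       (unsigned long long)(base[i] + target * align));
		break;
	}
	return 0;
}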
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 228bbf910461..de6953039af6 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -61,63 +61,23 @@
*/
static DEFINE_SPINLOCK(efi_runtime_lock);
-/*
- * Some runtime services calls can be reentrant under NMI, even if the table
- * above says they are not. (source: UEFI Specification v2.4A)
- *
- * Table 32. Functions that may be called after Machine Check, INIT and NMI
- * +----------------------------+------------------------------------------+
- * | Function | Called after Machine Check, INIT and NMI |
- * +----------------------------+------------------------------------------+
- * | GetTime() | Yes, even if previously busy. |
- * | GetVariable() | Yes, even if previously busy |
- * | GetNextVariableName() | Yes, even if previously busy |
- * | QueryVariableInfo() | Yes, even if previously busy |
- * | SetVariable() | Yes, even if previously busy |
- * | UpdateCapsule() | Yes, even if previously busy |
- * | QueryCapsuleCapabilities() | Yes, even if previously busy |
- * | ResetSystem() | Yes, even if previously busy |
- * +----------------------------+------------------------------------------+
- *
- * In order to prevent deadlocks under NMI, the wrappers for these functions
- * may only grab the efi_runtime_lock or rtc_lock spinlocks if !efi_in_nmi().
- * However, not all of the services listed are reachable through NMI code paths,
- * so the special handling as suggested by the UEFI spec is only implemented
- * for QueryVariableInfo() and SetVariable(), as these can be reached in NMI
- * context through efi_pstore_write().
- */
-
-/*
- * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"),
- * the EFI specification requires that callers of the time related runtime
- * functions serialize with other CMOS accesses in the kernel, as the EFI time
- * functions may choose to also use the legacy CMOS RTC.
- */
-__weak DEFINE_SPINLOCK(rtc_lock);
-
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&rtc_lock, flags);
spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_time, tm, tc);
spin_unlock(&efi_runtime_lock);
- spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
static efi_status_t virt_efi_set_time(efi_time_t *tm)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&rtc_lock, flags);
spin_lock(&efi_runtime_lock);
status = efi_call_virt(set_time, tm);
spin_unlock(&efi_runtime_lock);
- spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
@@ -125,27 +85,21 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
efi_bool_t *pending,
efi_time_t *tm)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&rtc_lock, flags);
spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
spin_unlock(&efi_runtime_lock);
- spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&rtc_lock, flags);
spin_lock(&efi_runtime_lock);
status = efi_call_virt(set_wakeup_time, enabled, tm);
spin_unlock(&efi_runtime_lock);
- spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
@@ -155,13 +109,12 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
unsigned long *data_size,
void *data)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_variable, name, vendor, attr, data_size,
data);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -169,12 +122,11 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_next_variable, name_size, name, vendor);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -184,13 +136,12 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
unsigned long data_size,
void *data)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(set_variable, name, vendor, attr, data_size,
data);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -199,15 +150,14 @@ virt_efi_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data)
{
- unsigned long flags;
efi_status_t status;
- if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ if (!spin_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
status = efi_call_virt(set_variable, name, vendor, attr, data_size,
data);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -217,27 +167,45 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
u64 *remaining_space,
u64 *max_variable_size)
{
- unsigned long flags;
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(query_variable_info, attr, storage_space,
remaining_space, max_variable_size);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t
+virt_efi_query_variable_info_nonblocking(u32 attr,
+ u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (!spin_trylock(&efi_runtime_lock))
+ return EFI_NOT_READY;
+
+ status = efi_call_virt(query_variable_info, attr, storage_space,
+ remaining_space, max_variable_size);
+ spin_unlock(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_next_high_mono_count, count);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -246,26 +214,23 @@ static void virt_efi_reset_system(int reset_type,
unsigned long data_size,
efi_char16_t *data)
{
- unsigned long flags;
-
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
__efi_call_virt(reset_system, reset_type, status, data_size, data);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
}
static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
unsigned long count,
unsigned long sg_list)
{
- unsigned long flags;
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(update_capsule, capsules, count, sg_list);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -274,16 +239,15 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
u64 *max_size,
int *reset_type)
{
- unsigned long flags;
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
reset_type);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -300,6 +264,7 @@ void efi_native_runtime_setup(void)
efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
efi.reset_system = virt_efi_reset_system;
efi.query_variable_info = virt_efi_query_variable_info;
+ efi.query_variable_info_nonblocking = virt_efi_query_variable_info_nonblocking;
efi.update_capsule = virt_efi_update_capsule;
efi.query_capsule_caps = virt_efi_query_capsule_caps;
}
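The locking split above is the whole story of the *_nonblocking variants: both flavours serialize on efi_runtime_lock, but the nonblocking one only ever trylocks and hands back EFI_NOT_READY for the caller to retry, instead of waiting. A user-space analogue using POSIX mutexes, purely for illustration (the names and status values are stand-ins, not the kernel API):

#include <pthread.h>
#include <stdio.h>

#define EFI_SUCCESS   0
#define EFI_NOT_READY 6		/* illustrative value only */

static pthread_mutex_t runtime_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the actual firmware call */
static int firmware_query(void)
{
	return EFI_SUCCESS;
}

static int query_blocking(void)
{
	int status;

	pthread_mutex_lock(&runtime_lock);	/* wait as long as needed */
	status = firmware_query();
	pthread_mutex_unlock(&runtime_lock);
	return status;
}

static int query_nonblocking(void)
{
	int status;

	if (pthread_mutex_trylock(&runtime_lock) != 0)
		return EFI_NOT_READY;	/* busy: caller retries later */
	status = firmware_query();
	pthread_mutex_unlock(&runtime_lock);
	return status;
}

int main(void)
{
	printf("blocking: %d, nonblocking: %d\n",
	       query_blocking(), query_nonblocking());
	return 0;
}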
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 7f2ea21c730d..34b741940494 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
{ NULL_GUID, "", NULL },
};
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ * final "*" character matches any trailing characters in @var_name,
+ * including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ * that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
static bool
variable_matches(const char *var_name, size_t len, const char *match_name,
int *match)
{
for (*match = 0; ; (*match)++) {
char c = match_name[*match];
- char u = var_name[*match];
- /* Wildcard in the matching name means we've matched */
- if (c == '*')
+ switch (c) {
+ case '*':
+ /* Wildcard in @match_name means we've matched. */
return true;
- /* Case sensitive match */
- if (!c && *match == len)
- return true;
+ case '\0':
+ /* @match_name has ended. Has @var_name too? */
+ return (*match == len);
- if (c != u)
+ default:
+ /*
+ * We've reached a non-wildcard char in @match_name.
+ * Continue only if there's an identical character in
+ * @var_name.
+ */
+ if (*match < len && c == var_name[*match])
+ continue;
return false;
-
- if (!c)
- return true;
+ }
}
- return true;
}
bool
@@ -300,7 +315,18 @@ check_var_size(u32 attributes, unsigned long size)
if (!fops->query_variable_store)
return EFI_UNSUPPORTED;
- return fops->query_variable_store(attributes, size);
+ return fops->query_variable_store(attributes, size, false);
+}
+
+static efi_status_t
+check_var_size_nonblocking(u32 attributes, unsigned long size)
+{
+ const struct efivar_operations *fops = __efivars->ops;
+
+ if (!fops->query_variable_store)
+ return EFI_UNSUPPORTED;
+
+ return fops->query_variable_store(attributes, size, true);
}
static int efi_status_to_err(efi_status_t status)
@@ -681,7 +707,8 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
if (!spin_trylock_irqsave(&__efivars->lock, flags))
return -EBUSY;
- status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
+ status = check_var_size_nonblocking(attributes,
+ size + ucs2_strsize(name, 1024));
if (status != EFI_SUCCESS) {
spin_unlock_irqrestore(&__efivars->lock, flags);
return -ENOSPC;
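The rewritten variable_matches() earlier in this hunk is easy to sanity-check in isolation. Here is a user-space copy of the new switch-based loop with a few invented test strings (they are not taken from the kernel's variable_validate table):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* user-space copy of the rewritten matcher */
static bool variable_matches(const char *var_name, size_t len,
			     const char *match_name, int *match)
{
	for (*match = 0; ; (*match)++) {
		char c = match_name[*match];

		switch (c) {
		case '*':
			return true;
		case '\0':
			return *match == len;
		default:
			if ((size_t)*match < len && c == var_name[*match])
				continue;
			return false;
		}
	}
}

int main(void)
{
	const struct { const char *var, *pat; } tests[] = {
		{ "BootOrder", "Boot*" },	/* wildcard suffix: matches */
		{ "BootOrder", "BootNext" },	/* diverges at 'O' vs 'N' */
		{ "Timeout", "Timeout" },	/* exact match */
		{ "Time", "Timeout" },		/* @var_name ends early */
	};

	for (size_t i = 0; i < 4; i++) {
		int m;
		bool ok = variable_matches(tests[i].var, strlen(tests[i].var),
					   tests[i].pat, &m);
		printf("%-10s vs %-8s -> %s (matched %d chars)\n",
		       tests[i].var, tests[i].pat, ok ? "true" : "false", m);
	}
	return 0;
}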
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 72791232e46b..81037e5fe301 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -319,6 +319,9 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
+ case ISCSI_BOOT_ETH_PREFIX_LEN:
+ str += sprintf(str, "%d\n", nic->subnet_mask_prefix);
+ break;
case ISCSI_BOOT_ETH_ORIGIN:
str += sprintf(str, "%d\n", nic->origin);
break;
@@ -460,6 +463,7 @@ static umode_t ibft_check_nic_for(void *data, int type)
if (address_not_null(nic->ip_addr))
rc = S_IRUGO;
break;
+ case ISCSI_BOOT_ETH_PREFIX_LEN:
case ISCSI_BOOT_ETH_SUBNET_MASK:
if (nic->subnet_mask_prefix)
rc = S_IRUGO;
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index f25cd79c8a79..b5d05807e6ec 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) "psci: " fmt
#include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/of.h>
@@ -21,10 +22,12 @@
#include <linux/printk.h>
#include <linux/psci.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
#include <linux/suspend.h>
#include <uapi/linux/psci.h>
+#include <asm/cpuidle.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
#include <asm/smp_plat.h>
@@ -244,6 +247,123 @@ static int __init psci_features(u32 psci_func_id)
psci_func_id, 0, 0);
}
+#ifdef CONFIG_CPU_IDLE
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+ int i, ret, count = 0;
+ u32 *psci_states;
+ struct device_node *state_node;
+
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized,
+ * idle states must not be enabled, so bail out.
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ /* Count idle states */
+ while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+ count))) {
+ count++;
+ of_node_put(state_node);
+ }
+
+ if (!count)
+ return -ENODEV;
+
+ psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ if (!psci_states)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ u32 state;
+
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+ ret = of_property_read_u32(state_node,
+ "arm,psci-suspend-param",
+ &state);
+ if (ret) {
+ pr_warn(" * %s missing arm,psci-suspend-param property\n",
+ state_node->full_name);
+ of_node_put(state_node);
+ goto free_mem;
+ }
+
+ of_node_put(state_node);
+ pr_debug("psci-power-state %#x index %d\n", state, i);
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ ret = -EINVAL;
+ goto free_mem;
+ }
+ psci_states[i] = state;
+ }
+ /* Idle states parsed correctly, initialize per-cpu pointer */
+ per_cpu(psci_power_state, cpu) = psci_states;
+ return 0;
+
+free_mem:
+ kfree(psci_states);
+ return ret;
+}
+
+int psci_cpu_init_idle(unsigned int cpu)
+{
+ struct device_node *cpu_node;
+ int ret;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (!cpu_node)
+ return -ENODEV;
+
+ ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+ of_node_put(cpu_node);
+
+ return ret;
+}
+
+static int psci_suspend_finisher(unsigned long index)
+{
+ u32 *state = __this_cpu_read(psci_power_state);
+
+ return psci_ops.cpu_suspend(state[index - 1],
+ virt_to_phys(cpu_resume));
+}
+
+int psci_cpu_suspend_enter(unsigned long index)
+{
+ int ret;
+ u32 *state = __this_cpu_read(psci_power_state);
+ /*
+ * idle state index 0 corresponds to wfi; it should never reach
+ * the cpu_suspend operations.
+ */
+ if (WARN_ON_ONCE(!index))
+ return -EINVAL;
+
+ if (!psci_power_state_loses_context(state[index - 1]))
+ ret = psci_ops.cpu_suspend(state[index - 1], 0);
+ else
+ ret = cpu_suspend(index, psci_suspend_finisher);
+
+ return ret;
+}
+
+/* ARM specific CPU idle operations */
+#ifdef CONFIG_ARM
+static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+ .suspend = psci_cpu_suspend_enter,
+ .init = psci_dt_cpu_init_idle,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
+#endif
+#endif
+
static int psci_system_suspend(unsigned long unused)
{
return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
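One detail worth spelling out from the code above: psci_suspend_finisher() and psci_cpu_suspend_enter() index the table with state[index - 1] because cpuidle state 0 is plain WFI, entered without any firmware call, so psci_states[] only holds parameters for the deeper states. A standalone sketch of that convention (the state parameters and the enter_wfi() helper are invented):

#include <stdio.h>

/* invented suspend parameters for two deeper-than-WFI states */
static const unsigned int psci_states[] = { 0x0010000, 0x1010000 };

static void enter_wfi(void)	/* hypothetical architectural helper */
{
	puts("wfi");
}

static void enter_idle(unsigned long index)
{
	if (index == 0) {
		/* index 0: architectural WFI, no PSCI call, no table entry */
		enter_wfi();
		return;
	}
	/* deeper states: the table is shifted down by one */
	printf("PSCI CPU_SUSPEND param %#x\n", psci_states[index - 1]);
}

int main(void)
{
	for (unsigned long i = 0; i < 3; i++)
		enter_idle(i);
	return 0;
}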
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
new file mode 100644
index 000000000000..1b95475b6aef
--- /dev/null
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -0,0 +1,773 @@
+/*
+ * drivers/firmware/qemu_fw_cfg.c
+ *
+ * Copyright 2015 Carnegie Mellon University
+ *
+ * Expose entries from QEMU's firmware configuration (fw_cfg) device in
+ * sysfs (read-only, under "/sys/firmware/qemu_fw_cfg/...").
+ *
+ * The fw_cfg device may be instantiated via either an ACPI node (on x86
+ * and select subsets of aarch64), a Device Tree node (on arm), or using
+ * a kernel module (or command line) parameter with the following syntax:
+ *
+ * [fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>]
+ * or
+ * [fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>]
+ *
+ * where:
+ * <size> := size of ioport or mmio range
+ * <base> := physical base address of ioport or mmio range
+ * <ctrl_off> := (optional) offset of control register
+ * <data_off> := (optional) offset of data register
+ *
+ * e.g.:
+ * fw_cfg.ioport=2@0x510:0:1 (the default on x86)
+ * or
+ * fw_cfg.mmio=0xA@0x9020000:8:0 (the default on arm)
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+
+MODULE_AUTHOR("Gabriel L. Somlo <somlo@cmu.edu>");
+MODULE_DESCRIPTION("QEMU fw_cfg sysfs support");
+MODULE_LICENSE("GPL");
+
+/* selector key values for "well-known" fw_cfg entries */
+#define FW_CFG_SIGNATURE 0x00
+#define FW_CFG_ID 0x01
+#define FW_CFG_FILE_DIR 0x19
+
+/* size in bytes of fw_cfg signature */
+#define FW_CFG_SIG_SIZE 4
+
+/* fw_cfg "file name" is up to 56 characters (including terminating nul) */
+#define FW_CFG_MAX_FILE_PATH 56
+
+/* fw_cfg file directory entry type */
+struct fw_cfg_file {
+ u32 size;
+ u16 select;
+ u16 reserved;
+ char name[FW_CFG_MAX_FILE_PATH];
+};
+
+/* fw_cfg device i/o register addresses */
+static bool fw_cfg_is_mmio;
+static phys_addr_t fw_cfg_p_base;
+static resource_size_t fw_cfg_p_size;
+static void __iomem *fw_cfg_dev_base;
+static void __iomem *fw_cfg_reg_ctrl;
+static void __iomem *fw_cfg_reg_data;
+
+/* atomic access to fw_cfg device (potentially slow i/o, so using mutex) */
+static DEFINE_MUTEX(fw_cfg_dev_lock);
+
+/* pick appropriate endianness for selector key */
+static inline u16 fw_cfg_sel_endianness(u16 key)
+{
+ return fw_cfg_is_mmio ? cpu_to_be16(key) : cpu_to_le16(key);
+}
+
+/* read chunk of given fw_cfg blob (caller responsible for sanity-check) */
+static inline void fw_cfg_read_blob(u16 key,
+ void *buf, loff_t pos, size_t count)
+{
+ u32 glk = -1U;
+ acpi_status status;
+
+ /* If we have ACPI, ensure mutual exclusion against any potential
+ * device access by the firmware, e.g. via AML methods:
+ */
+ status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+ if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+ /* Should never get here */
+ WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
+ memset(buf, 0, count);
+ return;
+ }
+
+ mutex_lock(&fw_cfg_dev_lock);
+ iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
+ while (pos-- > 0)
+ ioread8(fw_cfg_reg_data);
+ ioread8_rep(fw_cfg_reg_data, buf, count);
+ mutex_unlock(&fw_cfg_dev_lock);
+
+ acpi_release_global_lock(glk);
+}
+
+/* clean up fw_cfg device i/o */
+static void fw_cfg_io_cleanup(void)
+{
+ if (fw_cfg_is_mmio) {
+ iounmap(fw_cfg_dev_base);
+ release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+ } else {
+ ioport_unmap(fw_cfg_dev_base);
+ release_region(fw_cfg_p_base, fw_cfg_p_size);
+ }
+}
+
+/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
+#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
+# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+# define FW_CFG_CTRL_OFF 0x08
+# define FW_CFG_DATA_OFF 0x00
+# elif (defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC32)) /* ppc/mac,sun4m */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x02
+# elif (defined(CONFIG_X86) || defined(CONFIG_SPARC64)) /* x86, sun4u */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x01
+# else
+# warning "QEMU FW_CFG may not be available on this architecture!"
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x01
+# endif
+#endif
+
+/* initialize fw_cfg device i/o from platform data */
+static int fw_cfg_do_platform_probe(struct platform_device *pdev)
+{
+ char sig[FW_CFG_SIG_SIZE];
+ struct resource *range, *ctrl, *data;
+
+ /* acquire i/o range details */
+ fw_cfg_is_mmio = false;
+ range = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!range) {
+ fw_cfg_is_mmio = true;
+ range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!range)
+ return -EINVAL;
+ }
+ fw_cfg_p_base = range->start;
+ fw_cfg_p_size = resource_size(range);
+
+ if (fw_cfg_is_mmio) {
+ if (!request_mem_region(fw_cfg_p_base,
+ fw_cfg_p_size, "fw_cfg_mem"))
+ return -EBUSY;
+ fw_cfg_dev_base = ioremap(fw_cfg_p_base, fw_cfg_p_size);
+ if (!fw_cfg_dev_base) {
+ release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+ return -EFAULT;
+ }
+ } else {
+ if (!request_region(fw_cfg_p_base,
+ fw_cfg_p_size, "fw_cfg_io"))
+ return -EBUSY;
+ fw_cfg_dev_base = ioport_map(fw_cfg_p_base, fw_cfg_p_size);
+ if (!fw_cfg_dev_base) {
+ release_region(fw_cfg_p_base, fw_cfg_p_size);
+ return -EFAULT;
+ }
+ }
+
+ /* were custom register offsets provided (e.g. on the command line)? */
+ ctrl = platform_get_resource_byname(pdev, IORESOURCE_REG, "ctrl");
+ data = platform_get_resource_byname(pdev, IORESOURCE_REG, "data");
+ if (ctrl && data) {
+ fw_cfg_reg_ctrl = fw_cfg_dev_base + ctrl->start;
+ fw_cfg_reg_data = fw_cfg_dev_base + data->start;
+ } else {
+ /* use architecture-specific offsets */
+ fw_cfg_reg_ctrl = fw_cfg_dev_base + FW_CFG_CTRL_OFF;
+ fw_cfg_reg_data = fw_cfg_dev_base + FW_CFG_DATA_OFF;
+ }
+
+ /* verify fw_cfg device signature */
+ fw_cfg_read_blob(FW_CFG_SIGNATURE, sig, 0, FW_CFG_SIG_SIZE);
+ if (memcmp(sig, "QEMU", FW_CFG_SIG_SIZE) != 0) {
+ fw_cfg_io_cleanup();
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* fw_cfg revision attribute, in /sys/firmware/qemu_fw_cfg top-level dir. */
+static u32 fw_cfg_rev;
+
+static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
+{
+ return sprintf(buf, "%u\n", fw_cfg_rev);
+}
+
+static const struct {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
+} fw_cfg_rev_attr = {
+ .attr = { .name = "rev", .mode = S_IRUSR },
+ .show = fw_cfg_showrev,
+};
+
+/* fw_cfg_sysfs_entry type */
+struct fw_cfg_sysfs_entry {
+ struct kobject kobj;
+ struct fw_cfg_file f;
+ struct list_head list;
+};
+
+/* get fw_cfg_sysfs_entry from kobject member */
+static inline struct fw_cfg_sysfs_entry *to_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct fw_cfg_sysfs_entry, kobj);
+}
+
+/* fw_cfg_sysfs_attribute type */
+struct fw_cfg_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct fw_cfg_sysfs_entry *entry, char *buf);
+};
+
+/* get fw_cfg_sysfs_attribute from attribute member */
+static inline struct fw_cfg_sysfs_attribute *to_attr(struct attribute *attr)
+{
+ return container_of(attr, struct fw_cfg_sysfs_attribute, attr);
+}
+
+/* global cache of fw_cfg_sysfs_entry objects */
+static LIST_HEAD(fw_cfg_entry_cache);
+
+/* kobjects removed lazily by kernel, mutual exclusion needed */
+static DEFINE_SPINLOCK(fw_cfg_cache_lock);
+
+static inline void fw_cfg_sysfs_cache_enlist(struct fw_cfg_sysfs_entry *entry)
+{
+ spin_lock(&fw_cfg_cache_lock);
+ list_add_tail(&entry->list, &fw_cfg_entry_cache);
+ spin_unlock(&fw_cfg_cache_lock);
+}
+
+static inline void fw_cfg_sysfs_cache_delist(struct fw_cfg_sysfs_entry *entry)
+{
+ spin_lock(&fw_cfg_cache_lock);
+ list_del(&entry->list);
+ spin_unlock(&fw_cfg_cache_lock);
+}
+
+static void fw_cfg_sysfs_cache_cleanup(void)
+{
+ struct fw_cfg_sysfs_entry *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
+ /* will end up invoking fw_cfg_sysfs_cache_delist()
+ * via each object's release() method (i.e. destructor)
+ */
+ kobject_put(&entry->kobj);
+ }
+}
+
+/* default_attrs: per-entry attributes and show methods */
+
+#define FW_CFG_SYSFS_ATTR(_attr) \
+struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
+ .attr = { .name = __stringify(_attr), .mode = S_IRUSR }, \
+ .show = fw_cfg_sysfs_show_##_attr, \
+}
+
+static ssize_t fw_cfg_sysfs_show_size(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%u\n", e->f.size);
+}
+
+static ssize_t fw_cfg_sysfs_show_key(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%u\n", e->f.select);
+}
+
+static ssize_t fw_cfg_sysfs_show_name(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%s\n", e->f.name);
+}
+
+static FW_CFG_SYSFS_ATTR(size);
+static FW_CFG_SYSFS_ATTR(key);
+static FW_CFG_SYSFS_ATTR(name);
+
+static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
+ &fw_cfg_sysfs_attr_size.attr,
+ &fw_cfg_sysfs_attr_key.attr,
+ &fw_cfg_sysfs_attr_name.attr,
+ NULL,
+};
+
+/* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */
+static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
+ char *buf)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+ struct fw_cfg_sysfs_attribute *attr = to_attr(a);
+
+ return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops fw_cfg_sysfs_attr_ops = {
+ .show = fw_cfg_sysfs_attr_show,
+};
+
+/* release: destructor, to be called via kobject_put() */
+static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+ fw_cfg_sysfs_cache_delist(entry);
+ kfree(entry);
+}
+
+/* kobj_type: ties together all properties required to register an entry */
+static struct kobj_type fw_cfg_sysfs_entry_ktype = {
+ .default_attrs = fw_cfg_sysfs_entry_attrs,
+ .sysfs_ops = &fw_cfg_sysfs_attr_ops,
+ .release = fw_cfg_sysfs_release_entry,
+};
+
+/* raw-read method and attribute */
+static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+ if (pos > entry->f.size)
+ return -EINVAL;
+
+ if (count > entry->f.size - pos)
+ count = entry->f.size - pos;
+
+ fw_cfg_read_blob(entry->f.select, buf, pos, count);
+ return count;
+}
+
+static struct bin_attribute fw_cfg_sysfs_attr_raw = {
+ .attr = { .name = "raw", .mode = S_IRUSR },
+ .read = fw_cfg_sysfs_read_raw,
+};
+
+/*
+ * Create a kset subdirectory matching each '/' delimited dirname token
+ * in 'name', starting with sysfs kset/folder 'dir'; At the end, create
+ * a symlink directed at the given 'target'.
+ * NOTE: We do this on a best-effort basis, since 'name' is not guaranteed
+ * to be a well-behaved path name. Whenever a symlink vs. kset directory
+ * name collision occurs, the kernel will issue big scary warnings while
+ * refusing to add the offending link or directory. We follow up with our
+ * own, slightly less scary error messages explaining the situation :)
+ */
+static int fw_cfg_build_symlink(struct kset *dir,
+ struct kobject *target, const char *name)
+{
+ int ret;
+ struct kset *subdir;
+ struct kobject *ko;
+ char *name_copy, *p, *tok;
+
+ if (!dir || !target || !name || !*name)
+ return -EINVAL;
+
+ /* clone a copy of name for parsing */
+ name_copy = p = kstrdup(name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ /* create folders for each dirname token, then symlink for basename */
+ while ((tok = strsep(&p, "/")) && *tok) {
+
+ /* last (basename) token? If so, add symlink here */
+ if (!p || !*p) {
+ ret = sysfs_create_link(&dir->kobj, target, tok);
+ break;
+ }
+
+ /* does the current dir contain an item named after tok? */
+ ko = kset_find_obj(dir, tok);
+ if (ko) {
+ /* drop reference added by kset_find_obj */
+ kobject_put(ko);
+
+ /* ko MUST be a kset - we're about to use it as one! */
+ if (ko->ktype != dir->kobj.ktype) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* descend into already existing subdirectory */
+ dir = to_kset(ko);
+ } else {
+ /* create new subdirectory kset */
+ subdir = kzalloc(sizeof(struct kset), GFP_KERNEL);
+ if (!subdir) {
+ ret = -ENOMEM;
+ break;
+ }
+ subdir->kobj.kset = dir;
+ subdir->kobj.ktype = dir->kobj.ktype;
+ ret = kobject_set_name(&subdir->kobj, "%s", tok);
+ if (ret) {
+ kfree(subdir);
+ break;
+ }
+ ret = kset_register(subdir);
+ if (ret) {
+ kfree(subdir);
+ break;
+ }
+
+ /* descend into newly created subdirectory */
+ dir = subdir;
+ }
+ }
+
+ /* we're done with cloned copy of name */
+ kfree(name_copy);
+ return ret;
+}
+
+/* recursively unregister fw_cfg/by_name/ kset directory tree */
+static void fw_cfg_kset_unregister_recursive(struct kset *kset)
+{
+ struct kobject *k, *next;
+
+ list_for_each_entry_safe(k, next, &kset->list, entry)
+ /* all set members are ksets too, but check just in case... */
+ if (k->ktype == kset->kobj.ktype)
+ fw_cfg_kset_unregister_recursive(to_kset(k));
+
+ /* symlinks are cleanly and automatically removed with the directory */
+ kset_unregister(kset);
+}
+
+/* kobjects & kset representing top-level, by_key, and by_name folders */
+static struct kobject *fw_cfg_top_ko;
+static struct kobject *fw_cfg_sel_ko;
+static struct kset *fw_cfg_fname_kset;
+
+/* register an individual fw_cfg file */
+static int fw_cfg_register_file(const struct fw_cfg_file *f)
+{
+ int err;
+ struct fw_cfg_sysfs_entry *entry;
+
+ /* allocate new entry */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ /* set file entry information */
+ memcpy(&entry->f, f, sizeof(struct fw_cfg_file));
+
+ /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
+ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
+ fw_cfg_sel_ko, "%d", entry->f.select);
+ if (err)
+ goto err_register;
+
+ /* add raw binary content access */
+ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
+ if (err)
+ goto err_add_raw;
+
+ /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
+ fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->f.name);
+
+ /* success, add entry to global cache */
+ fw_cfg_sysfs_cache_enlist(entry);
+ return 0;
+
+err_add_raw:
+ kobject_del(&entry->kobj);
+err_register:
+ kfree(entry);
+ return err;
+}
+
+/* iterate over all fw_cfg directory entries, registering each one */
+static int fw_cfg_register_dir_entries(void)
+{
+ int ret = 0;
+ u32 count, i;
+ struct fw_cfg_file *dir;
+ size_t dir_size;
+
+ fw_cfg_read_blob(FW_CFG_FILE_DIR, &count, 0, sizeof(count));
+ count = be32_to_cpu(count);
+ dir_size = count * sizeof(struct fw_cfg_file);
+
+ dir = kmalloc(dir_size, GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
+
+ fw_cfg_read_blob(FW_CFG_FILE_DIR, dir, sizeof(count), dir_size);
+
+ for (i = 0; i < count; i++) {
+ dir[i].size = be32_to_cpu(dir[i].size);
+ dir[i].select = be16_to_cpu(dir[i].select);
+ ret = fw_cfg_register_file(&dir[i]);
+ if (ret)
+ break;
+ }
+
+ kfree(dir);
+ return ret;
+}
+
+/* unregister top-level or by_key folder */
+static inline void fw_cfg_kobj_cleanup(struct kobject *kobj)
+{
+ kobject_del(kobj);
+ kobject_put(kobj);
+}
+
+static int fw_cfg_sysfs_probe(struct platform_device *pdev)
+{
+ int err;
+
+ /* NOTE: If we supported multiple fw_cfg devices, we'd first create
+ * a subdirectory named after e.g. pdev->id, then hang per-device
+ * by_key (and by_name) subdirectories underneath it. However, only
+ * one fw_cfg device exists system-wide, so if one was already found
+ * earlier, we might as well stop here.
+ */
+ if (fw_cfg_sel_ko)
+ return -EBUSY;
+
+ /* create by_key and by_name subdirs of /sys/firmware/qemu_fw_cfg/ */
+ err = -ENOMEM;
+ fw_cfg_sel_ko = kobject_create_and_add("by_key", fw_cfg_top_ko);
+ if (!fw_cfg_sel_ko)
+ goto err_sel;
+ fw_cfg_fname_kset = kset_create_and_add("by_name", NULL, fw_cfg_top_ko);
+ if (!fw_cfg_fname_kset)
+ goto err_name;
+
+ /* initialize fw_cfg device i/o from platform data */
+ err = fw_cfg_do_platform_probe(pdev);
+ if (err)
+ goto err_probe;
+
+ /* get revision number, add matching top-level attribute */
+ fw_cfg_read_blob(FW_CFG_ID, &fw_cfg_rev, 0, sizeof(fw_cfg_rev));
+ fw_cfg_rev = le32_to_cpu(fw_cfg_rev);
+ err = sysfs_create_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+ if (err)
+ goto err_rev;
+
+ /* process fw_cfg file directory entry, registering each file */
+ err = fw_cfg_register_dir_entries();
+ if (err)
+ goto err_dir;
+
+ /* success */
+ pr_debug("fw_cfg: loaded.\n");
+ return 0;
+
+err_dir:
+ fw_cfg_sysfs_cache_cleanup();
+ sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+err_rev:
+ fw_cfg_io_cleanup();
+err_probe:
+ fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+err_name:
+ fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+err_sel:
+ return err;
+}
+
+static int fw_cfg_sysfs_remove(struct platform_device *pdev)
+{
+ pr_debug("fw_cfg: unloading.\n");
+ fw_cfg_sysfs_cache_cleanup();
+ fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+ fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+ fw_cfg_io_cleanup();
+ return 0;
+}
+
+static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
+ { .compatible = "qemu,fw-cfg-mmio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fw_cfg_sysfs_mmio_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id fw_cfg_sysfs_acpi_match[] = {
+ { "QEMU0002", },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
+#endif
+
+static struct platform_driver fw_cfg_sysfs_driver = {
+ .probe = fw_cfg_sysfs_probe,
+ .remove = fw_cfg_sysfs_remove,
+ .driver = {
+ .name = "fw_cfg",
+ .of_match_table = fw_cfg_sysfs_mmio_match,
+ .acpi_match_table = ACPI_PTR(fw_cfg_sysfs_acpi_match),
+ },
+};
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+
+static struct platform_device *fw_cfg_cmdline_dev;
+
+/* this probably belongs in e.g. include/linux/types.h,
+ * but right now we are the only ones doing it...
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define __PHYS_ADDR_PREFIX "ll"
+#else
+#define __PHYS_ADDR_PREFIX ""
+#endif
+
+/* use special scanf/printf modifier for phys_addr_t, resource_size_t */
+#define PH_ADDR_SCAN_FMT "@%" __PHYS_ADDR_PREFIX "i%n" \
+ ":%" __PHYS_ADDR_PREFIX "i" \
+ ":%" __PHYS_ADDR_PREFIX "i%n"
+
+#define PH_ADDR_PR_1_FMT "0x%" __PHYS_ADDR_PREFIX "x@" \
+ "0x%" __PHYS_ADDR_PREFIX "x"
+
+#define PH_ADDR_PR_3_FMT PH_ADDR_PR_1_FMT \
+ ":%" __PHYS_ADDR_PREFIX "u" \
+ ":%" __PHYS_ADDR_PREFIX "u"
+
+static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
+{
+ struct resource res[3] = {};
+ char *str;
+ phys_addr_t base;
+ resource_size_t size, ctrl_off, data_off;
+ int processed, consumed = 0;
+
+ /* only one fw_cfg device can exist system-wide, so if one
+ * was processed on the command line already, we might as
+ * well stop here.
+ */
+ if (fw_cfg_cmdline_dev) {
+ /* avoid leaking previously registered device */
+ platform_device_unregister(fw_cfg_cmdline_dev);
+ return -EINVAL;
+ }
+
+ /* consume "<size>" portion of command line argument */
+ size = memparse(arg, &str);
+
+ /* get "@<base>[:<ctrl_off>:<data_off>]" chunks */
+ processed = sscanf(str, PH_ADDR_SCAN_FMT,
+ &base, &consumed,
+ &ctrl_off, &data_off, &consumed);
+
+ /* sscanf() must process precisely 1 or 3 chunks:
+ * <base> is mandatory, optionally followed by <ctrl_off>
+ * and <data_off>;
+ * there must be no extra characters after the last chunk,
+ * so str[consumed] must be '\0'.
+ */
+ if (str[consumed] ||
+ (processed != 1 && processed != 3))
+ return -EINVAL;
+
+ res[0].start = base;
+ res[0].end = base + size - 1;
+ res[0].flags = !strcmp(kp->name, "mmio") ? IORESOURCE_MEM :
+ IORESOURCE_IO;
+
+ /* insert register offsets, if provided */
+ if (processed > 1) {
+ res[1].name = "ctrl";
+ res[1].start = ctrl_off;
+ res[1].flags = IORESOURCE_REG;
+ res[2].name = "data";
+ res[2].start = data_off;
+ res[2].flags = IORESOURCE_REG;
+ }
+
+ /* "processed" happens to nicely match the number of resources
+ * we need to pass in to this platform device.
+ */
+ fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
+ PLATFORM_DEVID_NONE, res, processed);
+ if (IS_ERR(fw_cfg_cmdline_dev))
+ return PTR_ERR(fw_cfg_cmdline_dev);
+
+ return 0;
+}
+
+static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
+{
+ /* stay silent if device was not configured via the command
+ * line, or if the parameter name (ioport/mmio) doesn't match
+ * the device setting
+ */
+ if (!fw_cfg_cmdline_dev ||
+ (!strcmp(kp->name, "mmio") ^
+ (fw_cfg_cmdline_dev->resource[0].flags == IORESOURCE_MEM)))
+ return 0;
+
+ switch (fw_cfg_cmdline_dev->num_resources) {
+ case 1:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_1_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start);
+ case 3:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_3_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start,
+ fw_cfg_cmdline_dev->resource[1].start,
+ fw_cfg_cmdline_dev->resource[2].start);
+ }
+
+ /* Should never get here */
+ WARN(1, "Unexpected number of resources: %d\n",
+ fw_cfg_cmdline_dev->num_resources);
+ return 0;
+}
+
+static const struct kernel_param_ops fw_cfg_cmdline_param_ops = {
+ .set = fw_cfg_cmdline_set,
+ .get = fw_cfg_cmdline_get,
+};
+
+device_param_cb(ioport, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+
+#endif /* CONFIG_FW_CFG_SYSFS_CMDLINE */
+
+static int __init fw_cfg_sysfs_init(void)
+{
+ int ret;
+
+ /* create /sys/firmware/qemu_fw_cfg/ top level directory */
+ fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
+ if (!fw_cfg_top_ko)
+ return -ENOMEM;
+
+ ret = platform_driver_register(&fw_cfg_sysfs_driver);
+ if (ret)
+ fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+
+ return ret;
+}
+
+static void __exit fw_cfg_sysfs_exit(void)
+{
+ platform_driver_unregister(&fw_cfg_sysfs_driver);
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+ platform_device_unregister(fw_cfg_cmdline_dev);
+#endif
+
+ /* clean up /sys/firmware/qemu_fw_cfg/ */
+ fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+}
+
+module_init(fw_cfg_sysfs_init);
+module_exit(fw_cfg_sysfs_exit);
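The PH_ADDR_SCAN_FMT parsing above is compact but subtle: memparse() consumes the leading "<size>", and a single sscanf() must then account for either one chunk (<base>) or three (<base>:<ctrl_off>:<data_off>), with %n proving that nothing trails the last chunk. A user-space sketch of the same parse (strtoull() stands in for memparse(), minus its K/M/G suffix handling, and the format string is inlined for a 64-bit phys_addr_t):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *arg = "2@0x510:0:1";	/* the x86 default from the header comment */
	char *str;
	long long base, ctrl = 0, data = 0;
	int processed, consumed = 0;

	/* memparse() equivalent: consume "<size>" and leave str at '@' */
	unsigned long long size = strtoull(arg, &str, 0);

	processed = sscanf(str, "@%lli%n:%lli:%lli%n",
			   &base, &consumed, &ctrl, &data, &consumed);

	/* exactly 1 or 3 chunks, and nothing may follow the last one */
	if (str[consumed] != '\0' || (processed != 1 && processed != 3))
		return 1;

	printf("size=%llu base=%#llx ctrl=%lld data=%lld (%d chunk%s)\n",
	       size, base, ctrl, data, processed, processed == 1 ? "" : "s");
	return 0;
}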
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c88dd24a4b1f..5f3429f0bf46 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -126,6 +126,16 @@ config GPIO_AMDPT
driver for GPIO functionality on Promontory IOHub
Requires ACPI ASL code to enumerate as a platform device.
+config GPIO_ATH79
+ tristate "Atheros AR71XX/AR724X/AR913X GPIO support"
+ default y if ATH79
+ depends on ATH79 || COMPILE_TEST
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Select this option to enable the GPIO driver for
+ Atheros AR71XX/AR724X/AR913X SoC devices.
+
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -256,10 +266,17 @@ config GPIO_LYNXPOINT
config GPIO_MB86S7X
bool "GPIO support for Fujitsu MB86S7x Platforms"
- depends on ARCH_MB86S7X
+ depends on ARCH_MB86S7X || COMPILE_TEST
help
Say yes here to support the GPIO controller in Fujitsu MB86S70 SoCs.
+config GPIO_MENZ127
+ tristate "MEN 16Z127 GPIO support"
+ depends on MCB
+ select GPIO_GENERIC
+ help
+ Say yes here to support the MEN 16Z127 GPIO Controller.
+
config GPIO_MM_LANTIQ
bool "Lantiq Memory mapped GPIOs"
depends on LANTIQ && SOC_XWAY
@@ -270,7 +287,7 @@ config GPIO_MM_LANTIQ
config GPIO_MOXART
bool "MOXART GPIO support"
- depends on ARCH_MOXART
+ depends on ARCH_MOXART || COMPILE_TEST
select GPIO_GENERIC
help
Select this option to enable GPIO driver for
@@ -281,12 +298,14 @@ config GPIO_MPC5200
depends on PPC_MPC52xx
config GPIO_MPC8XXX
- bool "MPC512x/MPC8xxx GPIO support"
+ bool "MPC512x/MPC8xxx/QorIQ GPIO support"
depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
- FSL_SOC_BOOKE || PPC_86xx
+ FSL_SOC_BOOKE || PPC_86xx || ARCH_LAYERSCAPE || ARM || \
+ COMPILE_TEST
+ select GPIO_GENERIC
help
Say Y here if you're going to use hardware that connects to the
- MPC512x/831x/834x/837x/8572/8610 GPIOs.
+ MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
config GPIO_MVEBU
def_bool y
@@ -339,7 +358,7 @@ config GPIO_PXA
config GPIO_RCAR
tristate "Renesas R-Car GPIO"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support GPIO on Renesas R-Car SoCs.
@@ -380,6 +399,14 @@ config GPIO_TB10X
select GENERIC_IRQ_CHIP
select OF_GPIO
+config GPIO_TS4800
+ tristate "TS-4800 DIO blocks and compatibles"
+ depends on OF_GPIO
+ depends on SOC_IMX51 || COMPILE_TEST
+ select GPIO_GENERIC
+ help
+ This driver supports TS-4800 FPGA GPIO controllers.
+
config GPIO_TZ1090
bool "Toumaz Xenif TZ1090 GPIO support"
depends on SOC_TZ1090
@@ -433,6 +460,7 @@ config GPIO_XGENE_SB
tristate "APM X-Gene GPIO standby controller support"
depends on ARCH_XGENE && OF_GPIO
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
This driver supports the GPIO block within the APM X-Gene
Standby Domain. Say yes here to enable the GPIO functionality.
@@ -487,6 +515,15 @@ endmenu
menu "Port-mapped I/O GPIO drivers"
depends on X86 # Unconditional I/O space access
+config GPIO_104_DIO_48E
+ tristate "ACCES 104-DIO-48E GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ Enables GPIO support for the ACCES 104-DIO-48E family. The base port
+ address for the device may be configured via the dio_48e_base module
+ parameter. The interrupt line number for the device may be configured
+ via the dio_48e_irq module parameter.
+
config GPIO_104_IDIO_16
tristate "ACCES 104-IDIO-16 GPIO support"
select GPIOLIB_IRQCHIP
@@ -506,10 +543,10 @@ config GPIO_104_IDI_48
via the idi_48_irq module parameter.
config GPIO_F7188X
- tristate "F71869, F71869A, F71882FG and F71889F GPIO support"
+ tristate "F71869, F71869A, F71882FG, F71889F and F81866 GPIO support"
help
This option enables support for GPIOs found on Fintek Super-I/O
- chips F71869, F71869A, F71882FG and F71889F.
+ chips F71869, F71869A, F71882FG, F71889F and F81866.
To compile this driver as a module, choose M here: the module will
be called f7188x-gpio.
@@ -570,6 +607,15 @@ config GPIO_TS5500
blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
LCD port.
+config GPIO_WS16C48
+ tristate "WinSystems WS16C48 GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ Enables GPIO support for the WinSystems WS16C48. The base port address
+ for the device may be configured via the ws16c48_base module
+ parameter. The interrupt line number for the device may be configured
+ via the ws16c48_irq module parameter.
+
endmenu
menu "I2C GPIO expanders"
@@ -702,6 +748,14 @@ config GPIO_SX150X
8 bits: sx1508q
16 bits: sx1509q
+config GPIO_TPIC2810
+ tristate "TPIC2810 8-Bit I2C GPO expander"
+ help
+ Say yes here to enable the GPO driver for the TI TPIC2810 chip.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-tpic2810.
+
endmenu
menu "MFD GPIO expanders"
@@ -844,6 +898,19 @@ config GPIO_TIMBERDALE
---help---
Add support for the GPIO IP in the timberdale FPGA.
+config GPIO_TPS65086
+ tristate "TI TPS65086 GPO"
+ depends on MFD_TPS65086
+ help
+ This driver supports the GPO on TI TPS65086x PMICs.
+
+config GPIO_TPS65218
+ tristate "TPS65218 GPIO"
+ depends on MFD_TPS65218
+ help
+ Select this option to enable GPIO driver for the TPS65218
+ chip family.
+
config GPIO_TPS6586X
bool "TPS6586X GPIO"
depends on MFD_TPS6586X
@@ -860,7 +927,7 @@ config GPIO_TPS65910
config GPIO_TPS65912
tristate "TI TPS65912 GPIO"
- depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+ depends on MFD_TPS65912
help
This driver supports the TPS65912 GPIO chip.
@@ -1011,6 +1078,12 @@ config GPIO_MC33880
SPI driver for Freescale MC33880 high-side/low-side switch.
This provides GPIO interface supporting inputs and outputs.
+config GPIO_PISOSR
+ tristate "Generic parallel-in/serial-out shift register"
+ help
+ GPIO driver for SPI-compatible parallel-in/serial-out shift
+ registers. These are input-only devices.
+
endmenu
menu "SPI or I2C GPIO expanders"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ece7d7cbdc80..1e0b74f3b1ed 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
+obj-$(CONFIG_GPIO_104_DIO_48E) += gpio-104-dio-48e.o
obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
obj-$(CONFIG_GPIO_104_IDI_48) += gpio-104-idi-48.o
obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
@@ -23,7 +24,7 @@ obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
-obj-$(CONFIG_ATH79) += gpio-ath79.o
+obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
+obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
@@ -75,6 +77,7 @@ obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o
obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
+obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
@@ -95,9 +98,13 @@ obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
+obj-$(CONFIG_GPIO_TPIC2810) += gpio-tpic2810.o
+obj-$(CONFIG_GPIO_TPS65086) += gpio-tps65086.o
+obj-$(CONFIG_GPIO_TPS65218) += gpio-tps65218.o
obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
+obj-$(CONFIG_GPIO_TS4800) += gpio-ts4800.o
obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
@@ -111,6 +118,7 @@ obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
+obj-$(CONFIG_GPIO_WS16C48) += gpio-ws16c48.o
obj-$(CONFIG_GPIO_XGENE) += gpio-xgene.o
obj-$(CONFIG_GPIO_XGENE_SB) += gpio-xgene-sb.o
obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 903fcf4d04a0..b760cbbb41d8 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -155,7 +155,7 @@ struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
suffixes[i]);
desc = fwnode_get_named_gpiod(child, prop_name);
- if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+ if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
break;
}
if (IS_ERR(desc)) {
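The condition change above widens the loop's early exit: before, only success or -EPROBE_DEFER ended the suffix iteration; now any result other than -ENOENT does, so a real lookup error is reported instead of being masked by a retry with the next suffix. A user-space sketch of the resulting control flow (lookup() and its return values are invented):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* invented lookup: the "gpios" property is absent, "gpio" resolves */
static long lookup(const char *suffix)
{
	if (!strcmp(suffix, "gpios"))
		return -ENOENT;	/* absent: worth trying the next suffix */
	return 42;		/* pretend descriptor handle */
}

int main(void)
{
	static const char *const suffixes[] = { "gpios", "gpio" };
	long desc = -ENOENT;

	for (int i = 0; i < 2; i++) {
		desc = lookup(suffixes[i]);
		/* mirrors the new test: stop unless the property was absent */
		if (desc >= 0 || desc != -ENOENT)
			break;
	}

	if (desc < 0)
		printf("lookup failed: %ld\n", desc);
	else
		printf("got descriptor %ld\n", desc);
	return 0;
}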
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
new file mode 100644
index 000000000000..448a903089ef
--- /dev/null
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -0,0 +1,430 @@
+/*
+ * GPIO driver for the ACCES 104-DIO-48E
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+static unsigned dio_48e_base;
+module_param(dio_48e_base, uint, 0);
+MODULE_PARM_DESC(dio_48e_base, "ACCES 104-DIO-48E base address");
+static unsigned dio_48e_irq;
+module_param(dio_48e_irq, uint, 0);
+MODULE_PARM_DESC(dio_48e_irq, "ACCES 104-DIO-48E interrupt line number");
+
+/**
+ * struct dio48e_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @io_state: bit I/O state (whether bit is set to input or output)
+ * @out_state: output bits state
+ * @control: Control registers state
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @base: base port address of the GPIO device
+ * @irq: Interrupt line number
+ * @irq_mask: I/O bits affected by interrupts
+ */
+struct dio48e_gpio {
+ struct gpio_chip chip;
+ unsigned char io_state[6];
+ unsigned char out_state[6];
+ unsigned char control[2];
+ spinlock_t lock;
+ unsigned base;
+ unsigned irq;
+ unsigned char irq_mask;
+};
+
+static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+
+ return !!(dio48egpio->io_state[port] & mask);
+}
+
+static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned io_port = offset / 8;
+ const unsigned control_port = io_port / 2;
+ const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
+ unsigned long flags;
+ unsigned control;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ /* Check if configuring Port C */
+ if (io_port == 2 || io_port == 5) {
+ /* Port C can be configured by nibble */
+ if (offset % 8 > 3) {
+ dio48egpio->io_state[io_port] |= 0xF0;
+ dio48egpio->control[control_port] |= BIT(3);
+ } else {
+ dio48egpio->io_state[io_port] |= 0x0F;
+ dio48egpio->control[control_port] |= BIT(0);
+ }
+ } else {
+ dio48egpio->io_state[io_port] |= 0xFF;
+ if (io_port == 0 || io_port == 3)
+ dio48egpio->control[control_port] |= BIT(4);
+ else
+ dio48egpio->control[control_port] |= BIT(1);
+ }
+
+ control = BIT(7) | dio48egpio->control[control_port];
+ outb(control, control_addr);
+ control &= ~BIT(7);
+ outb(control, control_addr);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+
+ return 0;
+}
+
+static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned io_port = offset / 8;
+ const unsigned control_port = io_port / 2;
+ const unsigned mask = BIT(offset % 8);
+ const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
+ const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
+ unsigned long flags;
+ unsigned control;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ /* Check if configuring Port C */
+ if (io_port == 2 || io_port == 5) {
+ /* Port C can be configured by nibble */
+ if (offset % 8 > 3) {
+ dio48egpio->io_state[io_port] &= 0x0F;
+ dio48egpio->control[control_port] &= ~BIT(3);
+ } else {
+ dio48egpio->io_state[io_port] &= 0xF0;
+ dio48egpio->control[control_port] &= ~BIT(0);
+ }
+ } else {
+ dio48egpio->io_state[io_port] &= 0x00;
+ if (io_port == 0 || io_port == 3)
+ dio48egpio->control[control_port] &= ~BIT(4);
+ else
+ dio48egpio->control[control_port] &= ~BIT(1);
+ }
+
+ if (value)
+ dio48egpio->out_state[io_port] |= mask;
+ else
+ dio48egpio->out_state[io_port] &= ~mask;
+
+ control = BIT(7) | dio48egpio->control[control_port];
+ outb(control, control_addr);
+
+ outb(dio48egpio->out_state[io_port], dio48egpio->base + out_port);
+
+ control &= ~BIT(7);
+ outb(control, control_addr);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+
+ return 0;
+}
+
+static int dio48e_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ const unsigned in_port = (port > 2) ? port + 1 : port;
+ unsigned long flags;
+ unsigned port_state;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ /* ensure that GPIO is set for input */
+ if (!(dio48egpio->io_state[port] & mask)) {
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ return -EINVAL;
+ }
+
+ port_state = inb(dio48egpio->base + in_port);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+
+ return !!(port_state & mask);
+}
+
+static void dio48e_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ const unsigned out_port = (port > 2) ? port + 1 : port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ if (value)
+ dio48egpio->out_state[port] |= mask;
+ else
+ dio48egpio->out_state[port] &= ~mask;
+
+ outb(dio48egpio->out_state[port], dio48egpio->base + out_port);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+}
+
+static void dio48e_irq_ack(struct irq_data *data)
+{
+}
+
+static void dio48e_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ unsigned long flags;
+
+ /* only bit 3 on each respective Port C supports interrupts */
+ if (offset != 19 && offset != 43)
+ return;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ if (offset == 19)
+ dio48egpio->irq_mask &= ~BIT(0);
+ else
+ dio48egpio->irq_mask &= ~BIT(1);
+
+ if (!dio48egpio->irq_mask)
+ /* disable interrupts */
+ inb(dio48egpio->base + 0xB);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+}
+
+static void dio48e_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ unsigned long flags;
+
+ /* only bit 3 on each respective Port C supports interrupts */
+ if (offset != 19 && offset != 43)
+ return;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ if (!dio48egpio->irq_mask) {
+ /* enable interrupts */
+ outb(0x00, dio48egpio->base + 0xF);
+ outb(0x00, dio48egpio->base + 0xB);
+ }
+
+ if (offset == 19)
+ dio48egpio->irq_mask |= BIT(0);
+ else
+ dio48egpio->irq_mask |= BIT(1);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+}
+
+static int dio48e_irq_set_type(struct irq_data *data, unsigned flow_type)
+{
+ const unsigned long offset = irqd_to_hwirq(data);
+
+ /* only bit 3 on each respective Port C supports interrupts */
+ if (offset != 19 && offset != 43)
+ return -EINVAL;
+
+ if (flow_type != IRQ_TYPE_NONE && flow_type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip dio48e_irqchip = {
+ .name = "104-dio-48e",
+ .irq_ack = dio48e_irq_ack,
+ .irq_mask = dio48e_irq_mask,
+ .irq_unmask = dio48e_irq_unmask,
+ .irq_set_type = dio48e_irq_set_type
+};
+
+static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
+{
+ struct dio48e_gpio *const dio48egpio = dev_id;
+ struct gpio_chip *const chip = &dio48egpio->chip;
+ const unsigned long irq_mask = dio48egpio->irq_mask;
+ unsigned long gpio;
+
+ for_each_set_bit(gpio, &irq_mask, 2)
+ generic_handle_irq(irq_find_mapping(chip->irqdomain,
+ 19 + gpio*24));
+
+ spin_lock(&dio48egpio->lock);
+
+ outb(0x00, dio48egpio->base + 0xF);
+
+ spin_unlock(&dio48egpio->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int __init dio48e_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dio48e_gpio *dio48egpio;
+ const unsigned base = dio_48e_base;
+ const unsigned extent = 16;
+ const char *const name = dev_name(dev);
+ int err;
+ const unsigned irq = dio_48e_irq;
+
+ dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
+ if (!dio48egpio)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base, extent, name)) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base, base + extent);
+ return -EBUSY;
+ }
+
+ dio48egpio->chip.label = name;
+ dio48egpio->chip.parent = dev;
+ dio48egpio->chip.owner = THIS_MODULE;
+ dio48egpio->chip.base = -1;
+ dio48egpio->chip.ngpio = 48;
+ dio48egpio->chip.get_direction = dio48e_gpio_get_direction;
+ dio48egpio->chip.direction_input = dio48e_gpio_direction_input;
+ dio48egpio->chip.direction_output = dio48e_gpio_direction_output;
+ dio48egpio->chip.get = dio48e_gpio_get;
+ dio48egpio->chip.set = dio48e_gpio_set;
+ dio48egpio->base = base;
+ dio48egpio->irq = irq;
+
+ spin_lock_init(&dio48egpio->lock);
+
+ dev_set_drvdata(dev, dio48egpio);
+
+ err = gpiochip_add_data(&dio48egpio->chip, dio48egpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+	/* initialize all GPIOs as output */
+ outb(0x80, base + 3);
+ outb(0x00, base);
+ outb(0x00, base + 1);
+ outb(0x00, base + 2);
+ outb(0x00, base + 3);
+ outb(0x80, base + 7);
+ outb(0x00, base + 4);
+ outb(0x00, base + 5);
+ outb(0x00, base + 6);
+ outb(0x00, base + 7);
+
+ /* disable IRQ by default */
+ inb(base + 0xB);
+
+ err = gpiochip_irqchip_add(&dio48egpio->chip, &dio48e_irqchip, 0,
+ handle_edge_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(dev, "Could not add irqchip (%d)\n", err);
+ goto err_gpiochip_remove;
+ }
+
+ err = request_irq(irq, dio48e_irq_handler, 0, name, dio48egpio);
+ if (err) {
+ dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ goto err_gpiochip_remove;
+ }
+
+ return 0;
+
+err_gpiochip_remove:
+ gpiochip_remove(&dio48egpio->chip);
+ return err;
+}
+
+static int dio48e_remove(struct platform_device *pdev)
+{
+ struct dio48e_gpio *const dio48egpio = platform_get_drvdata(pdev);
+
+ free_irq(dio48egpio->irq, dio48egpio);
+ gpiochip_remove(&dio48egpio->chip);
+
+ return 0;
+}
+
+static struct platform_device *dio48e_device;
+
+static struct platform_driver dio48e_driver = {
+ .driver = {
+ .name = "104-dio-48e"
+ },
+ .remove = dio48e_remove
+};
+
+static void __exit dio48e_exit(void)
+{
+ platform_device_unregister(dio48e_device);
+ platform_driver_unregister(&dio48e_driver);
+}
+
+static int __init dio48e_init(void)
+{
+ int err;
+
+ dio48e_device = platform_device_alloc(dio48e_driver.driver.name, -1);
+ if (!dio48e_device)
+ return -ENOMEM;
+
+ err = platform_device_add(dio48e_device);
+ if (err)
+ goto err_platform_device;
+
+ err = platform_driver_probe(&dio48e_driver, dio48e_probe);
+ if (err)
+ goto err_platform_driver;
+
+ return 0;
+
+err_platform_driver:
+ platform_device_del(dio48e_device);
+err_platform_device:
+ platform_device_put(dio48e_device);
+ return err;
+}
+
+module_init(dio48e_init);
+module_exit(dio48e_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES 104-DIO-48E GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 52eed328ce99..e37cd4cdda35 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -39,7 +39,6 @@ MODULE_PARM_DESC(idi_48_irq, "ACCES 104-IDI-48 interrupt line number");
* @ack_lock: synchronization lock to prevent IRQ handler race conditions
* @irq_mask: input bits affected by interrupts
* @base: base port address of the GPIO device
- * @extent: extent of port address region of the GPIO device
* @irq: Interrupt line number
* @cos_enb: Change-Of-State IRQ enable boundaries mask
*/
@@ -49,7 +48,6 @@ struct idi_48_gpio {
spinlock_t ack_lock;
unsigned char irq_mask[6];
unsigned base;
- unsigned extent;
unsigned irq;
unsigned char cos_enb;
};
@@ -227,11 +225,10 @@ static int __init idi_48_probe(struct platform_device *pdev)
if (!idi48gpio)
return -ENOMEM;
- if (!request_region(base, extent, name)) {
- dev_err(dev, "Unable to lock %s port addresses (0x%X-0x%X)\n",
- name, base, base + extent);
- err = -EBUSY;
- goto err_lock_io_port;
+ if (!devm_request_region(dev, base, extent, name)) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base, base + extent);
+ return -EBUSY;
}
idi48gpio->chip.label = name;
@@ -243,7 +240,6 @@ static int __init idi_48_probe(struct platform_device *pdev)
idi48gpio->chip.direction_input = idi_48_gpio_direction_input;
idi48gpio->chip.get = idi_48_gpio_get;
idi48gpio->base = base;
- idi48gpio->extent = extent;
idi48gpio->irq = irq;
spin_lock_init(&idi48gpio->lock);
@@ -253,7 +249,7 @@ static int __init idi_48_probe(struct platform_device *pdev)
err = gpiochip_add_data(&idi48gpio->chip, idi48gpio);
if (err) {
dev_err(dev, "GPIO registering failed (%d)\n", err);
- goto err_gpio_register;
+ return err;
}
/* Disable IRQ by default */
@@ -264,23 +260,20 @@ static int __init idi_48_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_TYPE_NONE);
if (err) {
dev_err(dev, "Could not add irqchip (%d)\n", err);
- goto err_gpiochip_irqchip_add;
+ goto err_gpiochip_remove;
}
- err = request_irq(irq, idi_48_irq_handler, 0, name, idi48gpio);
+ err = request_irq(irq, idi_48_irq_handler, IRQF_SHARED, name,
+ idi48gpio);
if (err) {
dev_err(dev, "IRQ handler registering failed (%d)\n", err);
- goto err_request_irq;
+ goto err_gpiochip_remove;
}
return 0;
-err_request_irq:
-err_gpiochip_irqchip_add:
+err_gpiochip_remove:
gpiochip_remove(&idi48gpio->chip);
-err_gpio_register:
- release_region(base, extent);
-err_lock_io_port:
return err;
}
@@ -290,7 +283,6 @@ static int idi_48_remove(struct platform_device *pdev)
free_irq(idi48gpio->irq, idi48gpio);
gpiochip_remove(&idi48gpio->chip);
- release_region(idi48gpio->base, idi48gpio->extent);
return 0;
}
@@ -340,4 +332,4 @@ module_exit(idi_48_exit);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDI-48 GPIO driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
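
The hunk above shows the recurring pattern of this series: switching to
devm_request_region() ties the I/O region's lifetime to the device, so
the probe() unwind labels and the release_region() call in remove()
both disappear. A minimal sketch of the resulting shape (driver name
and addresses are illustrative, not from the patch):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/* released automatically when the device is unbound */
	if (!devm_request_region(dev, 0x300, 16, dev_name(dev)))
		return -EBUSY;

	/* later failures may simply return; no unwind needed */
	return 0;
}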
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 4d69b50b2d84..ecc85fe9323d 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -38,7 +38,6 @@ MODULE_PARM_DESC(idio_16_irq, "ACCES 104-IDIO-16 interrupt line number");
* @lock: synchronization lock to prevent I/O race conditions
* @irq_mask: I/O bits affected by interrupts
* @base: base port address of the GPIO device
- * @extent: extent of port address region of the GPIO device
* @irq: Interrupt line number
* @out_state: output bits state
*/
@@ -47,7 +46,6 @@ struct idio_16_gpio {
spinlock_t lock;
unsigned long irq_mask;
unsigned base;
- unsigned extent;
unsigned irq;
unsigned out_state;
};
@@ -201,11 +199,10 @@ static int __init idio_16_probe(struct platform_device *pdev)
if (!idio16gpio)
return -ENOMEM;
- if (!request_region(base, extent, name)) {
- dev_err(dev, "Unable to lock %s port addresses (0x%X-0x%X)\n",
- name, base, base + extent);
- err = -EBUSY;
- goto err_lock_io_port;
+ if (!devm_request_region(dev, base, extent, name)) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base, base + extent);
+ return -EBUSY;
}
idio16gpio->chip.label = name;
@@ -219,7 +216,6 @@ static int __init idio_16_probe(struct platform_device *pdev)
idio16gpio->chip.get = idio_16_gpio_get;
idio16gpio->chip.set = idio_16_gpio_set;
idio16gpio->base = base;
- idio16gpio->extent = extent;
idio16gpio->irq = irq;
idio16gpio->out_state = 0xFFFF;
@@ -230,7 +226,7 @@ static int __init idio_16_probe(struct platform_device *pdev)
err = gpiochip_add_data(&idio16gpio->chip, idio16gpio);
if (err) {
dev_err(dev, "GPIO registering failed (%d)\n", err);
- goto err_gpio_register;
+ return err;
}
/* Disable IRQ by default */
@@ -241,23 +237,19 @@ static int __init idio_16_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_TYPE_NONE);
if (err) {
dev_err(dev, "Could not add irqchip (%d)\n", err);
- goto err_gpiochip_irqchip_add;
+ goto err_gpiochip_remove;
}
err = request_irq(irq, idio_16_irq_handler, 0, name, idio16gpio);
if (err) {
dev_err(dev, "IRQ handler registering failed (%d)\n", err);
- goto err_request_irq;
+ goto err_gpiochip_remove;
}
return 0;
-err_request_irq:
-err_gpiochip_irqchip_add:
+err_gpiochip_remove:
gpiochip_remove(&idio16gpio->chip);
-err_gpio_register:
- release_region(base, extent);
-err_lock_io_port:
return err;
}
@@ -267,7 +259,6 @@ static int idio_16_remove(struct platform_device *pdev)
free_irq(idio16gpio->irq, idio16gpio);
gpiochip_remove(&idio16gpio->chip);
- release_region(idio16gpio->base, idio16gpio->extent);
return 0;
}
@@ -317,4 +308,4 @@ module_exit(idio_16_exit);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 372b0e01adc6..0475e8ec96d0 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -140,15 +140,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- return gpiochip_add_data(&priv->gc, priv);
-}
-
-static int mmio_74xx_gpio_remove(struct platform_device *pdev)
-{
- struct mmio_74xx_gpio_priv *priv = platform_get_drvdata(pdev);
-
- gpiochip_remove(&priv->gc);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
}
static struct platform_driver mmio_74xx_gpio_driver = {
@@ -157,7 +149,6 @@ static struct platform_driver mmio_74xx_gpio_driver = {
.of_match_table = mmio_74xx_gpio_ids,
},
.probe = mmio_74xx_gpio_probe,
- .remove = mmio_74xx_gpio_remove,
};
module_platform_driver(mmio_74xx_gpio_driver);
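
Many drivers below get the same treatment as gpio-74xx-mmio here: when
remove() did nothing but call gpiochip_remove(), converting to
devm_gpiochip_add_data() lets the whole callback be deleted. A minimal
sketch of the pattern (struct and names are illustrative, not from the
patch):

#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct example_priv {
	struct gpio_chip gc;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* ... fill in priv->gc label, ngpio and callbacks ... */

	/* the chip is unregistered automatically on unbind */
	return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
}

static struct platform_driver example_driver = {
	.driver = { .name = "example-gpio" },
	.probe = example_probe,
	/* no .remove: devm handles teardown */
};
module_platform_driver(example_driver);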
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index fb5b47b69f14..8ff7b0d3eac6 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -265,7 +265,7 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios)
chip->of_node = chip->parent->of_node;
chip->owner = THIS_MODULE;
- err = gpiochip_add_data(chip, adnp);
+ err = devm_gpiochip_add_data(&adnp->client->dev, chip, adnp);
if (err)
return err;
@@ -520,14 +520,6 @@ static int adnp_i2c_probe(struct i2c_client *client,
return 0;
}
-static int adnp_i2c_remove(struct i2c_client *client)
-{
- struct adnp *adnp = i2c_get_clientdata(client);
-
- gpiochip_remove(&adnp->gpio);
- return 0;
-}
-
static const struct i2c_device_id adnp_i2c_id[] = {
{ "gpio-adnp" },
{ },
@@ -546,7 +538,6 @@ static struct i2c_driver adnp_i2c_driver = {
.of_match_table = adnp_of_match,
},
.probe = adnp_i2c_probe,
- .remove = adnp_i2c_remove,
.id_table = adnp_i2c_id,
};
module_i2c_driver(adnp_i2c_driver);
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 4fa7ff1fec9a..abf199609546 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -153,7 +153,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
goto err;
}
- ret = gpiochip_add_data(&dev->gpio_chip, dev);
+ ret = devm_gpiochip_add_data(&pdev->dev, &dev->gpio_chip, dev);
if (ret)
goto err;
@@ -164,22 +164,11 @@ err:
return ret;
}
-static int adp5520_gpio_remove(struct platform_device *pdev)
-{
- struct adp5520_gpio *dev;
-
- dev = platform_get_drvdata(pdev);
- gpiochip_remove(&dev->gpio_chip);
-
- return 0;
-}
-
static struct platform_driver adp5520_gpio_driver = {
.driver = {
.name = "adp5520-gpio",
},
.probe = adp5520_gpio_probe,
- .remove = adp5520_gpio_remove,
};
module_platform_driver(adp5520_gpio_driver);
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 19a0eba1e942..c0f718b12317 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -414,7 +414,7 @@ static int adp5588_gpio_probe(struct i2c_client *client,
}
}
- ret = gpiochip_add_data(&dev->gpio_chip, dev);
+ ret = devm_gpiochip_add_data(&client->dev, &dev->gpio_chip, dev);
if (ret)
goto err_irq;
@@ -457,8 +457,6 @@ static int adp5588_gpio_remove(struct i2c_client *client)
if (dev->irq_base)
free_irq(dev->client->irq, dev);
- gpiochip_remove(&dev->gpio_chip);
-
return 0;
}
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index c7040fffc5b4..30ad7d7c1678 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -25,6 +25,7 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
@@ -204,7 +205,8 @@ found:
gp.pmbase &= 0x0000FF00;
if (gp.pmbase == 0)
goto out;
- if (!request_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE, "AMD GPIO")) {
+ if (!devm_request_region(&pdev->dev, gp.pmbase + PMBASE_OFFSET,
+ PMBASE_SIZE, "AMD GPIO")) {
dev_err(&pdev->dev, "AMD GPIO region 0x%x already in use!\n",
gp.pmbase + PMBASE_OFFSET);
err = -EBUSY;
@@ -213,7 +215,6 @@ found:
gp.pm = ioport_map(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
if (!gp.pm) {
dev_err(&pdev->dev, "Couldn't map io port into io memory\n");
- release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
err = -ENOMEM;
goto out;
}
@@ -228,7 +229,6 @@ found:
printk(KERN_ERR "GPIO registering failed (%d)\n",
err);
ioport_unmap(gp.pm);
- release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
goto out;
}
out:
@@ -239,7 +239,6 @@ static void __exit amd_gpio_exit(void)
{
gpiochip_remove(&gp.chip);
ioport_unmap(gp.pm);
- release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
}
module_init(amd_gpio_init);
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index e910c1f41d93..991370494922 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -132,7 +132,8 @@ static int arizona_gpio_probe(struct platform_device *pdev)
else
arizona_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&arizona_gpio->gpio_chip, arizona_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &arizona_gpio->gpio_chip,
+ arizona_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
@@ -147,18 +148,9 @@ err:
return ret;
}
-static int arizona_gpio_remove(struct platform_device *pdev)
-{
- struct arizona_gpio *arizona_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&arizona_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver arizona_gpio_driver = {
.driver.name = "arizona-gpio",
.probe = arizona_gpio_probe,
- .remove = arizona_gpio_remove,
};
module_platform_driver(arizona_gpio_driver);
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index d13dd133a907..c4f4cddc7c1a 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -1,12 +1,11 @@
/*
* Atheros AR71XX/AR724X/AR913X GPIO API support
*
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
* Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
- * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -15,118 +14,204 @@
#include <linux/gpio/driver.h>
#include <linux/platform_data/gpio-ath79.h>
#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#define AR71XX_GPIO_REG_OE 0x00
+#define AR71XX_GPIO_REG_IN 0x04
+#define AR71XX_GPIO_REG_SET 0x0c
+#define AR71XX_GPIO_REG_CLEAR 0x10
-#include <asm/mach-ath79/ar71xx_regs.h>
+#define AR71XX_GPIO_REG_INT_ENABLE 0x14
+#define AR71XX_GPIO_REG_INT_TYPE 0x18
+#define AR71XX_GPIO_REG_INT_POLARITY 0x1c
+#define AR71XX_GPIO_REG_INT_PENDING 0x20
+#define AR71XX_GPIO_REG_INT_MASK 0x24
struct ath79_gpio_ctrl {
- struct gpio_chip chip;
+ struct gpio_chip gc;
void __iomem *base;
spinlock_t lock;
+ unsigned long both_edges;
};
-static void ath79_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static struct ath79_gpio_ctrl *irq_data_to_ath79_gpio(struct irq_data *data)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- if (value)
- __raw_writel(BIT(gpio), ctrl->base + AR71XX_GPIO_REG_SET);
- else
- __raw_writel(BIT(gpio), ctrl->base + AR71XX_GPIO_REG_CLEAR);
+ return container_of(gc, struct ath79_gpio_ctrl, gc);
}
-static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ return readl(ctrl->base + reg);
+}
- return (__raw_readl(ctrl->base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
+static void ath79_gpio_write(struct ath79_gpio_ctrl *ctrl,
+ unsigned reg, u32 val)
+{
+ return writel(val, ctrl->base + reg);
}
-static int ath79_gpio_direction_input(struct gpio_chip *chip,
- unsigned offset)
+static bool ath79_gpio_update_bits(
+ struct ath79_gpio_ctrl *ctrl, unsigned reg, u32 mask, u32 bits)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ u32 old_val, new_val;
+
+ old_val = ath79_gpio_read(ctrl, reg);
+ new_val = (old_val & ~mask) | (bits & mask);
+
+ if (new_val != old_val)
+ ath79_gpio_write(ctrl, reg, new_val);
+
+ return new_val != old_val;
+}
+
+static void ath79_gpio_irq_unmask(struct irq_data *data)
+{
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
unsigned long flags;
spin_lock_irqsave(&ctrl->lock, flags);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+}
- __raw_writel(
- __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
- ctrl->base + AR71XX_GPIO_REG_OE);
+static void ath79_gpio_irq_mask(struct irq_data *data)
+{
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
+ unsigned long flags;
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
spin_unlock_irqrestore(&ctrl->lock, flags);
-
- return 0;
}
-static int ath79_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+static void ath79_gpio_irq_enable(struct irq_data *data)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
unsigned long flags;
spin_lock_irqsave(&ctrl->lock, flags);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+}
- if (value)
- __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_SET);
- else
- __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
-
- __raw_writel(
- __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) | BIT(offset),
- ctrl->base + AR71XX_GPIO_REG_OE);
+static void ath79_gpio_irq_disable(struct irq_data *data)
+{
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
+ unsigned long flags;
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0);
spin_unlock_irqrestore(&ctrl->lock, flags);
-
- return 0;
}
-static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int ath79_gpio_irq_set_type(struct irq_data *data,
+ unsigned int flow_type)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
+ u32 type = 0, polarity = 0;
unsigned long flags;
+ bool disabled;
+
+ switch (flow_type) {
+	case IRQ_TYPE_EDGE_RISING:
+		polarity |= mask;
+		/* fall through */
+	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_EDGE_BOTH:
+		break;
+
+	case IRQ_TYPE_LEVEL_HIGH:
+		polarity |= mask;
+		/* fall through */
+	case IRQ_TYPE_LEVEL_LOW:
+		type |= mask;
+		break;
+
+ default:
+ return -EINVAL;
+ }
spin_lock_irqsave(&ctrl->lock, flags);
- __raw_writel(
- __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) | BIT(offset),
- ctrl->base + AR71XX_GPIO_REG_OE);
+ if (flow_type == IRQ_TYPE_EDGE_BOTH) {
+ ctrl->both_edges |= mask;
+ polarity = ~ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
+ } else {
+ ctrl->both_edges &= ~mask;
+ }
+
+	/*
+	 * As the IRQ configuration can't be loaded atomically, we
+	 * have to disable the interrupt while the configuration state
+	 * is invalid.
+	 */
+ disabled = ath79_gpio_update_bits(
+ ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0);
+
+ ath79_gpio_update_bits(
+ ctrl, AR71XX_GPIO_REG_INT_TYPE, mask, type);
+ ath79_gpio_update_bits(
+ ctrl, AR71XX_GPIO_REG_INT_POLARITY, mask, polarity);
+
+ if (disabled)
+ ath79_gpio_update_bits(
+ ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
}
-static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
+static struct irq_chip ath79_gpio_irqchip = {
+ .name = "gpio-ath79",
+ .irq_enable = ath79_gpio_irq_enable,
+ .irq_disable = ath79_gpio_irq_disable,
+ .irq_mask = ath79_gpio_irq_mask,
+ .irq_unmask = ath79_gpio_irq_unmask,
+ .irq_set_type = ath79_gpio_irq_set_type,
+};
+
+static void ath79_gpio_irq_handler(struct irq_desc *desc)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
- unsigned long flags;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct ath79_gpio_ctrl *ctrl =
+ container_of(gc, struct ath79_gpio_ctrl, gc);
+ unsigned long flags, pending;
+ u32 both_edges, state;
+ int irq;
+
+ chained_irq_enter(irqchip, desc);
spin_lock_irqsave(&ctrl->lock, flags);
- if (value)
- __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_SET);
- else
- __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
+ pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
- __raw_writel(
- __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
- ctrl->base + AR71XX_GPIO_REG_OE);
+	/* Update the polarity of the both-edges IRQs */
+ both_edges = ctrl->both_edges & pending;
+ if (both_edges) {
+ state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
+ both_edges, ~state);
+ }
spin_unlock_irqrestore(&ctrl->lock, flags);
- return 0;
-}
+ if (pending) {
+ for_each_set_bit(irq, &pending, gc->ngpio)
+ generic_handle_irq(
+ irq_linear_revmap(gc->irqdomain, irq));
+ }
-static const struct gpio_chip ath79_gpio_chip = {
- .label = "ath79",
- .get = ath79_gpio_get_value,
- .set = ath79_gpio_set_value,
- .direction_input = ath79_gpio_direction_input,
- .direction_output = ath79_gpio_direction_output,
- .base = 0,
-};
+ chained_irq_exit(irqchip, desc);
+}
static const struct of_device_id ath79_gpio_of_match[] = {
{ .compatible = "qca,ar7100-gpio" },
@@ -147,6 +232,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
+ platform_set_drvdata(pdev, ctrl);
if (np) {
err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
@@ -154,10 +240,6 @@ static int ath79_gpio_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "ngpios property is not valid\n");
return err;
}
- if (ath79_gpio_count >= 32) {
- dev_err(&pdev->dev, "ngpios must be less than 32\n");
- return -EINVAL;
- }
oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
} else if (pdata) {
ath79_gpio_count = pdata->ngpios;
@@ -167,6 +249,11 @@ static int ath79_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (ath79_gpio_count >= 32) {
+ dev_err(&pdev->dev, "ngpios must be less than 32\n");
+ return -EINVAL;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctrl->base = devm_ioremap_nocache(
&pdev->dev, res->start, resource_size(res));
@@ -174,21 +261,53 @@ static int ath79_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&ctrl->lock);
- memcpy(&ctrl->chip, &ath79_gpio_chip, sizeof(ctrl->chip));
- ctrl->chip.parent = &pdev->dev;
- ctrl->chip.ngpio = ath79_gpio_count;
- if (oe_inverted) {
- ctrl->chip.direction_input = ar934x_gpio_direction_input;
- ctrl->chip.direction_output = ar934x_gpio_direction_output;
+ err = bgpio_init(&ctrl->gc, &pdev->dev, 4,
+ ctrl->base + AR71XX_GPIO_REG_IN,
+ ctrl->base + AR71XX_GPIO_REG_SET,
+ ctrl->base + AR71XX_GPIO_REG_CLEAR,
+ oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
+ oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
+ 0);
+ if (err) {
+ dev_err(&pdev->dev, "bgpio_init failed\n");
+ return err;
}
+ /* Use base 0 to stay compatible with legacy platforms */
+ ctrl->gc.base = 0;
- err = gpiochip_add_data(&ctrl->chip, ctrl);
+ err = gpiochip_add_data(&ctrl->gc, ctrl);
if (err) {
dev_err(&pdev->dev,
"cannot add AR71xx GPIO chip, error=%d", err);
return err;
}
+ if (np && !of_property_read_bool(np, "interrupt-controller"))
+ return 0;
+
+ err = gpiochip_irqchip_add(&ctrl->gc, &ath79_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add gpiochip_irqchip\n");
+ goto gpiochip_remove;
+ }
+
+ gpiochip_set_chained_irqchip(&ctrl->gc, &ath79_gpio_irqchip,
+ platform_get_irq(pdev, 0),
+ ath79_gpio_irq_handler);
+
+ return 0;
+
+gpiochip_remove:
+ gpiochip_remove(&ctrl->gc);
+ return err;
+}
+
+static int ath79_gpio_remove(struct platform_device *pdev)
+{
+ struct ath79_gpio_ctrl *ctrl = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&ctrl->gc);
return 0;
}
@@ -198,6 +317,7 @@ static struct platform_driver ath79_gpio_driver = {
.of_match_table = ath79_gpio_of_match,
},
.probe = ath79_gpio_probe,
+ .remove = ath79_gpio_remove,
};
module_platform_driver(ath79_gpio_driver);
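
Two techniques in the ath79 rework above are worth spelling out. First,
bgpio_init() wires up the IN/SET/CLEAR registers directly, passing the
OE register as either the direction-out or direction-in argument
depending on oe_inverted. Second, both-edge interrupts are emulated in
software, since the hardware latches only one polarity at a time. A
sketch of the re-arm step using the driver's own helpers (the wrapper
function is illustrative, not part of the patch):

/* After a both-edges line fires, point its polarity at the opposite
 * transition: lines currently high wait for a falling edge, lines
 * currently low wait for a rising edge.
 */
static void ath79_rearm_both_edges(struct ath79_gpio_ctrl *ctrl,
				   u32 both_edges)
{
	u32 state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);

	ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
			       both_edges, ~state);
}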
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index b6c5abe85daf..2fd38d598f3d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -630,7 +630,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
bcm_kona_gpio_reset(kona_gpio);
- ret = gpiochip_add_data(chip, kona_gpio);
+ ret = devm_gpiochip_add_data(dev, chip, kona_gpio);
if (ret < 0) {
dev_err(dev, "Couldn't add GPIO chip -- %d\n", ret);
goto err_irq_domain;
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index d7644251e869..42d51c59ed50 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -233,17 +233,14 @@ static void brcmstb_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct list_head *pos;
+ struct brcmstb_gpio_bank *bank;
/* Interrupts weren't properly cleared during probe */
BUG_ON(!priv || !chip);
chained_irq_enter(chip, desc);
- list_for_each(pos, &priv->bank_list) {
- struct brcmstb_gpio_bank *bank =
- list_entry(pos, struct brcmstb_gpio_bank, node);
+ list_for_each_entry(bank, &priv->bank_list, node)
brcmstb_gpio_irq_bank_handler(bank);
- }
chained_irq_exit(chip, desc);
}
@@ -280,7 +277,6 @@ static int brcmstb_gpio_sanity_check_banks(struct device *dev,
static int brcmstb_gpio_remove(struct platform_device *pdev)
{
struct brcmstb_gpio_priv *priv = platform_get_drvdata(pdev);
- struct list_head *pos;
struct brcmstb_gpio_bank *bank;
int ret = 0;
@@ -293,10 +289,9 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
* You can lose return values below, but we report all errors, and it's
* more important to actually perform all of the steps.
*/
- list_for_each(pos, &priv->bank_list) {
- bank = list_entry(pos, struct brcmstb_gpio_bank, node);
+ list_for_each_entry(bank, &priv->bank_list, node)
gpiochip_remove(&bank->gc);
- }
+
if (priv->reboot_notifier.notifier_call) {
ret = unregister_reboot_notifier(&priv->reboot_notifier);
if (ret)
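
For comparison, the brcmstb iterator conversion above side by side; the
typed list_for_each_entry() folds the list_entry() container lookup
into the loop header:

	/* before: untyped cursor plus an explicit container lookup */
	struct list_head *pos;

	list_for_each(pos, &priv->bank_list) {
		struct brcmstb_gpio_bank *bank =
			list_entry(pos, struct brcmstb_gpio_bank, node);
		brcmstb_gpio_irq_bank_handler(bank);
	}

	/* after: the iterator performs the container_of() itself */
	struct brcmstb_gpio_bank *bank;

	list_for_each_entry(bank, &priv->bank_list, node)
		brcmstb_gpio_irq_bank_handler(bank);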
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index c84f9551f108..5a690256af9b 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -67,15 +67,7 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
platform_set_drvdata(pdev, gc);
- return gpiochip_add_data(gc, NULL);
-}
-
-static int clps711x_gpio_remove(struct platform_device *pdev)
-{
- struct gpio_chip *gc = platform_get_drvdata(pdev);
-
- gpiochip_remove(gc);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}
static const struct of_device_id __maybe_unused clps711x_gpio_ids[] = {
@@ -90,7 +82,6 @@ static struct platform_driver clps711x_gpio_driver = {
.of_match_table = of_match_ptr(clps711x_gpio_ids),
},
.probe = clps711x_gpio_probe,
- .remove = clps711x_gpio_remove,
};
module_platform_driver(clps711x_gpio_driver);
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 7865ef0d3352..7c446d118cd6 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -345,7 +345,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
cg->chip.dbg_show = crystalcove_gpio_dbg_show;
cg->regmap = pmic->regmap;
- retval = gpiochip_add_data(&cg->chip, cg);
+ retval = devm_gpiochip_add_data(&pdev->dev, &cg->chip, cg);
if (retval) {
dev_warn(&pdev->dev, "add gpio chip error: %d\n", retval);
return retval;
@@ -359,14 +359,10 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
if (retval) {
dev_warn(&pdev->dev, "request irq failed: %d\n", retval);
- goto out_remove_gpio;
+ return retval;
}
return 0;
-
-out_remove_gpio:
- gpiochip_remove(&cg->chip);
- return retval;
}
static int crystalcove_gpio_remove(struct platform_device *pdev)
@@ -374,7 +370,6 @@ static int crystalcove_gpio_remove(struct platform_device *pdev)
struct crystalcove_gpio *cg = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
- gpiochip_remove(&cg->chip);
if (irq >= 0)
free_irq(irq, cg);
return 0;
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index eccb712e09fb..90278b19aa0e 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -320,13 +320,13 @@ static int cs5535_gpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "can't fetch device resource info\n");
- goto done;
+ return err;
}
if (!devm_request_region(&pdev->dev, res->start, resource_size(res),
pdev->name)) {
dev_err(&pdev->dev, "can't request region\n");
- goto done;
+ return err;
}
/* set up the driver-specific struct */
@@ -348,19 +348,10 @@ static int cs5535_gpio_probe(struct platform_device *pdev)
mask_orig, mask);
/* finally, register with the generic GPIO API */
- err = gpiochip_add_data(&cs5535_gpio_chip.chip, &cs5535_gpio_chip);
+ err = devm_gpiochip_add_data(&pdev->dev, &cs5535_gpio_chip.chip,
+ &cs5535_gpio_chip);
if (err)
- goto done;
-
- return 0;
-
-done:
- return err;
-}
-
-static int cs5535_gpio_remove(struct platform_device *pdev)
-{
- gpiochip_remove(&cs5535_gpio_chip.chip);
+ return err;
return 0;
}
@@ -370,7 +361,6 @@ static struct platform_driver cs5535_gpio_driver = {
.name = DRV_NAME,
},
.probe = cs5535_gpio_probe,
- .remove = cs5535_gpio_remove,
};
module_platform_driver(cs5535_gpio_driver);
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index f9b3247ad14b..e29553b7ccdb 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -214,7 +214,7 @@ static int da9052_gpio_probe(struct platform_device *pdev)
if (pdata && pdata->gpio_base)
gpio->gp.base = pdata->gpio_base;
- ret = gpiochip_add_data(&gpio->gp, gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -225,17 +225,8 @@ static int da9052_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int da9052_gpio_remove(struct platform_device *pdev)
-{
- struct da9052_gpio *gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&gpio->gp);
- return 0;
-}
-
static struct platform_driver da9052_gpio_driver = {
.probe = da9052_gpio_probe,
- .remove = da9052_gpio_remove,
.driver = {
.name = "da9052-gpio",
},
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 18210fb2cb13..2c2c18dc6c4f 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -151,31 +151,19 @@ static int da9055_gpio_probe(struct platform_device *pdev)
if (pdata && pdata->gpio_base)
gpio->gp.base = pdata->gpio_base;
- ret = gpiochip_add_data(&gpio->gp, gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- goto err_mem;
+ return ret;
}
platform_set_drvdata(pdev, gpio);
return 0;
-
-err_mem:
- return ret;
-}
-
-static int da9055_gpio_remove(struct platform_device *pdev)
-{
- struct da9055_gpio *gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&gpio->gp);
- return 0;
}
static struct platform_driver da9055_gpio_driver = {
.probe = da9055_gpio_probe,
- .remove = da9055_gpio_remove,
.driver = {
.name = "da9055-gpio",
},
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index cd007a67b302..dd262f00295d 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -258,6 +258,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
spin_lock_init(&chips[i].lock);
regs = gpio2regs(base);
+ if (!regs)
+ return -ENXIO;
chips[i].regs = regs;
chips[i].set_data = &regs->set_data;
chips[i].clr_data = &regs->clr_data;
@@ -433,8 +435,7 @@ static struct irq_chip *davinci_gpio_get_irq_chip(unsigned int irq)
{
static struct irq_chip_type gpio_unbanked;
- gpio_unbanked = *container_of(irq_get_chip(irq),
- struct irq_chip_type, chip);
+ gpio_unbanked = *irq_data_get_chip_type(irq_get_irq_data(irq));
return &gpio_unbanked.chip;
};
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index e11a7d126e74..f7a60a441e95 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -479,40 +479,32 @@ static int dln2_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dln2);
- ret = gpiochip_add_data(&dln2->gpio, dln2);
+ ret = devm_gpiochip_add_data(dev, &dln2->gpio, dln2);
if (ret < 0) {
dev_err(dev, "failed to add gpio chip: %d\n", ret);
- goto out;
+ return ret;
}
ret = gpiochip_irqchip_add(&dln2->gpio, &dln2_gpio_irqchip, 0,
handle_simple_irq, IRQ_TYPE_NONE);
if (ret < 0) {
dev_err(dev, "failed to add irq chip: %d\n", ret);
- goto out_gpiochip_remove;
+ return ret;
}
ret = dln2_register_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV,
dln2_gpio_event);
if (ret) {
dev_err(dev, "failed to register event cb: %d\n", ret);
- goto out_gpiochip_remove;
+ return ret;
}
return 0;
-
-out_gpiochip_remove:
- gpiochip_remove(&dln2->gpio);
-out:
- return ret;
}
static int dln2_gpio_remove(struct platform_device *pdev)
{
- struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
-
dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
- gpiochip_remove(&dln2->gpio);
return 0;
}
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index ad279078fed7..d054219e18b9 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -339,7 +339,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
gc->to_irq = ep93xx_gpio_to_irq;
}
- return gpiochip_add_data(gc, NULL);
+ return devm_gpiochip_add_data(dev, gc, NULL);
}
static int ep93xx_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index d62fd6bbaf82..daac2d480db1 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -1,5 +1,5 @@
/*
- * GPIO driver for Fintek Super-I/O F71869, F71869A, F71882 and F71889
+ * GPIO driver for Fintek Super-I/O F71869, F71869A, F71882, F71889 and F81866
*
* Copyright (C) 2010-2013 LaCie
*
@@ -36,14 +36,16 @@
#define SIO_F71869A_ID 0x1007 /* F71869A chipset ID */
#define SIO_F71882_ID 0x0541 /* F71882 chipset ID */
#define SIO_F71889_ID 0x0909 /* F71889 chipset ID */
+#define SIO_F81866_ID 0x1010 /* F81866 chipset ID */
-enum chips { f71869, f71869a, f71882fg, f71889f };
+enum chips { f71869, f71869a, f71882fg, f71889f, f81866 };
static const char * const f7188x_names[] = {
"f71869",
"f71869a",
"f71882fg",
"f71889f",
+ "f81866",
};
struct f7188x_sio {
@@ -190,6 +192,18 @@ static struct f7188x_gpio_bank f71889_gpio_bank[] = {
F7188X_GPIO_BANK(70, 8, 0x80),
};
+static struct f7188x_gpio_bank f81866_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 8, 0xF0),
+ F7188X_GPIO_BANK(10, 8, 0xE0),
+ F7188X_GPIO_BANK(20, 8, 0xD0),
+ F7188X_GPIO_BANK(30, 8, 0xC0),
+ F7188X_GPIO_BANK(40, 8, 0xB0),
+ F7188X_GPIO_BANK(50, 8, 0xA0),
+ F7188X_GPIO_BANK(60, 8, 0x90),
+ F7188X_GPIO_BANK(70, 8, 0x80),
+ F7188X_GPIO_BANK(80, 8, 0x88),
+};
+
static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
int err;
@@ -318,6 +332,10 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
data->nr_bank = ARRAY_SIZE(f71889_gpio_bank);
data->bank = f71889_gpio_bank;
break;
+ case f81866:
+ data->nr_bank = ARRAY_SIZE(f81866_gpio_bank);
+ data->bank = f81866_gpio_bank;
+ break;
default:
return -ENODEV;
}
@@ -332,37 +350,16 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
bank->chip.parent = &pdev->dev;
bank->data = data;
- err = gpiochip_add_data(&bank->chip, bank);
+ err = devm_gpiochip_add_data(&pdev->dev, &bank->chip, bank);
if (err) {
dev_err(&pdev->dev,
"Failed to register gpiochip %d: %d\n",
i, err);
- goto err_gpiochip;
+ return err;
}
}
return 0;
-
-err_gpiochip:
- for (i = i - 1; i >= 0; i--) {
- struct f7188x_gpio_bank *bank = &data->bank[i];
- gpiochip_remove(&bank->chip);
- }
-
- return err;
-}
-
-static int f7188x_gpio_remove(struct platform_device *pdev)
-{
- int i;
- struct f7188x_gpio_data *data = platform_get_drvdata(pdev);
-
- for (i = 0; i < data->nr_bank; i++) {
- struct f7188x_gpio_bank *bank = &data->bank[i];
- gpiochip_remove(&bank->chip);
- }
-
- return 0;
}
static int __init f7188x_find(int addr, struct f7188x_sio *sio)
@@ -395,6 +392,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
case SIO_F71889_ID:
sio->type = f71889f;
break;
+ case SIO_F81866_ID:
+ sio->type = f81866;
+ break;
default:
pr_info(DRVNAME ": Unsupported Fintek device 0x%04x\n", devid);
goto err;
@@ -455,7 +455,6 @@ static struct platform_driver f7188x_gpio_driver = {
.name = DRVNAME,
},
.probe = f7188x_gpio_probe,
- .remove = f7188x_gpio_remove,
};
static int __init f7188x_gpio_init(void)
@@ -485,6 +484,6 @@ static void __exit f7188x_gpio_exit(void)
}
module_exit(f7188x_gpio_exit);
-MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71869, F71869A, F71882FG and F71889F");
+MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71869, F71869A, F71882FG, F71889F and F81866");
MODULE_AUTHOR("Simon Guinot <simon.guinot@sequanux.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index cbbec838a9d1..8650b2916f87 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -89,7 +89,7 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
gc->of_node = pdev->dev.of_node;
/* This function adds a memory mapped GPIO chip */
- ret = gpiochip_add_data(gc, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, NULL);
if (ret)
goto err0;
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 2a4f2333a50b..54cddfa98f50 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -628,15 +628,7 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gc);
- return gpiochip_add_data(gc, NULL);
-}
-
-static int bgpio_pdev_remove(struct platform_device *pdev)
-{
- struct gpio_chip *gc = platform_get_drvdata(pdev);
-
- gpiochip_remove(gc);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}
static const struct platform_device_id bgpio_id_table[] = {
@@ -657,7 +649,6 @@ static struct platform_driver bgpio_driver = {
},
.id_table = bgpio_id_table,
.probe = bgpio_pdev_probe,
- .remove = bgpio_pdev_remove,
};
module_platform_driver(bgpio_driver);
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index a4893386abbf..4f6d643516b7 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -20,6 +20,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gpio.h>
@@ -384,8 +385,8 @@ static struct ichx_desc avoton_desc = {
.use_outlvl_cache = true,
};
-static int ichx_gpio_request_regions(struct resource *res_base,
- const char *name, u8 use_gpio)
+static int ichx_gpio_request_regions(struct device *dev,
+ struct resource *res_base, const char *name, u8 use_gpio)
{
int i;
@@ -395,34 +396,12 @@ static int ichx_gpio_request_regions(struct resource *res_base,
for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) {
if (!(use_gpio & (1 << i)))
continue;
- if (!request_region(
+ if (!devm_request_region(dev,
res_base->start + ichx_priv.desc->regs[0][i],
ichx_priv.desc->reglen[i], name))
- goto request_err;
+ return -EBUSY;
}
return 0;
-
-request_err:
- /* Clean up: release already requested regions, if any */
- for (i--; i >= 0; i--) {
- if (!(use_gpio & (1 << i)))
- continue;
- release_region(res_base->start + ichx_priv.desc->regs[0][i],
- ichx_priv.desc->reglen[i]);
- }
- return -EBUSY;
-}
-
-static void ichx_gpio_release_regions(struct resource *res_base, u8 use_gpio)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) {
- if (!(use_gpio & (1 << i)))
- continue;
- release_region(res_base->start + ichx_priv.desc->regs[0][i],
- ichx_priv.desc->reglen[i]);
- }
}
static int ichx_gpio_probe(struct platform_device *pdev)
@@ -468,7 +447,7 @@ static int ichx_gpio_probe(struct platform_device *pdev)
spin_lock_init(&ichx_priv.lock);
res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
ichx_priv.use_gpio = ich_info->use_gpio;
- err = ichx_gpio_request_regions(res_base, pdev->name,
+ err = ichx_gpio_request_regions(&pdev->dev, res_base, pdev->name,
ichx_priv.use_gpio);
if (err)
return err;
@@ -489,8 +468,8 @@ static int ichx_gpio_probe(struct platform_device *pdev)
goto init;
}
- if (!request_region(res_pm->start, resource_size(res_pm),
- pdev->name)) {
+ if (!devm_request_region(&pdev->dev, res_pm->start,
+ resource_size(res_pm), pdev->name)) {
pr_warn("ACPI BAR is busy, GPI 0 - 15 unavailable\n");
goto init;
}
@@ -502,31 +481,19 @@ init:
err = gpiochip_add_data(&ichx_priv.chip, NULL);
if (err) {
pr_err("Failed to register GPIOs\n");
- goto add_err;
+ return err;
}
pr_info("GPIO from %d to %d on %s\n", ichx_priv.chip.base,
ichx_priv.chip.base + ichx_priv.chip.ngpio - 1, DRV_NAME);
return 0;
-
-add_err:
- ichx_gpio_release_regions(ichx_priv.gpio_base, ichx_priv.use_gpio);
- if (ichx_priv.pm_base)
- release_region(ichx_priv.pm_base->start,
- resource_size(ichx_priv.pm_base));
- return err;
}
static int ichx_gpio_remove(struct platform_device *pdev)
{
gpiochip_remove(&ichx_priv.chip);
- ichx_gpio_release_regions(ichx_priv.gpio_base, ichx_priv.use_gpio);
- if (ichx_priv.pm_base)
- release_region(ichx_priv.pm_base->start,
- resource_size(ichx_priv.pm_base));
-
return 0;
}
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index fb65e5850e0c..860c535922fd 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -114,7 +114,7 @@ static int iop3xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- return gpiochip_add_data(&iop3xx_chip, NULL);
+ return devm_gpiochip_add_data(&pdev->dev, &iop3xx_chip, NULL);
}
static struct platform_driver iop3xx_gpio_driver = {
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 482aa0353868..a8d0a6b8025a 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -182,7 +182,7 @@ static int ttl_probe(struct platform_device *pdev)
gpio->base = -1;
gpio->ngpio = 20;
- ret = gpiochip_add_data(gpio, NULL);
+ ret = devm_gpiochip_add_data(dev, gpio, NULL);
if (ret) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
@@ -191,21 +191,11 @@ static int ttl_probe(struct platform_device *pdev)
return 0;
}
-static int ttl_remove(struct platform_device *pdev)
-{
- struct ttl_module *mod = platform_get_drvdata(pdev);
-
- gpiochip_remove(&mod->gpio);
-
- return 0;
-}
-
static struct platform_driver ttl_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = ttl_probe,
- .remove = ttl_remove,
};
module_platform_driver(ttl_driver);
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index 01117747b965..701f1510328c 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -178,7 +178,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = gpiochip_add_data(chip, gpio);
+ ret = devm_gpiochip_add_data(dev, chip, gpio);
if (ret) {
dev_err(dev, "Could not register GPIO chip\n");
return ret;
@@ -190,20 +190,11 @@ static int kempld_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int kempld_gpio_remove(struct platform_device *pdev)
-{
- struct kempld_gpio_data *gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&gpio->chip);
- return 0;
-}
-
static struct platform_driver kempld_gpio_driver = {
.driver = {
.name = "kempld-gpio",
},
.probe = kempld_gpio_probe,
- .remove = kempld_gpio_remove,
};
module_platform_driver(kempld_gpio_driver);
diff --git a/drivers/gpio/gpio-ks8695.c b/drivers/gpio/gpio-ks8695.c
index 9f86ed9c753b..179723d02f55 100644
--- a/drivers/gpio/gpio-ks8695.c
+++ b/drivers/gpio/gpio-ks8695.c
@@ -205,18 +205,6 @@ static int ks8695_gpio_to_irq(struct gpio_chip *gc, unsigned int pin)
return gpio_irq[pin];
}
-/*
- * Map IRQ number to GPIO line.
- */
-int irq_to_gpio(unsigned int irq)
-{
- if ((irq < KS8695_IRQ_EXTERN0) || (irq > KS8695_IRQ_EXTERN3))
- return -EINVAL;
-
- return (irq - KS8695_IRQ_EXTERN0);
-}
-EXPORT_SYMBOL(irq_to_gpio);
-
/* GPIOLIB interface */
static struct gpio_chip ks8695_gpio_chip = {
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 1c8e2ae26938..6dc6725403ec 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -204,15 +204,8 @@ static int lp3943_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lp3943_gpio);
- return gpiochip_add_data(&lp3943_gpio->chip, lp3943_gpio);
-}
-
-static int lp3943_gpio_remove(struct platform_device *pdev)
-{
- struct lp3943_gpio *lp3943_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&lp3943_gpio->chip);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &lp3943_gpio->chip,
+ lp3943_gpio);
}
static const struct of_device_id lp3943_gpio_of_match[] = {
@@ -223,7 +216,6 @@ MODULE_DEVICE_TABLE(of, lp3943_gpio_of_match);
static struct platform_driver lp3943_gpio_driver = {
.probe = lp3943_gpio_probe,
- .remove = lp3943_gpio_remove,
.driver = {
.name = "lp3943-gpio",
.of_match_table = lp3943_gpio_of_match,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 4cecf4ce96c1..d39014daeef9 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -547,7 +547,7 @@ static int lpc32xx_gpio_probe(struct platform_device *pdev)
lpc32xx_gpiochip[i].chip.of_gpio_n_cells = 3;
lpc32xx_gpiochip[i].chip.of_node = pdev->dev.of_node;
}
- gpiochip_add_data(&lpc32xx_gpiochip[i].chip,
+ devm_gpiochip_add_data(&pdev->dev, &lpc32xx_gpiochip[i].chip,
&lpc32xx_gpiochip[i]);
}
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 13107772be4f..9df015e85ad9 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -370,7 +370,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
gc->can_sleep = false;
gc->parent = dev;
- ret = gpiochip_add_data(gc, lg);
+ ret = devm_gpiochip_add_data(dev, gc, lg);
if (ret) {
dev_err(dev, "failed adding lp-gpio chip\n");
return ret;
@@ -439,9 +439,7 @@ MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
static int lp_gpio_remove(struct platform_device *pdev)
{
- struct lp_gpio *lg = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- gpiochip_remove(&lg->chip);
return 0;
}
diff --git a/drivers/gpio/gpio-mc9s08dz60.c b/drivers/gpio/gpio-mc9s08dz60.c
index ba22fb92a6e7..14f252f9eb29 100644
--- a/drivers/gpio/gpio-mc9s08dz60.c
+++ b/drivers/gpio/gpio-mc9s08dz60.c
@@ -103,17 +103,7 @@ static int mc9s08dz60_probe(struct i2c_client *client,
mc9s->client = client;
i2c_set_clientdata(client, mc9s);
- return gpiochip_add_data(&mc9s->chip, mc9s);
-}
-
-static int mc9s08dz60_remove(struct i2c_client *client)
-{
- struct mc9s08dz60 *mc9s;
-
- mc9s = i2c_get_clientdata(client);
-
- gpiochip_remove(&mc9s->chip);
- return 0;
+ return devm_gpiochip_add_data(&client->dev, &mc9s->chip, mc9s);
}
static const struct i2c_device_id mc9s08dz60_id[] = {
@@ -128,7 +118,6 @@ static struct i2c_driver mc9s08dz60_i2c_driver = {
.name = "mc9s08dz60",
},
.probe = mc9s08dz60_probe,
- .remove = mc9s08dz60_remove,
.id_table = mc9s08dz60_id,
};
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index c767879e4dd9..47e486910aab 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -31,6 +31,7 @@
#define MCP_TYPE_S17 1
#define MCP_TYPE_008 2
#define MCP_TYPE_017 3
+#define MCP_TYPE_S18 4
/* Registers are all 8 bits wide.
*
@@ -48,6 +49,7 @@
# define IOCON_HAEN (1 << 3)
# define IOCON_ODR (1 << 2)
# define IOCON_INTPOL (1 << 1)
+# define IOCON_INTCC (1)
#define MCP_GPPU 0x06
#define MCP_INTF 0x07
#define MCP_INTCAP 0x08
@@ -617,6 +619,12 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23s17";
break;
+
+ case MCP_TYPE_S18:
+ mcp->ops = &mcp23s17_ops;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23s18";
+ break;
#endif /* CONFIG_SPI_MASTER */
#if IS_ENABLED(CONFIG_I2C)
@@ -657,8 +665,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
of_property_read_bool(mcp->chip.parent->of_node,
"microchip,irq-active-high");
- if (type == MCP_TYPE_017)
- mirror = pdata->mirror;
+ mirror = pdata->mirror;
}
if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN) || mirror ||
@@ -674,6 +681,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (mirror)
status |= IOCON_MIRROR | (IOCON_MIRROR << 8);
+ if (type == MCP_TYPE_S18)
+ status |= IOCON_INTCC | (IOCON_INTCC << 8);
+
status = mcp->ops->write(mcp, MCP_IOCON, status);
if (status < 0)
goto fail;
@@ -735,6 +745,10 @@ static const struct of_device_id mcp23s08_spi_of_match[] = {
.compatible = "microchip,mcp23s17",
.data = (void *) MCP_TYPE_S17,
},
+ {
+ .compatible = "microchip,mcp23s18",
+ .data = (void *) MCP_TYPE_S18,
+ },
/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
{
.compatible = "mcp,mcp23s08",
@@ -803,6 +817,8 @@ static int mcp230xx_probe(struct i2c_client *client,
pdata = devm_kzalloc(&client->dev,
sizeof(struct mcp23s08_platform_data),
GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
pdata->base = -1;
}
}
@@ -969,8 +985,8 @@ static int mcp23s08_probe(struct spi_device *spi)
goto fail;
if (pdata->base != -1)
- pdata->base += (type == MCP_TYPE_S17) ? 16 : 8;
- ngpio += (type == MCP_TYPE_S17) ? 16 : 8;
+ pdata->base += data->mcp[addr]->chip.ngpio;
+ ngpio += data->mcp[addr]->chip.ngpio;
}
data->ngpio = ngpio;
@@ -1012,6 +1028,7 @@ static int mcp23s08_remove(struct spi_device *spi)
static const struct spi_device_id mcp23s08_ids[] = {
{ "mcp23s08", MCP_TYPE_S08 },
{ "mcp23s17", MCP_TYPE_S17 },
+ { "mcp23s18", MCP_TYPE_S18 },
{ },
};
MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
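
With the mcp23s18 joining the 8- and 16-line parts, the SPI-chain base
arithmetic above can no longer hardcode the width per type; stepping by
the already-initialized chip.ngpio handles any mix. A worked example
under hypothetical values (the addresses and base are not from the
patch):

/* Chain: mcp23s08 (8 lines) at addr 0, mcp23s17 (16 lines) at addr 1,
 * pdata->base starting at 100.
 *
 *   addr 0: lines 100..107, base advances to 108, ngpio = 8
 *   addr 1: lines 108..123, base advances to 124, ngpio = 24
 */
if (pdata->base != -1)
	pdata->base += data->mcp[addr]->chip.ngpio;
ngpio += data->mcp[addr]->chip.ngpio;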
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
new file mode 100644
index 000000000000..c5c9599a3a71
--- /dev/null
+++ b/drivers/gpio/gpio-menz127.c
@@ -0,0 +1,199 @@
+/*
+ * MEN 16Z127 GPIO driver
+ *
+ * Copyright (C) 2016 MEN Mikroelektronik GmbH (www.men.de)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/mcb.h>
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+
+#define MEN_Z127_CTRL 0x00
+#define MEN_Z127_PSR 0x04
+#define MEN_Z127_IRQR 0x08
+#define MEN_Z127_GPIODR 0x0c
+#define MEN_Z127_IER1 0x10
+#define MEN_Z127_IER2 0x14
+#define MEN_Z127_DBER 0x18
+#define MEN_Z127_ODER 0x1C
+#define GPIO_TO_DBCNT_REG(gpio) (((gpio) * 4) + 0x80)
+
+#define MEN_Z127_DB_MIN_US 50
+/* 16-bit compare register; each count represents 50us */
+#define MEN_Z127_DB_MAX_US (0xffff * MEN_Z127_DB_MIN_US)
+#define MEN_Z127_DB_IN_RANGE(db) ((db >= MEN_Z127_DB_MIN_US) && \
+ (db <= MEN_Z127_DB_MAX_US))
+
+struct men_z127_gpio {
+ struct gpio_chip gc;
+ void __iomem *reg_base;
+ struct mcb_device *mdev;
+ struct resource *mem;
+};
+
+static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
+ unsigned debounce)
+{
+ struct men_z127_gpio *priv = gpiochip_get_data(gc);
+ struct device *dev = &priv->mdev->dev;
+ unsigned int rnd;
+ u32 db_en, db_cnt;
+
+ if (!MEN_Z127_DB_IN_RANGE(debounce)) {
+ dev_err(dev, "debounce value %u out of range", debounce);
+ return -EINVAL;
+ }
+
+ if (debounce > 0) {
+ /* round up or down depending on MSB-1 */
+ rnd = fls(debounce) - 1;
+
+ if (rnd && (debounce & BIT(rnd - 1)))
+ debounce = round_up(debounce, MEN_Z127_DB_MIN_US);
+ else
+ debounce = round_down(debounce, MEN_Z127_DB_MIN_US);
+
+ if (debounce > MEN_Z127_DB_MAX_US)
+ debounce = MEN_Z127_DB_MAX_US;
+
+		/* 50us per register unit */
+		debounce /= MEN_Z127_DB_MIN_US;
+ }
+
+ spin_lock(&gc->bgpio_lock);
+
+ db_en = readl(priv->reg_base + MEN_Z127_DBER);
+
+ if (debounce == 0) {
+ db_en &= ~BIT(gpio);
+ db_cnt = 0;
+ } else {
+ db_en |= BIT(gpio);
+ db_cnt = debounce;
+ }
+
+ writel(db_en, priv->reg_base + MEN_Z127_DBER);
+ writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
+
+ spin_unlock(&gc->bgpio_lock);
+
+ return 0;
+}
+
+static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
+{
+ struct men_z127_gpio *priv = gpiochip_get_data(gc);
+ u32 od_en;
+
+ if (gpio_pin >= gc->ngpio)
+ return -EINVAL;
+
+ spin_lock(&gc->bgpio_lock);
+ od_en = readl(priv->reg_base + MEN_Z127_ODER);
+
+ if (gpiochip_line_is_open_drain(gc, gpio_pin))
+ od_en |= BIT(gpio_pin);
+ else
+ od_en &= ~BIT(gpio_pin);
+
+ writel(od_en, priv->reg_base + MEN_Z127_ODER);
+ spin_unlock(&gc->bgpio_lock);
+
+ return 0;
+}
+
+static int men_z127_probe(struct mcb_device *mdev,
+ const struct mcb_device_id *id)
+{
+ struct men_z127_gpio *men_z127_gpio;
+ struct device *dev = &mdev->dev;
+ int ret;
+
+ men_z127_gpio = devm_kzalloc(dev, sizeof(struct men_z127_gpio),
+ GFP_KERNEL);
+ if (!men_z127_gpio)
+ return -ENOMEM;
+
+ men_z127_gpio->mem = mcb_request_mem(mdev, dev_name(dev));
+ if (IS_ERR(men_z127_gpio->mem)) {
+ dev_err(dev, "failed to request device memory");
+ return PTR_ERR(men_z127_gpio->mem);
+ }
+
+ men_z127_gpio->reg_base = ioremap(men_z127_gpio->mem->start,
+ resource_size(men_z127_gpio->mem));
+ if (men_z127_gpio->reg_base == NULL) {
+ ret = -ENXIO;
+ goto err_release;
+ }
+
+ men_z127_gpio->mdev = mdev;
+ mcb_set_drvdata(mdev, men_z127_gpio);
+
+ ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
+ men_z127_gpio->reg_base + MEN_Z127_PSR,
+ men_z127_gpio->reg_base + MEN_Z127_CTRL,
+ NULL,
+ men_z127_gpio->reg_base + MEN_Z127_GPIODR,
+ NULL, 0);
+ if (ret)
+ goto err_unmap;
+
+ men_z127_gpio->gc.set_debounce = men_z127_debounce;
+ men_z127_gpio->gc.request = men_z127_request;
+
+ ret = gpiochip_add_data(&men_z127_gpio->gc, men_z127_gpio);
+ if (ret) {
+ dev_err(dev, "failed to register MEN 16Z127 GPIO controller");
+ goto err_unmap;
+ }
+
+ dev_info(dev, "MEN 16Z127 GPIO driver registered");
+
+ return 0;
+
+err_unmap:
+ iounmap(men_z127_gpio->reg_base);
+err_release:
+ mcb_release_mem(men_z127_gpio->mem);
+ return ret;
+}
+
+static void men_z127_remove(struct mcb_device *mdev)
+{
+ struct men_z127_gpio *men_z127_gpio = mcb_get_drvdata(mdev);
+
+ gpiochip_remove(&men_z127_gpio->gc);
+ iounmap(men_z127_gpio->reg_base);
+ mcb_release_mem(men_z127_gpio->mem);
+}
+
+static const struct mcb_device_id men_z127_ids[] = {
+ { .device = 0x7f },
+ { }
+};
+MODULE_DEVICE_TABLE(mcb, men_z127_ids);
+
+static struct mcb_driver men_z127_driver = {
+ .driver = {
+ .name = "z127-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = men_z127_probe,
+ .remove = men_z127_remove,
+ .id_table = men_z127_ids,
+};
+module_mcb_driver(men_z127_driver);
+
+MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
+MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("mcb:16z127");
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
index ca604538ebf7..f02d0b490978 100644
--- a/drivers/gpio/gpio-moxart.c
+++ b/drivers/gpio/gpio-moxart.c
@@ -57,13 +57,10 @@ static int moxart_gpio_probe(struct platform_device *pdev)
gc->label = "moxart-gpio";
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
- gc->bgpio_data = gc->read_reg(gc->reg_set);
gc->base = 0;
- gc->ngpio = 32;
- gc->parent = dev;
gc->owner = THIS_MODULE;
- ret = gpiochip_add_data(gc, NULL);
+ ret = devm_gpiochip_add_data(dev, gc, NULL);
if (ret) {
dev_err(dev, "%s: gpiochip_add failed\n",
dev->of_node->full_name);
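The moxart conversion above is the pattern most of this series applies: registering through the device-managed API ties the gpiochip's lifetime to the device, so drivers can drop their gpiochip_remove() calls and often the whole .remove callback. A condensed before/after sketch (driver name hypothetical, chip setup elided):

struct foo {
	struct gpio_chip gc;
};

/* before: probe registers, remove must unregister by hand */
static int foo_remove(struct platform_device *pdev)
{
	struct foo *foo = platform_get_drvdata(pdev);

	gpiochip_remove(&foo->gc);	/* easy to forget on error paths */
	return 0;
}

/* after: no .remove needed; devres unregisters the chip on unbind */
static int foo_probe(struct platform_device *pdev)
{
	struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;
	platform_set_drvdata(pdev, foo);
	return devm_gpiochip_add_data(&pdev->dev, &foo->gc, foo);
}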
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 0e5a6709f27d..fc10cf59691c 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -25,7 +25,6 @@
#include <linux/of_platform.h>
#include <linux/module.h>
-#include <asm/gpio.h>
#include <asm/mpc52xx.h>
#include <sysdev/fsl_soc.h>
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 9d40787e66c0..425501c39527 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -1,7 +1,8 @@
/*
- * GPIOs on MPC512x/8349/8572/8610 and compatible
+ * GPIOs on MPC512x/8349/8572/8610/QorIQ and compatible
*
* Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
+ * Copyright (C) 2016 Freescale Semiconductor Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
@@ -14,11 +15,12 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/irq.h>
+#include <linux/gpio/driver.h>
#define MPC8XXX_GPIO_PINS 32
@@ -31,32 +33,17 @@
#define GPIO_ICR2 0x18
struct mpc8xxx_gpio_chip {
- struct of_mm_gpio_chip mm_gc;
+ struct gpio_chip gc;
+ void __iomem *regs;
raw_spinlock_t lock;
- /*
- * shadowed data register to be able to clear/set output pins in
- * open drain mode safely
- */
- u32 data;
+ int (*direction_output)(struct gpio_chip *chip,
+ unsigned offset, int value);
+
struct irq_domain *irq;
unsigned int irqn;
- const void *of_dev_id_data;
};
-static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
-{
- return 1u << (MPC8XXX_GPIO_PINS - 1 - gpio);
-}
-
-static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm)
-{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc =
- container_of(mm, struct mpc8xxx_gpio_chip, mm_gc);
-
- mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT);
-}
-
/* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs
* defined as output cannot be determined by reading GPDAT register,
* so we use shadow data register instead. The status of input pins
@@ -65,117 +52,36 @@ static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm)
static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
u32 val;
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
u32 out_mask, out_shadow;
- out_mask = in_be32(mm->regs + GPIO_DIR);
-
- val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;
- out_shadow = mpc8xxx_gc->data & out_mask;
-
- return !!((val | out_shadow) & mpc8xxx_gpio2mask(gpio));
-}
-
-static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
-
- return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio);
-}
-
-static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
-
- if (val)
- mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio);
- else
- mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(gpio);
-
- out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
-
- raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
-}
-
-static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
-{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
- unsigned long flags;
- int i;
-
- raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
-
- for (i = 0; i < gc->ngpio; i++) {
- if (*mask == 0)
- break;
- if (__test_and_clear_bit(i, mask)) {
- if (test_bit(i, bits))
- mpc8xxx_gc->data |= mpc8xxx_gpio2mask(i);
- else
- mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(i);
- }
- }
-
- out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
-
- raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
-}
-
-static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
- unsigned long flags;
+ out_mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
+ val = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
+ out_shadow = gc->bgpio_data & out_mask;
- raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
-
- clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
-
- raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
-
- return 0;
+ return !!((val | out_shadow) & gc->pin2mask(gc, gpio));
}
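The combine in mpc8572_gpio_get() generalizes to any controller whose data register does not read back output latches: input bits come from the register, output bits from a software shadow. A sketch of just the mask arithmetic (names hypothetical):

/*
 * dir_mask: 1 for pins configured as outputs.
 * dat:      value read from the data register (inputs valid).
 * shadow:   last value written by software (outputs valid).
 */
static u32 effective_state(u32 dat, u32 shadow, u32 dir_mask)
{
	return (dat & ~dir_mask) | (shadow & dir_mask);
}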
-static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc,
+ unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
- unsigned long flags;
-
- mpc8xxx_gpio_set(gc, gpio, val);
-
- raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
-
- setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
-
- raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
-
- return 0;
-}
-
-static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
/* GPIO 28..31 are input only on MPC5121 */
if (gpio >= 28)
return -EINVAL;
- return mpc8xxx_gpio_dir_out(gc, gpio, val);
+ return mpc8xxx_gc->direction_output(gc, gpio, val);
}
-static int mpc5125_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+static int mpc5125_gpio_dir_out(struct gpio_chip *gc,
+ unsigned int gpio, int val)
{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
/* GPIO 0..3 are input only on MPC5125 */
if (gpio <= 3)
return -EINVAL;
- return mpc8xxx_gpio_dir_out(gc, gpio, val);
+ return mpc8xxx_gc->direction_output(gc, gpio, val);
}
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -192,10 +98,11 @@ static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned int mask;
- mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR);
+ mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
+ & gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
if (mask)
generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
32 - ffs(mask)));
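Because the big-endian bit numbering used here puts pin 0 at register bit 31, the cascade handler converts the lowest set bit of the pending mask back to a hwirq with 32 - ffs(mask). A one-line sketch of that mapping:

/* ffs() is 1-based: bit n set -> ffs() == n + 1, and bit n is pin 31 - n,
 * so 32 - ffs(mask) == 31 - n. One pending pin is handled per pass.
 */
static unsigned int pending_pin(u32 mask)
{
	return 32 - ffs(mask);
}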
@@ -206,12 +113,14 @@ static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
static void mpc8xxx_irq_unmask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
+ gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ | gc->pin2mask(gc, irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -219,12 +128,14 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
static void mpc8xxx_irq_mask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
+ gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ & ~(gc->pin2mask(gc, irqd_to_hwirq(d))));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -232,29 +143,32 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
static void mpc8xxx_irq_ack(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
- out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IER,
+ gc->pin2mask(gc, irqd_to_hwirq(d)));
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- setbits32(mm->regs + GPIO_ICR,
- mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
+ gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ | gc->pin2mask(gc, irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrbits32(mm->regs + GPIO_ICR,
- mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
+ gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ & ~(gc->pin2mask(gc, irqd_to_hwirq(d))));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -268,17 +182,17 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long gpio = irqd_to_hwirq(d);
void __iomem *reg;
unsigned int shift;
unsigned long flags;
if (gpio < 16) {
- reg = mm->regs + GPIO_ICR;
+ reg = mpc8xxx_gc->regs + GPIO_ICR;
shift = (15 - gpio) * 2;
} else {
- reg = mm->regs + GPIO_ICR2;
+ reg = mpc8xxx_gc->regs + GPIO_ICR2;
shift = (15 - (gpio % 16)) * 2;
}
@@ -286,20 +200,22 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrsetbits_be32(reg, 3 << shift, 2 << shift);
+ gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ | (2 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrsetbits_be32(reg, 3 << shift, 1 << shift);
+ gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ | (1 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrbits32(reg, 3 << shift);
+ gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -354,8 +270,6 @@ static const struct mpc8xxx_gpio_devtype mpc8572_gpio_devtype = {
};
static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
- .gpio_dir_out = mpc8xxx_gpio_dir_out,
- .gpio_get = mpc8xxx_gpio_get,
.irq_set_type = mpc8xxx_irq_set_type,
};
@@ -374,9 +288,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
- struct of_mm_gpio_chip *mm_gc;
- struct gpio_chip *gc;
- const struct of_device_id *id;
+ struct gpio_chip *gc;
const struct mpc8xxx_gpio_devtype *devtype =
of_device_get_match_data(&pdev->dev);
int ret;
@@ -389,12 +301,34 @@ static int mpc8xxx_probe(struct platform_device *pdev)
raw_spin_lock_init(&mpc8xxx_gc->lock);
- mm_gc = &mpc8xxx_gc->mm_gc;
- gc = &mm_gc->gc;
+ mpc8xxx_gc->regs = of_iomap(np, 0);
+ if (!mpc8xxx_gc->regs)
+ return -ENOMEM;
+
+ gc = &mpc8xxx_gc->gc;
+
+ if (of_property_read_bool(np, "little-endian")) {
+ ret = bgpio_init(gc, &pdev->dev, 4,
+ mpc8xxx_gc->regs + GPIO_DAT,
+ NULL, NULL,
+ mpc8xxx_gc->regs + GPIO_DIR, NULL,
+ BGPIOF_BIG_ENDIAN);
+ if (ret)
+ goto err;
+ dev_dbg(&pdev->dev, "GPIO registers are LITTLE endian\n");
+ } else {
+ ret = bgpio_init(gc, &pdev->dev, 4,
+ mpc8xxx_gc->regs + GPIO_DAT,
+ NULL, NULL,
+ mpc8xxx_gc->regs + GPIO_DIR, NULL,
+ BGPIOF_BIG_ENDIAN
+ | BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ if (ret)
+ goto err;
+ dev_dbg(&pdev->dev, "GPIO registers are BIG endian\n");
+ }
- mm_gc->save_regs = mpc8xxx_gpio_save_regs;
- gc->ngpio = MPC8XXX_GPIO_PINS;
- gc->direction_input = mpc8xxx_gpio_dir_in;
+ mpc8xxx_gc->direction_output = gc->direction_output;
if (!devtype)
devtype = &mpc8xxx_gpio_devtype_default;
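For reference, the two bgpio flags selected above are orthogonal: BGPIOF_BIG_ENDIAN requests big-endian bit numbering (pin 0 at the register's MSB), while BGPIOF_BIG_ENDIAN_BYTE_ORDER additionally makes the MMIO accessors use big-endian byte order. A sketch of the resulting pin-to-mask mapping, assuming 32-bit registers (mirrors gpio-mmio's behaviour as I read it):

static u32 pin2mask(unsigned int pin, bool big_endian_bits)
{
	return big_endian_bits ? BIT(31 - pin) : BIT(pin);
}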
@@ -405,18 +339,22 @@ static int mpc8xxx_probe(struct platform_device *pdev)
*/
mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
- gc->direction_output = devtype->gpio_dir_out ?: mpc8xxx_gpio_dir_out;
- gc->get = devtype->gpio_get ?: mpc8xxx_gpio_get;
- gc->set = mpc8xxx_gpio_set;
- gc->set_multiple = mpc8xxx_gpio_set_multiple;
+ if (devtype->gpio_dir_out)
+ gc->direction_output = devtype->gpio_dir_out;
+ if (devtype->gpio_get)
+ gc->get = devtype->gpio_get;
+
gc->to_irq = mpc8xxx_gpio_to_irq;
- ret = of_mm_gpiochip_add_data(np, mm_gc, mpc8xxx_gc);
- if (ret)
- return ret;
+ ret = gpiochip_add_data(gc, mpc8xxx_gc);
+ if (ret) {
+ pr_err("%s: GPIO chip registration failed with status %d\n",
+ np->full_name, ret);
+ goto err;
+ }
mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0);
- if (mpc8xxx_gc->irqn == NO_IRQ)
+ if (!mpc8xxx_gc->irqn)
return 0;
mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
@@ -424,18 +362,16 @@ static int mpc8xxx_probe(struct platform_device *pdev)
if (!mpc8xxx_gc->irq)
return 0;
- id = of_match_node(mpc8xxx_gpio_ids, np);
- if (id)
- mpc8xxx_gc->of_dev_id_data = id->data;
-
/* ack and mask all irqs */
- out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
- out_be32(mm_gc->regs + GPIO_IMR, 0);
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
-
return 0;
+err:
+ iounmap(mpc8xxx_gc->regs);
+ return ret;
}
static int mpc8xxx_remove(struct platform_device *pdev)
@@ -447,7 +383,8 @@ static int mpc8xxx_remove(struct platform_device *pdev)
irq_domain_remove(mpc8xxx_gc->irq);
}
- of_mm_gpiochip_remove(&mpc8xxx_gc->mm_gc);
+ gpiochip_remove(&mpc8xxx_gc->gc);
+ iounmap(mpc8xxx_gc->regs);
return 0;
}
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index a5eacc1dff09..11c6582ef0a6 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -756,7 +756,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
BUG();
}
- gpiochip_add_data(&mvchip->chip, mvchip);
+ devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
/* Some gpio controllers do not provide irq support */
if (!of_irq_count(np))
@@ -777,16 +777,14 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
if (mvchip->irqbase < 0) {
dev_err(&pdev->dev, "no irqs\n");
- err = mvchip->irqbase;
- goto err_gpiochip_add;
+ return mvchip->irqbase;
}
gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
mvchip->membase, handle_level_irq);
if (!gc) {
dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- err = -ENOMEM;
- goto err_gpiochip_add;
+ return -ENOMEM;
}
gc->private = mvchip;
@@ -828,9 +826,6 @@ err_generic_chip:
IRQ_LEVEL | IRQ_NOPROBE);
kfree(gc);
-err_gpiochip_add:
- gpiochip_remove(&mvchip->chip);
-
return err;
}
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 7fd21cb53c81..1b342a3842c8 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -462,14 +462,14 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
pdev->id * 32;
- err = gpiochip_add_data(&port->gc, port);
+ err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
if (err)
goto out_bgio;
irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
if (irq_base < 0) {
err = irq_base;
- goto out_gpiochip_remove;
+ goto out_bgio;
}
port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
@@ -492,8 +492,6 @@ out_irqdomain_remove:
irq_domain_remove(port->domain);
out_irqdesc_free:
irq_free_descs(irq_base, 32);
-out_gpiochip_remove:
- gpiochip_remove(&port->gc);
out_bgio:
dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
return err;
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 7665ebcd0c1d..47aead1ed1cc 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -117,7 +117,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
chip->get = octeon_gpio_get;
chip->direction_output = octeon_gpio_dir_out;
chip->set = octeon_gpio_set;
- err = gpiochip_add_data(chip, gpio);
+ err = devm_gpiochip_add_data(&pdev->dev, chip, gpio);
if (err)
goto out;
@@ -126,13 +126,6 @@ out:
return err;
}
-static int octeon_gpio_remove(struct platform_device *pdev)
-{
- struct gpio_chip *chip = dev_get_platdata(&pdev->dev);
- gpiochip_remove(chip);
- return 0;
-}
-
static struct of_device_id octeon_gpio_match[] = {
{
.compatible = "cavium,octeon-3860-gpio",
@@ -147,7 +140,6 @@ static struct platform_driver octeon_gpio_driver = {
.of_match_table = octeon_gpio_match,
},
.probe = octeon_gpio_probe,
- .remove = octeon_gpio_remove,
};
module_platform_driver(octeon_gpio_driver);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 189f672bebc1..551dfa9d97ab 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -66,7 +66,6 @@ struct gpio_bank {
u32 irq_usage;
u32 dbck_enable_mask;
bool dbck_enabled;
- struct device *dev;
bool is_mpuio;
bool dbck_flag;
bool loses_context;
@@ -627,7 +626,7 @@ static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
unsigned long flags;
if (bank->non_wakeup_gpios & gpio_bit) {
- dev_err(bank->dev,
+ dev_err(bank->chip.parent,
"Unable to modify wakeup on non-wakeup GPIO%d\n",
offset);
return -EINVAL;
@@ -669,7 +668,7 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
* enable the bank module.
*/
if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_get_sync(chip->parent);
raw_spin_lock_irqsave(&bank->lock, flags);
omap_enable_gpio_module(bank, offset);
@@ -698,7 +697,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
* disable the bank module.
*/
if (!BANK_USED(bank))
- pm_runtime_put(bank->dev);
+ pm_runtime_put(chip->parent);
}
/*
@@ -723,7 +722,7 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
if (WARN_ON(!isr_reg))
goto exit;
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_get_sync(bank->chip.parent);
while (1) {
u32 isr_saved, level_mask = 0;
@@ -776,7 +775,7 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
}
}
exit:
- pm_runtime_put(bank->dev);
+ pm_runtime_put(bank->chip.parent);
return IRQ_HANDLED;
}
@@ -826,7 +825,7 @@ static void omap_gpio_irq_bus_lock(struct irq_data *data)
struct gpio_bank *bank = omap_irq_data_get_bank(data);
if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_get_sync(bank->chip.parent);
}
static void gpio_irq_bus_sync_unlock(struct irq_data *data)
@@ -838,7 +837,7 @@ static void gpio_irq_bus_sync_unlock(struct irq_data *data)
* disable the bank module.
*/
if (!BANK_USED(bank))
- pm_runtime_put(bank->dev);
+ pm_runtime_put(bank->chip.parent);
}
static void omap_gpio_ack_irq(struct irq_data *d)
@@ -1100,7 +1099,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
ret = gpiochip_add_data(&bank->chip, bank);
if (ret) {
- dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
+ dev_err(bank->chip.parent,
+ "Could not register gpio chip %d\n", ret);
return ret;
}
@@ -1114,7 +1114,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
*/
irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
if (irq_base < 0) {
- dev_err(bank->dev, "Couldn't allocate IRQ numbers\n");
+ dev_err(bank->chip.parent, "Couldn't allocate IRQ numbers\n");
return -ENODEV;
}
#endif
@@ -1131,15 +1131,17 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
IRQ_TYPE_NONE);
if (ret) {
- dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret);
+ dev_err(bank->chip.parent,
+ "Couldn't add irqchip to gpiochip %d\n", ret);
gpiochip_remove(&bank->chip);
return -ENODEV;
}
gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);
- ret = devm_request_irq(bank->dev, bank->irq, omap_gpio_irq_handler,
- 0, dev_name(bank->dev), bank);
+ ret = devm_request_irq(bank->chip.parent, bank->irq,
+ omap_gpio_irq_handler,
+ 0, dev_name(bank->chip.parent), bank);
if (ret)
gpiochip_remove(&bank->chip);
@@ -1196,7 +1198,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
return bank->irq;
}
- bank->dev = dev;
bank->chip.parent = dev;
bank->chip.owner = THIS_MODULE;
bank->dbck_flag = pdata->dbck_flag;
@@ -1235,9 +1236,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
}
if (bank->dbck_flag) {
- bank->dbck = devm_clk_get(bank->dev, "dbclk");
+ bank->dbck = devm_clk_get(dev, "dbclk");
if (IS_ERR(bank->dbck)) {
- dev_err(bank->dev,
+ dev_err(dev,
"Could not get gpio dbck. Disable debounce\n");
bank->dbck_flag = false;
} else {
@@ -1247,9 +1248,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bank);
- pm_runtime_enable(bank->dev);
- pm_runtime_irq_safe(bank->dev);
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_enable(dev);
+ pm_runtime_irq_safe(dev);
+ pm_runtime_get_sync(dev);
if (bank->is_mpuio)
omap_mpuio_init(bank);
@@ -1258,14 +1259,14 @@ static int omap_gpio_probe(struct platform_device *pdev)
ret = omap_gpio_chip_init(bank, irqc);
if (ret) {
- pm_runtime_put_sync(bank->dev);
- pm_runtime_disable(bank->dev);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
omap_gpio_show_rev(bank);
- pm_runtime_put(bank->dev);
+ pm_runtime_put(dev);
list_add_tail(&bank->node, &omap_gpio_list);
@@ -1278,7 +1279,7 @@ static int omap_gpio_remove(struct platform_device *pdev)
list_del(&bank->node);
gpiochip_remove(&bank->chip);
- pm_runtime_disable(bank->dev);
+ pm_runtime_disable(&pdev->dev);
if (bank->dbck_flag)
clk_unprepare(bank->dbck);
@@ -1348,7 +1349,7 @@ static int omap_gpio_runtime_suspend(struct device *dev)
update_gpio_context_count:
if (bank->get_context_loss_count)
bank->context_loss_count =
- bank->get_context_loss_count(bank->dev);
+ bank->get_context_loss_count(dev);
omap_gpio_dbck_disable(bank);
raw_spin_unlock_irqrestore(&bank->lock, flags);
@@ -1378,7 +1379,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
if (bank->get_context_loss_count)
bank->context_loss_count =
- bank->get_context_loss_count(bank->dev);
+ bank->get_context_loss_count(dev);
}
omap_gpio_dbck_enable(bank);
@@ -1398,7 +1399,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
if (!bank->get_context_loss_count) {
omap_gpio_restore_context(bank);
} else {
- c = bank->get_context_loss_count(bank->dev);
+ c = bank->get_context_loss_count(dev);
if (c != bank->context_loss_count) {
omap_gpio_restore_context(bank);
} else {
@@ -1481,7 +1482,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode)
bank->power_mode = pwr_mode;
- pm_runtime_put_sync_suspend(bank->dev);
+ pm_runtime_put_sync_suspend(bank->chip.parent);
}
}
@@ -1493,7 +1494,7 @@ void omap2_gpio_resume_after_idle(void)
if (!BANK_USED(bank) || !bank->loses_context)
continue;
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_get_sync(bank->chip.parent);
}
}
#endif
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index fdfb3b1e0def..6f27b3d94d53 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -195,7 +195,8 @@ static int palmas_gpio_probe(struct platform_device *pdev)
else
palmas_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&palmas_gpio->gpio_chip, palmas_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &palmas_gpio->gpio_chip,
+ palmas_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -205,20 +206,11 @@ static int palmas_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int palmas_gpio_remove(struct platform_device *pdev)
-{
- struct palmas_gpio *palmas_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&palmas_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver palmas_gpio_driver = {
.driver.name = "palmas-gpio",
.driver.owner = THIS_MODULE,
.driver.of_match_table = of_palmas_gpio_match,
.probe = palmas_gpio_probe,
- .remove = palmas_gpio_remove,
};
static int __init palmas_gpio_init(void)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 23196c5fc17c..e66084c295fb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -18,6 +18,7 @@
#include <linux/i2c.h>
#include <linux/platform_data/pca953x.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <linux/of_platform.h>
#include <linux/acpi.h>
@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
switch (chip->chip_type) {
case PCA953X_TYPE:
ret = i2c_smbus_write_word_data(chip->client,
- reg << 1, (u16) *val);
+ reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
break;
case PCA957X_TYPE:
ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
@@ -367,9 +368,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
memcpy(reg_val, chip->reg_output, NBANK(chip));
mutex_lock(&chip->i2c_lock);
for(bank=0; bank<NBANK(chip); bank++) {
- unsigned bankmask = mask[bank/4] >> ((bank % 4) * 8);
+ unsigned bankmask = mask[bank / sizeof(*mask)] >>
+ ((bank % sizeof(*mask)) * 8);
if(bankmask) {
- unsigned bankval = bits[bank/4] >> ((bank % 4) * 8);
+ unsigned bankval = bits[bank / sizeof(*bits)] >>
+ ((bank % sizeof(*bits)) * 8);
reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
}
}
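The pca953x fix above replaces a hardcoded 4 with sizeof(*mask): the bitmap words are unsigned long, which holds 4 banks on 32-bit but 8 on 64-bit, so the old arithmetic indexed the wrong word on 64-bit kernels. A sketch of the corrected extraction:

/* Extract the 8-bit slice for register bank 'bank' from a bitmap held
 * in unsigned long words, independent of word size.
 */
static u8 bank_byte(const unsigned long *bits, unsigned int bank)
{
	size_t banks_per_word = sizeof(*bits);	/* bytes == 8-bit banks */

	return bits[bank / banks_per_word] >>
	       ((bank % banks_per_word) * 8);
}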
@@ -754,7 +757,7 @@ static int pca953x_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = gpiochip_add_data(&chip->gpio_chip, chip);
+ ret = devm_gpiochip_add_data(&client->dev, &chip->gpio_chip, chip);
if (ret)
return ret;
@@ -789,8 +792,6 @@ static int pca953x_remove(struct i2c_client *client)
}
}
- gpiochip_remove(&chip->gpio_chip);
-
return 0;
}
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 709cd3fc2a70..169c09aa33c8 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -372,7 +372,7 @@ static int pcf857x_probe(struct i2c_client *client,
gpio->out = ~n_latch;
gpio->status = gpio->out;
- status = gpiochip_add_data(&gpio->chip, gpio);
+ status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
if (status < 0)
goto fail;
@@ -383,7 +383,7 @@ static int pcf857x_probe(struct i2c_client *client,
IRQ_TYPE_NONE);
if (status) {
dev_err(&client->dev, "cannot add irqchip\n");
- goto fail_irq;
+ goto fail;
}
status = devm_request_threaded_irq(&client->dev, client->irq,
@@ -391,7 +391,7 @@ static int pcf857x_probe(struct i2c_client *client,
IRQF_TRIGGER_FALLING | IRQF_SHARED,
dev_name(&client->dev), gpio);
if (status)
- goto fail_irq;
+ goto fail;
gpiochip_set_chained_irqchip(&gpio->chip, &pcf857x_irq_chip,
client->irq, NULL);
@@ -413,9 +413,6 @@ static int pcf857x_probe(struct i2c_client *client,
return 0;
-fail_irq:
- gpiochip_remove(&gpio->chip);
-
fail:
dev_dbg(&client->dev, "probe error %d for '%s'\n", status,
client->name);
@@ -440,7 +437,6 @@ static int pcf857x_remove(struct i2c_client *client)
}
}
- gpiochip_remove(&gpio->chip);
return status;
}
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
new file mode 100644
index 000000000000..cb14b8d1d512
--- /dev/null
+++ b/drivers/gpio/gpio-pisosr.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+
+#define DEFAULT_NGPIO 8
+
+/**
+ * struct pisosr_gpio - GPIO driver data
+ * @chip: GPIO controller chip
+ * @spi: SPI device pointer
+ * @buffer: Buffer for device reads
+ * @buffer_size: Size of buffer
+ * @load_gpio: GPIO pin used to load input into device
+ * @lock: Protects read sequences
+ */
+struct pisosr_gpio {
+ struct gpio_chip chip;
+ struct spi_device *spi;
+ u8 *buffer;
+ size_t buffer_size;
+ struct gpio_desc *load_gpio;
+ struct mutex lock;
+};
+
+static int pisosr_gpio_refresh(struct pisosr_gpio *gpio)
+{
+ int ret;
+
+ mutex_lock(&gpio->lock);
+
+ if (gpio->load_gpio) {
+ gpiod_set_value_cansleep(gpio->load_gpio, 1);
+ udelay(1); /* registers load time (~10ns) */
+ gpiod_set_value_cansleep(gpio->load_gpio, 0);
+ udelay(1); /* registers recovery time (~5ns) */
+ }
+
+ ret = spi_read(gpio->spi, gpio->buffer, gpio->buffer_size);
+
+ mutex_unlock(&gpio->lock);
+
+ return ret;
+}
+
+static int pisosr_gpio_get_direction(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is input only */
+ return 1;
+}
+
+static int pisosr_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is input only */
+ return 0;
+}
+
+static int pisosr_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ /* This device is input only */
+ return -EINVAL;
+}
+
+static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct pisosr_gpio *gpio = gpiochip_get_data(chip);
+
+ /* Refresh may not always be needed */
+ pisosr_gpio_refresh(gpio);
+
+ return (gpio->buffer[offset / 8] >> (offset % 8)) & 0x1;
+}
+
+static struct gpio_chip template_chip = {
+ .label = "pisosr-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = pisosr_gpio_get_direction,
+ .direction_input = pisosr_gpio_direction_input,
+ .direction_output = pisosr_gpio_direction_output,
+ .get = pisosr_gpio_get,
+ .base = -1,
+ .ngpio = DEFAULT_NGPIO,
+ .can_sleep = true,
+};
+
+static int pisosr_gpio_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct pisosr_gpio *gpio;
+ int ret;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, gpio);
+
+ gpio->chip = template_chip;
+ gpio->chip.parent = dev;
+ of_property_read_u16(dev->of_node, "ngpios", &gpio->chip.ngpio);
+
+ gpio->spi = spi;
+
+ gpio->buffer_size = DIV_ROUND_UP(gpio->chip.ngpio, 8);
+ gpio->buffer = devm_kzalloc(dev, gpio->buffer_size, GFP_KERNEL);
+ if (!gpio->buffer)
+ return -ENOMEM;
+
+ gpio->load_gpio = devm_gpiod_get_optional(dev, "load", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio->load_gpio)) {
+ ret = PTR_ERR(gpio->load_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Unable to allocate load GPIO\n");
+ return ret;
+ }
+
+ mutex_init(&gpio->lock);
+
+ ret = gpiochip_add_data(&gpio->chip, gpio);
+ if (ret < 0) {
+ dev_err(dev, "Unable to register gpiochip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pisosr_gpio_remove(struct spi_device *spi)
+{
+ struct pisosr_gpio *gpio = spi_get_drvdata(spi);
+
+ gpiochip_remove(&gpio->chip);
+
+ mutex_destroy(&gpio->lock);
+
+ return 0;
+}
+
+static const struct spi_device_id pisosr_gpio_id_table[] = {
+ { "pisosr-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, pisosr_gpio_id_table);
+
+static const struct of_device_id pisosr_gpio_of_match_table[] = {
+ { .compatible = "pisosr-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pisosr_gpio_of_match_table);
+
+static struct spi_driver pisosr_gpio_driver = {
+ .driver = {
+ .name = "pisosr-gpio",
+ .of_match_table = pisosr_gpio_of_match_table,
+ },
+ .probe = pisosr_gpio_probe,
+ .remove = pisosr_gpio_remove,
+ .id_table = pisosr_gpio_id_table,
+};
+module_spi_driver(pisosr_gpio_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("SPI Compatible PISO Shift Register GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index b2b7b78664b8..76ac906b4d78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
ret = pinctrl_gpio_direction_output(chip->base + offset);
- if (!ret)
- return 0;
+ if (ret)
+ return ret;
spin_lock_irqsave(&gpio_lock, flags);
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 1e2d210b3369..1d6100fa312a 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -136,15 +136,8 @@ static int rc5t583_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rc5t583_gpio);
- return gpiochip_add_data(&rc5t583_gpio->gpio_chip, rc5t583_gpio);
-}
-
-static int rc5t583_gpio_remove(struct platform_device *pdev)
-{
- struct rc5t583_gpio *rc5t583_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&rc5t583_gpio->gpio_chip);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &rc5t583_gpio->gpio_chip,
+ rc5t583_gpio);
}
static struct platform_driver rc5t583_gpio_driver = {
@@ -152,7 +145,6 @@ static struct platform_driver rc5t583_gpio_driver = {
.name = "rc5t583-gpio",
},
.probe = rc5t583_gpio_probe,
- .remove = rc5t583_gpio_remove,
};
static int __init rc5t583_gpio_init(void)
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d9ab0cd1d205..4d9a315cfd43 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,44 +196,6 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-static void gpio_rcar_irq_bus_lock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_get_sync(&p->pdev->dev);
-}
-
-static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_put(&p->pdev->dev);
-}
-
-
-static int gpio_rcar_irq_request_resources(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
- int error;
-
- error = pm_runtime_get_sync(&p->pdev->dev);
- if (error < 0)
- return error;
-
- return 0;
-}
-
-static void gpio_rcar_irq_release_resources(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_put(&p->pdev->dev);
-}
-
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
@@ -280,32 +242,18 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- int error;
-
- error = pm_runtime_get_sync(&p->pdev->dev);
- if (error < 0)
- return error;
-
- error = pinctrl_request_gpio(chip->base + offset);
- if (error)
- pm_runtime_put(&p->pdev->dev);
-
- return error;
+ return pinctrl_request_gpio(chip->base + offset);
}
static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-
pinctrl_free_gpio(chip->base + offset);
- /* Set the GPIO as an input to ensure that the next GPIO request won't
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
*/
gpio_rcar_config_general_input_output_mode(chip, offset, false);
-
- pm_runtime_put(&p->pdev->dev);
}
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -452,6 +400,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -488,10 +437,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
- irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
- irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
- irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
- irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add_data(gpio_chip, p);
@@ -522,6 +467,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
err1:
gpiochip_remove(gpio_chip);
err0:
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -532,6 +478,7 @@ static int gpio_rcar_remove(struct platform_device *pdev)
gpiochip_remove(&p->gpio_chip);
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index 96ddee3f464a..ec945b90f54d 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -194,23 +194,14 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "registering %d GPIOs\n",
rdc321x_gpio_dev->chip.ngpio);
- return gpiochip_add_data(&rdc321x_gpio_dev->chip, rdc321x_gpio_dev);
-}
-
-static int rdc321x_gpio_remove(struct platform_device *pdev)
-{
- struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
-
- gpiochip_remove(&rdc321x_gpio_dev->chip);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &rdc321x_gpio_dev->chip,
+ rdc321x_gpio_dev);
}
static struct platform_driver rdc321x_gpio_driver = {
.driver.name = "rdc321x-gpio",
.driver.owner = THIS_MODULE,
.probe = rdc321x_gpio_probe,
- .remove = rdc321x_gpio_remove,
};
module_platform_driver(rdc321x_gpio_driver);
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 5314ee4b947d..e85e7539cf5d 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -215,15 +215,7 @@ static int sch_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sch);
- return gpiochip_add_data(&sch->chip, sch);
-}
-
-static int sch_gpio_remove(struct platform_device *pdev)
-{
- struct sch_gpio *sch = platform_get_drvdata(pdev);
-
- gpiochip_remove(&sch->chip);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &sch->chip, sch);
}
static struct platform_driver sch_gpio_driver = {
@@ -231,7 +223,6 @@ static struct platform_driver sch_gpio_driver = {
.name = "sch_gpio",
},
.probe = sch_gpio_probe,
- .remove = sch_gpio_remove,
};
module_platform_driver(sch_gpio_driver);
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index 1cbd77a04e7b..a03b38ee2e02 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -12,6 +12,7 @@
* (at your option) any later version.
*/
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -228,7 +229,8 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
int err, i;
/* we can register all GPIO data registers at once */
- if (!request_region(pdata->runtime_reg + GP1, 6, DRV_NAME)) {
+ if (!devm_request_region(&pdev->dev, pdata->runtime_reg + GP1, 6,
+ DRV_NAME)) {
dev_err(&pdev->dev, "Failed to request region 0x%04x-0x%04x.\n",
pdata->runtime_reg + GP1, pdata->runtime_reg + GP1 + 5);
return -EBUSY;
@@ -273,7 +275,6 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
return 0;
exit_err:
- release_region(pdata->runtime_reg + GP1, 6);
/* release already registered chips */
for (--i; i >= 0; i--)
gpiochip_remove(&priv->blocks[i].chip);
@@ -282,12 +283,9 @@ exit_err:
static int sch311x_gpio_remove(struct platform_device *pdev)
{
- struct sch311x_pdev_data *pdata = dev_get_platdata(&pdev->dev);
struct sch311x_gpio_priv *priv = platform_get_drvdata(pdev);
int i;
- release_region(pdata->runtime_reg + GP1, 6);
-
for (i = 0; i < ARRAY_SIZE(priv->blocks); i++) {
gpiochip_remove(&priv->blocks[i].chip);
dev_info(&pdev->dev,
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 50fb09080a6b..7ffd16495286 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -165,7 +165,7 @@ static int spics_gpio_probe(struct platform_device *pdev)
spics->chip.owner = THIS_MODULE;
spics->last_off = -1;
- ret = gpiochip_add_data(&spics->chip, spics);
+ ret = devm_gpiochip_add_data(&pdev->dev, &spics->chip, spics);
if (ret) {
dev_err(&pdev->dev, "unable to add gpio chip\n");
return ret;
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 83af1cb36333..0d5b8c525dd9 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -409,7 +409,7 @@ static int gsta_probe(struct platform_device *dev)
goto err_free_descs;
}
- err = gpiochip_add_data(&chip->gpio, chip);
+ err = devm_gpiochip_add_data(&dev->dev, &chip->gpio, chip);
if (err < 0) {
dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
-err);
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index d11dd48570b2..19e654f88b3a 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -258,7 +258,7 @@ static int xway_stp_probe(struct platform_device *pdev)
ret = xway_stp_hw_init(chip);
if (!ret)
- ret = gpiochip_add_data(&chip->gc, chip);
+ ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
if (!ret)
dev_info(&pdev->dev, "Init done\n");
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index e6cff1cabd0c..d387eb524bf3 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -687,7 +687,7 @@ static int sx150x_probe(struct i2c_client *client,
if (rc < 0)
return rc;
- rc = gpiochip_add_data(&chip->gpio_chip, chip);
+ rc = devm_gpiochip_add_data(&client->dev, &chip->gpio_chip, chip);
if (rc)
return rc;
@@ -696,25 +696,12 @@ static int sx150x_probe(struct i2c_client *client,
pdata->irq_summary,
pdata->irq_base);
if (rc < 0)
- goto probe_fail_post_gpiochip_add;
+ return rc;
}
i2c_set_clientdata(client, chip);
return 0;
-probe_fail_post_gpiochip_add:
- gpiochip_remove(&chip->gpio_chip);
- return rc;
-}
-
-static int sx150x_remove(struct i2c_client *client)
-{
- struct sx150x_chip *chip;
-
- chip = i2c_get_clientdata(client);
- gpiochip_remove(&chip->gpio_chip);
-
- return 0;
}
static struct i2c_driver sx150x_driver = {
@@ -723,7 +710,6 @@ static struct i2c_driver sx150x_driver = {
.of_match_table = of_match_ptr(sx150x_of_match),
},
.probe = sx150x_probe,
- .remove = sx150x_remove,
.id_table = sx150x_id,
};
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index e5c5b6205886..24b6d643ecdb 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -238,15 +238,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- return gpiochip_add_data(&priv->chip, priv);
-}
-
-static int syscon_gpio_remove(struct platform_device *pdev)
-{
- struct syscon_gpio_priv *priv = platform_get_drvdata(pdev);
-
- gpiochip_remove(&priv->chip);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
}
static struct platform_driver syscon_gpio_driver = {
@@ -255,7 +247,6 @@ static struct platform_driver syscon_gpio_driver = {
.of_match_table = syscon_gpio_ids,
},
.probe = syscon_gpio_probe,
- .remove = syscon_gpio_remove,
};
module_platform_driver(syscon_gpio_driver);
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 5eaec20ddbc7..80b6959ae995 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -205,10 +205,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
tb10x_gpio->gc.can_sleep = false;
- ret = gpiochip_add_data(&tb10x_gpio->gc, tb10x_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tb10x_gpio->gc, tb10x_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not add gpiochip.\n");
- goto fail_gpiochip_registration;
+ return ret;
}
platform_set_drvdata(pdev, tb10x_gpio);
@@ -219,7 +219,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(&pdev->dev, "No interrupt specified.\n");
- goto fail_get_irq;
+ return ret;
}
tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
@@ -229,14 +229,13 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_NONE | IRQF_SHARED,
dev_name(&pdev->dev), tb10x_gpio);
if (ret != 0)
- goto fail_request_irq;
+ return ret;
tb10x_gpio->domain = irq_domain_add_linear(dn,
tb10x_gpio->gc.ngpio,
&irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
- ret = -ENOMEM;
- goto fail_irq_domain;
+ return -ENOMEM;
}
ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain,
@@ -244,7 +243,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
- goto fail_irq_domain;
+ return ret;
gc = tb10x_gpio->domain->gc->gc[0];
gc->reg_base = tb10x_gpio->base;
@@ -258,14 +257,6 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
}
return 0;
-
-fail_irq_domain:
-fail_request_irq:
-fail_get_irq:
- gpiochip_remove(&tb10x_gpio->gc);
-fail_gpiochip_registration:
-fail_ioremap:
- return ret;
}
static int tb10x_gpio_remove(struct platform_device *pdev)
@@ -278,7 +269,6 @@ static int tb10x_gpio_remove(struct platform_device *pdev)
kfree(tb10x_gpio->domain->gc);
irq_domain_remove(tb10x_gpio->domain);
}
- gpiochip_remove(&tb10x_gpio->gc);
return 0;
}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 05a27ec55add..4f566e6b81f1 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -272,7 +272,8 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_add_data(&tc3589x_gpio->chip, tc3589x_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tc3589x_gpio->chip,
+ tc3589x_gpio);
if (ret) {
dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
return ret;
@@ -299,20 +300,10 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int tc3589x_gpio_remove(struct platform_device *pdev)
-{
- struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&tc3589x_gpio->chip);
-
- return 0;
-}
-
static struct platform_driver tc3589x_gpio_driver = {
.driver.name = "tc3589x-gpio",
.driver.owner = THIS_MODULE,
.probe = tc3589x_gpio_probe,
- .remove = tc3589x_gpio_remove,
};
static int __init tc3589x_gpio_init(void)
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9a1a7e2ef388..790bb111b2cb 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -545,7 +545,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tegra_gpio_chip.of_node = pdev->dev.of_node;
- ret = gpiochip_add_data(&tegra_gpio_chip, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tegra_gpio_chip, NULL);
if (ret < 0) {
irq_domain_remove(irq_domain);
return ret;
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index a6de10c5275b..85ed608c2b27 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -237,12 +237,6 @@ static int timbgpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem) {
- dev_err(dev, "Unable to get resource\n");
- return -EINVAL;
- }
-
tgpio = devm_kzalloc(dev, sizeof(struct timbgpio), GFP_KERNEL);
if (!tgpio) {
dev_err(dev, "Memory alloc failed\n");
@@ -252,17 +246,10 @@ static int timbgpio_probe(struct platform_device *pdev)
spin_lock_init(&tgpio->lock);
- if (!devm_request_mem_region(dev, iomem->start, resource_size(iomem),
- DRIVER_NAME)) {
- dev_err(dev, "Region already claimed\n");
- return -EBUSY;
- }
-
- tgpio->membase = devm_ioremap(dev, iomem->start, resource_size(iomem));
- if (!tgpio->membase) {
- dev_err(dev, "Cannot ioremap\n");
- return -ENOMEM;
- }
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tgpio->membase = devm_ioremap_resource(dev, iomem);
+ if (IS_ERR(tgpio->membase))
+ return PTR_ERR(tgpio->membase);
gc = &tgpio->gpio;
@@ -279,7 +266,7 @@ static int timbgpio_probe(struct platform_device *pdev)
gc->ngpio = pdata->nr_pins;
gc->can_sleep = false;
- err = gpiochip_add_data(gc, tgpio);
+ err = devm_gpiochip_add_data(&pdev->dev, gc, tgpio);
if (err)
return err;
@@ -320,8 +307,6 @@ static int timbgpio_remove(struct platform_device *pdev)
irq_set_handler_data(irq, NULL);
}
- gpiochip_remove(&tgpio->gpio);
-
return 0;
}
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
new file mode 100644
index 000000000000..9f020aa4b067
--- /dev/null
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#define TPIC2810_WS_COMMAND 0x44
+
+/**
+ * struct tpic2810 - GPIO driver data
+ * @chip: GPIO controller chip
+ * @client: I2C device pointer
+ * @buffer: Buffer for device register
+ * @lock: Protects write sequences
+ */
+struct tpic2810 {
+ struct gpio_chip chip;
+ struct i2c_client *client;
+ u8 buffer;
+ struct mutex lock;
+};
+
+static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value);
+
+static int tpic2810_get_direction(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is output only */
+ return 0;
+}
+
+static int tpic2810_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is output only */
+ return -EINVAL;
+}
+
+static int tpic2810_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ /* This device is output only */
+ tpic2810_set(chip, offset, value);
+ return 0;
+}
+
+static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct tpic2810 *gpio = gpiochip_get_data(chip);
+
+ mutex_lock(&gpio->lock);
+
+ if (value)
+ gpio->buffer |= BIT(offset);
+ else
+ gpio->buffer &= ~BIT(offset);
+
+ i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
+ gpio->buffer);
+
+ mutex_unlock(&gpio->lock);
+}
+
+static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct tpic2810 *gpio = gpiochip_get_data(chip);
+
+ mutex_lock(&gpio->lock);
+
+ /* clear bits under mask */
+ gpio->buffer &= ~(*mask);
+ /* set bits under mask */
+ gpio->buffer |= ((*mask) & (*bits));
+
+ i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
+ gpio->buffer);
+
+ mutex_unlock(&gpio->lock);
+}
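set_multiple above is a masked read-modify-write on the cached output byte; the update rule, isolated as a sketch:

/* new = (cached cleared under mask) OR (requested bits under mask) */
static u8 masked_update(u8 cached, u8 mask, u8 bits)
{
	return (cached & ~mask) | (bits & mask);
}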
+
+static struct gpio_chip template_chip = {
+ .label = "tpic2810",
+ .owner = THIS_MODULE,
+ .get_direction = tpic2810_get_direction,
+ .direction_input = tpic2810_direction_input,
+ .direction_output = tpic2810_direction_output,
+ .set = tpic2810_set,
+ .set_multiple = tpic2810_set_multiple,
+ .base = -1,
+ .ngpio = 8,
+ .can_sleep = true,
+};
+
+static const struct of_device_id tpic2810_of_match_table[] = {
+ { .compatible = "ti,tpic2810" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tpic2810_of_match_table);
+
+static int tpic2810_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tpic2810 *gpio;
+ int ret;
+
+ gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, gpio);
+
+ gpio->chip = template_chip;
+ gpio->chip.parent = &client->dev;
+
+ gpio->client = client;
+
+ mutex_init(&gpio->lock);
+
+ ret = gpiochip_add_data(&gpio->chip, gpio);
+ if (ret < 0) {
+ dev_err(&client->dev, "Unable to register gpiochip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tpic2810_remove(struct i2c_client *client)
+{
+ struct tpic2810 *gpio = i2c_get_clientdata(client);
+
+ gpiochip_remove(&gpio->chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id tpic2810_id_table[] = {
+ { "tpic2810", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, tpic2810_id_table);
+
+static struct i2c_driver tpic2810_driver = {
+ .driver = {
+ .name = "tpic2810",
+ .of_match_table = tpic2810_of_match_table,
+ },
+ .probe = tpic2810_probe,
+ .remove = tpic2810_remove,
+ .id_table = tpic2810_id_table,
+};
+module_i2c_driver(tpic2810_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPIC2810 8-Bit LED Driver GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
new file mode 100644
index 000000000000..8e25f01ac314
--- /dev/null
+++ b/drivers/gpio/gpio-tps65086.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65912 driver
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/tps65086.h>
+
+struct tps65086_gpio {
+ struct gpio_chip chip;
+ struct tps65086 *tps;
+};
+
+static int tps65086_gpio_get_direction(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is output only */
+ return 0;
+}
+
+static int tps65086_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is output only */
+ return -EINVAL;
+}
+
+static int tps65086_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct tps65086_gpio *gpio = gpiochip_get_data(chip);
+
+ /* Set the initial value */
+ regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
+
+ return 0;
+}
+
+static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct tps65086_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->tps->regmap, TPS65086_GPOCTRL, &val);
+ if (ret < 0)
+ return ret;
+
+ return val & BIT(4 + offset);
+}
+
+static void tps65086_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct tps65086_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tps65086-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = tps65086_gpio_get_direction,
+ .direction_input = tps65086_gpio_direction_input,
+ .direction_output = tps65086_gpio_direction_output,
+ .get = tps65086_gpio_get,
+ .set = tps65086_gpio_set,
+ .base = -1,
+ .ngpio = 4,
+ .can_sleep = true,
+};
+
+static int tps65086_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65086_gpio *gpio;
+ int ret;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gpio);
+
+ gpio->tps = dev_get_drvdata(pdev->dev.parent);
+ gpio->chip = template_chip;
+ gpio->chip.parent = gpio->tps->dev;
+
+ ret = gpiochip_add_data(&gpio->chip, gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps65086_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65086_gpio *gpio = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&gpio->chip);
+
+ return 0;
+}
+
+static const struct platform_device_id tps65086_gpio_id_table[] = {
+ { "tps65086-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65086_gpio_id_table);
+
+static struct platform_driver tps65086_gpio_driver = {
+ .driver = {
+ .name = "tps65086-gpio",
+ },
+ .probe = tps65086_gpio_probe,
+ .remove = tps65086_gpio_remove,
+ .id_table = tps65086_gpio_id_table,
+};
+module_platform_driver(tps65086_gpio_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65086 GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
new file mode 100644
index 000000000000..313c0e484607
--- /dev/null
+++ b/drivers/gpio/gpio-tps65218.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2015 Verifone Int.
+ *
+ * Author: Nicolas Saenz Julienne <nicolassaenzj@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on the gpio-tps65912 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/tps65218.h>
+
+struct tps65218_gpio {
+ struct tps65218 *tps65218;
+ struct gpio_chip gpio_chip;
+};
+
+static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
+ struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+ unsigned int val;
+ int ret;
+
+ ret = tps65218_reg_read(tps65218, TPS65218_REG_ENABLE2, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & (TPS65218_ENABLE2_GPIO1 << offset));
+}
+
+static void tps65218_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
+ struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+
+ if (value)
+ tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
+ else
+ tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
+}
+
+static int tps65218_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ /* Only drives GPOs */
+ tps65218_gpio_set(gc, offset, value);
+ return 0;
+}
+
+static int tps65218_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ return -EPERM;
+}
+
+static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
+ struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+ int ret;
+
+ if (gpiochip_line_is_open_source(gc, offset)) {
+ dev_err(gc->parent, "can't work as open source\n");
+ return -EINVAL;
+ }
+
+ switch (offset) {
+ case 0:
+ if (!gpiochip_line_is_open_drain(gc, offset)) {
+ dev_err(gc->parent, "GPO1 works only as open drain\n");
+ return -EINVAL;
+ }
+
+ /* Disable sequencer for GPO1 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_SEQ7,
+ TPS65218_SEQ7_GPO1_SEQ_MASK,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ /* Setup GPO1 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG1,
+ TPS65218_CONFIG1_IO1_SEL,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ break;
+ case 1:
+ /* GPO2 is push-pull by default, can be set as open drain. */
+ if (gpiochip_line_is_open_drain(gc, offset)) {
+ ret = tps65218_clear_bits(tps65218,
+ TPS65218_REG_CONFIG1,
+ TPS65218_CONFIG1_GPO2_BUF,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+ }
+
+ /* Setup GPO2 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG1,
+ TPS65218_CONFIG1_IO1_SEL,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ break;
+
+ case 2:
+ if (!gpiochip_line_is_open_drain(gc, offset)) {
+ dev_err(gc->parent, "GPO3 works only as open drain\n");
+ return -EINVAL;
+ }
+
+ /* Disable sequencer for GPO3 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_SEQ7,
+ TPS65218_SEQ7_GPO3_SEQ_MASK,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ /* Setup GPO3 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG2,
+ TPS65218_CONFIG2_DC12_RST,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct gpio_chip template_chip = {
+ .label = "gpio-tps65218",
+ .owner = THIS_MODULE,
+ .request = tps65218_gpio_request,
+ .direction_output = tps65218_gpio_output,
+ .direction_input = tps65218_gpio_input,
+ .get = tps65218_gpio_get,
+ .set = tps65218_gpio_set,
+ .can_sleep = true,
+ .ngpio = 3,
+ .base = -1,
+};
+
+static int tps65218_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65218 *tps65218 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65218_gpio *tps65218_gpio;
+ int ret;
+
+ tps65218_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps65218_gpio),
+ GFP_KERNEL);
+ if (!tps65218_gpio)
+ return -ENOMEM;
+
+ tps65218_gpio->tps65218 = tps65218;
+ tps65218_gpio->gpio_chip = template_chip;
+ tps65218_gpio->gpio_chip.parent = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ tps65218_gpio->gpio_chip.of_node = pdev->dev.of_node;
+#endif
+
+ ret = gpiochip_add_data(&tps65218_gpio->gpio_chip, tps65218_gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, tps65218_gpio);
+
+ return ret;
+}
+
+static int tps65218_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65218_gpio *tps65218_gpio = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&tps65218_gpio->gpio_chip);
+
+ return 0;
+}
+
+static const struct of_device_id tps65218_dt_match[] = {
+ { .compatible = "ti,tps65218-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tps65218_dt_match);
+
+static struct platform_driver tps65218_gpio_driver = {
+ .driver = {
+ .name = "tps65218-gpio",
+ .of_match_table = of_match_ptr(tps65218_dt_match)
+ },
+ .probe = tps65218_gpio_probe,
+ .remove = tps65218_gpio_remove,
+};
+
+module_platform_driver(tps65218_gpio_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nicolassaenzj@gmail.com>");
+MODULE_DESCRIPTION("GPO interface for TPS65218 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65218-gpio");
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index 87de5486a29e..c88bdc8ee2c9 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -117,7 +117,8 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
else
tps6586x_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&tps6586x_gpio->gpio_chip, tps6586x_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tps6586x_gpio->gpio_chip,
+ tps6586x_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -128,19 +129,10 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int tps6586x_gpio_remove(struct platform_device *pdev)
-{
- struct tps6586x_gpio *tps6586x_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&tps6586x_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver tps6586x_gpio_driver = {
.driver.name = "tps6586x-gpio",
.driver.owner = THIS_MODULE,
.probe = tps6586x_gpio_probe,
- .remove = tps6586x_gpio_remove,
};
static int __init tps6586x_gpio_init(void)
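
This conversion, and the similar ones below, replaces an explicit .remove callback with devm_gpiochip_add_data(), which queues gpiochip_remove() on the device's resource list so it runs automatically when the device unbinds, in reverse order of registration. A minimal userspace sketch of that devres idea, assuming a fixed-size action stack (the kernel hangs the real list off struct device):

#include <stdio.h>

typedef void (*release_fn)(void *);

static struct { release_fn fn; void *data; } actions[8];
static int nactions;

/* Model of devres registration: remember a cleanup action */
static void devm_add_action(release_fn fn, void *data)
{
	actions[nactions].fn = fn;
	actions[nactions].data = data;
	nactions++;
}

/* Model of unbind: run the queued actions in reverse order */
static void device_unbind(void)
{
	while (nactions--)
		actions[nactions].fn(actions[nactions].data);
}

static void gpiochip_release(void *name)
{
	printf("gpiochip_remove(%s)\n", (const char *)name);
}

int main(void)
{
	/* probe: devm_gpiochip_add_data() queues the removal for us */
	devm_add_action(gpiochip_release, "tps6586x-gpio");
	/* ... driver lifetime; no .remove callback is needed ... */
	device_unbind();
	return 0;
}
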
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index e81eee7627a3..cdbd7c740043 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -170,7 +170,8 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
}
skip_init:
- ret = gpiochip_add_data(&tps65910_gpio->gpio_chip, tps65910_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tps65910_gpio->gpio_chip,
+ tps65910_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -181,19 +182,10 @@ skip_init:
return ret;
}
-static int tps65910_gpio_remove(struct platform_device *pdev)
-{
- struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&tps65910_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver tps65910_gpio_driver = {
.driver.name = "tps65910-gpio",
.driver.owner = THIS_MODULE,
.probe = tps65910_gpio_probe,
- .remove = tps65910_gpio_remove,
};
static int __init tps65910_gpio_init(void)
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 4f2029c7da3a..acfd30a13a56 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -1,151 +1,149 @@
/*
- * Copyright 2011 Texas Instruments Inc.
+ * GPIO driver for TI TPS65912x PMICs
*
- * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This driver is based on wm8350 implementation.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the Arizona GPIO driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
#include <linux/gpio.h>
-#include <linux/mfd/core.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
+
#include <linux/mfd/tps65912.h>
-struct tps65912_gpio_data {
- struct tps65912 *tps65912;
+struct tps65912_gpio {
struct gpio_chip gpio_chip;
+ struct tps65912 *tps;
};
-static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+static int tps65912_gpio_get_direction(struct gpio_chip *gc,
+ unsigned offset)
{
- struct tps65912_gpio_data *tps65912_gpio = gpiochip_get_data(gc);
- struct tps65912 *tps65912 = tps65912_gpio->tps65912;
- int val;
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
- val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
+ int ret, val;
- if (val & GPIO_STS_MASK)
- return 1;
+ ret = regmap_read(gpio->tps->regmap, TPS65912_GPIO1 + offset, &val);
+ if (ret)
+ return ret;
- return 0;
+ if (val & GPIO_CFG_MASK)
+ return GPIOF_DIR_OUT;
+ else
+ return GPIOF_DIR_IN;
}
-static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65912_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
- struct tps65912_gpio_data *tps65912_gpio = gpiochip_get_data(gc);
- struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
- if (value)
- tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK);
- else
- tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK);
+ return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK, 0);
}
-static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65912_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset, int value)
{
- struct tps65912_gpio_data *tps65912_gpio = gpiochip_get_data(gc);
- struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
/* Set the initial value */
- tps65912_gpio_set(gc, offset, value);
+ regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+
+ return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK, GPIO_CFG_MASK);
+}
+
+static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
+ int ret, val;
+
+ ret = regmap_read(gpio->tps->regmap, TPS65912_GPIO1 + offset, &val);
+ if (ret)
+ return ret;
+
+ if (val & GPIO_STS_MASK)
+ return 1;
- return tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
- GPIO_CFG_MASK);
+ return 0;
}
-static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
+static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
{
- struct tps65912_gpio_data *tps65912_gpio = gpiochip_get_data(gc);
- struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
- return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
- GPIO_CFG_MASK);
+ regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
}
static struct gpio_chip template_chip = {
- .label = "tps65912",
+ .label = "tps65912-gpio",
.owner = THIS_MODULE,
- .direction_input = tps65912_gpio_input,
- .direction_output = tps65912_gpio_output,
+ .get_direction = tps65912_gpio_get_direction,
+ .direction_input = tps65912_gpio_direction_input,
+ .direction_output = tps65912_gpio_direction_output,
.get = tps65912_gpio_get,
.set = tps65912_gpio_set,
- .can_sleep = true,
- .ngpio = 5,
.base = -1,
+ .ngpio = 5,
+ .can_sleep = true,
};
static int tps65912_gpio_probe(struct platform_device *pdev)
{
- struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
- struct tps65912_board *pdata = dev_get_platdata(tps65912->dev);
- struct tps65912_gpio_data *tps65912_gpio;
+ struct tps65912 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct tps65912_gpio *gpio;
int ret;
- tps65912_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps65912_gpio),
- GFP_KERNEL);
- if (tps65912_gpio == NULL)
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
return -ENOMEM;
- tps65912_gpio->tps65912 = tps65912;
- tps65912_gpio->gpio_chip = template_chip;
- tps65912_gpio->gpio_chip.parent = &pdev->dev;
- if (pdata && pdata->gpio_base)
- tps65912_gpio->gpio_chip.base = pdata->gpio_base;
+ gpio->tps = dev_get_drvdata(pdev->dev.parent);
+ gpio->gpio_chip = template_chip;
+ gpio->gpio_chip.parent = tps->dev;
- ret = gpiochip_add_data(&tps65912_gpio->gpio_chip, tps65912_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip,
+ gpio);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, tps65912_gpio);
-
- return ret;
-}
+ platform_set_drvdata(pdev, gpio);
-static int tps65912_gpio_remove(struct platform_device *pdev)
-{
- struct tps65912_gpio_data *tps65912_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&tps65912_gpio->gpio_chip);
return 0;
}
+static const struct platform_device_id tps65912_gpio_id_table[] = {
+ { "tps65912-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65912_gpio_id_table);
+
static struct platform_driver tps65912_gpio_driver = {
.driver = {
.name = "tps65912-gpio",
},
.probe = tps65912_gpio_probe,
- .remove = tps65912_gpio_remove,
+ .id_table = tps65912_gpio_id_table,
};
+module_platform_driver(tps65912_gpio_driver);
-static int __init tps65912_gpio_init(void)
-{
- return platform_driver_register(&tps65912_gpio_driver);
-}
-subsys_initcall(tps65912_gpio_init);
-
-static void __exit tps65912_gpio_exit(void)
-{
- platform_driver_unregister(&tps65912_gpio_driver);
-}
-module_exit(tps65912_gpio_exit);
-
-MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
-MODULE_DESCRIPTION("GPIO interface for TPS65912 PMICs");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65912 GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tps65912-gpio");
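
In the rewritten driver each of the five lines has its own register at TPS65912_GPIO1 + offset, with GPIO_CFG_MASK selecting output mode, GPIO_SET_MASK the driven level and GPIO_STS_MASK the read-back state. A sketch of the direction decode on a raw register value (the mask values here are hypothetical stand-ins for the definitions in <linux/mfd/tps65912.h>):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the masks from <linux/mfd/tps65912.h> */
#define GPIO_CFG_MASK	0x04	/* 1 = configured as output */
#define GPIO_SET_MASK	0x01	/* driven level */

#define GPIOF_DIR_OUT	0
#define GPIOF_DIR_IN	1

/* Model of tps65912_gpio_get_direction() on a register value */
static int get_direction(uint8_t reg)
{
	return (reg & GPIO_CFG_MASK) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
}

int main(void)
{
	uint8_t reg = GPIO_CFG_MASK | GPIO_SET_MASK;	/* output, high */

	printf("direction = %s\n",
	       get_direction(reg) == GPIOF_DIR_OUT ? "out" : "in");
	return 0;
}
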
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
new file mode 100644
index 000000000000..0c144a72f9af
--- /dev/null
+++ b/drivers/gpio/gpio-ts4800.c
@@ -0,0 +1,83 @@
+/*
+ * GPIO driver for the TS-4800 board
+ *
+ * Copyright (c) 2016 - Savoir-faire Linux
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define DEFAULT_PIN_NUMBER 16
+#define INPUT_REG_OFFSET 0x00
+#define OUTPUT_REG_OFFSET 0x02
+#define DIRECTION_REG_OFFSET 0x04
+
+static int ts4800_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ struct gpio_chip *chip;
+ struct resource *res;
+ void __iomem *base_addr;
+ int retval;
+ u32 ngpios;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(struct gpio_chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base_addr))
+ return PTR_ERR(base_addr);
+
+ node = pdev->dev.of_node;
+ if (!node)
+ return -EINVAL;
+
+ retval = of_property_read_u32(node, "ngpios", &ngpios);
+ if (retval == -EINVAL)
+ ngpios = DEFAULT_PIN_NUMBER;
+ else if (retval)
+ return retval;
+
+ retval = bgpio_init(chip, &pdev->dev, 2, base_addr + INPUT_REG_OFFSET,
+ base_addr + OUTPUT_REG_OFFSET, NULL,
+ base_addr + DIRECTION_REG_OFFSET, NULL, 0);
+ if (retval) {
+ dev_err(&pdev->dev, "bgpio_init failed\n");
+ return retval;
+ }
+
+ chip->ngpio = ngpios;
+
+ platform_set_drvdata(pdev, chip);
+
+ return devm_gpiochip_add_data(&pdev->dev, chip, NULL);
+}
+
+static const struct of_device_id ts4800_gpio_of_match[] = {
+ { .compatible = "technologic,ts4800-gpio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match);
+
+static struct platform_driver ts4800_gpio_driver = {
+ .driver = {
+ .name = "ts4800-gpio",
+ .of_match_table = ts4800_gpio_of_match,
+ },
+ .probe = ts4800_gpio_probe,
+};
+
+module_platform_driver(ts4800_gpio_driver);
+
+MODULE_AUTHOR("Julien Grossholtz <julien.grossholtz@savoirfairelinux.com>");
+MODULE_DESCRIPTION("TS4800 FPGA GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index 5f945083f9d8..6cfeba07f882 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -409,7 +409,7 @@ static int ts5500_dio_probe(struct platform_device *pdev)
break;
}
- ret = gpiochip_add_data(&priv->gpio_chip, priv);
+ ret = devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
if (ret) {
dev_err(dev, "failed to register the gpio chip\n");
return ret;
@@ -418,13 +418,10 @@ static int ts5500_dio_probe(struct platform_device *pdev)
ret = ts5500_enable_irq(priv);
if (ret) {
dev_err(dev, "invalid interrupt %d\n", priv->hwirq);
- goto cleanup;
+ return ret;
}
return 0;
-cleanup:
- gpiochip_remove(&priv->gpio_chip);
- return ret;
}
static int ts5500_dio_remove(struct platform_device *pdev)
@@ -432,7 +429,7 @@ static int ts5500_dio_remove(struct platform_device *pdev)
struct ts5500_priv *priv = platform_get_drvdata(pdev);
ts5500_disable_irq(priv);
- gpiochip_remove(&priv->gpio_chip);
+
return 0;
}
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index 8e9e9853f3bd..b780314cdfc9 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -100,7 +100,7 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
twl6040gpo_chip.of_node = twl6040_core_dev->of_node;
#endif
- ret = gpiochip_add_data(&twl6040gpo_chip, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, NULL);
if (ret < 0) {
dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret);
twl6040gpo_chip.ngpio = 0;
@@ -109,12 +109,6 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
return ret;
}
-static int gpo_twl6040_remove(struct platform_device *pdev)
-{
- gpiochip_remove(&twl6040gpo_chip);
- return 0;
-}
-
/* Note: this hardware lives inside an I2C-based multi-function device. */
MODULE_ALIAS("platform:twl6040-gpo");
@@ -123,7 +117,6 @@ static struct platform_driver gpo_twl6040_driver = {
.name = "twl6040-gpo",
},
.probe = gpo_twl6040_probe,
- .remove = gpo_twl6040_remove,
};
module_platform_driver(gpo_twl6040_driver);
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 2c5cd46bfa6e..5dbe31bf6699 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -67,7 +67,7 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
ucb->gc.set = ucb1400_gpio_set;
ucb->gc.can_sleep = true;
- err = gpiochip_add_data(&ucb->gc, ucb);
+ err = devm_gpiochip_add_data(&dev->dev, &ucb->gc, ucb);
if (err)
goto err;
@@ -90,7 +90,6 @@ static int ucb1400_gpio_remove(struct platform_device *dev)
return err;
}
- gpiochip_remove(&ucb->gc);
return err;
}
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index 1170b035cb92..dec47aafd5cd 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -410,10 +410,10 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpioa.get = vprbrd_gpioa_get;
vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
- ret = gpiochip_add_data(&vb_gpio->gpioa, vb_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &vb_gpio->gpioa, vb_gpio);
if (ret < 0) {
dev_err(vb_gpio->gpioa.parent, "could not add gpio a");
- goto err_gpioa;
+ return ret;
}
/* registering gpio b */
@@ -427,37 +427,21 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpiob.get = vprbrd_gpiob_get;
vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
- ret = gpiochip_add_data(&vb_gpio->gpiob, vb_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &vb_gpio->gpiob, vb_gpio);
if (ret < 0) {
dev_err(vb_gpio->gpiob.parent, "could not add gpio b");
- goto err_gpiob;
+ return ret;
}
platform_set_drvdata(pdev, vb_gpio);
return ret;
-
-err_gpiob:
- gpiochip_remove(&vb_gpio->gpioa);
-
-err_gpioa:
- return ret;
-}
-
-static int vprbrd_gpio_remove(struct platform_device *pdev)
-{
- struct vprbrd_gpio *vb_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&vb_gpio->gpiob);
-
- return 0;
}
static struct platform_driver vprbrd_gpio_driver = {
.driver.name = "viperboard-gpio",
.driver.owner = THIS_MODULE,
.probe = vprbrd_gpio_probe,
- .remove = vprbrd_gpio_remove,
};
static int __init vprbrd_gpio_init(void)
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 764999cc0794..8cdb9f7ec7e0 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -259,16 +259,7 @@ static int vx855gpio_probe(struct platform_device *pdev)
vx855gpio_gpio_setup(vg);
- return gpiochip_add_data(&vg->gpio, vg);
-}
-
-static int vx855gpio_remove(struct platform_device *pdev)
-{
- struct vx855_gpio *vg = platform_get_drvdata(pdev);
-
- gpiochip_remove(&vg->gpio);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &vg->gpio, vg);
}
static struct platform_driver vx855gpio_driver = {
@@ -276,7 +267,6 @@ static struct platform_driver vx855gpio_driver = {
.name = MODULE_NAME,
},
.probe = vx855gpio_probe,
- .remove = vx855gpio_remove,
};
module_platform_driver(vx855gpio_driver);
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 98390070fb64..18cb0f534b91 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -259,7 +259,8 @@ static int wm831x_gpio_probe(struct platform_device *pdev)
else
wm831x_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&wm831x_gpio->gpio_chip, wm831x_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &wm831x_gpio->gpio_chip,
+ wm831x_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -270,19 +271,10 @@ static int wm831x_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int wm831x_gpio_remove(struct platform_device *pdev)
-{
- struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&wm831x_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver wm831x_gpio_driver = {
.driver.name = "wm831x-gpio",
.driver.owner = THIS_MODULE,
.probe = wm831x_gpio_probe,
- .remove = wm831x_gpio_remove,
};
static int __init wm831x_gpio_init(void)
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 0a306b4baa73..07d45a3b205a 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -125,7 +125,8 @@ static int wm8350_gpio_probe(struct platform_device *pdev)
else
wm8350_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&wm8350_gpio->gpio_chip, wm8350_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &wm8350_gpio->gpio_chip,
+ wm8350_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -136,19 +137,10 @@ static int wm8350_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int wm8350_gpio_remove(struct platform_device *pdev)
-{
- struct wm8350_gpio_data *wm8350_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&wm8350_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver wm8350_gpio_driver = {
.driver.name = "wm8350-gpio",
.driver.owner = THIS_MODULE,
.probe = wm8350_gpio_probe,
- .remove = wm8350_gpio_remove,
};
static int __init wm8350_gpio_init(void)
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 3ae4c1597494..b089df99a0d0 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -261,34 +261,23 @@ static int wm8994_gpio_probe(struct platform_device *pdev)
else
wm8994_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&wm8994_gpio->gpio_chip, wm8994_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &wm8994_gpio->gpio_chip,
+ wm8994_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, wm8994_gpio);
return ret;
-
-err:
- return ret;
-}
-
-static int wm8994_gpio_remove(struct platform_device *pdev)
-{
- struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&wm8994_gpio->gpio_chip);
- return 0;
}
static struct platform_driver wm8994_gpio_driver = {
.driver.name = "wm8994-gpio",
.driver.owner = THIS_MODULE,
.probe = wm8994_gpio_probe,
- .remove = wm8994_gpio_remove,
};
static int __init wm8994_gpio_init(void)
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
new file mode 100644
index 000000000000..51f41e8fd21e
--- /dev/null
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -0,0 +1,427 @@
+/*
+ * GPIO driver for the WinSystems WS16C48
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+static unsigned ws16c48_base;
+module_param(ws16c48_base, uint, 0);
+MODULE_PARM_DESC(ws16c48_base, "WinSystems WS16C48 base address");
+static unsigned ws16c48_irq;
+module_param(ws16c48_irq, uint, 0);
+MODULE_PARM_DESC(ws16c48_irq, "WinSystems WS16C48 interrupt line number");
+
+/**
+ * struct ws16c48_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @io_state: bit I/O state (whether bit is set to input or output)
+ * @out_state: output bits state
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @irq_mask: I/O bits affected by interrupts
+ * @flow_mask: IRQ flow type mask for the respective I/O bits
+ * @base: base port address of the GPIO device
+ * @irq: Interrupt line number
+ */
+struct ws16c48_gpio {
+ struct gpio_chip chip;
+ unsigned char io_state[6];
+ unsigned char out_state[6];
+ spinlock_t lock;
+ unsigned long irq_mask;
+ unsigned long flow_mask;
+ unsigned base;
+ unsigned irq;
+};
+
+static int ws16c48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+
+ return !!(ws16c48gpio->io_state[port] & mask);
+}
+
+static int ws16c48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ ws16c48gpio->io_state[port] |= mask;
+ ws16c48gpio->out_state[port] &= ~mask;
+ outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+
+ return 0;
+}
+
+static int ws16c48_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ ws16c48gpio->io_state[port] &= ~mask;
+ if (value)
+ ws16c48gpio->out_state[port] |= mask;
+ else
+ ws16c48gpio->out_state[port] &= ~mask;
+ outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+
+ return 0;
+}
+
+static int ws16c48_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+ unsigned port_state;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ /* ensure that GPIO is set for input */
+ if (!(ws16c48gpio->io_state[port] & mask)) {
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ return -EINVAL;
+ }
+
+ port_state = inb(ws16c48gpio->base + port);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+
+ return !!(port_state & mask);
+}
+
+static void ws16c48_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ /* ensure that GPIO is set for output */
+ if (ws16c48gpio->io_state[port] & mask) {
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ return;
+ }
+
+ if (value)
+ ws16c48gpio->out_state[port] |= mask;
+ else
+ ws16c48gpio->out_state[port] &= ~mask;
+ outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+}
+
+static void ws16c48_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+ unsigned port_state;
+
+ /* only the first 3 ports support interrupts */
+ if (port > 2)
+ return;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ port_state = ws16c48gpio->irq_mask >> (8*port);
+
+ outb(0x80, ws16c48gpio->base + 7);
+ outb(port_state & ~mask, ws16c48gpio->base + 8 + port);
+ outb(port_state | mask, ws16c48gpio->base + 8 + port);
+ outb(0xC0, ws16c48gpio->base + 7);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+}
+
+static void ws16c48_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ const unsigned long mask = BIT(offset);
+ const unsigned port = offset / 8;
+ unsigned long flags;
+
+ /* only the first 3 ports support interrupts */
+ if (port > 2)
+ return;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ ws16c48gpio->irq_mask &= ~mask;
+
+ outb(0x80, ws16c48gpio->base + 7);
+ outb(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
+ outb(0xC0, ws16c48gpio->base + 7);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+}
+
+static void ws16c48_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ const unsigned long mask = BIT(offset);
+ const unsigned port = offset / 8;
+ unsigned long flags;
+
+ /* only the first 3 ports support interrupts */
+ if (port > 2)
+ return;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ ws16c48gpio->irq_mask |= mask;
+
+ outb(0x80, ws16c48gpio->base + 7);
+ outb(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
+ outb(0xC0, ws16c48gpio->base + 7);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+}
+
+static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ const unsigned long mask = BIT(offset);
+ const unsigned port = offset / 8;
+ unsigned long flags;
+
+ /* only the first 3 ports support interrupts */
+ if (port > 2)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ switch (flow_type) {
+ case IRQ_TYPE_NONE:
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ ws16c48gpio->flow_mask |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ ws16c48gpio->flow_mask &= ~mask;
+ break;
+ default:
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ return -EINVAL;
+ }
+
+ outb(0x40, ws16c48gpio->base + 7);
+ outb(ws16c48gpio->flow_mask >> (8*port), ws16c48gpio->base + 8 + port);
+ outb(0xC0, ws16c48gpio->base + 7);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+
+ return 0;
+}
+
+static struct irq_chip ws16c48_irqchip = {
+ .name = "ws16c48",
+ .irq_ack = ws16c48_irq_ack,
+ .irq_mask = ws16c48_irq_mask,
+ .irq_unmask = ws16c48_irq_unmask,
+ .irq_set_type = ws16c48_irq_set_type
+};
+
+static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
+{
+ struct ws16c48_gpio *const ws16c48gpio = dev_id;
+ struct gpio_chip *const chip = &ws16c48gpio->chip;
+ unsigned long int_pending;
+ unsigned long port;
+ unsigned long int_id;
+ unsigned long gpio;
+
+ int_pending = inb(ws16c48gpio->base + 6) & 0x7;
+ if (!int_pending)
+ return IRQ_NONE;
+
+ /* loop until all pending interrupts are handled */
+ do {
+ for_each_set_bit(port, &int_pending, 3) {
+ int_id = inb(ws16c48gpio->base + 8 + port);
+ for_each_set_bit(gpio, &int_id, 8)
+ generic_handle_irq(irq_find_mapping(
+ chip->irqdomain, gpio + 8*port));
+ }
+
+ int_pending = inb(ws16c48gpio->base + 6) & 0x7;
+ } while (int_pending);
+
+ return IRQ_HANDLED;
+}
+
+static int __init ws16c48_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ws16c48_gpio *ws16c48gpio;
+ const unsigned base = ws16c48_base;
+ const unsigned extent = 16;
+ const char *const name = dev_name(dev);
+ int err;
+ const unsigned irq = ws16c48_irq;
+
+ ws16c48gpio = devm_kzalloc(dev, sizeof(*ws16c48gpio), GFP_KERNEL);
+ if (!ws16c48gpio)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base, extent, name)) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base, base + extent);
+ return -EBUSY;
+ }
+
+ ws16c48gpio->chip.label = name;
+ ws16c48gpio->chip.parent = dev;
+ ws16c48gpio->chip.owner = THIS_MODULE;
+ ws16c48gpio->chip.base = -1;
+ ws16c48gpio->chip.ngpio = 48;
+ ws16c48gpio->chip.get_direction = ws16c48_gpio_get_direction;
+ ws16c48gpio->chip.direction_input = ws16c48_gpio_direction_input;
+ ws16c48gpio->chip.direction_output = ws16c48_gpio_direction_output;
+ ws16c48gpio->chip.get = ws16c48_gpio_get;
+ ws16c48gpio->chip.set = ws16c48_gpio_set;
+ ws16c48gpio->base = base;
+ ws16c48gpio->irq = irq;
+
+ spin_lock_init(&ws16c48gpio->lock);
+
+ dev_set_drvdata(dev, ws16c48gpio);
+
+ err = gpiochip_add_data(&ws16c48gpio->chip, ws16c48gpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ /* Disable IRQ by default */
+ outb(0x80, base + 7);
+ outb(0, base + 8);
+ outb(0, base + 9);
+ outb(0, base + 10);
+ outb(0xC0, base + 7);
+
+ err = gpiochip_irqchip_add(&ws16c48gpio->chip, &ws16c48_irqchip, 0,
+ handle_edge_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(dev, "Could not add irqchip (%d)\n", err);
+ goto err_gpiochip_remove;
+ }
+
+ err = request_irq(irq, ws16c48_irq_handler, IRQF_SHARED, name,
+ ws16c48gpio);
+ if (err) {
+ dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ goto err_gpiochip_remove;
+ }
+
+ return 0;
+
+err_gpiochip_remove:
+ gpiochip_remove(&ws16c48gpio->chip);
+ return err;
+}
+
+static int ws16c48_remove(struct platform_device *pdev)
+{
+ struct ws16c48_gpio *const ws16c48gpio = platform_get_drvdata(pdev);
+
+ free_irq(ws16c48gpio->irq, ws16c48gpio);
+ gpiochip_remove(&ws16c48gpio->chip);
+
+ return 0;
+}
+
+static struct platform_device *ws16c48_device;
+
+static struct platform_driver ws16c48_driver = {
+ .driver = {
+ .name = "ws16c48"
+ },
+ .remove = ws16c48_remove
+};
+
+static void __exit ws16c48_exit(void)
+{
+ platform_device_unregister(ws16c48_device);
+ platform_driver_unregister(&ws16c48_driver);
+}
+
+static int __init ws16c48_init(void)
+{
+ int err;
+
+ ws16c48_device = platform_device_alloc(ws16c48_driver.driver.name, -1);
+ if (!ws16c48_device)
+ return -ENOMEM;
+
+ err = platform_device_add(ws16c48_device);
+ if (err)
+ goto err_platform_device;
+
+ err = platform_driver_probe(&ws16c48_driver, ws16c48_probe);
+ if (err)
+ goto err_platform_driver;
+
+ return 0;
+
+err_platform_driver:
+ platform_device_del(ws16c48_device);
+err_platform_device:
+ platform_device_put(ws16c48_device);
+ return err;
+}
+
+module_init(ws16c48_init);
+module_exit(ws16c48_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("WinSystems WS16C48 GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 282004deb5d4..31cbcb84cfaf 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -2,8 +2,9 @@
* AppliedMicro X-Gene SoC GPIO-Standby Driver
*
* Copyright (c) 2014, Applied Micro Circuits Corporation
- * Author: Tin Huynh <tnhuynh@apm.com>.
- * Y Vo <yvo@apm.com>.
+ * Author: Tin Huynh <tnhuynh@apm.com>.
+ * Y Vo <yvo@apm.com>.
+ * Quan Nguyen <qnguyen@apm.com>.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -28,9 +29,14 @@
#include "gpiolib.h"
-#define XGENE_MAX_GPIO_DS 22
-#define XGENE_MAX_GPIO_DS_IRQ 6
+/* Common property names */
+#define XGENE_NIRQ_PROPERTY "apm,nr-irqs"
+#define XGENE_NGPIO_PROPERTY "apm,nr-gpios"
+#define XGENE_IRQ_START_PROPERTY "apm,irq-start"
+#define XGENE_DFLT_MAX_NGPIO 22
+#define XGENE_DFLT_MAX_NIRQ 6
+#define XGENE_DFLT_IRQ_START_PIN 8
#define GPIO_MASK(x) (1U << ((x) % 32))
#define MPA_GPIO_INT_LVL 0x0290
@@ -39,19 +45,32 @@
#define MPA_GPIO_IN_ADDR 0x02a4
#define MPA_GPIO_SEL_LO 0x0294
+#define GPIO_INT_LEVEL_H 0x000001
+#define GPIO_INT_LEVEL_L 0x000000
+
/**
* struct xgene_gpio_sb - GPIO-Standby private data structure.
* @gc: memory-mapped GPIO controllers.
- * @irq: Mapping GPIO pins and interrupt number
- * nirq: Number of GPIO pins that supports interrupt
+ * @regs: GPIO register base offset
+ * @irq_domain: GPIO interrupt domain
+ * @irq_start: first GPIO pin that supports interrupts
+ * @nirq: Number of GPIO pins that support interrupts
+ * @parent_irq_base: first parent HWIRQ
*/
struct xgene_gpio_sb {
struct gpio_chip gc;
- u32 *irq;
- u32 nirq;
+ void __iomem *regs;
+ struct irq_domain *irq_domain;
+ u16 irq_start;
+ u16 nirq;
+ u16 parent_irq_base;
};
-static void xgene_gpio_set_bit(struct gpio_chip *gc, void __iomem *reg, u32 gpio, int val)
+#define HWIRQ_TO_GPIO(priv, hwirq) ((hwirq) + (priv)->irq_start)
+#define GPIO_TO_HWIRQ(priv, gpio) ((gpio) - (priv)->irq_start)
+
+static void xgene_gpio_set_bit(struct gpio_chip *gc,
+ void __iomem *reg, u32 gpio, int val)
{
u32 data;
@@ -63,23 +82,170 @@ static void xgene_gpio_set_bit(struct gpio_chip *gc, void __iomem *reg, u32 gpio
gc->write_reg(reg, data);
}
-static int apm_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
+static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+ int gpio = HWIRQ_TO_GPIO(priv, d->hwirq);
+ int lvl_type = GPIO_INT_LEVEL_H;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ lvl_type = GPIO_INT_LEVEL_H;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ lvl_type = GPIO_INT_LEVEL_L;
+ break;
+ default:
+ break;
+ }
+
+ xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpio * 2, 1);
+ xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_INT_LVL,
+ d->hwirq, lvl_type);
+
+ /* Propagate IRQ type setting to parent */
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ return irq_chip_set_type_parent(d, IRQ_TYPE_EDGE_RISING);
+ else
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip xgene_gpio_sb_irq_chip = {
+ .name = "sbgpio",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = xgene_gpio_sb_irq_set_type,
+};
+
+static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
{
struct xgene_gpio_sb *priv = gpiochip_get_data(gc);
+ struct irq_fwspec fwspec;
+
+ if ((gpio < priv->irq_start) ||
+ (gpio > HWIRQ_TO_GPIO(priv, priv->nirq)))
+ return -ENXIO;
+
+ if (gc->parent->of_node)
+ fwspec.fwnode = of_node_to_fwnode(gc->parent->of_node);
+ else
+ fwspec.fwnode = gc->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = GPIO_TO_HWIRQ(priv, gpio);
+ fwspec.param[1] = IRQ_TYPE_NONE;
+ return irq_create_fwspec_mapping(&fwspec);
+}
+
+static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
+ struct irq_data *irq_data)
+{
+ struct xgene_gpio_sb *priv = d->host_data;
+ u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
+
+ if (gpiochip_lock_as_irq(&priv->gc, gpio)) {
+ dev_err(priv->gc.parent,
+ "Unable to configure XGene GPIO standby pin %d as IRQ\n",
+ gpio);
+ return;
+ }
+
+ xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpio * 2, 1);
+}
+
+static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
+ struct irq_data *irq_data)
+{
+ struct xgene_gpio_sb *priv = d->host_data;
+ u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
+
+ gpiochip_unlock_as_irq(&priv->gc, gpio);
+ xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpio * 2, 0);
+}
+
+static int xgene_gpio_sb_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct xgene_gpio_sb *priv = d->host_data;
+
+ if ((fwspec->param_count != 2) ||
+ (fwspec->param[0] >= priv->nirq))
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+}
+
+static int xgene_gpio_sb_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct xgene_gpio_sb *priv = domain->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int i;
+
+ hwirq = fwspec->param[0];
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &xgene_gpio_sb_irq_chip, priv);
- if (priv->irq[gpio])
- return priv->irq[gpio];
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ if (is_of_node(parent_fwspec.fwnode)) {
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0; /* SPI */
+ /* Skip SGIs and PPIs */
+ parent_fwspec.param[1] = hwirq + priv->parent_irq_base - 32;
+ parent_fwspec.param[2] = fwspec->param[1];
+ } else if (is_fwnode_irqchip(parent_fwspec.fwnode)) {
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = hwirq + priv->parent_irq_base;
+ parent_fwspec.param[1] = fwspec->param[1];
+ } else
+ return -EINVAL;
- return -ENXIO;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
}
+static void xgene_gpio_sb_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d;
+ unsigned int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ d = irq_domain_get_irq_data(domain, virq + i);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops xgene_gpio_sb_domain_ops = {
+ .translate = xgene_gpio_sb_domain_translate,
+ .alloc = xgene_gpio_sb_domain_alloc,
+ .free = xgene_gpio_sb_domain_free,
+ .activate = xgene_gpio_sb_domain_activate,
+ .deactivate = xgene_gpio_sb_domain_deactivate,
+};
+
static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv;
- u32 ret, i;
- u32 default_lines[] = {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D};
+ int ret;
struct resource *res;
void __iomem *regs;
+ struct irq_domain *parent_domain = NULL;
+ struct fwnode_handle *fwnode;
+ u32 val32;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -90,6 +256,18 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
+ priv->regs = regs;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret > 0) {
+ priv->parent_irq_base = irq_get_irq_data(ret)->hwirq;
+ parent_domain = irq_get_irq_data(ret)->domain;
+ }
+ if (!parent_domain) {
+ dev_err(&pdev->dev, "unable to obtain parent domain\n");
+ return -ENODEV;
+ }
+
ret = bgpio_init(&priv->gc, &pdev->dev, 4,
regs + MPA_GPIO_IN_ADDR,
regs + MPA_GPIO_OUT_ADDR, NULL,
@@ -97,30 +275,51 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->gc.to_irq = apm_gpio_sb_to_irq;
- priv->gc.ngpio = XGENE_MAX_GPIO_DS;
+ priv->gc.to_irq = xgene_gpio_sb_to_irq;
- priv->nirq = XGENE_MAX_GPIO_DS_IRQ;
+ /* Retrieve start irq pin, use default if property not found */
+ priv->irq_start = XGENE_DFLT_IRQ_START_PIN;
+ if (!device_property_read_u32(&pdev->dev,
+ XGENE_IRQ_START_PROPERTY, &val32))
+ priv->irq_start = val32;
- priv->irq = devm_kzalloc(&pdev->dev, sizeof(u32) * XGENE_MAX_GPIO_DS,
- GFP_KERNEL);
- if (!priv->irq)
- return -ENOMEM;
+ /* Retrieve number irqs, use default if property not found */
+ priv->nirq = XGENE_DFLT_MAX_NIRQ;
+ if (!device_property_read_u32(&pdev->dev, XGENE_NIRQ_PROPERTY, &val32))
+ priv->nirq = val32;
- for (i = 0; i < priv->nirq; i++) {
- priv->irq[default_lines[i]] = platform_get_irq(pdev, i);
- xgene_gpio_set_bit(&priv->gc, regs + MPA_GPIO_SEL_LO,
- default_lines[i] * 2, 1);
- xgene_gpio_set_bit(&priv->gc, regs + MPA_GPIO_INT_LVL, i, 1);
- }
+ /* Retrieve number gpio, use default if property not found */
+ priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO;
+ if (!device_property_read_u32(&pdev->dev, XGENE_NGPIO_PROPERTY, &val32))
+ priv->gc.ngpio = val32;
+
+ dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n",
+ priv->gc.ngpio, priv->nirq, priv->irq_start);
platform_set_drvdata(pdev, priv);
- ret = gpiochip_add_data(&priv->gc, priv);
- if (ret)
- dev_err(&pdev->dev, "failed to register X-Gene GPIO Standby driver\n");
+ if (pdev->dev.of_node)
+ fwnode = of_node_to_fwnode(pdev->dev.of_node);
else
- dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
+ fwnode = pdev->dev.fwnode;
+
+ priv->irq_domain = irq_domain_create_hierarchy(parent_domain,
+ 0, priv->nirq, fwnode,
+ &xgene_gpio_sb_domain_ops, priv);
+ if (!priv->irq_domain)
+ return -ENODEV;
+
+ priv->gc.irqdomain = priv->irq_domain;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register X-Gene GPIO Standby driver\n");
+ irq_domain_remove(priv->irq_domain);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
if (priv->nirq > 0) {
/* Register interrupt handlers for gpio signaled acpi events */
@@ -138,7 +337,8 @@ static int xgene_gpio_sb_remove(struct platform_device *pdev)
acpi_gpiochip_free_interrupts(&priv->gc);
}
- gpiochip_remove(&priv->gc);
+ irq_domain_remove(priv->irq_domain);
+
return 0;
}
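
The alloc path above is where the hierarchy pays off: the GPIO domain translates its own hwirq into a parent fwspec, and the encoding differs by firmware. With a device-tree GIC parent the three cells are {0 = SPI, number, trigger}, where the SPI number is the parent hwirq minus 32 (SGIs and PPIs occupy hwirqs 0-31); an ACPI/fwnode parent takes the hwirq directly. A sketch of the two branches (the parent base 0x28 is a hypothetical value; the driver reads the real one from the first platform IRQ):

#include <stdio.h>

/* Model of the parent fwspec built in xgene_gpio_sb_domain_alloc() */
static void build_parent_fwspec(int is_of, unsigned hwirq,
				unsigned parent_irq_base, unsigned trigger)
{
	if (is_of)
		/* 3-cell GIC binding; -32 skips the SGI/PPI hwirq space */
		printf("OF:   <0 %u %u>\n",
		       hwirq + parent_irq_base - 32, trigger);
	else
		/* ACPI/fwnode parent: {hwirq, trigger} */
		printf("ACPI: <%u %u>\n", hwirq + parent_irq_base, trigger);
}

int main(void)
{
	build_parent_fwspec(1, 0, 0x28, 4);
	build_parent_fwspec(0, 0, 0x28, 4);
	return 0;
}
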
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 592e9cdf9c53..0dc916191689 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -173,6 +173,11 @@ static int xgene_gpio_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err;
+ }
+
gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
if (!gpio->base) {
@@ -193,7 +198,7 @@ static int xgene_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpio);
- err = gpiochip_add_data(&gpio->chip, gpio);
+ err = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
if (err) {
dev_err(&pdev->dev,
"failed to register gpiochip.\n");
@@ -207,14 +212,6 @@ err:
return err;
}
-static int xgene_gpio_remove(struct platform_device *pdev)
-{
- struct xgene_gpio *gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&gpio->chip);
- return 0;
-}
-
static const struct of_device_id xgene_gpio_of_match[] = {
{ .compatible = "apm,xgene-gpio", },
{},
@@ -228,7 +225,6 @@ static struct platform_driver xgene_gpio_driver = {
.pm = XGENE_GPIO_PM_OPS,
},
.probe = xgene_gpio_probe,
- .remove = xgene_gpio_remove,
};
module_platform_driver(xgene_gpio_driver);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 540cbc88c7a2..2dc52585e3f2 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -71,29 +71,29 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
* controller uses pin controller and the mapping is not contiguous the
* offset might be different.
*/
-static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, int pin)
+static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_device *gdev, int pin)
{
struct gpio_pin_range *pin_range;
/* If there are no ranges in this chip, use 1:1 mapping */
- if (list_empty(&chip->pin_ranges))
+ if (list_empty(&gdev->pin_ranges))
return pin;
- list_for_each_entry(pin_range, &chip->pin_ranges, node) {
+ list_for_each_entry(pin_range, &gdev->pin_ranges, node) {
const struct pinctrl_gpio_range *range = &pin_range->range;
int i;
if (range->pins) {
for (i = 0; i < range->npins; i++) {
if (range->pins[i] == pin)
- return range->base + i - chip->base;
+ return range->base + i - gdev->base;
}
} else {
if (pin >= range->pin_base &&
pin < range->pin_base + range->npins) {
unsigned gpio_base;
- gpio_base = range->base - chip->base;
+ gpio_base = range->base - gdev->base;
return gpio_base + pin - range->pin_base;
}
}
@@ -102,7 +102,7 @@ static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, int pin)
return -EINVAL;
}
#else
-static inline int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip,
+static inline int acpi_gpiochip_pin_to_gpio_offset(struct gpio_device *gdev,
int pin)
{
return pin;
@@ -134,7 +134,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
if (!chip)
return ERR_PTR(-EPROBE_DEFER);
- offset = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+ offset = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin);
if (offset < 0)
return ERR_PTR(offset);
@@ -202,7 +202,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
if (!handler)
return AE_BAD_PARAMETER;
- pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+ pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin);
if (pin < 0)
return AE_BAD_PARAMETER;
@@ -673,7 +673,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
struct gpio_desc *desc;
bool found;
- pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+ pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin);
if (pin < 0) {
status = AE_BAD_PARAMETER;
goto out;
@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
if (lookup) {
lookup->adev = adev;
- lookup->con_id = con_id;
+ lookup->con_id = kstrdup(con_id, GFP_KERNEL);
list_add_tail(&lookup->node, &acpi_crs_lookup_list);
}
}
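
The pin-to-offset helper reworked above resolves an ACPI pin number against the chip's registered pin ranges: with no ranges the mapping is 1:1, otherwise the chip-relative offset is recovered from the range's GPIO base. A sketch of the contiguous-range branch (struct fields mirror pinctrl_gpio_range; -1 stands in for -EINVAL):

#include <stdio.h>

struct range {
	unsigned base;		/* first GPIO number of the range */
	unsigned pin_base;	/* first pin number of the range */
	unsigned npins;
};

/* Model of the contiguous branch of acpi_gpiochip_pin_to_gpio_offset() */
static int pin_to_offset(const struct range *r, unsigned nranges,
			 unsigned gdev_base, unsigned pin)
{
	unsigned i;

	if (!nranges)
		return pin;	/* no ranges registered: 1:1 mapping */

	for (i = 0; i < nranges; i++)
		if (pin >= r[i].pin_base &&
		    pin < r[i].pin_base + r[i].npins)
			return (r[i].base - gdev_base) +
			       (pin - r[i].pin_base);

	return -1;
}

int main(void)
{
	struct range ranges[] = { { .base = 100, .pin_base = 32, .npins = 8 } };

	/* pin 35 sits at GPIO 103 on a chip based at 100 -> offset 3 */
	printf("offset = %d\n", pin_to_offset(ranges, 1, 100, 35));
	return 0;
}
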
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 405dfcaadc4c..932e510aec50 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -180,7 +180,7 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
* Remove this redundant call (along with the corresponding
* unlock) when those drivers have been fixed.
*/
- ret = gpiochip_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ ret = gpiochip_lock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc));
if (ret < 0)
goto err_put_kn;
@@ -194,7 +194,7 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
return 0;
err_unlock:
- gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ gpiochip_unlock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc));
err_put_kn:
sysfs_put(data->value_kn);
@@ -212,7 +212,7 @@ static void gpio_sysfs_free_irq(struct device *dev)
data->irq_flags = 0;
free_irq(data->irq, data);
- gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ gpiochip_unlock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc));
sysfs_put(data->value_kn);
}
@@ -547,6 +547,7 @@ static struct class gpio_class = {
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
struct gpio_chip *chip;
+ struct gpio_device *gdev;
struct gpiod_data *data;
unsigned long flags;
int status;
@@ -565,12 +566,13 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
return -EINVAL;
}
- chip = desc->chip;
+ gdev = desc->gdev;
+ chip = gdev->chip;
mutex_lock(&sysfs_lock);
/* check if chip is being removed */
- if (!chip || !chip->cdev) {
+ if (!chip || !gdev->mockdev) {
status = -ENODEV;
goto err_unlock;
}
@@ -605,7 +607,7 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
if (chip->names && chip->names[offset])
ioname = chip->names[offset];
- dev = device_create_with_groups(&gpio_class, chip->parent,
+ dev = device_create_with_groups(&gpio_class, &gdev->dev,
MKDEV(0, 0), data, gpio_groups,
ioname ? ioname : "gpio%u",
desc_to_gpio(desc));
@@ -716,9 +718,11 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(gpiod_unexport);
-int gpiochip_sysfs_register(struct gpio_chip *chip)
+int gpiochip_sysfs_register(struct gpio_device *gdev)
{
struct device *dev;
+ struct device *parent;
+ struct gpio_chip *chip = gdev->chip;
/*
* Many systems add gpio chips for SOC support very early,
@@ -729,8 +733,17 @@ int gpiochip_sysfs_register(struct gpio_chip *chip)
if (!gpio_class.p)
return 0;
+ /*
+ * For sysfs backward compatibility we keep the class device
+ * parented to the gpio_chip parent (chip->parent) when it is set.
+ */
+ if (chip->parent)
+ parent = chip->parent;
+ else
+ parent = &gdev->dev;
+
/* use chip->base for the ID; it's already known to be unique */
- dev = device_create_with_groups(&gpio_class, chip->parent,
+ dev = device_create_with_groups(&gpio_class, parent,
MKDEV(0, 0),
chip, gpiochip_groups,
"gpiochip%d", chip->base);
@@ -738,30 +751,31 @@ int gpiochip_sysfs_register(struct gpio_chip *chip)
return PTR_ERR(dev);
mutex_lock(&sysfs_lock);
- chip->cdev = dev;
+ gdev->mockdev = dev;
mutex_unlock(&sysfs_lock);
return 0;
}
-void gpiochip_sysfs_unregister(struct gpio_chip *chip)
+void gpiochip_sysfs_unregister(struct gpio_device *gdev)
{
struct gpio_desc *desc;
+ struct gpio_chip *chip = gdev->chip;
unsigned int i;
- if (!chip->cdev)
+ if (!gdev->mockdev)
return;
- device_unregister(chip->cdev);
+ device_unregister(gdev->mockdev);
/* prevent further gpiod exports */
mutex_lock(&sysfs_lock);
- chip->cdev = NULL;
+ gdev->mockdev = NULL;
mutex_unlock(&sysfs_lock);
/* unregister gpiod class devices owned by sysfs */
for (i = 0; i < chip->ngpio; i++) {
- desc = &chip->desc[i];
+ desc = &gdev->descs[i];
if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
gpiod_free(desc);
}
@@ -771,7 +785,7 @@ static int __init gpiolib_sysfs_init(void)
{
int status;
unsigned long flags;
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
status = class_register(&gpio_class);
if (status < 0)
@@ -784,8 +798,8 @@ static int __init gpiolib_sysfs_init(void)
* registered, and so arch_initcall() can always gpio_export().
*/
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list) {
- if (chip->cdev)
+ list_for_each_entry(gdev, &gpio_devices, list) {
+ if (gdev->mockdev)
continue;
/*
@@ -798,12 +812,11 @@ static int __init gpiolib_sysfs_init(void)
* gpio_lock prevents us from doing this.
*/
spin_unlock_irqrestore(&gpio_lock, flags);
- status = gpiochip_sysfs_register(chip);
+ status = gpiochip_sysfs_register(gdev);
spin_lock_irqsave(&gpio_lock, flags);
}
spin_unlock_irqrestore(&gpio_lock, flags);
-
return status;
}
postcore_initcall(gpiolib_sysfs_init);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5c1ba879f889..b747c76fd2b1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -16,6 +16,11 @@
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/idr.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/gpio.h>
#include "gpiolib.h"
@@ -42,6 +47,14 @@
#define extra_checks 0
#endif
+/* Device and char device-related information */
+static DEFINE_IDA(gpio_ida);
+static dev_t gpio_devt;
+#define GPIO_DEV_MAX 256 /* 256 GPIO chip devices supported */
+static struct bus_type gpio_bus_type = {
+ .name = "gpio",
+};
+
/* gpio_lock prevents conflicts during gpio_desc[] table updates.
* While any GPIO is requested, its gpio_chip is not removable;
* each GPIO's "requested" flag serves as a lock and refcount.
@@ -50,12 +63,12 @@ DEFINE_SPINLOCK(gpio_lock);
static DEFINE_MUTEX(gpio_lookup_lock);
static LIST_HEAD(gpio_lookup_list);
-LIST_HEAD(gpio_chips);
-
+LIST_HEAD(gpio_devices);
static void gpiochip_free_hogs(struct gpio_chip *chip);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static bool gpiolib_initialized;
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
@@ -67,15 +80,16 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
*/
struct gpio_desc *gpio_to_desc(unsigned gpio)
{
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list) {
- if (chip->base <= gpio && chip->base + chip->ngpio > gpio) {
+ list_for_each_entry(gdev, &gpio_devices, list) {
+ if (gdev->base <= gpio &&
+ gdev->base + gdev->ngpio > gpio) {
spin_unlock_irqrestore(&gpio_lock, flags);
- return &chip->desc[gpio - chip->base];
+ return &gdev->descs[gpio - gdev->base];
}
}
@@ -94,10 +108,12 @@ EXPORT_SYMBOL_GPL(gpio_to_desc);
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
u16 hwnum)
{
- if (hwnum >= chip->ngpio)
+ struct gpio_device *gdev = chip->gpiodev;
+
+ if (hwnum >= gdev->ngpio)
return ERR_PTR(-EINVAL);
- return &chip->desc[hwnum];
+ return &gdev->descs[hwnum];
}
/**
@@ -107,7 +123,7 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
*/
int desc_to_gpio(const struct gpio_desc *desc)
{
- return desc->chip->base + (desc - &desc->chip->desc[0]);
+ return desc->gdev->base + (desc - &desc->gdev->descs[0]);
}
EXPORT_SYMBOL_GPL(desc_to_gpio);
@@ -118,23 +134,25 @@ EXPORT_SYMBOL_GPL(desc_to_gpio);
*/
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
- return desc ? desc->chip : NULL;
+ if (!desc || !desc->gdev || !desc->gdev->chip)
+ return NULL;
+ return desc->gdev->chip;
}
EXPORT_SYMBOL_GPL(gpiod_to_chip);
/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
static int gpiochip_find_base(int ngpio)
{
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
int base = ARCH_NR_GPIOS - ngpio;
- list_for_each_entry_reverse(chip, &gpio_chips, list) {
+ list_for_each_entry_reverse(gdev, &gpio_devices, list) {
/* found a free space? */
- if (chip->base + chip->ngpio <= base)
+ if (gdev->base + gdev->ngpio <= base)
break;
else
/* nope, check the space right before the chip */
- base = chip->base - ngpio;
+ base = gdev->base - ngpio;
}
if (gpio_is_valid(base)) {
@@ -187,57 +205,45 @@ EXPORT_SYMBOL_GPL(gpiod_get_direction);
* Return -EBUSY if the new chip overlaps with some other chip's integer
* space.
*/
-static int gpiochip_add_to_list(struct gpio_chip *chip)
+static int gpiodev_add_to_list(struct gpio_device *gdev)
{
- struct gpio_chip *iterator;
- struct gpio_chip *previous = NULL;
+ struct gpio_device *prev, *next;
- if (list_empty(&gpio_chips)) {
- list_add_tail(&chip->list, &gpio_chips);
+ if (list_empty(&gpio_devices)) {
+ /* initial entry in list */
+ list_add_tail(&gdev->list, &gpio_devices);
return 0;
}
- list_for_each_entry(iterator, &gpio_chips, list) {
- if (iterator->base >= chip->base + chip->ngpio) {
- /*
- * Iterator is the first GPIO chip so there is no
- * previous one
- */
- if (!previous) {
- goto found;
- } else {
- /*
- * We found a valid range(means
- * [base, base + ngpio - 1]) between previous
- * and iterator chip.
- */
- if (previous->base + previous->ngpio
- <= chip->base)
- goto found;
- }
- }
- previous = iterator;
+ next = list_entry(gpio_devices.next, struct gpio_device, list);
+ if (gdev->base + gdev->ngpio <= next->base) {
+ /* add before first entry */
+ list_add(&gdev->list, &gpio_devices);
+ return 0;
}
- /*
- * We are beyond the last chip in the list and iterator now
- * points to the head.
- * Let iterator point to the last chip in the list.
- */
-
- iterator = list_last_entry(&gpio_chips, struct gpio_chip, list);
- if (iterator->base + iterator->ngpio <= chip->base) {
- list_add(&chip->list, &iterator->list);
+ prev = list_entry(gpio_devices.prev, struct gpio_device, list);
+ if (prev->base + prev->ngpio <= gdev->base) {
+ /* add behind last entry */
+ list_add_tail(&gdev->list, &gpio_devices);
return 0;
}
- dev_err(chip->parent,
- "GPIO integer space overlap, cannot add chip\n");
- return -EBUSY;
+ list_for_each_entry_safe(prev, next, &gpio_devices, list) {
+ /* at the end of the list */
+ if (&next->list == &gpio_devices)
+ break;
-found:
- list_add_tail(&chip->list, &iterator->list);
- return 0;
+ /* add between prev and next */
+ if (prev->base + prev->ngpio <= gdev->base
+ && gdev->base + gdev->ngpio <= next->base) {
+ list_add(&gdev->list, &prev->list);
+ return 0;
+ }
+ }
+
+ dev_err(&gdev->dev, "GPIO integer space overlap, cannot add chip\n");
+ return -EBUSY;
}
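
The insertion rule is easier to see outside the list_head plumbing: a new
[base, base + ngpio) range may only go where it overlaps neither neighbour
in the base-sorted list. A standalone illustrative sketch (plain C, not
kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct range { int base; int ngpio; };

	/* A new [base, base + ngpio) range fits between prev and next
	 * (either may be absent) only if it overlaps neither. */
	static bool fits_between(const struct range *prev,
				 const struct range *next,
				 int base, int ngpio)
	{
		bool after_prev = !prev || prev->base + prev->ngpio <= base;
		bool before_next = !next || base + ngpio <= next->base;

		return after_prev && before_next;
	}

	int main(void)
	{
		struct range a = { 0, 32 }, b = { 64, 32 };

		printf("%d\n", fits_between(&a, &b, 32, 32)); /* 1: fits the gap */
		printf("%d\n", fits_between(&a, &b, 48, 32)); /* 0: overlaps b */
		return 0;
	}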
/**
@@ -245,23 +251,23 @@ found:
*/
static struct gpio_desc *gpio_name_to_desc(const char * const name)
{
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list) {
+ list_for_each_entry(gdev, &gpio_devices, list) {
int i;
- for (i = 0; i != chip->ngpio; ++i) {
- struct gpio_desc *gpio = &chip->desc[i];
+ for (i = 0; i != gdev->ngpio; ++i) {
+ struct gpio_desc *desc = &gdev->descs[i];
- if (!gpio->name || !name)
+ if (!desc->name || !name)
continue;
- if (!strcmp(gpio->name, name)) {
+ if (!strcmp(desc->name, name)) {
spin_unlock_irqrestore(&gpio_lock, flags);
- return gpio;
+ return desc;
}
}
}
@@ -279,6 +285,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
*/
static int gpiochip_set_desc_names(struct gpio_chip *gc)
{
+ struct gpio_device *gdev = gc->gpiodev;
int i;
if (!gc->names)
@@ -290,19 +297,208 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
gpio = gpio_name_to_desc(gc->names[i]);
if (gpio)
- dev_warn(gc->parent, "Detected name collision for "
- "GPIO name '%s'\n",
+ dev_warn(&gdev->dev,
+ "Detected name collision for GPIO name '%s'\n",
gc->names[i]);
}
/* Then add all names to the GPIO descriptors */
for (i = 0; i != gc->ngpio; ++i)
- gc->desc[i].name = gc->names[i];
+ gdev->descs[i].name = gc->names[i];
+
+ return 0;
+}
+
+/**
+ * gpio_ioctl() - ioctl handler for the GPIO chardev
+ */
+static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct gpio_device *gdev = filp->private_data;
+ struct gpio_chip *chip = gdev->chip;
+ int __user *ip = (int __user *)arg;
+
+ /* We fail any subsequent ioctl() calls when the chip is gone */
+ if (!chip)
+ return -ENODEV;
+
+ /* Fill in the struct and pass to userspace */
+ if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
+ struct gpiochip_info chipinfo;
+
+ strncpy(chipinfo.name, dev_name(&gdev->dev),
+ sizeof(chipinfo.name));
+ chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
+ strncpy(chipinfo.label, gdev->label,
+ sizeof(chipinfo.label));
+ chipinfo.label[sizeof(chipinfo.label)-1] = '\0';
+ chipinfo.lines = gdev->ngpio;
+ if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+ return -EFAULT;
+ return 0;
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
+ struct gpioline_info lineinfo;
+ struct gpio_desc *desc;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+ if (lineinfo.line_offset >= gdev->ngpio)
+ return -EINVAL;
+
+ desc = &gdev->descs[lineinfo.line_offset];
+ if (desc->name) {
+ strncpy(lineinfo.name, desc->name,
+ sizeof(lineinfo.name));
+ lineinfo.name[sizeof(lineinfo.name)-1] = '\0';
+ } else {
+ lineinfo.name[0] = '\0';
+ }
+ if (desc->label) {
+ strncpy(lineinfo.consumer, desc->label,
+ sizeof(lineinfo.consumer));
+ lineinfo.consumer[sizeof(lineinfo.consumer)-1] = '\0';
+ } else {
+ lineinfo.consumer[0] = '\0';
+ }
+
+ /*
+ * Userspace only needs to know that the kernel is using
+ * this GPIO so it can't use it.
+ */
+ lineinfo.flags = 0;
+ if (test_bit(FLAG_REQUESTED, &desc->flags) ||
+ test_bit(FLAG_IS_HOGGED, &desc->flags) ||
+ test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
+ test_bit(FLAG_EXPORT, &desc->flags) ||
+ test_bit(FLAG_SYSFS, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_KERNEL;
+ if (test_bit(FLAG_IS_OUT, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_IS_OUT;
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+ if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
+ return -EFAULT;
+ return 0;
+ }
+ return -EINVAL;
+}
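
A minimal userspace sketch of this new ABI, assuming the uapi header from
this series is installed; error handling is mostly omitted:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/gpio.h>

	int main(void)
	{
		struct gpiochip_info info;
		struct gpioline_info li = { .line_offset = 0 };
		int fd = open("/dev/gpiochip0", O_RDWR);

		if (fd < 0)
			return 1;
		/* GPIO_GET_CHIPINFO_IOCTL fills in name/label/line count */
		if (ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
			printf("%s: %s, %u lines\n",
			       info.name, info.label, info.lines);
		/* GPIO_GET_LINEINFO_IOCTL is per line, selected by offset */
		if (ioctl(fd, GPIO_GET_LINEINFO_IOCTL, &li) == 0)
			printf("line 0: '%s' used by '%s', flags 0x%x\n",
			       li.name, li.consumer, li.flags);
		close(fd);
		return 0;
	}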
+
+/**
+ * gpio_chrdev_open() - open the chardev for ioctl operations
+ * @inode: inode for this chardev
+ * @filp: file struct for storing private data
+ * Returns 0 on success
+ */
+static int gpio_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct gpio_device *gdev = container_of(inode->i_cdev,
+ struct gpio_device, chrdev);
+ /* Fail on open if the backing gpiochip is gone */
+ if (!gdev || !gdev->chip)
+ return -ENODEV;
+ get_device(&gdev->dev);
+ filp->private_data = gdev;
return 0;
}
/**
+ * gpio_chrdev_release() - close chardev after ioctl operations
+ * @inode: inode for this chardev
+ * @filp: file struct for storing private data
+ * Returns 0 on success
+ */
+static int gpio_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct gpio_device *gdev = container_of(inode->i_cdev,
+ struct gpio_device, chrdev);
+
+ if (!gdev)
+ return -ENODEV;
+ put_device(&gdev->dev);
+ return 0;
+}
+
+static const struct file_operations gpio_fileops = {
+ .release = gpio_chrdev_release,
+ .open = gpio_chrdev_open,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = gpio_ioctl,
+ .compat_ioctl = gpio_ioctl,
+};
+
+static void gpiodevice_release(struct device *dev)
+{
+ struct gpio_device *gdev = dev_get_drvdata(dev);
+
+ cdev_del(&gdev->chrdev);
+ list_del(&gdev->list);
+ ida_simple_remove(&gpio_ida, gdev->id);
+ kfree(gdev->label);
+ kfree(gdev->descs);
+ kfree(gdev);
+}
+
+static int gpiochip_setup_dev(struct gpio_device *gdev)
+{
+ int status;
+
+ cdev_init(&gdev->chrdev, &gpio_fileops);
+ gdev->chrdev.owner = THIS_MODULE;
+ gdev->chrdev.kobj.parent = &gdev->dev.kobj;
+ gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
+ status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
+ if (status < 0)
+ chip_warn(gdev->chip, "failed to add char device %d:%d\n",
+ MAJOR(gpio_devt), gdev->id);
+ else
+ chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
+ MAJOR(gpio_devt), gdev->id);
+ status = device_add(&gdev->dev);
+ if (status)
+ goto err_remove_chardev;
+
+ status = gpiochip_sysfs_register(gdev);
+ if (status)
+ goto err_remove_device;
+
+ /* From this point, the .release() function cleans up gpio_device */
+ gdev->dev.release = gpiodevice_release;
+ get_device(&gdev->dev);
+ pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
+ __func__, gdev->base, gdev->base + gdev->ngpio - 1,
+ dev_name(&gdev->dev), gdev->chip->label ? : "generic");
+
+ return 0;
+
+err_remove_device:
+ device_del(&gdev->dev);
+err_remove_chardev:
+ cdev_del(&gdev->chrdev);
+ return status;
+}
+
+static void gpiochip_setup_devs(void)
+{
+ struct gpio_device *gdev;
+ int err;
+
+ list_for_each_entry(gdev, &gpio_devices, list) {
+ err = gpiochip_setup_dev(gdev);
+ if (err)
+ pr_err("%s: Failed to initialize gpio device (%d)\n",
+ dev_name(&gdev->dev), err);
+ }
+}
+
+/**
* gpiochip_add_data() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
* Context: potentially before irqs will work
@@ -316,6 +512,9 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
* the gpio framework's arch_initcall(). Otherwise sysfs initialization
* for GPIOs will fail rudely.
*
+ * gpiochip_add_data() must only be called after gpiolib initialization,
+ * ie after core_initcall().
+ *
* If chip->base is negative, this requests dynamic assignment of
* a range of valid GPIOs.
*/
@@ -323,43 +522,106 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
{
unsigned long flags;
int status = 0;
- unsigned id;
+ unsigned i;
int base = chip->base;
- struct gpio_desc *descs;
+ struct gpio_device *gdev;
- descs = kcalloc(chip->ngpio, sizeof(descs[0]), GFP_KERNEL);
- if (!descs)
+ /*
+ * First: allocate and populate the internal stat container, and
+ * set up the struct device.
+ */
+ gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
return -ENOMEM;
+ gdev->dev.bus = &gpio_bus_type;
+ gdev->chip = chip;
+ chip->gpiodev = gdev;
+ if (chip->parent) {
+ gdev->dev.parent = chip->parent;
+ gdev->dev.of_node = chip->parent->of_node;
+ } else {
+#ifdef CONFIG_OF_GPIO
+ /* If the gpiochip has an assigned OF node this takes precedence */
+ if (chip->of_node)
+ gdev->dev.of_node = chip->of_node;
+#endif
+ }
+ gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
+ if (gdev->id < 0) {
+ /* bail out directly: err_free_gdev would pass the
+ * negative id to ida_simple_remove() */
+ status = gdev->id;
+ kfree(gdev);
+ return status;
+ }
+ dev_set_name(&gdev->dev, "gpiochip%d", gdev->id);
+ device_initialize(&gdev->dev);
+ dev_set_drvdata(&gdev->dev, gdev);
+ if (chip->parent && chip->parent->driver)
+ gdev->owner = chip->parent->driver->owner;
+ else if (chip->owner)
+ /* TODO: remove chip->owner */
+ gdev->owner = chip->owner;
+ else
+ gdev->owner = THIS_MODULE;
- chip->data = data;
+ gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
+ if (!gdev->descs) {
+ status = -ENOMEM;
+ goto err_free_gdev;
+ }
if (chip->ngpio == 0) {
chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
- return -EINVAL;
+ status = -EINVAL;
+ goto err_free_descs;
+ }
+
+ if (chip->label)
+ gdev->label = kstrdup(chip->label, GFP_KERNEL);
+ else
+ gdev->label = kstrdup("unknown", GFP_KERNEL);
+ if (!gdev->label) {
+ status = -ENOMEM;
+ goto err_free_descs;
}
+ gdev->ngpio = chip->ngpio;
+ gdev->data = data;
+
spin_lock_irqsave(&gpio_lock, flags);
+ /*
+ * TODO: this allocates a Linux GPIO number base in the global
+ * GPIO numberspace for this chip. In the long run we want to
+ * get *rid* of this numberspace and use only descriptors, but
+ * it may be a pipe dream. It will not happen before we get rid
+ * of the sysfs interface anyways.
+ */
if (base < 0) {
base = gpiochip_find_base(chip->ngpio);
if (base < 0) {
status = base;
spin_unlock_irqrestore(&gpio_lock, flags);
- goto err_free_descs;
+ goto err_free_label;
}
+ /*
+ * TODO: it should not be necessary to reflect the assigned
+ * base outside of the GPIO subsystem. Go over drivers and
+ * see if anyone makes use of this, else drop this and assign
+ * a poison instead.
+ */
chip->base = base;
}
+ gdev->base = base;
- status = gpiochip_add_to_list(chip);
+ status = gpiodev_add_to_list(gdev);
if (status) {
spin_unlock_irqrestore(&gpio_lock, flags);
- goto err_free_descs;
+ goto err_free_label;
}
- for (id = 0; id < chip->ngpio; id++) {
- struct gpio_desc *desc = &descs[id];
+ for (i = 0; i < chip->ngpio; i++) {
+ struct gpio_desc *desc = &gdev->descs[i];
- desc->chip = chip;
+ desc->gdev = gdev;
/* REVISIT: most hardware initializes GPIOs as inputs (often
* with pullups enabled) so power usage is minimized. Linux
@@ -370,17 +632,12 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
- chip->desc = descs;
-
spin_unlock_irqrestore(&gpio_lock, flags);
#ifdef CONFIG_PINCTRL
- INIT_LIST_HEAD(&chip->pin_ranges);
+ INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
- if (!chip->owner && chip->parent && chip->parent->driver)
- chip->owner = chip->parent->driver->owner;
-
status = gpiochip_set_desc_names(chip);
if (status)
goto err_remove_from_list;
@@ -391,14 +648,19 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
acpi_gpiochip_add(chip);
- status = gpiochip_sysfs_register(chip);
- if (status)
- goto err_remove_chip;
-
- pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
- chip->base, chip->base + chip->ngpio - 1,
- chip->label ? : "generic");
-
+ /*
+ * By first adding the chardev, and then adding the device,
+ * we get a device node entry in sysfs under
+ * /sys/bus/gpio/devices/gpiochipN/dev that can be used for
+ * coldplug of device nodes and other udev business.
+ * We can do this only if gpiolib has been initialized.
+ * Otherwise, defer until later.
+ */
+ if (gpiolib_initialized) {
+ status = gpiochip_setup_dev(gdev);
+ if (status)
+ goto err_remove_chip;
+ }
return 0;
err_remove_chip:
@@ -407,21 +669,33 @@ err_remove_chip:
of_gpiochip_remove(chip);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
- list_del(&chip->list);
+ list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
- chip->desc = NULL;
+err_free_label:
+ kfree(gdev->label);
err_free_descs:
- kfree(descs);
-
+ kfree(gdev->descs);
+err_free_gdev:
+ ida_simple_remove(&gpio_ida, gdev->id);
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
- chip->base, chip->base + chip->ngpio - 1,
- chip->label ? : "generic");
+ gdev->base, gdev->base + gdev->ngpio - 1,
+ chip->label ? : "generic");
+ kfree(gdev);
return status;
}
EXPORT_SYMBOL_GPL(gpiochip_add_data);
/**
+ * gpiochip_get_data() - get per-subdriver data for the chip
+ */
+void *gpiochip_get_data(struct gpio_chip *chip)
+{
+ return chip->gpiodev->data;
+}
+EXPORT_SYMBOL_GPL(gpiochip_get_data);
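
gpiochip_get_data() is what lets drivers stop deriving their state with
container_of() on the gpio_chip. A driver-side sketch, where the foo
names are hypothetical and not from this patch:

	struct foo_gpio {
		void __iomem *regs;
		struct gpio_chip chip;
	};

	static int foo_gpio_get(struct gpio_chip *chip, unsigned offset)
	{
		struct foo_gpio *foo = gpiochip_get_data(chip);

		return !!(readl(foo->regs) & BIT(offset));
	}

	/* at registration time the driver hands in its state pointer:
	 *	gpiochip_add_data(&foo->chip, foo);
	 */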
+
+/**
* gpiochip_remove() - unregister a gpio_chip
* @chip: the chip to unregister
*
@@ -429,39 +703,123 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data);
*/
void gpiochip_remove(struct gpio_chip *chip)
{
+ struct gpio_device *gdev = chip->gpiodev;
struct gpio_desc *desc;
unsigned long flags;
- unsigned id;
+ unsigned i;
bool requested = false;
- gpiochip_sysfs_unregister(chip);
-
+ /* FIXME: should the legacy sysfs handling be moved to gpio_device? */
+ gpiochip_sysfs_unregister(gdev);
+ /* Numb the device, cancelling all outstanding operations */
+ gdev->chip = NULL;
gpiochip_irqchip_remove(chip);
-
acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
+ /*
+ * We accept no more calls into the driver from this point, so
+ * NULL the driver data pointer
+ */
+ gdev->data = NULL;
spin_lock_irqsave(&gpio_lock, flags);
- for (id = 0; id < chip->ngpio; id++) {
- desc = &chip->desc[id];
- desc->chip = NULL;
+ for (i = 0; i < gdev->ngpio; i++) {
+ desc = &gdev->descs[i];
if (test_bit(FLAG_REQUESTED, &desc->flags))
requested = true;
}
- list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
if (requested)
- dev_crit(chip->parent,
+ dev_crit(&gdev->dev,
"REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
- kfree(chip->desc);
- chip->desc = NULL;
+ /*
+ * The gpiochip side puts its use of the device to rest here:
+ * if there are no userspace clients, the chardev and device will
+ * be removed, else it will be dangling until the last user is
+ * gone.
+ */
+ put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
+static void devm_gpio_chip_release(struct device *dev, void *res)
+{
+ struct gpio_chip *chip = *(struct gpio_chip **)res;
+
+ gpiochip_remove(chip);
+}
+
+static int devm_gpio_chip_match(struct device *dev, void *res, void *data)
+{
+ struct gpio_chip **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ return *r == data;
+}
+
+/**
+ * devm_gpiochip_add_data() - Resource-managed gpiochip_add_data()
+ * @dev: the device pointer to which this gpio_chip belongs
+ * @chip: the chip to register, with chip->base initialized
+ * Context: potentially before irqs will work
+ *
+ * Returns a negative errno if the chip can't be registered, such as
+ * because the chip->base is invalid or already associated with a
+ * different chip. Otherwise it returns zero as a success code.
+ *
+ * The gpio chip will automatically be released when the device is unbound.
+ */
+int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
+ void *data)
+{
+ struct gpio_chip **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = gpiochip_add_data(chip, data);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = chip;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_gpiochip_add_data);
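
A sketch of the intended use from a driver probe() path; names other than
the devm_* calls are hypothetical:

	static int foo_gpio_probe(struct platform_device *pdev)
	{
		struct foo_gpio *foo;

		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		foo->chip.label = dev_name(&pdev->dev);
		foo->chip.parent = &pdev->dev;
		foo->chip.base = -1;	/* dynamic base assignment */
		foo->chip.ngpio = 8;

		/* no remove() counterpart needed: the chip is torn down
		 * automatically when the driver is unbound */
		return devm_gpiochip_add_data(&pdev->dev, &foo->chip, foo);
	}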
+
+/**
+ * devm_gpiochip_remove() - Resource-managed gpiochip_remove()
+ * @dev: device for which the resource was allocated
+ * @chip: the chip to remove
+ *
+ * A gpio_chip with any GPIOs still requested may not be removed.
+ */
+void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_gpio_chip_release,
+ devm_gpio_chip_match, chip);
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL_GPL(devm_gpiochip_remove);
+
/**
* gpiochip_find() - iterator for locating a specific gpio_chip
* @data: data to pass to match function
@@ -477,17 +835,21 @@ struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip,
void *data))
{
+ struct gpio_device *gdev;
struct gpio_chip *chip;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list)
- if (match(chip, data))
+ list_for_each_entry(gdev, &gpio_devices, list)
+ if (match(gdev->chip, data))
break;
/* No match? */
- if (&chip->list == &gpio_chips)
+ if (&gdev->list == &gpio_devices)
chip = NULL;
+ else
+ chip = gdev->chip;
+
spin_unlock_irqrestore(&gpio_lock, flags);
return chip;
@@ -617,14 +979,14 @@ static int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- if (!try_module_get(chip->owner))
+ if (!try_module_get(chip->gpiodev->owner))
return -ENODEV;
if (gpiochip_lock_as_irq(chip, d->hwirq)) {
chip_err(chip,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
- module_put(chip->owner);
+ module_put(chip->gpiodev->owner);
return -EINVAL;
}
return 0;
@@ -635,7 +997,7 @@ static void gpiochip_irq_relres(struct irq_data *d)
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
gpiochip_unlock_as_irq(chip, d->hwirq);
- module_put(chip->owner);
+ module_put(chip->gpiodev->owner);
}
static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -785,7 +1147,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
*/
int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_request_gpio(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_request);
@@ -796,7 +1158,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
*/
void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_free_gpio(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
@@ -814,6 +1176,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
unsigned int gpio_offset, const char *pin_group)
{
struct gpio_pin_range *pin_range;
+ struct gpio_device *gdev = chip->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
@@ -826,7 +1189,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
pin_range->range.id = gpio_offset;
pin_range->range.gc = chip;
pin_range->range.name = chip->label;
- pin_range->range.base = chip->base + gpio_offset;
+ pin_range->range.base = gdev->base + gpio_offset;
pin_range->pctldev = pctldev;
ret = pinctrl_get_group_pins(pctldev, pin_group,
@@ -843,7 +1206,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
gpio_offset, gpio_offset + pin_range->range.npins - 1,
pinctrl_dev_get_devname(pctldev), pin_group);
- list_add_tail(&pin_range->node, &chip->pin_ranges);
+ list_add_tail(&pin_range->node, &gdev->pin_ranges);
return 0;
}
@@ -863,6 +1226,7 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int npins)
{
struct gpio_pin_range *pin_range;
+ struct gpio_device *gdev = chip->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
@@ -875,7 +1239,7 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
pin_range->range.id = gpio_offset;
pin_range->range.gc = chip;
pin_range->range.name = chip->label;
- pin_range->range.base = chip->base + gpio_offset;
+ pin_range->range.base = gdev->base + gpio_offset;
pin_range->range.pin_base = pin_offset;
pin_range->range.npins = npins;
pin_range->pctldev = pinctrl_find_and_add_gpio_range(pinctl_name,
@@ -891,7 +1255,7 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
pinctl_name,
pin_offset, pin_offset + npins - 1);
- list_add_tail(&pin_range->node, &chip->pin_ranges);
+ list_add_tail(&pin_range->node, &gdev->pin_ranges);
return 0;
}
@@ -904,8 +1268,9 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pin_range);
void gpiochip_remove_pin_ranges(struct gpio_chip *chip)
{
struct gpio_pin_range *pin_range, *tmp;
+ struct gpio_device *gdev = chip->gpiodev;
- list_for_each_entry_safe(pin_range, tmp, &chip->pin_ranges, node) {
+ list_for_each_entry_safe(pin_range, tmp, &gdev->pin_ranges, node) {
list_del(&pin_range->node);
pinctrl_remove_gpio_range(pin_range->pctldev,
&pin_range->range);
@@ -922,7 +1287,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
*/
static int __gpiod_request(struct gpio_desc *desc, const char *label)
{
- struct gpio_chip *chip = desc->chip;
+ struct gpio_chip *chip = desc->gdev->chip;
int status;
unsigned long flags;
@@ -971,27 +1336,50 @@ done:
return status;
}
+/*
+ * This descriptor validation needs to be inserted verbatim into each
+ * function taking a descriptor, so we need to use a preprocessor
+ * macro to avoid endless duplication.
+ */
+#define VALIDATE_DESC(desc) do { \
+ if (!desc || !desc->gdev) { \
+ pr_warn("%s: invalid GPIO\n", __func__); \
+ return -EINVAL; \
+ } \
+ if (!desc->gdev->chip) { \
+ dev_warn(&desc->gdev->dev, \
+ "%s: backing chip is gone\n", __func__); \
+ return 0; \
+ } } while (0)
+
+#define VALIDATE_DESC_VOID(desc) do { \
+ if (!desc || !desc->gdev) { \
+ pr_warn("%s: invalid GPIO\n", __func__); \
+ return; \
+ } \
+ if (!desc->gdev->chip) { \
+ dev_warn(&desc->gdev->dev, \
+ "%s: backing chip is gone\n", __func__); \
+ return; \
+ } } while (0)
+
int gpiod_request(struct gpio_desc *desc, const char *label)
{
int status = -EPROBE_DEFER;
- struct gpio_chip *chip;
-
- if (!desc) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
+ struct gpio_device *gdev;
- chip = desc->chip;
- if (!chip)
- goto done;
+ VALIDATE_DESC(desc);
+ gdev = desc->gdev;
- if (try_module_get(chip->owner)) {
+ if (try_module_get(gdev->owner)) {
status = __gpiod_request(desc, label);
if (status < 0)
- module_put(chip->owner);
+ module_put(gdev->owner);
+ else
+ get_device(&gdev->dev);
}
-done:
if (status)
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
@@ -1010,7 +1398,7 @@ static bool __gpiod_free(struct gpio_desc *desc)
spin_lock_irqsave(&gpio_lock, flags);
- chip = desc->chip;
+ chip = desc->gdev->chip;
if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) {
if (chip->free) {
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -1033,10 +1421,12 @@ static bool __gpiod_free(struct gpio_desc *desc)
void gpiod_free(struct gpio_desc *desc)
{
- if (desc && __gpiod_free(desc))
- module_put(desc->chip->owner);
- else
+ if (desc && desc->gdev && __gpiod_free(desc)) {
+ module_put(desc->gdev->owner);
+ put_device(&desc->gdev->dev);
+ } else {
WARN_ON(extra_checks);
+ }
}
/**
@@ -1059,7 +1449,7 @@ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
if (offset >= chip->ngpio)
return NULL;
- desc = &chip->desc[offset];
+ desc = &chip->gpiodev->descs[offset];
if (test_bit(FLAG_REQUESTED, &desc->flags) == 0)
return NULL;
@@ -1111,7 +1501,8 @@ void gpiochip_free_own_desc(struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
-/* Drivers MUST set GPIO direction before making get/set calls. In
+/*
+ * Drivers MUST set GPIO direction before making get/set calls. In
* some cases this is done in early boot, before IRQs are enabled.
*
* As a rule these aren't called more than once (except for drivers
@@ -1134,12 +1525,9 @@ int gpiod_direction_input(struct gpio_desc *desc)
struct gpio_chip *chip;
int status = -EINVAL;
- if (!desc || !desc->chip) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
+ VALIDATE_DESC(desc);
+ chip = desc->gdev->chip;
- chip = desc->chip;
if (!chip->get || !chip->direction_input) {
gpiod_warn(desc,
"%s: missing get() or direction_input() operations\n",
@@ -1178,7 +1566,7 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags))
return gpiod_direction_input(desc);
- chip = desc->chip;
+ chip = desc->gdev->chip;
if (!chip->set || !chip->direction_output) {
gpiod_warn(desc,
"%s: missing set() or direction_output() operations\n",
@@ -1207,10 +1595,7 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
*/
int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
- if (!desc || !desc->chip) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
+ VALIDATE_DESC(desc);
return _gpiod_direction_output_raw(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
@@ -1229,10 +1614,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- if (!desc || !desc->chip) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
+ VALIDATE_DESC(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return _gpiod_direction_output_raw(desc, value);
@@ -1251,12 +1633,8 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
struct gpio_chip *chip;
- if (!desc || !desc->chip) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
-
- chip = desc->chip;
+ VALIDATE_DESC(desc);
+ chip = desc->gdev->chip;
if (!chip->set || !chip->set_debounce) {
gpiod_dbg(desc,
"%s: missing set() or set_debounce() operations\n",
@@ -1276,6 +1654,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_debounce);
*/
int gpiod_is_active_low(const struct gpio_desc *desc)
{
+ VALIDATE_DESC(desc);
return test_bit(FLAG_ACTIVE_LOW, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
@@ -1308,7 +1687,7 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
int offset;
int value;
- chip = desc->chip;
+ chip = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
value = chip->get ? chip->get(chip, offset) : -EIO;
value = value < 0 ? value : !!value;
@@ -1328,10 +1707,9 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
*/
int gpiod_get_raw_value(const struct gpio_desc *desc)
{
- if (!desc)
- return 0;
+ VALIDATE_DESC(desc);
/* Should be using gpio_get_value_cansleep() */
- WARN_ON(desc->chip->can_sleep);
+ WARN_ON(desc->gdev->chip->can_sleep);
return _gpiod_get_raw_value(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
@@ -1349,10 +1727,10 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
int gpiod_get_value(const struct gpio_desc *desc)
{
int value;
- if (!desc)
- return 0;
+
+ VALIDATE_DESC(desc);
/* Should be using gpio_get_value_cansleep() */
- WARN_ON(desc->chip->can_sleep);
+ WARN_ON(desc->gdev->chip->can_sleep);
value = _gpiod_get_raw_value(desc);
if (value < 0)
@@ -1373,7 +1751,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
{
int err = 0;
- struct gpio_chip *chip = desc->chip;
+ struct gpio_chip *chip = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
@@ -1400,7 +1778,7 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
{
int err = 0;
- struct gpio_chip *chip = desc->chip;
+ struct gpio_chip *chip = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
@@ -1423,7 +1801,7 @@ static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value)
{
struct gpio_chip *chip;
- chip = desc->chip;
+ chip = desc->gdev->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
_gpio_set_open_drain_value(desc, value);
@@ -1471,7 +1849,7 @@ static void gpiod_set_array_value_priv(bool raw, bool can_sleep,
int i = 0;
while (i < array_size) {
- struct gpio_chip *chip = desc_array[i]->chip;
+ struct gpio_chip *chip = desc_array[i]->gdev->chip;
unsigned long mask[BITS_TO_LONGS(chip->ngpio)];
unsigned long bits[BITS_TO_LONGS(chip->ngpio)];
int count = 0;
@@ -1505,7 +1883,8 @@ static void gpiod_set_array_value_priv(bool raw, bool can_sleep,
count++;
}
i++;
- } while ((i < array_size) && (desc_array[i]->chip == chip));
+ } while ((i < array_size) &&
+ (desc_array[i]->gdev->chip == chip));
/* push collected bits to outputs */
if (count != 0)
gpio_chip_set_multiple(chip, mask, bits);
@@ -1525,10 +1904,9 @@ static void gpiod_set_array_value_priv(bool raw, bool can_sleep,
*/
void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
- if (!desc)
- return;
- /* Should be using gpio_set_value_cansleep() */
- WARN_ON(desc->chip->can_sleep);
+ VALIDATE_DESC_VOID(desc);
+ /* Should be using gpiod_set_value_cansleep() */
+ WARN_ON(desc->gdev->chip->can_sleep);
_gpiod_set_raw_value(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
@@ -1546,10 +1924,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
*/
void gpiod_set_value(struct gpio_desc *desc, int value)
{
- if (!desc)
- return;
- /* Should be using gpio_set_value_cansleep() */
- WARN_ON(desc->chip->can_sleep);
+ VALIDATE_DESC_VOID(desc);
+ /* Should be using gpiod_set_value_cansleep() */
+ WARN_ON(desc->gdev->chip->can_sleep);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
_gpiod_set_raw_value(desc, value);
@@ -1607,9 +1984,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value);
*/
int gpiod_cansleep(const struct gpio_desc *desc)
{
- if (!desc)
- return 0;
- return desc->chip->can_sleep;
+ VALIDATE_DESC(desc);
+ return desc->gdev->chip->can_sleep;
}
EXPORT_SYMBOL_GPL(gpiod_cansleep);
@@ -1625,9 +2001,8 @@ int gpiod_to_irq(const struct gpio_desc *desc)
struct gpio_chip *chip;
int offset;
- if (!desc)
- return -EINVAL;
- chip = desc->chip;
+ VALIDATE_DESC(desc);
+ chip = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
}
@@ -1646,14 +2021,14 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return -EINVAL;
- if (test_bit(FLAG_IS_OUT, &chip->desc[offset].flags)) {
+ if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
chip_err(chip,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+ set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
@@ -1671,10 +2046,37 @@ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return;
- clear_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+ clear_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
+bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset >= chip->ngpio)
+ return false;
+
+ return test_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+}
+EXPORT_SYMBOL_GPL(gpiochip_line_is_irq);
+
+bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset >= chip->ngpio)
+ return false;
+
+ return test_bit(FLAG_OPEN_DRAIN, &chip->gpiodev->descs[offset].flags);
+}
+EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain);
+
+bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset >= chip->ngpio)
+ return false;
+
+ return test_bit(FLAG_OPEN_SOURCE, &chip->gpiodev->descs[offset].flags);
+}
+EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source);
+
/**
* gpiod_get_raw_value_cansleep() - return a gpio's raw value
* @desc: gpio whose value will be returned
@@ -1687,8 +2089,7 @@ EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
might_sleep_if(extra_checks);
- if (!desc)
- return 0;
+ VALIDATE_DESC(desc);
return _gpiod_get_raw_value(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep);
@@ -1707,9 +2108,7 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
int value;
might_sleep_if(extra_checks);
- if (!desc)
- return 0;
-
+ VALIDATE_DESC(desc);
value = _gpiod_get_raw_value(desc);
if (value < 0)
return value;
@@ -1734,8 +2133,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep_if(extra_checks);
- if (!desc)
- return;
+ VALIDATE_DESC_VOID(desc);
_gpiod_set_raw_value(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
@@ -1753,9 +2151,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep_if(extra_checks);
- if (!desc)
- return;
-
+ VALIDATE_DESC_VOID(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
_gpiod_set_raw_value(desc, value);
@@ -1873,9 +2269,11 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
return desc;
}
-static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *acpi_find_gpio(struct device *dev,
+ const char *con_id,
unsigned int idx,
- enum gpio_lookup_flags *flags)
+ enum gpiod_flags flags,
+ enum gpio_lookup_flags *lookupflags)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_gpio_info info;
@@ -1906,10 +2304,16 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
if (IS_ERR(desc))
return desc;
+
+ if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
+ info.gpioint) {
+ dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+ return ERR_PTR(-ENOENT);
+ }
}
if (info.polarity == GPIO_ACTIVE_LOW)
- *flags |= GPIO_ACTIVE_LOW;
+ *lookupflags |= GPIO_ACTIVE_LOW;
return desc;
}
@@ -2172,7 +2576,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
desc = of_find_gpio(dev, con_id, idx, &lookupflags);
} else if (ACPI_COMPANION(dev)) {
dev_dbg(dev, "using ACPI for GPIO lookup\n");
- desc = acpi_find_gpio(dev, con_id, idx, &lookupflags);
+ desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags);
}
}
@@ -2358,8 +2762,8 @@ static void gpiochip_free_hogs(struct gpio_chip *chip)
int id;
for (id = 0; id < chip->ngpio; id++) {
- if (test_bit(FLAG_IS_HOGGED, &chip->desc[id].flags))
- gpiochip_free_own_desc(&chip->desc[id]);
+ if (test_bit(FLAG_IS_HOGGED, &chip->gpiodev->descs[id].flags))
+ gpiochip_free_own_desc(&chip->gpiodev->descs[id]);
}
}
@@ -2456,17 +2860,41 @@ void gpiod_put_array(struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(gpiod_put_array);
+static int __init gpiolib_dev_init(void)
+{
+ int ret;
+
+ /* Register the GPIO bus type; it shows up under /sys/bus/gpio */
+ ret = bus_register(&gpio_bus_type);
+ if (ret < 0) {
+ pr_err("gpiolib: could not register GPIO bus type\n");
+ return ret;
+ }
+
+ ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, "gpiochip");
+ if (ret < 0) {
+ pr_err("gpiolib: failed to allocate char dev region\n");
+ bus_unregister(&gpio_bus_type);
+ } else {
+ gpiolib_initialized = true;
+ gpiochip_setup_devs();
+ }
+ return ret;
+}
+core_initcall(gpiolib_dev_init);
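
To make the ordering concrete: core_initcall() runs before arch and device
initcalls, but SoC code may register chips even earlier, before any
initcall is processed. A hypothetical early registrant, not from this
patch:

	static struct gpio_chip soc_early_chip = {
		.label = "soc-early",
		.base = 0,
		.ngpio = 32,
		/* ...ops... */
	};

	/* Called from machine init, before initcalls run, so
	 * gpiolib_initialized is still false and the chardev/device setup
	 * is deferred until gpiochip_setup_devs() is called from
	 * gpiolib_dev_init() above. */
	void __init soc_early_gpio_init(void)
	{
		WARN_ON(gpiochip_add_data(&soc_early_chip, NULL));
	}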
+
#ifdef CONFIG_DEBUG_FS
-static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
{
unsigned i;
- unsigned gpio = chip->base;
- struct gpio_desc *gdesc = &chip->desc[0];
+ struct gpio_chip *chip = gdev->chip;
+ unsigned gpio = gdev->base;
+ struct gpio_desc *gdesc = &gdev->descs[0];
int is_out;
int is_irq;
- for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
+ for (i = 0; i < gdev->ngpio; i++, gpio++, gdesc++) {
if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) {
if (gdesc->name) {
seq_printf(s, " gpio-%-3d (%-20.20s)\n",
@@ -2492,16 +2920,16 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
{
unsigned long flags;
- struct gpio_chip *chip = NULL;
+ struct gpio_device *gdev = NULL;
loff_t index = *pos;
s->private = "";
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list)
+ list_for_each_entry(gdev, &gpio_devices, list)
if (index-- == 0) {
spin_unlock_irqrestore(&gpio_lock, flags);
- return chip;
+ return gdev;
}
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -2511,14 +2939,14 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
unsigned long flags;
- struct gpio_chip *chip = v;
+ struct gpio_device *gdev = v;
void *ret = NULL;
spin_lock_irqsave(&gpio_lock, flags);
- if (list_is_last(&chip->list, &gpio_chips))
+ if (list_is_last(&gdev->list, &gpio_devices))
ret = NULL;
else
- ret = list_entry(chip->list.next, struct gpio_chip, list);
+ ret = list_entry(gdev->list.next, struct gpio_device, list);
spin_unlock_irqrestore(&gpio_lock, flags);
s->private = "\n";
@@ -2533,15 +2961,24 @@ static void gpiolib_seq_stop(struct seq_file *s, void *v)
static int gpiolib_seq_show(struct seq_file *s, void *v)
{
- struct gpio_chip *chip = v;
- struct device *dev;
+ struct gpio_device *gdev = v;
+ struct gpio_chip *chip = gdev->chip;
+ struct device *parent;
+
+ if (!chip) {
+ seq_printf(s, "%s%s: (dangling chip)", (char *)s->private,
+ dev_name(&gdev->dev));
+ return 0;
+ }
- seq_printf(s, "%sGPIOs %d-%d", (char *)s->private,
- chip->base, chip->base + chip->ngpio - 1);
- dev = chip->parent;
- if (dev)
- seq_printf(s, ", %s/%s", dev->bus ? dev->bus->name : "no-bus",
- dev_name(dev));
+ seq_printf(s, "%s%s: GPIOs %d-%d", (char *)s->private,
+ dev_name(&gdev->dev),
+ gdev->base, gdev->base + gdev->ngpio - 1);
+ parent = chip->parent;
+ if (parent)
+ seq_printf(s, ", parent: %s/%s",
+ parent->bus ? parent->bus->name : "no-bus",
+ dev_name(parent));
if (chip->label)
seq_printf(s, ", %s", chip->label);
if (chip->can_sleep)
@@ -2551,7 +2988,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
if (chip->dbg_show)
chip->dbg_show(s, chip);
else
- gpiolib_dbg_show(s, chip);
+ gpiolib_dbg_show(s, gdev);
return 0;
}
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 99ed3b00ffe9..e30e5fdb1214 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -12,14 +12,67 @@
#ifndef GPIOLIB_H
#define GPIOLIB_H
+#include <linux/gpio/driver.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
enum of_gpio_flags;
enum gpiod_flags;
struct acpi_device;
/**
+ * struct gpio_device - internal state container for GPIO devices
+ * @id: numerical ID number for the GPIO chip
+ * @dev: the GPIO device struct
+ * @chrdev: character device for the GPIO device
+ * @mockdev: class device used by the deprecated sysfs interface (may be
+ * NULL)
+ * @owner: helps prevent removal of modules exporting active GPIOs
+ * @chip: pointer to the corresponding gpiochip, holding static
+ * data for this device
+ * @descs: array of ngpio descriptors.
+ * @ngpio: the number of GPIO lines on this GPIO device, equal to the size
+ * of the @descs array.
+ * @base: GPIO base in the DEPRECATED global Linux GPIO numberspace, assigned
+ * at device creation time.
+ * @label: a descriptive name for the GPIO device, such as the part number
+ * or name of the IP component in a System on Chip.
+ * @data: per-instance data assigned by the driver
+ * @list: links gpio_device:s together for traversal
+ *
+ * This state container holds most of the runtime variable data
+ * for a GPIO device and can hold references and live on after the
+ * GPIO chip has been removed, if it is still being used from
+ * userspace.
+ */
+struct gpio_device {
+ int id;
+ struct device dev;
+ struct cdev chrdev;
+ struct device *mockdev;
+ struct module *owner;
+ struct gpio_chip *chip;
+ struct gpio_desc *descs;
+ int base;
+ u16 ngpio;
+ char *label;
+ void *data;
+ struct list_head list;
+
+#ifdef CONFIG_PINCTRL
+ /*
+ * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally
+ * describe the actual pin range which they serve in an SoC. This
+ * information would be used by pinctrl subsystem to configure
+ * corresponding pins for gpio usage.
+ */
+ struct list_head pin_ranges;
+#endif
+};
+
+/**
* struct acpi_gpio_info - ACPI GPIO specific information
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
* @active_low: in case of @gpioint, the pin is active low
@@ -90,10 +143,10 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
extern struct spinlock gpio_lock;
-extern struct list_head gpio_chips;
+extern struct list_head gpio_devices;
struct gpio_desc {
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long flags;
/* flag symbols are bit numbers */
#define FLAG_REQUESTED 0
@@ -122,7 +175,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
*/
static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc)
{
- return desc - &desc->chip->desc[0];
+ return desc - &desc->gdev->descs[0];
}
/* With descriptor prefix */
@@ -149,31 +202,31 @@ static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc)
/* With chip prefix */
#define chip_emerg(chip, fmt, ...) \
- pr_emerg("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_emerg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_crit(chip, fmt, ...) \
- pr_crit("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_crit(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_err(chip, fmt, ...) \
- pr_err("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_err(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_warn(chip, fmt, ...) \
- pr_warn("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_warn(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_info(chip, fmt, ...) \
- pr_info("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_info(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_dbg(chip, fmt, ...) \
- pr_debug("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_dbg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#ifdef CONFIG_GPIO_SYSFS
-int gpiochip_sysfs_register(struct gpio_chip *chip);
-void gpiochip_sysfs_unregister(struct gpio_chip *chip);
+int gpiochip_sysfs_register(struct gpio_device *gdev);
+void gpiochip_sysfs_unregister(struct gpio_device *gdev);
#else
-static inline int gpiochip_sysfs_register(struct gpio_chip *chip)
+static inline int gpiochip_sysfs_register(struct gpio_device *gdev)
{
return 0;
}
-static inline void gpiochip_sysfs_unregister(struct gpio_chip *chip)
+static inline void gpiochip_sysfs_unregister(struct gpio_device *gdev)
{
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 8ae7ab68cb97..f2a74d0b68ae 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -25,6 +25,14 @@ config DRM_MIPI_DSI
bool
depends on DRM
+config DRM_DP_AUX_CHARDEV
+ bool "DRM DP AUX Interface"
+ depends on DRM
+ help
+ Choose this option to enable a /dev/drm_dp_auxN node that allows
+ reading from and writing to arbitrary DPCD registers on the DP aux
+ channel.
+
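
What such a node permits, as a userspace sketch: the aux chardev maps the
file offset to the DPCD address, so a plain pread() reads DPCD registers.
Hypothetical usage, error handling omitted:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint8_t rev;
		int fd = open("/dev/drm_dp_aux0", O_RDONLY);

		if (fd < 0)
			return 1;
		/* DPCD address 0x000 is DPCD_REV */
		if (pread(fd, &rev, 1, 0x000) == 1)
			printf("DPCD rev %u.%u\n", rev >> 4, rev & 0xf);
		close(fd);
		return 0;
	}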
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -106,6 +114,8 @@ config DRM_TDFX
Choose this option if you have a 3dfx Banshee or Voodoo3 (or later)
graphics card. If M is selected, the module will be called tdfx.
+source "drivers/gpu/drm/arm/Kconfig"
+
config DRM_R128
tristate "ATI Rage 128"
depends on DRM && PCI
@@ -162,6 +172,8 @@ config DRM_AMDGPU
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/amd/powerplay/Kconfig"
+source "drivers/gpu/drm/amd/acp/Kconfig"
+
source "drivers/gpu/drm/nouveau/Kconfig"
config DRM_I810
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 61766dec6a8d..6eb94fc561dc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -22,10 +22,13 @@ drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
- drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
+ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
+ drm_kms_helper_common.o
+
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
+drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -33,6 +36,7 @@ CFLAGS_drm_trace_points.o := -I$(src)
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
+obj-$(CONFIG_DRM_ARM) += arm/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
new file mode 100644
index 000000000000..ca77ec10147c
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -0,0 +1,14 @@
+menu "ACP (Audio CoProcessor) Configuration"
+
+config DRM_AMD_ACP
+ bool "Enable AMD Audio CoProcessor IP support"
+ select MFD_CORE
+ select PM_GENERIC_DOMAINS if PM
+ help
+ Choose this option to enable ACP IP support for AMD SOCs.
+ This adds the ACP (Audio CoProcessor) IP driver and wires
+ it up into the amdgpu driver. The ACP block provides the DMA
+ engine for the i2s-based ALSA driver. It is required for audio
+ on APUs which utilize an i2s codec.
+
+endmenu
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
new file mode 100644
index 000000000000..8363cb57915b
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the ACP, which is a sub-component
+# of the AMDSOC/AMDGPU drm driver.
+# It provides the HW control for ACP-related functionality.
+
+subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
+
+AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/amd/acp/acp_hw.c
index 341dc560acbb..7af83f142b4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
+++ b/drivers/gpu/drm/amd/acp/acp_hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Red Hat Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,30 +19,32 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "gf100.h"
-#include "ctxgf100.h"
-
-#include <nvif/class.h>
-
-static const struct gf100_gr_func
-gm206_gr = {
- .init = gm204_gr_init,
- .mmio = gm204_gr_pack_mmio,
- .ppc_nr = 2,
- .grctx = &gm206_grctx,
- .sclass = {
- { -1, -1, FERMI_TWOD_A },
- { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
- { -1, -1, MAXWELL_B, &gf100_fermi },
- { -1, -1, MAXWELL_COMPUTE_B },
- {}
- }
-};
-
-int
-gm206_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+
+#include "acp_gfx_if.h"
+
+#define ACP_MODE_I2S 0
+#define ACP_MODE_AZ 1
+
+#define mmACP_AZALIA_I2S_SELECT 0x51d4
+
+int amd_acp_hw_init(void *cgs_device,
+ unsigned acp_version_major, unsigned acp_version_minor)
{
- return gf100_gr_new_(&gm206_gr, device, index, pgr);
+ unsigned int acp_mode = ACP_MODE_I2S;
+
+ if ((acp_version_major == 2) && (acp_version_minor == 2))
+ acp_mode = cgs_read_register(cgs_device,
+ mmACP_AZALIA_I2S_SELECT);
+
+ if (acp_mode != ACP_MODE_I2S)
+ return -ENODEV;
+
+ return 0;
}
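
For context, a hypothetical call site in the amdgpu ACP init path; the
cgs_device handle and version numbers here are assumptions, not part of
this file:

	int r = amd_acp_hw_init(cgs_device, 2, 2);

	if (r == -ENODEV)
		return 0;	/* ACP pads routed to Azalia, nothing to do */
	if (r)
		return r;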
diff --git a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
new file mode 100644
index 000000000000..bccf47b63899
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#ifndef _ACP_GFX_IF_H
+#define _ACP_GFX_IF_H
+
+#include <linux/types.h>
+#include "cgs_linux.h"
+#include "cgs_common.h"
+
+int amd_acp_hw_init(void *cgs_device,
+ unsigned acp_version_major, unsigned acp_version_minor);
+
+#endif /* _ACP_GFX_IF_H */
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 20c9539abc36..c7fcdcedaadb 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -8,7 +8,8 @@ ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
- -I$(FULL_AMD_PATH)/powerplay/inc
+ -I$(FULL_AMD_PATH)/powerplay/inc \
+ -I$(FULL_AMD_PATH)/acp/include
amdgpu-y := amdgpu_drv.o
@@ -20,7 +21,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
- atombios_encoders.o amdgpu_semaphore.o amdgpu_sa.o atombios_i2c.o \
+ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
@@ -92,7 +93,17 @@ amdgpu-y += amdgpu_cgs.o
amdgpu-y += \
../scheduler/gpu_scheduler.o \
../scheduler/sched_fence.o \
- amdgpu_sched.o
+ amdgpu_job.o
+
+# ACP component
+ifneq ($(CONFIG_DRM_AMD_ACP),)
+amdgpu-y += amdgpu_acp.o
+
+AMDACPPATH := ../acp
+include $(FULL_AMD_PATH)/acp/Makefile
+
+amdgpu-y += $(AMD_ACP_FILES)
+endif
amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5e7770f9a415..1bcbade479dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -53,6 +53,7 @@
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
+#include "amdgpu_acp.h"
#include "gpu_scheduler.h"
@@ -74,7 +75,6 @@ extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
-extern int amdgpu_hard_reset;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
@@ -82,10 +82,8 @@ extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
-extern int amdgpu_enable_scheduler;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
-extern int amdgpu_enable_semaphores;
extern int amdgpu_powerplay;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
@@ -108,9 +106,6 @@ extern unsigned amdgpu_pcie_lane_cap;
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
-/* number of hw syncs before falling back on blocking */
-#define AMDGPU_NUM_SYNCS 4
-
/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
@@ -146,11 +141,9 @@ extern unsigned amdgpu_pcie_lane_cap;
#define CIK_CURSOR_HEIGHT 128
struct amdgpu_device;
-struct amdgpu_fence;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
-struct amdgpu_semaphore;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
@@ -248,7 +241,7 @@ struct amdgpu_vm_pte_funcs {
unsigned count);
/* write pte one entry at a time with addr mapping */
void (*write_pte)(struct amdgpu_ib *ib,
- uint64_t pe,
+ const dma_addr_t *pages_addr, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
/* for linear pte/pde updates without addr mapping */
@@ -256,8 +249,6 @@ struct amdgpu_vm_pte_funcs {
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
- /* pad the indirect buffer to the necessary number of dw */
- void (*pad_ib)(struct amdgpu_ib *ib);
};
/* provided by the gmc block */
@@ -295,12 +286,11 @@ struct amdgpu_ring_funcs {
struct amdgpu_ib *ib);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
- bool (*emit_semaphore)(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait);
+ void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+ void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
@@ -310,6 +300,8 @@ struct amdgpu_ring_funcs {
int (*test_ib)(struct amdgpu_ring *ring);
/* insert NOP packets */
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+ /* pad the indirect buffer to the necessary number of dw */
+ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
};
/*
@@ -355,13 +347,15 @@ struct amdgpu_fence_driver {
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
- uint64_t sync_seq[AMDGPU_MAX_RINGS];
- atomic64_t last_seq;
+ uint32_t sync_seq;
+ atomic_t last_seq;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
struct timer_list fallback_timer;
- wait_queue_head_t fence_queue;
+ unsigned num_fences_mask;
+ spinlock_t lock;
+ struct fence **fences;
};
/* some special values for the owner field */
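
The reworked fence driver above drops the per-ring wait queue and the 64-bit per-ring seqno array in favor of a single 32-bit sync_seq plus a spinlock-guarded array of fence pointers. A sketch of the slot lookup this layout implies; the helper name is invented here, and the power-of-two sizing is an assumption inferred from the num_fences_mask field:

    /* Hypothetical helper, not part of the patch: map a 32-bit seqno
     * to its slot. The masking only wraps correctly if the fences
     * array length is a power of two (num_fences_mask = length - 1). */
    static struct fence **amdgpu_fence_slot(struct amdgpu_fence_driver *drv,
                                            uint32_t seq)
    {
            return &drv->fences[seq & drv->num_fences_mask];
    }
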
@@ -371,19 +365,6 @@ struct amdgpu_fence_driver {
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-struct amdgpu_fence {
- struct fence base;
-
- /* RB, DMA, etc. */
- struct amdgpu_ring *ring;
- uint64_t seq;
-
- /* filp or special value for fence creator */
- void *owner;
-
- wait_queue_t fence_wake;
-};
-
struct amdgpu_user_fence {
/* write-back bo */
struct amdgpu_bo *bo;
@@ -395,24 +376,18 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
- struct amdgpu_fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
- struct amdgpu_ring *ring);
-void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
- struct amdgpu_ring *ring);
-
/*
* TTM.
*/
@@ -431,6 +406,8 @@ struct amdgpu_mman {
/* buffer handling */
const struct amdgpu_buffer_funcs *buffer_funcs;
struct amdgpu_ring *buffer_funcs_ring;
+ /* Scheduler entity for buffer moves */
+ struct amd_sched_entity entity;
};
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -445,9 +422,9 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo *robj;
struct ttm_validate_buffer tv;
struct amdgpu_bo_va *bo_va;
- unsigned prefered_domains;
- unsigned allowed_domains;
uint32_t priority;
+ struct page **user_pages;
+ int user_invalidated;
};
struct amdgpu_bo_va_mapping {
@@ -459,7 +436,6 @@ struct amdgpu_bo_va_mapping {
/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
- struct mutex mutex;
/* protected by bo being reserved */
struct list_head bo_list;
struct fence *last_pt_update;
@@ -483,7 +459,8 @@ struct amdgpu_bo {
/* Protected by gem.mutex */
struct list_head list;
/* Protected by tbo.reserved */
- u32 initial_domain;
+ u32 prefered_domains;
+ u32 allowed_domains;
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
@@ -505,7 +482,6 @@ struct amdgpu_bo {
struct amdgpu_bo *parent;
struct ttm_bo_kmap_obj dma_buf_vmap;
- pid_t pid;
struct amdgpu_mn *mn;
struct list_head mn_list;
};
@@ -554,11 +530,14 @@ int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
* Assumption is that there won't be hole (all object on same
* alignment).
*/
+
+#define AMDGPU_SA_NUM_FENCE_LISTS 32
+
struct amdgpu_sa_manager {
wait_queue_head_t wq;
struct amdgpu_bo *bo;
struct list_head *hole;
- struct list_head flist[AMDGPU_MAX_RINGS];
+ struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
struct list_head olist;
unsigned size;
uint64_t gpu_addr;
@@ -580,13 +559,7 @@ struct amdgpu_sa_bo {
/*
* GEM objects.
*/
-struct amdgpu_gem {
- struct mutex mutex;
- struct list_head objects;
-};
-
-int amdgpu_gem_init(struct amdgpu_device *adev);
-void amdgpu_gem_fini(struct amdgpu_device *adev);
+void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, bool kernel,
@@ -598,32 +571,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-
-/*
- * Semaphores.
- */
-struct amdgpu_semaphore {
- struct amdgpu_sa_bo *sa_bo;
- signed waiters;
- uint64_t gpu_addr;
-};
-
-int amdgpu_semaphore_create(struct amdgpu_device *adev,
- struct amdgpu_semaphore **semaphore);
-bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore);
-bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore);
-void amdgpu_semaphore_free(struct amdgpu_device *adev,
- struct amdgpu_semaphore **semaphore,
- struct fence *fence);
-
/*
* Synchronization
*/
struct amdgpu_sync {
- struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
- struct fence *sync_to[AMDGPU_MAX_RINGS];
DECLARE_HASHTABLE(fences, 4);
struct fence *last_vm_update;
};
@@ -635,12 +586,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
-int amdgpu_sync_rings(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct fence *fence);
+void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
/*
* GART structures, functions & helpers
@@ -758,6 +708,7 @@ struct amdgpu_flip_work {
struct fence *excl;
unsigned shared_count;
struct fence **shared;
+ struct fence_cb cb;
};
@@ -770,12 +721,11 @@ struct amdgpu_ib {
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
- struct amdgpu_ring *ring;
- struct amdgpu_fence *fence;
struct amdgpu_user_fence *user;
struct amdgpu_vm *vm;
+ unsigned vm_id;
+ uint64_t vm_pd_addr;
struct amdgpu_ctx *ctx;
- struct amdgpu_sync sync;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
@@ -794,13 +744,14 @@ enum amdgpu_ring_type {
extern struct amd_sched_backend_ops amdgpu_sched_ops;
-int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_ib *ibs,
- unsigned num_ibs,
- int (*free_job)(struct amdgpu_job *),
- void *owner,
- struct fence **fence);
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ struct amdgpu_job **job);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+ struct amdgpu_job **job);
+void amdgpu_job_free(struct amdgpu_job *job);
+int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct amd_sched_entity *entity, void *owner,
+ struct fence **f);
struct amdgpu_ring {
struct amdgpu_device *adev;
@@ -809,7 +760,6 @@ struct amdgpu_ring {
struct amd_gpu_scheduler sched;
spinlock_t fence_lock;
- struct mutex *ring_lock;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr_offs;
@@ -818,7 +768,7 @@ struct amdgpu_ring {
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
- unsigned ring_free_dw;
+ unsigned max_dw;
int count_dw;
uint64_t gpu_addr;
uint32_t align_mask;
@@ -826,8 +776,6 @@ struct amdgpu_ring {
bool ready;
u32 nop;
u32 idx;
- u64 last_semaphore_signal_addr;
- u64 last_semaphore_wait_addr;
u32 me;
u32 pipe;
u32 queue;
@@ -840,7 +788,6 @@ struct amdgpu_ring {
struct amdgpu_ctx *current_ctx;
enum amdgpu_ring_type type;
char name[16];
- bool is_pte_ring;
};
/*
@@ -884,13 +831,14 @@ struct amdgpu_vm_pt {
};
struct amdgpu_vm_id {
- unsigned id;
- uint64_t pd_gpu_addr;
+ struct amdgpu_vm_manager_id *mgr_id;
+ uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
- struct fence *flushed_updates;
+ struct fence *flushed_updates;
};
struct amdgpu_vm {
+ /* tree of virtual addresses mapped */
struct rb_root va;
/* protecting invalidated */
@@ -915,30 +863,47 @@ struct amdgpu_vm {
/* for id and flush management per ring */
struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
- /* for interval tree */
- spinlock_t it_lock;
+
/* protecting freed */
spinlock_t freed_lock;
+
+ /* Scheduler entity for page table updates */
+ struct amd_sched_entity entity;
+};
+
+struct amdgpu_vm_manager_id {
+ struct list_head list;
+ struct fence *active;
+ atomic_long_t owner;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
};
struct amdgpu_vm_manager {
- struct {
- struct fence *active;
- atomic_long_t owner;
- } ids[AMDGPU_NUM_VM];
+ /* Handling of VMIDs */
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM];
uint32_t max_pfn;
- /* number of VMIDs */
- unsigned nvm;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */
bool enabled;
/* vm pte handling */
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct amdgpu_ring *vm_pte_funcs_ring;
+ struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_rings;
+ atomic_t vm_pte_next_ring;
};
+void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
@@ -949,14 +914,15 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync);
+ struct amdgpu_sync *sync, struct fence *fence,
+ unsigned *vm_id, uint64_t *vm_pd_addr);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
- struct amdgpu_vm *vm,
- struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+ unsigned vm_id, uint64_t pd_addr,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -982,7 +948,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-int amdgpu_vm_free_job(struct amdgpu_job *job);
/*
* context related structures
@@ -1010,10 +975,6 @@ struct amdgpu_ctx_mgr {
struct idr ctx_handles;
};
-int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
- struct amdgpu_ctx *ctx);
-void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
-
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
@@ -1048,13 +1009,15 @@ struct amdgpu_bo_list {
struct amdgpu_bo *gds_obj;
struct amdgpu_bo *gws_obj;
struct amdgpu_bo *oa_obj;
- bool has_userptr;
+ unsigned first_userptr;
unsigned num_entries;
struct amdgpu_bo_list_entry *array;
};
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
@@ -1128,6 +1091,7 @@ struct amdgpu_gca_config {
unsigned multi_gpu_tile_size;
unsigned mc_arb_ramcfg;
unsigned gb_addr_config;
+ unsigned num_rbs;
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
@@ -1170,23 +1134,20 @@ struct amdgpu_gfx {
unsigned ce_ram_size;
};
-int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
+int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
-int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_ib *ib, void *owner);
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ struct amdgpu_ib *ib, struct fence *last_vm_update,
+ struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-/* Ring access between begin & end cannot sleep */
-void amdgpu_ring_free_size(struct amdgpu_ring *ring);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
@@ -1196,7 +1157,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
-struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);
/*
* CS.
@@ -1205,47 +1165,58 @@ struct amdgpu_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
uint32_t *kdata;
- void __user *user_ptr;
};
struct amdgpu_cs_parser {
struct amdgpu_device *adev;
struct drm_file *filp;
struct amdgpu_ctx *ctx;
- struct amdgpu_bo_list *bo_list;
+
/* chunks */
unsigned nchunks;
struct amdgpu_cs_chunk *chunks;
- /* relocations */
- struct amdgpu_bo_list_entry vm_pd;
- struct list_head validated;
- struct fence *fence;
- struct amdgpu_ib *ibs;
- uint32_t num_ibs;
+ /* scheduler job object */
+ struct amdgpu_job *job;
- struct ww_acquire_ctx ticket;
+ /* buffer objects */
+ struct ww_acquire_ctx ticket;
+ struct amdgpu_bo_list *bo_list;
+ struct amdgpu_bo_list_entry vm_pd;
+ struct list_head validated;
+ struct fence *fence;
+ uint64_t bytes_moved_threshold;
+ uint64_t bytes_moved;
/* user fence */
- struct amdgpu_user_fence uf;
struct amdgpu_bo_list_entry uf_entry;
};
struct amdgpu_job {
struct amd_sched_job base;
struct amdgpu_device *adev;
+ struct amdgpu_ring *ring;
+ struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
+ struct fence *fence; /* the hw fence */
uint32_t num_ibs;
void *owner;
struct amdgpu_user_fence uf;
- int (*free_job)(struct amdgpu_job *job);
};
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)
-static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
+static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
+ uint32_t ib_idx, int idx)
+{
+ return p->job->ibs[ib_idx].ptr[idx];
+}
+
+static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
+ uint32_t ib_idx, int idx,
+ uint32_t value)
{
- return p->ibs[ib_idx].ptr[idx];
+ p->job->ibs[ib_idx].ptr[idx] = value;
}
/*
@@ -1497,6 +1468,7 @@ enum amdgpu_dpm_forced_level {
AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
+ AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
};
struct amdgpu_vce_state {
@@ -1619,6 +1591,8 @@ struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ unsigned fw_version;
+ void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@@ -1626,6 +1600,7 @@ struct amdgpu_uvd {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
bool address_64_bit;
+ struct amd_sched_entity entity;
};
/*
@@ -1650,6 +1625,7 @@ struct amdgpu_vce {
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
unsigned harvest_config;
+ struct amd_sched_entity entity;
};
/*
@@ -1884,6 +1860,18 @@ void amdgpu_cgs_destroy_device(void *cgs_device);
/*
+ * CGS
+ */
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(void *cgs_device);
+
+
+/* GPU virtualization */
+struct amdgpu_virtualization {
+ bool supports_sr_iov;
+};
+
+/*
* Core structure, functions and helpers.
*/
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
@@ -1903,6 +1891,10 @@ struct amdgpu_device {
struct drm_device *ddev;
struct pci_dev *pdev;
+#ifdef CONFIG_DRM_AMD_ACP
+ struct amdgpu_acp acp;
+#endif
+
/* ASIC */
enum amd_asic_type asic_type;
uint32_t family;
@@ -1979,7 +1971,6 @@ struct amdgpu_device {
/* memory management */
struct amdgpu_mman mman;
- struct amdgpu_gem gem;
struct amdgpu_vram_scratch vram_scratch;
struct amdgpu_wb wb;
atomic64_t vram_usage;
@@ -1997,7 +1988,6 @@ struct amdgpu_device {
/* rings */
unsigned fence_context;
- struct mutex ring_lock;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
@@ -2009,6 +1999,7 @@ struct amdgpu_device {
/* powerplay */
struct amd_powerplay powerplay;
bool pp_enabled;
+ bool pp_force_state_enabled;
/* dpm */
struct amdgpu_pm pm;
@@ -2025,7 +2016,6 @@ struct amdgpu_device {
struct amdgpu_sdma sdma;
/* uvd */
- bool has_uvd;
struct amdgpu_uvd uvd;
/* vce */
@@ -2045,13 +2035,13 @@ struct amdgpu_device {
/* tracking pinned memory */
u64 vram_pin_size;
+ u64 invisible_pin_size;
u64 gart_pin_size;
/* amdkfd interface */
struct kfd_dev *kfd;
- /* kernel conext for IB submission */
- struct amdgpu_ctx kernel_ctx;
+ struct amdgpu_virtualization virtualization;
};
bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2073,20 +2063,6 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
/*
- * Cast helper
- */
-extern const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
-{
- struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
-
- if (__f->base.ops == &amdgpu_fence_ops)
- return __f;
-
- return NULL;
-}
-
-/*
* Registers read & write functions.
*/
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
@@ -2156,7 +2132,6 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
- ring->ring_free_dw--;
}
static inline struct amdgpu_sdma_instance *
@@ -2192,9 +2167,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
@@ -2202,11 +2176,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
+#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
-#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
+#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2298,6 +2274,21 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_dpm_get_performance_level(adev) \
(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
+#define amdgpu_dpm_get_pp_num_states(adev, data) \
+ (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+
+#define amdgpu_dpm_get_pp_table(adev, table) \
+ (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+
+#define amdgpu_dpm_set_pp_table(adev, buf, size) \
+ (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+
+#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
+ (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+
+#define amdgpu_dpm_force_clock_level(adev, type, level) \
+ (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+
#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
@@ -2308,7 +2299,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
-bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2316,11 +2306,15 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
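
Taken together, the declarations above replace amdgpu_sched_ib_submit_kernel_helper() with an explicit job lifecycle: allocate, fill the IBs, submit against a scheduler entity, collect a fence. A rough sketch of that flow, with the IB size and error handling as placeholder assumptions rather than anything this patch prescribes:

    /* Sketch of the new kernel-internal submission flow. */
    struct amdgpu_job *job;
    struct fence *f;
    int r;

    r = amdgpu_job_alloc_with_ib(adev, 64, &job);   /* 64 dw: assumed size */
    if (r)
            return r;
    /* ... write packets into job->ibs[0].ptr and bump length_dw ... */
    r = amdgpu_job_submit(job, ring, &entity, owner, &f);
    if (r) {
            amdgpu_job_free(job);
            return r;
    }
    fence_put(f);
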
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
new file mode 100644
index 000000000000..b7b583c42ea8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/pm_domain.h>
+#include <linux/platform_device.h>
+#include <sound/designware_i2s.h>
+#include <sound/pcm.h>
+
+#include "amdgpu.h"
+#include "atom.h"
+#include "amdgpu_acp.h"
+
+#include "acp_gfx_if.h"
+
+#define ACP_TILE_ON_MASK 0x03
+#define ACP_TILE_OFF_MASK 0x02
+#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
+#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
+
+#define ACP_TILE_P1_MASK 0x3e
+#define ACP_TILE_P2_MASK 0x3d
+#define ACP_TILE_DSP0_MASK 0x3b
+#define ACP_TILE_DSP1_MASK 0x37
+
+#define ACP_TILE_DSP2_MASK 0x2f
+
+#define ACP_DMA_REGS_END 0x146c0
+#define ACP_I2S_PLAY_REGS_START 0x14840
+#define ACP_I2S_PLAY_REGS_END 0x148b4
+#define ACP_I2S_CAP_REGS_START 0x148b8
+#define ACP_I2S_CAP_REGS_END 0x1496c
+
+#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
+#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
+#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
+#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+
+#define mmACP_PGFSM_RETAIN_REG 0x51c9
+#define mmACP_PGFSM_CONFIG_REG 0x51ca
+#define mmACP_PGFSM_READ_REG_0 0x51cc
+
+#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
+#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
+#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
+#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
+
+#define ACP_TIMEOUT_LOOP 0x000000FF
+#define ACP_DEVS 3
+#define ACP_SRC_ID 162
+
+enum {
+ ACP_TILE_P1 = 0,
+ ACP_TILE_P2,
+ ACP_TILE_DSP0,
+ ACP_TILE_DSP1,
+ ACP_TILE_DSP2,
+};
+
+static int acp_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->acp.parent = adev->dev;
+
+ adev->acp.cgs_device =
+ amdgpu_cgs_create_device(adev);
+ if (!adev->acp.cgs_device)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int acp_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->acp.cgs_device)
+ amdgpu_cgs_destroy_device(adev->acp.cgs_device);
+
+ return 0;
+}
+
+/* power off a tile/block within ACP */
+static int acp_suspend_tile(void *cgs_dev, int tile)
+{
+ u32 val = 0;
+ u32 count = 0;
+
+ if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+ pr_err("Invalid ACP tile : %d to suspend\n", tile);
+ return -1;
+ }
+
+ val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
+ val &= ACP_TILE_ON_MASK;
+
+ if (val == 0x0) {
+ val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
+ val = val | (1 << tile);
+ cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
+ cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
+ 0x500 + tile);
+
+ count = ACP_TIMEOUT_LOOP;
+ while (true) {
+ val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
+ + tile);
+ val = val & ACP_TILE_ON_MASK;
+ if (val == ACP_TILE_OFF_MASK)
+ break;
+ if (--count == 0) {
+ pr_err("Timeout reading ACP PGFSM status\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+
+ val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
+
+ val |= ACP_TILE_OFF_RETAIN_REG_MASK;
+ cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
+ }
+ return 0;
+}
+
+/* power on a tile/block within ACP */
+static int acp_resume_tile(void *cgs_dev, int tile)
+{
+ u32 val = 0;
+ u32 count = 0;
+
+ if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+ pr_err("Invalid ACP tile to resume\n");
+ return -1;
+ }
+
+ val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
+ val = val & ACP_TILE_ON_MASK;
+
+ if (val != 0x0) {
+ cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
+ 0x600 + tile);
+ count = ACP_TIMEOUT_LOOP;
+ while (true) {
+ val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
+ + tile);
+ val = val & ACP_TILE_ON_MASK;
+ if (val == 0x0)
+ break;
+ if (--count == 0) {
+ pr_err("Timeout reading ACP PGFSM status\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
+ if (tile == ACP_TILE_P1)
+ val = val & (ACP_TILE_P1_MASK);
+ else if (tile == ACP_TILE_P2)
+ val = val & (ACP_TILE_P2_MASK);
+
+ cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
+ }
+ return 0;
+}
+
+struct acp_pm_domain {
+ void *cgs_dev;
+ struct generic_pm_domain gpd;
+};
+
+static int acp_poweroff(struct generic_pm_domain *genpd)
+{
+ int i, ret;
+ struct acp_pm_domain *apd;
+
+ apd = container_of(genpd, struct acp_pm_domain, gpd);
+ if (apd != NULL) {
+ /* Do not return abruptly if any power tile fails to suspend.
+ * Log it and continue powering off the other tiles.
+ */
+ for (i = 4; i >= 0 ; i--) {
+ ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
+ if (ret)
+ pr_err("ACP tile %d tile suspend failed\n", i);
+ }
+ }
+ return 0;
+}
+
+static int acp_poweron(struct generic_pm_domain *genpd)
+{
+ int i, ret;
+ struct acp_pm_domain *apd;
+
+ apd = container_of(genpd, struct acp_pm_domain, gpd);
+ if (apd != NULL) {
+ for (i = 0; i < 2; i++) {
+ ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
+ if (ret) {
+ pr_err("ACP tile %d resume failed\n", i);
+ break;
+ }
+ }
+
+ /* Disable DSPs which are not going to be used */
+ for (i = 0; i < 3; i++) {
+ ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
+ /* Continue suspending the other DSPs, even if one fails */
+ if (ret)
+ pr_err("ACP DSP %d suspend failed\n", i);
+ }
+ }
+ return 0;
+}
+
+static struct device *get_mfd_cell_dev(const char *device_name, int r)
+{
+ char auto_dev_name[25];
+ struct device *dev;
+
+ snprintf(auto_dev_name, sizeof(auto_dev_name),
+ "%s.%d.auto", device_name, r);
+ dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
+ dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
+
+ return dev;
+}
+
+/**
+ * acp_hw_init - start and test ACP block
+ *
+ * @handle: amdgpu_device pointer, passed as an opaque handle
+ *
+ */
+static int acp_hw_init(void *handle)
+{
+ int r, i;
+ uint64_t acp_base;
+ struct device *dev;
+ struct i2s_platform_data *i2s_pdata;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ const struct amdgpu_ip_block_version *ip_version =
+ amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
+
+ if (!ip_version)
+ return -EINVAL;
+
+ r = amd_acp_hw_init(adev->acp.cgs_device,
+ ip_version->major, ip_version->minor);
+ /* -ENODEV means board uses AZ rather than ACP */
+ if (r == -ENODEV)
+ return 0;
+ else if (r)
+ return r;
+
+ r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
+ 0x5289, 0, &acp_base);
+ if (r == -ENODEV)
+ return 0;
+ else if (r)
+ return r;
+
+ adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
+ if (adev->acp.acp_genpd == NULL)
+ return -ENOMEM;
+
+ adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
+ adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
+ adev->acp.acp_genpd->gpd.power_on = acp_poweron;
+
+
+ adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
+
+ pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+
+ adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
+ GFP_KERNEL);
+
+ if (adev->acp.acp_cell == NULL)
+ return -ENOMEM;
+
+ adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);
+
+ if (adev->acp.acp_res == NULL) {
+ kfree(adev->acp.acp_cell);
+ return -ENOMEM;
+ }
+
+ i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
+ if (i2s_pdata == NULL) {
+ kfree(adev->acp.acp_res);
+ kfree(adev->acp.acp_cell);
+ return -ENOMEM;
+ }
+
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ i2s_pdata[0].cap = DWC_I2S_PLAY;
+ i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
+ i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
+
+ i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_COMP_PARAM1;
+ i2s_pdata[1].cap = DWC_I2S_RECORD;
+ i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
+ i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
+
+ adev->acp.acp_res[0].name = "acp2x_dma";
+ adev->acp.acp_res[0].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[0].start = acp_base;
+ adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
+
+ adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
+ adev->acp.acp_res[1].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
+ adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;
+
+ adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
+ adev->acp.acp_res[2].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
+ adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
+
+ adev->acp.acp_res[3].name = "acp2x_dma_irq";
+ adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
+ adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
+ adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
+
+ adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].num_resources = 4;
+ adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
+
+ adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].num_resources = 1;
+ adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
+ adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
+ adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
+
+ adev->acp.acp_cell[2].name = "designware-i2s";
+ adev->acp.acp_cell[2].num_resources = 1;
+ adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
+ adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
+ adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
+
+ r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
+ ACP_DEVS);
+ if (r)
+ return r;
+
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+ r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
+ if (r) {
+ dev_err(dev, "Failed to add dev to genpd\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acp_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer, passed as an opaque handle
+ *
+ */
+static int acp_hw_fini(void *handle)
+{
+ int i, ret;
+ struct device *dev;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+ ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
+ /* If removal fails, don't give up; try the rest */
+ if (ret)
+ dev_err(dev, "remove dev from genpd failed\n");
+ }
+
+ mfd_remove_devices(adev->acp.parent);
+ kfree(adev->acp.acp_res);
+ kfree(adev->acp.acp_genpd);
+ kfree(adev->acp.acp_cell);
+
+ return 0;
+}
+
+static int acp_suspend(void *handle)
+{
+ return 0;
+}
+
+static int acp_resume(void *handle)
+{
+ int i, ret;
+ struct acp_pm_domain *apd;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* return early if no ACP */
+ if (!adev->acp.acp_genpd)
+ return 0;
+
+ /* The SMU block powers on ACP irrespective of its runtime status.
+ * Power it off explicitly based on the genpd runtime status so that
+ * the ACP hardware and the ACP genpd state stay in sync.
+ * 'suspend_power_off' records the power status before system suspend.
+ */
+ if (adev->acp.acp_genpd->gpd.suspend_power_off == true) {
+ apd = container_of(&adev->acp.acp_genpd->gpd,
+ struct acp_pm_domain, gpd);
+
+ for (i = 4; i >= 0 ; i--) {
+ ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
+ if (ret)
+ pr_err("ACP tile %d tile suspend failed\n", i);
+ }
+ }
+ return 0;
+}
+
+static int acp_early_init(void *handle)
+{
+ return 0;
+}
+
+static bool acp_is_idle(void *handle)
+{
+ return true;
+}
+
+static int acp_wait_for_idle(void *handle)
+{
+ return 0;
+}
+
+static int acp_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static void acp_print_status(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ dev_info(adev->dev, "ACP STATUS\n");
+}
+
+static int acp_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int acp_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs acp_ip_funcs = {
+ .early_init = acp_early_init,
+ .late_init = NULL,
+ .sw_init = acp_sw_init,
+ .sw_fini = acp_sw_fini,
+ .hw_init = acp_hw_init,
+ .hw_fini = acp_hw_fini,
+ .suspend = acp_suspend,
+ .resume = acp_resume,
+ .is_idle = acp_is_idle,
+ .wait_for_idle = acp_wait_for_idle,
+ .soft_reset = acp_soft_reset,
+ .print_status = acp_print_status,
+ .set_clockgating_state = acp_set_clockgating_state,
+ .set_powergating_state = acp_set_powergating_state,
+};
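
Note that nothing in this file calls acp_poweron()/acp_poweroff() directly: the MFD cell devices are attached to the ACP_AUDIO domain, and the genpd core invokes those callbacks when the first child resumes or the last one suspends. A sketch of what a child driver (for instance the designware-i2s cell) would do to trigger that, assuming standard runtime PM usage:

    /* Sketch: runtime PM from a device inside the ACP_AUDIO domain
     * (needs <linux/pm_runtime.h>). genpd powers the domain up and
     * down around these calls, which is what ends up running
     * acp_poweron()/acp_poweroff() above. */
    pm_runtime_get_sync(dev);    /* domain was off -> acp_poweron() */
    /* ... stream audio through the DMA/I2S blocks ... */
    pm_runtime_put_sync(dev);    /* last active child -> acp_poweroff() */
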
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
new file mode 100644
index 000000000000..f6e32a639107
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_ACP_H__
+#define __AMDGPU_ACP_H__
+
+#include <linux/mfd/core.h>
+
+struct amdgpu_acp {
+ struct device *parent;
+ void *cgs_device;
+ struct amd_acp_private *private;
+ struct mfd_cell *acp_cell;
+ struct resource *acp_res;
+ struct acp_pm_domain *acp_genpd;
+};
+
+extern const struct amd_ip_funcs acp_ip_funcs;
+
+#endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 84d68d658f8a..32809f749903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -30,25 +30,38 @@ const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
-bool amdgpu_amdkfd_init(void)
+int amdgpu_amdkfd_init(void)
{
+ int ret;
+
#if defined(CONFIG_HSA_AMD_MODULE)
- bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+ int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
kgd2kfd_init_p = symbol_request(kgd2kfd_init);
if (kgd2kfd_init_p == NULL)
- return false;
+ return -ENOENT;
+
+ ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
+ if (ret) {
+ symbol_put(kgd2kfd_init);
+ kgd2kfd = NULL;
+ }
+
+#elif defined(CONFIG_HSA_AMD)
+ ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
+ if (ret)
+ kgd2kfd = NULL;
+
+#else
+ ret = -ENOENT;
#endif
- return true;
+
+ return ret;
}
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
{
-#if defined(CONFIG_HSA_AMD_MODULE)
- bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
-#endif
-
switch (rdev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
@@ -62,35 +75,7 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
return false;
}
-#if defined(CONFIG_HSA_AMD_MODULE)
- kgd2kfd_init_p = symbol_request(kgd2kfd_init);
-
- if (kgd2kfd_init_p == NULL) {
- kfd2kgd = NULL;
- return false;
- }
-
- if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
- symbol_put(kgd2kfd_init);
- kfd2kgd = NULL;
- kgd2kfd = NULL;
-
- return false;
- }
-
return true;
-#elif defined(CONFIG_HSA_AMD)
- if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
- kfd2kgd = NULL;
- kgd2kfd = NULL;
- return false;
- }
-
- return true;
-#else
- kfd2kgd = NULL;
- return false;
-#endif
}
void amdgpu_amdkfd_fini(void)
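
With the bool-to-int conversion, callers can distinguish "KFD simply not built" (-ENOENT) from a genuine interface failure. One plausible way a call site could use the richer return code, purely illustrative since the actual caller in amdgpu_drv.c is outside this hunk:

    /* Sketch: treat a missing amdkfd module as non-fatal. */
    r = amdgpu_amdkfd_init();
    if (r == -ENOENT)
            pr_info("amdkfd not available, HSA features disabled\n");
    else if (r)
            return r;
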
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a8be765542e6..de530f68d4e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -36,7 +36,7 @@ struct kgd_mem {
void *cpu_ptr;
};
-bool amdgpu_amdkfd_init(void);
+int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9416e0f5c1db..84b0ce39ee14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1514,6 +1514,19 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
return -EINVAL;
}
+bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev)
+{
+ int index = GetIndexIntoMasterTable(DATA, GPUVirtualizationInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset))
+ return true;
+
+ return false;
+}
+
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
{
uint32_t bios_6_scratch;
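
The new helper only answers whether the VBIOS carries a GPUVirtualizationInfo table; feeding the result into the supports_sr_iov flag declared earlier in this patch presumably happens during ASIC init. A sketch of that assumed call site, not shown in this hunk:

    /* Assumed call site (e.g. ASIC early init): */
    adev->virtualization.supports_sr_iov =
            amdgpu_atombios_has_gpu_virtualization_table(adev);
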
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 0ebb959ea435..9e1442053fe4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -196,6 +196,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
u8 module_index,
struct atom_mc_reg_table *reg_table);
+bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
+
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3c895863fcf5..35a1248aaa77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -144,7 +144,10 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
/* make sure required functions are enabled */
/* dGPU power control is required */
- atpx->functions.power_cntl = true;
+ if (atpx->functions.power_cntl == false) {
+ printk("ATPX dGPU power cntl not present, forcing\n");
+ atpx->functions.power_cntl = true;
+ }
if (atpx->functions.px_params) {
union acpi_object *info;
@@ -552,13 +555,14 @@ static bool amdgpu_atpx_detect(void)
void amdgpu_register_atpx_handler(void)
{
bool r;
+ enum vga_switcheroo_handler_flags_t handler_flags = 0;
/* detect if we have any ATPX + 2 VGA in the system */
r = amdgpu_atpx_detect();
if (!r)
return;
- vga_switcheroo_register_handler(&amdgpu_atpx_handler);
+ vga_switcheroo_register_handler(&amdgpu_atpx_handler, handler_flags);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index f82a2dd83874..eacd810fc09b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -32,6 +32,9 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#define AMDGPU_BO_LIST_MAX_PRIORITY 32u
+#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
+
static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
struct amdgpu_bo_list **result,
int *id)
@@ -88,8 +91,9 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
- bool has_userptr = false;
+ unsigned last_entry = 0, first_userptr = num_entries;
unsigned i;
+ int r;
array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
if (!array)
@@ -97,33 +101,43 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
for (i = 0; i < num_entries; ++i) {
- struct amdgpu_bo_list_entry *entry = &array[i];
+ struct amdgpu_bo_list_entry *entry;
struct drm_gem_object *gobj;
+ struct amdgpu_bo *bo;
+ struct mm_struct *usermm;
gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
- if (!gobj)
+ if (!gobj) {
+ r = -ENOENT;
goto error_free;
+ }
- entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
drm_gem_object_unreference_unlocked(gobj);
- entry->priority = info[i].bo_priority;
- entry->prefered_domains = entry->robj->initial_domain;
- entry->allowed_domains = entry->prefered_domains;
- if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
- entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
- if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
- has_userptr = true;
- entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
- entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
+ if (usermm) {
+ if (usermm != current->mm) {
+ amdgpu_bo_unref(&bo);
+ r = -EPERM;
+ goto error_free;
+ }
+ entry = &array[--first_userptr];
+ } else {
+ entry = &array[last_entry++];
}
+
+ entry->robj = bo;
+ entry->priority = min(info[i].bo_priority,
+ AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
- if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
+ if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
gds_obj = entry->robj;
- if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
+ if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
gws_obj = entry->robj;
- if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
+ if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
oa_obj = entry->robj;
trace_amdgpu_bo_list_set(list, entry->robj);
@@ -137,15 +151,17 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
list->gds_obj = gds_obj;
list->gws_obj = gws_obj;
list->oa_obj = oa_obj;
- list->has_userptr = has_userptr;
+ list->first_userptr = first_userptr;
list->array = array;
list->num_entries = num_entries;
return 0;
error_free:
+ while (i--)
+ amdgpu_bo_unref(&array[i].robj);
drm_free_large(array);
- return -ENOENT;
+ return r;
}
struct amdgpu_bo_list *
@@ -161,6 +177,37 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
return result;
}
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ struct list_head *validated)
+{
+ /* This is based on the bucket sort with O(n) time complexity.
+ * An item with priority "i" is added to bucket[i]. The lists are then
+ * concatenated in descending order.
+ */
+ struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+ unsigned i;
+
+ for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
+ INIT_LIST_HEAD(&bucket[i]);
+
+ /* Since buffers which appear sooner in the relocation list are
+ * likely to be used more often than buffers which appear later
+ * in the list, the sort mustn't change the ordering of buffers
+ * with the same priority, i.e. it must be stable.
+ */
+ for (i = 0; i < list->num_entries; i++) {
+ unsigned priority = list->array[i].priority;
+
+ list_add_tail(&list->array[i].tv.head,
+ &bucket[priority]);
+ list->array[i].user_pages = NULL;
+ }
+
+ /* Connect the sorted buckets in the output list. */
+ for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
+ list_splice(&bucket[i], validated);
+}
+
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
mutex_unlock(&list->lock);
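
Because amdgpu_bo_list_set() now fills regular BOs from the front of the array and userptr BOs from the back, first_userptr serves as both partition point and implicit count. A sketch of a consumer walking only the userptr entries; illustrative, not part of the patch:

    /* Sketch: iterate just the userptr BOs, which occupy the tail
     * [first_userptr, num_entries) of the entry array. */
    unsigned i;

    for (i = list->first_userptr; i < list->num_entries; ++i) {
            struct amdgpu_bo_list_entry *e = &list->array[i];

            /* e->robj is a userptr BO; e->user_pages starts out NULL
             * and may need populating before validation. */
    }
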
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a4b101e10c6..6043dc7c3a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
uint32_t line_time_us, vblank_lines;
+ struct cgs_mode_info *mode_info;
if (info == NULL)
return -EINVAL;
+ mode_info = info->mode_info;
+
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
info->display_count++;
}
- if (info->mode_info != NULL &&
+ if (mode_info != NULL &&
crtc->enabled && amdgpu_crtc->enabled &&
amdgpu_crtc->hw_mode.clock) {
line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
(amdgpu_crtc->v_border * 2);
- info->mode_info->vblank_time_us = vblank_lines * line_time_us;
- info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- info->mode_info->ref_clock = adev->clock.spll.reference_freq;
- info->mode_info++;
+ mode_info->vblank_time_us = vblank_lines * line_time_us;
+ mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ mode_info = NULL;
}
}
}
@@ -847,6 +850,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
return 0;
}
+
+static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
+{
+ CGS_FUNC_ADEV;
+
+ adev->pm.dpm_enabled = enabled;
+
+ return 0;
+}
+
/** \brief evaluate acpi namespace object, handle or pathname must be valid
* \param cgs_device
* \param info input/output arguments for the control method
@@ -1097,6 +1110,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,
+ amdgpu_cgs_notify_dpm_enabled,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b882e8175615..9392e50a7ba4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -25,52 +25,12 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/list_sort.h>
+#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
-#define AMDGPU_CS_MAX_PRIORITY 32u
-#define AMDGPU_CS_NUM_BUCKETS (AMDGPU_CS_MAX_PRIORITY + 1)
-
-/* This is based on the bucket sort with O(n) time complexity.
- * An item with priority "i" is added to bucket[i]. The lists are then
- * concatenated in descending order.
- */
-struct amdgpu_cs_buckets {
- struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
-};
-
-static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
-{
- unsigned i;
-
- for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++)
- INIT_LIST_HEAD(&b->bucket[i]);
-}
-
-static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b,
- struct list_head *item, unsigned priority)
-{
- /* Since buffers which appear sooner in the relocation list are
- * likely to be used more often than buffers which appear later
- * in the list, the sort mustn't change the ordering of buffers
- * with the same priority, i.e. it must be stable.
- */
- list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);
-}
-
-static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b,
- struct list_head *out_list)
-{
- unsigned i;
-
- /* Connect the sorted buckets in the output list. */
- for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) {
- list_splice(&b->bucket[i], out_list);
- }
-}
-
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
u32 ip_instance, u32 ring,
struct amdgpu_ring **out_ring)
@@ -128,6 +88,7 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ struct amdgpu_user_fence *uf,
struct drm_amdgpu_cs_chunk_fence *fence_data)
{
struct drm_gem_object *gobj;
@@ -139,20 +100,19 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
if (gobj == NULL)
return -EINVAL;
- p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
- p->uf.offset = fence_data->offset;
+ uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ uf->offset = fence_data->offset;
- if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+ if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
return -EINVAL;
}
- p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
- p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
- p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true;
+ p->uf_entry.user_pages = NULL;
drm_gem_object_unreference_unlocked(gobj);
return 0;
@@ -160,11 +120,12 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
- struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned size;
+ struct amdgpu_user_fence uf = {};
+ unsigned size, num_ibs = 0;
int i;
int ret;
@@ -181,15 +142,12 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_chunk;
}
- p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
-
/* get chunks */
- INIT_LIST_HEAD(&p->validated);
chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
- goto put_bo_list;
+ goto put_ctx;
}
p->nchunks = cs->in.num_chunks;
@@ -197,7 +155,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
GFP_KERNEL);
if (!p->chunks) {
ret = -ENOMEM;
- goto put_bo_list;
+ goto put_ctx;
}
for (i = 0; i < p->nchunks; i++) {
@@ -217,7 +175,6 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
- p->chunks[i].user_ptr = cdata;
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
@@ -233,7 +190,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
switch (p->chunks[i].chunk_id) {
case AMDGPU_CHUNK_ID_IB:
- p->num_ibs++;
+ ++num_ibs;
break;
case AMDGPU_CHUNK_ID_FENCE:
@@ -243,7 +200,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_partial_kdata;
}
- ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
+ ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
if (ret)
goto free_partial_kdata;
@@ -258,12 +215,11 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
}
-
- p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!p->ibs) {
- ret = -ENOMEM;
+ ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job);
+ if (ret)
goto free_all_kdata;
- }
+
+ p->job->uf = uf;
kfree(chunk_array);
return 0;
@@ -274,9 +230,7 @@ free_partial_kdata:
for (; i >= 0; i--)
drm_free_large(p->chunks[i].kdata);
kfree(p->chunks);
-put_bo_list:
- if (p->bo_list)
- amdgpu_bo_list_put(p->bo_list);
+put_ctx:
amdgpu_ctx_put(p->ctx);
free_chunk:
kfree(chunk_array);
@@ -336,96 +290,198 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
return max(bytes_moved_threshold, 1024*1024ull);
}
-int amdgpu_cs_list_validate(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
+int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
struct amdgpu_bo_list_entry *lobj;
- struct amdgpu_bo *bo;
- u64 bytes_moved = 0, initial_bytes_moved;
- u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
+ u64 initial_bytes_moved;
int r;
list_for_each_entry(lobj, validated, tv.head) {
- bo = lobj->robj;
- if (!bo->pin_count) {
- u32 domain = lobj->prefered_domains;
- u32 current_domain =
- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
- /* Check if this buffer will be moved and don't move it
- * if we have moved too many buffers for this IB already.
- *
- * Note that this allows moving at least one buffer of
- * any size, because it doesn't take the current "bo"
- * into account. We don't want to disallow buffer moves
- * completely.
- */
- if ((lobj->allowed_domains & current_domain) != 0 &&
- (domain & current_domain) == 0 && /* will be moved */
- bytes_moved > bytes_moved_threshold) {
- /* don't move it */
- domain = current_domain;
- }
+ struct amdgpu_bo *bo = lobj->robj;
+ bool binding_userptr = false;
+ struct mm_struct *usermm;
+ uint32_t domain;
+
+ usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
+ if (usermm && usermm != current->mm)
+ return -EPERM;
+
+ /* Check if we have user pages and the BO is not bound yet */
+ if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
+ size_t size = sizeof(struct page *);
+
+ size *= bo->tbo.ttm->num_pages;
+ memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+ binding_userptr = true;
+ }
- retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- bytes_moved += atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
-
- if (unlikely(r)) {
- if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
- domain = lobj->allowed_domains;
- goto retry;
- }
- return r;
+ if (bo->pin_count)
+ continue;
+
+ /* Avoid moving this one if we have moved too many buffers
+ * for this IB already.
+ *
+ * Note that this allows moving at least one buffer of
+ * any size, because it doesn't take the current "bo"
+ * into account. We don't want to disallow buffer moves
+ * completely.
+ */
+ if (p->bytes_moved <= p->bytes_moved_threshold)
+ domain = bo->prefered_domains;
+ else
+ domain = bo->allowed_domains;
+
+ retry:
+ amdgpu_ttm_placement_from_domain(bo, domain);
+ initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ initial_bytes_moved;
+
+ if (unlikely(r)) {
+ if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
+ domain = bo->allowed_domains;
+ goto retry;
}
+ return r;
+ }
+
+ if (binding_userptr) {
+ drm_free_large(lobj->user_pages);
+ lobj->user_pages = NULL;
}
- lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
}
return 0;
}
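
The rewritten amdgpu_cs_list_validate() above replaces the per-call byte counter with a per-submission budget kept in the parser: while p->bytes_moved is within the threshold, a buffer is validated into its preferred domains (possibly moving it), and once the budget is spent it settles for any allowed domain, with a single retry to the allowed domains if placement still fails. The budget decision in isolation, with illustrative names:

    /* Domain choice under a move budget: prefer the ideal placement
     * until the submission has already moved too many bytes. */
    static unsigned pick_domain(unsigned preferred, unsigned allowed,
                                unsigned long long moved,
                                unsigned long long threshold)
    {
        return moved <= threshold ? preferred : allowed;
    }

Tracking the budget in the parser lets the duplicates list and the main validated list share one threshold instead of each getting a fresh allowance.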
-static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
+static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_cs_buckets buckets;
+ struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
bool need_mmap_lock = false;
- int i, r;
+ unsigned i, tries = 10;
+ int r;
- if (p->bo_list) {
- need_mmap_lock = p->bo_list->has_userptr;
- amdgpu_cs_buckets_init(&buckets);
- for (i = 0; i < p->bo_list->num_entries; i++)
- amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head,
- p->bo_list->array[i].priority);
+ INIT_LIST_HEAD(&p->validated);
- amdgpu_cs_buckets_get_list(&buckets, &p->validated);
+ p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+ if (p->bo_list) {
+ need_mmap_lock = p->bo_list->first_userptr !=
+ p->bo_list->num_entries;
+ amdgpu_bo_list_get_list(p->bo_list, &p->validated);
}
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
- if (p->uf.bo)
+ if (p->job->uf.bo)
list_add(&p->uf_entry.tv.head, &p->validated);
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
- if (unlikely(r != 0))
- goto error_reserve;
+ while (1) {
+ struct list_head need_pages;
+ unsigned i;
+
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
+ &duplicates);
+ if (unlikely(r != 0))
+ goto error_free_pages;
+
+ /* Without a BO list we don't have userptr BOs */
+ if (!p->bo_list)
+ break;
+
+ INIT_LIST_HEAD(&need_pages);
+ for (i = p->bo_list->first_userptr;
+ i < p->bo_list->num_entries; ++i) {
+
+ e = &p->bo_list->array[i];
+
+ if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+ &e->user_invalidated) && e->user_pages) {
+
+ /* We acquired a page array, but somebody
+ * invalidated it. Free it and try again
+ */
+ release_pages(e->user_pages,
+ e->robj->tbo.ttm->num_pages,
+ false);
+ drm_free_large(e->user_pages);
+ e->user_pages = NULL;
+ }
+
+ if (e->robj->tbo.ttm->state != tt_bound &&
+ !e->user_pages) {
+ list_del(&e->tv.head);
+ list_add(&e->tv.head, &need_pages);
+
+ amdgpu_bo_unreserve(e->robj);
+ }
+ }
+
+ if (list_empty(&need_pages))
+ break;
+
+ /* Unreserve everything again. */
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+ /* We tried too often, just abort */
+ if (!--tries) {
+ r = -EDEADLK;
+ goto error_free_pages;
+ }
+
+ /* Fill the page arrays for all userptrs. */
+ list_for_each_entry(e, &need_pages, tv.head) {
+ struct ttm_tt *ttm = e->robj->tbo.ttm;
+
+ e->user_pages = drm_calloc_large(ttm->num_pages,
+ sizeof(struct page *));
+ if (!e->user_pages) {
+ r = -ENOMEM;
+ goto error_free_pages;
+ }
+
+ r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
+ if (r) {
+ drm_free_large(e->user_pages);
+ e->user_pages = NULL;
+ goto error_free_pages;
+ }
+ }
+
+ /* And try again. */
+ list_splice(&need_pages, &p->validated);
+ }
amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
- r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+ p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
+ p->bytes_moved = 0;
+
+ r = amdgpu_cs_list_validate(p, &duplicates);
+ if (r)
+ goto error_validate;
+
+ r = amdgpu_cs_list_validate(p, &p->validated);
if (r)
goto error_validate;
- r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+ if (p->bo_list) {
+ struct amdgpu_vm *vm = &fpriv->vm;
+ unsigned i;
+
+ for (i = 0; i < p->bo_list->num_entries; i++) {
+ struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+
+ p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
+ }
+ }
error_validate:
if (r) {
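
The reservation path above is now a bounded retry loop: reserve everything, find userptr BOs whose page arrays were invalidated or never filled, back off the reservation, repopulate the pages (which can itself race with another invalidation), and try again, giving up with -EDEADLK after ten rounds. A standalone sketch of that shape; the helpers are stand-ins for ttm_eu_reserve_buffers(), ttm_eu_backoff_reservation() and amdgpu_ttm_tt_get_user_pages():

    #include <errno.h>
    #include <stdbool.h>

    static void reserve_all(void) { }
    static void unreserve_all(void) { }
    static bool userptr_pages_missing(void) { return false; }
    static void refill_user_pages(void) { }

    static int acquire_buffers(int tries)
    {
        while (tries--) {
            reserve_all();
            if (!userptr_pages_missing())
                return 0;            /* all pages present, stay reserved */
            unreserve_all();         /* drop locks before sleeping allocations */
            refill_user_pages();     /* may race with MMU invalidation */
        }
        return -EDEADLK;             /* too many races, give up */
    }

The backoff before refilling matters: getting the user pages can sleep, and holding every BO reservation across it would invite deadlocks.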
@@ -433,10 +489,26 @@ error_validate:
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
}
-error_reserve:
+error_free_pages:
+
if (need_mmap_lock)
up_read(&current->mm->mmap_sem);
+ if (p->bo_list) {
+ for (i = p->bo_list->first_userptr;
+ i < p->bo_list->num_entries; ++i) {
+ e = &p->bo_list->array[i];
+
+ if (!e->user_pages)
+ continue;
+
+ release_pages(e->user_pages,
+ e->robj->tbo.ttm->num_pages,
+ false);
+ drm_free_large(e->user_pages);
+ }
+ }
+
return r;
}
@@ -447,7 +519,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) {
struct reservation_object *resv = e->robj->tbo.resv;
- r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
if (r)
return r;
@@ -510,11 +582,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
- if (parser->ibs)
- for (i = 0; i < parser->num_ibs; i++)
- amdgpu_ib_free(parser->adev, &parser->ibs[i]);
- kfree(parser->ibs);
- amdgpu_bo_unref(&parser->uf.bo);
+ if (parser->job)
+ amdgpu_job_free(parser->job);
amdgpu_bo_unref(&parser->uf_entry.robj);
}
@@ -530,7 +599,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+ r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
if (r)
return r;
@@ -556,14 +625,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
return r;
f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+ r = amdgpu_sync_fence(adev, &p->job->sync, f);
if (r)
return r;
}
}
- r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+ r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@@ -581,29 +650,25 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
}
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *parser)
+ struct amdgpu_cs_parser *p)
{
- struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring = p->job->ring;
int i, r;
- if (parser->num_ibs == 0)
- return 0;
-
/* Only for UVD/VCE VM emulation */
- for (i = 0; i < parser->num_ibs; i++) {
- ring = parser->ibs[i].ring;
- if (ring->funcs->parse_cs) {
- r = amdgpu_ring_parse_cs(ring, parser, i);
+ if (ring->funcs->parse_cs) {
+ for (i = 0; i < p->job->num_ibs; i++) {
+ r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
}
- r = amdgpu_bo_vm_update_pte(parser, vm);
+ r = amdgpu_bo_vm_update_pte(p, vm);
if (!r)
- amdgpu_cs_sync_rings(parser);
+ amdgpu_cs_sync_rings(p);
return r;
}
@@ -626,14 +691,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
int i, j;
int r;
- for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
+ for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
struct amdgpu_cs_chunk *chunk;
struct amdgpu_ib *ib;
struct drm_amdgpu_cs_chunk_ib *chunk_ib;
struct amdgpu_ring *ring;
chunk = &parser->chunks[i];
- ib = &parser->ibs[j];
+ ib = &parser->job->ibs[j];
chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
@@ -645,6 +710,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (r)
return r;
+ if (parser->job->ring && parser->job->ring != ring)
+ return -EINVAL;
+
+ parser->job->ring = ring;
+
if (ring->funcs->parse_cs) {
struct amdgpu_bo_va_mapping *m;
struct amdgpu_bo *aobj = NULL;
@@ -673,7 +743,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
- r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
+ r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -682,7 +752,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
amdgpu_bo_kunmap(aobj);
} else {
- r = amdgpu_ib_get(ring, vm, 0, ib);
+ r = amdgpu_ib_get(adev, vm, 0, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -697,15 +767,12 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
j++;
}
- if (!parser->num_ibs)
- return 0;
-
/* add GDS resources to first IB */
if (parser->bo_list) {
struct amdgpu_bo *gds = parser->bo_list->gds_obj;
struct amdgpu_bo *gws = parser->bo_list->gws_obj;
struct amdgpu_bo *oa = parser->bo_list->oa_obj;
- struct amdgpu_ib *ib = &parser->ibs[0];
+ struct amdgpu_ib *ib = &parser->job->ibs[0];
if (gds) {
ib->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -721,15 +788,15 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
}
/* wrap the last IB with user fence */
- if (parser->uf.bo) {
- struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
+ if (parser->job->uf.bo) {
+ struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
/* UVD & VCE fw doesn't support user fences */
- if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
- ib->ring->type == AMDGPU_RING_TYPE_VCE)
+ if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
+ parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
return -EINVAL;
- ib->user = &parser->uf;
+ ib->user = &parser->job->uf;
}
return 0;
@@ -739,14 +806,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_ib *ib;
int i, j, r;
- if (!p->num_ibs)
- return 0;
-
- /* Add dependencies to first IB */
- ib = &p->ibs[0];
for (i = 0; i < p->nchunks; ++i) {
struct drm_amdgpu_cs_chunk_dep *deps;
struct amdgpu_cs_chunk *chunk;
@@ -784,7 +845,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
return r;
} else if (fence) {
- r = amdgpu_sync_fence(adev, &ib->sync, fence);
+ r = amdgpu_sync_fence(adev, &p->job->sync,
+ fence);
fence_put(fence);
amdgpu_ctx_put(ctx);
if (r)
@@ -796,15 +858,36 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_cs_free_job(struct amdgpu_job *job)
+static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
{
- int i;
- if (job->ibs)
- for (i = 0; i < job->num_ibs; i++)
- amdgpu_ib_free(job->adev, &job->ibs[i]);
- kfree(job->ibs);
- if (job->uf.bo)
- amdgpu_bo_unref(&job->uf.bo);
+ struct amdgpu_ring *ring = p->job->ring;
+ struct amd_sched_fence *fence;
+ struct amdgpu_job *job;
+
+ job = p->job;
+ p->job = NULL;
+
+ job->base.sched = &ring->sched;
+ job->base.s_entity = &p->ctx->rings[ring->idx].entity;
+ job->owner = p->filp;
+
+ fence = amd_sched_fence_create(job->base.s_entity, p->filp);
+ if (!fence) {
+ amdgpu_job_free(job);
+ return -ENOMEM;
+ }
+
+ job->base.s_fence = fence;
+ p->fence = fence_get(&fence->base);
+
+ cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring,
+ &fence->base);
+ job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
+
+ trace_amdgpu_cs_ioctl(job);
+ amd_sched_entity_push_job(&job->base);
+
return 0;
}
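
amdgpu_cs_submit() transfers job ownership before the push: p->job is cleared first, so when amdgpu_cs_parser_fini() later runs it sees NULL and cannot free a job that the scheduler now owns. The hand-off pattern in isolation, with illustrative types:

    #include <stddef.h>

    struct job { int id; };
    struct parser { struct job *job; };

    /* Take the job out of the parser; the parser's cleanup path frees
     * parser->job only when it is non-NULL, so no double free. */
    static struct job *take_job(struct parser *p)
    {
        struct job *job = p->job;

        p->job = NULL;
        return job;
    }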
@@ -829,7 +912,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
- r = amdgpu_cs_parser_relocs(&parser);
+ r = amdgpu_cs_parser_bos(&parser, data);
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r && r != -ERESTARTSYS)
@@ -848,68 +931,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r)
goto out;
- for (i = 0; i < parser.num_ibs; i++)
+ for (i = 0; i < parser.job->num_ibs; i++)
trace_amdgpu_cs(&parser, i);
r = amdgpu_cs_ib_vm_chunk(adev, &parser);
if (r)
goto out;
- if (amdgpu_enable_scheduler && parser.num_ibs) {
- struct amdgpu_ring * ring = parser.ibs->ring;
- struct amd_sched_fence *fence;
- struct amdgpu_job *job;
-
- job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
- if (!job) {
- r = -ENOMEM;
- goto out;
- }
-
- job->base.sched = &ring->sched;
- job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
- job->adev = parser.adev;
- job->owner = parser.filp;
- job->free_job = amdgpu_cs_free_job;
-
- job->ibs = parser.ibs;
- job->num_ibs = parser.num_ibs;
- parser.ibs = NULL;
- parser.num_ibs = 0;
-
- if (job->ibs[job->num_ibs - 1].user) {
- job->uf = parser.uf;
- job->ibs[job->num_ibs - 1].user = &job->uf;
- parser.uf.bo = NULL;
- }
-
- fence = amd_sched_fence_create(job->base.s_entity,
- parser.filp);
- if (!fence) {
- r = -ENOMEM;
- amdgpu_cs_free_job(job);
- kfree(job);
- goto out;
- }
- job->base.s_fence = fence;
- parser.fence = fence_get(&fence->base);
-
- cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
- &fence->base);
- job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
-
- trace_amdgpu_cs_ioctl(job);
- amd_sched_entity_push_job(&job->base);
-
- } else {
- struct amdgpu_fence *fence;
-
- r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
- parser.filp);
- fence = parser.ibs[parser.num_ibs - 1].fence;
- parser.fence = fence_get(&fence->base);
- cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
- }
+ r = amdgpu_cs_submit(&parser, cs);
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
@@ -980,30 +1009,36 @@ struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo)
{
- struct amdgpu_bo_list_entry *reloc;
struct amdgpu_bo_va_mapping *mapping;
+ unsigned i;
+
+ if (!parser->bo_list)
+ return NULL;
addr /= AMDGPU_GPU_PAGE_SIZE;
- list_for_each_entry(reloc, &parser->validated, tv.head) {
- if (!reloc->bo_va)
+ for (i = 0; i < parser->bo_list->num_entries; i++) {
+ struct amdgpu_bo_list_entry *lobj;
+
+ lobj = &parser->bo_list->array[i];
+ if (!lobj->bo_va)
continue;
- list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+ list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
if (mapping->it.start > addr ||
addr > mapping->it.last)
continue;
- *bo = reloc->bo_va->bo;
+ *bo = lobj->bo_va->bo;
return mapping;
}
- list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
+ list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
if (mapping->it.start > addr ||
addr > mapping->it.last)
continue;
- *bo = reloc->bo_va->bo;
+ *bo = lobj->bo_va->bo;
return mapping;
}
}
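
The rewritten amdgpu_cs_find_mapping() walks the BO list instead of the relocation list and keeps the same inclusive interval test, after first scaling the address into GPU-page units. The predicate on its own, as an illustrative helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* addr lies in [start, last] iff neither bound excludes it. */
    static bool contains(uint64_t start, uint64_t last, uint64_t addr)
    {
        return !(start > addr || addr > last);
    }

For example, with start = 0x100 and last = 0x1ff, every page index from 0x100 through 0x1ff inclusive matches.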
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 17d1fb12128a..17e13621fae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,8 +25,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
-int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
- struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
unsigned i, j;
int r;
@@ -35,44 +34,38 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
ctx->adev = adev;
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
- ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
- AMDGPU_MAX_RINGS, GFP_KERNEL);
+ ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
+ sizeof(struct fence *), GFP_KERNEL);
if (!ctx->fences)
return -ENOMEM;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
ctx->rings[i].sequence = 1;
- ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
- amdgpu_sched_jobs * i;
+ ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
- if (amdgpu_enable_scheduler) {
- /* create context entity for each ring */
- for (i = 0; i < adev->num_rings; i++) {
- struct amd_sched_rq *rq;
- if (pri >= AMD_SCHED_MAX_PRIORITY) {
- kfree(ctx->fences);
- return -EINVAL;
- }
- rq = &adev->rings[i]->sched.sched_rq[pri];
- r = amd_sched_entity_init(&adev->rings[i]->sched,
- &ctx->rings[i].entity,
- rq, amdgpu_sched_jobs);
- if (r)
- break;
- }
-
- if (i < adev->num_rings) {
- for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
- kfree(ctx->fences);
- return r;
- }
+ /* create context entity for each ring */
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ struct amd_sched_rq *rq;
+
+ rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+ r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
+ rq, amdgpu_sched_jobs);
+ if (r)
+ break;
+ }
+
+ if (i < adev->num_rings) {
+ for (j = 0; j < i; j++)
+ amd_sched_entity_fini(&adev->rings[j]->sched,
+ &ctx->rings[j].entity);
+ kfree(ctx->fences);
+ return r;
}
return 0;
}
-void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
struct amdgpu_device *adev = ctx->adev;
unsigned i, j;
@@ -85,11 +78,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
- if (amdgpu_enable_scheduler) {
- for (i = 0; i < adev->num_rings; i++)
- amd_sched_entity_fini(&adev->rings[i]->sched,
- &ctx->rings[i].entity);
- }
+ for (i = 0; i < adev->num_rings; i++)
+ amd_sched_entity_fini(&adev->rings[i]->sched,
+ &ctx->rings[i].entity);
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -112,7 +103,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
return r;
}
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
+ r = amdgpu_ctx_init(adev, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
@@ -200,18 +191,18 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
id = args->in.ctx_id;
switch (args->in.op) {
- case AMDGPU_CTX_OP_ALLOC_CTX:
- r = amdgpu_ctx_alloc(adev, fpriv, &id);
- args->out.alloc.ctx_id = id;
- break;
- case AMDGPU_CTX_OP_FREE_CTX:
- r = amdgpu_ctx_free(fpriv, id);
- break;
- case AMDGPU_CTX_OP_QUERY_STATE:
- r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
- break;
- default:
- return -EINVAL;
+ case AMDGPU_CTX_OP_ALLOC_CTX:
+ r = amdgpu_ctx_alloc(adev, fpriv, &id);
+ args->out.alloc.ctx_id = id;
+ break;
+ case AMDGPU_CTX_OP_FREE_CTX:
+ r = amdgpu_ctx_free(fpriv, id);
+ break;
+ case AMDGPU_CTX_OP_QUERY_STATE:
+ r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
+ break;
+ default:
+ return -EINVAL;
}
return r;
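
amdgpu_ctx_init() now makes a single kcalloc() for all fence slots and hands each ring a window of amdgpu_sched_jobs entries through plain pointer arithmetic, replacing the old void-pointer byte math. The slicing pattern, sketched with hypothetical names:

    #include <stdlib.h>

    /* One block of jobs * nrings slots; per_ring[i] points at ring i's
     * window. The caller frees the single block. */
    static void **alloc_fence_slots(void ***per_ring, unsigned nrings,
                                    unsigned jobs)
    {
        void **all = calloc((size_t)jobs * nrings, sizeof(*all));

        if (!all)
            return NULL;
        for (unsigned i = 0; i < nrings; i++)
            per_ring[i] = &all[jobs * i];
        return all;
    }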
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 51bfc114584e..2139da773da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -636,31 +636,6 @@ bool amdgpu_card_posted(struct amdgpu_device *adev)
}
/**
- * amdgpu_boot_test_post_card - check and possibly initialize the hw
- *
- * @adev: amdgpu_device pointer
- *
- * Check if the asic is initialized and if not, attempt to initialize
- * it (all asics).
- * Returns true if initialized or false if not.
- */
-bool amdgpu_boot_test_post_card(struct amdgpu_device *adev)
-{
- if (amdgpu_card_posted(adev))
- return true;
-
- if (adev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- if (adev->is_atom_bios)
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
- return true;
- } else {
- dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
- return false;
- }
-}
-
-/**
* amdgpu_dummy_page_init - init dummy page used by the driver
*
* @adev: amdgpu_device pointer
@@ -959,12 +934,6 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_sched_jobs);
amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
}
- /* vramlimit must be a power of two */
- if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) {
- dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n",
- amdgpu_vram_limit);
- amdgpu_vram_limit = 0;
- }
if (amdgpu_gart_size != -1) {
/* gtt size must be power of two and greater or equal to 32M */
@@ -1434,7 +1403,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
- adev->vm_manager.vm_pte_funcs_ring = NULL;
+ adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
@@ -1455,9 +1424,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
- mutex_init(&adev->ring_lock);
+ mutex_init(&adev->vm_manager.lock);
atomic_set(&adev->irq.ih.lock, 0);
- mutex_init(&adev->gem.mutex);
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex);
@@ -1531,8 +1499,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
+ /* See if the asic supports SR-IOV */
+ adev->virtualization.supports_sr_iov =
+ amdgpu_atombios_has_gpu_virtualization_table(adev);
+
/* Post card if necessary */
- if (!amdgpu_card_posted(adev)) {
+ if (!amdgpu_card_posted(adev) ||
+ adev->virtualization.supports_sr_iov) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
@@ -1577,11 +1550,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
- r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
- if (r) {
- dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
- return r;
- }
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1645,7 +1613,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->shutdown = true;
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
- amdgpu_ctx_fini(&adev->kernel_ctx);
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
@@ -1894,6 +1861,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
retry:
r = amdgpu_asic_reset(adev);
+ /* post card */
+ amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
if (!r) {
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_resume(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 8297bc319369..3fb405b3a614 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,32 +35,30 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
- struct fence **f)
+static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
{
- struct amdgpu_fence *fence;
- long r;
+ struct amdgpu_flip_work *work =
+ container_of(cb, struct amdgpu_flip_work, cb);
- if (*f == NULL)
- return;
+ fence_put(f);
+ schedule_work(&work->flip_work);
+}
- fence = to_amdgpu_fence(*f);
- if (fence) {
- r = fence_wait(&fence->base, false);
- if (r == -EDEADLK)
- r = amdgpu_gpu_reset(adev);
- } else
- r = fence_wait(*f, false);
+static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
+ struct fence **f)
+{
+ struct fence *fence = *f;
- if (r)
- DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
+ if (fence == NULL)
+ return false;
- /* We continue with the page flip even if we failed to wait on
- * the fence, otherwise the DRM core and userspace will be
- * confused about which BO the CRTC is scanning out
- */
- fence_put(*f);
*f = NULL;
+
+ if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+ return true;
+
+ fence_put(fence);
+ return false;
}
static void amdgpu_flip_work_func(struct work_struct *__work)
@@ -76,9 +74,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
int vpos, hpos, stat, min_udelay = 0;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
- amdgpu_flip_wait_fence(adev, &work->excl);
+ if (amdgpu_flip_handle_fence(work, &work->excl))
+ return;
+
for (i = 0; i < work->shared_count; ++i)
- amdgpu_flip_wait_fence(adev, &work->shared[i]);
+ if (amdgpu_flip_handle_fence(work, &work->shared[i]))
+ return;
/* We borrow the event spin lock for protecting flip_status */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -96,7 +97,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- while (amdgpuCrtc->enabled && repcnt--) {
+ while (amdgpuCrtc->enabled && --repcnt) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -112,13 +113,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
break;
/* Sleep at least until estimated real start of hw vblank */
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
if (min_udelay > vblank->framedur_ns / 2000) {
/* Don't wait ridiculously long - something is wrong */
repcnt = 0;
break;
}
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
@@ -130,12 +131,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
vblank->framedur_ns / 1000,
vblank->linedur_ns / 1000, stat, vpos, hpos);
- /* do the flip (mmio) */
- adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */
amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
-
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ /* Do the flip (mmio) */
+ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
}
/*
@@ -254,7 +255,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- queue_work(amdgpu_crtc->pflip_queue, &work->flip_work);
+ amdgpu_flip_work_func(&work->flip_work);
return 0;
vblank_cleanup:
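
The flip path above swaps blocking waits for a callback chain: amdgpu_flip_handle_fence() consumes a fence slot, and if the fence is still pending it installs amdgpu_flip_callback(), which re-schedules the work once the fence signals; the work function simply returns in the meantime. A sketch of the consume-or-arm decision; add_callback() and put_fence() are stand-ins for fence_add_callback() and fence_put(), where fence_add_callback() returns 0 when the callback was actually installed:

    #include <stdbool.h>
    #include <stddef.h>

    struct fence;

    static int add_callback(struct fence *f) { (void)f; return -1; }
    static void put_fence(struct fence *f) { (void)f; }

    /* true means "callback armed, bail out; the work re-runs later". */
    static bool handle_fence(struct fence **f)
    {
        struct fence *fence = *f;

        if (!fence)
            return false;
        *f = NULL;                   /* consume the slot either way */
        if (!add_callback(fence))
            return true;             /* callback owns the reference now */
        put_fence(fence);            /* already signaled, drop our ref */
        return false;
    }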
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 9ef1db87cf26..f1e17d60055a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -69,7 +69,6 @@ int amdgpu_dpm = -1;
int amdgpu_smc_load_fw = 1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
-int amdgpu_hard_reset = 0;
unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
@@ -78,10 +77,8 @@ int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
int amdgpu_exp_hw_support = 0;
-int amdgpu_enable_scheduler = 1;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
-int amdgpu_enable_semaphores = 0;
int amdgpu_powerplay = -1;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
@@ -128,9 +125,6 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
-MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
-module_param_named(hard_reset, amdgpu_hard_reset, int, 0444);
-
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
@@ -155,18 +149,12 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
-MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
-module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
-
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
-MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
-module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
-
#ifdef CONFIG_DRM_AMD_POWERPLAY
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
@@ -330,6 +318,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+ /*
+ * Initialize amdkfd before starting radeon. If it was not loaded yet,
+ * defer radeon probing
+ */
+ ret = amdgpu_amdkfd_init();
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
/* Get rid of things like offb */
ret = amdgpu_kick_out_firmware_fb(pdev);
if (ret)
@@ -559,6 +555,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
static int __init amdgpu_init(void)
{
+ amdgpu_sync_init();
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
@@ -572,8 +569,6 @@ static int __init amdgpu_init(void)
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
- amdgpu_amdkfd_init();
-
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
@@ -583,6 +578,7 @@ static void __exit amdgpu_exit(void)
amdgpu_amdkfd_fini();
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
+ amdgpu_sync_fini();
}
module_init(amdgpu_init);
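
amdgpu_pci_probe() now initializes amdkfd up front and propagates -EPROBE_DEFER, so the driver core retries the probe later instead of the GPU coming up without KFD. A minimal sketch of the deferral pattern; dependency_init() is hypothetical, and in the kernel EPROBE_DEFER is errno 517:

    #define EPROBE_DEFER 517    /* kernel-internal: "retry this probe later" */

    static int dependency_init(void) { return 0; }    /* hypothetical */

    static int probe(void)
    {
        int ret = dependency_init();

        if (ret == -EPROBE_DEFER)    /* not ready: bail before touching hw */
            return ret;
        /* ... normal probing continues ... */
        return 0;
    }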
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3671f9f220bd..d81f1f4883a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -47,9 +47,30 @@
 * that the relevant GPU caches have been flushed.
*/
+struct amdgpu_fence {
+ struct fence base;
+
+ /* RB, DMA, etc. */
+ struct amdgpu_ring *ring;
+};
+
static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+/*
+ * Cast helper
+ */
+static const struct fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+{
+ struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
+
+ if (__f->base.ops == &amdgpu_fence_ops)
+ return __f;
+
+ return NULL;
+}
+
/**
* amdgpu_fence_write - write a fence value
*
@@ -82,7 +103,7 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
if (drv->cpu_addr)
seq = le32_to_cpu(*drv->cpu_addr);
else
- seq = lower_32_bits(atomic64_read(&drv->last_seq));
+ seq = atomic_read(&drv->last_seq);
return seq;
}
@@ -91,32 +112,45 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* amdgpu_fence_emit - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
- * @owner: creator of the fence
- * @fence: amdgpu fence object
+ * @f: resulting fence object
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
- struct amdgpu_fence **fence)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_fence *fence;
+ struct fence *old, **ptr;
+ uint32_t seq;
- /* we are protected by the ring emission mutex */
- *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
- if ((*fence) == NULL) {
+ fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
+ if (fence == NULL)
return -ENOMEM;
- }
- (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
- (*fence)->ring = ring;
- (*fence)->owner = owner;
- fence_init(&(*fence)->base, &amdgpu_fence_ops,
- &ring->fence_drv.fence_queue.lock,
- adev->fence_context + ring->idx,
- (*fence)->seq);
+
+ seq = ++ring->fence_drv.sync_seq;
+ fence->ring = ring;
+ fence_init(&fence->base, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx,
+ seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- (*fence)->seq,
- AMDGPU_FENCE_FLAG_INT);
+ seq, AMDGPU_FENCE_FLAG_INT);
+
+ ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+ /* This function can't be called concurrently anyway, otherwise
+ * emitting the fence would mess up the hardware ring buffer.
+ */
+ old = rcu_dereference_protected(*ptr, 1);
+ if (old && !fence_is_signaled(old)) {
+ DRM_INFO("rcu slot is busy\n");
+ fence_wait(old, false);
+ }
+
+ rcu_assign_pointer(*ptr, fence_get(&fence->base));
+
+ *f = &fence->base;
+
return 0;
}
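
amdgpu_fence_emit() stores every new fence at fences[seq & num_fences_mask], which is why amdgpu_fence_driver_init_ring() below insists that num_hw_submission be a power of two: the mask then computes exactly seq % num_hw_submission, and slots are reused in emission order. Sketched:

    #include <stdint.h>

    /* Valid only when num_slots is a power of two: with 4 slots,
     * seq 5 -> slot 1 and seq 6 -> slot 2, matching seq % 4. */
    static unsigned slot_for_seq(uint32_t seq, unsigned num_slots)
    {
        return seq & (num_slots - 1);
    }

A busy slot therefore means the fence from num_hw_submission submissions ago has not signaled yet, which is why the emit path waits on it before reuse.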
@@ -134,89 +168,48 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
}
/**
- * amdgpu_fence_activity - check for fence activity
+ * amdgpu_fence_process - check for fence activity
*
* @ring: pointer to struct amdgpu_ring
*
* Checks the current fence value and calculates the last
- * signalled fence value. Returns true if activity occured
- * on the ring, and the fence_queue should be waken up.
+ * signaled fence value, then signals all fences emitted up to
+ * that point.
*/
-static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
+void amdgpu_fence_process(struct amdgpu_ring *ring)
{
- uint64_t seq, last_seq, last_emitted;
- unsigned count_loop = 0;
- bool wake = false;
-
- /* Note there is a scenario here for an infinite loop but it's
- * very unlikely to happen. For it to happen, the current polling
- * process need to be interrupted by another process and another
- * process needs to update the last_seq btw the atomic read and
- * xchg of the current process.
- *
- * More over for this to go in infinite loop there need to be
- * continuously new fence signaled ie amdgpu_fence_read needs
- * to return a different value each time for both the currently
- * polling process and the other process that xchg the last_seq
- * btw atomic read and xchg of the current process. And the
- * value the other process set as last seq must be higher than
- * the seq value we just read. Which means that current process
- * need to be interrupted after amdgpu_fence_read and before
- * atomic xchg.
- *
- * To be even more safe we count the number of time we loop and
- * we bail after 10 loop just accepting the fact that we might
- * have temporarly set the last_seq not to the true real last
- * seq but to an older one.
- */
- last_seq = atomic64_read(&ring->fence_drv.last_seq);
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ uint32_t seq, last_seq;
+ int r;
+
do {
- last_emitted = ring->fence_drv.sync_seq[ring->idx];
+ last_seq = atomic_read(&ring->fence_drv.last_seq);
seq = amdgpu_fence_read(ring);
- seq |= last_seq & 0xffffffff00000000LL;
- if (seq < last_seq) {
- seq &= 0xffffffff;
- seq |= last_emitted & 0xffffffff00000000LL;
- }
- if (seq <= last_seq || seq > last_emitted) {
- break;
- }
- /* If we loop over we don't want to return without
- * checking if a fence is signaled as it means that the
- * seq we just read is different from the previous on.
- */
- wake = true;
- last_seq = seq;
- if ((count_loop++) > 10) {
- /* We looped over too many time leave with the
- * fact that we might have set an older fence
- * seq then the current real last seq as signaled
- * by the hw.
- */
- break;
- }
- } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
+ } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
- if (seq < last_emitted)
+ if (seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
- return wake;
-}
+ while (last_seq != seq) {
+ struct fence *fence, **ptr;
-/**
- * amdgpu_fence_process - process a fence
- *
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
- *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
- */
-void amdgpu_fence_process(struct amdgpu_ring *ring)
-{
- if (amdgpu_fence_activity(ring))
- wake_up_all(&ring->fence_drv.fence_queue);
+ ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+
+ /* There is always exactly one thread signaling this fence slot */
+ fence = rcu_dereference_protected(*ptr, 1);
+ rcu_assign_pointer(*ptr, NULL);
+
+ BUG_ON(!fence);
+
+ r = fence_signal(fence);
+ if (!r)
+ FENCE_TRACE(fence, "signaled from irq context\n");
+ else
+ BUG();
+
+ fence_put(fence);
+ }
}
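
amdgpu_fence_process() publishes the newest hardware counter with a lock-free cmpxchg loop: the register is re-read on every attempt, so a racing caller can never install a stale value, and afterwards every fence between the old and new sequence numbers is signaled. The loop shape in C11 atomics; read_hw() stands in for amdgpu_fence_read():

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t read_hw(void) { return 0; }

    static uint32_t publish_last_seq(_Atomic uint32_t *last_seq)
    {
        uint32_t prev, seq;

        do {
            prev = atomic_load(last_seq);
            seq = read_hw();    /* re-read per attempt, never stale */
        } while (!atomic_compare_exchange_strong(last_seq, &prev, seq));
        return prev;    /* fences in (prev, seq] may now be signaled */
    }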
/**
@@ -234,83 +227,6 @@ static void amdgpu_fence_fallback(unsigned long arg)
}
/**
- * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
- *
- * @ring: ring the fence is associated with
- * @seq: sequence number
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if the fence has signaled (current fence value
- * is >= requested value) or false if it has not (current fence
- * value is < the requested value. Helper function for
- * amdgpu_fence_signaled().
- */
-static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
-{
- if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
- return true;
-
- /* poll new last sequence at least once */
- amdgpu_fence_process(ring);
- if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
- return true;
-
- return false;
-}
-
-/*
- * amdgpu_ring_wait_seq_timeout - wait for seq of the specific ring to signal
- * @ring: ring to wait on for the seq number
- * @seq: seq number wait for
- *
- * return value:
- * 0: seq signaled, and gpu not hang
- * -EDEADL: GPU hang detected
- * -EINVAL: some paramter is not valid
- */
-static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
-{
- bool signaled = false;
-
- BUG_ON(!ring);
- if (seq > ring->fence_drv.sync_seq[ring->idx])
- return -EINVAL;
-
- if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
- return 0;
-
- amdgpu_fence_schedule_fallback(ring);
- wait_event(ring->fence_drv.fence_queue, (
- (signaled = amdgpu_fence_seq_signaled(ring, seq))));
-
- if (signaled)
- return 0;
- else
- return -EDEADLK;
-}
-
-/**
- * amdgpu_fence_wait_next - wait for the next fence to signal
- *
- * @adev: amdgpu device pointer
- * @ring: ring index the fence is associated with
- *
- * Wait for the next fence on the requested ring to signal (all asics).
- * Returns 0 if the next fence has passed, error for all other cases.
- * Caller must hold ring lock.
- */
-int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
-{
- uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
-
- if (seq >= ring->fence_drv.sync_seq[ring->idx])
- return -ENOENT;
-
- return amdgpu_fence_ring_wait_seq(ring, seq);
-}
-
-/**
* amdgpu_fence_wait_empty - wait for all fences to signal
*
* @adev: amdgpu device pointer
@@ -318,16 +234,28 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
*
* Wait for all fences on the requested ring to signal (all asics).
* Returns 0 if the fences have passed, error for all other cases.
- * Caller must hold ring lock.
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
+ uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+ struct fence *fence, **ptr;
+ int r;
if (!seq)
return 0;
- return amdgpu_fence_ring_wait_seq(ring, seq);
+ ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+ rcu_read_lock();
+ fence = rcu_dereference(*ptr);
+ if (!fence || !fence_get_rcu(fence)) {
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
+ r = fence_wait(fence, false);
+ fence_put(fence);
+ return r;
}
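
Because fences now live in a ring buffer indexed by sequence number, amdgpu_fence_wait_empty() no longer needs a waitqueue: draining the ring reduces to taking an RCU-protected reference on the slot of the last emitted fence and waiting on that single fence. The slot lookup in isolation, without the RCU accessors:

    #include <stddef.h>
    #include <stdint.h>

    struct fence;

    static struct fence *last_emitted_slot(struct fence **fences,
                                           uint32_t sync_seq, uint32_t mask)
    {
        if (!sync_seq)
            return NULL;    /* nothing was ever emitted */
        return fences[sync_seq & mask];
    }

If the slot is already empty or the reference cannot be taken, every fence has signaled and the wait returns immediately.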
/**
@@ -347,75 +275,10 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
* but it's ok to report slightly wrong fence count here.
*/
amdgpu_fence_process(ring);
- emitted = ring->fence_drv.sync_seq[ring->idx]
- - atomic64_read(&ring->fence_drv.last_seq);
- /* to avoid 32bits warp around */
- if (emitted > 0x10000000)
- emitted = 0x10000000;
-
- return (unsigned)emitted;
-}
-
-/**
- * amdgpu_fence_need_sync - do we need a semaphore
- *
- * @fence: amdgpu fence object
- * @dst_ring: which ring to check against
- *
- * Check if the fence needs to be synced against another ring
- * (all asics). If so, we need to emit a semaphore.
- * Returns true if we need to sync with another ring, false if
- * not.
- */
-bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
- struct amdgpu_ring *dst_ring)
-{
- struct amdgpu_fence_driver *fdrv;
-
- if (!fence)
- return false;
-
- if (fence->ring == dst_ring)
- return false;
-
- /* we are protected by the ring mutex */
- fdrv = &dst_ring->fence_drv;
- if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
- return false;
-
- return true;
-}
-
-/**
- * amdgpu_fence_note_sync - record the sync point
- *
- * @fence: amdgpu fence object
- * @dst_ring: which ring to check against
- *
- * Note the sequence number at which point the fence will
- * be synced with the requested ring (all asics).
- */
-void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
- struct amdgpu_ring *dst_ring)
-{
- struct amdgpu_fence_driver *dst, *src;
- unsigned i;
-
- if (!fence)
- return;
-
- if (fence->ring == dst_ring)
- return;
-
- /* we are protected by the ring mutex */
- src = &fence->ring->fence_drv;
- dst = &dst_ring->fence_drv;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (i == dst_ring->idx)
- continue;
-
- dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
- }
+ emitted = 0x100000000ull;
+ emitted -= atomic_read(&ring->fence_drv.last_seq);
+ emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+ return lower_32_bits(emitted);
}
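
The emitted-count calculation above is wraparound-safe because it is done in 64 bits before truncating. Worked through: with last_seq = 0xfffffffe and sync_seq = 0x00000001, a plain 32-bit subtraction would be wildly wrong, but 2^32 - last_seq + sync_seq = 3, the true number of outstanding fences:

    #include <stdint.h>

    static uint32_t count_emitted(uint32_t last_seq, uint32_t sync_seq)
    {
        uint64_t emitted = 0x100000000ull;    /* 2^32 */

        emitted -= last_seq;
        emitted += sync_seq;
        return (uint32_t)emitted;    /* lower 32 bits */
    }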
/**
@@ -447,7 +310,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
}
- amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
+ amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
amdgpu_irq_get(adev, irq_src, irq_type);
ring->fence_drv.irq_src = irq_src;
@@ -465,47 +328,55 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* for the requested ring.
*
* @ring: ring to init the fence driver on
+ * @num_hw_submission: number of entries on the hardware queue
*
* Init the fence driver for the requested ring (all asics).
* Helper function for amdgpu_fence_driver_init().
*/
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ unsigned num_hw_submission)
{
- int i, r;
+ long timeout;
+ int r;
+
+ /* Check that num_hw_submission is a power of two */
+ if ((num_hw_submission & (num_hw_submission - 1)) != 0)
+ return -EINVAL;
ring->fence_drv.cpu_addr = NULL;
ring->fence_drv.gpu_addr = 0;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- ring->fence_drv.sync_seq[i] = 0;
-
- atomic64_set(&ring->fence_drv.last_seq, 0);
+ ring->fence_drv.sync_seq = 0;
+ atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
(unsigned long)ring);
- init_waitqueue_head(&ring->fence_drv.fence_queue);
-
- if (amdgpu_enable_scheduler) {
- long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
- if (timeout == 0) {
- /*
- * FIXME:
- * Delayed workqueue cannot use it directly,
- * so the scheduler will not use delayed workqueue if
- * MAX_SCHEDULE_TIMEOUT is set.
- * Currently keep it simple and silly.
- */
- timeout = MAX_SCHEDULE_TIMEOUT;
- }
- r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
- amdgpu_sched_hw_submission,
- timeout, ring->name);
- if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
- return r;
- }
+ ring->fence_drv.num_fences_mask = num_hw_submission - 1;
+ spin_lock_init(&ring->fence_drv.lock);
+ ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
+ GFP_KERNEL);
+ if (!ring->fence_drv.fences)
+ return -ENOMEM;
+
+ timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+ if (timeout == 0) {
+ /*
+ * FIXME:
+ * Delayed workqueue cannot use it directly,
+ * so the scheduler will not use delayed workqueue if
+ * MAX_SCHEDULE_TIMEOUT is set.
+ * Currently keep it simple and silly.
+ */
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+ r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+ num_hw_submission,
+ timeout, ring->name);
+ if (r) {
+ DRM_ERROR("Failed to create scheduler on ring %s.\n",
+ ring->name);
+ return r;
}
return 0;
@@ -548,11 +419,9 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev)
*/
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
- int i, r;
+ unsigned i, j;
+ int r;
- if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
- kmem_cache_destroy(amdgpu_fence_slab);
- mutex_lock(&adev->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -563,14 +432,18 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
/* no need to trigger GPU reset as we are unloading */
amdgpu_fence_driver_force_completion(adev);
}
- wake_up_all(&ring->fence_drv.fence_queue);
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
+ for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+ fence_put(ring->fence_drv.fences[j]);
+ kfree(ring->fence_drv.fences);
ring->fence_drv.initialized = false;
}
- mutex_unlock(&adev->ring_lock);
+
+ if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
+ kmem_cache_destroy(amdgpu_fence_slab);
}
/**
@@ -585,7 +458,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
int i, r;
- mutex_lock(&adev->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
@@ -602,7 +474,6 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
- mutex_unlock(&adev->ring_lock);
}
/**
@@ -621,7 +492,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
int i;
- mutex_lock(&adev->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
@@ -631,7 +501,6 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
- mutex_unlock(&adev->ring_lock);
}
/**
@@ -651,7 +520,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
- amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
+ amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
}
}
@@ -671,103 +540,57 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
}
/**
- * amdgpu_fence_is_signaled - test if fence is signaled
- *
- * @f: fence to test
+ * amdgpu_fence_enable_signaling - enable signaling on fence
+ * @fence: fence
*
- * Test the fence sequence number if it is already signaled. If it isn't
- * signaled start fence processing. Returns True if the fence is signaled.
+ * This function is called with the fence lock held; it arms the
+ * fallback timer so the fence still gets signaled if the fence
+ * interrupt is never delivered.
*/
-static bool amdgpu_fence_is_signaled(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
- if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
- return true;
-
- amdgpu_fence_process(ring);
+ if (!timer_pending(&ring->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(ring);
- if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
- return true;
+ FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
- return false;
+ return true;
}
/**
- * amdgpu_fence_check_signaled - callback from fence_queue
+ * amdgpu_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
*
- * this function is called with fence_queue lock held, which is also used
- * for the fence locking itself, so unlocked variants are used for
- * fence_signal, and remove_wait_queue.
+ * Free up the fence memory after the RCU grace period.
*/
-static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+static void amdgpu_fence_free(struct rcu_head *rcu)
{
- struct amdgpu_fence *fence;
- struct amdgpu_device *adev;
- u64 seq;
- int ret;
-
- fence = container_of(wait, struct amdgpu_fence, fence_wake);
- adev = fence->ring->adev;
-
- /*
- * We cannot use amdgpu_fence_process here because we're already
- * in the waitqueue, in a call from wake_up_all.
- */
- seq = atomic64_read(&fence->ring->fence_drv.last_seq);
- if (seq >= fence->seq) {
- ret = fence_signal_locked(&fence->base);
- if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
- else
- FENCE_TRACE(&fence->base, "was already signaled\n");
-
- __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
- fence_put(&fence->base);
- } else
- FENCE_TRACE(&fence->base, "pending\n");
- return 0;
+ struct fence *f = container_of(rcu, struct fence, rcu);
+ struct amdgpu_fence *fence = to_amdgpu_fence(f);
+ kmem_cache_free(amdgpu_fence_slab, fence);
}
/**
- * amdgpu_fence_enable_signaling - enable signalling on fence
+ * amdgpu_fence_release - callback that fence can be freed
+ *
* @fence: fence
*
- * This function is called with fence_queue lock held, and adds a callback
- * to fence_queue that checks if this fence is signaled, and if so it
- * signals the fence and removes itself.
+ * This function is called when the reference count becomes zero.
+ * It just RCU schedules freeing up the fence.
*/
-static bool amdgpu_fence_enable_signaling(struct fence *f)
-{
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- struct amdgpu_ring *ring = fence->ring;
-
- if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
- return false;
-
- fence->fence_wake.flags = 0;
- fence->fence_wake.private = NULL;
- fence->fence_wake.func = amdgpu_fence_check_signaled;
- __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
- fence_get(f);
- if (!timer_pending(&ring->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(ring);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
- return true;
-}
-
static void amdgpu_fence_release(struct fence *f)
{
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- kmem_cache_free(amdgpu_fence_slab, fence);
+ call_rcu(&f->rcu, amdgpu_fence_free);
}
-const struct fence_ops amdgpu_fence_ops = {
+static const struct fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .signaled = amdgpu_fence_is_signaled,
.wait = fence_default_wait,
.release = amdgpu_fence_release,
};
@@ -781,7 +604,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- int i, j;
+ int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -791,31 +614,41 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
amdgpu_fence_process(ring);
seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
- seq_printf(m, "Last signaled fence 0x%016llx\n",
- (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
- seq_printf(m, "Last emitted 0x%016llx\n",
- ring->fence_drv.sync_seq[i]);
-
- for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
- struct amdgpu_ring *other = adev->rings[j];
- if (i != j && other && other->fence_drv.initialized &&
- ring->fence_drv.sync_seq[j])
- seq_printf(m, "Last sync to ring %d 0x%016llx\n",
- j, ring->fence_drv.sync_seq[j]);
- }
+ seq_printf(m, "Last signaled fence 0x%08x\n",
+ atomic_read(&ring->fence_drv.last_seq));
+ seq_printf(m, "Last emitted 0x%08x\n",
+ ring->fence_drv.sync_seq);
}
return 0;
}
+/**
+ * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
+ *
+ * Manually trigger a GPU reset when this debugfs file is read.
+ */
+static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_printf(m, "gpu reset\n");
+ amdgpu_gpu_reset(adev);
+
+ return 0;
+}
+
static struct drm_info_list amdgpu_debugfs_fence_list[] = {
{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
+ {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
+ return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
return 0;
#endif
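
A note on the release path above: fence pointers are published to lockless
readers (fence_get_rcu() through reservation objects), so freeing the slab
object directly in ->release could leave a reader holding a dangling
pointer. Deferring kmem_cache_free() through call_rcu() keeps the memory
alive until every RCU read-side critical section that could still see the
fence has finished. A minimal sketch of both sides of the pattern
(example_slab and struct example_fence are illustrative names):

    /* reader side: safe because the free is deferred past the grace period */
    rcu_read_lock();
    f = rcu_dereference(resv->fence_excl);
    if (f && !fence_get_rcu(f))         /* refcount may already be zero */
            f = NULL;
    rcu_read_unlock();

    /* writer side: ->release only schedules the real free */
    static void example_free(struct rcu_head *rcu)
    {
            struct fence *f = container_of(rcu, struct fence, rcu);

            kmem_cache_free(example_slab,
                            container_of(f, struct example_fence, base));
    }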
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d20c2a8929cb..fa6a27bff298 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include <linux/ktime.h>
+#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -83,24 +84,32 @@ retry:
return r;
}
*obj = &robj->gem_base;
- robj->pid = task_pid_nr(current);
-
- mutex_lock(&adev->gem.mutex);
- list_add_tail(&robj->list, &adev->gem.objects);
- mutex_unlock(&adev->gem.mutex);
return 0;
}
-int amdgpu_gem_init(struct amdgpu_device *adev)
+void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
- INIT_LIST_HEAD(&adev->gem.objects);
- return 0;
-}
+ struct drm_device *ddev = adev->ddev;
+ struct drm_file *file;
-void amdgpu_gem_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_force_delete(adev);
+ mutex_lock(&ddev->struct_mutex);
+
+ list_for_each_entry(file, &ddev->filelist, lhead) {
+ struct drm_gem_object *gobj;
+ int handle;
+
+ WARN_ONCE(1, "Still active user space clients!\n");
+ spin_lock(&file->table_lock);
+ idr_for_each_entry(&file->object_idr, gobj, handle) {
+ WARN_ONCE(1, "And also active allocations!\n");
+ drm_gem_object_unreference(gobj);
+ }
+ idr_destroy(&file->object_idr);
+ spin_unlock(&file->table_lock);
+ }
+
+ mutex_unlock(&ddev->struct_mutex);
}
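
For reference, idr_destroy() only frees the IDR's internal nodes; it does
not drop references on the stored objects, which is why the loop above
unreferences every entry before destroying the IDR. Condensed, the
teardown order is:

    spin_lock(&file->table_lock);
    idr_for_each_entry(&file->object_idr, gobj, handle)
            drm_gem_object_unreference(gobj);   /* drop each handle's ref */
    idr_destroy(&file->object_idr);             /* then free IDR internals */
    spin_unlock(&file->table_lock);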
/*
@@ -132,25 +141,40 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = rbo->adev;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = bo->adev;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
+
+ struct amdgpu_bo_list_entry vm_pd;
+ struct list_head list, duplicates;
+ struct ttm_validate_buffer tv;
+ struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
int r;
- r = amdgpu_bo_reserve(rbo, true);
+
+ INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
+
+ tv.bo = &bo->tbo;
+ tv.shared = true;
+ list_add(&tv.head, &list);
+
+ amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
return;
}
- bo_va = amdgpu_vm_bo_find(vm, rbo);
+ bo_va = amdgpu_vm_bo_find(vm, bo);
if (bo_va) {
if (--bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va);
}
}
- amdgpu_bo_unreserve(rbo);
+ ttm_eu_backoff_reservation(&ticket, &list);
}
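
The switch from plain amdgpu_bo_reserve() to ttm_eu_reserve_buffers()
matters because the close path now needs two buffers at once, the BO and
the VM page directory. Reserving a list through a ww_acquire_ctx ticket
lets TTM back off and retry in a deadlock-free order. A minimal sketch of
the idiom, assuming a validation list built as above:

    struct ww_acquire_ctx ticket;
    struct list_head list;      /* ttm_validate_buffer entries */

    r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
    if (r)
            return r;           /* nothing stays reserved on failure */
    /* ... work on the reserved buffers ... */
    ttm_eu_backoff_reservation(&ticket, &list);     /* release them all */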
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -235,12 +259,10 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_USERPTR_REGISTER))
return -EINVAL;
- if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
- !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
- !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
+ if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
+ !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
- /* if we want to write to it we must require anonymous
- memory and install a MMU notifier */
+ /* if we want to write to it we must install a MMU notifier */
return -EACCES;
}
@@ -252,6 +274,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto handle_lockup;
bo = gem_to_amdgpu_bo(gobj);
+ bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
if (r)
goto release_object;
@@ -264,18 +288,23 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
down_read(&current->mm->mmap_sem);
+
+ r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
+ bo->tbo.ttm->pages);
+ if (r)
+ goto unlock_mmap_sem;
+
r = amdgpu_bo_reserve(bo, true);
- if (r) {
- up_read(&current->mm->mmap_sem);
- goto release_object;
- }
+ if (r)
+ goto free_pages;
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
amdgpu_bo_unreserve(bo);
- up_read(&current->mm->mmap_sem);
if (r)
- goto release_object;
+ goto free_pages;
+
+ up_read(&current->mm->mmap_sem);
}
r = drm_gem_handle_create(filp, gobj, &handle);
@@ -287,6 +316,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
args->handle = handle;
return 0;
+free_pages:
+ release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
+
+unlock_mmap_sem:
+ up_read(&current->mm->mmap_sem);
+
release_object:
drm_gem_object_unreference_unlocked(gobj);
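
The reworked error handling above is the usual kernel goto ladder: each
label undoes exactly the steps that succeeded before the failure, in
reverse order. Condensed, the AMDGPU_GEM_USERPTR_VALIDATE path is:

    down_read(&current->mm->mmap_sem);
    r = amdgpu_ttm_tt_get_user_pages(...);   /* 1. pin the user pages */
    if (r)
            goto unlock_mmap_sem;
    r = amdgpu_bo_reserve(bo, true);         /* 2. reserve the BO */
    if (r)
            goto free_pages;                 /*    undoes step 1 */
    r = ttm_bo_validate(...);                /* 3. bind pages into GTT */
    amdgpu_bo_unreserve(bo);
    if (r)
            goto free_pages;
    up_read(&current->mm->mmap_sem);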
@@ -308,7 +343,7 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
+ if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
(robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
drm_gem_object_unreference_unlocked(gobj);
return -EPERM;
@@ -559,11 +594,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
tv.shared = true;
list_add(&tv.head, &list);
- if (args->operation == AMDGPU_VA_OP_MAP) {
- tv_pd.bo = &fpriv->vm.page_directory->tbo;
- tv_pd.shared = true;
- list_add(&tv_pd.head, &list);
- }
+ tv_pd.bo = &fpriv->vm.page_directory->tbo;
+ tv_pd.shared = true;
+ list_add(&tv_pd.head, &list);
+
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
drm_gem_object_unreference_unlocked(gobj);
@@ -629,7 +663,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
info.bo_size = robj->gem_base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
- info.domains = robj->initial_domain;
+ info.domains = robj->prefered_domains;
info.domain_flags = robj->flags;
amdgpu_bo_unreserve(robj);
if (copy_to_user(out, &info, sizeof(info)))
@@ -637,14 +671,18 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
- if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
+ if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
amdgpu_bo_unreserve(robj);
break;
}
- robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT |
- AMDGPU_GEM_DOMAIN_CPU);
+ robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT |
+ AMDGPU_GEM_DOMAIN_CPU);
+ robj->allowed_domains = robj->prefered_domains;
+ if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+ robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+
amdgpu_bo_unreserve(robj);
break;
default:
@@ -689,38 +727,73 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
}
#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+{
+ struct drm_gem_object *gobj = ptr;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct seq_file *m = data;
+
+ unsigned domain;
+ const char *placement;
+ unsigned pin_count;
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ switch (domain) {
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ placement = "VRAM";
+ break;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ placement = " GTT";
+ break;
+ case AMDGPU_GEM_DOMAIN_CPU:
+ default:
+ placement = " CPU";
+ break;
+ }
+ seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
+ id, amdgpu_bo_size(bo), placement,
+ amdgpu_bo_gpu_offset(bo));
+
+ pin_count = ACCESS_ONCE(bo->pin_count);
+ if (pin_count)
+ seq_printf(m, " pin count %d", pin_count);
+ seq_printf(m, "\n");
+
+ return 0;
+}
+
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_bo *rbo;
- unsigned i = 0;
+ struct drm_file *file;
+ int r;
+
+ r = mutex_lock_interruptible(&dev->struct_mutex);
+ if (r)
+ return r;
- mutex_lock(&adev->gem.mutex);
- list_for_each_entry(rbo, &adev->gem.objects, list) {
- unsigned domain;
- const char *placement;
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ struct task_struct *task;
- domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type);
- switch (domain) {
- case AMDGPU_GEM_DOMAIN_VRAM:
- placement = "VRAM";
- break;
- case AMDGPU_GEM_DOMAIN_GTT:
- placement = " GTT";
- break;
- case AMDGPU_GEM_DOMAIN_CPU:
- default:
- placement = " CPU";
- break;
- }
- seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
- i, amdgpu_bo_size(rbo) >> 10, amdgpu_bo_size(rbo) >> 20,
- placement, (unsigned long)rbo->pid);
- i++;
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct who called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ task = pid_task(file->pid, PIDTYPE_PID);
+ seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+ task ? task->comm : "<unknown>");
+ rcu_read_unlock();
+
+ spin_lock(&file->table_lock);
+ idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
+ spin_unlock(&file->table_lock);
}
- mutex_unlock(&adev->gem.mutex);
+
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
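
The RCU usage in the new dump deserves a word: file->pid is a stable
reference, but the task it names can exit at any moment, so the
task_struct lookup and the ->comm read must both happen inside a single
RCU read-side section. Minimal form:

    rcu_read_lock();
    task = pid_task(file->pid, PIDTYPE_PID);   /* NULL if it already exited */
    seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
               task ? task->comm : "<unknown>");
    rcu_read_unlock();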
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 9e25edafa721..8443cea6821a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -55,10 +55,9 @@ static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
* suballocator.
* Returns 0 on success, error on failure.
*/
-int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
+int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib)
{
- struct amdgpu_device *adev = ring->adev;
int r;
if (size) {
@@ -75,10 +74,8 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
}
- amdgpu_sync_create(&ib->sync);
-
- ib->ring = ring;
ib->vm = vm;
+ ib->vm_id = 0;
return 0;
}
@@ -88,15 +85,13 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
*
* @adev: amdgpu_device pointer
* @ib: IB object to free
+ * @f: the fence the SA bo must wait on before the IB allocation can be reused
*
* Free an IB (all asics).
*/
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f)
{
- amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
- amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
- if (ib->fence)
- fence_put(&ib->fence->base);
+ amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
/**
@@ -105,7 +100,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
* @adev: amdgpu_device pointer
* @num_ibs: number of IBs to schedule
* @ibs: IB objects to schedule
- * @owner: owner for creating the fences
+ * @f: fence created during this submission
*
* Schedule an IB on the associated ring (all asics).
* Returns 0 on success, error on failure.
@@ -120,20 +115,21 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
* a CONST_IB), it will be put on the ring prior to the DE IB. Prior
* to SI there was just a DE IB.
*/
-int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_ib *ibs, void *owner)
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ struct amdgpu_ib *ibs, struct fence *last_vm_update,
+ struct fence **f)
{
+ struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
- struct amdgpu_ring *ring;
struct amdgpu_ctx *ctx, *old_ctx;
struct amdgpu_vm *vm;
+ struct fence *hwf;
unsigned i;
int r = 0;
if (num_ibs == 0)
return -EINVAL;
- ring = ibs->ring;
ctx = ibs->ctx;
vm = ibs->vm;
@@ -141,42 +137,24 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
dev_err(adev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
- r = amdgpu_sync_wait(&ibs->sync);
- if (r) {
- dev_err(adev->dev, "IB sync failed (%d).\n", r);
- return r;
- }
- r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
- if (r) {
- dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
- return r;
- }
- if (vm) {
- /* grab a vm id if necessary */
- r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
- if (r) {
- amdgpu_ring_unlock_undo(ring);
- return r;
- }
+ if (vm && !ibs->vm_id) {
+ dev_err(adev->dev, "VM IB without ID\n");
+ return -EINVAL;
}
- r = amdgpu_sync_rings(&ibs->sync, ring);
+ r = amdgpu_ring_alloc(ring, 256 * num_ibs);
if (r) {
- amdgpu_ring_unlock_undo(ring);
- dev_err(adev->dev, "failed to sync rings (%d)\n", r);
+ dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
if (vm) {
/* do context switch */
- amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
-
- if (ring->funcs->emit_gds_switch)
- amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
- ib->gds_base, ib->gds_size,
- ib->gws_base, ib->gws_size,
- ib->oa_base, ib->oa_size);
+ amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
+ ib->gds_base, ib->gds_size,
+ ib->gws_base, ib->gws_size,
+ ib->oa_base, ib->oa_size);
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
@@ -186,27 +164,32 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
- if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) {
+ if (ib->ctx != ctx || ib->vm != vm) {
ring->current_ctx = old_ctx;
- amdgpu_ring_unlock_undo(ring);
+ if (ib->vm_id)
+ amdgpu_vm_reset_id(adev, ib->vm_id);
+ amdgpu_ring_undo(ring);
return -EINVAL;
}
amdgpu_ring_emit_ib(ring, ib);
ring->current_ctx = ctx;
}
- r = amdgpu_fence_emit(ring, owner, &ib->fence);
+ if (vm) {
+ if (ring->funcs->emit_hdp_invalidate)
+ amdgpu_ring_emit_hdp_invalidate(ring);
+ }
+
+ r = amdgpu_fence_emit(ring, &hwf);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
ring->current_ctx = old_ctx;
- amdgpu_ring_unlock_undo(ring);
+ if (ib->vm_id)
+ amdgpu_vm_reset_id(adev, ib->vm_id);
+ amdgpu_ring_undo(ring);
return r;
}
- if (!amdgpu_enable_scheduler && ib->ctx)
- ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
- &ib->fence->base);
-
/* wrap the last IB with fence */
if (ib->user) {
uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
@@ -215,10 +198,10 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
AMDGPU_FENCE_FLAG_64BIT);
}
- if (ib->vm)
- amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
+ if (f)
+ *f = fence_get(hwf);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
return 0;
}
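
After this rework amdgpu_ib_schedule() no longer waits on sync objects or
grabs VM IDs itself; callers must pass IBs whose vm_id and vm_pd_addr are
already populated (the scheduler's dependency callback does this, see
amdgpu_job.c below). A sketch of the resulting caller contract, with
illustrative values:

    ib->vm_id = vm_id;              /* grabbed via amdgpu_vm_grab_id() */
    ib->vm_pd_addr = vm_pd_addr;
    r = amdgpu_ib_schedule(ring, 1, ib, last_vm_update, &fence);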
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index f594cfaa97e5..762cfdb85147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -219,6 +219,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r) {
return r;
}
+ adev->ddev->vblank_disable_allowed = true;
+
/* enable msi */
adev->irq.msi_enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
new file mode 100644
index 000000000000..9c9b19e2f353
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+ struct amdgpu_job **job)
+{
+ size_t size = sizeof(struct amdgpu_job);
+
+ if (num_ibs == 0)
+ return -EINVAL;
+
+ size += sizeof(struct amdgpu_ib) * num_ibs;
+
+ *job = kzalloc(size, GFP_KERNEL);
+ if (!*job)
+ return -ENOMEM;
+
+ (*job)->adev = adev;
+ (*job)->ibs = (void *)&(*job)[1];
+ (*job)->num_ibs = num_ibs;
+
+ amdgpu_sync_create(&(*job)->sync);
+
+ return 0;
+}
+
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+ struct amdgpu_job **job)
+{
+ int r;
+
+ r = amdgpu_job_alloc(adev, 1, job);
+ if (r)
+ return r;
+
+ r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+ if (r)
+ kfree(*job);
+
+ return r;
+}
+
+void amdgpu_job_free(struct amdgpu_job *job)
+{
+ unsigned i;
+ struct fence *f;
+ /* use sched fence if available */
+	f = (job->base.s_fence) ? &job->base.s_fence->base : job->fence;
+
+ for (i = 0; i < job->num_ibs; ++i)
+ amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
+ fence_put(job->fence);
+
+ amdgpu_bo_unref(&job->uf.bo);
+ amdgpu_sync_free(&job->sync);
+ kfree(job);
+}
+
+int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct amd_sched_entity *entity, void *owner,
+ struct fence **f)
+{
+ job->ring = ring;
+ job->base.sched = &ring->sched;
+ job->base.s_entity = entity;
+ job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+ if (!job->base.s_fence)
+ return -ENOMEM;
+
+ *f = fence_get(&job->base.s_fence->base);
+
+ job->owner = owner;
+ amd_sched_entity_push_job(&job->base);
+
+ return 0;
+}
+
+static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+{
+ struct amdgpu_job *job = to_amdgpu_job(sched_job);
+ struct amdgpu_vm *vm = job->ibs->vm;
+
+ struct fence *fence = amdgpu_sync_get_fence(&job->sync);
+
+ if (fence == NULL && vm && !job->ibs->vm_id) {
+ struct amdgpu_ring *ring = job->ring;
+ unsigned i, vm_id;
+ uint64_t vm_pd_addr;
+ int r;
+
+ r = amdgpu_vm_grab_id(vm, ring, &job->sync,
+ &job->base.s_fence->base,
+ &vm_id, &vm_pd_addr);
+ if (r)
+ DRM_ERROR("Error getting VM ID (%d)\n", r);
+ else {
+ for (i = 0; i < job->num_ibs; ++i) {
+ job->ibs[i].vm_id = vm_id;
+ job->ibs[i].vm_pd_addr = vm_pd_addr;
+ }
+ }
+
+ fence = amdgpu_sync_get_fence(&job->sync);
+ }
+
+ return fence;
+}
+
+static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+{
+ struct fence *fence = NULL;
+ struct amdgpu_job *job;
+ int r;
+
+ if (!sched_job) {
+ DRM_ERROR("job is null\n");
+ return NULL;
+ }
+ job = to_amdgpu_job(sched_job);
+
+ r = amdgpu_sync_wait(&job->sync);
+ if (r) {
+ DRM_ERROR("failed to sync wait (%d)\n", r);
+ return NULL;
+ }
+
+ trace_amdgpu_sched_run_job(job);
+ r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
+ job->sync.last_vm_update, &fence);
+ if (r) {
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ goto err;
+ }
+
+err:
+ job->fence = fence;
+ amdgpu_job_free(job);
+ return fence;
+}
+
+struct amd_sched_backend_ops amdgpu_sched_ops = {
+ .dependency = amdgpu_job_dependency,
+ .run_job = amdgpu_job_run,
+};
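
amdgpu_job_alloc() uses the classic header-plus-trailing-array layout: a
single kzalloc() covers the job struct and its IB array, and
(*job)->ibs = (void *)&(*job)[1] points just past the header. A small
userspace analogue (illustrative types, not driver code) showing why one
free() then releases everything:

    #include <stdio.h>
    #include <stdlib.h>

    struct ib  { unsigned length_dw; };
    struct job { struct ib *ibs; unsigned num_ibs; };

    int main(void)
    {
            unsigned num_ibs = 4;
            struct job *job = calloc(1, sizeof(*job) +
                                        num_ibs * sizeof(struct ib));
            if (!job)
                    return 1;
            job->ibs = (void *)&job[1];  /* array right after the header */
            job->num_ibs = num_ibs;
            printf("job=%p ibs=%p\n", (void *)job, (void *)job->ibs);
            free(job);                   /* one allocation, one free */
            return 0;
    }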
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e23843f4d877..b04337de65d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
fw_info.feature = adev->vce.fb_version;
break;
case AMDGPU_INFO_FW_UVD:
- fw_info.ver = 0;
+ fw_info.ver = adev->uvd.fw_version;
fw_info.feature = 0;
break;
case AMDGPU_INFO_FW_GMC:
@@ -382,8 +382,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->mc.real_vram_size;
+ vram_gtt.vram_size -= adev->vram_pin_size;
vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
- vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+ vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
vram_gtt.gtt_size = adev->mc.gtt_size;
vram_gtt.gtt_size -= adev->gart_pin_size;
return copy_to_user(out, &vram_gtt,
@@ -447,8 +448,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.max_memory_clock = adev->pm.default_mclk * 10;
}
dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
- dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
- adev->gfx.config.max_shader_engines;
+ dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
dev_info._pad = 0;
dev_info.ids_flags = 0;
@@ -727,6 +727,12 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
/* Get associated drm_crtc: */
crtc = &adev->mode_info.crtcs[pipe]->base;
+ if (!crtc) {
+ /* This can occur on driver load if some component fails to
+	 * initialize completely and the driver is then unloaded. */
+ DRM_ERROR("Uninitialized crtc %d\n", pipe);
+ return -EINVAL;
+ }
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
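
The accounting change is easier to see with numbers: only the CPU-visible
share of pinned memory should shrink the reported CPU-accessible window,
so the invisible pin size is added back. A worked example with assumed
sizes (in MiB):

    /* assumed: 4096 total VRAM, 256 visible window,
     * 300 pinned in total, of which 200 is CPU-invisible */
    vram_size      = 4096 - 300;          /* 3796 usable overall */
    cpu_accessible =  256 - (300 - 200);  /*  156, not 256 - 300 */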
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d4e2780c0796..9f4a45cd2aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -77,8 +77,6 @@ static void amdgpu_mn_destroy(struct work_struct *work)
hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
it.rb) {
-
- interval_tree_remove(&node->it, &rmn->objects);
list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
bo->mn = NULL;
list_del_init(&bo->mn_list);
@@ -87,7 +85,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
}
mutex_unlock(&rmn->lock);
mutex_unlock(&adev->mn_lock);
- mmu_notifier_unregister(&rmn->mn, rmn->mm);
+ mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn);
}
@@ -108,6 +106,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
}
/**
+ * amdgpu_mn_invalidate_node - unmap all BOs of a node
+ *
+ * @node: the node with the BOs to unmap
+ *
+ * We block for all BOs and unmap them by moving them
+ * back into the system domain.
+ */
+static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
+ unsigned long start,
+ unsigned long end)
+{
+ struct amdgpu_bo *bo;
+ long r;
+
+ list_for_each_entry(bo, &node->bos, mn_list) {
+
+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
+ continue;
+
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+ continue;
+ }
+
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ true, false, MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (r)
+ DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+ amdgpu_bo_unreserve(bo);
+ }
+}
+
+/**
+ * amdgpu_mn_invalidate_page - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @address: address of the invalidated page
+ *
+ * Invalidation of a single page. Blocks for all BOs mapping it
+ * and unmaps them by moving them back into the system domain.
+ */
+static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct interval_tree_node *it;
+
+ mutex_lock(&rmn->lock);
+
+ it = interval_tree_iter_first(&rmn->objects, address, address);
+ if (it) {
+ struct amdgpu_mn_node *node;
+
+ node = container_of(it, struct amdgpu_mn_node, it);
+ amdgpu_mn_invalidate_node(node, address, address);
+ }
+
+ mutex_unlock(&rmn->lock);
+}
+
+/**
* amdgpu_mn_invalidate_range_start - callback to notify about mm change
*
* @mn: our notifier
@@ -134,36 +202,11 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
- struct amdgpu_bo *bo;
- long r;
node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);
- list_for_each_entry(bo, &node->bos, mn_list) {
-
- if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
- end))
- continue;
-
- r = amdgpu_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- amdgpu_bo_unreserve(bo);
- }
+ amdgpu_mn_invalidate_node(node, start, end);
}
mutex_unlock(&rmn->lock);
@@ -171,6 +214,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
+ .invalidate_page = amdgpu_mn_invalidate_page,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
@@ -187,8 +231,8 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
struct amdgpu_mn *rmn;
int r;
- down_write(&mm->mmap_sem);
mutex_lock(&adev->mn_lock);
+ down_write(&mm->mmap_sem);
hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
if (rmn->mm == mm)
@@ -213,14 +257,14 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
release_locks:
- mutex_unlock(&adev->mn_lock);
up_write(&mm->mmap_sem);
+ mutex_unlock(&adev->mn_lock);
return rmn;
free_rmn:
- mutex_unlock(&adev->mn_lock);
up_write(&mm->mmap_sem);
+ mutex_unlock(&adev->mn_lock);
kfree(rmn);
return ERR_PTR(r);
@@ -298,6 +342,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
struct list_head *head;
mutex_lock(&adev->mn_lock);
+
rmn = bo->mn;
if (rmn == NULL) {
mutex_unlock(&adev->mn_lock);
@@ -305,6 +350,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
}
mutex_lock(&rmn->lock);
+
/* save the next list entry for later */
head = bo->mn_list.next;
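
The reordering in amdgpu_mn_get() is a lock-ordering fix: every path now
takes mn_lock before mmap_sem and releases them in the reverse order,
matching the paths that take mn_lock alone. Keeping one global acquisition
order is what rules out an ABBA deadlock:

    mutex_lock(&adev->mn_lock);     /* 1st: device-wide notifier lock */
    down_write(&mm->mmap_sem);      /* 2nd: per-mm lock */
    /* ... look up or create the notifier ... */
    up_write(&mm->mmap_sem);        /* release in reverse order */
    mutex_unlock(&adev->mn_lock);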
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index fdc1be8550da..81bd964d3dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
bool mode_config_initialized;
- struct amdgpu_crtc *crtcs[6];
- struct amdgpu_afmt *afmt[7];
+ struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -390,7 +390,6 @@ struct amdgpu_crtc {
struct drm_display_mode native_mode;
u32 pll_id;
/* page flipping */
- struct workqueue_struct *pflip_queue;
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b8fbbd7699e4..7ecea83ce453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -97,9 +97,6 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
- mutex_lock(&bo->adev->gem.mutex);
- list_del_init(&bo->list);
- mutex_unlock(&bo->adev->gem.mutex);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
kfree(bo->metadata);
@@ -254,12 +251,15 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->adev = adev;
INIT_LIST_HEAD(&bo->list);
INIT_LIST_HEAD(&bo->va);
- bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT |
- AMDGPU_GEM_DOMAIN_CPU |
- AMDGPU_GEM_DOMAIN_GDS |
- AMDGPU_GEM_DOMAIN_GWS |
- AMDGPU_GEM_DOMAIN_OA);
+ bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT |
+ AMDGPU_GEM_DOMAIN_CPU |
+ AMDGPU_GEM_DOMAIN_GDS |
+ AMDGPU_GEM_DOMAIN_GWS |
+ AMDGPU_GEM_DOMAIN_OA);
+ bo->allowed_domains = bo->prefered_domains;
+ if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+ bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
bo->flags = flags;
@@ -308,7 +308,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
bool is_iomem;
- int r;
+ long r;
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
@@ -319,14 +319,20 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
}
return 0;
}
+
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
+
r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
- if (r) {
+ if (r)
return r;
- }
+
bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
- if (ptr) {
+ if (ptr)
*ptr = bo->kptr;
- }
+
return 0;
}
@@ -367,7 +373,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
int r, i;
unsigned fpfn, lpfn;
- if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM;
if (WARN_ON_ONCE(min_offset > max_offset))
@@ -418,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
bo->adev->vram_pin_size += amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size += amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -450,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
@@ -470,25 +480,16 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}
-void amdgpu_bo_force_delete(struct amdgpu_device *adev)
-{
- struct amdgpu_bo *bo, *n;
-
- if (list_empty(&adev->gem.objects)) {
- return;
- }
- dev_err(adev->dev, "Userspace still has active objects !\n");
- list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
- dev_err(adev->dev, "%p %p %lu %lu force free\n",
- &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
- *((unsigned long *)&bo->gem_base.refcount));
- mutex_lock(&bo->adev->gem.mutex);
- list_del_init(&bo->list);
- mutex_unlock(&bo->adev->gem.mutex);
- /* this should unref the ttm bo */
- drm_gem_object_unreference_unlocked(&bo->gem_base);
- }
-}
+static const char *amdgpu_vram_names[] = {
+ "UNKNOWN",
+ "GDDR1",
+ "DDR2",
+ "GDDR3",
+ "GDDR4",
+ "GDDR5",
+ "HBM",
+ "DDR3"
+};
int amdgpu_bo_init(struct amdgpu_device *adev)
{
@@ -498,8 +499,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->mc.mc_vram_size >> 20,
(unsigned long long)adev->mc.aper_size >> 20);
- DRM_INFO("RAM width %dbits DDR\n",
- adev->mc.vram_width);
+ DRM_INFO("RAM width %dbits %s\n",
+ adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
return amdgpu_ttm_init(adev);
}
@@ -540,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (!metadata_size) {
if (bo->metadata_size) {
kfree(bo->metadata);
+ bo->metadata = NULL;
bo->metadata_size = 0;
}
return 0;
@@ -622,6 +624,10 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if ((offset + size) <= adev->mc.visible_vram_size)
return 0;
+ /* Can't move a pinned BO to visible VRAM */
+ if (abo->pin_count > 0)
+ return -EINVAL;
+
/* hurrah the memory is not visible ! */
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
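
amdgpu_bo_kmap() now waits for the BO to go idle before mapping it, since
a CPU mapping of memory the GPU is still writing would be racy; r becomes
long because the reservation wait returns remaining jiffies or a negative
error. Condensed:

    long r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
                    false, false,       /* exclusive fence only, no intr */
                    MAX_SCHEDULE_TIMEOUT);
    if (r < 0)
            return r;                   /* wait failed */
    r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);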
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5107fb291bdb..acc08018c6cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -149,7 +149,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
-void amdgpu_bo_force_delete(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 95a4a25d8df9..ff9597ce268c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -123,7 +123,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
level = amdgpu_dpm_get_performance_level(adev);
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
- (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
+ (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
+ (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
+ (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
} else {
enum amdgpu_dpm_forced_level level;
@@ -155,6 +157,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
} else if (strncmp("auto", buf, strlen("auto")) == 0) {
level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+ } else if (strncmp("manual", buf, strlen("manual")) == 0) {
+ level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
} else {
count = -EINVAL;
goto fail;
@@ -180,10 +184,293 @@ fail:
return count;
}
+static ssize_t amdgpu_get_pp_num_states(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct pp_states_info data;
+ int i, buf_len;
+
+ if (adev->pp_enabled)
+ amdgpu_dpm_get_pp_num_states(adev, &data);
+
+ buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
+ for (i = 0; i < data.nums; i++)
+ buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
+ (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
+ (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
+ (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
+ (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
+
+ return buf_len;
+}
+
+static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct pp_states_info data;
+ enum amd_pm_state_type pm = 0;
+ int i = 0;
+
+ if (adev->pp_enabled) {
+
+ pm = amdgpu_dpm_get_current_power_state(adev);
+ amdgpu_dpm_get_pp_num_states(adev, &data);
+
+ for (i = 0; i < data.nums; i++) {
+ if (pm == data.states[i])
+ break;
+ }
+
+ if (i == data.nums)
+ i = -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", i);
+}
+
+static ssize_t amdgpu_get_pp_force_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct pp_states_info data;
+ enum amd_pm_state_type pm = 0;
+ int i;
+
+ if (adev->pp_force_state_enabled && adev->pp_enabled) {
+ pm = amdgpu_dpm_get_current_power_state(adev);
+ amdgpu_dpm_get_pp_num_states(adev, &data);
+
+ for (i = 0; i < data.nums; i++) {
+ if (pm == data.states[i])
+ break;
+ }
+
+ if (i == data.nums)
+ i = -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", i);
+
+ } else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t amdgpu_set_pp_force_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ enum amd_pm_state_type state = 0;
+ long idx;
+ int ret;
+
+ if (strlen(buf) == 1)
+ adev->pp_force_state_enabled = false;
+ else {
+ ret = kstrtol(buf, 0, &idx);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled) {
+ struct pp_states_info data;
+ amdgpu_dpm_get_pp_num_states(adev, &data);
+ state = data.states[idx];
+ /* only set user selected power states */
+ if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+ state != POWER_STATE_TYPE_DEFAULT) {
+ amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ adev->pp_force_state_enabled = true;
+ }
+ }
+ }
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_table(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ char *table = NULL;
+ int size, i;
+
+ if (adev->pp_enabled)
+ size = amdgpu_dpm_get_pp_table(adev, &table);
+ else
+ return 0;
+
+ if (size >= PAGE_SIZE)
+ size = PAGE_SIZE - 1;
+
+ for (i = 0; i < size; i++) {
+ sprintf(buf + i, "%02x", table[i]);
+ }
+ sprintf(buf + i, "\n");
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_table(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (adev->pp_enabled)
+ amdgpu_dpm_set_pp_table(adev, buf, count);
+
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size = 0;
+
+ if (adev->pp_enabled)
+ size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long level;
+
+ ret = kstrtol(buf, 0, &level);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled)
+ amdgpu_dpm_force_clock_level(adev, PP_SCLK, level);
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size = 0;
+
+ if (adev->pp_enabled)
+ size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long level;
+
+ ret = kstrtol(buf, 0, &level);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled)
+ amdgpu_dpm_force_clock_level(adev, PP_MCLK, level);
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ ssize_t size = 0;
+
+ if (adev->pp_enabled)
+ size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+
+ return size;
+}
+
+static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long level;
+
+ ret = kstrtol(buf, 0, &level);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled)
+ amdgpu_dpm_force_clock_level(adev, PP_PCIE, level);
+fail:
+ return count;
+}
+
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
amdgpu_get_dpm_forced_performance_level,
amdgpu_set_dpm_forced_performance_level);
+static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
+static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
+static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_force_state,
+ amdgpu_set_pp_force_state);
+static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_table,
+ amdgpu_set_pp_table);
+static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_dpm_sclk,
+ amdgpu_set_pp_dpm_sclk);
+static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_dpm_mclk,
+ amdgpu_set_pp_dpm_mclk);
+static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_dpm_pcie,
+ amdgpu_set_pp_dpm_pcie);
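
All the new pp_* store handlers share one shape: parse a single integer
with kstrtol(), reject garbage with -EINVAL, and report the whole write as
consumed on success. A generic sketch (illustrative name, not one of the
handlers above):

    static ssize_t example_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
            long level;

            if (kstrtol(buf, 0, &level))
                    return -EINVAL;     /* not a number */
            /* ... apply 'level' to the hardware ... */
            return count;               /* whole write consumed */
    }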
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -637,14 +924,12 @@ force:
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
}
- mutex_lock(&adev->ring_lock);
-
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
- goto done;
+ return;
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
@@ -682,9 +967,6 @@ force:
amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
}
}
-
-done:
- mutex_unlock(&adev->ring_lock);
}
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
@@ -785,6 +1067,44 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file for dpm state\n");
return ret;
}
+
+ if (adev->pp_enabled) {
+ ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_num_states\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_cur_state\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_force_state\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_table);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_table\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_sclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_mclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+ return ret;
+ }
+ }
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -802,6 +1122,15 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
device_remove_file(adev->dev, &dev_attr_power_dpm_state);
device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
+ if (adev->pp_enabled) {
+ device_remove_file(adev->dev, &dev_attr_pp_num_states);
+ device_remove_file(adev->dev, &dev_attr_pp_cur_state);
+ device_remove_file(adev->dev, &dev_attr_pp_force_state);
+ device_remove_file(adev->dev, &dev_attr_pp_table);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ }
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -817,13 +1146,11 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
int i = 0;
amdgpu_display_bandwidth_update(adev);
- mutex_lock(&adev->ring_lock);
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
- mutex_unlock(&adev->ring_lock);
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->ready)
+ amdgpu_fence_wait_empty(ring);
+ }
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 3cb6d6c413c7..e9c6ae6ed2f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,7 +143,7 @@ static int amdgpu_pp_late_init(void *handle)
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
+ if (adev->pp_enabled && adev->pm.dpm_enabled) {
amdgpu_pm_sysfs_init(adev);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
}
@@ -161,12 +161,8 @@ static int amdgpu_pp_sw_init(void *handle)
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
- if (amdgpu_dpm == 0)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- }
+ if (adev->pp_enabled)
+ adev->pm.dpm_enabled = true;
#endif
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 59f735a933a9..be6388f73ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -73,10 +73,6 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);
- mutex_lock(&adev->gem.mutex);
- list_add_tail(&bo->list, &adev->gem.objects);
- mutex_unlock(&adev->gem.mutex);
-
return &bo->gem_base;
}
@@ -121,7 +117,7 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
- if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return ERR_PTR(-EPERM);
return drm_gem_prime_export(dev, gobj, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index d1f234dd2126..972eed2ef787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -49,28 +49,6 @@
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
/**
- * amdgpu_ring_free_size - update the free size
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring structure holding ring information
- *
- * Update the free dw slots in the ring buffer (all asics).
- */
-void amdgpu_ring_free_size(struct amdgpu_ring *ring)
-{
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
-
- /* This works because ring_size is a power of 2 */
- ring->ring_free_dw = rptr + (ring->ring_size / 4);
- ring->ring_free_dw -= ring->wptr;
- ring->ring_free_dw &= ring->ptr_mask;
- if (!ring->ring_free_dw) {
- /* this is an empty ring */
- ring->ring_free_dw = ring->ring_size / 4;
- }
-}
-
-/**
* amdgpu_ring_alloc - allocate space on the ring buffer
*
* @adev: amdgpu_device pointer
@@ -82,50 +60,18 @@ void amdgpu_ring_free_size(struct amdgpu_ring *ring)
*/
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
- int r;
-
- /* make sure we aren't trying to allocate more space than there is on the ring */
- if (ndw > (ring->ring_size / 4))
- return -ENOMEM;
/* Align requested size with padding so unlock_commit can
* pad safely */
- amdgpu_ring_free_size(ring);
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
- while (ndw > (ring->ring_free_dw - 1)) {
- amdgpu_ring_free_size(ring);
- if (ndw < ring->ring_free_dw) {
- break;
- }
- r = amdgpu_fence_wait_next(ring);
- if (r)
- return r;
- }
- ring->count_dw = ndw;
- ring->wptr_old = ring->wptr;
- return 0;
-}
-/**
- * amdgpu_ring_lock - lock the ring and allocate space on it
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring structure holding ring information
- * @ndw: number of dwords to allocate in the ring buffer
- *
- * Lock the ring and allocate @ndw dwords in the ring buffer
- * (all asics).
- * Returns 0 on success, error on failure.
- */
-int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
-{
- int r;
+ /* Make sure we aren't trying to allocate more space
+ * than the maximum for one submission
+ */
+ if (WARN_ON_ONCE(ndw > ring->max_dw))
+ return -ENOMEM;
- mutex_lock(ring->ring_lock);
- r = amdgpu_ring_alloc(ring, ndw);
- if (r) {
- mutex_unlock(ring->ring_lock);
- return r;
- }
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
return 0;
}
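
With the GPU scheduler in charge of flow control, amdgpu_ring_alloc() no
longer busy-waits for free ring space; it only sanity-checks the request
against max_dw, the per-submission share of the ring computed in
amdgpu_ring_init() below. A worked example with assumed values:

    /* assumed: 64 KiB ring, amdgpu_sched_hw_submission == 2 */
    unsigned ring_dw = (64 * 1024) / 4;           /* 16384 dwords total */
    unsigned max_dw  = DIV_ROUND_UP(ring_dw, 2);  /*  8192 per submission */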
@@ -144,6 +90,19 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
amdgpu_ring_write(ring, ring->nop);
}
+/**
+ * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ib: IB to add NOP packets to
+ *
+ * This is the generic pad_ib function for rings except SDMA
+ */
+void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+ while (ib->length_dw & ring->align_mask)
+ ib->ptr[ib->length_dw++] = ring->nop;
+}
+
/**
* amdgpu_ring_commit - tell the GPU to execute the new
* commands on the ring buffer
@@ -168,20 +127,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
}
/**
- * amdgpu_ring_unlock_commit - tell the GPU to execute the new
- * commands on the ring buffer and unlock it
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Call amdgpu_ring_commit() then unlock the ring (all asics).
- */
-void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring)
-{
- amdgpu_ring_commit(ring);
- mutex_unlock(ring->ring_lock);
-}
-
-/**
* amdgpu_ring_undo - reset the wptr
*
* @ring: amdgpu_ring structure holding ring information
@@ -194,19 +139,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
/**
- * amdgpu_ring_unlock_undo - reset the wptr and unlock the ring
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Call amdgpu_ring_undo() then unlock the ring (all asics).
- */
-void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
-{
- amdgpu_ring_undo(ring);
- mutex_unlock(ring->ring_lock);
-}
-
-/**
* amdgpu_ring_backup - Back up the content of a ring
*
* @ring: the ring we want to back up
@@ -218,43 +150,32 @@ unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
{
unsigned size, ptr, i;
- /* just in case lock the ring */
- mutex_lock(ring->ring_lock);
*data = NULL;
- if (ring->ring_obj == NULL) {
- mutex_unlock(ring->ring_lock);
+ if (ring->ring_obj == NULL)
return 0;
- }
/* it doesn't make sense to save anything if all fences are signaled */
- if (!amdgpu_fence_count_emitted(ring)) {
- mutex_unlock(ring->ring_lock);
+ if (!amdgpu_fence_count_emitted(ring))
return 0;
- }
ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
size = ring->wptr + (ring->ring_size / 4);
size -= ptr;
size &= ring->ptr_mask;
- if (size == 0) {
- mutex_unlock(ring->ring_lock);
+ if (size == 0)
return 0;
- }
/* and then save the content of the ring */
*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
- if (!*data) {
- mutex_unlock(ring->ring_lock);
+ if (!*data)
return 0;
- }
for (i = 0; i < size; ++i) {
(*data)[i] = ring->ring[ptr++];
ptr &= ring->ptr_mask;
}
- mutex_unlock(ring->ring_lock);
return size;
}
@@ -276,7 +197,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring,
return 0;
/* restore the saved ring content */
- r = amdgpu_ring_lock(ring, size);
+ r = amdgpu_ring_alloc(ring, size);
if (r)
return r;
@@ -284,7 +205,7 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, data[i]);
}
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
kfree(data);
return 0;
}
@@ -315,7 +236,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->adev = adev;
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- r = amdgpu_fence_driver_init_ring(ring);
+ r = amdgpu_fence_driver_init_ring(ring,
+ amdgpu_sched_hw_submission);
if (r)
return r;
}
@@ -352,7 +274,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- ring->ring_lock = &adev->ring_lock;
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
@@ -389,7 +310,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
- ring->ring_free_dw = ring->ring_size / 4;
+ ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
+ amdgpu_sched_hw_submission);
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
@@ -410,15 +332,10 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
int r;
struct amdgpu_bo *ring_obj;
- if (ring->ring_lock == NULL)
- return;
-
- mutex_lock(ring->ring_lock);
ring_obj = ring->ring_obj;
ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;
- mutex_unlock(ring->ring_lock);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
@@ -436,30 +353,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
}
}
-/**
- * amdgpu_ring_from_fence - get ring from fence
- *
- * @f: fence structure
- *
- * Extract the ring a fence belongs to. Handles both scheduler as
- * well as hardware fences.
- */
-struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f)
-{
- struct amdgpu_fence *a_fence;
- struct amd_sched_fence *s_fence;
-
- s_fence = to_amd_sched_fence(f);
- if (s_fence)
- return container_of(s_fence->sched, struct amdgpu_ring, sched);
-
- a_fence = to_amdgpu_fence(f);
- if (a_fence)
- return a_fence->ring;
-
- return NULL;
-}
-
/*
* Debugfs info
*/
@@ -474,29 +367,18 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
uint32_t rptr, wptr, rptr_next;
- unsigned count, i, j;
-
- amdgpu_ring_free_size(ring);
- count = (ring->ring_size / 4) - ring->ring_free_dw;
+ unsigned i;
wptr = amdgpu_ring_get_wptr(ring);
- seq_printf(m, "wptr: 0x%08x [%5d]\n",
- wptr, wptr);
+ seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);
rptr = amdgpu_ring_get_rptr(ring);
- seq_printf(m, "rptr: 0x%08x [%5d]\n",
- rptr, rptr);
-
rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
+ seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);
+
seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
ring->wptr, ring->wptr);
- seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
- ring->last_semaphore_signal_addr);
- seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
- ring->last_semaphore_wait_addr);
- seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
- seq_printf(m, "%u dwords in ring\n", count);
if (!ring->ready)
return 0;
@@ -505,11 +387,20 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
* packet that is the root issue
*/
i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- for (j = 0; j <= (count + 32); j++) {
+ while (i != rptr) {
+ seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
+ if (i == rptr)
+ seq_puts(m, " *");
+ if (i == rptr_next)
+ seq_puts(m, " #");
+ seq_puts(m, "\n");
+ i = (i + 1) & ring->ptr_mask;
+ }
+ while (i != wptr) {
seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (rptr == i)
+ if (i == rptr)
seq_puts(m, " *");
- if (rptr_next == i)
+ if (i == rptr_next)
seq_puts(m, " #");
seq_puts(m, "\n");
i = (i + 1) & ring->ptr_mask;
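
The rewritten dump walks the ring in two phases: the first while loop starts 32 dwords before rptr (the expression (rptr + ptr_mask + 1 - 32) & ptr_mask keeps the subtraction from underflowing; e.g. with ptr_mask 0x3fff and rptr 5 it starts at 0x3fe5 and wraps through 0) and prints up to rptr, then the second loop prints everything still pending between rptr and wptr, with "*" marking rptr and "#" marking the driver's next_rptr copy.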
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index ca72a2e487b9..8bf84efafb04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -60,9 +60,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
INIT_LIST_HEAD(&sa_manager->flist[i]);
- }
r = amdgpu_bo_create(adev, size, align, true, domain,
0, NULL, NULL, &sa_manager->bo);
@@ -228,11 +227,9 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
unsigned soffset, eoffset, wasted;
int i;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (!list_empty(&sa_manager->flist[i])) {
+ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
+ if (!list_empty(&sa_manager->flist[i]))
return true;
- }
- }
soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
@@ -265,12 +262,11 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
/* go over all fence list and try to find the closest sa_bo
* of the current last
*/
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
struct amdgpu_sa_bo *sa_bo;
- if (list_empty(&sa_manager->flist[i])) {
+ if (list_empty(&sa_manager->flist[i]))
continue;
- }
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
@@ -299,7 +295,9 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
}
if (best_bo) {
- uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
+ uint32_t idx = best_bo->fence->context;
+
+ idx %= AMDGPU_SA_NUM_FENCE_LISTS;
++tries[idx];
sa_manager->hole = best_bo->olist.prev;
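
With amdgpu_ring_from_fence() gone, the suballocator no longer maps fences to rings at all; it buckets them purely by fence context. The mapping is just a modulo, so every piece of code touching flist[] must derive the index the same way. A sketch of that shared invariant (the helper name is illustrative, not from the source):

/* Every producer and consumer of sa_manager->flist[] must agree on
 * this mapping; a context collision merely makes the allocator wait
 * on an unrelated fence now and then, which is safe. */
static inline unsigned example_sa_fence_bucket(struct fence *fence)
{
	return fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
}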
@@ -315,14 +313,17 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
- struct fence *fences[AMDGPU_MAX_RINGS];
- unsigned tries[AMDGPU_MAX_RINGS];
+ struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+ unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned count;
int i, r;
signed long t;
- BUG_ON(align > sa_manager->align);
- BUG_ON(size > sa_manager->size);
+ if (WARN_ON_ONCE(align > sa_manager->align))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(size > sa_manager->size))
+ return -EINVAL;
*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
if ((*sa_bo) == NULL) {
@@ -335,7 +336,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
spin_lock(&sa_manager->wq.lock);
do {
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
fences[i] = NULL;
tries[i] = 0;
}
@@ -352,7 +353,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
/* see if we can skip over some allocations */
} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
- for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+ for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (fences[i])
fences[count++] = fence_get(fences[i]);
@@ -394,8 +395,9 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
spin_lock(&sa_manager->wq.lock);
if (fence && !fence_is_signaled(fence)) {
uint32_t idx;
+
(*sa_bo)->fence = fence_get(fence);
- idx = amdgpu_ring_from_fence(fence)->idx;
+ idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
amdgpu_sa_bo_remove_locked(*sa_bo);
@@ -407,25 +409,6 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
#if defined(CONFIG_DEBUG_FS)
-static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
-{
- struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
- struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
-
- if (a_fence)
- seq_printf(m, " protected by 0x%016llx on ring %d",
- a_fence->seq, a_fence->ring->idx);
-
- if (s_fence) {
- struct amdgpu_ring *ring;
-
-
- ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
- seq_printf(m, " protected by 0x%016x on ring %d",
- s_fence->base.seqno, ring->idx);
- }
-}
-
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
@@ -442,8 +425,11 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
}
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
soffset, eoffset, eoffset - soffset);
+
if (i->fence)
- amdgpu_sa_bo_dump_fence(i->fence, m);
+ seq_printf(m, " protected by 0x%08x on context %d",
+ i->fence->seqno, i->fence->context);
+
seq_printf(m, "\n");
}
spin_unlock(&sa_manager->wq.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
deleted file mode 100644
index 438c05254695..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#include <linux/kthread.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <drm/drmP.h>
-#include "amdgpu.h"
-#include "amdgpu_trace.h"
-
-static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
-{
- struct amdgpu_job *job = to_amdgpu_job(sched_job);
- return amdgpu_sync_get_fence(&job->ibs->sync);
-}
-
-static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
-{
- struct amdgpu_fence *fence = NULL;
- struct amdgpu_job *job;
- int r;
-
- if (!sched_job) {
- DRM_ERROR("job is null\n");
- return NULL;
- }
- job = to_amdgpu_job(sched_job);
- trace_amdgpu_sched_run_job(job);
- r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
- if (r) {
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
- goto err;
- }
-
- fence = job->ibs[job->num_ibs - 1].fence;
- fence_get(&fence->base);
-
-err:
- if (job->free_job)
- job->free_job(job);
-
- kfree(job);
- return fence ? &fence->base : NULL;
-}
-
-struct amd_sched_backend_ops amdgpu_sched_ops = {
- .dependency = amdgpu_sched_dependency,
- .run_job = amdgpu_sched_run_job,
-};
-
-int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_ib *ibs,
- unsigned num_ibs,
- int (*free_job)(struct amdgpu_job *),
- void *owner,
- struct fence **f)
-{
- int r = 0;
- if (amdgpu_enable_scheduler) {
- struct amdgpu_job *job =
- kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
- if (!job)
- return -ENOMEM;
- job->base.sched = &ring->sched;
- job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
- job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
- if (!job->base.s_fence) {
- kfree(job);
- return -ENOMEM;
- }
- *f = fence_get(&job->base.s_fence->base);
-
- job->adev = adev;
- job->ibs = ibs;
- job->num_ibs = num_ibs;
- job->owner = owner;
- job->free_job = free_job;
- amd_sched_entity_push_job(&job->base);
- } else {
- r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
- if (r)
- return r;
- *f = fence_get(&ibs[num_ibs - 1].fence->base);
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
deleted file mode 100644
index 1caaf201b708..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright 2011 Christian König.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors:
- * Christian König <deathsimple@vodafone.de>
- */
-#include <drm/drmP.h>
-#include "amdgpu.h"
-#include "amdgpu_trace.h"
-
-int amdgpu_semaphore_create(struct amdgpu_device *adev,
- struct amdgpu_semaphore **semaphore)
-{
- int r;
-
- *semaphore = kmalloc(sizeof(struct amdgpu_semaphore), GFP_KERNEL);
- if (*semaphore == NULL) {
- return -ENOMEM;
- }
- r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
- &(*semaphore)->sa_bo, 8, 8);
- if (r) {
- kfree(*semaphore);
- *semaphore = NULL;
- return r;
- }
- (*semaphore)->waiters = 0;
- (*semaphore)->gpu_addr = amdgpu_sa_bo_gpu_addr((*semaphore)->sa_bo);
-
- *((uint64_t *)amdgpu_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
-
- return 0;
-}
-
-bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore)
-{
- trace_amdgpu_semaphore_signale(ring->idx, semaphore);
-
- if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) {
- --semaphore->waiters;
-
- /* for debugging lockup only, used by sysfs debug files */
- ring->last_semaphore_signal_addr = semaphore->gpu_addr;
- return true;
- }
- return false;
-}
-
-bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore)
-{
- trace_amdgpu_semaphore_wait(ring->idx, semaphore);
-
- if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) {
- ++semaphore->waiters;
-
- /* for debugging lockup only, used by sysfs debug files */
- ring->last_semaphore_wait_addr = semaphore->gpu_addr;
- return true;
- }
- return false;
-}
-
-void amdgpu_semaphore_free(struct amdgpu_device *adev,
- struct amdgpu_semaphore **semaphore,
- struct fence *fence)
-{
- if (semaphore == NULL || *semaphore == NULL) {
- return;
- }
- if ((*semaphore)->waiters > 0) {
- dev_err(adev->dev, "semaphore %p has more waiters than signalers,"
- " hardware lockup imminent!\n", *semaphore);
- }
- amdgpu_sa_bo_free(adev, &(*semaphore)->sa_bo, fence);
- kfree(*semaphore);
- *semaphore = NULL;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 181ce39ef5e5..c48b4fce5e57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -37,6 +37,8 @@ struct amdgpu_sync_entry {
struct fence *fence;
};
+static struct kmem_cache *amdgpu_sync_slab;
+
/**
* amdgpu_sync_create - zero init sync object
*
@@ -46,26 +48,22 @@ struct amdgpu_sync_entry {
*/
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
- unsigned i;
-
- for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
- sync->semaphores[i] = NULL;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- sync->sync_to[i] = NULL;
-
hash_init(sync->fences);
sync->last_vm_update = NULL;
}
+/**
+ * amdgpu_sync_same_dev - test if a fence belongs to us
+ *
+ * @adev: amdgpu device to use for the test
+ * @f: fence to test
+ *
+ * Test if the fence was issued by us.
+ */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
- struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
- if (a_fence)
- return a_fence->ring->adev == adev;
-
if (s_fence) {
struct amdgpu_ring *ring;
@@ -76,17 +74,31 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
return false;
}
-static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+/**
+ * amdgpu_sync_get_owner - extract the owner of a fence
+ *
+ * @f: fence to get the owner from
+ *
+ * Extract who originally created the fence.
+ */
+static void *amdgpu_sync_get_owner(struct fence *f)
{
- struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
if (s_fence)
- return s_fence->owner == owner;
- if (a_fence)
- return a_fence->owner == owner;
- return false;
+ return s_fence->owner;
+
+ return AMDGPU_FENCE_OWNER_UNDEFINED;
}
+/**
+ * amdgpu_sync_keep_later - Keep the later fence
+ *
+ * @keep: existing fence to test
+ * @fence: new fence
+ *
+ * Either keep the existing fence or the new one, depending on which one is later.
+ */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
if (*keep && fence_is_later(*keep, fence))
@@ -107,59 +119,39 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct fence *f)
{
struct amdgpu_sync_entry *e;
- struct amdgpu_fence *fence;
if (!f)
return 0;
if (amdgpu_sync_same_dev(adev, f) &&
- amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
+ amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
amdgpu_sync_keep_later(&sync->last_vm_update, f);
- fence = to_amdgpu_fence(f);
- if (!fence || fence->ring->adev != adev) {
- hash_for_each_possible(sync->fences, e, node, f->context) {
- if (unlikely(e->fence->context != f->context))
- continue;
-
- amdgpu_sync_keep_later(&e->fence, f);
- return 0;
- }
-
- e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
+ hash_for_each_possible(sync->fences, e, node, f->context) {
+ if (unlikely(e->fence->context != f->context))
+ continue;
- hash_add(sync->fences, &e->node, f->context);
- e->fence = fence_get(f);
+ amdgpu_sync_keep_later(&e->fence, f);
return 0;
}
- amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f);
+ e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+ hash_add(sync->fences, &e->node, f->context);
+ e->fence = fence_get(f);
return 0;
}
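
After this change a sync object holds at most one fence per context: a second fence from the same context replaces the first only if fence_is_later() says it supersedes it. A small usage sketch (fence_a and fence_b are assumed to come from the same context):

static int example_sync_dedup(struct amdgpu_device *adev,
			      struct fence *fence_a, struct fence *fence_b)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);

	r = amdgpu_sync_fence(adev, &sync, fence_a);	/* creates the entry */
	if (!r)
		/* same context: entry now points at the later of the two */
		r = amdgpu_sync_fence(adev, &sync, fence_b);

	amdgpu_sync_free(&sync);	/* drops the references it took */
	return r;
}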
-static void *amdgpu_sync_get_owner(struct fence *f)
-{
- struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
-
- if (s_fence)
- return s_fence->owner;
- else if (a_fence)
- return a_fence->owner;
- return AMDGPU_FENCE_OWNER_UNDEFINED;
-}
-
/**
- * amdgpu_sync_resv - use the semaphores to sync to a reservation object
+ * amdgpu_sync_resv - sync to a reservation object
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
* @shared: true if we should only sync to the exclusive fence
*
- * Sync to the fence using the semaphore objects
+ * Sync to the fence
*/
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
@@ -224,7 +216,7 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
f = e->fence;
hash_del(&e->node);
- kfree(e);
+ kmem_cache_free(amdgpu_sync_slab, e);
if (!fence_is_signaled(f))
return f;
@@ -247,109 +239,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
hash_del(&e->node);
fence_put(e->fence);
- kfree(e);
- }
-
- if (amdgpu_enable_semaphores)
- return 0;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct fence *fence = sync->sync_to[i];
- if (!fence)
- continue;
-
- r = fence_wait(fence, false);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-/**
- * amdgpu_sync_rings - sync ring to all registered fences
- *
- * @sync: sync object to use
- * @ring: ring that needs sync
- *
- * Ensure that all registered fences are signaled before letting
- * the ring continue. The caller must hold the ring lock.
- */
-int amdgpu_sync_rings(struct amdgpu_sync *sync,
- struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- unsigned count = 0;
- int i, r;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *other = adev->rings[i];
- struct amdgpu_semaphore *semaphore;
- struct amdgpu_fence *fence;
-
- if (!sync->sync_to[i])
- continue;
-
- fence = to_amdgpu_fence(sync->sync_to[i]);
-
- /* check if we really need to sync */
- if (!amdgpu_enable_scheduler &&
- !amdgpu_fence_need_sync(fence, ring))
- continue;
-
- /* prevent GPU deadlocks */
- if (!other->ready) {
- dev_err(adev->dev, "Syncing to a disabled ring!");
- return -EINVAL;
- }
-
- if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
- r = fence_wait(sync->sync_to[i], true);
- if (r)
- return r;
- continue;
- }
-
- if (count >= AMDGPU_NUM_SYNCS) {
- /* not enough room, wait manually */
- r = fence_wait(&fence->base, false);
- if (r)
- return r;
- continue;
- }
- r = amdgpu_semaphore_create(adev, &semaphore);
- if (r)
- return r;
-
- sync->semaphores[count++] = semaphore;
-
- /* allocate enough space for sync command */
- r = amdgpu_ring_alloc(other, 16);
- if (r)
- return r;
-
- /* emit the signal semaphore */
- if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
- /* signaling wasn't successful wait manually */
- amdgpu_ring_undo(other);
- r = fence_wait(&fence->base, false);
- if (r)
- return r;
- continue;
- }
-
- /* we assume caller has already allocated space on waiters ring */
- if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
- /* waiting wasn't successful wait manually */
- amdgpu_ring_undo(other);
- r = fence_wait(&fence->base, false);
- if (r)
- return r;
- continue;
- }
-
- amdgpu_ring_commit(other);
- amdgpu_fence_note_sync(fence, ring);
+ kmem_cache_free(amdgpu_sync_slab, e);
}
return 0;
@@ -358,15 +248,11 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
/**
* amdgpu_sync_free - free the sync object
*
- * @adev: amdgpu_device pointer
* @sync: sync object to use
- * @fence: fence to use for the free
*
- * Free the sync object by freeing all semaphores in it.
+ * Free the sync object.
*/
-void amdgpu_sync_free(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct fence *fence)
+void amdgpu_sync_free(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
@@ -375,14 +261,34 @@ void amdgpu_sync_free(struct amdgpu_device *adev,
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
fence_put(e->fence);
- kfree(e);
+ kmem_cache_free(amdgpu_sync_slab, e);
}
- for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
- amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
+ fence_put(sync->last_vm_update);
+}
+
+/**
+ * amdgpu_sync_init - init sync object subsystem
+ *
+ * Create the slab cache used for sync entries.
+ */
+int amdgpu_sync_init(void)
+{
+ amdgpu_sync_slab = kmem_cache_create(
+ "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!amdgpu_sync_slab)
+ return -ENOMEM;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- fence_put(sync->sync_to[i]);
+ return 0;
+}
- fence_put(sync->last_vm_update);
+/**
+ * amdgpu_sync_fini - fini sync object subsystem
+ *
+ * Destroy the slab cache.
+ */
+void amdgpu_sync_fini(void)
+{
+ kmem_cache_destroy(amdgpu_sync_slab);
}
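
amdgpu_sync_init()/amdgpu_sync_fini() are global rather than per-device, so they belong in the module's load/unload path; a sketch of that wiring (the function names here are hypothetical):

static int __init example_module_init(void)
{
	int r;

	r = amdgpu_sync_init();	/* create the shared entry cache */
	if (r)
		return r;

	/* ... register the PCI driver, etc. ... */
	return 0;
}

static void __exit example_module_exit(void)
{
	/* ... unregister first, so no sync objects remain live ... */
	amdgpu_sync_fini();
}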
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 4865615e9c06..05a53f4fc334 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -238,144 +238,10 @@ void amdgpu_test_moves(struct amdgpu_device *adev)
amdgpu_do_test_moves(adev);
}
-static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct fence **fence)
-{
- uint32_t handle = ring->idx ^ 0xdeafbeef;
- int r;
-
- if (ring == &adev->uvd.ring) {
- r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
- if (r) {
- DRM_ERROR("Failed to get dummy create msg\n");
- return r;
- }
-
- r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
- if (r) {
- DRM_ERROR("Failed to get dummy destroy msg\n");
- return r;
- }
-
- } else if (ring == &adev->vce.ring[0] ||
- ring == &adev->vce.ring[1]) {
- r = amdgpu_vce_get_create_msg(ring, handle, NULL);
- if (r) {
- DRM_ERROR("Failed to get dummy create msg\n");
- return r;
- }
-
- r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
- if (r) {
- DRM_ERROR("Failed to get dummy destroy msg\n");
- return r;
- }
- } else {
- struct amdgpu_fence *a_fence = NULL;
- r = amdgpu_ring_lock(ring, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
- return r;
- }
- amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
- amdgpu_ring_unlock_commit(ring);
- *fence = &a_fence->base;
- }
- return 0;
-}
-
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
struct amdgpu_ring *ringA,
struct amdgpu_ring *ringB)
{
- struct fence *fence1 = NULL, *fence2 = NULL;
- struct amdgpu_semaphore *semaphore = NULL;
- int r;
-
- r = amdgpu_semaphore_create(adev, &semaphore);
- if (r) {
- DRM_ERROR("Failed to create semaphore\n");
- goto out_cleanup;
- }
-
- r = amdgpu_ring_lock(ringA, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
- goto out_cleanup;
- }
- amdgpu_semaphore_emit_wait(ringA, semaphore);
- amdgpu_ring_unlock_commit(ringA);
-
- r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
- if (r)
- goto out_cleanup;
-
- r = amdgpu_ring_lock(ringA, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
- goto out_cleanup;
- }
- amdgpu_semaphore_emit_wait(ringA, semaphore);
- amdgpu_ring_unlock_commit(ringA);
-
- r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
- if (r)
- goto out_cleanup;
-
- mdelay(1000);
-
- if (fence_is_signaled(fence1)) {
- DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
- goto out_cleanup;
- }
-
- r = amdgpu_ring_lock(ringB, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringB);
- goto out_cleanup;
- }
- amdgpu_semaphore_emit_signal(ringB, semaphore);
- amdgpu_ring_unlock_commit(ringB);
-
- r = fence_wait(fence1, false);
- if (r) {
- DRM_ERROR("Failed to wait for sync fence 1\n");
- goto out_cleanup;
- }
-
- mdelay(1000);
-
- if (fence_is_signaled(fence2)) {
- DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
- goto out_cleanup;
- }
-
- r = amdgpu_ring_lock(ringB, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringB);
- goto out_cleanup;
- }
- amdgpu_semaphore_emit_signal(ringB, semaphore);
- amdgpu_ring_unlock_commit(ringB);
-
- r = fence_wait(fence2, false);
- if (r) {
- DRM_ERROR("Failed to wait for sync fence 1\n");
- goto out_cleanup;
- }
-
-out_cleanup:
- amdgpu_semaphore_free(adev, &semaphore, NULL);
-
- if (fence1)
- fence_put(fence1);
-
- if (fence2)
- fence_put(fence2);
-
- if (r)
- printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
@@ -383,109 +249,6 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
struct amdgpu_ring *ringB,
struct amdgpu_ring *ringC)
{
- struct fence *fenceA = NULL, *fenceB = NULL;
- struct amdgpu_semaphore *semaphore = NULL;
- bool sigA, sigB;
- int i, r;
-
- r = amdgpu_semaphore_create(adev, &semaphore);
- if (r) {
- DRM_ERROR("Failed to create semaphore\n");
- goto out_cleanup;
- }
-
- r = amdgpu_ring_lock(ringA, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
- goto out_cleanup;
- }
- amdgpu_semaphore_emit_wait(ringA, semaphore);
- amdgpu_ring_unlock_commit(ringA);
-
- r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
- if (r)
- goto out_cleanup;
-
- r = amdgpu_ring_lock(ringB, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
- goto out_cleanup;
- }
- amdgpu_semaphore_emit_wait(ringB, semaphore);
- amdgpu_ring_unlock_commit(ringB);
- r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
- if (r)
- goto out_cleanup;
-
- mdelay(1000);
-
- if (fence_is_signaled(fenceA)) {
- DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
- goto out_cleanup;
- }
- if (fence_is_signaled(fenceB)) {
- DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
- goto out_cleanup;
- }
-
- r = amdgpu_ring_lock(ringC, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
- goto out_cleanup;
- }
- amdgpu_semaphore_emit_signal(ringC, semaphore);
- amdgpu_ring_unlock_commit(ringC);
-
- for (i = 0; i < 30; ++i) {
- mdelay(100);
- sigA = fence_is_signaled(fenceA);
- sigB = fence_is_signaled(fenceB);
- if (sigA || sigB)
- break;
- }
-
- if (!sigA && !sigB) {
- DRM_ERROR("Neither fence A nor B has been signaled\n");
- goto out_cleanup;
- } else if (sigA && sigB) {
- DRM_ERROR("Both fence A and B has been signaled\n");
- goto out_cleanup;
- }
-
- DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
-
- r = amdgpu_ring_lock(ringC, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
- goto out_cleanup;
- }
- amdgpu_semaphore_emit_signal(ringC, semaphore);
- amdgpu_ring_unlock_commit(ringC);
-
- mdelay(1000);
-
- r = fence_wait(fenceA, false);
- if (r) {
- DRM_ERROR("Failed to wait for sync fence A\n");
- goto out_cleanup;
- }
- r = fence_wait(fenceB, false);
- if (r) {
- DRM_ERROR("Failed to wait for sync fence B\n");
- goto out_cleanup;
- }
-
-out_cleanup:
- amdgpu_semaphore_free(adev, &semaphore, NULL);
-
- if (fenceA)
- fence_put(fenceA);
-
- if (fenceB)
- fence_put(fenceB);
-
- if (r)
- printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 8f9834ab1bd5..26a5f4acf584 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -38,10 +38,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = p->ibs[i].ring->idx;
- __entry->dw = p->ibs[i].length_dw;
+ __entry->ring = p->job->ring->idx;
+ __entry->dw = p->job->ibs[i].length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- p->ibs[i].ring);
+ p->job->ring);
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
@@ -65,7 +65,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
__entry->fence = &job->base.s_fence->base;
- __entry->ring_name = job->ibs[0].ring->name;
+ __entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
@@ -90,7 +90,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
__entry->fence = &job->base.s_fence->base;
- __entry->ring_name = job->ibs[0].ring->name;
+ __entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
@@ -100,18 +100,24 @@ TRACE_EVENT(amdgpu_sched_run_job,
TRACE_EVENT(amdgpu_vm_grab_id,
- TP_PROTO(unsigned vmid, int ring),
- TP_ARGS(vmid, ring),
+ TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
+ uint64_t pd_addr),
+ TP_ARGS(vm, ring, vmid, pd_addr),
TP_STRUCT__entry(
- __field(u32, vmid)
+ __field(struct amdgpu_vm *, vm)
__field(u32, ring)
+ __field(u32, vmid)
+ __field(u64, pd_addr)
),
TP_fast_assign(
- __entry->vmid = vmid;
+ __entry->vm = vm;
__entry->ring = ring;
+ __entry->vmid = vmid;
+ __entry->pd_addr = pd_addr;
),
- TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
+ TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
+ __entry->ring, __entry->vmid, __entry->pd_addr)
);
TRACE_EVENT(amdgpu_vm_bo_map,
@@ -228,8 +234,8 @@ TRACE_EVENT(amdgpu_vm_flush,
__entry->ring = ring;
__entry->id = id;
),
- TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
- __entry->pd_addr, __entry->ring, __entry->id)
+ TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
+ __entry->ring, __entry->id, __entry->pd_addr)
);
TRACE_EVENT(amdgpu_bo_list_set,
@@ -247,42 +253,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
);
-DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
-
- TP_PROTO(int ring, struct amdgpu_semaphore *sem),
-
- TP_ARGS(ring, sem),
-
- TP_STRUCT__entry(
- __field(int, ring)
- __field(signed, waiters)
- __field(uint64_t, gpu_addr)
- ),
-
- TP_fast_assign(
- __entry->ring = ring;
- __entry->waiters = sem->waiters;
- __entry->gpu_addr = sem->gpu_addr;
- ),
-
- TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
- __entry->waiters, __entry->gpu_addr)
-);
-
-DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_signale,
-
- TP_PROTO(int ring, struct amdgpu_semaphore *sem),
-
- TP_ARGS(ring, sem)
-);
-
-DEFINE_EVENT(amdgpu_semaphore_request, amdgpu_semaphore_wait,
-
- TP_PROTO(int ring, struct amdgpu_semaphore *sem),
-
- TP_ARGS(ring, sem)
-);
-
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1cbb16e15307..11af4492b4be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -77,6 +77,8 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
+ struct amdgpu_ring *ring;
+ struct amd_sched_rq *rq;
int r;
adev->mman.mem_global_referenced = false;
@@ -106,13 +108,27 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
return r;
}
+ ring = adev->mman.buffer_funcs_ring;
+ rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+ r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
+ rq, amdgpu_sched_jobs);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO move run queue.\n");
+ drm_global_item_unref(&adev->mman.mem_global_ref);
+ drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+ return r;
+ }
+
adev->mman.mem_global_referenced = true;
+
return 0;
}
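
This entity-init pattern repeats for UVD and VCE later in the diff; only the ring and the priority class change. A generic sketch of the shape (the enum type name is an assumption; the priority constants and amdgpu_sched_jobs are from the source):

static int example_entity_setup(struct amdgpu_ring *ring,
				struct amd_sched_entity *entity,
				enum amd_sched_priority prio)
{
	struct amd_sched_rq *rq = &ring->sched.sched_rq[prio];

	/* amdgpu_sched_jobs bounds how many jobs the entity may queue */
	return amd_sched_entity_init(&ring->sched, entity, rq,
				     amdgpu_sched_jobs);
}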
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
if (adev->mman.mem_global_referenced) {
+ amd_sched_entity_fini(adev->mman.entity.sched,
+ &adev->mman.entity);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
drm_global_item_unref(&adev->mman.mem_global_ref);
adev->mman.mem_global_referenced = false;
@@ -207,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+ if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+ return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
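
The new check in amdgpu_verify_access() closes a hole rather than changing behavior for normal BOs: a userptr BO's pages already belong to a user-space mapping, so letting a process mmap them again through the TTM VMA would alias memory the driver does not own. Such BOs now simply refuse CPU mapping with -EPERM.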
@@ -368,9 +386,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev;
+ struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
+ /* Can't move a pinned BO */
+ abo = container_of(bo, struct amdgpu_bo, tbo);
+ if (WARN_ON_ONCE(abo->pin_count > 0))
+ return -EINVAL;
+
adev = amdgpu_get_adev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
amdgpu_move_null(bo, new_mem);
@@ -478,32 +502,32 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
/*
* TTM backend functions.
*/
+struct amdgpu_ttm_gup_task_list {
+ struct list_head list;
+ struct task_struct *task;
+};
+
struct amdgpu_ttm_tt {
- struct ttm_dma_tt ttm;
- struct amdgpu_device *adev;
- u64 offset;
- uint64_t userptr;
- struct mm_struct *usermm;
- uint32_t userflags;
+ struct ttm_dma_tt ttm;
+ struct amdgpu_device *adev;
+ u64 offset;
+ uint64_t userptr;
+ struct mm_struct *usermm;
+ uint32_t userflags;
+ spinlock_t guptasklock;
+ struct list_head guptasks;
+ atomic_t mmu_invalidations;
};
-/* prepare the sg table with the user pages */
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned pinned = 0, nents;
- int r;
-
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
- enum dma_data_direction direction = write ?
- DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
-
- if (current->mm != gtt->usermm)
- return -EPERM;
+ unsigned pinned = 0;
+ int r;
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
- /* check that we only pin down anonymous memory
+ /* check that we only use anonymous memory
to prevent problems with writeback */
unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
struct vm_area_struct *vma;
@@ -516,10 +540,20 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
do {
unsigned num_pages = ttm->num_pages - pinned;
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
- struct page **pages = ttm->pages + pinned;
+ struct page **p = pages + pinned;
+ struct amdgpu_ttm_gup_task_list guptask;
+
+ guptask.task = current;
+ spin_lock(&gtt->guptasklock);
+ list_add(&guptask.list, &gtt->guptasks);
+ spin_unlock(&gtt->guptasklock);
+
+ r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+
+ spin_lock(&gtt->guptasklock);
+ list_del(&guptask.list);
+ spin_unlock(&gtt->guptasklock);
- r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
if (r < 0)
goto release_pages;
@@ -527,6 +561,25 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
} while (pinned < ttm->num_pages);
+ return 0;
+
+release_pages:
+ release_pages(pages, pinned, 0);
+ return r;
+}
+
+/* prepare the sg table with the user pages */
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+{
+ struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned nents;
+ int r;
+
+ int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ enum dma_data_direction direction = write ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
@@ -545,9 +598,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
release_sg:
kfree(ttm->sg);
-
-release_pages:
- release_pages(ttm->pages, pinned, 0);
return r;
}
@@ -574,7 +624,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
sg_free_table(ttm->sg);
@@ -770,38 +820,61 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
gtt->userptr = addr;
gtt->usermm = current->mm;
gtt->userflags = flags;
+ spin_lock_init(&gtt->guptasklock);
+ INIT_LIST_HEAD(&gtt->guptasks);
+ atomic_set(&gtt->mmu_invalidations, 0);
+
return 0;
}
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
if (gtt == NULL)
- return false;
+ return NULL;
- return !!gtt->userptr;
+ return gtt->usermm;
}
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_gup_task_list *entry;
unsigned long size;
- if (gtt == NULL)
- return false;
-
- if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+ if (gtt == NULL || !gtt->userptr)
return false;
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
+ spin_lock(&gtt->guptasklock);
+ list_for_each_entry(entry, &gtt->guptasks, list) {
+ if (entry->task == current) {
+ spin_unlock(&gtt->guptasklock);
+ return false;
+ }
+ }
+ spin_unlock(&gtt->guptasklock);
+
+ atomic_inc(&gtt->mmu_invalidations);
+
return true;
}
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ int prev_invalidated = *last_invalidated;
+
+ *last_invalidated = atomic_read(&gtt->mmu_invalidations);
+ return prev_invalidated != *last_invalidated;
+}
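
The counter turns MMU-notifier callbacks into something pollable: amdgpu_ttm_tt_affect_userptr() bumps mmu_invalidations for any invalidation not caused by our own get_user_pages() (that is what the guptasks list filters out), and amdgpu_ttm_tt_userptr_invalidated() compares against a caller-held snapshot. A caller-side sketch, with last_inval as assumed per-BO state captured at pin time:

static bool example_userptr_needs_repin(struct ttm_tt *ttm, int *last_inval)
{
	/* true if any foreign invalidation hit the range since *last_inval
	 * was recorded; the call also refreshes the snapshot */
	if (amdgpu_ttm_tt_userptr_invalidated(ttm, last_inval)) {
		/* pages may be stale: re-run amdgpu_ttm_tt_get_user_pages()
		 * before trusting the mapping again */
		return true;
	}
	return false;
}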
+
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1015,9 +1088,10 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
struct fence **fence)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+
uint32_t max_bytes;
unsigned num_loops, num_dw;
- struct amdgpu_ib *ib;
unsigned i;
int r;
@@ -1029,20 +1103,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
while (num_dw & 0x7)
num_dw++;
- ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!ib)
- return -ENOMEM;
-
- r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib);
- if (r) {
- kfree(ib);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ if (r)
return r;
- }
-
- ib->length_dw = 0;
if (resv) {
- r = amdgpu_sync_resv(adev, &ib->sync, resv,
+ r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
@@ -1053,31 +1119,25 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
for (i = 0; i < num_loops; i++) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
- amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
- cur_size_in_bytes);
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
+ dst_offset, cur_size_in_bytes);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
}
- amdgpu_vm_pad_ib(adev, ib);
- WARN_ON(ib->length_dw > num_dw);
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
- &amdgpu_vm_free_job,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- fence);
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+ r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
- if (!amdgpu_enable_scheduler) {
- amdgpu_ib_free(adev, ib);
- kfree(ib);
- }
return 0;
+
error_free:
- amdgpu_ib_free(adev, ib);
- kfree(ib);
+ amdgpu_job_free(job);
return r;
}
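
The shape of the conversion above recurs throughout this series: allocate a job with an embedded IB, fill it, pad it, and hand it to a scheduler entity, calling amdgpu_job_free() only on the error path (on success the scheduler owns the job). Boiled down to a skeleton (packet writing elided):

static int example_job_submit(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring,
			      struct amd_sched_entity *entity,
			      unsigned num_dw, struct fence **fence)
{
	struct amdgpu_job *job;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	/* ... emit packets into job->ibs[0], advancing length_dw ... */

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, ring, entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		amdgpu_job_free(job);
	return r;
}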
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 53f987aeeacf..871018c634e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -91,6 +91,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
+ struct amdgpu_ring *ring;
+ struct amd_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
@@ -156,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
+ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+ (family_id << 8));
+
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -191,6 +196,15 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+ ring = &adev->uvd.ring;
+ rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+ r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
+ rq, amdgpu_sched_jobs);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up UVD run queue.\n");
+ return r;
+ }
+
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
atomic_set(&adev->uvd.handles[i], 0);
adev->uvd.filp[i] = NULL;
@@ -210,6 +224,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
if (adev->uvd.vcpu_bo == NULL)
return 0;
+ amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+
r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
if (!r) {
amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
@@ -228,32 +244,30 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = &adev->uvd.ring;
- int i, r;
+ unsigned size;
+ void *ptr;
+ int i;
if (adev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
- uint32_t handle = atomic_read(&adev->uvd.handles[i]);
- if (handle != 0) {
- struct fence *fence;
+ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
- amdgpu_uvd_note_usage(adev);
+ if (i == AMDGPU_MAX_UVD_HANDLES)
+ return 0;
- r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
- if (r) {
- DRM_ERROR("Error destroying UVD (%d)!\n", r);
- continue;
- }
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
- fence_wait(fence, false);
- fence_put(fence);
+ size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+ ptr = adev->uvd.cpu_addr;
- adev->uvd.filp[i] = NULL;
- atomic_set(&adev->uvd.handles[i], 0);
- }
- }
+ adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+ if (!adev->uvd.saved_bo)
+ return -ENOMEM;
+
+ memcpy(adev->uvd.saved_bo, ptr, size);
return 0;
}
@@ -262,23 +276,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
- const struct common_firmware_header *hdr;
- unsigned offset;
if (adev->uvd.vcpu_bo == NULL)
return -EINVAL;
- hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
- (adev->uvd.fw->size) - offset);
-
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
- size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr = adev->uvd.cpu_addr;
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
- memset(ptr, 0, size);
+ if (adev->uvd.saved_bo != NULL) {
+ memcpy(ptr, adev->uvd.saved_bo, size);
+ kfree(adev->uvd.saved_bo);
+ adev->uvd.saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned offset;
+
+ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+ (adev->uvd.fw->size) - offset);
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ memset(ptr, 0, size);
+ }
return 0;
}
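
The suspend/resume pair now preserves session state instead of tearing sessions down: suspend snapshots the whole VCPU BO into kernel memory once any handle is open, and resume either replays that snapshot or falls back to loading the firmware image and zeroing the stack/heap area. In sketch form:

/*
 *   suspend: if any handle is open:
 *              saved_bo = kmalloc(size);
 *              memcpy(saved_bo, vcpu_bo contents, size);
 *   resume:  if (saved_bo)  memcpy it back, kfree, clear   (warm restart)
 *            else           copy fw image + zero the rest  (cold boot)
 */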
@@ -295,7 +315,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
amdgpu_uvd_note_usage(adev);
- r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+ r = amdgpu_uvd_get_destroy_msg(ring, handle,
+ false, &fence);
if (r) {
DRM_ERROR("Error destroying UVD (%d)!\n", r);
continue;
@@ -525,13 +546,6 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
return -EINVAL;
}
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (r < 0) {
- DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
- return r;
- }
-
r = amdgpu_bo_kmap(bo, &ptr);
if (r) {
DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
@@ -616,7 +630,6 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
- struct amdgpu_ib *ib;
uint32_t cmd, lo, hi;
uint64_t start, end;
uint64_t addr;
@@ -638,9 +651,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
start += addr;
- ib = &ctx->parser->ibs[ctx->ib_idx];
- ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
- ib->ptr[ctx->data1] = start >> 32;
+ amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
+ lower_32_bits(start));
+ amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
+ upper_32_bits(start));
cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
if (cmd < 0x4) {
@@ -702,7 +716,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
- struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
+ struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
int i, r;
ctx->idx++;
@@ -748,7 +762,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
- struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
+ struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
int r;
for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
@@ -790,7 +804,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
[0x00000003] = 2048,
[0x00000004] = 0xFFFFFFFF,
};
- struct amdgpu_ib *ib = &parser->ibs[ib_idx];
+ struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r;
if (ib->length_dw % 16) {
@@ -823,22 +837,14 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return 0;
}
-static int amdgpu_uvd_free_job(
- struct amdgpu_job *job)
-{
- amdgpu_ib_free(job->adev, job->ibs);
- kfree(job->ibs);
- return 0;
-}
-
-static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct fence **fence)
+static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+ bool direct, struct fence **fence)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
- struct amdgpu_ib *ib = NULL;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
struct fence *f = NULL;
struct amdgpu_device *adev = ring->adev;
uint64_t addr;
@@ -862,15 +868,12 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
goto err;
- ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!ib) {
- r = -ENOMEM;
- goto err;
- }
- r = amdgpu_ib_get(ring, NULL, 64, ib);
+
+ r = amdgpu_job_alloc_with_ib(adev, 64, &job);
if (r)
- goto err1;
+ goto err;
+ ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
ib->ptr[1] = addr;
@@ -882,12 +885,19 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
ib->ptr[i] = PACKET2(0);
ib->length_dw = 16;
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
- &amdgpu_uvd_free_job,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- &f);
- if (r)
- goto err2;
+ if (direct) {
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = f;
+ if (r)
+ goto err_free;
+
+ amdgpu_job_free(job);
+ } else {
+ r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err_free;
+ }
ttm_eu_fence_buffer_objects(&ticket, &head, f);
@@ -895,16 +905,12 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
*fence = fence_get(f);
amdgpu_bo_unref(&bo);
fence_put(f);
- if (amdgpu_enable_scheduler)
- return 0;
- amdgpu_ib_free(ring->adev, ib);
- kfree(ib);
return 0;
-err2:
- amdgpu_ib_free(ring->adev, ib);
-err1:
- kfree(ib);
+
+err_free:
+ amdgpu_job_free(job);
+
err:
ttm_eu_backoff_reservation(&ticket, &head);
return r;
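
The direct flag selects between two submission modes for the same message: true bypasses the scheduler via amdgpu_ib_schedule() (used for messages issued during init, ring tests, and teardown, when queuing behind the entity is unavailable or unsafe), while false routes through adev->uvd.entity so the message is serialized with normal UVD jobs. The two call shapes, with BO construction elided:

/* direct: early init, ring/IB tests, teardown */
r = amdgpu_uvd_send_msg(ring, bo, true,  &fence);

/* queued: runtime destroy messages, ordered behind other UVD work */
r = amdgpu_uvd_send_msg(ring, bo, false, &fence);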
@@ -959,11 +965,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
amdgpu_bo_kunmap(bo);
amdgpu_bo_unreserve(bo);
- return amdgpu_uvd_send_msg(ring, bo, fence);
+ return amdgpu_uvd_send_msg(ring, bo, true, fence);
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ bool direct, struct fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_bo *bo;
@@ -1001,7 +1007,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
amdgpu_bo_kunmap(bo);
amdgpu_bo_unreserve(bo);
- return amdgpu_uvd_send_msg(ring, bo, fence);
+ return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 1724c2c86151..9a3b449081a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -31,7 +31,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct fence **fence);
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ bool direct, struct fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a745eeeb5d82..481a64fa9b47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -74,6 +74,8 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
*/
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
+ struct amdgpu_ring *ring;
+ struct amd_sched_rq *rq;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned ucode_version, version_major, version_minor, binary_id;
@@ -170,6 +172,16 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return r;
}
+
+ ring = &adev->vce.ring[0];
+ rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+ r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
+ rq, amdgpu_sched_jobs);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCE run queue.\n");
+ return r;
+ }
+
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
atomic_set(&adev->vce.handles[i], 0);
adev->vce.filp[i] = NULL;
@@ -190,6 +202,8 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
+ amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+
amdgpu_bo_unref(&adev->vce.vcpu_bo);
amdgpu_ring_fini(&adev->vce.ring[0]);
@@ -220,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
@@ -337,7 +352,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
amdgpu_vce_note_usage(adev);
- r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
+ r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
if (r)
DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
@@ -346,14 +361,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
}
-static int amdgpu_vce_free_job(
- struct amdgpu_job *job)
-{
- amdgpu_ib_free(job->adev, job->ibs);
- kfree(job->ibs);
- return 0;
-}
-
/**
* amdgpu_vce_get_create_msg - generate a VCE create msg
*
@@ -368,21 +375,17 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct fence **fence)
{
const unsigned ib_size_dw = 1024;
- struct amdgpu_ib *ib = NULL;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
struct fence *f = NULL;
- struct amdgpu_device *adev = ring->adev;
uint64_t dummy;
int i, r;
- ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!ib)
- return -ENOMEM;
- r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
- kfree(ib);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
return r;
- }
+
+ ib = &job->ibs[0];
dummy = ib->gpu_addr + 1024;
@@ -423,20 +426,19 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
- &amdgpu_vce_free_job,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = f;
if (r)
goto err;
+
+ amdgpu_job_free(job);
if (fence)
*fence = fence_get(f);
fence_put(f);
- if (amdgpu_enable_scheduler)
- return 0;
+ return 0;
+
err:
- amdgpu_ib_free(adev, ib);
- kfree(ib);
+ amdgpu_job_free(job);
return r;
}
@@ -451,26 +453,20 @@ err:
* Close up a stream for HW test or if userspace failed to do so
*/
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence)
+ bool direct, struct fence **fence)
{
const unsigned ib_size_dw = 1024;
- struct amdgpu_ib *ib = NULL;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
struct fence *f = NULL;
- struct amdgpu_device *adev = ring->adev;
uint64_t dummy;
int i, r;
- ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!ib)
- return -ENOMEM;
-
- r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
- if (r) {
- kfree(ib);
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
return r;
- }
+ ib = &job->ibs[0];
dummy = ib->gpu_addr + 1024;
/* stitch together an VCE destroy msg */
@@ -490,20 +486,28 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
- &amdgpu_vce_free_job,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- &f);
- if (r)
- goto err;
+
+ if (direct) {
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = f;
+ if (r)
+ goto err;
+
+ amdgpu_job_free(job);
+ } else {
+ r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err;
+ }
+
if (fence)
*fence = fence_get(f);
fence_put(f);
- if (amdgpu_enable_scheduler)
- return 0;
+ return 0;
+
err:
- amdgpu_ib_free(adev, ib);
- kfree(ib);
+ amdgpu_job_free(job);
return r;
}
@@ -521,7 +525,6 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
int lo, int hi, unsigned size, uint32_t index)
{
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_ib *ib = &p->ibs[ib_idx];
struct amdgpu_bo *bo;
uint64_t addr;
@@ -550,8 +553,8 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
addr += amdgpu_bo_gpu_offset(bo);
addr -= ((uint64_t)size) * ((uint64_t)index);
- ib->ptr[lo] = addr & 0xFFFFFFFF;
- ib->ptr[hi] = addr >> 32;
+ amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
+ amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));
return 0;
}
@@ -606,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
*/
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
- struct amdgpu_ib *ib = &p->ibs[ib_idx];
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
unsigned fb_idx = 0, bs_idx = 0;
int session_idx = -1;
bool destroyed = false;
@@ -743,30 +746,6 @@ out:
}
/**
- * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
- *
- * @ring: engine to use
- * @semaphore: address of semaphore
- * @emit_wait: true=emit wait, false=emit signal
- *
- */
-bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
- amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
- amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
- amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
- if (!emit_wait)
- amdgpu_ring_write(ring, VCE_CMD_END);
-
- return true;
-}
-
-/**
* amdgpu_vce_ring_emit_ib - execute indirect buffer
*
* @ring: engine to use
@@ -814,14 +793,14 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
- r = amdgpu_ring_lock(ring, 16);
+ r = amdgpu_ring_alloc(ring, 16);
if (r) {
DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
ring->idx, r);
return r;
}
amdgpu_ring_write(ring, VCE_CMD_END);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
@@ -862,7 +841,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
goto error;
}
- r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
+ r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
if (r) {
DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index ba2da8ee5906..ef99d2370182 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -31,12 +31,9 @@ int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct fence **fence);
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct fence **fence);
+ bool direct, struct fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9599f7559b3d..b6c011b83641 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -50,12 +50,15 @@
* SI supports 16.
*/
+/* Special value indicating that no flush is necessary */
+#define AMDGPU_VM_NO_FLUSH (~0ll)
+
/**
* amdgpu_vm_num_pde - return the number of page directory entries
*
* @adev: amdgpu_device pointer
*
- * Calculate the number of page directory entries (cayman+).
+ * Calculate the number of page directory entries.
*/
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
@@ -67,7 +70,7 @@ static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
- * Calculate the size of the page directory in bytes (cayman+).
+ * Calculate the size of the page directory in bytes.
*/
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
@@ -89,11 +92,10 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct amdgpu_bo_list_entry *entry)
{
entry->robj = vm->page_directory;
- entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
- entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->priority = 0;
entry->tv.bo = &vm->page_directory->tbo;
entry->tv.shared = true;
+ entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -154,133 +156,154 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
* @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
*
* Allocate an id for the vm, adding fences to the sync obj as necessary.
- *
- * Global mutex must be locked!
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync, struct fence *fence,
+ unsigned *vm_id, uint64_t *vm_pd_addr)
{
- struct fence *best[AMDGPU_MAX_RINGS] = {};
- struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+ uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vm_id *id = &vm->ids[ring->idx];
+ struct fence *updates = sync->last_vm_update;
+ int r;
- unsigned choices[2] = {};
- unsigned i;
+ mutex_lock(&adev->vm_manager.lock);
/* check if the id is still valid */
- if (vm_id->id) {
- unsigned id = vm_id->id;
+ if (id->mgr_id) {
+ struct fence *flushed = id->flushed_updates;
+ bool is_later;
long owner;
- owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
- if (owner == (long)vm) {
- trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+ if (!flushed)
+ is_later = true;
+ else if (!updates)
+ is_later = false;
+ else
+ is_later = fence_is_later(updates, flushed);
+
+ owner = atomic_long_read(&id->mgr_id->owner);
+ if (!is_later && owner == (long)id &&
+ pd_addr == id->pd_gpu_addr) {
+
+ r = amdgpu_sync_fence(ring->adev, sync,
+ id->mgr_id->active);
+ if (r) {
+ mutex_unlock(&adev->vm_manager.lock);
+ return r;
+ }
+
+ fence_put(id->mgr_id->active);
+ id->mgr_id->active = fence_get(fence);
+
+ list_move_tail(&id->mgr_id->list,
+ &adev->vm_manager.ids_lru);
+
+ *vm_id = id->mgr_id - adev->vm_manager.ids;
+ *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
+ *vm_pd_addr);
+
+ mutex_unlock(&adev->vm_manager.lock);
return 0;
}
}
- /* we definately need to flush */
- vm_id->pd_gpu_addr = ~0ll;
+ id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
+ struct amdgpu_vm_manager_id,
+ list);
- /* skip over VMID 0, since it is the system VM */
- for (i = 1; i < adev->vm_manager.nvm; ++i) {
- struct fence *fence = adev->vm_manager.ids[i].active;
- struct amdgpu_ring *fring;
-
- if (fence == NULL) {
- /* found a free one */
- vm_id->id = i;
- trace_amdgpu_vm_grab_id(i, ring->idx);
- return 0;
- }
+ r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
+ if (!r) {
+ fence_put(id->mgr_id->active);
+ id->mgr_id->active = fence_get(fence);
- fring = amdgpu_ring_from_fence(fence);
- if (best[fring->idx] == NULL ||
- fence_is_later(best[fring->idx], fence)) {
- best[fring->idx] = fence;
- choices[fring == ring ? 0 : 1] = i;
- }
- }
+ fence_put(id->flushed_updates);
+ id->flushed_updates = fence_get(updates);
- for (i = 0; i < 2; ++i) {
- if (choices[i]) {
- struct fence *fence;
+ id->pd_gpu_addr = pd_addr;
- fence = adev->vm_manager.ids[choices[i]].active;
- vm_id->id = choices[i];
+ list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
+ atomic_long_set(&id->mgr_id->owner, (long)id);
- trace_amdgpu_vm_grab_id(choices[i], ring->idx);
- return amdgpu_sync_fence(ring->adev, sync, fence);
- }
+ *vm_id = id->mgr_id - adev->vm_manager.ids;
+ *vm_pd_addr = pd_addr;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
}
- /* should never happen */
- BUG();
- return -EINVAL;
+ mutex_unlock(&adev->vm_manager.lock);
+ return r;
}
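/* The reuse check above boils down to a small predicate; a sketch assuming
 * the fence_is_later() semantics used in this file (the helper name is
 * hypothetical). An ID can be kept without flushing only if no VM update
 * newer than the last flushed one exists and the page directory has not
 * moved. */
static bool vm_id_reusable_sketch(struct fence *flushed, struct fence *updates,
				  uint64_t pd_addr, uint64_t id_pd_addr)
{
	bool is_later;

	if (!flushed)
		is_later = true;	/* ID never flushed: cannot reuse */
	else if (!updates)
		is_later = false;	/* no pending updates at all */
	else
		is_later = fence_is_later(updates, flushed);

	return !is_later && pd_addr == id_pd_addr;
}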
/**
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
- * @vm: vm we want to flush
- * @updates: last vm update that we waited for
+ * @vm_id: vmid number to use
+ * @pd_addr: address of the page directory
*
- * Flush the vm (cayman+).
- *
- * Global and local mutex must be locked!
+ * Emit a VM flush when it is necessary.
*/
void amdgpu_vm_flush(struct amdgpu_ring *ring,
- struct amdgpu_vm *vm,
- struct fence *updates)
+ unsigned vm_id, uint64_t pd_addr,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size)
{
- uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
- struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
- struct fence *flushed_updates = vm_id->flushed_updates;
- bool is_later;
-
- if (!flushed_updates)
- is_later = true;
- else if (!updates)
- is_later = false;
- else
- is_later = fence_is_later(updates, flushed_updates);
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+ bool gds_switch_needed = ring->funcs->emit_gds_switch && (
+ mgr_id->gds_base != gds_base ||
+ mgr_id->gds_size != gds_size ||
+ mgr_id->gws_base != gws_base ||
+ mgr_id->gws_size != gws_size ||
+ mgr_id->oa_base != oa_base ||
+ mgr_id->oa_size != oa_size);
+
+ if (ring->funcs->emit_pipeline_sync && (
+ pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
+ amdgpu_ring_emit_pipeline_sync(ring);
+
+ if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+ trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
+ amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+ }
- if (pd_addr != vm_id->pd_gpu_addr || is_later) {
- trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
- if (is_later) {
- vm_id->flushed_updates = fence_get(updates);
- fence_put(flushed_updates);
- }
- vm_id->pd_gpu_addr = pd_addr;
- amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
+ if (gds_switch_needed) {
+ mgr_id->gds_base = gds_base;
+ mgr_id->gds_size = gds_size;
+ mgr_id->gws_base = gws_base;
+ mgr_id->gws_size = gws_size;
+ mgr_id->oa_base = oa_base;
+ mgr_id->oa_size = oa_size;
+ amdgpu_ring_emit_gds_switch(ring, vm_id,
+ gds_base, gds_size,
+ gws_base, gws_size,
+ oa_base, oa_size);
}
}
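/* A usage sketch for the AMDGPU_VM_NO_FLUSH sentinel, assuming the
 * grab_id/flush pairing from this patch: amdgpu_vm_grab_id() reports either
 * a real page directory address (flush required) or the sentinel (the ID was
 * reused, so amdgpu_vm_flush() may skip the flush). This is a fragment;
 * vm, ring, sync and fence come from the job being submitted. */
	unsigned vm_id;
	uint64_t vm_pd_addr;

	r = amdgpu_vm_grab_id(vm, ring, sync, fence, &vm_id, &vm_pd_addr);
	if (!r)
		/* GDS/GWS/OA parameters zeroed for brevity in this sketch */
		amdgpu_vm_flush(ring, vm_id, vm_pd_addr, 0, 0, 0, 0, 0, 0);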
/**
- * amdgpu_vm_fence - remember fence for vm
+ * amdgpu_vm_reset_id - reset VMID to zero
*
- * @adev: amdgpu_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
+ * @adev: amdgpu device structure
+ * @vm_id: vmid number to use
*
- * Global and local mutex must be locked!
+ * Reset saved GDS, GWS and OA to force a switch on the next flush.
*/
-void amdgpu_vm_fence(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct fence *fence)
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
- struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
- unsigned vm_id = vm->ids[ring->idx].id;
-
- fence_put(adev->vm_manager.ids[vm_id].active);
- adev->vm_manager.ids[vm_id].active = fence_get(fence);
- atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
+ struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+
+ mgr_id->gds_base = 0;
+ mgr_id->gds_size = 0;
+ mgr_id->gws_base = 0;
+ mgr_id->gws_size = 0;
+ mgr_id->oa_base = 0;
+ mgr_id->oa_size = 0;
}
/**
@@ -289,7 +312,7 @@ void amdgpu_vm_fence(struct amdgpu_device *adev,
* @vm: requested vm
* @bo: requested buffer object
*
- * Find @bo inside the requested vm (cayman+).
+ * Find @bo inside the requested vm.
* Search inside the @bos vm list for the requested vm
* Returns the found bo_va or NULL if none is found
*
@@ -312,32 +335,40 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* amdgpu_vm_update_pages - helper to call the right asic function
*
* @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: GTT hw access flags
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: hw access flags
- * @gtt_flags: GTT hw access flags
*
* Traces the parameters and calls the right asic functions
* to setup the page table using the DMA.
*/
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
+ struct amdgpu_gart *gtt,
+ uint32_t gtt_flags,
struct amdgpu_ib *ib,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
- uint32_t flags, uint32_t gtt_flags)
+ uint32_t flags)
{
trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
- if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
- uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
+ if ((gtt == &adev->gart) && (flags == gtt_flags)) {
+ uint64_t src = gtt->table_addr + (addr >> 12) * 8;
amdgpu_vm_copy_pte(adev, ib, pe, src, count);
- } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
- amdgpu_vm_write_pte(adev, ib, pe, addr,
- count, incr, flags);
+ } else if (gtt) {
+ dma_addr_t *pages_addr = gtt->pages_addr;
+ amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
+ count, incr, flags);
+
+ } else if (count < 3) {
+ amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+ count, incr, flags);
} else {
amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
@@ -345,15 +376,6 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
}
}
-int amdgpu_vm_free_job(struct amdgpu_job *job)
-{
- int i;
- for (i = 0; i < job->num_ibs; i++)
- amdgpu_ib_free(job->adev, &job->ibs[i]);
- kfree(job->ibs);
- return 0;
-}
-
/**
* amdgpu_vm_clear_bo - initially clear the page dir/table
*
@@ -363,15 +385,18 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
* need to reserve bo first before calling it.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
{
- struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+ struct amdgpu_ring *ring;
struct fence *fence = NULL;
- struct amdgpu_ib *ib;
+ struct amdgpu_job *job;
unsigned entries;
uint64_t addr;
int r;
+ ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
return r;
@@ -383,56 +408,57 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
- ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!ib)
+ r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ if (r)
goto error;
- r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
+ amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
+ 0, 0);
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+
+ WARN_ON(job->ibs[0].length_dw > 64);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
goto error_free;
- ib->length_dw = 0;
-
- amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
- amdgpu_vm_pad_ib(adev, ib);
- WARN_ON(ib->length_dw > 64);
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
- &amdgpu_vm_free_job,
- AMDGPU_FENCE_OWNER_VM,
- &fence);
- if (!r)
- amdgpu_bo_fence(bo, fence, true);
+ amdgpu_bo_fence(bo, fence, true);
fence_put(fence);
- if (amdgpu_enable_scheduler)
- return 0;
+ return 0;
error_free:
- amdgpu_ib_free(adev, ib);
- kfree(ib);
+ amdgpu_job_free(job);
error:
return r;
}
/**
- * amdgpu_vm_map_gart - get the physical address of a gart page
+ * amdgpu_vm_map_gart - Resolve gart mapping of addr
*
- * @adev: amdgpu_device pointer
+ * @pages_addr: optional DMA address to use for lookup
* @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
- * to (cayman+).
- * Returns the physical address of the page.
+ * to and return the value to program into the page table entry.
*/
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
uint64_t result;
- /* page table offset */
- result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
+ if (pages_addr) {
+ /* page table offset */
+ result = pages_addr[addr >> PAGE_SHIFT];
+
+ /* in case cpu page size != gpu page size */
+ result |= addr & (~PAGE_MASK);
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
+ } else {
+ /* No mapping required */
+ result = addr;
+ }
+
+ result &= 0xFFFFFFFFFFFFF000ULL;
return result;
}
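/* Worked example for the lookup above, assuming 64K CPU pages (PAGE_SHIFT
 * = 16) and 4K GPU pages: for addr = 0x1B345,
 *   pages_addr[0x1B345 >> 16] == pages_addr[1]  -> DMA base of the CPU page
 *   result |= 0x1B345 & ~PAGE_MASK == 0xB345    -> offset inside that page
 *   result &= 0xFFFFFFFFFFFFF000ULL             -> keeps 0xB000, selecting
 * the 4K GPU page within the 64K CPU page; with pages_addr == NULL the
 * address passes through unchanged except for the PTE alignment mask. */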
@@ -446,45 +472,37 @@ uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
* @end: end of GPU address range
*
* Allocates new page tables if necessary
- * and updates the page directory (cayman+).
+ * and updates the page directory.
* Returns 0 for success, error for failure.
- *
- * Global and local mutex must be locked!
*/
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+ struct amdgpu_ring *ring;
struct amdgpu_bo *pd = vm->page_directory;
uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
+ struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *fence = NULL;
int r;
+ ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
/* padding, etc. */
ndw = 64;
/* assume the worst case */
ndw += vm->max_pde_used * 6;
- /* update too big for an IB */
- if (ndw > 0xfffff)
- return -ENOMEM;
-
- ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!ib)
- return -ENOMEM;
-
- r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
- if (r) {
- kfree(ib);
+ r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+ if (r)
return r;
- }
- ib->length_dw = 0;
+
+ ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -504,9 +522,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
((last_pt + incr * count) != pt)) {
if (count) {
- amdgpu_vm_update_pages(adev, ib, last_pde,
- last_pt, count, incr,
- AMDGPU_PTE_VALID, 0);
+ amdgpu_vm_update_pages(adev, NULL, 0, ib,
+ last_pde, last_pt,
+ count, incr,
+ AMDGPU_PTE_VALID);
}
count = 1;
@@ -518,17 +537,16 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
- amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
- incr, AMDGPU_PTE_VALID, 0);
+ amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
if (ib->length_dw != 0) {
- amdgpu_vm_pad_ib(adev, ib);
- amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+ amdgpu_ring_pad_ib(ring, ib);
+ amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
WARN_ON(ib->length_dw > ndw);
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
- &amdgpu_vm_free_job,
- AMDGPU_FENCE_OWNER_VM,
- &fence);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
goto error_free;
@@ -536,18 +554,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
fence_put(vm->page_directory_fence);
vm->page_directory_fence = fence_get(fence);
fence_put(fence);
- }
- if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
- amdgpu_ib_free(adev, ib);
- kfree(ib);
+ } else {
+ amdgpu_job_free(job);
}
return 0;
error_free:
- amdgpu_ib_free(adev, ib);
- kfree(ib);
+ amdgpu_job_free(job);
return r;
}
@@ -555,20 +570,20 @@ error_free:
* amdgpu_vm_frag_ptes - add fragment information to PTEs
*
* @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: GTT hw mapping flags
* @ib: IB for the update
* @pe_start: first PTE to handle
* @pe_end: last PTE to handle
* @addr: addr those PTEs should point to
* @flags: hw mapping flags
- * @gtt_flags: GTT hw mapping flags
- *
- * Global and local mutex must be locked!
*/
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
+ struct amdgpu_gart *gtt,
+ uint32_t gtt_flags,
struct amdgpu_ib *ib,
uint64_t pe_start, uint64_t pe_end,
- uint64_t addr, uint32_t flags,
- uint32_t gtt_flags)
+ uint64_t addr, uint32_t flags)
{
/**
* The MC L1 TLB supports variable sized pages, based on a fragment
@@ -598,36 +613,39 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
unsigned count;
+ /* Abort early if there isn't anything to do */
+ if (pe_start == pe_end)
+ return;
+
/* system pages are not contiguous */
- if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
- (frag_start >= frag_end)) {
+ if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
- amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
- AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+ amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
+ addr, count, AMDGPU_GPU_PAGE_SIZE,
+ flags);
return;
}
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
- amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
- AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+ amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
+ count, AMDGPU_GPU_PAGE_SIZE, flags);
addr += AMDGPU_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
- amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
- AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
- gtt_flags);
+ amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
+ AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += AMDGPU_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
- amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
- AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
+ amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
+ count, AMDGPU_GPU_PAGE_SIZE, flags);
}
}
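/* A sketch of the three-way split performed above, assuming 8-byte PTEs and
 * a fragment-aligned window [frag_start, frag_end) inside
 * [pe_start, pe_end); the helper name is hypothetical. */
static void frag_split_sketch(uint64_t pe_start, uint64_t pe_end,
			      uint64_t frag_start, uint64_t frag_end,
			      unsigned counts[3])
{
	counts[0] = (frag_start - pe_start) / 8; /* head: plain 4K PTEs */
	counts[1] = (frag_end - frag_start) / 8; /* body: PTEs with frag_flags */
	counts[2] = (pe_end - frag_end) / 8;     /* tail: plain 4K PTEs */
}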
@@ -635,122 +653,105 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: GTT hw mapping flags
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to
* @flags: mapping flags
*
- * Update the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
+ * Update the page tables in the range @start - @end.
*/
-static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_ib *ib,
- uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags,
- uint32_t gtt_flags)
+static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
+ struct amdgpu_gart *gtt,
+ uint32_t gtt_flags,
+ struct amdgpu_vm *vm,
+ struct amdgpu_ib *ib,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
{
- uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
- uint64_t last_pte = ~0, last_dst = ~0;
- void *owner = AMDGPU_FENCE_OWNER_VM;
- unsigned count = 0;
- uint64_t addr;
+ const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
- /* sync to everything on unmapping */
- if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+ uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
+ uint64_t addr;
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> amdgpu_vm_block_size;
struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
unsigned nptes;
- uint64_t pte;
- int r;
-
- amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
- r = reservation_object_reserve_shared(pt->tbo.resv);
- if (r)
- return r;
+ uint64_t pe_start;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
- pte = amdgpu_bo_gpu_offset(pt);
- pte += (addr & mask) * 8;
+ pe_start = amdgpu_bo_gpu_offset(pt);
+ pe_start += (addr & mask) * 8;
- if ((last_pte + 8 * count) != pte) {
+ if (last_pe_end != pe_start) {
- if (count) {
- amdgpu_vm_frag_ptes(adev, ib, last_pte,
- last_pte + 8 * count,
- last_dst, flags,
- gtt_flags);
- }
+ amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
+ last_pe_start, last_pe_end,
+ last_dst, flags);
- count = nptes;
- last_pte = pte;
+ last_pe_start = pe_start;
+ last_pe_end = pe_start + 8 * nptes;
last_dst = dst;
} else {
- count += nptes;
+ last_pe_end += 8 * nptes;
}
addr += nptes;
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
- if (count) {
- amdgpu_vm_frag_ptes(adev, ib, last_pte,
- last_pte + 8 * count,
- last_dst, flags, gtt_flags);
- }
-
- return 0;
+ amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
+ last_pe_start, last_pe_end,
+ last_dst, flags);
}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @gtt_flags: flags as they are used for GTT
* @vm: requested vm
- * @mapping: mapped range and flags to use for the update
+ * @start: start of mapped range
+ * @last: last mapped entry
+ * @flags: flags for the entries
* @addr: addr to set the area to
- * @gtt_flags: flags as they are used for GTT
* @fence: optional resulting fence
*
- * Fill in the page table entries for @mapping.
+ * Fill in the page table entries between @start and @last.
* Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct amdgpu_gart *gtt,
+ uint32_t gtt_flags,
struct amdgpu_vm *vm,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t addr, uint32_t gtt_flags,
+ uint64_t start, uint64_t last,
+ uint32_t flags, uint64_t addr,
struct fence **fence)
{
- struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+ struct amdgpu_ring *ring;
+ void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
- uint32_t flags = gtt_flags;
+ struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
int r;
- /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
- * but in case of something, we filter the flags in first place
- */
- if (!(mapping->flags & AMDGPU_PTE_READABLE))
- flags &= ~AMDGPU_PTE_READABLE;
- if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
- flags &= ~AMDGPU_PTE_WRITEABLE;
+ ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
- trace_amdgpu_vm_bo_update(mapping);
+ /* sync to everything on unmapping */
+ if (!(flags & AMDGPU_PTE_VALID))
+ owner = AMDGPU_FENCE_OWNER_UNDEFINED;
- nptes = mapping->it.last - mapping->it.start + 1;
+ nptes = last - start + 1;
/*
* reserve space for one command every (1 << BLOCK_SIZE)
@@ -761,11 +762,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
- if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
+ if ((gtt == &adev->gart) && (flags == gtt_flags)) {
/* only copy commands needed */
ndw += ncmds * 7;
- } else if (flags & AMDGPU_PTE_SYSTEM) {
+ } else if (gtt) {
/* header for write data commands */
ndw += ncmds * 4;
@@ -780,38 +781,28 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
ndw += 2 * 10;
}
- /* update too big for an IB */
- if (ndw > 0xfffff)
- return -ENOMEM;
-
- ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!ib)
- return -ENOMEM;
-
- r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
- if (r) {
- kfree(ib);
+ r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+ if (r)
return r;
- }
- ib->length_dw = 0;
+ ib = &job->ibs[0];
- r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
- mapping->it.last + 1, addr + mapping->offset,
- flags, gtt_flags);
+ r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+ owner);
+ if (r)
+ goto error_free;
- if (r) {
- amdgpu_ib_free(adev, ib);
- kfree(ib);
- return r;
- }
+ r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
+ if (r)
+ goto error_free;
+
+ amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
+ addr, flags);
- amdgpu_vm_pad_ib(adev, ib);
+ amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > ndw);
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
- &amdgpu_vm_free_job,
- AMDGPU_FENCE_OWNER_VM,
- &f);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error_free;
@@ -821,19 +812,76 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
*fence = fence_get(f);
}
fence_put(f);
- if (!amdgpu_enable_scheduler) {
- amdgpu_ib_free(adev, ib);
- kfree(ib);
- }
return 0;
error_free:
- amdgpu_ib_free(adev, ib);
- kfree(ib);
+ amdgpu_job_free(job);
return r;
}
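/* Worked budget for the sizing above, assuming the GART copy path
 * (gtt == &adev->gart, flags == gtt_flags) and, as an example, a block size
 * of 9 (roughly one copy command per 512 pages): a 2MB mapping is
 * nptes = 512 pages, about ncmds = (512 >> 9) + 1 = 2 commands, so
 * ndw = 64 + 2 * 7 = 78 dwords, comfortably inside a single IB. */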
/**
+ * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
+ *
+ * @adev: amdgpu_device pointer
+ * @gtt: GART instance to use for mapping
+ * @vm: requested vm
+ * @mapping: mapped range and flags to use for the update
+ * @addr: addr to set the area to
+ * @gtt_flags: flags as they are used for GTT
+ * @fence: optional resulting fence
+ *
+ * Split the mapping into smaller chunks so that each update fits
+ * into a SDMA IB.
+ * Returns 0 for success, -EINVAL for failure.
+ */
+static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ struct amdgpu_gart *gtt,
+ uint32_t gtt_flags,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t addr, struct fence **fence)
+{
+ const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
+
+ uint64_t start = mapping->it.start;
+ uint32_t flags = gtt_flags;
+ int r;
+
+ /* normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
+ * but we filter the flags here first just in case
+ */
+ if (!(mapping->flags & AMDGPU_PTE_READABLE))
+ flags &= ~AMDGPU_PTE_READABLE;
+ if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+ flags &= ~AMDGPU_PTE_WRITEABLE;
+
+ trace_amdgpu_vm_bo_update(mapping);
+
+ addr += mapping->offset;
+
+ if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
+ return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+ start, mapping->it.last,
+ flags, addr, fence);
+
+ while (start != mapping->it.last + 1) {
+ uint64_t last;
+
+ last = min((uint64_t)mapping->it.last, start + max_size - 1);
+ r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+ start, last, flags, addr,
+ fence);
+ if (r)
+ return r;
+
+ start = last + 1;
+ addr += max_size * AMDGPU_GPU_PAGE_SIZE;
+ }
+
+ return 0;
+}
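/* Worked numbers for the split above: max_size = 64MB / AMDGPU_GPU_PAGE_SIZE
 * = 64 * 1024 * 1024 / 4096 = 16384 GPU pages per chunk, so e.g. a 100MB
 * mapping is written as one 64MB update followed by one 36MB update, each
 * small enough for the IB budget computed in
 * amdgpu_vm_bo_update_mapping(). */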
+
+/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
*
* @adev: amdgpu_device pointer
@@ -851,14 +899,25 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
{
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_gart *gtt = NULL;
uint32_t flags;
uint64_t addr;
int r;
if (mem) {
addr = (u64)mem->start << PAGE_SHIFT;
- if (mem->mem_type != TTM_PL_TT)
+ switch (mem->mem_type) {
+ case TTM_PL_TT:
+ gtt = &bo_va->bo->adev->gart;
+ break;
+
+ case TTM_PL_VRAM:
addr += adev->vm_manager.vram_base_offset;
+ break;
+
+ default:
+ break;
+ }
} else {
addr = 0;
}
@@ -871,8 +930,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
- flags, &bo_va->last_pt_update);
+ r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
+ &bo_va->last_pt_update);
if (r)
return r;
}
@@ -912,21 +971,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
int r;
- spin_lock(&vm->freed_lock);
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- spin_unlock(&vm->freed_lock);
- r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
+
+ r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
+ 0, NULL);
kfree(mapping);
if (r)
return r;
- spin_lock(&vm->freed_lock);
}
- spin_unlock(&vm->freed_lock);
-
return 0;
}
@@ -953,9 +1009,8 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
bo_va = list_first_entry(&vm->invalidated,
struct amdgpu_bo_va, vm_status);
spin_unlock(&vm->status_lock);
- mutex_lock(&bo_va->mutex);
+
r = amdgpu_vm_bo_update(adev, bo_va, NULL);
- mutex_unlock(&bo_va->mutex);
if (r)
return r;
@@ -976,7 +1031,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
* @vm: requested vm
* @bo: amdgpu buffer object
*
- * Add @bo into the requested vm (cayman+).
+ * Add @bo into the requested vm.
* Add @bo to the list of bos associated with the vm
* Returns newly added bo_va or NULL for failure
*
@@ -999,7 +1054,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
INIT_LIST_HEAD(&bo_va->vm_status);
- mutex_init(&bo_va->mutex);
+
list_add_tail(&bo_va->bo_list, &bo->va);
return bo_va;
@@ -1051,9 +1106,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- spin_lock(&vm->it_lock);
it = interval_tree_iter_first(&vm->va, saddr, eaddr);
- spin_unlock(&vm->it_lock);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1077,13 +1130,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
mapping->offset = offset;
mapping->flags = flags;
- mutex_lock(&bo_va->mutex);
list_add(&mapping->list, &bo_va->invalids);
- mutex_unlock(&bo_va->mutex);
- spin_lock(&vm->it_lock);
interval_tree_insert(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
- trace_amdgpu_vm_bo_map(bo_va, mapping);
/* Make sure the page tables are allocated */
saddr >>= amdgpu_vm_block_size;
@@ -1117,18 +1165,17 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
*/
pt->parent = amdgpu_bo_ref(vm->page_directory);
- r = amdgpu_vm_clear_bo(adev, pt);
+ r = amdgpu_vm_clear_bo(adev, vm, pt);
if (r) {
amdgpu_bo_unref(&pt);
goto error_free;
}
entry->robj = pt;
- entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
- entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
+ entry->user_pages = NULL;
vm->page_tables[pt_idx].addr = 0;
}
@@ -1136,9 +1183,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
error_free:
list_del(&mapping->list);
- spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
kfree(mapping);
@@ -1167,7 +1212,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- mutex_lock(&bo_va->mutex);
+
list_for_each_entry(mapping, &bo_va->valids, list) {
if (mapping->it.start == saddr)
break;
@@ -1181,25 +1226,18 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
break;
}
- if (&mapping->list == &bo_va->invalids) {
- mutex_unlock(&bo_va->mutex);
+ if (&mapping->list == &bo_va->invalids)
return -ENOENT;
- }
}
- mutex_unlock(&bo_va->mutex);
+
list_del(&mapping->list);
- spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (valid) {
- spin_lock(&vm->freed_lock);
+ if (valid)
list_add(&mapping->list, &vm->freed);
- spin_unlock(&vm->freed_lock);
- } else {
+ else
kfree(mapping);
- }
return 0;
}
@@ -1210,7 +1248,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
* @adev: amdgpu_device pointer
* @bo_va: requested bo_va
*
- * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the requested vm.
*
* The object has to be reserved!
*/
@@ -1228,23 +1266,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
- spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- spin_lock(&vm->freed_lock);
list_add(&mapping->list, &vm->freed);
- spin_unlock(&vm->freed_lock);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
- spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
kfree(mapping);
}
+
fence_put(bo_va->last_pt_update);
- mutex_destroy(&bo_va->mutex);
kfree(bo_va);
}
@@ -1255,7 +1287,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
* @vm: requested vm
* @bo: amdgpu buffer object
*
- * Mark @bo as invalid (cayman+).
+ * Mark @bo as invalid.
*/
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
@@ -1276,17 +1308,20 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
* @adev: amdgpu_device pointer
* @vm: requested vm
*
- * Init @vm fields (cayman+).
+ * Init @vm fields.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT * 8);
unsigned pd_size, pd_entries;
+ unsigned ring_instance;
+ struct amdgpu_ring *ring;
+ struct amd_sched_rq *rq;
int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- vm->ids[i].id = 0;
+ vm->ids[i].mgr_id = NULL;
vm->ids[i].flushed_updates = NULL;
}
vm->va = RB_ROOT;
@@ -1294,8 +1329,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
- spin_lock_init(&vm->it_lock);
- spin_lock_init(&vm->freed_lock);
+
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
@@ -1306,6 +1340,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
return -ENOMEM;
}
+ /* create scheduler entity for page table updates */
+
+ ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
+ ring_instance %= adev->vm_manager.vm_pte_num_rings;
+ ring = adev->vm_manager.vm_pte_rings[ring_instance];
+ rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+ r = amd_sched_entity_init(&ring->sched, &vm->entity,
+ rq, amdgpu_sched_jobs);
+ if (r)
+ return r;
+
vm->page_directory_fence = NULL;
r = amdgpu_bo_create(adev, pd_size, align, true,
@@ -1313,22 +1358,27 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
NULL, NULL, &vm->page_directory);
if (r)
- return r;
+ goto error_free_sched_entity;
+
r = amdgpu_bo_reserve(vm->page_directory, false);
- if (r) {
- amdgpu_bo_unref(&vm->page_directory);
- vm->page_directory = NULL;
- return r;
- }
- r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+ if (r)
+ goto error_free_page_directory;
+
+ r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
amdgpu_bo_unreserve(vm->page_directory);
- if (r) {
- amdgpu_bo_unref(&vm->page_directory);
- vm->page_directory = NULL;
- return r;
- }
+ if (r)
+ goto error_free_page_directory;
return 0;
+
+error_free_page_directory:
+ amdgpu_bo_unref(&vm->page_directory);
+ vm->page_directory = NULL;
+
+error_free_sched_entity:
+ amd_sched_entity_fini(&ring->sched, &vm->entity);
+
+ return r;
}
/**
@@ -1337,7 +1387,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: requested vm
*
- * Tear down @vm (cayman+).
+ * Tear down @vm.
* Unbind the VM and remove all bos from the vm bo list
*/
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
@@ -1345,6 +1395,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
int i;
+ amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+
if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(adev->dev, "still active bo inside vm\n");
}
@@ -1364,14 +1416,38 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- unsigned id = vm->ids[i].id;
+ struct amdgpu_vm_id *id = &vm->ids[i];
- atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
- (long)vm, 0);
- fence_put(vm->ids[i].flushed_updates);
+ if (id->mgr_id)
+ atomic_long_cmpxchg(&id->mgr_id->owner,
+ (long)id, 0);
+ fence_put(id->flushed_updates);
+ }
+}
+
+/**
+ * amdgpu_vm_manager_init - init the VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VM manager structures
+ */
+void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+
+ /* skip over VMID 0, since it is the system VM */
+ for (i = 1; i < adev->vm_manager.num_ids; ++i) {
+ amdgpu_vm_reset_id(adev, i);
+ list_add_tail(&adev->vm_manager.ids[i].list,
+ &adev->vm_manager.ids_lru);
}
+ atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
}
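/* A sketch of the LRU discipline set up here, reusing the list calls from
 * amdgpu_vm_grab_id() above: IDs are taken from the head of ids_lru (the
 * least recently used entry) and moved to the tail on every grab or
 * successful reuse, so each ID stays assigned as long as possible before
 * being recycled to another VM. */
	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
				      struct amdgpu_vm_manager_id, list);
	list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);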
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 21aacc1f45c1..7f85c2c1d681 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
unsigned lane_num, i, max_pix_clock;
- for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
- for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
- max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ ENCODER_OBJECT_ID_NUTMEG) {
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ max_pix_clock = (lane_num * 270000 * 8) / bpp;
if (max_pix_clock >= pix_clock) {
*dp_lanes = lane_num;
- *dp_rate = link_rates[i];
+ *dp_rate = 270000;
return 0;
}
}
+ } else {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
+ }
}
return -EINVAL;
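/* Worked example for the bandwidth check above, with bpp = 24: four lanes
 * at 270000 (kB/s per lane, i.e. 2.7 GHz after 8b/10b coding) give
 * max_pix_clock = (4 * 270000 * 8) / 24 = 360000 kHz, enough for a
 * 1920x1200@60 mode (~193000 kHz). The normal path tries lower rates and
 * lane counts first; NUTMEG DP bridges are pinned to 270000 and only the
 * lane count is iterated. */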
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1e0bba29e167..1cd6de575305 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* vertical FP must be at least 1 */
+ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+ adjusted_mode->crtc_vsync_start++;
+
/* get the native mode for scaling */
if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 474ca02b0949..1f9109d3348b 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -3017,7 +3017,6 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
&memory_level->MinVddcPhases);
memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 1;
memory_level->UpH = 0;
memory_level->DownH = 100;
memory_level->VoltageDownH = 0;
@@ -3376,7 +3375,6 @@ static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
- graphic_level->EnabledForActivity = 1;
return 0;
}
@@ -3407,6 +3405,7 @@ static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
PPSMC_DISPLAY_WATERMARK_HIGH;
}
+ pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
@@ -3450,6 +3449,8 @@ static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
return ret;
}
+ pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
if ((dpm_table->mclk_table.count >= 2) &&
((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
pi->smc_state_table.MemoryLevel[1].MinVddc =
@@ -4381,26 +4382,6 @@ static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
}
}
}
- if ((!pi->pcie_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- levels = 0;
- tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- levels++;
- if (levels) {
- ret = ci_dpm_force_state_pcie(adev, level);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- }
} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
if ((!pi->sclk_dpm_key_disabled) &&
pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
@@ -5395,30 +5376,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
ci_update_current_ps(adev, boot_ps);
- if (adev->irq.installed &&
- amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
-#if 0
- PPSMC_Result result;
-#endif
- ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
- CISLANDS_TEMP_RANGE_MAX);
- if (ret) {
- DRM_ERROR("ci_thermal_set_temperature_range failed\n");
- return ret;
- }
- amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
-
-#if 0
- result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
-
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-#endif
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 155965ed14a3..bddc9ba11495 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1059,257 +1059,6 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static void cik_print_gpu_status_regs(struct amdgpu_device *adev)
-{
- dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(mmGRBM_STATUS));
- dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(mmGRBM_STATUS2));
- dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE0));
- dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE1));
- dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE2));
- dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE3));
- dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(mmSRBM_STATUS));
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
- dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
- RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
- dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
- RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
- dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
- dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT1));
- dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT2));
- dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT3));
- dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
- RREG32(mmCP_CPF_BUSY_STAT));
- dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_CPF_STALLED_STAT1));
- dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
- dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
- dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_CPC_STALLED_STAT1));
- dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
-}
-
-/**
- * cik_gpu_check_soft_reset - check which blocks are busy
- *
- * @adev: amdgpu_device pointer
- *
- * Check which blocks are busy and return the relevant reset
- * mask to be used by cik_gpu_soft_reset().
- * Returns a mask of the blocks to be reset.
- */
-u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev)
-{
- u32 reset_mask = 0;
- u32 tmp;
-
- /* GRBM_STATUS */
- tmp = RREG32(mmGRBM_STATUS);
- if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
- GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
- GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
- GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
- GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
- GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
- reset_mask |= AMDGPU_RESET_GFX;
-
- if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
- reset_mask |= AMDGPU_RESET_CP;
-
- /* GRBM_STATUS2 */
- tmp = RREG32(mmGRBM_STATUS2);
- if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_RLC;
-
- /* SDMA0_STATUS_REG */
- tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
- if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
- reset_mask |= AMDGPU_RESET_DMA;
-
- /* SDMA1_STATUS_REG */
- tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
- if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
- reset_mask |= AMDGPU_RESET_DMA1;
-
- /* SRBM_STATUS2 */
- tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_DMA;
-
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_DMA1;
-
- /* SRBM_STATUS */
- tmp = RREG32(mmSRBM_STATUS);
-
- if (tmp & SRBM_STATUS__IH_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_IH;
-
- if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_SEM;
-
- if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
- reset_mask |= AMDGPU_RESET_GRBM;
-
- if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_VMC;
-
- if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
- reset_mask |= AMDGPU_RESET_MC;
-
- if (amdgpu_display_is_display_hung(adev))
- reset_mask |= AMDGPU_RESET_DISPLAY;
-
- /* Skip MC reset as it's mostly likely not hung, just busy */
- if (reset_mask & AMDGPU_RESET_MC) {
- DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
- reset_mask &= ~AMDGPU_RESET_MC;
- }
-
- return reset_mask;
-}
-
-/**
- * cik_gpu_soft_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- * @reset_mask: mask of which blocks to reset
- *
- * Soft reset the blocks specified in @reset_mask.
- */
-static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
-{
- struct amdgpu_mode_mc_save save;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- u32 tmp;
-
- if (reset_mask == 0)
- return;
-
- dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);
-
- cik_print_gpu_status_regs(adev);
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
-
- /* disable CG/PG */
-
- /* stop the rlc */
- gfx_v7_0_rlc_stop(adev);
-
- /* Disable GFX parsing/prefetching */
- WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
-
- /* Disable MEC parsing/prefetching */
- WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
-
- if (reset_mask & AMDGPU_RESET_DMA) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
- }
- if (reset_mask & AMDGPU_RESET_DMA1) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
- }
-
- gmc_v7_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
- dev_warn(adev->dev, "Wait for MC idle timedout !\n");
- }
-
- if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP))
- grbm_soft_reset = GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
- GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
-
- if (reset_mask & AMDGPU_RESET_CP) {
- grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
-
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
- }
-
- if (reset_mask & AMDGPU_RESET_DMA)
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-
- if (reset_mask & AMDGPU_RESET_DMA1)
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
-
- if (reset_mask & AMDGPU_RESET_DISPLAY)
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
-
- if (reset_mask & AMDGPU_RESET_RLC)
- grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
-
- if (reset_mask & AMDGPU_RESET_SEM)
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SEM_MASK;
-
- if (reset_mask & AMDGPU_RESET_IH)
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
-
- if (reset_mask & AMDGPU_RESET_GRBM)
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
-
- if (reset_mask & AMDGPU_RESET_VMC)
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;
-
- if (!(adev->flags & AMD_IS_APU)) {
- if (reset_mask & AMDGPU_RESET_MC)
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
- }
-
- if (grbm_soft_reset) {
- tmp = RREG32(mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~grbm_soft_reset;
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
- }
-
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
- }
-
- /* Wait a little for things to settle down */
- udelay(50);
-
- gmc_v7_0_mc_resume(adev, &save);
- udelay(50);
-
- cik_print_gpu_status_regs(adev);
-}
-
struct kv_reset_save_regs {
u32 gmcon_reng_execute;
u32 gmcon_misc;
@@ -1405,45 +1154,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
{
- struct amdgpu_mode_mc_save save;
struct kv_reset_save_regs kv_save = { 0 };
- u32 tmp, i;
+ u32 i;
dev_info(adev->dev, "GPU pci config reset\n");
- /* disable dpm? */
-
- /* disable cg/pg */
-
- /* Disable GFX parsing/prefetching */
- WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
- CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
-
- /* Disable MEC parsing/prefetching */
- WREG32(mmCP_MEC_CNTL,
- CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
-
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
- /* XXX other engines? */
-
- /* halt the rlc, disable cp internal ints */
- gfx_v7_0_rlc_stop(adev);
-
- udelay(50);
-
- /* disable mem access */
- gmc_v7_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
- dev_warn(adev->dev, "Wait for MC idle timed out !\n");
- }
-
if (adev->flags & AMD_IS_APU)
kv_save_regs_for_reset(adev, &kv_save);
@@ -1489,26 +1204,11 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
*/
static int cik_asic_reset(struct amdgpu_device *adev)
{
- u32 reset_mask;
-
- reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
+ cik_set_bios_scratch_engine_hung(adev, true);
- if (reset_mask)
- cik_set_bios_scratch_engine_hung(adev, true);
+ cik_gpu_pci_config_reset(adev);
- /* try soft reset */
- cik_gpu_soft_reset(adev, reset_mask);
-
- reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
-
- /* try pci config reset */
- if (reset_mask && amdgpu_hard_reset)
- cik_gpu_pci_config_reset(adev);
-
- reset_mask = amdgpu_cik_gpu_check_soft_reset(adev);
-
- if (!reset_mask)
- cik_set_bios_scratch_engine_hung(adev, false);
+ cik_set_bios_scratch_engine_hung(adev, false);
return 0;
}
@@ -2328,8 +2028,6 @@ static int cik_common_early_init(void *handle)
adev->asic_funcs = &cik_asic_funcs;
- adev->has_uvd = true;
-
adev->rev_id = cik_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c55ecf0ea845..d3ac3298fba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -212,7 +212,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
- u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+ u32 extra_bits = ib->vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 4)
@@ -261,6 +261,13 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
+static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 1);
+}
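/* The new callback above is an SRBM register write encoded as an SDMA
 * packet: opcode SDMA_OPCODE_SRBM_WRITE with 0xf000 in the extra bits
 * (presumably all four byte enables), followed by the register offset
 * (mmHDP_DEBUG0) and the value 1; as the callback name indicates, the
 * HDP_DEBUG0 write serves to invalidate the HDP cache. */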
+
/**
* cik_sdma_ring_emit_fence - emit a fence on the DMA ring
*
@@ -295,30 +302,6 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
- * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
- *
- * @ring: amdgpu_ring structure holding ring information
- * @semaphore: amdgpu semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (CIK).
- */
-static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
-
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
- amdgpu_ring_write(ring, addr & 0xfffffff8);
- amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
-
- return true;
-}
-
-/**
* cik_sdma_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
@@ -417,6 +400,9 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+ WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
+ adev->gfx.config.gb_addr_config & 0x70);
+
WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
@@ -584,7 +570,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ring_lock(ring, 5);
+ r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
amdgpu_wb_free(adev, index);
@@ -595,7 +581,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, 1); /* number of DWs to follow */
amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
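
The rename from amdgpu_ring_lock()/amdgpu_ring_unlock_commit() to amdgpu_ring_alloc()/amdgpu_ring_commit() matches what the helpers actually do at this point: reserve space on the ring, then publish the new write pointer; no lock is taken or released. A minimal sketch of the pattern, assuming only the signatures visible in this hunk:

/* sketch: reserve, fill, commit; error handling kept minimal */
static int emit_packets(struct amdgpu_ring *ring, const u32 *pkt, unsigned ndw)
{
        unsigned i;
        int r;

        r = amdgpu_ring_alloc(ring, ndw);       /* reserve ndw dwords */
        if (r)
                return r;
        for (i = 0; i < ndw; i++)
                amdgpu_ring_write(ring, pkt[i]);
        amdgpu_ring_commit(ring);               /* publish the new wptr */
        return 0;
}
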
@@ -645,7 +631,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(ring, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err0;
@@ -657,9 +643,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
@@ -685,7 +669,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
err1:
fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -738,7 +723,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
* Update PTEs by writing them manually using sDMA (CIK).
*/
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
- uint64_t pe,
+ const dma_addr_t *pages_addr, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
@@ -757,14 +742,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & AMDGPU_PTE_SYSTEM) {
- value = amdgpu_vm_map_gart(ib->ring->adev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & AMDGPU_PTE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
+ value = amdgpu_vm_map_gart(pages_addr, addr);
addr += incr;
value |= flags;
ib->ptr[ib->length_dw++] = value;
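
With the DMA-address table passed in explicitly, the per-PTE branching on AMDGPU_PTE_SYSTEM/AMDGPU_PTE_VALID moves out of the SDMA backend, and amdgpu_vm_map_gart() is expected to resolve system pages through pages_addr. A plausible sketch of that lookup, offered purely as an assumption (the real helper is not part of this hunk):

/* hypothetical sketch; the name and the NULL handling are assumptions */
static uint64_t map_gart_sketch(const dma_addr_t *pages_addr, uint64_t addr)
{
        if (!pages_addr)
                return addr;    /* no GART table: use the address as-is */

        /* DMA address of the backing page plus the offset within the page */
        return (pages_addr[addr >> PAGE_SHIFT] & PAGE_MASK) |
               (addr & ~PAGE_MASK);
}
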
@@ -827,9 +805,9 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
* @ib: indirect buffer to fill with padding
*
*/
-static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
+static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
u32 pad_count;
int i;
@@ -845,6 +823,30 @@ static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
}
/**
+ * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (CIK).
+ */
+static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
+ SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
+ SDMA_POLL_REG_MEM_EXTRA_M));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq); /* reference */
+ amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
+}
+
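
SDMA_OPCODE_POLL_REG_MEM with EXTRA_FUNC(3) ("equal") and the M bit set tells the engine to poll the fence location in memory until the masked value matches the reference, retrying up to the given count at the given interval. Roughly the CPU-side equivalent of what the packet encodes (fence_cpu_addr is a hypothetical name for the CPU mapping of fence_drv.gpu_addr):

/* what the packet asks the SDMA engine, not the CPU, to do */
while ((le32_to_cpu(*fence_cpu_addr) & 0xfffffff) != seq)
        cpu_relax();    /* hardware: up to 0xfff retries, poll interval 4 */
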
+/**
* cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
*
* @ring: amdgpu_ring pointer
@@ -1097,6 +1099,8 @@ static void cik_sdma_print_status(void *handle)
i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
+ i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
cik_srbm_select(adev, 0, 0, 0, j);
@@ -1297,12 +1301,14 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.parse_cs = NULL,
.emit_ib = cik_sdma_ring_emit_ib,
.emit_fence = cik_sdma_ring_emit_fence,
- .emit_semaphore = cik_sdma_ring_emit_semaphore,
+ .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
.test_ring = cik_sdma_ring_test_ring,
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
+ .pad_ib = cik_sdma_ring_pad_ib,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1399,14 +1405,18 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
.copy_pte = cik_sdma_vm_copy_pte,
.write_pte = cik_sdma_vm_write_pte,
.set_pte_pde = cik_sdma_vm_set_pte_pde,
- .pad_ib = cik_sdma_vm_pad_ib,
};
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
+ unsigned i;
+
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
- adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
- adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->vm_manager.vm_pte_rings[i] =
+ &adev->sdma.instance[i].ring;
+
+ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
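
Registering every SDMA instance in vm_pte_rings lets the VM manager spread page-table updates across both engines instead of pinning them to instance 0. How a ring gets picked is up to the VM manager and not shown in this patch; a round-robin choice would look roughly like this (scheme and counter hypothetical):

/* hypothetical selection over the registered PTE rings */
static struct amdgpu_ring *pick_pte_ring(struct amdgpu_device *adev)
{
        static unsigned next;   /* illustrative only, not the driver's policy */

        return adev->vm_manager.vm_pte_rings[next++ %
                                             adev->vm_manager.vm_pte_num_rings];
}
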
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 7f6d457f250a..60d4493206dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -46,9 +46,6 @@
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
-#define CIK_RB_BITMAP_WIDTH_PER_SH 2
-#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
-
#define AMDGPU_NUM_OF_VMIDS 8
#define PIPEID(x) ((x) << 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 093599aba64b..6de2ce535e37 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1668,6 +1668,9 @@ static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
int i;
+ if (!amdgpu_audio)
+ return;
+
if (!adev->mode_info.audio.enabled)
return;
@@ -1973,7 +1976,7 @@ static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
-static void dce_v10_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
{
int i;
@@ -1986,8 +1989,16 @@ static void dce_v10_0_afmt_init(struct amdgpu_device *adev)
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
+ } else {
+ int j;
+ for (j = 0; j < i; j++) {
+ kfree(adev->mode_info.afmt[j]);
+ adev->mode_info.afmt[j] = NULL;
+ }
+ return -ENOMEM;
}
}
+ return 0;
}
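
The afmt_init change, repeated for each DCE variant in this patch, follows the standard allocate-N-or-unwind idiom: on the first failed kzalloc, free every element allocated so far and return -ENOMEM so sw_init can propagate the error instead of continuing with a half-populated array. The idiom in isolation, as a generic sketch:

/* generic sketch of the unwind pattern used above */
static int alloc_all_or_unwind(void **objs, unsigned n, size_t size)
{
        unsigned i;

        for (i = 0; i < n; i++) {
                objs[i] = kzalloc(size, GFP_KERNEL);
                if (!objs[i]) {
                        while (i--) {
                                kfree(objs[i]);
                                objs[i] = NULL;
                        }
                        return -ENOMEM;
                }
        }
        return 0;
}
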
static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
@@ -2064,8 +2075,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
target_fb = fb;
- }
- else {
+ } else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
}
@@ -2079,9 +2089,9 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic)
+ if (atomic) {
fb_location = amdgpu_bo_gpu_offset(rbo);
- else {
+ } else {
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(rbo);
@@ -2670,7 +2680,6 @@ static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
drm_crtc_cleanup(crtc);
- destroy_workqueue(amdgpu_crtc->pflip_queue);
kfree(amdgpu_crtc);
}
@@ -2701,13 +2710,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_on(dev, amdgpu_crtc->crtc_id);
dce_v10_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_off(dev, amdgpu_crtc->crtc_id);
if (amdgpu_crtc->enabled) {
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2890,7 +2899,6 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
- amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
adev->mode_info.crtcs[index] = amdgpu_crtc;
amdgpu_crtc->max_cursor_width = 128;
@@ -2982,8 +2990,6 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- adev->mode_info.mode_config_initialized = true;
-
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.max_width = 16384;
@@ -3014,7 +3020,9 @@ static int dce_v10_0_sw_init(void *handle)
return -EINVAL;
/* setup afmt */
- dce_v10_0_afmt_init(adev);
+ r = dce_v10_0_afmt_init(adev);
+ if (r)
+ return r;
r = dce_v10_0_audio_init(adev);
if (r)
@@ -3022,7 +3030,8 @@ static int dce_v10_0_sw_init(void *handle)
drm_kms_helper_poll_init(adev->ddev);
- return r;
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
}
static int dce_v10_0_sw_fini(void *handle)
@@ -3366,7 +3375,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
- queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+ schedule_work(&works->unpin_work);
return 0;
}
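
Dropping the per-CRTC pflip_queue in favor of schedule_work() moves the unpin work onto the shared system workqueue, which suffices here: the work item needs neither ordering against other CRTCs nor a dedicated thread. The standard pairing, for reference (the handler name is illustrative, not taken from this patch):

/* initialize once when the flip is queued ... */
INIT_WORK(&works->unpin_work, unpin_work_func);

/* ... then, from the pageflip interrupt, instead of queue_work(pflip_queue, ...): */
schedule_work(&works->unpin_work);
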
@@ -3624,16 +3633,8 @@ dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
}
-static bool dce_v10_0_ext_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
.dpms = dce_v10_0_ext_dpms,
- .mode_fixup = dce_v10_0_ext_mode_fixup,
.prepare = dce_v10_0_ext_prepare,
.mode_set = dce_v10_0_ext_mode_set,
.commit = dce_v10_0_ext_commit,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 8e67249d4367..e9ccc6b787f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1658,6 +1658,9 @@ static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
{
int i;
+ if (!amdgpu_audio)
+ return;
+
if (!adev->mode_info.audio.enabled)
return;
@@ -1963,7 +1966,7 @@ static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
-static void dce_v11_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
{
int i;
@@ -1976,8 +1979,16 @@ static void dce_v11_0_afmt_init(struct amdgpu_device *adev)
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
+ } else {
+ int j;
+ for (j = 0; j < i; j++) {
+ kfree(adev->mode_info.afmt[j]);
+ adev->mode_info.afmt[j] = NULL;
+ }
+ return -ENOMEM;
}
}
+ return 0;
}
static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
@@ -2054,8 +2065,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
target_fb = fb;
- }
- else {
+ } else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
}
@@ -2069,9 +2079,9 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic)
+ if (atomic) {
fb_location = amdgpu_bo_gpu_offset(rbo);
- else {
+ } else {
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(rbo);
@@ -2661,7 +2671,6 @@ static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
drm_crtc_cleanup(crtc);
- destroy_workqueue(amdgpu_crtc->pflip_queue);
kfree(amdgpu_crtc);
}
@@ -2692,13 +2701,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_on(dev, amdgpu_crtc->crtc_id);
dce_v11_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_off(dev, amdgpu_crtc->crtc_id);
if (amdgpu_crtc->enabled) {
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2881,7 +2890,6 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
- amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
adev->mode_info.crtcs[index] = amdgpu_crtc;
amdgpu_crtc->max_cursor_width = 128;
@@ -2963,7 +2971,7 @@ static int dce_v11_0_sw_init(void *handle)
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
if (r)
- return r;
+ return r;
}
for (i = 8; i < 20; i += 2) {
@@ -2975,9 +2983,7 @@ static int dce_v11_0_sw_init(void *handle)
/* HPD hotplug */
r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
if (r)
- return r;
-
- adev->mode_info.mode_config_initialized = true;
+ return r;
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
@@ -2996,6 +3002,7 @@ static int dce_v11_0_sw_init(void *handle)
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
+
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = dce_v11_0_crtc_init(adev, i);
@@ -3009,7 +3016,9 @@ static int dce_v11_0_sw_init(void *handle)
return -EINVAL;
/* setup afmt */
- dce_v11_0_afmt_init(adev);
+ r = dce_v11_0_afmt_init(adev);
+ if (r)
+ return r;
r = dce_v11_0_audio_init(adev);
if (r)
@@ -3017,7 +3026,8 @@ static int dce_v11_0_sw_init(void *handle)
drm_kms_helper_poll_init(adev->ddev);
- return r;
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
}
static int dce_v11_0_sw_fini(void *handle)
@@ -3361,7 +3371,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
- queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+ schedule_work(&works->unpin_work);
return 0;
}
@@ -3619,16 +3629,8 @@ dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
}
-static bool dce_v11_0_ext_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
.dpms = dce_v11_0_ext_dpms,
- .mode_fixup = dce_v11_0_ext_mode_fixup,
.prepare = dce_v11_0_ext_prepare,
.mode_set = dce_v11_0_ext_mode_set,
.commit = dce_v11_0_ext_commit,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index d0e128c24813..e56b55d8c280 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1639,6 +1639,9 @@ static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
int i;
+ if (!amdgpu_audio)
+ return;
+
if (!adev->mode_info.audio.enabled)
return;
@@ -1910,7 +1913,7 @@ static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
-static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
{
int i;
@@ -1923,8 +1926,16 @@ static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
+ } else {
+ int j;
+ for (j = 0; j < i; j++) {
+ kfree(adev->mode_info.afmt[j]);
+ adev->mode_info.afmt[j] = NULL;
+ }
+ return -ENOMEM;
}
}
+ return 0;
}
static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
@@ -2001,8 +2012,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
target_fb = fb;
- }
- else {
+ } else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
}
@@ -2016,9 +2026,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic)
+ if (atomic) {
fb_location = amdgpu_bo_gpu_offset(rbo);
- else {
+ } else {
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(rbo);
@@ -2582,7 +2592,6 @@ static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
drm_crtc_cleanup(crtc);
- destroy_workqueue(amdgpu_crtc->pflip_queue);
kfree(amdgpu_crtc);
}
@@ -2613,13 +2622,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_on(dev, amdgpu_crtc->crtc_id);
dce_v8_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_off(dev, amdgpu_crtc->crtc_id);
if (amdgpu_crtc->enabled) {
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2809,7 +2818,6 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
- amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue");
adev->mode_info.crtcs[index] = amdgpu_crtc;
amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
@@ -2892,8 +2900,6 @@ static int dce_v8_0_sw_init(void *handle)
if (r)
return r;
- adev->mode_info.mode_config_initialized = true;
-
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.max_width = 16384;
@@ -2924,7 +2930,9 @@ static int dce_v8_0_sw_init(void *handle)
return -EINVAL;
/* setup afmt */
- dce_v8_0_afmt_init(adev);
+ r = dce_v8_0_afmt_init(adev);
+ if (r)
+ return r;
r = dce_v8_0_audio_init(adev);
if (r)
@@ -2932,7 +2940,8 @@ static int dce_v8_0_sw_init(void *handle)
drm_kms_helper_poll_init(adev->ddev);
- return r;
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
}
static int dce_v8_0_sw_fini(void *handle)
@@ -3375,7 +3384,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
- queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+ schedule_work(&works->unpin_work);
return 0;
}
@@ -3554,16 +3563,8 @@ dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
}
-static bool dce_v8_0_ext_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
.dpms = dce_v8_0_ext_dpms,
- .mode_fixup = dce_v8_0_ext_mode_fixup,
.prepare = dce_v8_0_ext_prepare,
.mode_set = dce_v8_0_ext_mode_set,
.commit = dce_v8_0_ext_commit,
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index e35340afd3db..b336c918d6a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -272,6 +272,12 @@ static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
if (!adev->pm.fw)
return -EINVAL;
+ /* Skip SMC ucode loading on SR-IOV capable boards.
+ * vbios does this for us in asic_init in that case.
+ */
+ if (adev->virtualization.supports_sr_iov)
+ return 0;
+
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(&hdr->header);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 06602df707f8..bb8709066fd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -31,8 +31,6 @@
#include "amdgpu_ucode.h"
#include "clearstate_ci.h"
-#include "uvd/uvd_4_2_d.h"
-
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
@@ -1006,9 +1004,15 @@ out:
*/
static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
- const u32 num_tile_mode_states = 32;
- const u32 num_secondary_tile_mode_states = 16;
- u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+ const u32 num_tile_mode_states =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ const u32 num_secondary_tile_mode_states =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+ u32 reg_offset, split_equal_to_row_size;
+ uint32_t *tile, *macrotile;
+
+ tile = adev->gfx.config.tile_mode_array;
+ macrotile = adev->gfx.config.macrotile_mode_array;
switch (adev->gfx.config.mem_row_size_in_kb) {
case 1:
@@ -1023,832 +1027,531 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
}
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ tile[reg_offset] = 0;
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ macrotile[reg_offset] = 0;
+
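
Pre-zeroing both arrays is what lets the per-chip blocks below drop every "default: gb_tile_moden = 0" arm and assign only the non-zero entries; any index never mentioned (macrotile[7], for instance) simply stays zero. One nuance visible in the write-back loops further down: the old code still wrote that zero to mmGB_MACROTILE_MODE0 + 7, while the new loops skip offset 7 entirely. The two loops above are equivalent to:

/* same effect as the explicit zeroing loops */
memset(tile, 0, num_tile_mode_states * sizeof(*tile));
memset(macrotile, 0, num_secondary_tile_mode_states * sizeof(*macrotile));
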
switch (adev->asic_type) {
case CHIP_BONAIRE:
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 7:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
-
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 12:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 15:
- gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 17:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
- case 18:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 19:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 20:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 21:
- gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 22:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 23:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
- case 24:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 25:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 26:
- gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 30:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
- }
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[7] = (TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[12] = (TILE_SPLIT(split_equal_to_row_size));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[17] = (TILE_SPLIT(split_equal_to_row_size));
+ tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[23] = (TILE_SPLIT(split_equal_to_row_size));
+ tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[30] = (TILE_SPLIT(split_equal_to_row_size));
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ if (reg_offset != 7)
+ WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
break;
case CHIP_HAWAII:
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
-
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 15:
- gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 18:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 19:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
- break;
- case 20:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 21:
- gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 22:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 23:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 24:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 25:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 26:
- gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
- }
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
+ tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ if (reg_offset != 7)
+ WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
break;
case CHIP_KABINI:
case CHIP_KAVERI:
case CHIP_MULLINS:
default:
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 7:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
-
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P2));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 12:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 15:
- gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 17:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
- case 18:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 19:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
- break;
- case 20:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 21:
- gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 22:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 23:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
- case 24:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 25:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 26:
- gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
- break;
- case 30:
- gb_tile_moden = (TILE_SPLIT(split_equal_to_row_size));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
- }
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[7] = (TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[12] = (TILE_SPLIT(split_equal_to_row_size));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[17] = (TILE_SPLIT(split_equal_to_row_size));
+ tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
+ tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[23] = (TILE_SPLIT(split_equal_to_row_size));
+ tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ tile[30] = (TILE_SPLIT(split_equal_to_row_size));
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ if (reg_offset != 7)
+ WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
break;
}
}
@@ -1893,45 +1596,31 @@ void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
*/
static u32 gfx_v7_0_create_bitmask(u32 bit_width)
{
- u32 i, mask = 0;
-
- for (i = 0; i < bit_width; i++) {
- mask <<= 1;
- mask |= 1;
- }
- return mask;
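+	/* 1ULL keeps the shift well-defined even when bit_width == 32 */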
+ return (u32)((1ULL << bit_width) - 1);
}
/**
- * gfx_v7_0_get_rb_disabled - computes the mask of disabled RBs
+ * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
*
* @adev: amdgpu_device pointer
- * @max_rb_num: max RBs (render backends) for the asic
- * @se_num: number of SEs (shader engines) for the asic
- * @sh_per_se: number of SH blocks per SE for the asic
*
- * Calculates the bitmask of disabled RBs (CIK).
- * Returns the disabled RB bitmask.
+ * Calculates the bitmask of enabled RBs (CIK).
+ * Returns the enabled RB bitmask.
*/
-static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev,
- u32 max_rb_num_per_se,
- u32 sh_per_se)
+static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
data = RREG32(mmCC_RB_BACKEND_DISABLE);
- if (data & 1)
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- else
- data = 0;
-
data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
- mask = gfx_v7_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+ mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se);
- return data & mask;
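+	/* the registers report disabled RBs, so invert to get the active bitmap */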
+ return (~data) & mask;
}
/**
@@ -1940,73 +1629,31 @@ static u32 gfx_v7_0_get_rb_disabled(struct amdgpu_device *adev,
* @adev: amdgpu_device pointer
* @se_num: number of SEs (shader engines) for the asic
* @sh_per_se: number of SH blocks per SE for the asic
- * @max_rb_num: max RBs (render backends) for the asic
*
* Configures per-SE/SH RB registers (CIK).
*/
-static void gfx_v7_0_setup_rb(struct amdgpu_device *adev,
- u32 se_num, u32 sh_per_se,
- u32 max_rb_num_per_se)
+static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
- u32 data, mask;
- u32 disabled_rbs = 0;
- u32 enabled_rbs = 0;
+ u32 data;
+ u32 active_rbs = 0;
+ u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se;
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < se_num; i++) {
- for (j = 0; j < sh_per_se; j++) {
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v7_0_select_se_sh(adev, i, j);
- data = gfx_v7_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
- if (adev->asic_type == CHIP_HAWAII)
- disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
- else
- disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
+ data = gfx_v7_0_get_rb_active_bitmap(adev);
+ active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+ rb_bitmap_width_per_sh);
}
}
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
- mask = 1;
- for (i = 0; i < max_rb_num_per_se * se_num; i++) {
- if (!(disabled_rbs & mask))
- enabled_rbs |= mask;
- mask <<= 1;
- }
-
- adev->gfx.config.backend_enable_mask = enabled_rbs;
-
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < se_num; i++) {
- gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
- data = 0;
- for (j = 0; j < sh_per_se; j++) {
- switch (enabled_rbs & 3) {
- case 0:
- if (j == 0)
- data |= (RASTER_CONFIG_RB_MAP_3 <<
- PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
- else
- data |= (RASTER_CONFIG_RB_MAP_0 <<
- PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
- break;
- case 1:
- data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
- break;
- case 2:
- data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
- break;
- case 3:
- default:
- data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
- break;
- }
- enabled_rbs >>= 2;
- }
- WREG32(mmPA_SC_RASTER_CONFIG, data);
- }
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
+ adev->gfx.config.backend_enable_mask = active_rbs;
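+	/* hweight32() is a population count, i.e. the number of active RBs */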
+ adev->gfx.config.num_rbs = hweight32(active_rbs);
}
/**
@@ -2059,192 +1706,23 @@ static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
*/
static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
{
- u32 gb_addr_config;
- u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
- u32 sh_mem_cfg;
- u32 tmp;
+ u32 tmp, sh_mem_cfg;
int i;
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->gfx.config.max_shader_engines = 2;
- adev->gfx.config.max_tile_pipes = 4;
- adev->gfx.config.max_cu_per_sh = 7;
- adev->gfx.config.max_sh_per_se = 1;
- adev->gfx.config.max_backends_per_se = 2;
- adev->gfx.config.max_texture_channel_caches = 4;
- adev->gfx.config.max_gprs = 256;
- adev->gfx.config.max_gs_threads = 32;
- adev->gfx.config.max_hw_contexts = 8;
-
- adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
- adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
- adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
- adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
- break;
- case CHIP_HAWAII:
- adev->gfx.config.max_shader_engines = 4;
- adev->gfx.config.max_tile_pipes = 16;
- adev->gfx.config.max_cu_per_sh = 11;
- adev->gfx.config.max_sh_per_se = 1;
- adev->gfx.config.max_backends_per_se = 4;
- adev->gfx.config.max_texture_channel_caches = 16;
- adev->gfx.config.max_gprs = 256;
- adev->gfx.config.max_gs_threads = 32;
- adev->gfx.config.max_hw_contexts = 8;
-
- adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
- adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
- adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
- adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
- break;
- case CHIP_KAVERI:
- adev->gfx.config.max_shader_engines = 1;
- adev->gfx.config.max_tile_pipes = 4;
- if ((adev->pdev->device == 0x1304) ||
- (adev->pdev->device == 0x1305) ||
- (adev->pdev->device == 0x130C) ||
- (adev->pdev->device == 0x130F) ||
- (adev->pdev->device == 0x1310) ||
- (adev->pdev->device == 0x1311) ||
- (adev->pdev->device == 0x131C)) {
- adev->gfx.config.max_cu_per_sh = 8;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1309) ||
- (adev->pdev->device == 0x130A) ||
- (adev->pdev->device == 0x130D) ||
- (adev->pdev->device == 0x1313) ||
- (adev->pdev->device == 0x131D)) {
- adev->gfx.config.max_cu_per_sh = 6;
- adev->gfx.config.max_backends_per_se = 2;
- } else if ((adev->pdev->device == 0x1306) ||
- (adev->pdev->device == 0x1307) ||
- (adev->pdev->device == 0x130B) ||
- (adev->pdev->device == 0x130E) ||
- (adev->pdev->device == 0x1315) ||
- (adev->pdev->device == 0x131B)) {
- adev->gfx.config.max_cu_per_sh = 4;
- adev->gfx.config.max_backends_per_se = 1;
- } else {
- adev->gfx.config.max_cu_per_sh = 3;
- adev->gfx.config.max_backends_per_se = 1;
- }
- adev->gfx.config.max_sh_per_se = 1;
- adev->gfx.config.max_texture_channel_caches = 4;
- adev->gfx.config.max_gprs = 256;
- adev->gfx.config.max_gs_threads = 16;
- adev->gfx.config.max_hw_contexts = 8;
-
- adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
- adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
- adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
- adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- default:
- adev->gfx.config.max_shader_engines = 1;
- adev->gfx.config.max_tile_pipes = 2;
- adev->gfx.config.max_cu_per_sh = 2;
- adev->gfx.config.max_sh_per_se = 1;
- adev->gfx.config.max_backends_per_se = 1;
- adev->gfx.config.max_texture_channel_caches = 2;
- adev->gfx.config.max_gprs = 256;
- adev->gfx.config.max_gs_threads = 16;
- adev->gfx.config.max_hw_contexts = 8;
-
- adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
- adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
- adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
- adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
- break;
- }
-
WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
- mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
- adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
- mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
-
- adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
- adev->gfx.config.mem_max_burst_length_bytes = 256;
- if (adev->flags & AMD_IS_APU) {
- /* Get memory bank mapping mode. */
- tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
- dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
- dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
-
- tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
- dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
- dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
-
- /* Validate settings in case only one DIMM installed. */
- if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
- dimm00_addr_map = 0;
- if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
- dimm01_addr_map = 0;
- if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
- dimm10_addr_map = 0;
- if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
- dimm11_addr_map = 0;
-
- /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
- /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
- if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
- adev->gfx.config.mem_row_size_in_kb = 2;
- else
- adev->gfx.config.mem_row_size_in_kb = 1;
- } else {
- tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
- adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
- if (adev->gfx.config.mem_row_size_in_kb > 4)
- adev->gfx.config.mem_row_size_in_kb = 4;
- }
- /* XXX use MC settings? */
- adev->gfx.config.shader_engine_tile_size = 32;
- adev->gfx.config.num_gpus = 1;
- adev->gfx.config.multi_gpu_tile_size = 64;
-
- /* fix up row size */
- gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
- switch (adev->gfx.config.mem_row_size_in_kb) {
- case 1:
- default:
- gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
- break;
- case 2:
- gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
- break;
- case 4:
- gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
- break;
- }
- adev->gfx.config.gb_addr_config = gb_addr_config;
-
- WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
- WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
- WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
- WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
- WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
- WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
- WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
- WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+ WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+ WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+ WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
gfx_v7_0_tiling_mode_table_init(adev);
- gfx_v7_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
- adev->gfx.config.max_sh_per_se,
- adev->gfx.config.max_backends_per_se);
+ gfx_v7_0_setup_rb(adev);
/* set HW defaults for 3D engine */
WREG32(mmCP_MEQ_THRESHOLDS,
- (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
- (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
+ (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+ (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
mutex_lock(&adev->grbm_idx_mutex);
/*
@@ -2255,7 +1733,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
- sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
mutex_lock(&adev->srbm_mutex);
@@ -2379,7 +1857,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = amdgpu_ring_lock(ring, 3);
+ r = amdgpu_ring_alloc(ring, 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
amdgpu_gfx_scratch_free(adev, scratch);
@@ -2388,7 +1866,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch);
@@ -2447,6 +1925,25 @@ static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
/**
+ * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emits an hdp invalidate on the cp.
+ */
+static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
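+	/* a CP write of 1 to mmHDP_DEBUG0 invalidates the HDP read cache */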
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0) |
+ WR_CONFIRM));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
*
* @adev: amdgpu_device pointer
@@ -2516,36 +2013,6 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, upper_32_bits(seq));
}
-/**
- * gfx_v7_0_ring_emit_semaphore - emit a semaphore on the CP ring
- *
- * @ring: amdgpu ring buffer object
- * @semaphore: amdgpu semaphore object
- * @emit_wait: Is this a sempahore wait?
- *
- * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
- * from running ahead of semaphore waits.
- */
-static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
- unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
- amdgpu_ring_write(ring, addr & 0xffffffff);
- amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
-
- if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
- /* Prevent the PFP from running ahead of the semaphore wait */
- amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
- amdgpu_ring_write(ring, 0x0);
- }
-
- return true;
-}
-
/*
* IB stuff
*/
@@ -2593,8 +2060,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw |
- (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
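+	/* the VM ID is carried in bits 27:24 of the IB control dword */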
+ control |= ib->length_dw | (ib->vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2622,8 +2088,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw |
- (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+ control |= ib->length_dw | (ib->vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2661,7 +2126,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
}
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(ring, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err1;
@@ -2671,9 +2136,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
@@ -2700,7 +2163,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
err2:
-	fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -2842,7 +2306,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v7_0_cp_gfx_enable(adev, true);
- r = amdgpu_ring_lock(ring, gfx_v7_0_get_csb_size(adev) + 8);
+ r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
return r;
@@ -2911,7 +2375,7 @@ static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
return 0;
}
@@ -2989,21 +2453,14 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
- u32 rptr;
-
- rptr = ring->adev->wb.wb[ring->rptr_offs];
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs];
}
static u32 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 wptr;
-
- wptr = RREG32(mmCP_RB0_WPTR);
- return wptr;
+ return RREG32(mmCP_RB0_WPTR);
}
static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
@@ -3016,21 +2473,13 @@ static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
- u32 rptr;
-
- rptr = ring->adev->wb.wb[ring->rptr_offs];
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs];
}
static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
- u32 wptr;
-
/* XXX check if swapping is necessary on BE */
- wptr = ring->adev->wb.wb[ring->wptr_offs];
-
- return wptr;
+ return ring->adev->wb.wb[ring->wptr_offs];
}
static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
@@ -3126,21 +2575,6 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
/**
- * gfx_v7_0_cp_compute_start - start the compute queues
- *
- * @adev: amdgpu_device pointer
- *
- * Enable the compute queues.
- * Returns 0 for success, error for failure.
- */
-static int gfx_v7_0_cp_compute_start(struct amdgpu_device *adev)
-{
- gfx_v7_0_cp_compute_enable(adev, true);
-
- return 0;
-}
-
-/**
* gfx_v7_0_cp_compute_fini - stop the compute queues
*
* @adev: amdgpu_device pointer
@@ -3330,9 +2764,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
u32 *buf;
struct bonaire_mqd *mqd;
- r = gfx_v7_0_cp_compute_start(adev);
- if (r)
- return r;
+ gfx_v7_0_cp_compute_enable(adev, true);
/* fix up chicken bits */
tmp = RREG32(mmCP_CPF_DEBUG);
@@ -3610,6 +3042,26 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
return 0;
}
+/**
+ * gfx_v7_0_ring_emit_pipeline_sync - cik pipeline sync using the CP
+ *
+ * @ring: the ring to emit the commands to
+ *
+ * Sync the command pipeline with the PFP, i.e. wait for everything
+ * to be completed.
+ */
+static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+
+	if (usepfp) {
+		/* sync CE with ME to prevent CE fetching CEIB before context switch is done */
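+		/* the double SWITCH_BUFFER appears to be needed to cycle both CE buffers */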
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
/*
* vm
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3641,14 +3093,6 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
- if (usepfp) {
- /* synce CE with ME to prevent CE fetch CEIB before context switch done */
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- }
-
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
@@ -4408,28 +3852,19 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
}
}
-static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev,
- u32 se, u32 sh)
+static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
- u32 mask = 0, tmp, tmp1;
- int i;
-
- gfx_v7_0_select_se_sh(adev, se, sh);
- tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
- tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ u32 data, mask;
- tmp &= 0xffff0000;
+ data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+ data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
- tmp |= tmp1;
- tmp >>= 16;
+ data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+ data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
- for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
- mask <<= 1;
- mask |= 1;
- }
+ mask = gfx_v7_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
- return (~tmp) & mask;
+ return (~data) & mask;
}
static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
@@ -4767,6 +4202,172 @@ static int gfx_v7_0_late_init(void *handle)
return 0;
}
+static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
+{
+ u32 gb_addr_config;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
+ u32 tmp;
+
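+	/* computed once at sw_init; gfx_v7_0_gpu_init() reuses these cached values */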
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->gfx.config.max_shader_engines = 2;
+ adev->gfx.config.max_tile_pipes = 4;
+ adev->gfx.config.max_cu_per_sh = 7;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 2;
+ adev->gfx.config.max_texture_channel_caches = 4;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_HAWAII:
+ adev->gfx.config.max_shader_engines = 4;
+ adev->gfx.config.max_tile_pipes = 16;
+ adev->gfx.config.max_cu_per_sh = 11;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 4;
+ adev->gfx.config.max_texture_channel_caches = 16;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_KAVERI:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 4;
+ if ((adev->pdev->device == 0x1304) ||
+ (adev->pdev->device == 0x1305) ||
+ (adev->pdev->device == 0x130C) ||
+ (adev->pdev->device == 0x130F) ||
+ (adev->pdev->device == 0x1310) ||
+ (adev->pdev->device == 0x1311) ||
+ (adev->pdev->device == 0x131C)) {
+ adev->gfx.config.max_cu_per_sh = 8;
+ adev->gfx.config.max_backends_per_se = 2;
+ } else if ((adev->pdev->device == 0x1309) ||
+ (adev->pdev->device == 0x130A) ||
+ (adev->pdev->device == 0x130D) ||
+ (adev->pdev->device == 0x1313) ||
+ (adev->pdev->device == 0x131D)) {
+ adev->gfx.config.max_cu_per_sh = 6;
+ adev->gfx.config.max_backends_per_se = 2;
+ } else if ((adev->pdev->device == 0x1306) ||
+ (adev->pdev->device == 0x1307) ||
+ (adev->pdev->device == 0x130B) ||
+ (adev->pdev->device == 0x130E) ||
+ (adev->pdev->device == 0x1315) ||
+ (adev->pdev->device == 0x131B)) {
+ adev->gfx.config.max_cu_per_sh = 4;
+ adev->gfx.config.max_backends_per_se = 1;
+ } else {
+ adev->gfx.config.max_cu_per_sh = 3;
+ adev->gfx.config.max_backends_per_se = 1;
+ }
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_texture_channel_caches = 4;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 16;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ default:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 2;
+ adev->gfx.config.max_cu_per_sh = 2;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 1;
+ adev->gfx.config.max_texture_channel_caches = 2;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 16;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ }
+
+ mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+ adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+ mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+
+ adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+ adev->gfx.config.mem_max_burst_length_bytes = 256;
+ if (adev->flags & AMD_IS_APU) {
+ /* Get memory bank mapping mode. */
+ tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
+ dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+ dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+ tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
+ dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+ dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+ /* Validate settings in case only one DIMM installed. */
+ if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
+ dimm00_addr_map = 0;
+ if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
+ dimm01_addr_map = 0;
+ if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
+ dimm10_addr_map = 0;
+ if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
+ dimm11_addr_map = 0;
+
+ /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
+	/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
+ if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
+ adev->gfx.config.mem_row_size_in_kb = 2;
+ else
+ adev->gfx.config.mem_row_size_in_kb = 1;
+ } else {
+ tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
+ adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (adev->gfx.config.mem_row_size_in_kb > 4)
+ adev->gfx.config.mem_row_size_in_kb = 4;
+ }
+ /* XXX use MC settings? */
+ adev->gfx.config.shader_engine_tile_size = 32;
+ adev->gfx.config.num_gpus = 1;
+ adev->gfx.config.multi_gpu_tile_size = 64;
+
+ /* fix up row size */
+ gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
+ switch (adev->gfx.config.mem_row_size_in_kb) {
+ case 1:
+ default:
+ gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
+ break;
+ case 2:
+ gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
+ break;
+ case 4:
+ gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
+ break;
+ }
+ adev->gfx.config.gb_addr_config = gb_addr_config;
+}
+
static int gfx_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
@@ -4870,6 +4471,10 @@ static int gfx_v7_0_sw_init(void *handle)
if (r)
return r;
+ adev->gfx.ce_ram_size = 0x8000;
+
+ gfx_v7_0_gpu_early_init(adev);
+
return r;
}
@@ -4910,8 +4515,6 @@ static int gfx_v7_0_hw_init(void *handle)
if (r)
return r;
- adev->gfx.ce_ram_size = 0x8000;
-
return r;
}
@@ -5028,16 +4631,6 @@ static void gfx_v7_0_print_status(void *handle)
RREG32(mmHDP_ADDR_CONFIG));
dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n",
RREG32(mmDMIF_ADDR_CALC));
- dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n",
- RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET));
- dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n",
- RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET));
- dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
RREG32(mmCP_MEQ_THRESHOLDS));
@@ -5580,13 +5173,15 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
- .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
+ .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5596,13 +5191,15 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
- .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
+ .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5672,7 +5269,7 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
- struct amdgpu_cu_info *cu_info)
+ struct amdgpu_cu_info *cu_info)
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
@@ -5680,16 +5277,19 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
if (!adev || !cu_info)
return -EINVAL;
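+	/* zero the whole struct so unvisited bitmap slots read back as zero */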
+ memset(cu_info, 0, sizeof(*cu_info));
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- bitmap = gfx_v7_0_get_cu_active_bitmap(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j);
+ bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
- for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+ for (k = 0; k < 16; k ++) {
if (bitmap & mask) {
if (counter < 2)
ao_bitmap |= mask;
@@ -5701,9 +5301,11 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
- mutex_unlock(&adev->grbm_idx_mutex);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 7086ac17abee..f0c7b3596480 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -43,9 +43,6 @@
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_enum.h"
-#include "uvd/uvd_5_0_d.h"
-#include "uvd/uvd_5_0_sh_mask.h"
-
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -652,7 +649,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = amdgpu_ring_lock(ring, 3);
+ r = amdgpu_ring_alloc(ring, 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
ring->idx, r);
@@ -662,7 +659,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch);
@@ -699,7 +696,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
}
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(ring, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err1;
@@ -709,9 +706,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
@@ -737,7 +732,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
}
err2:
-	fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1171,7 +1167,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(ring, NULL, total_size, &ib);
+ r = amdgpu_ib_get(adev, NULL, total_size, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
@@ -1266,9 +1262,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
	/* schedule the ib on the ring */
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r) {
DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
goto fail;
@@ -1296,7 +1290,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
fail:
-	fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
return r;
}
@@ -2574,11 +2569,6 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
}
-static u32 gfx_v8_0_create_bitmask(u32 bit_width)
-{
- return (u32)((1ULL << bit_width) - 1);
-}
-
void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
{
u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
@@ -2599,89 +2589,49 @@ void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
WREG32(mmGRBM_GFX_INDEX, data);
}
-static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
- u32 max_rb_num_per_se,
- u32 sh_per_se)
+static u32 gfx_v8_0_create_bitmask(u32 bit_width)
+{
+ return (u32)((1ULL << bit_width) - 1);
+}
+
+static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
data = RREG32(mmCC_RB_BACKEND_DISABLE);
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-
data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
- mask = gfx_v8_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+ mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se);
- return data & mask;
+ return (~data) & mask;
}
-static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
- u32 se_num, u32 sh_per_se,
- u32 max_rb_num_per_se)
+static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
- u32 data, mask;
- u32 disabled_rbs = 0;
- u32 enabled_rbs = 0;
+ u32 data;
+ u32 active_rbs = 0;
+ u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se;
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < se_num; i++) {
- for (j = 0; j < sh_per_se; j++) {
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v8_0_select_se_sh(adev, i, j);
- data = gfx_v8_0_get_rb_disabled(adev,
- max_rb_num_per_se, sh_per_se);
- disabled_rbs |= data << ((i * sh_per_se + j) *
- RB_BITMAP_WIDTH_PER_SH);
+ data = gfx_v8_0_get_rb_active_bitmap(adev);
+ active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+ rb_bitmap_width_per_sh);
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
- mask = 1;
- for (i = 0; i < max_rb_num_per_se * se_num; i++) {
- if (!(disabled_rbs & mask))
- enabled_rbs |= mask;
- mask <<= 1;
- }
-
- adev->gfx.config.backend_enable_mask = enabled_rbs;
-
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < se_num; i++) {
- gfx_v8_0_select_se_sh(adev, i, 0xffffffff);
- data = RREG32(mmPA_SC_RASTER_CONFIG);
- for (j = 0; j < sh_per_se; j++) {
- switch (enabled_rbs & 3) {
- case 0:
- if (j == 0)
- data |= (RASTER_CONFIG_RB_MAP_3 <<
- PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
- else
- data |= (RASTER_CONFIG_RB_MAP_0 <<
- PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT);
- break;
- case 1:
- data |= (RASTER_CONFIG_RB_MAP_0 <<
- (i * sh_per_se + j) * 2);
- break;
- case 2:
- data |= (RASTER_CONFIG_RB_MAP_3 <<
- (i * sh_per_se + j) * 2);
- break;
- case 3:
- default:
- data |= (RASTER_CONFIG_RB_MAP_2 <<
- (i * sh_per_se + j) * 2);
- break;
- }
- enabled_rbs >>= 2;
- }
- WREG32(mmPA_SC_RASTER_CONFIG, data);
- }
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
+ adev->gfx.config.backend_enable_mask = active_rbs;
+ adev->gfx.config.num_rbs = hweight32(active_rbs);
}
/**
@@ -2741,19 +2691,10 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
- WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET,
- adev->gfx.config.gb_addr_config & 0x70);
- WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET,
- adev->gfx.config.gb_addr_config & 0x70);
- WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
- WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
- WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
gfx_v8_0_tiling_mode_table_init(adev);
- gfx_v8_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
- adev->gfx.config.max_sh_per_se,
- adev->gfx.config.max_backends_per_se);
+ gfx_v8_0_setup_rb(adev);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -3062,7 +3003,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v8_0_cp_gfx_enable(adev, true);
- r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4);
+ r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
return r;
@@ -3126,7 +3067,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x8000);
amdgpu_ring_write(ring, 0x8000);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
return 0;
}
@@ -3226,13 +3167,6 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
udelay(50);
}
-static int gfx_v8_0_cp_compute_start(struct amdgpu_device *adev)
-{
- gfx_v8_0_cp_compute_enable(adev, true);
-
- return 0;
-}
-
static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
const struct gfx_firmware_header_v1_0 *mec_hdr;
@@ -3802,9 +3736,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
WREG32(mmCP_PQ_STATUS, tmp);
}
- r = gfx_v8_0_cp_compute_start(adev);
- if (r)
- return r;
+ gfx_v8_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
@@ -4016,16 +3948,6 @@ static void gfx_v8_0_print_status(void *handle)
RREG32(mmHDP_ADDR_CONFIG));
dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n",
RREG32(mmDMIF_ADDR_CALC));
- dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n",
- RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET));
- dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n",
- RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET));
- dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
RREG32(mmCP_MEQ_THRESHOLDS));
@@ -4667,6 +4589,18 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
+static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0) |
+ WR_CONFIRM));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 1);
+}
+
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
@@ -4699,8 +4633,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw |
- (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+ control |= ib->length_dw | (ib->vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -4729,8 +4662,7 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw |
- (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+ control |= ib->length_dw | (ib->vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -4762,49 +4694,10 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
}
-/**
- * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring
- *
- * @ring: amdgpu ring buffer object
- * @semaphore: amdgpu semaphore object
- * @emit_wait: Is this a semaphore wait?
- *
- * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
- * from running ahead of semaphore waits.
- */
-static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
- unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
-
- if (ring->adev->asic_type == CHIP_TOPAZ ||
- ring->adev->asic_type == CHIP_TONGA ||
- ring->adev->asic_type == CHIP_FIJI)
- /* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */
- return false;
- else {
- amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2));
- amdgpu_ring_write(ring, lower_32_bits(addr));
- amdgpu_ring_write(ring, upper_32_bits(addr));
- amdgpu_ring_write(ring, sel);
- }
-
- if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) {
- /* Prevent the PFP from running ahead of the semaphore wait */
- amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
- amdgpu_ring_write(ring, 0x0);
- }
-
- return true;
-}
-
-static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
- uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+ uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -4824,6 +4717,12 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
+}
+
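emit_pipeline_sync is the replacement for the removed semaphore hook: instead of a MEM_SEMAPHORE packet, the ring waits via WAIT_REG_MEM until the memory location backing its fence sequence reaches the last emitted value, and the VM flush below is split into its own callback. A rough userspace model of the condition the CP polls (names are illustrative):

	#include <stdint.h>
	#include <stdbool.h>

	/* model of the WAIT_REG_MEM "equal" function: poll *addr until the
	 * masked value matches the reference sequence number */
	static bool fence_reached(const volatile uint32_t *fence_cpu_addr,
				  uint32_t seq, uint32_t mask)
	{
		return (*fence_cpu_addr & mask) == (seq & mask);
	}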
+static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -5146,13 +5045,15 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
- .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
+ .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -5162,13 +5063,15 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
- .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
+ .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -5237,32 +5140,23 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
}
}
-static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev,
- u32 se, u32 sh)
+static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
- u32 mask = 0, tmp, tmp1;
- int i;
-
- gfx_v8_0_select_se_sh(adev, se, sh);
- tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
- tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ u32 data, mask;
- tmp &= 0xffff0000;
+ data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
+ data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
- tmp |= tmp1;
- tmp >>= 16;
+ data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+ data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
- for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) {
- mask <<= 1;
- mask |= 1;
- }
+ mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
- return (~tmp) & mask;
+ return (~data) & mask;
}
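The rewritten helper no longer shuffles raw register halves: it ORs the fuse- and user-disabled CU fields, isolates the INACTIVE_CUS bits, and complements them under a width mask built by gfx_v8_0_create_bitmask(). The arithmetic is easy to check in isolation; the register values below are invented, not hardware dumps:

	#include <stdio.h>
	#include <stdint.h>

	#define INACTIVE_CUS_MASK	0xffff0000u	/* matches the old open-coded 0xffff0000 */
	#define INACTIVE_CUS_SHIFT	16

	static uint32_t create_bitmask(unsigned bit_width)
	{
		return (1u << bit_width) - 1;	/* e.g. 10 CUs -> 0x3ff */
	}

	int main(void)
	{
		uint32_t cc   = 0x00050000;	/* fuse-disabled CUs (example) */
		uint32_t user = 0x00020000;	/* user-disabled CUs (example) */
		uint32_t data = (cc | user) & INACTIVE_CUS_MASK;

		data >>= INACTIVE_CUS_SHIFT;
		printf("active CU bitmap: 0x%03x\n", ~data & create_bitmask(10));
		return 0;
	}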
int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
- struct amdgpu_cu_info *cu_info)
+ struct amdgpu_cu_info *cu_info)
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
@@ -5270,16 +5164,19 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
if (!adev || !cu_info)
return -EINVAL;
+ memset(cu_info, 0, sizeof(*cu_info));
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- bitmap = gfx_v8_0_get_cu_active_bitmap(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j);
+ bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
- for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+ for (k = 0; k < 16; k ++) {
if (bitmap & mask) {
if (counter < 2)
ao_bitmap |= mask;
@@ -5291,9 +5188,11 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
- mutex_unlock(&adev->grbm_idx_mutex);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index b8060795b27b..a4a2e6cc61bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -339,7 +339,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
tmp = RREG32(mmHDP_MISC_CNTL);
- tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
WREG32(mmHDP_MISC_CNTL, tmp);
tmp = RREG32(mmHDP_HOST_PATH_CNTL);
@@ -694,7 +694,8 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+ amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU) {
@@ -902,14 +903,6 @@ static int gmc_v7_0_early_init(void *handle)
gmc_v7_0_set_gart_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -917,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
static int gmc_v7_0_sw_init(void *handle)
@@ -926,9 +922,13 @@ static int gmc_v7_0_sw_init(void *handle)
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_gem_init(adev);
- if (r)
- return r;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+ }
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
@@ -1010,7 +1010,7 @@ static int gmc_v7_0_sw_fini(void *handle)
adev->vm_manager.enabled = false;
}
gmc_v7_0_gart_fini(adev);
- amdgpu_gem_fini(adev);
+ amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 3efd45546241..7a9db2c72c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -252,6 +252,12 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
if (!adev->mc.fw)
return -EINVAL;
+ /* Skip MC ucode loading on SR-IOV capable boards.
+ * vbios does this for us in asic_init in that case.
+ */
+ if (adev->virtualization.supports_sr_iov)
+ return 0;
+
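The same early-return guard is added to the Iceland and Tonga SMC loaders further down: under SR-IOV the host VBIOS has already programmed the microcode during asic_init, so the guest driver must not load it again. The pattern in miniature:

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_dev { bool supports_sr_iov; };

	static int upload_firmware(const struct toy_dev *dev)
	{
		if (dev->supports_sr_iov)
			return 0;	/* VBIOS already did it; success, no-op */
		printf("uploading microcode\n");
		return 0;
	}

	int main(void)
	{
		struct toy_dev vf = { .supports_sr_iov = true };
		return upload_firmware(&vf);
	}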
hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
amdgpu_ucode_print_mc_hdr(&hdr->header);
@@ -380,7 +386,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
tmp = RREG32(mmHDP_MISC_CNTL);
- tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
WREG32(mmHDP_MISC_CNTL, tmp);
tmp = RREG32(mmHDP_HOST_PATH_CNTL);
@@ -774,7 +780,8 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+ amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU) {
@@ -856,14 +863,6 @@ static int gmc_v8_0_early_init(void *handle)
gmc_v8_0_set_gart_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -871,18 +870,32 @@ static int gmc_v8_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
static int gmc_v8_0_sw_init(void *handle)
{
int r;
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_gem_init(adev);
- if (r)
- return r;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp;
+
+ if (adev->asic_type == CHIP_FIJI)
+ tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+ else
+ tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+ }
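Two things change here: the vram-type detection moves from early_init to sw_init, and Fiji reads the fuse through a dedicated offset (0xA71, defined just above). Decoding the MT field is a pure bit test; the example assumes a GDDR5 encoding of 0x50000000, in line with the MC_SEQ_MISC0__MT__* values in vid.h:

	#include <stdio.h>
	#include <stdint.h>

	#define MT_MASK		0xf0000000u
	#define MT_GDDR5	0x50000000u	/* assumed encoding, see vid.h */

	int main(void)
	{
		uint32_t misc0 = 0x50000000;	/* pretend MC_SEQ_MISC0 readback */

		printf("GDDR5? %s\n",
		       (misc0 & MT_MASK) == MT_GDDR5 ? "yes" : "no");
		return 0;
	}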
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
@@ -964,7 +977,7 @@ static int gmc_v8_0_sw_fini(void *handle)
adev->vm_manager.enabled = false;
}
gmc_v8_0_gart_fini(adev);
- amdgpu_gem_fini(adev);
+ amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 090486c18249..52ee08193295 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -279,6 +279,12 @@ static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
if (!adev->pm.fw)
return -EINVAL;
+ /* Skip SMC ucode loading on SR-IOV capable boards.
+ * vbios does this for us in asic_init in that case.
+ */
+ if (adev->virtualization.supports_sr_iov)
+ return 0;
+
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(&hdr->header);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2cf50180cc51..6e0a86a563f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -32,8 +32,8 @@
#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
@@ -244,7 +244,7 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
- u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+ u32 vmid = ib->vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 2)
@@ -300,6 +300,13 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 1);
+}
/**
* sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
*
@@ -335,31 +342,6 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
/**
- * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring
- *
- * @ring: amdgpu_ring structure holding ring information
- * @semaphore: amdgpu semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (VI).
- */
-static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 sig = emit_wait ? 0 : 1;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
- SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
- amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
- amdgpu_ring_write(ring, upper_32_bits(addr));
-
- return true;
-}
-
-/**
* sdma_v2_4_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
@@ -459,6 +441,9 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+ WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
+ adev->gfx.config.gb_addr_config & 0x70);
+
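Programming SDMA0_TILING_CONFIG moves out of the GFX init path (removed near the top of this diff) into each SDMA instance's own resume, so the block owns its registers; a matching debug print is added to print_status below. The 0x70 mask keeps only bits [6:4] of GB_ADDR_CONFIG, assumed here to be the pipe-interleave-size field:

	#include <stdio.h>
	#include <stdint.h>

	#define PIPE_INTERLEAVE_SIZE_MASK 0x70u	/* assumed field, bits [6:4] */

	int main(void)
	{
		uint32_t gb_addr_config = 0x22011055;	/* invented example value */

		printf("SDMA tiling config: 0x%02x\n",
		       gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK);
		return 0;
	}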
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
@@ -636,7 +621,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ring_lock(ring, 5);
+ r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
amdgpu_wb_free(adev, index);
@@ -649,7 +634,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -699,7 +684,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(ring, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err0;
@@ -716,9 +701,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
@@ -744,7 +727,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
err1:
fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -797,7 +781,7 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
* Update PTEs by writing them manually using sDMA (CIK).
*/
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
- uint64_t pe,
+ const dma_addr_t *pages_addr, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
@@ -816,14 +800,7 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & AMDGPU_PTE_SYSTEM) {
- value = amdgpu_vm_map_gart(ib->ring->adev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & AMDGPU_PTE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
+ value = amdgpu_vm_map_gart(pages_addr, addr);
addr += incr;
value |= flags;
ib->ptr[ib->length_dw++] = value;
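With the branches gone, every PTE value now comes from amdgpu_vm_map_gart(), which takes the DMA-address lookup table directly instead of reaching back through ib->ring. A toy of the rewritten loop; the real helper also masks the translated address, and those details are simplified here:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t dma_addr_t;

	/* simplified: index the lookup table by page, keep the page offset */
	static uint64_t map_gart(const dma_addr_t *pages_addr, uint64_t addr)
	{
		return pages_addr[addr >> 12] | (addr & 0xfff);
	}

	int main(void)
	{
		dma_addr_t pages[2] = { 0x100000, 0x200000 };
		uint64_t addr = 0, incr = 0x1000, flags = 0x1;	/* e.g. VALID */

		for (int i = 0; i < 2; i++, addr += incr)
			printf("pte[%d] = 0x%llx\n", i, (unsigned long long)
			       (map_gart(pages, addr) | flags));
		return 0;
	}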
@@ -881,14 +858,14 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
}
/**
- * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw
+ * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
*
* @ib: indirect buffer to fill with padding
*
*/
-static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
+static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
u32 pad_count;
int i;
@@ -904,6 +881,31 @@ static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
}
/**
+ * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (CIK).
+ */
+static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
+ SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq); /* reference */
+ amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+}
+
+/**
* sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA
*
* @ring: amdgpu_ring pointer
@@ -1111,6 +1113,8 @@ static void sdma_v2_4_print_status(void *handle)
i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
+ i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
vi_srbm_select(adev, 0, 0, 0, j);
@@ -1302,12 +1306,14 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.parse_cs = NULL,
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
- .emit_semaphore = sdma_v2_4_ring_emit_semaphore,
+ .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
.test_ring = sdma_v2_4_ring_test_ring,
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
+ .pad_ib = sdma_v2_4_ring_pad_ib,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1405,14 +1411,18 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
.copy_pte = sdma_v2_4_vm_copy_pte,
.write_pte = sdma_v2_4_vm_write_pte,
.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
- .pad_ib = sdma_v2_4_vm_pad_ib,
};
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
+ unsigned i;
+
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
- adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
- adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->vm_manager.vm_pte_rings[i] =
+ &adev->sdma.instance[i].ring;
+
+ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
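Instead of pinning all page-table updates to SDMA instance 0 (and flagging that ring with is_pte_ring), every instance is now registered, letting the scheduler spread PTE jobs across engines; sdma_v3_0 gets the same change below. A hypothetical round-robin pick over such a table; the real GPU scheduler's policy may differ:

	#include <stdio.h>

	struct toy_ring { int id; };

	struct toy_vm_manager {
		struct toy_ring *pte_rings[2];
		unsigned pte_num_rings;
	};

	static struct toy_ring *pick_pte_ring(struct toy_vm_manager *vm, unsigned job)
	{
		return vm->pte_rings[job % vm->pte_num_rings];
	}

	int main(void)
	{
		struct toy_ring r0 = { 0 }, r1 = { 1 };
		struct toy_vm_manager vm = { { &r0, &r1 }, 2 };

		for (unsigned job = 0; job < 4; job++)
			printf("job %u -> sdma%d\n", job, pick_pte_ring(&vm, job)->id);
		return 0;
	}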
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index ad54c46751b0..8c8ca98dd129 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -355,7 +355,7 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
- u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+ u32 vmid = ib->vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 2)
@@ -410,6 +410,14 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 1);
+}
+
/**
* sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
*
@@ -444,32 +452,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
-
-/**
- * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring
- *
- * @ring: amdgpu_ring structure holding ring information
- * @semaphore: amdgpu semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (VI).
- */
-static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 sig = emit_wait ? 0 : 1;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
- SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
- amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
- amdgpu_ring_write(ring, upper_32_bits(addr));
-
- return true;
-}
-
/**
* sdma_v3_0_gfx_stop - stop the gfx async dma engines
*
@@ -596,6 +578,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+ WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
+ adev->gfx.config.gb_addr_config & 0x70);
+
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
@@ -788,7 +773,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ring_lock(ring, 5);
+ r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
amdgpu_wb_free(adev, index);
@@ -801,7 +786,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -851,7 +836,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(ring, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
goto err0;
@@ -868,9 +853,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
@@ -895,7 +878,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
}
err1:
fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -948,7 +932,7 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
* Update PTEs by writing them manually using sDMA (CIK).
*/
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
- uint64_t pe,
+ const dma_addr_t *pages_addr, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
@@ -967,14 +951,7 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & AMDGPU_PTE_SYSTEM) {
- value = amdgpu_vm_map_gart(ib->ring->adev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & AMDGPU_PTE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
+ value = amdgpu_vm_map_gart(pages_addr, addr);
addr += incr;
value |= flags;
ib->ptr[ib->length_dw++] = value;
@@ -1032,14 +1009,14 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
}
/**
- * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw
+ * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
*
* @ib: indirect buffer to fill with padding
*
*/
-static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
+static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
u32 pad_count;
int i;
@@ -1055,6 +1032,31 @@ static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
}
/**
+ * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (CIK).
+ */
+static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
+ SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq); /* reference */
+ amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+}
+
+/**
* sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA
*
* @ring: amdgpu_ring pointer
@@ -1275,6 +1277,8 @@ static void sdma_v3_0_print_status(void *handle)
i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_GFX_DOORBELL=0x%08X\n",
i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
+ dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
+ i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
vi_srbm_select(adev, 0, 0, 0, j);
@@ -1570,12 +1574,14 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.parse_cs = NULL,
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
- .emit_semaphore = sdma_v3_0_ring_emit_semaphore,
+ .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
.test_ring = sdma_v3_0_ring_test_ring,
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
+ .pad_ib = sdma_v3_0_ring_pad_ib,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1673,14 +1679,18 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
.copy_pte = sdma_v3_0_vm_copy_pte,
.write_pte = sdma_v3_0_vm_write_pte,
.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
- .pad_ib = sdma_v3_0_vm_pad_ib,
};
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
+ unsigned i;
+
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
- adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
- adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->vm_manager.vm_pte_rings[i] =
+ &adev->sdma.instance[i].ring;
+
+ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b6f7d7bff929..0f14199cf716 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -307,7 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
amdgpu_irq_fini(adev);
amdgpu_ih_ring_fini(adev);
- amdgpu_irq_add_domain(adev);
+ amdgpu_irq_remove_domain(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 361c49a82323..083893dd68c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -272,6 +272,12 @@ static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
if (!adev->pm.fw)
return -EINVAL;
+ /* Skip SMC ucode loading on SR-IOV capable boards.
+ * vbios does this for us in asic_init in that case.
+ */
+ if (adev->virtualization.supports_sr_iov)
+ return 0;
+
hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(&hdr->header);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index fbd3767671bb..cb463753115b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -164,7 +164,7 @@ static int uvd_v4_2_hw_init(void *handle)
goto done;
}
- r = amdgpu_ring_lock(ring, 10);
+ r = amdgpu_ring_alloc(ring, 10);
if (r) {
DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
goto done;
@@ -189,7 +189,7 @@ static int uvd_v4_2_hw_init(void *handle)
amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
amdgpu_ring_write(ring, 3);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
done:
/* lower clocks again */
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v4_2_hw_fini(adev);
if (r)
return r;
- r = uvd_v4_2_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
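The swap matters for ordering: hw_fini now runs before amdgpu_uvd_suspend, presumably so the engine is idled before its state is saved; the same reordering is applied to uvd_v5_0 and uvd_v6_0 below. In miniature:

	#include <stdio.h>

	static int hw_fini(void)    { printf("stop engine\n"); return 0; }
	static int save_state(void) { printf("save state\n");  return 0; }

	int main(void)
	{
		int r = hw_fini();	/* quiesce first... */
		if (r)
			return r;
		return save_state();	/* ...then preserve state */
	}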
@@ -439,33 +439,6 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
- * uvd_v4_2_ring_emit_semaphore - emit semaphore command
- *
- * @ring: amdgpu_ring pointer
- * @semaphore: semaphore to emit commands for
- * @emit_wait: true if we should emit a wait command
- *
- * Emit a semaphore command (either wait or signal) to the UVD ring.
- */
-static bool uvd_v4_2_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
- amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
- amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
- amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-
- return true;
-}
-
-/**
* uvd_v4_2_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -480,7 +453,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
int r;
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
- r = amdgpu_ring_lock(ring, 3);
+ r = amdgpu_ring_alloc(ring, 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
ring->idx, r);
@@ -488,7 +461,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
}
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
@@ -549,7 +522,7 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
goto error;
}
- r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+ r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
if (r) {
DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
goto error;
@@ -603,6 +576,10 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+ WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+ WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+ WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
uvd_v4_2_init_cg(adev);
}
@@ -804,6 +781,13 @@ static void uvd_v4_2_print_status(void *handle)
RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
RREG32(mmUVD_CONTEXT_ID));
+ dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
+
}
static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
@@ -888,10 +872,10 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
- .emit_semaphore = uvd_v4_2_ring_emit_semaphore,
.test_ring = uvd_v4_2_ring_test_ring,
.test_ib = uvd_v4_2_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 57f1c5bf3bf1..16476d80f475 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -160,7 +160,7 @@ static int uvd_v5_0_hw_init(void *handle)
goto done;
}
- r = amdgpu_ring_lock(ring, 10);
+ r = amdgpu_ring_alloc(ring, 10);
if (r) {
DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
goto done;
@@ -185,7 +185,7 @@ static int uvd_v5_0_hw_init(void *handle)
amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
amdgpu_ring_write(ring, 3);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
done:
/* lower clocks again */
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
- r = uvd_v5_0_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
@@ -279,6 +279,10 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
+
+ WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+ WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+ WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
/**
@@ -483,33 +487,6 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
- * uvd_v5_0_ring_emit_semaphore - emit semaphore command
- *
- * @ring: amdgpu_ring pointer
- * @semaphore: semaphore to emit commands for
- * @emit_wait: true if we should emit a wait command
- *
- * Emit a semaphore command (either wait or signal) to the UVD ring.
- */
-static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
- amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
- amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
- amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-
- return true;
-}
-
-/**
* uvd_v5_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -524,7 +501,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
- r = amdgpu_ring_lock(ring, 3);
+ r = amdgpu_ring_alloc(ring, 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
ring->idx, r);
@@ -532,7 +509,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
}
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
@@ -595,7 +572,7 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
goto error;
}
- r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+ r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
if (r) {
DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
goto error;
@@ -751,6 +728,12 @@ static void uvd_v5_0_print_status(void *handle)
RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
RREG32(mmUVD_CONTEXT_ID));
+ dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
}
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -829,10 +812,10 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
- .emit_semaphore = uvd_v5_0_ring_emit_semaphore,
.test_ring = uvd_v5_0_ring_test_ring,
.test_ib = uvd_v5_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 0b365b7651ff..d49379145ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -157,7 +157,7 @@ static int uvd_v6_0_hw_init(void *handle)
goto done;
}
- r = amdgpu_ring_lock(ring, 10);
+ r = amdgpu_ring_alloc(ring, 10);
if (r) {
DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
goto done;
@@ -182,7 +182,7 @@ static int uvd_v6_0_hw_init(void *handle)
amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
amdgpu_ring_write(ring, 3);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
done:
if (!r)
@@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ r = uvd_v6_0_hw_fini(adev);
+ if (r)
+ return r;
+
/* Skip this for APU for now */
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
}
- r = uvd_v6_0_hw_fini(adev);
- if (r)
- return r;
return r;
}
@@ -277,6 +278,10 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
+
+ WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+ WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+ WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
@@ -722,33 +727,6 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
- * uvd_v6_0_ring_emit_semaphore - emit semaphore command
- *
- * @ring: amdgpu_ring pointer
- * @semaphore: semaphore to emit commands for
- * @emit_wait: true if we should emit a wait command
- *
- * Emit a semaphore command (either wait or signal) to the UVD ring.
- */
-static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring,
- struct amdgpu_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
- amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
- amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
- amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-
- return true;
-}
-
-/**
* uvd_v6_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -763,7 +741,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
- r = amdgpu_ring_lock(ring, 3);
+ r = amdgpu_ring_alloc(ring, 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
ring->idx, r);
@@ -771,7 +749,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
}
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
- amdgpu_ring_unlock_commit(ring);
+ amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
@@ -827,7 +805,7 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
goto error;
}
- r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
+ r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
if (r) {
DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
goto error;
@@ -974,6 +952,12 @@ static void uvd_v6_0_print_status(void *handle)
RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
RREG32(mmUVD_CONTEXT_ID));
+ dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
+ dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
+ RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
}
static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1065,10 +1049,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
- .emit_semaphore = uvd_v6_0_ring_emit_semaphore,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = uvd_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index a822edacfa95..c7e885bcfd41 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -642,10 +642,10 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.parse_cs = amdgpu_vce_ring_parse_cs,
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
- .emit_semaphore = amdgpu_vce_ring_emit_semaphore,
.test_ring = amdgpu_vce_ring_test_ring,
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d662fa9f9091..ce468ee5da2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -762,10 +762,10 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
.parse_cs = amdgpu_vce_ring_parse_cs,
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
- .emit_semaphore = amdgpu_vce_ring_emit_semaphore,
.test_ring = amdgpu_vce_ring_test_ring,
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 0d14d108a6c4..1c120efa292c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -74,6 +74,9 @@
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
+#if defined(CONFIG_DRM_AMD_ACP)
+#include "amdgpu_acp.h"
+#endif
/*
* Indirect registers accessor
@@ -571,374 +574,12 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
-{
- dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(mmGRBM_STATUS));
- dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(mmGRBM_STATUS2));
- dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE0));
- dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE1));
- dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE2));
- dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE3));
- dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(mmSRBM_STATUS));
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
- dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
- RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
- if (adev->sdma.num_instances > 1) {
- dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
- RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
- }
- dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
- dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT1));
- dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT2));
- dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT3));
- dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
- RREG32(mmCP_CPF_BUSY_STAT));
- dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_CPF_STALLED_STAT1));
- dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
- dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
- dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_CPC_STALLED_STAT1));
- dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
-}
-
-/**
- * vi_gpu_check_soft_reset - check which blocks are busy
- *
- * @adev: amdgpu_device pointer
- *
- * Check which blocks are busy and return the relevant reset
- * mask to be used by vi_gpu_soft_reset().
- * Returns a mask of the blocks to be reset.
- */
-u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
-{
- u32 reset_mask = 0;
- u32 tmp;
-
- /* GRBM_STATUS */
- tmp = RREG32(mmGRBM_STATUS);
- if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
- GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
- GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
- GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
- GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
- GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
- reset_mask |= AMDGPU_RESET_GFX;
-
- if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
- reset_mask |= AMDGPU_RESET_CP;
-
- /* GRBM_STATUS2 */
- tmp = RREG32(mmGRBM_STATUS2);
- if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_RLC;
-
- if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
- GRBM_STATUS2__CPC_BUSY_MASK |
- GRBM_STATUS2__CPG_BUSY_MASK))
- reset_mask |= AMDGPU_RESET_CP;
-
- /* SRBM_STATUS2 */
- tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_DMA;
-
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_DMA1;
-
- /* SRBM_STATUS */
- tmp = RREG32(mmSRBM_STATUS);
-
- if (tmp & SRBM_STATUS__IH_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_IH;
-
- if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_SEM;
-
- if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
- reset_mask |= AMDGPU_RESET_GRBM;
-
- if (adev->asic_type != CHIP_TOPAZ) {
- if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
- SRBM_STATUS__UVD_BUSY_MASK))
- reset_mask |= AMDGPU_RESET_UVD;
- }
-
- if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_VMC;
-
- if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
- reset_mask |= AMDGPU_RESET_MC;
-
- /* SDMA0_STATUS_REG */
- tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
- if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
- reset_mask |= AMDGPU_RESET_DMA;
-
- /* SDMA1_STATUS_REG */
- if (adev->sdma.num_instances > 1) {
- tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
- if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
- reset_mask |= AMDGPU_RESET_DMA1;
- }
-#if 0
- /* VCE_STATUS */
- if (adev->asic_type != CHIP_TOPAZ) {
- tmp = RREG32(mmVCE_STATUS);
- if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_VCE;
- if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
- reset_mask |= AMDGPU_RESET_VCE1;
-
- }
-
- if (adev->asic_type != CHIP_TOPAZ) {
- if (amdgpu_display_is_display_hung(adev))
- reset_mask |= AMDGPU_RESET_DISPLAY;
- }
-#endif
-
- /* Skip MC reset as it's most likely not hung, just busy */
- if (reset_mask & AMDGPU_RESET_MC) {
- DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
- reset_mask &= ~AMDGPU_RESET_MC;
- }
-
- return reset_mask;
-}
-
-/**
- * vi_gpu_soft_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- * @reset_mask: mask of which blocks to reset
- *
- * Soft reset the blocks specified in @reset_mask.
- */
-static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
-{
- struct amdgpu_mode_mc_save save;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- u32 tmp;
-
- if (reset_mask == 0)
- return;
-
- dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);
-
- vi_print_gpu_status_regs(adev);
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
-
- /* disable CG/PG */
-
- /* stop the rlc */
- //XXX
- //gfx_v8_0_rlc_stop(adev);
-
- /* Disable GFX parsing/prefetching */
- tmp = RREG32(mmCP_ME_CNTL);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
- WREG32(mmCP_ME_CNTL, tmp);
-
- /* Disable MEC parsing/prefetching */
- tmp = RREG32(mmCP_MEC_CNTL);
- tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
- WREG32(mmCP_MEC_CNTL, tmp);
-
- if (reset_mask & AMDGPU_RESET_DMA) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
- }
- if (reset_mask & AMDGPU_RESET_DMA1) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
- }
-
- gmc_v8_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
- dev_warn(adev->dev, "Wait for MC idle timedout !\n");
- }
-
- if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
- grbm_soft_reset =
- REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
- grbm_soft_reset =
- REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
- }
-
- if (reset_mask & AMDGPU_RESET_CP) {
- grbm_soft_reset =
- REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
- }
-
- if (reset_mask & AMDGPU_RESET_DMA)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);
-
- if (reset_mask & AMDGPU_RESET_DMA1)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);
-
- if (reset_mask & AMDGPU_RESET_DISPLAY)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);
-
- if (reset_mask & AMDGPU_RESET_RLC)
- grbm_soft_reset =
- REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
-
- if (reset_mask & AMDGPU_RESET_SEM)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
-
- if (reset_mask & AMDGPU_RESET_IH)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);
-
- if (reset_mask & AMDGPU_RESET_GRBM)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
-
- if (reset_mask & AMDGPU_RESET_VMC)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
-
- if (reset_mask & AMDGPU_RESET_UVD)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
-
- if (reset_mask & AMDGPU_RESET_VCE)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
-
- if (reset_mask & AMDGPU_RESET_VCE)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
-
- if (!(adev->flags & AMD_IS_APU)) {
- if (reset_mask & AMDGPU_RESET_MC)
- srbm_soft_reset =
- REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
- }
-
- if (grbm_soft_reset) {
- tmp = RREG32(mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~grbm_soft_reset;
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
- }
-
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
- }
-
- /* Wait a little for things to settle down */
- udelay(50);
-
- gmc_v8_0_mc_resume(adev, &save);
- udelay(50);
-
- vi_print_gpu_status_regs(adev);
-}
-
static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
- struct amdgpu_mode_mc_save save;
- u32 tmp, i;
+ u32 i;
dev_info(adev->dev, "GPU pci config reset\n");
- /* disable dpm? */
-
- /* disable cg/pg */
-
- /* Disable GFX parsing/prefetching */
- tmp = RREG32(mmCP_ME_CNTL);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
- WREG32(mmCP_ME_CNTL, tmp);
-
- /* Disable MEC parsing/prefetching */
- tmp = RREG32(mmCP_MEC_CNTL);
- tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
- WREG32(mmCP_MEC_CNTL, tmp);
-
- /* Disable GFX parsing/prefetching */
- WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
- CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
-
- /* Disable MEC parsing/prefetching */
- WREG32(mmCP_MEC_CNTL,
- CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
-
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-
- /* XXX other engines? */
-
- /* halt the rlc, disable cp internal ints */
- //XXX
- //gfx_v8_0_rlc_stop(adev);
-
- udelay(50);
-
- /* disable mem access */
- gmc_v8_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
- dev_warn(adev->dev, "Wait for MC idle timed out !\n");
- }
-
/* disable BM */
pci_clear_master(adev->pdev);
/* reset */
@@ -978,26 +619,11 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun
*/
static int vi_asic_reset(struct amdgpu_device *adev)
{
- u32 reset_mask;
-
- reset_mask = vi_gpu_check_soft_reset(adev);
-
- if (reset_mask)
- vi_set_bios_scratch_engine_hung(adev, true);
-
- /* try soft reset */
- vi_gpu_soft_reset(adev, reset_mask);
-
- reset_mask = vi_gpu_check_soft_reset(adev);
+ vi_set_bios_scratch_engine_hung(adev, true);
- /* try pci config reset */
- if (reset_mask && amdgpu_hard_reset)
- vi_gpu_pci_config_reset(adev);
+ vi_gpu_pci_config_reset(adev);
- reset_mask = vi_gpu_check_soft_reset(adev);
-
- if (!reset_mask)
- vi_set_bios_scratch_engine_hung(adev, false);
+ vi_set_bios_scratch_engine_hung(adev, false);
return 0;
}
@@ -1347,6 +973,15 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] =
.rev = 0,
.funcs = &vce_v3_0_ip_funcs,
},
+#if defined(CONFIG_DRM_AMD_ACP)
+ {
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+ },
+#endif
};
int vi_set_ip_blocks(struct amdgpu_device *adev)
@@ -1436,26 +1071,22 @@ static int vi_common_early_init(void *handle)
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
case CHIP_TOPAZ:
- adev->has_uvd = false;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = 0x1;
break;
case CHIP_FIJI:
- adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_TONGA:
- adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
- adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1;
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index d98aa9d82fa1..ace49976f7be 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -71,8 +71,6 @@
#define VMID(x) ((x) << 4)
#define QUEUEID(x) ((x) << 8)
-#define RB_BITMAP_WIDTH_PER_SH 2
-
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
#define MC_SEQ_MISC0__MT__DDR2 0x20000000
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index d2b49c026cf6..07ac724e3ec9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -107,7 +107,7 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (iminor(inode) != 0)
return -ENODEV;
- is_32bit_user_mode = is_compat_task();
+ is_32bit_user_mode = in_compat_syscall();
if (is_32bit_user_mode == true) {
dev_warn(kfd_device,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index c34c393e9aea..d5e19b5fbbfb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
union SQ_CMD_BITS *in_reg_sq_cmd,
union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
{
- int status;
+ int status = 0;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct HsaDbgWaveMsgAMDGen2 *pMsg;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index ca8410e8683d..850a5623661f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -59,18 +59,23 @@ module_param(send_sigterm, int, 0444);
MODULE_PARM_DESC(send_sigterm,
"Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
-bool kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
+static int amdkfd_init_completed;
+
+int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
{
+ if (!amdkfd_init_completed)
+ return -EPROBE_DEFER;
+
/*
* Only one interface version is supported,
* no kfd/kgd version skew allowed.
*/
if (interface_version != KFD_INTERFACE_VERSION)
- return false;
+ return -EINVAL;
*g2f = &kgd2kfd;
- return true;
+ return 0;
}
EXPORT_SYMBOL(kgd2kfd_init);
@@ -111,6 +116,8 @@ static int __init kfd_module_init(void)
kfd_process_create_wq();
+ amdkfd_init_completed = 1;
+
dev_info(kfd_device, "Initialized module\n");
return 0;
@@ -125,6 +132,8 @@ err_pasid:
static void __exit kfd_module_exit(void)
{
+ amdkfd_init_completed = 0;
+
kfd_process_destroy_wq();
kfd_topology_shutdown();
kfd_chardev_exit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a902ae037398..ac005796b71c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -311,7 +311,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
goto err_process_pqm_init;
/* init process apertures */
- process->is_32bit_user_mode = is_compat_task();
+ process->is_32bit_user_mode = in_compat_syscall();
if (kfd_init_apertures(process) != 0)
goto err_init_apretures;
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 496360eb3fba..50e893325141 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -340,6 +340,8 @@ struct atcs_pref_req_output {
# define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
# define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
# define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
+# define ATPX_DGPU_CAN_DRIVE_DISPLAYS (1 << 12)
+# define ATPX_MS_HYBRID_GFX_SUPPORTED (1 << 14)
#define ATPX_FUNCTION_POWER_CONTROL 0x2
/* ARG0: ATPX_FUNCTION_POWER_CONTROL
* ARG1:
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index dbf7e6413cab..04e4090666fb 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -73,6 +73,7 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_SDMA,
AMD_IP_BLOCK_TYPE_UVD,
AMD_IP_BLOCK_TYPE_VCE,
+ AMD_IP_BLOCK_TYPE_ACP,
};
enum amd_clockgating_state {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
index dc52ea0df4b4..d3ccf5a86de0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h
@@ -1379,6 +1379,7 @@
#define mmDC_GPIO_PAD_STRENGTH_1 0x1978
#define mmDC_GPIO_PAD_STRENGTH_2 0x1979
#define mmPHY_AUX_CNTL 0x197f
+#define mmDC_GPIO_I2CPAD_MASK 0x1974
#define mmDC_GPIO_I2CPAD_A 0x1975
#define mmDC_GPIO_I2CPAD_EN 0x1976
#define mmDC_GPIO_I2CPAD_Y 0x1977
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h
new file mode 100644
index 000000000000..6bea30ef3df5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_enum.h
@@ -0,0 +1,1117 @@
+/*
+ * DCE_8_0 Register documentation
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_8_0_ENUM_H
+#define DCE_8_0_ENUM_H
+
+typedef enum SurfaceEndian {
+ ENDIAN_NONE = 0x0,
+ ENDIAN_8IN16 = 0x1,
+ ENDIAN_8IN32 = 0x2,
+ ENDIAN_8IN64 = 0x3,
+} SurfaceEndian;
+typedef enum ArrayMode {
+ ARRAY_LINEAR_GENERAL = 0x0,
+ ARRAY_LINEAR_ALIGNED = 0x1,
+ ARRAY_1D_TILED_THIN1 = 0x2,
+ ARRAY_1D_TILED_THICK = 0x3,
+ ARRAY_2D_TILED_THIN1 = 0x4,
+ ARRAY_PRT_TILED_THIN1 = 0x5,
+ ARRAY_PRT_2D_TILED_THIN1 = 0x6,
+ ARRAY_2D_TILED_THICK = 0x7,
+ ARRAY_2D_TILED_XTHICK = 0x8,
+ ARRAY_PRT_TILED_THICK = 0x9,
+ ARRAY_PRT_2D_TILED_THICK = 0xa,
+ ARRAY_PRT_3D_TILED_THIN1 = 0xb,
+ ARRAY_3D_TILED_THIN1 = 0xc,
+ ARRAY_3D_TILED_THICK = 0xd,
+ ARRAY_3D_TILED_XTHICK = 0xe,
+ ARRAY_PRT_3D_TILED_THICK = 0xf,
+} ArrayMode;
+typedef enum PipeTiling {
+ CONFIG_1_PIPE = 0x0,
+ CONFIG_2_PIPE = 0x1,
+ CONFIG_4_PIPE = 0x2,
+ CONFIG_8_PIPE = 0x3,
+} PipeTiling;
+typedef enum BankTiling {
+ CONFIG_4_BANK = 0x0,
+ CONFIG_8_BANK = 0x1,
+} BankTiling;
+typedef enum GroupInterleave {
+ CONFIG_256B_GROUP = 0x0,
+ CONFIG_512B_GROUP = 0x1,
+} GroupInterleave;
+typedef enum RowTiling {
+ CONFIG_1KB_ROW = 0x0,
+ CONFIG_2KB_ROW = 0x1,
+ CONFIG_4KB_ROW = 0x2,
+ CONFIG_8KB_ROW = 0x3,
+ CONFIG_1KB_ROW_OPT = 0x4,
+ CONFIG_2KB_ROW_OPT = 0x5,
+ CONFIG_4KB_ROW_OPT = 0x6,
+ CONFIG_8KB_ROW_OPT = 0x7,
+} RowTiling;
+typedef enum BankSwapBytes {
+ CONFIG_128B_SWAPS = 0x0,
+ CONFIG_256B_SWAPS = 0x1,
+ CONFIG_512B_SWAPS = 0x2,
+ CONFIG_1KB_SWAPS = 0x3,
+} BankSwapBytes;
+typedef enum SampleSplitBytes {
+ CONFIG_1KB_SPLIT = 0x0,
+ CONFIG_2KB_SPLIT = 0x1,
+ CONFIG_4KB_SPLIT = 0x2,
+ CONFIG_8KB_SPLIT = 0x3,
+} SampleSplitBytes;
+typedef enum NumPipes {
+ ADDR_CONFIG_1_PIPE = 0x0,
+ ADDR_CONFIG_2_PIPE = 0x1,
+ ADDR_CONFIG_4_PIPE = 0x2,
+ ADDR_CONFIG_8_PIPE = 0x3,
+} NumPipes;
+typedef enum PipeInterleaveSize {
+ ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0,
+ ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1,
+} PipeInterleaveSize;
+typedef enum BankInterleaveSize {
+ ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0,
+ ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1,
+ ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2,
+ ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3,
+} BankInterleaveSize;
+typedef enum NumShaderEngines {
+ ADDR_CONFIG_1_SHADER_ENGINE = 0x0,
+ ADDR_CONFIG_2_SHADER_ENGINE = 0x1,
+} NumShaderEngines;
+typedef enum ShaderEngineTileSize {
+ ADDR_CONFIG_SE_TILE_16 = 0x0,
+ ADDR_CONFIG_SE_TILE_32 = 0x1,
+} ShaderEngineTileSize;
+typedef enum NumGPUs {
+ ADDR_CONFIG_1_GPU = 0x0,
+ ADDR_CONFIG_2_GPU = 0x1,
+ ADDR_CONFIG_4_GPU = 0x2,
+} NumGPUs;
+typedef enum MultiGPUTileSize {
+ ADDR_CONFIG_GPU_TILE_16 = 0x0,
+ ADDR_CONFIG_GPU_TILE_32 = 0x1,
+ ADDR_CONFIG_GPU_TILE_64 = 0x2,
+ ADDR_CONFIG_GPU_TILE_128 = 0x3,
+} MultiGPUTileSize;
+typedef enum RowSize {
+ ADDR_CONFIG_1KB_ROW = 0x0,
+ ADDR_CONFIG_2KB_ROW = 0x1,
+ ADDR_CONFIG_4KB_ROW = 0x2,
+} RowSize;
+typedef enum NumLowerPipes {
+ ADDR_CONFIG_1_LOWER_PIPES = 0x0,
+ ADDR_CONFIG_2_LOWER_PIPES = 0x1,
+} NumLowerPipes;
+typedef enum DebugBlockId {
+ DBG_CLIENT_BLKID_RESERVED = 0x0,
+ DBG_CLIENT_BLKID_dbg = 0x1,
+ DBG_CLIENT_BLKID_uvdu_0 = 0x2,
+ DBG_CLIENT_BLKID_uvdu_1 = 0x3,
+ DBG_CLIENT_BLKID_uvdu_2 = 0x4,
+ DBG_CLIENT_BLKID_uvdu_3 = 0x5,
+ DBG_CLIENT_BLKID_uvdu_4 = 0x6,
+ DBG_CLIENT_BLKID_uvdu_5 = 0x7,
+ DBG_CLIENT_BLKID_uvdu_6 = 0x8,
+ DBG_CLIENT_BLKID_uvdm_0 = 0x9,
+ DBG_CLIENT_BLKID_uvdm_1 = 0xa,
+ DBG_CLIENT_BLKID_uvdm_2 = 0xb,
+ DBG_CLIENT_BLKID_uvdm_3 = 0xc,
+ DBG_CLIENT_BLKID_vcea_0 = 0xd,
+ DBG_CLIENT_BLKID_vcea_1 = 0xe,
+ DBG_CLIENT_BLKID_vcea_2 = 0xf,
+ DBG_CLIENT_BLKID_vcea_3 = 0x10,
+ DBG_CLIENT_BLKID_vcea_4 = 0x11,
+ DBG_CLIENT_BLKID_vcea_5 = 0x12,
+ DBG_CLIENT_BLKID_vcea_6 = 0x13,
+ DBG_CLIENT_BLKID_vceb_0 = 0x14,
+ DBG_CLIENT_BLKID_vceb_1 = 0x15,
+ DBG_CLIENT_BLKID_vceb_2 = 0x16,
+ DBG_CLIENT_BLKID_dco = 0x17,
+ DBG_CLIENT_BLKID_xdma = 0x18,
+ DBG_CLIENT_BLKID_smu_0 = 0x19,
+ DBG_CLIENT_BLKID_smu_1 = 0x1a,
+ DBG_CLIENT_BLKID_smu_2 = 0x1b,
+ DBG_CLIENT_BLKID_gck = 0x1c,
+ DBG_CLIENT_BLKID_tmonw0 = 0x1d,
+ DBG_CLIENT_BLKID_tmonw1 = 0x1e,
+ DBG_CLIENT_BLKID_grbm = 0x1f,
+ DBG_CLIENT_BLKID_rlc = 0x20,
+ DBG_CLIENT_BLKID_ds0 = 0x21,
+ DBG_CLIENT_BLKID_cpg_0 = 0x22,
+ DBG_CLIENT_BLKID_cpg_1 = 0x23,
+ DBG_CLIENT_BLKID_cpc_0 = 0x24,
+ DBG_CLIENT_BLKID_cpc_1 = 0x25,
+ DBG_CLIENT_BLKID_cpf = 0x26,
+ DBG_CLIENT_BLKID_scf0 = 0x27,
+ DBG_CLIENT_BLKID_scf1 = 0x28,
+ DBG_CLIENT_BLKID_scf2 = 0x29,
+ DBG_CLIENT_BLKID_scf3 = 0x2a,
+ DBG_CLIENT_BLKID_pc0 = 0x2b,
+ DBG_CLIENT_BLKID_pc1 = 0x2c,
+ DBG_CLIENT_BLKID_pc2 = 0x2d,
+ DBG_CLIENT_BLKID_pc3 = 0x2e,
+ DBG_CLIENT_BLKID_vgt0 = 0x2f,
+ DBG_CLIENT_BLKID_vgt1 = 0x30,
+ DBG_CLIENT_BLKID_vgt2 = 0x31,
+ DBG_CLIENT_BLKID_vgt3 = 0x32,
+ DBG_CLIENT_BLKID_sx00 = 0x33,
+ DBG_CLIENT_BLKID_sx10 = 0x34,
+ DBG_CLIENT_BLKID_sx20 = 0x35,
+ DBG_CLIENT_BLKID_sx30 = 0x36,
+ DBG_CLIENT_BLKID_cb001 = 0x37,
+ DBG_CLIENT_BLKID_cb200 = 0x38,
+ DBG_CLIENT_BLKID_cb201 = 0x39,
+ DBG_CLIENT_BLKID_cbr0 = 0x3a,
+ DBG_CLIENT_BLKID_cb000 = 0x3b,
+ DBG_CLIENT_BLKID_cb101 = 0x3c,
+ DBG_CLIENT_BLKID_cb300 = 0x3d,
+ DBG_CLIENT_BLKID_cb301 = 0x3e,
+ DBG_CLIENT_BLKID_cbr1 = 0x3f,
+ DBG_CLIENT_BLKID_cb100 = 0x40,
+ DBG_CLIENT_BLKID_ia0 = 0x41,
+ DBG_CLIENT_BLKID_ia1 = 0x42,
+ DBG_CLIENT_BLKID_bci0 = 0x43,
+ DBG_CLIENT_BLKID_bci1 = 0x44,
+ DBG_CLIENT_BLKID_bci2 = 0x45,
+ DBG_CLIENT_BLKID_bci3 = 0x46,
+ DBG_CLIENT_BLKID_pa0 = 0x47,
+ DBG_CLIENT_BLKID_pa1 = 0x48,
+ DBG_CLIENT_BLKID_spim0 = 0x49,
+ DBG_CLIENT_BLKID_spim1 = 0x4a,
+ DBG_CLIENT_BLKID_spim2 = 0x4b,
+ DBG_CLIENT_BLKID_spim3 = 0x4c,
+ DBG_CLIENT_BLKID_sdma = 0x4d,
+ DBG_CLIENT_BLKID_ih = 0x4e,
+ DBG_CLIENT_BLKID_sem = 0x4f,
+ DBG_CLIENT_BLKID_srbm = 0x50,
+ DBG_CLIENT_BLKID_hdp = 0x51,
+ DBG_CLIENT_BLKID_acp_0 = 0x52,
+ DBG_CLIENT_BLKID_acp_1 = 0x53,
+ DBG_CLIENT_BLKID_sam = 0x54,
+ DBG_CLIENT_BLKID_mcc0 = 0x55,
+ DBG_CLIENT_BLKID_mcc1 = 0x56,
+ DBG_CLIENT_BLKID_mcc2 = 0x57,
+ DBG_CLIENT_BLKID_mcc3 = 0x58,
+ DBG_CLIENT_BLKID_mcd0 = 0x59,
+ DBG_CLIENT_BLKID_mcd1 = 0x5a,
+ DBG_CLIENT_BLKID_mcd2 = 0x5b,
+ DBG_CLIENT_BLKID_mcd3 = 0x5c,
+ DBG_CLIENT_BLKID_mcb = 0x5d,
+ DBG_CLIENT_BLKID_vmc = 0x5e,
+ DBG_CLIENT_BLKID_gmcon = 0x5f,
+ DBG_CLIENT_BLKID_gdc_0 = 0x60,
+ DBG_CLIENT_BLKID_gdc_1 = 0x61,
+ DBG_CLIENT_BLKID_gdc_2 = 0x62,
+ DBG_CLIENT_BLKID_gdc_3 = 0x63,
+ DBG_CLIENT_BLKID_gdc_4 = 0x64,
+ DBG_CLIENT_BLKID_gdc_5 = 0x65,
+ DBG_CLIENT_BLKID_gdc_6 = 0x66,
+ DBG_CLIENT_BLKID_gdc_7 = 0x67,
+ DBG_CLIENT_BLKID_gdc_8 = 0x68,
+ DBG_CLIENT_BLKID_gdc_9 = 0x69,
+ DBG_CLIENT_BLKID_gdc_10 = 0x6a,
+ DBG_CLIENT_BLKID_gdc_11 = 0x6b,
+ DBG_CLIENT_BLKID_gdc_12 = 0x6c,
+ DBG_CLIENT_BLKID_gdc_13 = 0x6d,
+ DBG_CLIENT_BLKID_gdc_14 = 0x6e,
+ DBG_CLIENT_BLKID_gdc_15 = 0x6f,
+ DBG_CLIENT_BLKID_gdc_16 = 0x70,
+ DBG_CLIENT_BLKID_gdc_17 = 0x71,
+ DBG_CLIENT_BLKID_gdc_18 = 0x72,
+ DBG_CLIENT_BLKID_gdc_19 = 0x73,
+ DBG_CLIENT_BLKID_gdc_20 = 0x74,
+ DBG_CLIENT_BLKID_gdc_21 = 0x75,
+ DBG_CLIENT_BLKID_gdc_22 = 0x76,
+ DBG_CLIENT_BLKID_wd = 0x77,
+ DBG_CLIENT_BLKID_sdma_0 = 0x78,
+ DBG_CLIENT_BLKID_sdma_1 = 0x79,
+} DebugBlockId;
+typedef enum DebugBlockId_OLD {
+ DBG_BLOCK_ID_RESERVED = 0x0,
+ DBG_BLOCK_ID_DBG = 0x1,
+ DBG_BLOCK_ID_VMC = 0x2,
+ DBG_BLOCK_ID_PDMA = 0x3,
+ DBG_BLOCK_ID_CG = 0x4,
+ DBG_BLOCK_ID_SRBM = 0x5,
+ DBG_BLOCK_ID_GRBM = 0x6,
+ DBG_BLOCK_ID_RLC = 0x7,
+ DBG_BLOCK_ID_CSC = 0x8,
+ DBG_BLOCK_ID_SEM = 0x9,
+ DBG_BLOCK_ID_IH = 0xa,
+ DBG_BLOCK_ID_SC = 0xb,
+ DBG_BLOCK_ID_SQ = 0xc,
+ DBG_BLOCK_ID_AVP = 0xd,
+ DBG_BLOCK_ID_GMCON = 0xe,
+ DBG_BLOCK_ID_SMU = 0xf,
+ DBG_BLOCK_ID_DMA0 = 0x10,
+ DBG_BLOCK_ID_DMA1 = 0x11,
+ DBG_BLOCK_ID_SPIM = 0x12,
+ DBG_BLOCK_ID_GDS = 0x13,
+ DBG_BLOCK_ID_SPIS = 0x14,
+ DBG_BLOCK_ID_UNUSED0 = 0x15,
+ DBG_BLOCK_ID_PA0 = 0x16,
+ DBG_BLOCK_ID_PA1 = 0x17,
+ DBG_BLOCK_ID_CP0 = 0x18,
+ DBG_BLOCK_ID_CP1 = 0x19,
+ DBG_BLOCK_ID_CP2 = 0x1a,
+ DBG_BLOCK_ID_UNUSED1 = 0x1b,
+ DBG_BLOCK_ID_UVDU = 0x1c,
+ DBG_BLOCK_ID_UVDM = 0x1d,
+ DBG_BLOCK_ID_VCE = 0x1e,
+ DBG_BLOCK_ID_UNUSED2 = 0x1f,
+ DBG_BLOCK_ID_VGT0 = 0x20,
+ DBG_BLOCK_ID_VGT1 = 0x21,
+ DBG_BLOCK_ID_IA = 0x22,
+ DBG_BLOCK_ID_UNUSED3 = 0x23,
+ DBG_BLOCK_ID_SCT0 = 0x24,
+ DBG_BLOCK_ID_SCT1 = 0x25,
+ DBG_BLOCK_ID_SPM0 = 0x26,
+ DBG_BLOCK_ID_SPM1 = 0x27,
+ DBG_BLOCK_ID_TCAA = 0x28,
+ DBG_BLOCK_ID_TCAB = 0x29,
+ DBG_BLOCK_ID_TCCA = 0x2a,
+ DBG_BLOCK_ID_TCCB = 0x2b,
+ DBG_BLOCK_ID_MCC0 = 0x2c,
+ DBG_BLOCK_ID_MCC1 = 0x2d,
+ DBG_BLOCK_ID_MCC2 = 0x2e,
+ DBG_BLOCK_ID_MCC3 = 0x2f,
+ DBG_BLOCK_ID_SX0 = 0x30,
+ DBG_BLOCK_ID_SX1 = 0x31,
+ DBG_BLOCK_ID_SX2 = 0x32,
+ DBG_BLOCK_ID_SX3 = 0x33,
+ DBG_BLOCK_ID_UNUSED4 = 0x34,
+ DBG_BLOCK_ID_UNUSED5 = 0x35,
+ DBG_BLOCK_ID_UNUSED6 = 0x36,
+ DBG_BLOCK_ID_UNUSED7 = 0x37,
+ DBG_BLOCK_ID_PC0 = 0x38,
+ DBG_BLOCK_ID_PC1 = 0x39,
+ DBG_BLOCK_ID_UNUSED8 = 0x3a,
+ DBG_BLOCK_ID_UNUSED9 = 0x3b,
+ DBG_BLOCK_ID_UNUSED10 = 0x3c,
+ DBG_BLOCK_ID_UNUSED11 = 0x3d,
+ DBG_BLOCK_ID_MCB = 0x3e,
+ DBG_BLOCK_ID_UNUSED12 = 0x3f,
+ DBG_BLOCK_ID_SCB0 = 0x40,
+ DBG_BLOCK_ID_SCB1 = 0x41,
+ DBG_BLOCK_ID_UNUSED13 = 0x42,
+ DBG_BLOCK_ID_UNUSED14 = 0x43,
+ DBG_BLOCK_ID_SCF0 = 0x44,
+ DBG_BLOCK_ID_SCF1 = 0x45,
+ DBG_BLOCK_ID_UNUSED15 = 0x46,
+ DBG_BLOCK_ID_UNUSED16 = 0x47,
+ DBG_BLOCK_ID_BCI0 = 0x48,
+ DBG_BLOCK_ID_BCI1 = 0x49,
+ DBG_BLOCK_ID_BCI2 = 0x4a,
+ DBG_BLOCK_ID_BCI3 = 0x4b,
+ DBG_BLOCK_ID_UNUSED17 = 0x4c,
+ DBG_BLOCK_ID_UNUSED18 = 0x4d,
+ DBG_BLOCK_ID_UNUSED19 = 0x4e,
+ DBG_BLOCK_ID_UNUSED20 = 0x4f,
+ DBG_BLOCK_ID_CB00 = 0x50,
+ DBG_BLOCK_ID_CB01 = 0x51,
+ DBG_BLOCK_ID_CB02 = 0x52,
+ DBG_BLOCK_ID_CB03 = 0x53,
+ DBG_BLOCK_ID_CB04 = 0x54,
+ DBG_BLOCK_ID_UNUSED21 = 0x55,
+ DBG_BLOCK_ID_UNUSED22 = 0x56,
+ DBG_BLOCK_ID_UNUSED23 = 0x57,
+ DBG_BLOCK_ID_CB10 = 0x58,
+ DBG_BLOCK_ID_CB11 = 0x59,
+ DBG_BLOCK_ID_CB12 = 0x5a,
+ DBG_BLOCK_ID_CB13 = 0x5b,
+ DBG_BLOCK_ID_CB14 = 0x5c,
+ DBG_BLOCK_ID_UNUSED24 = 0x5d,
+ DBG_BLOCK_ID_UNUSED25 = 0x5e,
+ DBG_BLOCK_ID_UNUSED26 = 0x5f,
+ DBG_BLOCK_ID_TCP0 = 0x60,
+ DBG_BLOCK_ID_TCP1 = 0x61,
+ DBG_BLOCK_ID_TCP2 = 0x62,
+ DBG_BLOCK_ID_TCP3 = 0x63,
+ DBG_BLOCK_ID_TCP4 = 0x64,
+ DBG_BLOCK_ID_TCP5 = 0x65,
+ DBG_BLOCK_ID_TCP6 = 0x66,
+ DBG_BLOCK_ID_TCP7 = 0x67,
+ DBG_BLOCK_ID_TCP8 = 0x68,
+ DBG_BLOCK_ID_TCP9 = 0x69,
+ DBG_BLOCK_ID_TCP10 = 0x6a,
+ DBG_BLOCK_ID_TCP11 = 0x6b,
+ DBG_BLOCK_ID_TCP12 = 0x6c,
+ DBG_BLOCK_ID_TCP13 = 0x6d,
+ DBG_BLOCK_ID_TCP14 = 0x6e,
+ DBG_BLOCK_ID_TCP15 = 0x6f,
+ DBG_BLOCK_ID_TCP16 = 0x70,
+ DBG_BLOCK_ID_TCP17 = 0x71,
+ DBG_BLOCK_ID_TCP18 = 0x72,
+ DBG_BLOCK_ID_TCP19 = 0x73,
+ DBG_BLOCK_ID_TCP20 = 0x74,
+ DBG_BLOCK_ID_TCP21 = 0x75,
+ DBG_BLOCK_ID_TCP22 = 0x76,
+ DBG_BLOCK_ID_TCP23 = 0x77,
+ DBG_BLOCK_ID_TCP_RESERVED0 = 0x78,
+ DBG_BLOCK_ID_TCP_RESERVED1 = 0x79,
+ DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a,
+ DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b,
+ DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c,
+ DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d,
+ DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e,
+ DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f,
+ DBG_BLOCK_ID_DB00 = 0x80,
+ DBG_BLOCK_ID_DB01 = 0x81,
+ DBG_BLOCK_ID_DB02 = 0x82,
+ DBG_BLOCK_ID_DB03 = 0x83,
+ DBG_BLOCK_ID_DB04 = 0x84,
+ DBG_BLOCK_ID_UNUSED27 = 0x85,
+ DBG_BLOCK_ID_UNUSED28 = 0x86,
+ DBG_BLOCK_ID_UNUSED29 = 0x87,
+ DBG_BLOCK_ID_DB10 = 0x88,
+ DBG_BLOCK_ID_DB11 = 0x89,
+ DBG_BLOCK_ID_DB12 = 0x8a,
+ DBG_BLOCK_ID_DB13 = 0x8b,
+ DBG_BLOCK_ID_DB14 = 0x8c,
+ DBG_BLOCK_ID_UNUSED30 = 0x8d,
+ DBG_BLOCK_ID_UNUSED31 = 0x8e,
+ DBG_BLOCK_ID_UNUSED32 = 0x8f,
+ DBG_BLOCK_ID_TCC0 = 0x90,
+ DBG_BLOCK_ID_TCC1 = 0x91,
+ DBG_BLOCK_ID_TCC2 = 0x92,
+ DBG_BLOCK_ID_TCC3 = 0x93,
+ DBG_BLOCK_ID_TCC4 = 0x94,
+ DBG_BLOCK_ID_TCC5 = 0x95,
+ DBG_BLOCK_ID_TCC6 = 0x96,
+ DBG_BLOCK_ID_TCC7 = 0x97,
+ DBG_BLOCK_ID_SPS00 = 0x98,
+ DBG_BLOCK_ID_SPS01 = 0x99,
+ DBG_BLOCK_ID_SPS02 = 0x9a,
+ DBG_BLOCK_ID_SPS10 = 0x9b,
+ DBG_BLOCK_ID_SPS11 = 0x9c,
+ DBG_BLOCK_ID_SPS12 = 0x9d,
+ DBG_BLOCK_ID_UNUSED33 = 0x9e,
+ DBG_BLOCK_ID_UNUSED34 = 0x9f,
+ DBG_BLOCK_ID_TA00 = 0xa0,
+ DBG_BLOCK_ID_TA01 = 0xa1,
+ DBG_BLOCK_ID_TA02 = 0xa2,
+ DBG_BLOCK_ID_TA03 = 0xa3,
+ DBG_BLOCK_ID_TA04 = 0xa4,
+ DBG_BLOCK_ID_TA05 = 0xa5,
+ DBG_BLOCK_ID_TA06 = 0xa6,
+ DBG_BLOCK_ID_TA07 = 0xa7,
+ DBG_BLOCK_ID_TA08 = 0xa8,
+ DBG_BLOCK_ID_TA09 = 0xa9,
+ DBG_BLOCK_ID_TA0A = 0xaa,
+ DBG_BLOCK_ID_TA0B = 0xab,
+ DBG_BLOCK_ID_UNUSED35 = 0xac,
+ DBG_BLOCK_ID_UNUSED36 = 0xad,
+ DBG_BLOCK_ID_UNUSED37 = 0xae,
+ DBG_BLOCK_ID_UNUSED38 = 0xaf,
+ DBG_BLOCK_ID_TA10 = 0xb0,
+ DBG_BLOCK_ID_TA11 = 0xb1,
+ DBG_BLOCK_ID_TA12 = 0xb2,
+ DBG_BLOCK_ID_TA13 = 0xb3,
+ DBG_BLOCK_ID_TA14 = 0xb4,
+ DBG_BLOCK_ID_TA15 = 0xb5,
+ DBG_BLOCK_ID_TA16 = 0xb6,
+ DBG_BLOCK_ID_TA17 = 0xb7,
+ DBG_BLOCK_ID_TA18 = 0xb8,
+ DBG_BLOCK_ID_TA19 = 0xb9,
+ DBG_BLOCK_ID_TA1A = 0xba,
+ DBG_BLOCK_ID_TA1B = 0xbb,
+ DBG_BLOCK_ID_UNUSED39 = 0xbc,
+ DBG_BLOCK_ID_UNUSED40 = 0xbd,
+ DBG_BLOCK_ID_UNUSED41 = 0xbe,
+ DBG_BLOCK_ID_UNUSED42 = 0xbf,
+ DBG_BLOCK_ID_TD00 = 0xc0,
+ DBG_BLOCK_ID_TD01 = 0xc1,
+ DBG_BLOCK_ID_TD02 = 0xc2,
+ DBG_BLOCK_ID_TD03 = 0xc3,
+ DBG_BLOCK_ID_TD04 = 0xc4,
+ DBG_BLOCK_ID_TD05 = 0xc5,
+ DBG_BLOCK_ID_TD06 = 0xc6,
+ DBG_BLOCK_ID_TD07 = 0xc7,
+ DBG_BLOCK_ID_TD08 = 0xc8,
+ DBG_BLOCK_ID_TD09 = 0xc9,
+ DBG_BLOCK_ID_TD0A = 0xca,
+ DBG_BLOCK_ID_TD0B = 0xcb,
+ DBG_BLOCK_ID_UNUSED43 = 0xcc,
+ DBG_BLOCK_ID_UNUSED44 = 0xcd,
+ DBG_BLOCK_ID_UNUSED45 = 0xce,
+ DBG_BLOCK_ID_UNUSED46 = 0xcf,
+ DBG_BLOCK_ID_TD10 = 0xd0,
+ DBG_BLOCK_ID_TD11 = 0xd1,
+ DBG_BLOCK_ID_TD12 = 0xd2,
+ DBG_BLOCK_ID_TD13 = 0xd3,
+ DBG_BLOCK_ID_TD14 = 0xd4,
+ DBG_BLOCK_ID_TD15 = 0xd5,
+ DBG_BLOCK_ID_TD16 = 0xd6,
+ DBG_BLOCK_ID_TD17 = 0xd7,
+ DBG_BLOCK_ID_TD18 = 0xd8,
+ DBG_BLOCK_ID_TD19 = 0xd9,
+ DBG_BLOCK_ID_TD1A = 0xda,
+ DBG_BLOCK_ID_TD1B = 0xdb,
+ DBG_BLOCK_ID_UNUSED47 = 0xdc,
+ DBG_BLOCK_ID_UNUSED48 = 0xdd,
+ DBG_BLOCK_ID_UNUSED49 = 0xde,
+ DBG_BLOCK_ID_UNUSED50 = 0xdf,
+ DBG_BLOCK_ID_MCD0 = 0xe0,
+ DBG_BLOCK_ID_MCD1 = 0xe1,
+ DBG_BLOCK_ID_MCD2 = 0xe2,
+ DBG_BLOCK_ID_MCD3 = 0xe3,
+ DBG_BLOCK_ID_MCD4 = 0xe4,
+ DBG_BLOCK_ID_MCD5 = 0xe5,
+ DBG_BLOCK_ID_UNUSED51 = 0xe6,
+ DBG_BLOCK_ID_UNUSED52 = 0xe7,
+} DebugBlockId_OLD;
+typedef enum DebugBlockId_BY2 {
+ DBG_BLOCK_ID_RESERVED_BY2 = 0x0,
+ DBG_BLOCK_ID_VMC_BY2 = 0x1,
+ DBG_BLOCK_ID_CG_BY2 = 0x2,
+ DBG_BLOCK_ID_GRBM_BY2 = 0x3,
+ DBG_BLOCK_ID_CSC_BY2 = 0x4,
+ DBG_BLOCK_ID_IH_BY2 = 0x5,
+ DBG_BLOCK_ID_SQ_BY2 = 0x6,
+ DBG_BLOCK_ID_GMCON_BY2 = 0x7,
+ DBG_BLOCK_ID_DMA0_BY2 = 0x8,
+ DBG_BLOCK_ID_SPIM_BY2 = 0x9,
+ DBG_BLOCK_ID_SPIS_BY2 = 0xa,
+ DBG_BLOCK_ID_PA0_BY2 = 0xb,
+ DBG_BLOCK_ID_CP0_BY2 = 0xc,
+ DBG_BLOCK_ID_CP2_BY2 = 0xd,
+ DBG_BLOCK_ID_UVDU_BY2 = 0xe,
+ DBG_BLOCK_ID_VCE_BY2 = 0xf,
+ DBG_BLOCK_ID_VGT0_BY2 = 0x10,
+ DBG_BLOCK_ID_IA_BY2 = 0x11,
+ DBG_BLOCK_ID_SCT0_BY2 = 0x12,
+ DBG_BLOCK_ID_SPM0_BY2 = 0x13,
+ DBG_BLOCK_ID_TCAA_BY2 = 0x14,
+ DBG_BLOCK_ID_TCCA_BY2 = 0x15,
+ DBG_BLOCK_ID_MCC0_BY2 = 0x16,
+ DBG_BLOCK_ID_MCC2_BY2 = 0x17,
+ DBG_BLOCK_ID_SX0_BY2 = 0x18,
+ DBG_BLOCK_ID_SX2_BY2 = 0x19,
+ DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a,
+ DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b,
+ DBG_BLOCK_ID_PC0_BY2 = 0x1c,
+ DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d,
+ DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e,
+ DBG_BLOCK_ID_MCB_BY2 = 0x1f,
+ DBG_BLOCK_ID_SCB0_BY2 = 0x20,
+ DBG_BLOCK_ID_UNUSED13_BY2 = 0x21,
+ DBG_BLOCK_ID_SCF0_BY2 = 0x22,
+ DBG_BLOCK_ID_UNUSED15_BY2 = 0x23,
+ DBG_BLOCK_ID_BCI0_BY2 = 0x24,
+ DBG_BLOCK_ID_BCI2_BY2 = 0x25,
+ DBG_BLOCK_ID_UNUSED17_BY2 = 0x26,
+ DBG_BLOCK_ID_UNUSED19_BY2 = 0x27,
+ DBG_BLOCK_ID_CB00_BY2 = 0x28,
+ DBG_BLOCK_ID_CB02_BY2 = 0x29,
+ DBG_BLOCK_ID_CB04_BY2 = 0x2a,
+ DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b,
+ DBG_BLOCK_ID_CB10_BY2 = 0x2c,
+ DBG_BLOCK_ID_CB12_BY2 = 0x2d,
+ DBG_BLOCK_ID_CB14_BY2 = 0x2e,
+ DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f,
+ DBG_BLOCK_ID_TCP0_BY2 = 0x30,
+ DBG_BLOCK_ID_TCP2_BY2 = 0x31,
+ DBG_BLOCK_ID_TCP4_BY2 = 0x32,
+ DBG_BLOCK_ID_TCP6_BY2 = 0x33,
+ DBG_BLOCK_ID_TCP8_BY2 = 0x34,
+ DBG_BLOCK_ID_TCP10_BY2 = 0x35,
+ DBG_BLOCK_ID_TCP12_BY2 = 0x36,
+ DBG_BLOCK_ID_TCP14_BY2 = 0x37,
+ DBG_BLOCK_ID_TCP16_BY2 = 0x38,
+ DBG_BLOCK_ID_TCP18_BY2 = 0x39,
+ DBG_BLOCK_ID_TCP20_BY2 = 0x3a,
+ DBG_BLOCK_ID_TCP22_BY2 = 0x3b,
+ DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c,
+ DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d,
+ DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e,
+ DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f,
+ DBG_BLOCK_ID_DB00_BY2 = 0x40,
+ DBG_BLOCK_ID_DB02_BY2 = 0x41,
+ DBG_BLOCK_ID_DB04_BY2 = 0x42,
+ DBG_BLOCK_ID_UNUSED28_BY2 = 0x43,
+ DBG_BLOCK_ID_DB10_BY2 = 0x44,
+ DBG_BLOCK_ID_DB12_BY2 = 0x45,
+ DBG_BLOCK_ID_DB14_BY2 = 0x46,
+ DBG_BLOCK_ID_UNUSED31_BY2 = 0x47,
+ DBG_BLOCK_ID_TCC0_BY2 = 0x48,
+ DBG_BLOCK_ID_TCC2_BY2 = 0x49,
+ DBG_BLOCK_ID_TCC4_BY2 = 0x4a,
+ DBG_BLOCK_ID_TCC6_BY2 = 0x4b,
+ DBG_BLOCK_ID_SPS00_BY2 = 0x4c,
+ DBG_BLOCK_ID_SPS02_BY2 = 0x4d,
+ DBG_BLOCK_ID_SPS11_BY2 = 0x4e,
+ DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f,
+ DBG_BLOCK_ID_TA00_BY2 = 0x50,
+ DBG_BLOCK_ID_TA02_BY2 = 0x51,
+ DBG_BLOCK_ID_TA04_BY2 = 0x52,
+ DBG_BLOCK_ID_TA06_BY2 = 0x53,
+ DBG_BLOCK_ID_TA08_BY2 = 0x54,
+ DBG_BLOCK_ID_TA0A_BY2 = 0x55,
+ DBG_BLOCK_ID_UNUSED35_BY2 = 0x56,
+ DBG_BLOCK_ID_UNUSED37_BY2 = 0x57,
+ DBG_BLOCK_ID_TA10_BY2 = 0x58,
+ DBG_BLOCK_ID_TA12_BY2 = 0x59,
+ DBG_BLOCK_ID_TA14_BY2 = 0x5a,
+ DBG_BLOCK_ID_TA16_BY2 = 0x5b,
+ DBG_BLOCK_ID_TA18_BY2 = 0x5c,
+ DBG_BLOCK_ID_TA1A_BY2 = 0x5d,
+ DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e,
+ DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f,
+ DBG_BLOCK_ID_TD00_BY2 = 0x60,
+ DBG_BLOCK_ID_TD02_BY2 = 0x61,
+ DBG_BLOCK_ID_TD04_BY2 = 0x62,
+ DBG_BLOCK_ID_TD06_BY2 = 0x63,
+ DBG_BLOCK_ID_TD08_BY2 = 0x64,
+ DBG_BLOCK_ID_TD0A_BY2 = 0x65,
+ DBG_BLOCK_ID_UNUSED43_BY2 = 0x66,
+ DBG_BLOCK_ID_UNUSED45_BY2 = 0x67,
+ DBG_BLOCK_ID_TD10_BY2 = 0x68,
+ DBG_BLOCK_ID_TD12_BY2 = 0x69,
+ DBG_BLOCK_ID_TD14_BY2 = 0x6a,
+ DBG_BLOCK_ID_TD16_BY2 = 0x6b,
+ DBG_BLOCK_ID_TD18_BY2 = 0x6c,
+ DBG_BLOCK_ID_TD1A_BY2 = 0x6d,
+ DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e,
+ DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f,
+ DBG_BLOCK_ID_MCD0_BY2 = 0x70,
+ DBG_BLOCK_ID_MCD2_BY2 = 0x71,
+ DBG_BLOCK_ID_MCD4_BY2 = 0x72,
+ DBG_BLOCK_ID_UNUSED51_BY2 = 0x73,
+} DebugBlockId_BY2;
+typedef enum DebugBlockId_BY4 {
+ DBG_BLOCK_ID_RESERVED_BY4 = 0x0,
+ DBG_BLOCK_ID_CG_BY4 = 0x1,
+ DBG_BLOCK_ID_CSC_BY4 = 0x2,
+ DBG_BLOCK_ID_SQ_BY4 = 0x3,
+ DBG_BLOCK_ID_DMA0_BY4 = 0x4,
+ DBG_BLOCK_ID_SPIS_BY4 = 0x5,
+ DBG_BLOCK_ID_CP0_BY4 = 0x6,
+ DBG_BLOCK_ID_UVDU_BY4 = 0x7,
+ DBG_BLOCK_ID_VGT0_BY4 = 0x8,
+ DBG_BLOCK_ID_SCT0_BY4 = 0x9,
+ DBG_BLOCK_ID_TCAA_BY4 = 0xa,
+ DBG_BLOCK_ID_MCC0_BY4 = 0xb,
+ DBG_BLOCK_ID_SX0_BY4 = 0xc,
+ DBG_BLOCK_ID_UNUSED4_BY4 = 0xd,
+ DBG_BLOCK_ID_PC0_BY4 = 0xe,
+ DBG_BLOCK_ID_UNUSED10_BY4 = 0xf,
+ DBG_BLOCK_ID_SCB0_BY4 = 0x10,
+ DBG_BLOCK_ID_SCF0_BY4 = 0x11,
+ DBG_BLOCK_ID_BCI0_BY4 = 0x12,
+ DBG_BLOCK_ID_UNUSED17_BY4 = 0x13,
+ DBG_BLOCK_ID_CB00_BY4 = 0x14,
+ DBG_BLOCK_ID_CB04_BY4 = 0x15,
+ DBG_BLOCK_ID_CB10_BY4 = 0x16,
+ DBG_BLOCK_ID_CB14_BY4 = 0x17,
+ DBG_BLOCK_ID_TCP0_BY4 = 0x18,
+ DBG_BLOCK_ID_TCP4_BY4 = 0x19,
+ DBG_BLOCK_ID_TCP8_BY4 = 0x1a,
+ DBG_BLOCK_ID_TCP12_BY4 = 0x1b,
+ DBG_BLOCK_ID_TCP16_BY4 = 0x1c,
+ DBG_BLOCK_ID_TCP20_BY4 = 0x1d,
+ DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e,
+ DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f,
+ DBG_BLOCK_ID_DB_BY4 = 0x20,
+ DBG_BLOCK_ID_DB04_BY4 = 0x21,
+ DBG_BLOCK_ID_DB10_BY4 = 0x22,
+ DBG_BLOCK_ID_DB14_BY4 = 0x23,
+ DBG_BLOCK_ID_TCC0_BY4 = 0x24,
+ DBG_BLOCK_ID_TCC4_BY4 = 0x25,
+ DBG_BLOCK_ID_SPS00_BY4 = 0x26,
+ DBG_BLOCK_ID_SPS11_BY4 = 0x27,
+ DBG_BLOCK_ID_TA00_BY4 = 0x28,
+ DBG_BLOCK_ID_TA04_BY4 = 0x29,
+ DBG_BLOCK_ID_TA08_BY4 = 0x2a,
+ DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b,
+ DBG_BLOCK_ID_TA10_BY4 = 0x2c,
+ DBG_BLOCK_ID_TA14_BY4 = 0x2d,
+ DBG_BLOCK_ID_TA18_BY4 = 0x2e,
+ DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f,
+ DBG_BLOCK_ID_TD00_BY4 = 0x30,
+ DBG_BLOCK_ID_TD04_BY4 = 0x31,
+ DBG_BLOCK_ID_TD08_BY4 = 0x32,
+ DBG_BLOCK_ID_UNUSED43_BY4 = 0x33,
+ DBG_BLOCK_ID_TD10_BY4 = 0x34,
+ DBG_BLOCK_ID_TD14_BY4 = 0x35,
+ DBG_BLOCK_ID_TD18_BY4 = 0x36,
+ DBG_BLOCK_ID_UNUSED47_BY4 = 0x37,
+ DBG_BLOCK_ID_MCD0_BY4 = 0x38,
+ DBG_BLOCK_ID_MCD4_BY4 = 0x39,
+} DebugBlockId_BY4;
+typedef enum DebugBlockId_BY8 {
+ DBG_BLOCK_ID_RESERVED_BY8 = 0x0,
+ DBG_BLOCK_ID_CSC_BY8 = 0x1,
+ DBG_BLOCK_ID_DMA0_BY8 = 0x2,
+ DBG_BLOCK_ID_CP0_BY8 = 0x3,
+ DBG_BLOCK_ID_VGT0_BY8 = 0x4,
+ DBG_BLOCK_ID_TCAA_BY8 = 0x5,
+ DBG_BLOCK_ID_SX0_BY8 = 0x6,
+ DBG_BLOCK_ID_PC0_BY8 = 0x7,
+ DBG_BLOCK_ID_SCB0_BY8 = 0x8,
+ DBG_BLOCK_ID_BCI0_BY8 = 0x9,
+ DBG_BLOCK_ID_CB00_BY8 = 0xa,
+ DBG_BLOCK_ID_CB10_BY8 = 0xb,
+ DBG_BLOCK_ID_TCP0_BY8 = 0xc,
+ DBG_BLOCK_ID_TCP8_BY8 = 0xd,
+ DBG_BLOCK_ID_TCP16_BY8 = 0xe,
+ DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf,
+ DBG_BLOCK_ID_DB00_BY8 = 0x10,
+ DBG_BLOCK_ID_DB10_BY8 = 0x11,
+ DBG_BLOCK_ID_TCC0_BY8 = 0x12,
+ DBG_BLOCK_ID_SPS00_BY8 = 0x13,
+ DBG_BLOCK_ID_TA00_BY8 = 0x14,
+ DBG_BLOCK_ID_TA08_BY8 = 0x15,
+ DBG_BLOCK_ID_TA10_BY8 = 0x16,
+ DBG_BLOCK_ID_TA18_BY8 = 0x17,
+ DBG_BLOCK_ID_TD00_BY8 = 0x18,
+ DBG_BLOCK_ID_TD08_BY8 = 0x19,
+ DBG_BLOCK_ID_TD10_BY8 = 0x1a,
+ DBG_BLOCK_ID_TD18_BY8 = 0x1b,
+ DBG_BLOCK_ID_MCD0_BY8 = 0x1c,
+} DebugBlockId_BY8;
+typedef enum DebugBlockId_BY16 {
+ DBG_BLOCK_ID_RESERVED_BY16 = 0x0,
+ DBG_BLOCK_ID_DMA0_BY16 = 0x1,
+ DBG_BLOCK_ID_VGT0_BY16 = 0x2,
+ DBG_BLOCK_ID_SX0_BY16 = 0x3,
+ DBG_BLOCK_ID_SCB0_BY16 = 0x4,
+ DBG_BLOCK_ID_CB00_BY16 = 0x5,
+ DBG_BLOCK_ID_TCP0_BY16 = 0x6,
+ DBG_BLOCK_ID_TCP16_BY16 = 0x7,
+ DBG_BLOCK_ID_DB00_BY16 = 0x8,
+ DBG_BLOCK_ID_TCC0_BY16 = 0x9,
+ DBG_BLOCK_ID_TA00_BY16 = 0xa,
+ DBG_BLOCK_ID_TA10_BY16 = 0xb,
+ DBG_BLOCK_ID_TD00_BY16 = 0xc,
+ DBG_BLOCK_ID_TD10_BY16 = 0xd,
+ DBG_BLOCK_ID_MCD0_BY16 = 0xe,
+} DebugBlockId_BY16;
+typedef enum CompareRef {
+ REF_NEVER = 0x0,
+ REF_LESS = 0x1,
+ REF_EQUAL = 0x2,
+ REF_LEQUAL = 0x3,
+ REF_GREATER = 0x4,
+ REF_NOTEQUAL = 0x5,
+ REF_GEQUAL = 0x6,
+ REF_ALWAYS = 0x7,
+} CompareRef;
+typedef enum ReadSize {
+ READ_256_BITS = 0x0,
+ READ_512_BITS = 0x1,
+} ReadSize;
+typedef enum DepthFormat {
+ DEPTH_INVALID = 0x0,
+ DEPTH_16 = 0x1,
+ DEPTH_X8_24 = 0x2,
+ DEPTH_8_24 = 0x3,
+ DEPTH_X8_24_FLOAT = 0x4,
+ DEPTH_8_24_FLOAT = 0x5,
+ DEPTH_32_FLOAT = 0x6,
+ DEPTH_X24_8_32_FLOAT = 0x7,
+} DepthFormat;
+typedef enum ZFormat {
+ Z_INVALID = 0x0,
+ Z_16 = 0x1,
+ Z_24 = 0x2,
+ Z_32_FLOAT = 0x3,
+} ZFormat;
+typedef enum StencilFormat {
+ STENCIL_INVALID = 0x0,
+ STENCIL_8 = 0x1,
+} StencilFormat;
+typedef enum CmaskMode {
+ CMASK_CLEAR_NONE = 0x0,
+ CMASK_CLEAR_ONE = 0x1,
+ CMASK_CLEAR_ALL = 0x2,
+ CMASK_ANY_EXPANDED = 0x3,
+ CMASK_ALPHA0_FRAG1 = 0x4,
+ CMASK_ALPHA0_FRAG2 = 0x5,
+ CMASK_ALPHA0_FRAG4 = 0x6,
+ CMASK_ALPHA0_FRAGS = 0x7,
+ CMASK_ALPHA1_FRAG1 = 0x8,
+ CMASK_ALPHA1_FRAG2 = 0x9,
+ CMASK_ALPHA1_FRAG4 = 0xa,
+ CMASK_ALPHA1_FRAGS = 0xb,
+ CMASK_ALPHAX_FRAG1 = 0xc,
+ CMASK_ALPHAX_FRAG2 = 0xd,
+ CMASK_ALPHAX_FRAG4 = 0xe,
+ CMASK_ALPHAX_FRAGS = 0xf,
+} CmaskMode;
+typedef enum QuadExportFormat {
+ EXPORT_UNUSED = 0x0,
+ EXPORT_32_R = 0x1,
+ EXPORT_32_GR = 0x2,
+ EXPORT_32_AR = 0x3,
+ EXPORT_FP16_ABGR = 0x4,
+ EXPORT_UNSIGNED16_ABGR = 0x5,
+ EXPORT_SIGNED16_ABGR = 0x6,
+ EXPORT_32_ABGR = 0x7,
+} QuadExportFormat;
+typedef enum QuadExportFormatOld {
+ EXPORT_4P_32BPC_ABGR = 0x0,
+ EXPORT_4P_16BPC_ABGR = 0x1,
+ EXPORT_4P_32BPC_GR = 0x2,
+ EXPORT_4P_32BPC_AR = 0x3,
+ EXPORT_2P_32BPC_ABGR = 0x4,
+ EXPORT_8P_32BPC_R = 0x5,
+} QuadExportFormatOld;
+typedef enum ColorFormat {
+ COLOR_INVALID = 0x0,
+ COLOR_8 = 0x1,
+ COLOR_16 = 0x2,
+ COLOR_8_8 = 0x3,
+ COLOR_32 = 0x4,
+ COLOR_16_16 = 0x5,
+ COLOR_10_11_11 = 0x6,
+ COLOR_11_11_10 = 0x7,
+ COLOR_10_10_10_2 = 0x8,
+ COLOR_2_10_10_10 = 0x9,
+ COLOR_8_8_8_8 = 0xa,
+ COLOR_32_32 = 0xb,
+ COLOR_16_16_16_16 = 0xc,
+ COLOR_RESERVED_13 = 0xd,
+ COLOR_32_32_32_32 = 0xe,
+ COLOR_RESERVED_15 = 0xf,
+ COLOR_5_6_5 = 0x10,
+ COLOR_1_5_5_5 = 0x11,
+ COLOR_5_5_5_1 = 0x12,
+ COLOR_4_4_4_4 = 0x13,
+ COLOR_8_24 = 0x14,
+ COLOR_24_8 = 0x15,
+ COLOR_X24_8_32_FLOAT = 0x16,
+ COLOR_RESERVED_23 = 0x17,
+} ColorFormat;
+typedef enum SurfaceFormat {
+ FMT_INVALID = 0x0,
+ FMT_8 = 0x1,
+ FMT_16 = 0x2,
+ FMT_8_8 = 0x3,
+ FMT_32 = 0x4,
+ FMT_16_16 = 0x5,
+ FMT_10_11_11 = 0x6,
+ FMT_11_11_10 = 0x7,
+ FMT_10_10_10_2 = 0x8,
+ FMT_2_10_10_10 = 0x9,
+ FMT_8_8_8_8 = 0xa,
+ FMT_32_32 = 0xb,
+ FMT_16_16_16_16 = 0xc,
+ FMT_32_32_32 = 0xd,
+ FMT_32_32_32_32 = 0xe,
+ FMT_RESERVED_4 = 0xf,
+ FMT_5_6_5 = 0x10,
+ FMT_1_5_5_5 = 0x11,
+ FMT_5_5_5_1 = 0x12,
+ FMT_4_4_4_4 = 0x13,
+ FMT_8_24 = 0x14,
+ FMT_24_8 = 0x15,
+ FMT_X24_8_32_FLOAT = 0x16,
+ FMT_RESERVED_33 = 0x17,
+ FMT_11_11_10_FLOAT = 0x18,
+ FMT_16_FLOAT = 0x19,
+ FMT_32_FLOAT = 0x1a,
+ FMT_16_16_FLOAT = 0x1b,
+ FMT_8_24_FLOAT = 0x1c,
+ FMT_24_8_FLOAT = 0x1d,
+ FMT_32_32_FLOAT = 0x1e,
+ FMT_10_11_11_FLOAT = 0x1f,
+ FMT_16_16_16_16_FLOAT = 0x20,
+ FMT_3_3_2 = 0x21,
+ FMT_6_5_5 = 0x22,
+ FMT_32_32_32_32_FLOAT = 0x23,
+ FMT_RESERVED_36 = 0x24,
+ FMT_1 = 0x25,
+ FMT_1_REVERSED = 0x26,
+ FMT_GB_GR = 0x27,
+ FMT_BG_RG = 0x28,
+ FMT_32_AS_8 = 0x29,
+ FMT_32_AS_8_8 = 0x2a,
+ FMT_5_9_9_9_SHAREDEXP = 0x2b,
+ FMT_8_8_8 = 0x2c,
+ FMT_16_16_16 = 0x2d,
+ FMT_16_16_16_FLOAT = 0x2e,
+ FMT_4_4 = 0x2f,
+ FMT_32_32_32_FLOAT = 0x30,
+ FMT_BC1 = 0x31,
+ FMT_BC2 = 0x32,
+ FMT_BC3 = 0x33,
+ FMT_BC4 = 0x34,
+ FMT_BC5 = 0x35,
+ FMT_BC6 = 0x36,
+ FMT_BC7 = 0x37,
+ FMT_32_AS_32_32_32_32 = 0x38,
+ FMT_APC3 = 0x39,
+ FMT_APC4 = 0x3a,
+ FMT_APC5 = 0x3b,
+ FMT_APC6 = 0x3c,
+ FMT_APC7 = 0x3d,
+ FMT_CTX1 = 0x3e,
+ FMT_RESERVED_63 = 0x3f,
+} SurfaceFormat;
+typedef enum BUF_DATA_FORMAT {
+ BUF_DATA_FORMAT_INVALID = 0x0,
+ BUF_DATA_FORMAT_8 = 0x1,
+ BUF_DATA_FORMAT_16 = 0x2,
+ BUF_DATA_FORMAT_8_8 = 0x3,
+ BUF_DATA_FORMAT_32 = 0x4,
+ BUF_DATA_FORMAT_16_16 = 0x5,
+ BUF_DATA_FORMAT_10_11_11 = 0x6,
+ BUF_DATA_FORMAT_11_11_10 = 0x7,
+ BUF_DATA_FORMAT_10_10_10_2 = 0x8,
+ BUF_DATA_FORMAT_2_10_10_10 = 0x9,
+ BUF_DATA_FORMAT_8_8_8_8 = 0xa,
+ BUF_DATA_FORMAT_32_32 = 0xb,
+ BUF_DATA_FORMAT_16_16_16_16 = 0xc,
+ BUF_DATA_FORMAT_32_32_32 = 0xd,
+ BUF_DATA_FORMAT_32_32_32_32 = 0xe,
+ BUF_DATA_FORMAT_RESERVED_15 = 0xf,
+} BUF_DATA_FORMAT;
+typedef enum IMG_DATA_FORMAT {
+ IMG_DATA_FORMAT_INVALID = 0x0,
+ IMG_DATA_FORMAT_8 = 0x1,
+ IMG_DATA_FORMAT_16 = 0x2,
+ IMG_DATA_FORMAT_8_8 = 0x3,
+ IMG_DATA_FORMAT_32 = 0x4,
+ IMG_DATA_FORMAT_16_16 = 0x5,
+ IMG_DATA_FORMAT_10_11_11 = 0x6,
+ IMG_DATA_FORMAT_11_11_10 = 0x7,
+ IMG_DATA_FORMAT_10_10_10_2 = 0x8,
+ IMG_DATA_FORMAT_2_10_10_10 = 0x9,
+ IMG_DATA_FORMAT_8_8_8_8 = 0xa,
+ IMG_DATA_FORMAT_32_32 = 0xb,
+ IMG_DATA_FORMAT_16_16_16_16 = 0xc,
+ IMG_DATA_FORMAT_32_32_32 = 0xd,
+ IMG_DATA_FORMAT_32_32_32_32 = 0xe,
+ IMG_DATA_FORMAT_RESERVED_15 = 0xf,
+ IMG_DATA_FORMAT_5_6_5 = 0x10,
+ IMG_DATA_FORMAT_1_5_5_5 = 0x11,
+ IMG_DATA_FORMAT_5_5_5_1 = 0x12,
+ IMG_DATA_FORMAT_4_4_4_4 = 0x13,
+ IMG_DATA_FORMAT_8_24 = 0x14,
+ IMG_DATA_FORMAT_24_8 = 0x15,
+ IMG_DATA_FORMAT_X24_8_32 = 0x16,
+ IMG_DATA_FORMAT_RESERVED_23 = 0x17,
+ IMG_DATA_FORMAT_RESERVED_24 = 0x18,
+ IMG_DATA_FORMAT_RESERVED_25 = 0x19,
+ IMG_DATA_FORMAT_RESERVED_26 = 0x1a,
+ IMG_DATA_FORMAT_RESERVED_27 = 0x1b,
+ IMG_DATA_FORMAT_RESERVED_28 = 0x1c,
+ IMG_DATA_FORMAT_RESERVED_29 = 0x1d,
+ IMG_DATA_FORMAT_RESERVED_30 = 0x1e,
+ IMG_DATA_FORMAT_RESERVED_31 = 0x1f,
+ IMG_DATA_FORMAT_GB_GR = 0x20,
+ IMG_DATA_FORMAT_BG_RG = 0x21,
+ IMG_DATA_FORMAT_5_9_9_9 = 0x22,
+ IMG_DATA_FORMAT_BC1 = 0x23,
+ IMG_DATA_FORMAT_BC2 = 0x24,
+ IMG_DATA_FORMAT_BC3 = 0x25,
+ IMG_DATA_FORMAT_BC4 = 0x26,
+ IMG_DATA_FORMAT_BC5 = 0x27,
+ IMG_DATA_FORMAT_BC6 = 0x28,
+ IMG_DATA_FORMAT_BC7 = 0x29,
+ IMG_DATA_FORMAT_RESERVED_42 = 0x2a,
+ IMG_DATA_FORMAT_RESERVED_43 = 0x2b,
+ IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c,
+ IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d,
+ IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e,
+ IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f,
+ IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30,
+ IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31,
+ IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32,
+ IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33,
+ IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34,
+ IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35,
+ IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36,
+ IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37,
+ IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38,
+ IMG_DATA_FORMAT_4_4 = 0x39,
+ IMG_DATA_FORMAT_6_5_5 = 0x3a,
+ IMG_DATA_FORMAT_1 = 0x3b,
+ IMG_DATA_FORMAT_1_REVERSED = 0x3c,
+ IMG_DATA_FORMAT_32_AS_8 = 0x3d,
+ IMG_DATA_FORMAT_32_AS_8_8 = 0x3e,
+ IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f,
+} IMG_DATA_FORMAT;
+typedef enum BUF_NUM_FORMAT {
+ BUF_NUM_FORMAT_UNORM = 0x0,
+ BUF_NUM_FORMAT_SNORM = 0x1,
+ BUF_NUM_FORMAT_USCALED = 0x2,
+ BUF_NUM_FORMAT_SSCALED = 0x3,
+ BUF_NUM_FORMAT_UINT = 0x4,
+ BUF_NUM_FORMAT_SINT = 0x5,
+ BUF_NUM_FORMAT_SNORM_OGL = 0x6,
+ BUF_NUM_FORMAT_FLOAT = 0x7,
+} BUF_NUM_FORMAT;
+typedef enum IMG_NUM_FORMAT {
+ IMG_NUM_FORMAT_UNORM = 0x0,
+ IMG_NUM_FORMAT_SNORM = 0x1,
+ IMG_NUM_FORMAT_USCALED = 0x2,
+ IMG_NUM_FORMAT_SSCALED = 0x3,
+ IMG_NUM_FORMAT_UINT = 0x4,
+ IMG_NUM_FORMAT_SINT = 0x5,
+ IMG_NUM_FORMAT_SNORM_OGL = 0x6,
+ IMG_NUM_FORMAT_FLOAT = 0x7,
+ IMG_NUM_FORMAT_RESERVED_8 = 0x8,
+ IMG_NUM_FORMAT_SRGB = 0x9,
+ IMG_NUM_FORMAT_UBNORM = 0xa,
+ IMG_NUM_FORMAT_UBNORM_OGL = 0xb,
+ IMG_NUM_FORMAT_UBINT = 0xc,
+ IMG_NUM_FORMAT_UBSCALED = 0xd,
+ IMG_NUM_FORMAT_RESERVED_14 = 0xe,
+ IMG_NUM_FORMAT_RESERVED_15 = 0xf,
+} IMG_NUM_FORMAT;
+typedef enum TileType {
+ ARRAY_COLOR_TILE = 0x0,
+ ARRAY_DEPTH_TILE = 0x1,
+} TileType;
+typedef enum NonDispTilingOrder {
+ ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
+ ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
+} NonDispTilingOrder;
+typedef enum MicroTileMode {
+ ADDR_SURF_DISPLAY_MICRO_TILING = 0x0,
+ ADDR_SURF_THIN_MICRO_TILING = 0x1,
+ ADDR_SURF_DEPTH_MICRO_TILING = 0x2,
+ ADDR_SURF_ROTATED_MICRO_TILING = 0x3,
+ ADDR_SURF_THICK_MICRO_TILING = 0x4,
+} MicroTileMode;
+typedef enum TileSplit {
+ ADDR_SURF_TILE_SPLIT_64B = 0x0,
+ ADDR_SURF_TILE_SPLIT_128B = 0x1,
+ ADDR_SURF_TILE_SPLIT_256B = 0x2,
+ ADDR_SURF_TILE_SPLIT_512B = 0x3,
+ ADDR_SURF_TILE_SPLIT_1KB = 0x4,
+ ADDR_SURF_TILE_SPLIT_2KB = 0x5,
+ ADDR_SURF_TILE_SPLIT_4KB = 0x6,
+} TileSplit;
+typedef enum SampleSplit {
+ ADDR_SURF_SAMPLE_SPLIT_1 = 0x0,
+ ADDR_SURF_SAMPLE_SPLIT_2 = 0x1,
+ ADDR_SURF_SAMPLE_SPLIT_4 = 0x2,
+ ADDR_SURF_SAMPLE_SPLIT_8 = 0x3,
+} SampleSplit;
+typedef enum PipeConfig {
+ ADDR_SURF_P2 = 0x0,
+ ADDR_SURF_P2_RESERVED0 = 0x1,
+ ADDR_SURF_P2_RESERVED1 = 0x2,
+ ADDR_SURF_P2_RESERVED2 = 0x3,
+ ADDR_SURF_P4_8x16 = 0x4,
+ ADDR_SURF_P4_16x16 = 0x5,
+ ADDR_SURF_P4_16x32 = 0x6,
+ ADDR_SURF_P4_32x32 = 0x7,
+ ADDR_SURF_P8_16x16_8x16 = 0x8,
+ ADDR_SURF_P8_16x32_8x16 = 0x9,
+ ADDR_SURF_P8_32x32_8x16 = 0xa,
+ ADDR_SURF_P8_16x32_16x16 = 0xb,
+ ADDR_SURF_P8_32x32_16x16 = 0xc,
+ ADDR_SURF_P8_32x32_16x32 = 0xd,
+ ADDR_SURF_P8_32x64_32x32 = 0xe,
+} PipeConfig;
+typedef enum NumBanks {
+ ADDR_SURF_2_BANK = 0x0,
+ ADDR_SURF_4_BANK = 0x1,
+ ADDR_SURF_8_BANK = 0x2,
+ ADDR_SURF_16_BANK = 0x3,
+} NumBanks;
+typedef enum BankWidth {
+ ADDR_SURF_BANK_WIDTH_1 = 0x0,
+ ADDR_SURF_BANK_WIDTH_2 = 0x1,
+ ADDR_SURF_BANK_WIDTH_4 = 0x2,
+ ADDR_SURF_BANK_WIDTH_8 = 0x3,
+} BankWidth;
+typedef enum BankHeight {
+ ADDR_SURF_BANK_HEIGHT_1 = 0x0,
+ ADDR_SURF_BANK_HEIGHT_2 = 0x1,
+ ADDR_SURF_BANK_HEIGHT_4 = 0x2,
+ ADDR_SURF_BANK_HEIGHT_8 = 0x3,
+} BankHeight;
+typedef enum BankWidthHeight {
+ ADDR_SURF_BANK_WH_1 = 0x0,
+ ADDR_SURF_BANK_WH_2 = 0x1,
+ ADDR_SURF_BANK_WH_4 = 0x2,
+ ADDR_SURF_BANK_WH_8 = 0x3,
+} BankWidthHeight;
+typedef enum MacroTileAspect {
+ ADDR_SURF_MACRO_ASPECT_1 = 0x0,
+ ADDR_SURF_MACRO_ASPECT_2 = 0x1,
+ ADDR_SURF_MACRO_ASPECT_4 = 0x2,
+ ADDR_SURF_MACRO_ASPECT_8 = 0x3,
+} MacroTileAspect;
+typedef enum TCC_CACHE_POLICIES {
+ TCC_CACHE_POLICY_LRU = 0x0,
+ TCC_CACHE_POLICY_STREAM = 0x1,
+ TCC_CACHE_POLICY_BYPASS = 0x2,
+} TCC_CACHE_POLICIES;
+typedef enum PERFMON_COUNTER_MODE {
+ PERFMON_COUNTER_MODE_ACCUM = 0x0,
+ PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1,
+ PERFMON_COUNTER_MODE_MAX = 0x2,
+ PERFMON_COUNTER_MODE_DIRTY = 0x3,
+ PERFMON_COUNTER_MODE_SAMPLE = 0x4,
+ PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5,
+ PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6,
+ PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7,
+ PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8,
+ PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9,
+ PERFMON_COUNTER_MODE_RESERVED = 0xf,
+} PERFMON_COUNTER_MODE;
+typedef enum PERFMON_SPM_MODE {
+ PERFMON_SPM_MODE_OFF = 0x0,
+ PERFMON_SPM_MODE_16BIT_CLAMP = 0x1,
+ PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2,
+ PERFMON_SPM_MODE_32BIT_CLAMP = 0x3,
+ PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4,
+ PERFMON_SPM_MODE_RESERVED_5 = 0x5,
+ PERFMON_SPM_MODE_RESERVED_6 = 0x6,
+ PERFMON_SPM_MODE_RESERVED_7 = 0x7,
+ PERFMON_SPM_MODE_TEST_MODE_0 = 0x8,
+ PERFMON_SPM_MODE_TEST_MODE_1 = 0x9,
+ PERFMON_SPM_MODE_TEST_MODE_2 = 0xa,
+} PERFMON_SPM_MODE;
+typedef enum SurfaceTiling {
+ ARRAY_LINEAR = 0x0,
+ ARRAY_TILED = 0x1,
+} SurfaceTiling;
+typedef enum SurfaceArray {
+ ARRAY_1D = 0x0,
+ ARRAY_2D = 0x1,
+ ARRAY_3D = 0x2,
+ ARRAY_3D_SLICE = 0x3,
+} SurfaceArray;
+typedef enum ColorArray {
+ ARRAY_2D_ALT_COLOR = 0x0,
+ ARRAY_2D_COLOR = 0x1,
+ ARRAY_3D_SLICE_COLOR = 0x3,
+} ColorArray;
+typedef enum DepthArray {
+ ARRAY_2D_ALT_DEPTH = 0x0,
+ ARRAY_2D_DEPTH = 0x1,
+} DepthArray;
+
+#endif /* DCE_8_0_ENUM_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
index 8a2930734477..c331c9fe7b81 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h
@@ -4130,6 +4130,18 @@
#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0xe
#define PHY_AUX_CNTL__AUX_PAD_RXSEL_MASK 0x10000
#define PHY_AUX_CNTL__AUX_PAD_RXSEL__SHIFT 0x10
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK_MASK 0x1
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_MASK__SHIFT 0x0
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS_MASK 0x2
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_PD_DIS__SHIFT 0x1
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV_MASK 0x4
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SCL_RECV__SHIFT 0x2
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK_MASK 0x10
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_MASK__SHIFT 0x4
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS_MASK 0x20
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_PD_DIS__SHIFT 0x5
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV_MASK 0x40
+#define DC_GPIO_I2CPAD_MASK__DC_GPIO_SDA_RECV__SHIFT 0x6
#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A_MASK 0x1
#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A__SHIFT 0x0
#define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h
index 9d4347dd6125..dfe78799100d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h
@@ -6225,6 +6225,12 @@ typedef enum TCC_CACHE_POLICIES {
TCC_CACHE_POLICY_STREAM = 0x1,
TCC_CACHE_POLICY_BYPASS = 0x2,
} TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+ MTYPE_NC_NV = 0x0,
+ MTYPE_NC = 0x1,
+ MTYPE_CC = 0x2,
+ MTYPE_UC = 0x3,
+} MTYPE;
typedef enum PERFMON_COUNTER_MODE {
PERFMON_COUNTER_MODE_ACCUM = 0x0,
PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1,
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index aec38fc3834f..ab84d4947247 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -589,6 +589,8 @@ typedef int(*cgs_get_active_displays_info)(
void *cgs_device,
struct cgs_display_info *info);
+typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
+
typedef int (*cgs_call_acpi_method)(void *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
@@ -644,6 +646,8 @@ struct cgs_ops {
cgs_set_clockgating_state set_clockgating_state;
/* display manager */
cgs_get_active_displays_info get_active_displays_info;
+ /* notify dpm enabled */
+ cgs_notify_dpm_enabled notify_dpm_enabled;
/* ACPI */
cgs_call_acpi_method call_acpi_method;
/* get system info */
@@ -734,8 +738,12 @@ struct cgs_device
CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \
CGS_CALL(set_clockgating_state, dev, block_type, state)
+#define cgs_notify_dpm_enabled(dev, enabled) \
+ CGS_CALL(notify_dpm_enabled, dev, enabled)
+
#define cgs_get_active_displays_info(dev, info) \
CGS_CALL(get_active_displays_info, dev, info)
+
#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
#define cgs_query_system_info(dev, sys_info) \
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
new file mode 100644
index 000000000000..d21c6b14662f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
@@ -0,0 +1,102 @@
+/*
+ * Volcanic Islands IV SRC Register documentation
+ *
+ * Copyright (C) 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _IVSRCID_VISLANDS30_H_
+#define _IVSRCID_VISLANDS30_H_
+
+
+// IV Source IDs
+
+#define VISLANDS30_IV_SRCID_D1_V_UPDATE_INT 7 // 0x07
+#define VISLANDS30_IV_EXTID_D1_V_UPDATE_INT 0
+
+#define VISLANDS30_IV_SRCID_D1_GRPH_PFLIP 8 // 0x08
+#define VISLANDS30_IV_EXTID_D1_GRPH_PFLIP 0
+
+#define VISLANDS30_IV_SRCID_D2_V_UPDATE_INT 9 // 0x09
+#define VISLANDS30_IV_EXTID_D2_V_UPDATE_INT 0
+
+#define VISLANDS30_IV_SRCID_D2_GRPH_PFLIP 10 // 0x0a
+#define VISLANDS30_IV_EXTID_D2_GRPH_PFLIP 0
+
+#define VISLANDS30_IV_SRCID_D3_V_UPDATE_INT 11 // 0x0b
+#define VISLANDS30_IV_EXTID_D3_V_UPDATE_INT 0
+
+#define VISLANDS30_IV_SRCID_D3_GRPH_PFLIP 12 // 0x0c
+#define VISLANDS30_IV_EXTID_D3_GRPH_PFLIP 0
+
+#define VISLANDS30_IV_SRCID_D4_V_UPDATE_INT 13 // 0x0d
+#define VISLANDS30_IV_EXTID_D4_V_UPDATE_INT 0
+
+#define VISLANDS30_IV_SRCID_D4_GRPH_PFLIP 14 // 0x0e
+#define VISLANDS30_IV_EXTID_D4_GRPH_PFLIP 0
+
+#define VISLANDS30_IV_SRCID_D5_V_UPDATE_INT 15 // 0x0f
+#define VISLANDS30_IV_EXTID_D5_V_UPDATE_INT 0
+
+#define VISLANDS30_IV_SRCID_D5_GRPH_PFLIP 16 // 0x10
+#define VISLANDS30_IV_EXTID_D5_GRPH_PFLIP 0
+
+#define VISLANDS30_IV_SRCID_D6_V_UPDATE_INT 17 // 0x11
+#define VISLANDS30_IV_EXTID_D6_V_UPDATE_INT 0
+
+#define VISLANDS30_IV_SRCID_D6_GRPH_PFLIP 18 // 0x12
+#define VISLANDS30_IV_EXTID_D6_GRPH_PFLIP 0
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A 0
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_B 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B 1
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_C 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C 2
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_D 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D 3
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_E 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E 4
+
+#define VISLANDS30_IV_SRCID_HOTPLUG_DETECT_F 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F 5
+
+#define VISLANDS30_IV_SRCID_HPD_RX_A 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HPD_RX_A 6
+
+#define VISLANDS30_IV_SRCID_HPD_RX_B 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HPD_RX_B 7
+
+#define VISLANDS30_IV_SRCID_HPD_RX_C 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HPD_RX_C 8
+
+#define VISLANDS30_IV_SRCID_HPD_RX_D 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HPD_RX_D 9
+
+#define VISLANDS30_IV_SRCID_HPD_RX_E 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HPD_RX_E 10
+
+#define VISLANDS30_IV_SRCID_HPD_RX_F 42 // 0x2a
+#define VISLANDS30_IV_EXTID_HPD_RX_F 11
+
+#endif // _IVSRCID_VISLANDS30_H_
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 888250b33ea8..a09d9f352871 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -221,7 +221,7 @@ struct kgd2kfd_calls {
int (*resume)(struct kfd_dev *kfd);
};
-bool kgd2kfd_init(unsigned interface_version,
+int kgd2kfd_init(unsigned interface_version,
const struct kgd2kfd_calls **g2f);
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index e195bf59da86..043e6ebab575 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,17 +1,17 @@
subdir-ccflags-y += -Iinclude/drm \
- -Idrivers/gpu/drm/amd/powerplay/inc/ \
- -Idrivers/gpu/drm/amd/include/asic_reg \
- -Idrivers/gpu/drm/amd/include \
- -Idrivers/gpu/drm/amd/powerplay/smumgr\
- -Idrivers/gpu/drm/amd/powerplay/hwmgr \
- -Idrivers/gpu/drm/amd/powerplay/eventmgr
+ -I$(FULL_AMD_PATH)/powerplay/inc/ \
+ -I$(FULL_AMD_PATH)/include/asic_reg \
+ -I$(FULL_AMD_PATH)/include \
+ -I$(FULL_AMD_PATH)/powerplay/smumgr\
+ -I$(FULL_AMD_PATH)/powerplay/hwmgr \
+ -I$(FULL_AMD_PATH)/powerplay/eventmgr
AMD_PP_PATH = ../powerplay
PP_LIBS = smumgr hwmgr eventmgr
-AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
+AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
include $(AMD_POWERPLAY)
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 589599f66fcc..9d2290044708 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -29,6 +29,7 @@
#include "pp_instance.h"
#include "power_state.h"
#include "eventmanager.h"
+#include "pp_debug.h"
#define PP_CHECK(handle) \
do { \
@@ -436,7 +437,10 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
case PP_StateUILabel_Performance:
return POWER_STATE_TYPE_PERFORMANCE;
default:
- return POWER_STATE_TYPE_DEFAULT;
+ if (state->classification.flags & PP_StateClassificationFlag_Boot)
+ return POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ return POWER_STATE_TYPE_DEFAULT;
}
}
@@ -538,6 +542,112 @@ static int pp_dpm_get_temperature(void *handle)
return hwmgr->hwmgr_func->get_temperature(hwmgr);
}
+static int pp_dpm_get_pp_num_states(void *handle,
+ struct pp_states_info *data)
+{
+ struct pp_hwmgr *hwmgr;
+ int i;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr == NULL || hwmgr->ps == NULL)
+ return -EINVAL;
+
+ data->nums = hwmgr->num_ps;
+
+ for (i = 0; i < hwmgr->num_ps; i++) {
+ struct pp_power_state *state = (struct pp_power_state *)
+ ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
+ switch (state->classification.ui_label) {
+ case PP_StateUILabel_Battery:
+ data->states[i] = POWER_STATE_TYPE_BATTERY;
+ break;
+ case PP_StateUILabel_Balanced:
+ data->states[i] = POWER_STATE_TYPE_BALANCED;
+ break;
+ case PP_StateUILabel_Performance:
+ data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (state->classification.flags & PP_StateClassificationFlag_Boot)
+ data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ data->states[i] = POWER_STATE_TYPE_DEFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int pp_dpm_get_pp_table(void *handle, char **table)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
+ hwmgr->hwmgr_func->get_pp_table == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
+}
+
+static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
+ hwmgr->hwmgr_func->set_pp_table == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
+}
+
+static int pp_dpm_force_clock_level(void *handle,
+ enum pp_clock_type type, int level)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
+ hwmgr->hwmgr_func->force_clock_level == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level);
+}
+
+static int pp_dpm_print_clock_levels(void *handle,
+ enum pp_clock_type type, char *buf)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
+ hwmgr->hwmgr_func->print_clock_levels == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -555,6 +665,11 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
+ .get_pp_num_states = pp_dpm_get_pp_num_states,
+ .get_pp_table = pp_dpm_get_pp_table,
+ .set_pp_table = pp_dpm_set_pp_table,
+ .force_clock_level = pp_dpm_force_clock_level,
+ .print_clock_levels = pp_dpm_print_clock_levels,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
@@ -638,10 +753,10 @@ int amd_powerplay_fini(void *handle)
/* export this function to DAL */
-int amd_powerplay_display_configuration_change(void *handle, const void *input)
+int amd_powerplay_display_configuration_change(void *handle,
+ const struct amd_pp_display_configuration *display_config)
{
struct pp_hwmgr *hwmgr;
- const struct amd_pp_display_configuration *display_config = input;
PP_CHECK((struct pp_instance *)handle);
@@ -653,7 +768,7 @@ int amd_powerplay_display_configuration_change(void *handle, const void *input)
}
int amd_powerplay_get_display_power_level(void *handle,
- struct amd_pp_dal_clock_info *output)
+ struct amd_pp_simple_clock_info *output)
{
struct pp_hwmgr *hwmgr;
@@ -666,3 +781,86 @@ int amd_powerplay_get_display_power_level(void *handle,
return phm_get_dal_power_level(hwmgr, output);
}
+
+int amd_powerplay_get_current_clocks(void *handle,
+ struct amd_pp_clock_info *clocks)
+{
+ struct pp_hwmgr *hwmgr;
+ struct amd_pp_simple_clock_info simple_clocks;
+ struct pp_clock_info hw_clocks;
+
+ PP_CHECK((struct pp_instance *)handle);
+
+ if (clocks == NULL)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ phm_get_dal_power_level(hwmgr, &simple_clocks);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) {
+ if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment))
+ PP_ASSERT_WITH_CODE(0, "Error in PHM_GetPowerContainmentClockInfo", return -1);
+ } else {
+ if (0 != phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks, PHM_PerformanceLevelDesignation_Activity))
+ PP_ASSERT_WITH_CODE(0, "Error in PHM_GetClockInfo", return -1);
+ }
+
+ clocks->min_engine_clock = hw_clocks.min_eng_clk;
+ clocks->max_engine_clock = hw_clocks.max_eng_clk;
+ clocks->min_memory_clock = hw_clocks.min_mem_clk;
+ clocks->max_memory_clock = hw_clocks.max_mem_clk;
+ clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
+ clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
+
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+
+ clocks->max_clocks_state = simple_clocks.level;
+
+ if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+ }
+
+ return 0;
+
+}
+
+int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
+{
+ int result = -1;
+
+ struct pp_hwmgr *hwmgr;
+
+ PP_CHECK((struct pp_instance *)handle);
+
+ if (clocks == NULL)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ result = phm_get_clock_by_type(hwmgr, type, clocks);
+
+ return result;
+}
+
+int amd_powerplay_get_display_mode_validation_clocks(void *handle,
+ struct amd_pp_simple_clock_info *clocks)
+{
+ int result = -1;
+ struct pp_hwmgr *hwmgr;
+
+ PP_CHECK((struct pp_instance *)handle);
+
+ if (clocks == NULL)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
+ result = phm_get_max_high_clocks(hwmgr, clocks);
+
+ return result;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 6b52c78cb404..56856a2864d1 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -137,14 +137,14 @@ static const pem_event_action *resume_event[] = {
reset_display_configCounter_tasks,
update_dal_configuration_tasks,
vari_bright_resume_tasks,
- block_adjust_power_state_tasks,
setup_asic_tasks,
enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
enable_dynamic_state_management_tasks,
enable_clock_power_gatings_tasks,
enable_disable_bapm_tasks,
initialize_thermal_controller_tasks,
- reset_boot_state_tasks,
+ get_2d_performance_state_tasks,
+ set_performance_state_tasks,
adjust_power_state_tasks,
enable_disable_fps_tasks,
notify_hw_power_source_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index cf01177ca3b5..5682490337e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -241,6 +241,11 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicUVDState);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEDPM);
+
cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
@@ -733,7 +738,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
unsigned long clock = 0;
unsigned long level;
unsigned long stable_pstate_sclk;
- struct PP_Clocks clocks;
unsigned long percentage;
cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
@@ -744,8 +748,10 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
else
cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
- /*PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks);*/
- clock = clocks.engineClock;
+ clock = hwmgr->display_config.min_core_set_clock;
+
+ if (clock == 0)
+ printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;
@@ -901,9 +907,9 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
if (pnew_state->action == FORCE_HIGH)
cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
- else if(pnew_state->action == CANCEL_FORCE_HIGH)
- cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
- else
+ else if (pnew_state->action == CANCEL_FORCE_HIGH)
+ cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
+ else
cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
}
return 0;
@@ -1128,9 +1134,10 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
cast_const_PhwCzPowerState(&pcurrent_ps->hardware);
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- struct PP_Clocks clocks;
+ struct PP_Clocks clocks = {0, 0, 0, 0};
bool force_high;
- unsigned long num_of_active_displays = 4;
+ uint32_t num_of_active_displays = 0;
+ struct cgs_display_info info = {0};
cz_ps->evclk = hwmgr->vce_arbiter.evclk;
cz_ps->ecclk = hwmgr->vce_arbiter.ecclk;
@@ -1142,12 +1149,15 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
- /* to do PECI_GetMinClockSettings(pHwMgr->pPECI, &clocks); */
- /* PECI_GetNumberOfActiveDisplays(pHwMgr->pPECI, &numOfActiveDisplays); */
+ clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
+ hwmgr->display_config.min_mem_set_clock :
+ cz_hwmgr->sys_info.nbp_memory_clock[1];
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ num_of_active_displays = info.display_count;
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
- else
- clocks.memoryClock = 0;
if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
@@ -1217,6 +1227,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n");
return result;
}
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;
result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
if (result != 0) {
@@ -1648,10 +1659,10 @@ static void cz_hw_print_display_cfg(
& PWRMGT_SEPARATION_TIME_MASK)
<< PWRMGT_SEPARATION_TIME_SHIFT;
- data|= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
+ data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
<< PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
- data|= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
+ data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
<< PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
@@ -1666,9 +1677,9 @@ static void cz_hw_print_display_cfg(
}
- static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
+static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
- {
+{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
if (separation_time !=
@@ -1696,20 +1707,19 @@ static void cz_hw_print_display_cfg(
return 0;
}
- static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
- struct amd_pp_dal_clock_info*info)
+static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
+ struct amd_pp_simple_clock_info *info)
{
uint32_t i;
- const struct phm_clock_voltage_dependency_table * table =
+ const struct phm_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
- const struct phm_clock_and_voltage_limits* limits =
+ const struct phm_clock_and_voltage_limits *limits =
&hwmgr->dyn_state.max_clock_voltage_on_ac;
info->engine_max_clock = limits->sclk;
info->memory_max_clock = limits->mclk;
for (i = table->count - 1; i > 0; i--) {
-
if (limits->vddc >= table->entries[i].v) {
info->level = table->entries[i].clk;
return 0;
@@ -1718,6 +1728,158 @@ static void cz_hw_print_display_cfg(
return -EINVAL;
}
+static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, int level)
+{
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ (1 << level));
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMax,
+ (1 << level));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+{
+ struct phm_clock_voltage_dependency_table *sclk_table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+ int i, now, size = 0;
+
+ switch (type) {
+ case PP_SCLK:
+ now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ ixTARGET_AND_CURRENT_PROFILE_INDEX),
+ TARGET_AND_CURRENT_PROFILE_INDEX,
+ CURR_SCLK_INDEX);
+
+ for (i = 0; i < sclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->entries[i].clk / 100,
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
+static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+ PHM_PerformanceLevelDesignation designation, uint32_t index,
+ PHM_PerformanceLevel *level)
+{
+ const struct cz_power_state *ps;
+ struct cz_hwmgr *data;
+ uint32_t level_index;
+ uint32_t i;
+
+ if (level == NULL || hwmgr == NULL || state == NULL)
+ return -EINVAL;
+
+ data = (struct cz_hwmgr *)(hwmgr->backend);
+ ps = cast_const_PhwCzPowerState(state);
+
+ level_index = index > ps->level - 1 ? ps->level - 1 : index;
+
+ level->coreClock = ps->levels[level_index].engineClock;
+
+ if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
+ for (i = 1; i < ps->level; i++) {
+ if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
+ level->coreClock = ps->levels[i].engineClock;
+ break;
+ }
+ }
+ }
+
+ if (level_index == 0)
+ level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1];
+ else
+ level->memory_clock = data->sys_info.nbp_memory_clock[0];
+
+ level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
+ level->nonLocalMemoryFreq = 0;
+ level->nonLocalMemoryWidth = 0;
+
+ return 0;
+}
+
+static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
+ const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
+{
+ const struct cz_power_state *ps = cast_const_PhwCzPowerState(state);
+
+ clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
+ clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
+
+ return 0;
+}
+
+static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
+ int i;
+ struct phm_clock_voltage_dependency_table *table;
+
+ clocks->count = cz_get_max_sclk_level(hwmgr);
+ switch (type) {
+ case amd_pp_disp_clock:
+ for (i = 0; i < clocks->count; i++)
+ clocks->clock[i] = data->sys_info.display_clock[i];
+ break;
+ case amd_pp_sys_clock:
+ table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+ for (i = 0; i < clocks->count; i++)
+ clocks->clock[i] = table->entries[i].clk;
+ break;
+ case amd_pp_mem_clock:
+ clocks->count = CZ_NUM_NBPMEMORYCLOCK;
+ for (i = 0; i < clocks->count; i++)
+ clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+{
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+ unsigned long level;
+ const struct phm_clock_and_voltage_limits *limits =
+ &hwmgr->dyn_state.max_clock_voltage_on_ac;
+
+ if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
+ return -EINVAL;
+
+ level = cz_get_max_sclk_level(hwmgr) - 1;
+
+ if (level < table->count)
+ clocks->engine_max_clock = table->entries[level].clk;
+ else
+ clocks->engine_max_clock = table->entries[table->count - 1].clk;
+
+ clocks->memory_max_clock = limits->mclk;
+
+ return 0;
+}
+
static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init,
.backend_fini = cz_hwmgr_backend_fini,
@@ -1736,7 +1898,13 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.print_current_perforce_level = cz_print_current_perforce_level,
.set_cpu_power_state = cz_set_cpu_power_state,
.store_cc6_data = cz_store_cc6_data,
- .get_dal_power_level= cz_get_dal_power_level,
+ .force_clock_level = cz_force_clock_level,
+ .print_clock_levels = cz_print_clock_levels,
+ .get_dal_power_level = cz_get_dal_power_level,
+ .get_performance_level = cz_get_performance_level,
+ .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
+ .get_clock_by_type = cz_get_clock_by_type,
+ .get_max_high_clocks = cz_get_max_high_clocks,
};
int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
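A note on the manual forcing path added above: cz_force_clock_level refuses
to act unless the forced level was switched to AMD_DPM_FORCED_LEVEL_MANUAL,
and it encodes the chosen level as a one-hot bitmask rather than a raw
frequency. Condensed sketch of the idiom (assumes a 0-based DPM index below
32; mask is a local introduced here only for clarity):

	uint32_t mask = 1u << level;	/* select exactly one DPM level */

	/* Clamping both soft limits to the same mask leaves the SMC a
	 * single level to choose from, i.e. the level is pinned. */
	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_SetSclkSoftMin, mask);
	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
			PPSMC_MSG_SetSclkSoftMax, mask);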
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 28031a7eddba..89f31bc5b68b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -2389,6 +2389,7 @@ static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
for(count = 0; count < table->VceLevelCount; count++) {
table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
table->VceLevel[count].MinVoltage |=
(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
table->VceLevel[count].MinVoltage |=
@@ -2465,6 +2466,7 @@ static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
for (count = 0; count < table->SamuLevelCount; count++) {
/* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
VOLTAGE_SCALE) << VDDC_SHIFT;
@@ -2562,6 +2564,7 @@ static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
table->UvdBootLevel = 0;
for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
@@ -2900,6 +2903,8 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
fiji_populate_smc_voltage_tables(hwmgr, table);
+ table->SystemFlags = 0;
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition))
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
@@ -2997,6 +3002,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->MemoryThermThrottleEnable = 1;
table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
result = fiji_populate_vr_config(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
@@ -4275,7 +4281,6 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
dpm_table->mclk_table.dpm_levels
[dpm_table->mclk_table.count - 1].value = mclk;
-
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_OD6PlusinACSupport) ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -4886,6 +4891,10 @@ static void fiji_print_current_perforce_level(
activity_percent >>= 8;
seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
+
+ seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+ seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
}
static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
@@ -5073,6 +5082,186 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
CG_FDO_CTRL2, FDO_PWM_MODE);
}
+static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ *table = (char *)&data->smc_state_table;
+
+ return sizeof(struct SMU73_Discrete_DpmTable);
+}
+
+static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ void *table = (void *)&data->smc_state_table;
+
+ memcpy(table, buf, size);
+
+ return 0;
+}
+
+static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, int level)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ (1 << level));
+ break;
+ case PP_MCLK:
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ (1 << level));
+ break;
+ case PP_PCIE:
+ if (!data->pcie_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ (1 << level));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+ int i, now, size = 0;
+ uint32_t clock, pcie_speed;
+
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (clock > sclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (clock > mclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_PCIE:
+ pcie_speed = fiji_get_current_pcie_speed(hwmgr);
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_speed != pcie_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
+static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
+ const struct fiji_performance_level *pl2)
+{
+ return ((pl1->memory_clock == pl2->memory_clock) &&
+ (pl1->engine_clock == pl2->engine_clock) &&
+ (pl1->pcie_gen == pl2->pcie_gen) &&
+ (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+ const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
+ const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
+ int i;
+
+ if (equal == NULL || psa == NULL || psb == NULL)
+ return -EINVAL;
+
+ /* If the two states don't even have the same number of performance levels, they cannot be the same state. */
+ if (psa->performance_level_count != psb->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < psa->performance_level_count; i++) {
+ if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+ /* If we have found even one performance level pair that is different, the states are different. */
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+ *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+ *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+ *equal &= (psa->acp_clk == psb->acp_clk);
+
+ return 0;
+}
+
+bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ bool is_update_required = false;
+ struct cgs_display_info info = {0,0,NULL};
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+ if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+ if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+ is_update_required = true;
+*/
+ return is_update_required;
+}
+
static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.backend_init = &fiji_hwmgr_backend_init,
.backend_fini = &tonga_hwmgr_backend_fini,
@@ -5108,6 +5297,12 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
.set_fan_control_mode = fiji_set_fan_control_mode,
.get_fan_control_mode = fiji_get_fan_control_mode,
+ .check_states_equal = fiji_check_states_equal,
+ .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
+ .get_pp_table = fiji_get_pp_table,
+ .set_pp_table = fiji_set_pp_table,
+ .force_clock_level = fiji_force_clock_level,
+ .print_clock_levels = fiji_print_clock_levels,
};
int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
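Two caveats worth keeping in mind about the fiji get/set_pp_table hooks
above: the getter returns a pointer that aliases the live smc_state_table
(not a copy), and the setter memcpy()s the caller's buffer over that table
with no size clamp, so callers must never pass more than
sizeof(struct SMU73_Discrete_DpmTable) bytes. A debug-only usage sketch
(illustrative; assumes the hook is reachable from the calling context):

	char *table;
	int size = fiji_get_pp_table(hwmgr, &table);

	/* The pointer aliases hwmgr private state: do not kfree() it and
	 * do not cache it across a re-initialization. */
	if (size > 0)
		print_hex_dump(KERN_DEBUG, "dpm: ", DUMP_PREFIX_OFFSET,
			       16, 1, table, size, false);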
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
index 22e273b1c1c5..a16f7cd4c238 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
@@ -29,6 +29,7 @@
#include "smu73_discrete.h"
#include "ppatomctrl.h"
#include "fiji_ppsmc.h"
+#include "pp_endian.h"
#define FIJI_MAX_HARDWARE_POWERLEVELS 2
#define FIJI_AT_DFLT 30
@@ -347,15 +348,4 @@ int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
-#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
-
-#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
-#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
-
-#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
-#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
-
-#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
-
#endif /* _FIJI_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index 9deadabbc81c..72cfecc4f9f7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -34,6 +34,11 @@ static int phm_run_table(struct pp_hwmgr *hwmgr,
int result = 0;
phm_table_function *function;
+ if (rt_table->function_list == NULL) {
+ printk(KERN_INFO "[ powerplay ] this function not implement!\n");
+ return 0;
+ }
+
for (function = rt_table->function_list; NULL != *function; function++) {
int tmp = (*function)(hwmgr, input, output, temp_storage, result);
@@ -57,9 +62,9 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
int result = 0;
void *temp_storage = NULL;
- if (hwmgr == NULL || rt_table == NULL || rt_table->function_list == NULL) {
+ if (hwmgr == NULL || rt_table == NULL) {
printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
- return 0; /*temp return ture because some function not implement on some asic */
+ return -EINVAL;
}
if (0 != rt_table->storage_size) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 0f2d5e4bc241..fa208ada6892 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -26,7 +26,7 @@
#include "power_state.h"
#include "pp_acpi.h"
#include "amd_acpi.h"
-#include "amd_powerplay.h"
+#include "pp_debug.h"
#define PHM_FUNC_CHECK(hw) \
do { \
@@ -58,6 +58,9 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
@@ -130,18 +133,25 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
+ int ret = 1;
+ bool enabled;
PHM_FUNC_CHECK(hwmgr);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface)) {
if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
- return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
+ ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
} else {
- return phm_dispatch_table(hwmgr,
+ ret = phm_dispatch_table(hwmgr,
&(hwmgr->enable_dynamic_state_management),
NULL, NULL);
}
- return 0;
+
+ enabled = ret == 0 ? true : false;
+
+ cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+ return ret;
}
int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
@@ -313,13 +323,12 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
}
int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
- struct amd_pp_dal_clock_info *info)
+ struct amd_pp_simple_clock_info *info)
{
PHM_FUNC_CHECK(hwmgr);
if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
return -EINVAL;
-
return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
}
@@ -332,3 +341,91 @@ int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
return 0;
}
+
+int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+ PHM_PerformanceLevelDesignation designation, uint32_t index,
+ PHM_PerformanceLevel *level)
+{
+ PHM_FUNC_CHECK(hwmgr);
+ if (hwmgr->hwmgr_func->get_performance_level == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level);
+}
+
+/**
+* Gets Clock Info.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @param state the address of the power state structure.
+* @param pclock_info the address of the pp_clock_info structure where the result will be returned.
+* @param designation the performance level designation used for the maximum clocks.
+* @exception PP_Result_Failed if any of the parameters is NULL, otherwise the return value from the back-end.
+*/
+int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info,
+ PHM_PerformanceLevelDesignation designation)
+{
+ int result;
+ PHM_PerformanceLevel performance_level;
+
+ PHM_FUNC_CHECK(hwmgr);
+
+ PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
+ PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);
+
+ result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level);
+
+ PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result);
+
+ pclock_info->min_mem_clk = performance_level.memory_clock;
+ pclock_info->min_eng_clk = performance_level.coreClock;
+ pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;
+
+ result = phm_get_performance_level(hwmgr, state, designation,
+ (hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level);
+
+ PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result);
+
+ pclock_info->max_mem_clk = performance_level.memory_clock;
+ pclock_info->max_eng_clk = performance_level.coreClock;
+ pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;
+
+ return 0;
+}
+
+int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info);
+}
+
+int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks);
+}
+
+int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
+}
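The range reported by phm_get_clock_info is built from two probes of the
same power state: level 0 under the Activity designation supplies the
minimum clocks, while the highest hardware activity level (index
hardwareActivityPerformanceLevels - 1, which the cz backend now sets from
CZ_MAX_HARDWARE_POWERLEVELS) supplies the maximums under the caller's
designation; bus bandwidth is modelled simply as frequency times width.
Reduced to its essence (lvl_min/lvl_max stand for the two
PHM_PerformanceLevel results; the names are illustrative):

	pclock_info->min_eng_clk = lvl_min.coreClock;
	pclock_info->max_eng_clk = lvl_max.coreClock;
	pclock_info->min_bus_bandwidth = lvl_min.nonLocalMemoryFreq *
					 lvl_min.nonLocalMemoryWidth;
	pclock_info->max_bus_bandwidth = lvl_max.nonLocalMemoryFreq *
					 lvl_max.nonLocalMemoryWidth;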
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
index b7429a527828..b10df328d58c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
@@ -293,7 +293,7 @@ fInt GetScaledFraction(int X, int factor)
}
if (factor == 1)
- return (ConvertToFraction(X));
+ return ConvertToFraction(X);
fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor));
@@ -371,7 +371,7 @@ fInt fDivide (fInt X, fInt Y)
fZERO = ConvertToFraction(0);
if (Equal(Y, fZERO))
- return fZERO;
+ return fZERO;
longlongX = (int64_t)X.full;
longlongY = (int64_t)Y.full;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 980d3bf8ea76..0d5d8372953e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -5185,7 +5185,6 @@ tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100);
-
offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
activity_percent += 0x80;
@@ -5193,6 +5192,9 @@ tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
+ seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+ seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
}
static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
@@ -6033,6 +6035,125 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
CG_FDO_CTRL2, FDO_PWM_MODE);
}
+static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+ *table = (char *)&data->smc_state_table;
+
+ return sizeof(struct SMU72_Discrete_DpmTable);
+}
+
+static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+ void *table = (void *)&data->smc_state_table;
+
+ memcpy(table, buf, size);
+
+ return 0;
+}
+
+static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, int level)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ (1 << level));
+ break;
+ case PP_MCLK:
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ (1 << level));
+ break;
+ case PP_PCIE:
+ if (!data->pcie_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ (1 << level));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+ int i, now, size = 0;
+ uint32_t clock, pcie_speed;
+
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (clock > sclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (clock > mclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_PCIE:
+ pcie_speed = tonga_get_current_pcie_speed(hwmgr);
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_speed != pcie_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
.backend_init = &tonga_hwmgr_backend_init,
.backend_fini = &tonga_hwmgr_backend_fini,
@@ -6070,6 +6191,10 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
.check_states_equal = tonga_check_states_equal,
.set_fan_control_mode = tonga_set_fan_control_mode,
.get_fan_control_mode = tonga_get_fan_control_mode,
+ .get_pp_table = tonga_get_pp_table,
+ .set_pp_table = tonga_set_pp_table,
+ .force_clock_level = tonga_force_clock_level,
+ .print_clock_levels = tonga_print_clock_levels,
};
int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
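These print hooks produce the per-level listing exposed to userspace
(the amdgpu sysfs files such as pp_dpm_sclk, assuming the rest of this
series wires them up), with "*" marking the level currently in use. Given
the "%d: %uMhz %s\n" format above, the output takes a shape like the
following (values invented for illustration, not captured from hardware):

	0: 300Mhz
	1: 608Mhz *
	2: 910Mhz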
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index 49168d262ccc..f88d3bbe6671 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -28,6 +28,7 @@
#include "ppatomctrl.h"
#include "ppinterrupt.h"
#include "tonga_powertune.h"
+#include "pp_endian.h"
#define TONGA_MAX_HARDWARE_POWERLEVELS 2
#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
@@ -386,17 +387,6 @@ typedef struct tonga_hwmgr tonga_hwmgr;
#define TONGA_UNUSED_GPIO_PIN 0x7F
-#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
-#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
-
-#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
-#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
-
-#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
-#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
-
-#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
-
int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 34f4bef3691f..b156481b50e8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -512,8 +512,10 @@ static int get_cac_tdp_table(
hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
- if (NULL == hwmgr->dyn_state.cac_dtp_table)
+ if (NULL == hwmgr->dyn_state.cac_dtp_table) {
+ kfree(tdp_table);
return -ENOMEM;
+ }
memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index e61a3e67852e..7255f7ddf93a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,6 +29,7 @@
#include "amd_shared.h"
#include "cgs_common.h"
+
enum amd_pp_event {
AMD_PP_EVENT_INITIALIZE = 0,
AMD_PP_EVENT_UNINITIALIZE,
@@ -123,6 +124,7 @@ enum amd_dpm_forced_level {
AMD_DPM_FORCED_LEVEL_AUTO = 0,
AMD_DPM_FORCED_LEVEL_LOW = 1,
AMD_DPM_FORCED_LEVEL_HIGH = 2,
+ AMD_DPM_FORCED_LEVEL_MANUAL = 3,
};
struct amd_pp_init {
@@ -212,12 +214,55 @@ struct amd_pp_display_configuration {
uint32_t dce_tolerable_mclk_in_active_latency;
};
-struct amd_pp_dal_clock_info {
+struct amd_pp_simple_clock_info {
uint32_t engine_max_clock;
uint32_t memory_max_clock;
uint32_t level;
};
+enum PP_DAL_POWERLEVEL {
+ PP_DAL_POWERLEVEL_INVALID = 0,
+ PP_DAL_POWERLEVEL_ULTRALOW,
+ PP_DAL_POWERLEVEL_LOW,
+ PP_DAL_POWERLEVEL_NOMINAL,
+ PP_DAL_POWERLEVEL_PERFORMANCE,
+
+ PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW,
+ PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW,
+ PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL,
+ PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE,
+ PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1,
+ PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1,
+ PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1,
+ PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1,
+};
+
+struct amd_pp_clock_info {
+ uint32_t min_engine_clock;
+ uint32_t max_engine_clock;
+ uint32_t min_memory_clock;
+ uint32_t max_memory_clock;
+ uint32_t min_bus_bandwidth;
+ uint32_t max_bus_bandwidth;
+ uint32_t max_engine_clock_in_sr;
+ uint32_t min_engine_clock_in_sr;
+ enum PP_DAL_POWERLEVEL max_clocks_state;
+};
+
+enum amd_pp_clock_type {
+ amd_pp_disp_clock = 1,
+ amd_pp_sys_clock,
+ amd_pp_mem_clock
+};
+
+#define MAX_NUM_CLOCKS 16
+
+struct amd_pp_clocks {
+ uint32_t count;
+ uint32_t clock[MAX_NUM_CLOCKS];
+};
+
enum {
PP_GROUP_UNKNOWN = 0,
PP_GROUP_GFX = 1,
@@ -225,6 +270,17 @@ enum {
PP_GROUP_MAX
};
+enum pp_clock_type {
+ PP_SCLK,
+ PP_MCLK,
+ PP_PCIE,
+};
+
+struct pp_states_info {
+ uint32_t nums;
+ uint32_t states[16];
+};
+
#define PP_GROUP_MASK 0xF0000000
#define PP_GROUP_SHIFT 28
@@ -278,6 +334,11 @@ struct amd_powerplay_funcs {
int (*get_fan_control_mode)(void *handle);
int (*set_fan_speed_percent)(void *handle, uint32_t percent);
int (*get_fan_speed_percent)(void *handle, uint32_t *speed);
+ int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
+ int (*get_pp_table)(void *handle, char **table);
+ int (*set_pp_table)(void *handle, const char *buf, size_t size);
+ int (*force_clock_level)(void *handle, enum pp_clock_type type, int level);
+ int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
};
struct amd_powerplay {
@@ -288,12 +349,23 @@ struct amd_powerplay {
int amd_powerplay_init(struct amd_pp_init *pp_init,
struct amd_powerplay *amd_pp);
+
int amd_powerplay_fini(void *handle);
-int amd_powerplay_display_configuration_change(void *handle, const void *input);
+int amd_powerplay_display_configuration_change(void *handle,
+ const struct amd_pp_display_configuration *input);
int amd_powerplay_get_display_power_level(void *handle,
- struct amd_pp_dal_clock_info *output);
+ struct amd_pp_simple_clock_info *output);
+
+int amd_powerplay_get_current_clocks(void *handle,
+ struct amd_pp_clock_info *output);
+
+int amd_powerplay_get_clock_by_type(void *handle,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+int amd_powerplay_get_display_mode_validation_clocks(void *handle,
+ struct amd_pp_simple_clock_info *output);
#endif /* _AMD_POWERPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 91795efe1336..040d3f7cbf49 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -31,6 +31,7 @@ struct pp_power_state;
enum amd_dpm_forced_level;
struct PP_TemperatureRange;
+
struct phm_fan_speed_info {
uint32_t min_percent;
uint32_t max_percent;
@@ -290,6 +291,15 @@ struct PP_Clocks {
uint32_t engineClockInSR;
};
+struct pp_clock_info {
+ uint32_t min_mem_clk;
+ uint32_t max_mem_clk;
+ uint32_t min_eng_clk;
+ uint32_t max_eng_clk;
+ uint32_t min_bus_bandwidth;
+ uint32_t max_bus_bandwidth;
+};
+
struct phm_platform_descriptor {
uint32_t platformCaps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES];
uint32_t vbiosInterruptId;
@@ -323,24 +333,6 @@ struct phm_clocks {
uint32_t clock[MAX_NUM_CLOCKS];
};
-enum PP_DAL_POWERLEVEL {
- PP_DAL_POWERLEVEL_INVALID = 0,
- PP_DAL_POWERLEVEL_ULTRALOW,
- PP_DAL_POWERLEVEL_LOW,
- PP_DAL_POWERLEVEL_NOMINAL,
- PP_DAL_POWERLEVEL_PERFORMANCE,
-
- PP_DAL_POWERLEVEL_0 = PP_DAL_POWERLEVEL_ULTRALOW,
- PP_DAL_POWERLEVEL_1 = PP_DAL_POWERLEVEL_LOW,
- PP_DAL_POWERLEVEL_2 = PP_DAL_POWERLEVEL_NOMINAL,
- PP_DAL_POWERLEVEL_3 = PP_DAL_POWERLEVEL_PERFORMANCE,
- PP_DAL_POWERLEVEL_4 = PP_DAL_POWERLEVEL_3+1,
- PP_DAL_POWERLEVEL_5 = PP_DAL_POWERLEVEL_4+1,
- PP_DAL_POWERLEVEL_6 = PP_DAL_POWERLEVEL_5+1,
- PP_DAL_POWERLEVEL_7 = PP_DAL_POWERLEVEL_6+1,
-};
-
-
extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
@@ -375,11 +367,25 @@ extern int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
const struct amd_pp_display_configuration *display_config);
extern int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
- struct amd_pp_dal_clock_info*info);
+ struct amd_pp_simple_clock_info *info);
extern int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr);
extern int phm_power_down_asic(struct pp_hwmgr *hwmgr);
+extern int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+ PHM_PerformanceLevelDesignation designation, uint32_t index,
+ PHM_PerformanceLevel *level);
+
+extern int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+ struct pp_clock_info *pclock_info,
+ PHM_PerformanceLevelDesignation designation);
+
+extern int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info);
+
+extern int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
+
+extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
+
#endif /* _HARDWARE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index aeaa3dbba525..928f5a740cba 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -325,8 +325,18 @@ struct pp_hwmgr_func {
bool cc6_disable, bool pstate_disable,
bool pstate_switch_disable);
int (*get_dal_power_level)(struct pp_hwmgr *hwmgr,
- struct amd_pp_dal_clock_info *info);
+ struct amd_pp_simple_clock_info *info);
+ int (*get_performance_level)(struct pp_hwmgr *, const struct pp_hw_power_state *,
+ PHM_PerformanceLevelDesignation, uint32_t, PHM_PerformanceLevel *);
+ int (*get_current_shallow_sleep_clocks)(struct pp_hwmgr *hwmgr,
+ const struct pp_hw_power_state *state, struct pp_clock_info *clock_info);
+ int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
+ int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
+ int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
+ int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
+ int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, int level);
+ int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h b/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h
new file mode 100644
index 000000000000..f49d1963fe85
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _PP_ENDIAN_H_
+#define _PP_ENDIAN_H_
+
+#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
+#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
+
+#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
+#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
+
+#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
+#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
+
+#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
+
+#endif /* _PP_ENDIAN_H_ */
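These macros, previously duplicated by the fiji and tonga headers, exist
because the SMU firmware consumes big-endian fields while the host is
usually little-endian; the CONVERT_* forms rewrite a field in place just
before a table is uploaded. A short usage sketch (assuming a uint32_t
field such as SystemFlags, as populated earlier in this series):

	table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;	/* host order */
	/* ... finish filling the table ... */
	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);	/* now big-endian */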
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 504f035d1843..fc9e3d1dd409 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -32,6 +32,27 @@ struct pp_instance;
#define smu_lower_32_bits(n) ((uint32_t)(n))
#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
+enum AVFS_BTC_STATUS {
+ AVFS_BTC_BOOT = 0,
+ AVFS_BTC_BOOT_STARTEDSMU,
+ AVFS_LOAD_VIRUS,
+ AVFS_BTC_VIRUS_LOADED,
+ AVFS_BTC_VIRUS_FAIL,
+ AVFS_BTC_COMPLETED_PREVIOUSLY,
+ AVFS_BTC_ENABLEAVFS,
+ AVFS_BTC_STARTED,
+ AVFS_BTC_FAILED,
+ AVFS_BTC_RESTOREVFT_FAILED,
+ AVFS_BTC_SAVEVFT_FAILED,
+ AVFS_BTC_DPMTABLESETUP_FAILED,
+ AVFS_BTC_COMPLETED_UNSAVED,
+ AVFS_BTC_COMPLETED_SAVED,
+ AVFS_BTC_COMPLETED_RESTORED,
+ AVFS_BTC_DISABLED,
+ AVFS_BTC_NOTSUPPORTED,
+ AVFS_BTC_SMUMSG_ERROR
+};
+
struct pp_smumgr_func {
int (*smu_init)(struct pp_smumgr *smumgr);
int (*smu_fini)(struct pp_smumgr *smumgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index 8cd22d9c9140..b4eb483215b1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -23,24 +23,6 @@
#ifndef _FIJI_SMUMANAGER_H_
#define _FIJI_SMUMANAGER_H_
-enum AVFS_BTC_STATUS {
- AVFS_BTC_BOOT = 0,
- AVFS_BTC_BOOT_STARTEDSMU,
- AVFS_LOAD_VIRUS,
- AVFS_BTC_VIRUS_LOADED,
- AVFS_BTC_VIRUS_FAIL,
- AVFS_BTC_STARTED,
- AVFS_BTC_FAILED,
- AVFS_BTC_RESTOREVFT_FAILED,
- AVFS_BTC_SAVEVFT_FAILED,
- AVFS_BTC_DPMTABLESETUP_FAILED,
- AVFS_BTC_COMPLETED_UNSAVED,
- AVFS_BTC_COMPLETED_SAVED,
- AVFS_BTC_COMPLETED_RESTORED,
- AVFS_BTC_DISABLED,
- AVFS_BTC_NOTSUPPORTED,
- AVFS_BTC_SMUMSG_ERROR
-};
struct fiji_smu_avfs {
enum AVFS_BTC_STATUS AvfsBtcStatus;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 8b2becd1aa07..a5ff9458d359 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -229,6 +229,14 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
amd_sched_wakeup(entity->sched);
}
+static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
+{
+ struct amd_sched_entity *entity =
+ container_of(cb, struct amd_sched_entity, cb);
+ entity->dependency = NULL;
+ fence_put(f);
+}
+
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
@@ -251,7 +259,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
}
/* Wait for fence to be scheduled */
- entity->cb.func = amd_sched_entity_wakeup;
+ entity->cb.func = amd_sched_entity_clear_dep;
list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
return true;
}
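The switch from amd_sched_entity_wakeup to amd_sched_entity_clear_dep in
the scheduled_cb path, as far as this hunk shows, addresses a stale-pointer
problem: the wakeup variant left entity->dependency set after the fence it
tracked had been scheduled. The new callback makes both halves of the
cleanup explicit:

	entity->dependency = NULL;	/* forget the satisfied dependency */
	fence_put(f);			/* drop the reference taken when it was added */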
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 87c78eecea64..dc115aea352b 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -84,12 +84,33 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
return true;
}
-static void amd_sched_fence_release(struct fence *f)
+/**
+ * amd_sched_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the fence memory after the RCU grace period.
+ */
+static void amd_sched_fence_free(struct rcu_head *rcu)
{
+ struct fence *f = container_of(rcu, struct fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
kmem_cache_free(sched_fence_slab, fence);
}
+/**
+ * amd_sched_fence_release - callback that fence can be freed
+ *
+ * @fence: fence
+ *
+ * This function is called when the reference count becomes zero.
+ * It just RCU schedules freeing up the fence.
+ */
+static void amd_sched_fence_release(struct fence *f)
+{
+ call_rcu(&f->rcu, amd_sched_fence_free);
+}
+
const struct fence_ops amd_sched_fence_ops = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
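The split of the release path above is the standard RCU-deferred-free
idiom: fence pointers may be dereferenced under RCU (for example through
reservation objects), so freeing synchronously in .release could pull
memory out from under a reader. The shape of the idiom, reduced to
essentials (the example_* names are illustrative):

	static void example_free(struct rcu_head *rcu)
	{
		struct fence *f = container_of(rcu, struct fence, rcu);

		kmem_cache_free(sched_fence_slab, to_amd_sched_fence(f));
	}

	static void example_release(struct fence *f)
	{
		/* Defer the actual free until after an RCU grace period. */
		call_rcu(&f->rcu, example_free);
	}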
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
new file mode 100644
index 000000000000..eaed454e043c
--- /dev/null
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -0,0 +1,27 @@
+config DRM_ARM
+ bool
+ help
+ Choose this option to select drivers for ARM's devices
+
+config DRM_HDLCD
+ tristate "ARM HDLCD"
+ depends on DRM && OF && (ARM || ARM64)
+ depends on COMMON_CLK
+ select DRM_ARM
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_CMA_HELPER
+ help
+ Choose this option if you have an ARM High Definition Colour LCD
+ controller.
+
+ If M is selected the module will be called hdlcd.
+
+config DRM_HDLCD_SHOW_UNDERRUN
+ bool "Show underrun conditions"
+ depends on DRM_HDLCD
+ default n
+ help
+ Enable this option to highlight in red the pixels that the
+ HDLCD device did not fetch from the framebuffer due to underrun
+ conditions.
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
new file mode 100644
index 000000000000..89dcb7bab93a
--- /dev/null
+++ b/drivers/gpu/drm/arm/Makefile
@@ -0,0 +1,2 @@
+hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
+obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
new file mode 100644
index 000000000000..fef1b04c2aab
--- /dev/null
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Implementation of a CRTC class for the HDLCD driver.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/clk.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+#include <video/videomode.h>
+
+#include "hdlcd_drv.h"
+#include "hdlcd_regs.h"
+
+/*
+ * The HDLCD controller is a dumb RGB streamer that gets connected to
+ * a single HDMI transmitter or, in the case of the ARM Models, is
+ * emulated by the software that does the actual rendering.
+ */
+
+static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS;
+
+/*
+ * Setup the HDLCD registers for decoding the pixels out of the framebuffer
+ */
+static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
+{
+ unsigned int btpp;
+ struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+ uint32_t pixel_format;
+ struct simplefb_format *format = NULL;
+ int i;
+
+ pixel_format = crtc->primary->state->fb->pixel_format;
+
+ for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
+ if (supported_formats[i].fourcc == pixel_format)
+ format = &supported_formats[i];
+ }
+
+ if (WARN_ON(!format))
+ return 0;
+
+ /* HDLCD uses 'bytes per pixel', zero means 1 byte */
+ btpp = (format->bits_per_pixel + 7) / 8;
+ hdlcd_write(hdlcd, HDLCD_REG_PIXEL_FORMAT, (btpp - 1) << 3);
+
+ /*
+ * The format of the HDLCD_REG_<color>_SELECT register is:
+ * - bits[23:16] - default value for that color component
+ * - bits[11:8] - number of bits to extract for each color component
+ * - bits[4:0] - index of the lowest bit to extract
+ *
+ * The default color value is used when bits[11:8] are zero, when the
+ * pixel is outside the visible frame area or when there is a
+ * buffer underrun.
+ */
+ hdlcd_write(hdlcd, HDLCD_REG_RED_SELECT, format->red.offset |
+#ifdef CONFIG_DRM_HDLCD_SHOW_UNDERRUN
+ 0x00ff0000 | /* show underruns in red */
+#endif
+ ((format->red.length & 0xf) << 8));
+ hdlcd_write(hdlcd, HDLCD_REG_GREEN_SELECT, format->green.offset |
+ ((format->green.length & 0xf) << 8));
+ hdlcd_write(hdlcd, HDLCD_REG_BLUE_SELECT, format->blue.offset |
+ ((format->blue.length & 0xf) << 8));
+
+ return 0;
+}
+
+static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+ struct drm_display_mode *m = &crtc->state->adjusted_mode;
+ struct videomode vm;
+ unsigned int polarities, line_length, err;
+
+ vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
+ vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
+ vm.vsync_len = m->crtc_vsync_end - m->crtc_vsync_start;
+ vm.hfront_porch = m->crtc_hsync_start - m->crtc_hdisplay;
+ vm.hback_porch = m->crtc_htotal - m->crtc_hsync_end;
+ vm.hsync_len = m->crtc_hsync_end - m->crtc_hsync_start;
+
+ polarities = HDLCD_POLARITY_DATAEN | HDLCD_POLARITY_DATA;
+
+ if (m->flags & DRM_MODE_FLAG_PHSYNC)
+ polarities |= HDLCD_POLARITY_HSYNC;
+ if (m->flags & DRM_MODE_FLAG_PVSYNC)
+ polarities |= HDLCD_POLARITY_VSYNC;
+
+ line_length = crtc->primary->state->fb->pitches[0];
+
+ /* Allow max number of outstanding requests and largest burst size */
+ hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
+ HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
+
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
+
+ err = hdlcd_set_pxl_fmt(crtc);
+ if (err)
+ return;
+
+ clk_set_rate(hdlcd->clk, m->crtc_clock * 1000);
+}
+
+static void hdlcd_crtc_enable(struct drm_crtc *crtc)
+{
+ struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+ clk_prepare_enable(hdlcd->clk);
+ hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void hdlcd_crtc_disable(struct drm_crtc *crtc)
+{
+ struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+ if (!crtc->primary->fb)
+ return;
+
+ clk_disable_unprepare(hdlcd->clk);
+ hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+ drm_crtc_vblank_off(crtc);
+}
+
+static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+ struct drm_display_mode *mode = &state->adjusted_mode;
+ long rate, clk_rate = mode->clock * 1000;
+
+ rate = clk_round_rate(hdlcd->clk, clk_rate);
+ if (rate != clk_rate) {
+ /* clock required by mode not supported by hardware */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ crtc->state->event = NULL;
+ event->pipe = drm_crtc_index(crtc);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ list_add_tail(&event->base.link, &hdlcd->event_list);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+}
+
+static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
+ .mode_fixup = hdlcd_crtc_mode_fixup,
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
+ .mode_set_nofb = hdlcd_crtc_mode_set_nofb,
+ .enable = hdlcd_crtc_enable,
+ .disable = hdlcd_crtc_disable,
+ .prepare = hdlcd_crtc_disable,
+ .commit = hdlcd_crtc_enable,
+ .atomic_check = hdlcd_crtc_atomic_check,
+ .atomic_begin = hdlcd_crtc_atomic_begin,
+ .atomic_flush = hdlcd_crtc_atomic_flush,
+};
+
+static int hdlcd_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void hdlcd_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct hdlcd_drm_private *hdlcd;
+ struct drm_gem_cma_object *gem;
+ dma_addr_t scanout_start;
+
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
+ gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
+ scanout_start = gem->paddr;
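+ /* hand the new buffer's physical address to the scanout engine */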
+ hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
+}
+
+static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
+ .prepare_fb = NULL,
+ .cleanup_fb = NULL,
+ .atomic_check = hdlcd_plane_atomic_check,
+ .atomic_update = hdlcd_plane_atomic_update,
+};
+
+static void hdlcd_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs hdlcd_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = hdlcd_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
+{
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct drm_plane *plane = NULL;
+ u32 formats[ARRAY_SIZE(supported_formats)], i;
+ int ret;
+
+ plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
+ formats[i] = supported_formats[i].fourcc;
+
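+ /* register the primary plane with the full list of supported formats */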
+ ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs,
+ formats, ARRAY_SIZE(formats),
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ devm_kfree(drm->dev, plane);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
+ hdlcd->plane = plane;
+
+ return plane;
+}
+
+void hdlcd_crtc_suspend(struct drm_crtc *crtc)
+{
+ hdlcd_crtc_disable(crtc);
+}
+
+void hdlcd_crtc_resume(struct drm_crtc *crtc)
+{
+ hdlcd_crtc_enable(crtc);
+}
+
+int hdlcd_setup_crtc(struct drm_device *drm)
+{
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct drm_plane *primary;
+ int ret;
+
+ primary = hdlcd_plane_init(drm);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
+ &hdlcd_crtc_funcs, NULL);
+ if (ret) {
+ hdlcd_plane_destroy(primary);
+ devm_kfree(drm->dev, primary);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs);
+ return 0;
+}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
new file mode 100644
index 000000000000..3ac1ae4d8caf
--- /dev/null
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * ARM HDLCD Driver
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+
+#include "hdlcd_drv.h"
+#include "hdlcd_regs.h"
+
+static int hdlcd_load(struct drm_device *drm, unsigned long flags)
+{
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct platform_device *pdev = to_platform_device(drm->dev);
+ struct resource *res;
+ u32 version;
+ int ret;
+
+ hdlcd->clk = devm_clk_get(drm->dev, "pxlclk");
+ if (IS_ERR(hdlcd->clk))
+ return PTR_ERR(hdlcd->clk);
+
+#ifdef CONFIG_DEBUG_FS
+ atomic_set(&hdlcd->buffer_underrun_count, 0);
+ atomic_set(&hdlcd->bus_error_count, 0);
+ atomic_set(&hdlcd->vsync_count, 0);
+ atomic_set(&hdlcd->dma_end_count, 0);
+#endif
+
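+ /* vblank events queued in atomic_begin() and completed from hdlcd_irq() */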
+ INIT_LIST_HEAD(&hdlcd->event_list);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
+ if (IS_ERR(hdlcd->mmio)) {
+ DRM_ERROR("failed to map control registers area\n");
+ ret = PTR_ERR(hdlcd->mmio);
+ hdlcd->mmio = NULL;
+ return ret;
+ }
+
+ version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
+ if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
+ DRM_ERROR("unknown product id: 0x%x\n", version);
+ return -EINVAL;
+ }
+ DRM_INFO("found ARM HDLCD version r%dp%d\n",
+ (version & HDLCD_VERSION_MAJOR_MASK) >> 8,
+ version & HDLCD_VERSION_MINOR_MASK);
+
+ /* Get the optional framebuffer memory resource */
+ ret = of_reserved_mem_device_init(drm->dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto setup_fail;
+
+ ret = hdlcd_setup_crtc(drm);
+ if (ret < 0) {
+ DRM_ERROR("failed to create crtc\n");
+ goto setup_fail;
+ }
+
+ pm_runtime_enable(drm->dev);
+
+ pm_runtime_get_sync(drm->dev);
+ ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
+ pm_runtime_put_sync(drm->dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to install IRQ handler\n");
+ goto irq_fail;
+ }
+
+ return 0;
+
+irq_fail:
+ drm_crtc_cleanup(&hdlcd->crtc);
+setup_fail:
+ of_reserved_mem_device_release(drm->dev);
+
+ return ret;
+}
+
+static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
+{
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+ if (hdlcd->fbdev)
+ drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
+}
+
+static int hdlcd_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool async)
+{
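+ /* async page flips are not supported yet; always commit synchronously */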
+ return drm_atomic_helper_commit(dev, state, false);
+}
+
+static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = hdlcd_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = hdlcd_atomic_commit,
+};
+
+static void hdlcd_setup_mode_config(struct drm_device *drm)
+{
+ drm_mode_config_init(drm);
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = HDLCD_MAX_XRES;
+ drm->mode_config.max_height = HDLCD_MAX_YRES;
+ drm->mode_config.funcs = &hdlcd_mode_config_funcs;
+}
+
+static void hdlcd_lastclose(struct drm_device *drm)
+{
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(hdlcd->fbdev);
+}
+
+static irqreturn_t hdlcd_irq(int irq, void *arg)
+{
+ struct drm_device *drm = arg;
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ unsigned long irq_status;
+
+ irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS);
+
+#ifdef CONFIG_DEBUG_FS
+ if (irq_status & HDLCD_INTERRUPT_UNDERRUN)
+ atomic_inc(&hdlcd->buffer_underrun_count);
+
+ if (irq_status & HDLCD_INTERRUPT_DMA_END)
+ atomic_inc(&hdlcd->dma_end_count);
+
+ if (irq_status & HDLCD_INTERRUPT_BUS_ERROR)
+ atomic_inc(&hdlcd->bus_error_count);
+
+ if (irq_status & HDLCD_INTERRUPT_VSYNC)
+ atomic_inc(&hdlcd->vsync_count);
+
+#endif
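+ /* on vsync, signal DRM's vblank machinery and send queued flip events */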
+ if (irq_status & HDLCD_INTERRUPT_VSYNC) {
+ bool events_sent = false;
+ unsigned long flags;
+ struct drm_pending_vblank_event *e, *t;
+
+ drm_crtc_handle_vblank(&hdlcd->crtc);
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
+ list_del(&e->base.link);
+ drm_crtc_send_vblank_event(&hdlcd->crtc, e);
+ events_sent = true;
+ }
+ if (events_sent)
+ drm_crtc_vblank_put(&hdlcd->crtc);
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+
+ /* acknowledge interrupt(s) */
+ hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
+
+ return IRQ_HANDLED;
+}
+
+static void hdlcd_irq_preinstall(struct drm_device *drm)
+{
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ /* Ensure interrupts are disabled */
+ hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
+ hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0);
+}
+
+static int hdlcd_irq_postinstall(struct drm_device *drm)
+{
+#ifdef CONFIG_DEBUG_FS
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+ /* enable debug interrupts */
+ irq_mask |= HDLCD_DEBUG_INT_MASK;
+
+ hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
+#endif
+ return 0;
+}
+
+static void hdlcd_irq_uninstall(struct drm_device *drm)
+{
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ /* disable all the interrupts that we might have enabled */
+ unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+#ifdef CONFIG_DEBUG_FS
+ /* disable debug interrupts */
+ irq_mask &= ~HDLCD_DEBUG_INT_MASK;
+#endif
+
+ /* disable vsync interrupts */
+ irq_mask &= ~HDLCD_INTERRUPT_VSYNC;
+
+ hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
+}
+
+static int hdlcd_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+ hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask | HDLCD_INTERRUPT_VSYNC);
+
+ return 0;
+}
+
+static void hdlcd_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+ hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask & ~HDLCD_INTERRUPT_VSYNC);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm = node->minor->dev;
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+ seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
+ seq_printf(m, "dma_end : %d\n", atomic_read(&hdlcd->dma_end_count));
+ seq_printf(m, "bus_error: %d\n", atomic_read(&hdlcd->bus_error_count));
+ seq_printf(m, "vsync : %d\n", atomic_read(&hdlcd->vsync_count));
+ return 0;
+}
+
+static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm = node->minor->dev;
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ unsigned long clkrate = clk_get_rate(hdlcd->clk);
+ unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
+
+ seq_printf(m, "hw : %lu\n", clkrate);
+ seq_printf(m, "mode: %lu\n", mode_clock);
+ return 0;
+}
+
+static struct drm_info_list hdlcd_debugfs_list[] = {
+ { "interrupt_count", hdlcd_show_underrun_count, 0 },
+ { "clocks", hdlcd_show_pxlclock, 0 },
+};
+
+static int hdlcd_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(hdlcd_debugfs_list,
+ ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor);
+}
+
+static void hdlcd_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(hdlcd_debugfs_list,
+ ARRAY_SIZE(hdlcd_debugfs_list), minor);
+}
+#endif
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver hdlcd_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+ DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .lastclose = hdlcd_lastclose,
+ .irq_handler = hdlcd_irq,
+ .irq_preinstall = hdlcd_irq_preinstall,
+ .irq_postinstall = hdlcd_irq_postinstall,
+ .irq_uninstall = hdlcd_irq_uninstall,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = hdlcd_enable_vblank,
+ .disable_vblank = hdlcd_disable_vblank,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = hdlcd_debugfs_init,
+ .debugfs_cleanup = hdlcd_debugfs_cleanup,
+#endif
+ .fops = &fops,
+ .name = "hdlcd",
+ .desc = "ARM HDLCD Controller DRM",
+ .date = "20151021",
+ .major = 1,
+ .minor = 0,
+};
+
+static int hdlcd_drm_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ struct hdlcd_drm_private *hdlcd;
+ int ret;
+
+ hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL);
+ if (!hdlcd)
+ return -ENOMEM;
+
+ drm = drm_dev_alloc(&hdlcd_driver, dev);
+ if (!drm)
+ return -ENOMEM;
+
+ drm->dev_private = hdlcd;
+ hdlcd_setup_mode_config(drm);
+ ret = hdlcd_load(drm, 0);
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_unload;
+
+ dev_set_drvdata(dev, drm);
+
+ ret = component_bind_all(dev, drm);
+ if (ret) {
+ DRM_ERROR("Failed to bind all components\n");
+ goto err_unregister;
+ }
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialise vblank\n");
+ goto err_vblank;
+ }
+ drm->vblank_disable_allowed = true;
+
+ drm_mode_config_reset(drm);
+ drm_kms_helper_poll_init(drm);
+
+ hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+
+ if (IS_ERR(hdlcd->fbdev)) {
+ ret = PTR_ERR(hdlcd->fbdev);
+ hdlcd->fbdev = NULL;
+ goto err_fbdev;
+ }
+
+ return 0;
+
+err_fbdev:
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+err_vblank:
+ component_unbind_all(dev, drm);
+err_unregister:
+ drm_dev_unregister(drm);
+err_unload:
+ pm_runtime_get_sync(drm->dev);
+ drm_irq_uninstall(drm);
+ pm_runtime_put_sync(drm->dev);
+ pm_runtime_disable(drm->dev);
+ of_reserved_mem_device_release(drm->dev);
+err_free:
+ drm_dev_unref(drm);
+
+ return ret;
+}
+
+static void hdlcd_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+ if (hdlcd->fbdev) {
+ drm_fbdev_cma_fini(hdlcd->fbdev);
+ hdlcd->fbdev = NULL;
+ }
+ drm_kms_helper_poll_fini(drm);
+ component_unbind_all(dev, drm);
+ drm_vblank_cleanup(drm);
+ pm_runtime_get_sync(drm->dev);
+ drm_irq_uninstall(drm);
+ pm_runtime_put_sync(drm->dev);
+ pm_runtime_disable(drm->dev);
+ of_reserved_mem_device_release(drm->dev);
+ drm_mode_config_cleanup(drm);
+ drm_dev_unregister(drm);
+ drm_dev_unref(drm);
+ drm->dev_private = NULL;
+ dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops hdlcd_master_ops = {
+ .bind = hdlcd_drm_bind,
+ .unbind = hdlcd_drm_unbind,
+};
+
+static int compare_dev(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int hdlcd_probe(struct platform_device *pdev)
+{
+ struct device_node *port, *ep;
+ struct component_match *match = NULL;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ /* there is only one output port inside each device, find it */
+ ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ if (!ep)
+ return -ENODEV;
+
+ if (!of_device_is_available(ep)) {
+ of_node_put(ep);
+ return -ENODEV;
+ }
+
+ /* add the remote encoder port as a component */
+ port = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!port || !of_device_is_available(port)) {
+ of_node_put(port);
+ return -EAGAIN;
+ }
+
+ component_match_add(&pdev->dev, &match, compare_dev, port);
+
+ return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
+ match);
+}
+
+static int hdlcd_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &hdlcd_master_ops);
+ return 0;
+}
+
+static const struct of_device_id hdlcd_of_match[] = {
+ { .compatible = "arm,hdlcd" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hdlcd_of_match);
+
+static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct drm_crtc *crtc;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ hdlcd_crtc_suspend(crtc);
+ drm_modeset_unlock_all(drm);
+ return 0;
+}
+
+static int __maybe_unused hdlcd_pm_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct drm_crtc *crtc;
+
+ if (!pm_runtime_suspended(dev))
+ return 0;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ hdlcd_crtc_resume(crtc);
+ drm_modeset_unlock_all(drm);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
+
+static struct platform_driver hdlcd_platform_driver = {
+ .probe = hdlcd_probe,
+ .remove = hdlcd_remove,
+ .driver = {
+ .name = "hdlcd",
+ .pm = &hdlcd_pm_ops,
+ .of_match_table = hdlcd_of_match,
+ },
+};
+
+module_platform_driver(hdlcd_platform_driver);
+
+MODULE_AUTHOR("Liviu Dudau");
+MODULE_DESCRIPTION("ARM HDLCD DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
new file mode 100644
index 000000000000..aa234784f053
--- /dev/null
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -0,0 +1,42 @@
+/*
+ * ARM HDLCD Controller driver definitions
+ */
+
+#ifndef __HDLCD_DRV_H__
+#define __HDLCD_DRV_H__
+
+struct hdlcd_drm_private {
+ void __iomem *mmio;
+ struct clk *clk;
+ struct drm_fbdev_cma *fbdev;
+ struct drm_framebuffer *fb;
+ struct list_head event_list;
+ struct drm_crtc crtc;
+ struct drm_plane *plane;
+#ifdef CONFIG_DEBUG_FS
+ atomic_t buffer_underrun_count;
+ atomic_t bus_error_count;
+ atomic_t vsync_count;
+ atomic_t dma_end_count;
+#endif
+};
+
+#define crtc_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, crtc)
+
+static inline void hdlcd_write(struct hdlcd_drm_private *hdlcd,
+ unsigned int reg, u32 value)
+{
+ writel(value, hdlcd->mmio + reg);
+}
+
+static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg)
+{
+ return readl(hdlcd->mmio + reg);
+}
+
+int hdlcd_setup_crtc(struct drm_device *dev);
+void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
+void hdlcd_crtc_suspend(struct drm_crtc *crtc);
+void hdlcd_crtc_resume(struct drm_crtc *crtc);
+
+#endif /* __HDLCD_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/hdlcd_regs.h b/drivers/gpu/drm/arm/hdlcd_regs.h
new file mode 100644
index 000000000000..66799ebef6d3
--- /dev/null
+++ b/drivers/gpu/drm/arm/hdlcd_regs.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2013,2014 ARM Limited
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * ARM HDLCD Controller register definition
+ */
+
+#ifndef __HDLCD_REGS_H__
+#define __HDLCD_REGS_H__
+
+/* register offsets */
+#define HDLCD_REG_VERSION 0x0000 /* ro */
+#define HDLCD_REG_INT_RAWSTAT 0x0010 /* rw */
+#define HDLCD_REG_INT_CLEAR 0x0014 /* wo */
+#define HDLCD_REG_INT_MASK 0x0018 /* rw */
+#define HDLCD_REG_INT_STATUS 0x001c /* ro */
+#define HDLCD_REG_FB_BASE 0x0100 /* rw */
+#define HDLCD_REG_FB_LINE_LENGTH 0x0104 /* rw */
+#define HDLCD_REG_FB_LINE_COUNT 0x0108 /* rw */
+#define HDLCD_REG_FB_LINE_PITCH 0x010c /* rw */
+#define HDLCD_REG_BUS_OPTIONS 0x0110 /* rw */
+#define HDLCD_REG_V_SYNC 0x0200 /* rw */
+#define HDLCD_REG_V_BACK_PORCH 0x0204 /* rw */
+#define HDLCD_REG_V_DATA 0x0208 /* rw */
+#define HDLCD_REG_V_FRONT_PORCH 0x020c /* rw */
+#define HDLCD_REG_H_SYNC 0x0210 /* rw */
+#define HDLCD_REG_H_BACK_PORCH 0x0214 /* rw */
+#define HDLCD_REG_H_DATA 0x0218 /* rw */
+#define HDLCD_REG_H_FRONT_PORCH 0x021c /* rw */
+#define HDLCD_REG_POLARITIES 0x0220 /* rw */
+#define HDLCD_REG_COMMAND 0x0230 /* rw */
+#define HDLCD_REG_PIXEL_FORMAT 0x0240 /* rw */
+#define HDLCD_REG_RED_SELECT 0x0244 /* rw */
+#define HDLCD_REG_GREEN_SELECT 0x0248 /* rw */
+#define HDLCD_REG_BLUE_SELECT 0x024c /* rw */
+
+/* version */
+#define HDLCD_PRODUCT_ID 0x1CDC0000
+#define HDLCD_PRODUCT_MASK 0xFFFF0000
+#define HDLCD_VERSION_MAJOR_MASK 0x0000FF00
+#define HDLCD_VERSION_MINOR_MASK 0x000000FF
+
+/* interrupts */
+#define HDLCD_INTERRUPT_DMA_END (1 << 0)
+#define HDLCD_INTERRUPT_BUS_ERROR (1 << 1)
+#define HDLCD_INTERRUPT_VSYNC (1 << 2)
+#define HDLCD_INTERRUPT_UNDERRUN (1 << 3)
+#define HDLCD_DEBUG_INT_MASK (HDLCD_INTERRUPT_DMA_END | \
+ HDLCD_INTERRUPT_BUS_ERROR | \
+ HDLCD_INTERRUPT_UNDERRUN)
+
+/* polarities */
+#define HDLCD_POLARITY_VSYNC (1 << 0)
+#define HDLCD_POLARITY_HSYNC (1 << 1)
+#define HDLCD_POLARITY_DATAEN (1 << 2)
+#define HDLCD_POLARITY_DATA (1 << 3)
+#define HDLCD_POLARITY_PIXELCLK (1 << 4)
+
+/* commands */
+#define HDLCD_COMMAND_DISABLE (0 << 0)
+#define HDLCD_COMMAND_ENABLE (1 << 0)
+
+/* pixel format */
+#define HDLCD_PIXEL_FMT_LITTLE_ENDIAN (0 << 31)
+#define HDLCD_PIXEL_FMT_BIG_ENDIAN (1 << 31)
+#define HDLCD_BYTES_PER_PIXEL_MASK (3 << 3)
+
+/* bus options */
+#define HDLCD_BUS_BURST_MASK 0x01f
+#define HDLCD_BUS_MAX_OUTSTAND 0xf00
+#define HDLCD_BUS_BURST_NONE (0 << 0)
+#define HDLCD_BUS_BURST_1 (1 << 0)
+#define HDLCD_BUS_BURST_2 (1 << 1)
+#define HDLCD_BUS_BURST_4 (1 << 2)
+#define HDLCD_BUS_BURST_8 (1 << 3)
+#define HDLCD_BUS_BURST_16 (1 << 4)
+
+/* Max resolution supported is 4096x4096, 32bpp */
+#define HDLCD_MAX_XRES 4096
+#define HDLCD_MAX_YRES 4096
+
+#define NR_PALETTE 256
+
+#endif /* __HDLCD_REGS_H__ */
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 3bd7e1cde99e..82043c204b76 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -188,9 +188,6 @@ static const struct file_operations armada_drm_fops = {
static struct drm_driver armada_drm_driver = {
.load = armada_drm_load,
- .open = NULL,
- .preclose = NULL,
- .postclose = NULL,
.lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
.set_busid = drm_platform_set_busid,
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 6e731db31aa4..aca7f9cc6109 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
release:
for_each_sg(sgt->sgl, sg, num, i)
- page_cache_release(sg_page(sg));
+ put_page(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
if (dobj->obj.filp) {
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
- page_cache_release(sg_page(sg));
+ put_page(sg_page(sg));
}
sg_free_table(sgt);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 0123458cbd83..a965e7e8ad6e 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -497,13 +497,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
/* ast is different - we will force move buffers out of VRAM */
static int ast_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -617,7 +610,6 @@ static void ast_crtc_commit(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.dpms = ast_crtc_dpms,
- .mode_fixup = ast_crtc_mode_fixup,
.mode_set = ast_crtc_mode_set,
.mode_set_base = ast_crtc_mode_set_base,
.disable = ast_crtc_disable,
@@ -710,13 +702,6 @@ static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
}
-static bool ast_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void ast_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -736,7 +721,6 @@ static void ast_encoder_commit(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
.dpms = ast_encoder_dpms,
- .mode_fixup = ast_mode_fixup,
.prepare = ast_encoder_prepare,
.commit = ast_encoder_commit,
.mode_set = ast_encoder_mode_set,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 468a14f266a7..58c4f785cf84 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -121,13 +121,6 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
cfg);
}
-static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
{
struct drm_device *dev = c->dev;
@@ -261,7 +254,6 @@ static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
- .mode_fixup = atmel_hlcdc_crtc_mode_fixup,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
@@ -280,24 +272,6 @@ static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
kfree(crtc);
}
-void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *c,
- struct drm_file *file)
-{
- struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = c->dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- event = crtc->event;
- if (event && event->base.file_priv == file) {
- event->base.destroy(&event->base);
- drm_vblank_put(dev, crtc->id);
- crtc->event = NULL;
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -367,4 +341,3 @@ fail:
atmel_hlcdc_crtc_destroy(&crtc->base);
return ret;
}
-
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index a45b32ba029e..3d8d16402d07 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -619,15 +619,6 @@ static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
mutex_unlock(&dev->mode_config.mutex);
}
-static void atmel_hlcdc_dc_preclose(struct drm_device *dev,
- struct drm_file *file)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- atmel_hlcdc_crtc_cancel_page_flip(crtc, file);
-}
-
static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -698,7 +689,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .preclose = atmel_hlcdc_dc_preclose,
.lastclose = atmel_hlcdc_dc_lastclose,
.irq_handler = atmel_hlcdc_dc_irq_handler,
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index cf6b375bc38d..fed517f297da 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -152,9 +152,6 @@ int atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state);
void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
-void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc,
- struct drm_file *file);
-
void atmel_hlcdc_crtc_suspend(struct drm_crtc *crtc);
void atmel_hlcdc_crtc_resume(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 1ffe9c329c46..d65dcaee3832 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -558,7 +558,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (!state->base.crtc || !fb)
return 0;
- crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)];
+ crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
mode = &crtc_state->adjusted_mode;
state->src_x = s->src_x;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 7f1a3604b19f..b332b4d3b0e2 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -182,8 +182,8 @@ static const struct pci_device_id bochs_pci_tbl[] = {
{
.vendor = 0x1234,
.device = 0x1111,
- .subvendor = 0x1af4,
- .subdevice = 0x1100,
+ .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+ .subdevice = PCI_SUBDEVICE_ID_QEMU,
.driver_data = BOCHS_QEMU_STDVGA,
},
{
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 2849f1b95eec..96926f09e0c9 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -30,13 +30,6 @@ static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -135,7 +128,6 @@ static const struct drm_crtc_funcs bochs_crtc_funcs = {
static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
.dpms = bochs_crtc_dpms,
- .mode_fixup = bochs_crtc_mode_fixup,
.mode_set = bochs_crtc_mode_set,
.mode_set_base = bochs_crtc_mode_set_base,
.prepare = bochs_crtc_prepare,
@@ -152,13 +144,6 @@ static void bochs_crtc_init(struct drm_device *dev)
drm_crtc_helper_add(crtc, &bochs_helper_funcs);
}
-static bool bochs_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void bochs_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -179,7 +164,6 @@ static void bochs_encoder_commit(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
.dpms = bochs_encoder_dpms,
- .mode_fixup = bochs_encoder_mode_fixup,
.mode_set = bochs_encoder_mode_set,
.prepare = bochs_encoder_prepare,
.commit = bochs_encoder_commit,
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index b0aac4733020..9795b72472ba 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1391,13 +1391,6 @@ static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
mutex_unlock(&hdmi->mutex);
}
-static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
{
struct dw_hdmi *hdmi = bridge->driver_private;
@@ -1546,7 +1539,6 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.pre_enable = dw_hdmi_bridge_nop,
.post_disable = dw_hdmi_bridge_nop,
.mode_set = dw_hdmi_bridge_mode_set,
- .mode_fixup = dw_hdmi_bridge_mode_fixup,
};
static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b1619e29a564..7bc394ec9fb3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -33,8 +33,9 @@ static struct drm_driver driver;
/* only bind to the cirrus chip in qemu */
static const struct pci_device_id pciidlist[] = {
- { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
- 0, 0 },
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
+ PCI_SUBVENDOR_ID_REDHAT_QUMRANET, PCI_SUBDEVICE_ID_QEMU,
+ 0, 0, 0 },
{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
0x0001, 0, 0, 0 },
{0,}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 4a02854a6963..d3d8d7bfcc57 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -91,18 +91,6 @@ static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
WREG_GFX(0xe, gr0e);
}
-/*
- * The core passes the desired mode to the CRTC code to see whether any
- * CRTC-specific modifications need to be made to it. We're in a position
- * to just pass that straight through, so this does nothing
- */
-static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
struct cirrus_device *cdev = crtc->dev->dev_private;
@@ -372,7 +360,6 @@ static const struct drm_crtc_funcs cirrus_crtc_funcs = {
static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
.dpms = cirrus_crtc_dpms,
- .mode_fixup = cirrus_crtc_mode_fixup,
.mode_set = cirrus_crtc_mode_set,
.mode_set_base = cirrus_crtc_mode_set_base,
.prepare = cirrus_crtc_prepare,
@@ -430,14 +417,6 @@ void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = cirrus_crtc->lut_b[regno];
}
-
-static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -466,7 +445,6 @@ static void cirrus_encoder_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
.dpms = cirrus_encoder_dpms,
- .mode_fixup = cirrus_encoder_mode_fixup,
.mode_set = cirrus_encoder_mode_set,
.prepare = cirrus_encoder_prepare,
.commit = cirrus_encoder_commit,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9a7b44616b55..8ee1db866e80 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
/**
@@ -376,6 +377,58 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
/**
+ * drm_atomic_replace_property_blob - replace a blob property
+ * @blob: a pointer to the member blob to be replaced
+ * @new_blob: the new blob to replace with
+ * @replaced: whether the blob has been replaced
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static void
+drm_atomic_replace_property_blob(struct drm_property_blob **blob,
+ struct drm_property_blob *new_blob,
+ bool *replaced)
+{
+ struct drm_property_blob *old_blob = *blob;
+
+ if (old_blob == new_blob)
+ return;
+
+ if (old_blob)
+ drm_property_unreference_blob(old_blob);
+ if (new_blob)
+ drm_property_reference_blob(new_blob);
+ *blob = new_blob;
+ *replaced = true;
+
+ return;
+}
+
+static int
+drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ bool *replaced)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_property_blob *new_blob = NULL;
+
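+ /* a non-positive expected_size skips the length check (variable-sized LUTs) */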
+ if (blob_id != 0) {
+ new_blob = drm_property_lookup_blob(dev, blob_id);
+ if (new_blob == NULL)
+ return -EINVAL;
+ if (expected_size > 0 && expected_size != new_blob->length)
+ return -EINVAL;
+ }
+
+ drm_atomic_replace_property_blob(blob, new_blob, replaced);
+
+ return 0;
+}
+
+/**
* drm_atomic_crtc_set_property - set property on CRTC
* @crtc: the drm CRTC to set a property on
* @state: the state object to update with the new property value
@@ -397,6 +450,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
+ bool replaced = false;
int ret;
if (property == config->prop_active)
@@ -407,8 +461,31 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
drm_property_unreference_blob(mode);
return ret;
- }
- else if (crtc->funcs->atomic_set_property)
+ } else if (property == config->degamma_lut_property) {
+ ret = drm_atomic_replace_property_blob_from_id(crtc,
+ &state->degamma_lut,
+ val,
+ -1,
+ &replaced);
+ state->color_mgmt_changed = replaced;
+ return ret;
+ } else if (property == config->ctm_property) {
+ ret = drm_atomic_replace_property_blob_from_id(crtc,
+ &state->ctm,
+ val,
+ sizeof(struct drm_color_ctm),
+ &replaced);
+ state->color_mgmt_changed = replaced;
+ return ret;
+ } else if (property == config->gamma_lut_property) {
+ ret = drm_atomic_replace_property_blob_from_id(crtc,
+ &state->gamma_lut,
+ val,
+ -1,
+ &replaced);
+ state->color_mgmt_changed = replaced;
+ return ret;
+ } else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
return -EINVAL;
@@ -444,6 +521,12 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = state->active;
else if (property == config->prop_mode_id)
*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+ else if (property == config->degamma_lut_property)
+ *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
+ else if (property == config->ctm_property)
+ *val = (state->ctm) ? state->ctm->base.id : 0;
+ else if (property == config->gamma_lut_property)
+ *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else
@@ -1343,44 +1426,23 @@ static struct drm_pending_vblank_event *create_vblank_event(
struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- goto out;
- }
- file_priv->event_space -= sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- goto out;
- }
+ if (!e)
+ return NULL;
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
- e->event.base.length = sizeof e->event;
+ e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- e->base.event = &e->event.base;
- e->base.file_priv = file_priv;
- e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
-
-out:
- return e;
-}
-static void destroy_vblank_event(struct drm_device *dev,
- struct drm_file *file_priv, struct drm_pending_vblank_event *e)
-{
- unsigned long flags;
+ ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+ if (ret) {
+ kfree(e);
+ return NULL;
+ }
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof e->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
+ return e;
}
static int atomic_set_prop(struct drm_atomic_state *state,
@@ -1642,8 +1704,7 @@ out:
if (!crtc_state->event)
continue;
- destroy_vblank_event(dev, file_priv,
- crtc_state->event);
+ drm_event_cancel_free(dev, &crtc_state->event->base);
}
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4f2d3e161593..4befe25c81c7 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -67,7 +67,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
if (plane->state->crtc) {
- crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ plane->state->crtc);
if (WARN_ON(!crtc_state))
return;
@@ -76,8 +77,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
if (plane_state->crtc) {
- crtc_state =
- state->crtc_states[drm_crtc_index(plane_state->crtc)];
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ plane_state->crtc);
if (WARN_ON(!crtc_state))
return;
@@ -86,110 +87,185 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
-static bool
-check_pending_encoder_assignment(struct drm_atomic_state *state,
- struct drm_encoder *new_encoder)
+static int handle_conflicting_encoders(struct drm_atomic_state *state,
+ bool disable_conflicting_encoders)
{
- struct drm_connector *connector;
struct drm_connector_state *conn_state;
- int i;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ unsigned encoder_mask = 0;
+ int i, ret;
+ /*
+ * First loop, find all newly assigned encoders from the connectors
+ * part of the state. If the same encoder is assigned to multiple
+ * connectors bail out.
+ */
for_each_connector_in_state(state, connector, conn_state, i) {
- if (conn_state->best_encoder != new_encoder)
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+ struct drm_encoder *new_encoder;
+
+ if (!conn_state->crtc)
+ continue;
+
+ if (funcs->atomic_best_encoder)
+ new_encoder = funcs->atomic_best_encoder(connector, conn_state);
+ else
+ new_encoder = funcs->best_encoder(connector);
+
+ if (new_encoder) {
+ if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
+ new_encoder->base.id, new_encoder->name,
+ connector->base.id, connector->name);
+
+ return -EINVAL;
+ }
+
+ encoder_mask |= 1 << drm_encoder_index(new_encoder);
+ }
+ }
+
+ if (!encoder_mask)
+ return 0;
+
+ /*
+ * Second loop, iterate over all connectors not part of the state.
+ *
+ * If a conflicting encoder is found and disable_conflicting_encoders
+ * is not set, an error is returned. Userspace can provide a solution
+ * through the atomic ioctl.
+ *
+ * If the flag is set conflicting connectors are removed from the crtc
+ * and the crtc is disabled if no encoder is left. This preserves
+ * compatibility with the legacy set_config behavior.
+ */
+ drm_for_each_connector(connector, state->dev) {
+ struct drm_crtc_state *crtc_state;
+
+ if (drm_atomic_get_existing_connector_state(state, connector))
+ continue;
+
+ encoder = connector->state->best_encoder;
+ if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
continue;
- /* encoder already assigned and we're trying to re-steal it! */
- if (connector->state->best_encoder != conn_state->best_encoder)
- return false;
+ if (!disable_conflicting_encoders) {
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
+ encoder->base.id, encoder->name,
+ connector->state->crtc->base.id,
+ connector->state->crtc->name,
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
+ encoder->base.id, encoder->name,
+ conn_state->crtc->base.id, conn_state->crtc->name,
+ connector->base.id, connector->name);
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state, conn_state->crtc);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+ if (ret)
+ return ret;
+
+ if (!crtc_state->connector_mask) {
+ ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
+ NULL);
+ if (ret < 0)
+ return ret;
+
+ crtc_state->active = false;
+ }
}
- return true;
+ return 0;
}
-static struct drm_crtc *
-get_current_crtc_for_encoder(struct drm_device *dev,
- struct drm_encoder *encoder)
+static void
+set_best_encoder(struct drm_atomic_state *state,
+ struct drm_connector_state *conn_state,
+ struct drm_encoder *encoder)
{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+ if (conn_state->best_encoder) {
+ /* Unset the encoder_mask in the old crtc state. */
+ crtc = conn_state->connector->state->crtc;
- drm_for_each_connector(connector, dev) {
- if (connector->state->best_encoder != encoder)
- continue;
+ /* A NULL crtc is an error here because we should have
+ * duplicated a NULL best_encoder when crtc was NULL.
+ * As an exception restoring duplicated atomic state
+ * during resume is allowed, so don't warn when
+ * best_encoder is equal to encoder we intend to set.
+ */
+ WARN_ON(!crtc && encoder != conn_state->best_encoder);
+ if (crtc) {
+ crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+
+ crtc_state->encoder_mask &=
+ ~(1 << drm_encoder_index(conn_state->best_encoder));
+ }
+ }
- return connector->state->crtc;
+ if (encoder) {
+ crtc = conn_state->crtc;
+ WARN_ON(!crtc);
+ if (crtc) {
+ crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+
+ crtc_state->encoder_mask |=
+ 1 << drm_encoder_index(encoder);
+ }
}
- return NULL;
+ conn_state->best_encoder = encoder;
}
-static int
+static void
steal_encoder(struct drm_atomic_state *state,
- struct drm_encoder *encoder,
- struct drm_crtc *encoder_crtc)
+ struct drm_encoder *encoder)
{
- struct drm_mode_config *config = &state->dev->mode_config;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int ret;
-
- /*
- * We can only steal an encoder coming from a connector, which means we
- * must already hold the connection_mutex.
- */
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+ int i;
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
- encoder->base.id, encoder->name,
- encoder_crtc->base.id, encoder_crtc->name);
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ struct drm_crtc *encoder_crtc;
- crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ if (connector_state->best_encoder != encoder)
+ continue;
- crtc_state->connectors_changed = true;
+ encoder_crtc = connector->state->crtc;
- list_for_each_entry(connector, &config->connector_list, head) {
- if (connector->state->best_encoder != encoder)
- continue;
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
+ encoder->base.id, encoder->name,
+ encoder_crtc->base.id, encoder_crtc->name);
- DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ set_best_encoder(state, connector_state, NULL);
- connector_state = drm_atomic_get_connector_state(state,
- connector);
- if (IS_ERR(connector_state))
- return PTR_ERR(connector_state);
+ crtc_state = drm_atomic_get_existing_crtc_state(state, encoder_crtc);
+ crtc_state->connectors_changed = true;
- ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
- if (ret)
- return ret;
- connector_state->best_encoder = NULL;
+ return;
}
-
- return 0;
}
static int
-update_connector_routing(struct drm_atomic_state *state, int conn_idx)
+update_connector_routing(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
{
const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
- struct drm_crtc *encoder_crtc;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
struct drm_crtc_state *crtc_state;
- int idx, ret;
-
- connector = state->connectors[conn_idx];
- connector_state = state->connector_states[conn_idx];
-
- if (!connector)
- return 0;
DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
connector->base.id,
@@ -197,16 +273,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
if (connector->state->crtc != connector_state->crtc) {
if (connector->state->crtc) {
- idx = drm_crtc_index(connector->state->crtc);
-
- crtc_state = state->crtc_states[idx];
+ crtc_state = drm_atomic_get_existing_crtc_state(state, connector->state->crtc);
crtc_state->connectors_changed = true;
}
if (connector_state->crtc) {
- idx = drm_crtc_index(connector_state->crtc);
-
- crtc_state = state->crtc_states[idx];
+ crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
crtc_state->connectors_changed = true;
}
}
@@ -216,7 +288,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
connector->base.id,
connector->name);
- connector_state->best_encoder = NULL;
+ set_best_encoder(state, connector_state, NULL);
return 0;
}
@@ -245,6 +317,8 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
}
if (new_encoder == connector_state->best_encoder) {
+ set_best_encoder(state, connector_state, new_encoder);
+
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
connector->base.id,
connector->name,
@@ -256,33 +330,11 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
return 0;
}
- if (!check_pending_encoder_assignment(state, new_encoder)) {
- DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
- connector->base.id,
- connector->name);
- return -EINVAL;
- }
-
- encoder_crtc = get_current_crtc_for_encoder(state->dev,
- new_encoder);
+ steal_encoder(state, new_encoder);
- if (encoder_crtc) {
- ret = steal_encoder(state, new_encoder, encoder_crtc);
- if (ret) {
- DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
- return ret;
- }
- }
-
- if (WARN_ON(!connector_state->crtc))
- return -EINVAL;
+ set_best_encoder(state, connector_state, new_encoder);
- connector_state->best_encoder = new_encoder;
- idx = drm_crtc_index(connector_state->crtc);
-
- crtc_state = state->crtc_states[idx];
+ crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
crtc_state->connectors_changed = true;
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
@@ -323,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state)
if (!conn_state->crtc || !conn_state->best_encoder)
continue;
- crtc_state =
- state->crtc_states[drm_crtc_index(conn_state->crtc)];
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ conn_state->crtc);
/*
* Each encoder has at most one connector (since we always steal
@@ -445,13 +497,18 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
}
}
+ ret = handle_conflicting_encoders(state, state->legacy_set_config);
+ if (ret)
+ return ret;
+
for_each_connector_in_state(state, connector, connector_state, i) {
/*
* This only sets crtc->mode_changed for routing changes,
* drivers must set crtc->mode_changed themselves when connector
* properties need to be updated.
*/
- ret = update_connector_routing(state, i);
+ ret = update_connector_routing(state, connector,
+ connector_state);
if (ret)
return ret;
}
@@ -617,14 +674,14 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
- struct drm_crtc_state *old_crtc_state;
/* Shut down everything that's in the changeset and currently
* still on. So need to check the old, saved state. */
if (!old_conn_state->crtc)
continue;
- old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
+ old_crtc_state = drm_atomic_get_existing_crtc_state(old_state,
+ old_conn_state->crtc);
if (!old_crtc_state->active ||
!drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
@@ -1719,28 +1776,18 @@ static int update_output_state(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
- int ret, i, j;
+ int ret, i;
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
state->acquire_ctx);
if (ret)
return ret;
- /* First grab all affected connector/crtc states. */
- for (i = 0; i < set->num_connectors; i++) {
- conn_state = drm_atomic_get_connector_state(state,
- set->connectors[i]);
- if (IS_ERR(conn_state))
- return PTR_ERR(conn_state);
- }
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- return ret;
- }
+ /* First disable all connectors on the target crtc. */
+ ret = drm_atomic_add_affected_connectors(state, set->crtc);
+ if (ret)
+ return ret;
- /* Then recompute connector->crtc links and crtc enabling state. */
for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->crtc == set->crtc) {
ret = drm_atomic_set_crtc_for_connector(conn_state,
@@ -1748,16 +1795,19 @@ static int update_output_state(struct drm_atomic_state *state,
if (ret)
return ret;
}
+ }
- for (j = 0; j < set->num_connectors; j++) {
- if (set->connectors[j] == connector) {
- ret = drm_atomic_set_crtc_for_connector(conn_state,
- set->crtc);
- if (ret)
- return ret;
- break;
- }
- }
+ /* Then set all connectors from set->connectors on the target crtc */
+ for (i = 0; i < set->num_connectors; i++) {
+ conn_state = drm_atomic_get_connector_state(state,
+ set->connectors[i]);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state,
+ set->crtc);
+ if (ret)
+ return ret;
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
@@ -1800,6 +1850,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
if (!state)
return -ENOMEM;
+ state->legacy_set_config = true;
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
ret = __drm_atomic_helper_set_config(set, state);
@@ -2446,8 +2497,12 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
*/
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
- if (crtc->state)
+ if (crtc->state) {
drm_property_unreference_blob(crtc->state->mode_blob);
+ drm_property_unreference_blob(crtc->state->degamma_lut);
+ drm_property_unreference_blob(crtc->state->ctm);
+ drm_property_unreference_blob(crtc->state->gamma_lut);
+ }
kfree(crtc->state);
crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
@@ -2471,10 +2526,17 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
if (state->mode_blob)
drm_property_reference_blob(state->mode_blob);
+ if (state->degamma_lut)
+ drm_property_reference_blob(state->degamma_lut);
+ if (state->ctm)
+ drm_property_reference_blob(state->ctm);
+ if (state->gamma_lut)
+ drm_property_reference_blob(state->gamma_lut);
state->mode_changed = false;
state->active_changed = false;
state->planes_changed = false;
state->connectors_changed = false;
+ state->color_mgmt_changed = false;
state->event = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
@@ -2515,6 +2577,9 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
drm_property_unreference_blob(state->mode_blob);
+ drm_property_unreference_blob(state->degamma_lut);
+ drm_property_unreference_blob(state->ctm);
+ drm_property_unreference_blob(state->gamma_lut);
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
@@ -2549,8 +2614,10 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
- if (plane->state)
+ if (plane->state) {
plane->state->plane = plane;
+ plane->state->rotation = BIT(DRM_ROTATE_0);
+ }
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
@@ -2826,3 +2893,98 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
+
+/**
+ * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @start: index of the first entry to set (ignored here; the whole
+ * table is rewritten)
+ * @size: size of the tables
+ *
+ * Implements support for the legacy gamma correction table for drivers
+ * that support color management through the DEGAMMA_LUT/GAMMA_LUT
+ * properties.
+ */
+void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t start, uint32_t size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_property_blob *blob = NULL;
+ struct drm_color_lut *blob_data;
+ int i, ret = 0;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return;
+
+ blob = drm_property_create_blob(dev,
+ sizeof(struct drm_color_lut) * size,
+ NULL);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ blob = NULL;
+ goto fail;
+ }
+
+ /* Prepare GAMMA_LUT with the legacy values. */
+ blob_data = (struct drm_color_lut *) blob->data;
+ for (i = 0; i < size; i++) {
+ blob_data[i].red = red[i];
+ blob_data[i].green = green[i];
+ blob_data[i].blue = blue[i];
+ }
+
+ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ /* Reset DEGAMMA_LUT and CTM properties. */
+ ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+ config->degamma_lut_property, 0);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+ config->ctm_property, 0);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+ config->gamma_lut_property, blob->base.id);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+
+ drm_property_unreference_blob(blob);
+
+ return;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+ drm_property_unreference_blob(blob);
+
+ return;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
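
As a usage sketch (not part of this patch): a driver that exposes the new
DEGAMMA_LUT/CTM/GAMMA_LUT properties could route the legacy gamma ioctl
through this helper from its drm_crtc_funcs. All names below except the
helpers themselves are placeholders.

    /* Hypothetical driver code wiring up the new helper. */
    static const struct drm_crtc_funcs example_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .destroy = drm_crtc_cleanup,
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
        /* Legacy gamma_set now goes through the atomic GAMMA_LUT path. */
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
    };
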
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index bd93453afa61..b3654404abd0 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -186,7 +186,8 @@ void drm_bridge_disable(struct drm_bridge *bridge)
drm_bridge_disable(bridge->next);
- bridge->funcs->disable(bridge);
+ if (bridge->funcs->disable)
+ bridge->funcs->disable(bridge);
}
EXPORT_SYMBOL(drm_bridge_disable);
@@ -206,7 +207,8 @@ void drm_bridge_post_disable(struct drm_bridge *bridge)
if (!bridge)
return;
- bridge->funcs->post_disable(bridge);
+ if (bridge->funcs->post_disable)
+ bridge->funcs->post_disable(bridge);
drm_bridge_post_disable(bridge->next);
}
@@ -256,7 +258,8 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge)
drm_bridge_pre_enable(bridge->next);
- bridge->funcs->pre_enable(bridge);
+ if (bridge->funcs->pre_enable)
+ bridge->funcs->pre_enable(bridge);
}
EXPORT_SYMBOL(drm_bridge_pre_enable);
@@ -276,7 +279,8 @@ void drm_bridge_enable(struct drm_bridge *bridge)
if (!bridge)
return;
- bridge->funcs->enable(bridge);
+ if (bridge->funcs->enable)
+ bridge->funcs->enable(bridge);
drm_bridge_enable(bridge->next);
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f6191215b2cb..e08f962288d9 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -430,9 +430,7 @@ EXPORT_SYMBOL(drm_framebuffer_init);
static void __drm_framebuffer_unregister(struct drm_device *dev,
struct drm_framebuffer *fb)
{
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
- mutex_unlock(&dev->mode_config.idr_mutex);
+ drm_mode_object_put(dev, &fb->base);
fb->base.id = 0;
}
@@ -1150,6 +1148,29 @@ out_unlock:
EXPORT_SYMBOL(drm_encoder_init);
/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+ unsigned int index = 0;
+ struct drm_encoder *tmp;
+
+ drm_for_each_encoder(tmp, encoder->dev) {
+ if (tmp == encoder)
+ return index;
+
+ index++;
+ }
+
+ BUG();
+}
+EXPORT_SYMBOL(drm_encoder_index);
+
+/**
* drm_encoder_cleanup - cleans up an initialised encoder
* @encoder: encoder to cleanup
*
@@ -1531,6 +1552,41 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_mode_id = prop;
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "DEGAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_size_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "CTM", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.ctm_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "GAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "GAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_size_property = prop;
+
return 0;
}
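
For context, userspace drives the blob properties created above roughly as in
the following hedged libdrm sketch; fd, crtc_id and gamma_lut_prop_id (looked
up from the CRTC's property list) are assumed to exist, and error handling is
omitted.

    /* Hypothetical userspace code: upload a LUT blob and attach it. */
    struct drm_color_lut lut[256];
    uint32_t blob_id = 0;

    /* ... fill lut[i].red/.green/.blue with 16-bit values ... */
    drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &blob_id);
    drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
                             gamma_lut_prop_id, blob_id);
    drmModeDestroyPropertyBlob(fd, blob_id);
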
@@ -5254,7 +5310,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL;
struct drm_pending_vblank_event *e = NULL;
- unsigned long flags;
int ret = -EINVAL;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -5305,41 +5360,26 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
}
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- ret = -ENOMEM;
- spin_lock_irqsave(&dev->event_lock, flags);
- if (file_priv->event_space < sizeof(e->event)) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- goto out;
- }
- file_priv->event_space -= sizeof(e->event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (e == NULL) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof(e->event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (!e) {
+ ret = -ENOMEM;
goto out;
}
-
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
e->event.user_data = page_flip->user_data;
- e->base.event = &e->event.base;
- e->base.file_priv = file_priv;
- e->base.destroy =
- (void (*) (struct drm_pending_event *)) kfree;
+ ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+ if (ret) {
+ kfree(e);
+ goto out;
+ }
}
crtc->primary->old_fb = crtc->primary->fb;
ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
if (ret) {
- if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- spin_lock_irqsave(&dev->event_lock, flags);
- file_priv->event_space += sizeof(e->event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- }
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
+ drm_event_cancel_free(dev, &e->base);
/* Keep the old fb, don't unref it. */
crtc->primary->old_fb = NULL;
} else {
@@ -5720,6 +5760,48 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
/**
+ * drm_format_plane_width - width of the plane given the first plane
+ * @width: width of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+int drm_format_plane_width(int width, uint32_t format, int plane)
+{
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ if (plane == 0)
+ return width;
+
+ return width / drm_format_horz_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_width);
+
+/**
+ * drm_format_plane_height - height of the plane given the first plane
+ * @height: height of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
+ */
+int drm_format_plane_height(int height, uint32_t format, int plane)
+{
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ if (plane == 0)
+ return height;
+
+ return height / drm_format_vert_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_height);
+
+/**
* drm_rotation_simplify() - Try to simplify the rotation
* @rotation: Rotation to be simplified
* @supported_rotations: Supported rotations
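
To illustrate the two format helpers added in the hunk above: NV12 is
subsampled 2x2 in chroma, so for a 1920x1080 framebuffer the second plane
works out as follows (a small sketch, not part of the patch).

    int w = drm_format_plane_width(1920, DRM_FORMAT_NV12, 1);  /* 960 */
    int h = drm_format_plane_height(1080, DRM_FORMAT_NV12, 1); /* 540 */
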
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a02a7f9a6a9d..79555d2b1b87 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -73,9 +73,6 @@
* &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
* &drm_connector_helper_funcs.
*/
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
/**
* drm_helper_move_panel_connectors_to_head() - move panels to the front in the
@@ -220,6 +217,15 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
* disconnected connectors. Then it will disable all unused encoders and CRTCs
* either by calling their disable callback if available or by calling their
* dpms callback with DRM_MODE_DPMS_OFF.
+ *
+ * NOTE:
+ *
+ * This function is part of the legacy modeset helper library and will cause
+ * major confusion with atomic drivers. This is because atomic helpers guarantee
+ * to never call ->disable() hooks on a disabled function, or ->enable() hooks
+ * on an enabled function. drm_helper_disable_unused_functions() on the other
+ * hand throws such guarantees into the wind and calls disable hooks
+ * unconditionally on unused functions.
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
@@ -328,16 +334,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
encoder_funcs = encoder->helper_private;
- if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
- adjusted_mode))) {
- DRM_DEBUG_KMS("Encoder fixup failed\n");
- goto done;
+ if (encoder_funcs->mode_fixup) {
+ if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto done;
+ }
}
}
- if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
- DRM_DEBUG_KMS("CRTC fixup failed\n");
- goto done;
+ if (crtc_funcs->mode_fixup) {
+ if (!(ret = crtc_funcs->mode_fixup(crtc, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto done;
+ }
}
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
@@ -578,8 +589,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (set->crtc->primary->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
mode_changed = true;
- } else if (set->fb == NULL) {
- mode_changed = true;
} else if (set->fb->pixel_format !=
set->crtc->primary->fb->pixel_format) {
mode_changed = true;
@@ -590,7 +599,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (set->x != set->crtc->x || set->y != set->crtc->y)
fb_changed = true;
- if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ if (!drm_mode_equal(set->mode, &set->crtc->mode)) {
DRM_DEBUG_KMS("modes are different, full mode set\n");
drm_mode_debug_printmodeline(&set->crtc->mode);
drm_mode_debug_printmodeline(set->mode);
@@ -1066,3 +1075,36 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
+
+/**
+ * drm_helper_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction properties on a
+ * CRTC. This includes 3 degamma, csc and gamma properties that userspace can
+ * set and 2 size properties to inform the userspace of the lut sizes.
+ */
+void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+ int degamma_lut_size,
+ int gamma_lut_size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->ctm_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_property, 0);
+
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_size_property,
+ degamma_lut_size);
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_size_property,
+ gamma_lut_size);
+}
+EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
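
A driver would typically call this once per CRTC at init time, after creating
the CRTC; a minimal sketch, where the LUT sizes are hardware-specific
assumptions rather than values from this patch.

    /* e.g. a 33-entry piecewise-linear degamma and a 257-entry gamma LUT. */
    drm_helper_crtc_enable_color_mgmt(crtc, 33, 257);
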
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
new file mode 100644
index 000000000000..f73b38b33a8e
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rafael Antognolli <rafael.antognolli@intel.com>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drmP.h>
+
+struct drm_dp_aux_dev {
+ unsigned index;
+ struct drm_dp_aux *aux;
+ struct device *dev;
+ struct kref refcount;
+ atomic_t usecount;
+};
+
+#define DRM_AUX_MINORS 256
+#define AUX_MAX_OFFSET (1 << 20)
+static DEFINE_IDR(aux_idr);
+static DEFINE_MUTEX(aux_idr_mutex);
+static struct class *drm_dp_aux_dev_class;
+static int drm_dev_major = -1;
+
+static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
+{
+ struct drm_dp_aux_dev *aux_dev = NULL;
+
+ mutex_lock(&aux_idr_mutex);
+ aux_dev = idr_find(&aux_idr, index);
+ if (!aux_dev || !kref_get_unless_zero(&aux_dev->refcount))
+ aux_dev = NULL;
+ mutex_unlock(&aux_idr_mutex);
+
+ return aux_dev;
+}
+
+static struct drm_dp_aux_dev *alloc_drm_dp_aux_dev(struct drm_dp_aux *aux)
+{
+ struct drm_dp_aux_dev *aux_dev;
+ int index;
+
+ aux_dev = kzalloc(sizeof(*aux_dev), GFP_KERNEL);
+ if (!aux_dev)
+ return ERR_PTR(-ENOMEM);
+ aux_dev->aux = aux;
+ atomic_set(&aux_dev->usecount, 1);
+ kref_init(&aux_dev->refcount);
+
+ mutex_lock(&aux_idr_mutex);
+ index = idr_alloc_cyclic(&aux_idr, aux_dev, 0, DRM_AUX_MINORS,
+ GFP_KERNEL);
+ mutex_unlock(&aux_idr_mutex);
+ if (index < 0) {
+ kfree(aux_dev);
+ return ERR_PTR(index);
+ }
+ aux_dev->index = index;
+
+ return aux_dev;
+}
+
+static void release_drm_dp_aux_dev(struct kref *ref)
+{
+ struct drm_dp_aux_dev *aux_dev =
+ container_of(ref, struct drm_dp_aux_dev, refcount);
+
+ kfree(aux_dev);
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t res;
+ struct drm_dp_aux_dev *aux_dev =
+ drm_dp_aux_dev_get_by_minor(MINOR(dev->devt));
+
+ if (!aux_dev)
+ return -ENODEV;
+
+ res = sprintf(buf, "%s\n", aux_dev->aux->name);
+ kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
+
+ return res;
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *drm_dp_aux_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(drm_dp_aux);
+
+static int auxdev_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ struct drm_dp_aux_dev *aux_dev;
+
+ aux_dev = drm_dp_aux_dev_get_by_minor(minor);
+ if (!aux_dev)
+ return -ENODEV;
+
+ file->private_data = aux_dev;
+ return 0;
+}
+
+static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence)
+{
+ return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET);
+}
+
+static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count,
+ loff_t *offset)
+{
+ size_t bytes_pending, num_bytes_processed = 0;
+ struct drm_dp_aux_dev *aux_dev = file->private_data;
+ ssize_t res = 0;
+
+ if (!atomic_inc_not_zero(&aux_dev->usecount))
+ return -ENODEV;
+
+ bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - (*offset));
+
+ if (!access_ok(VERIFY_WRITE, buf, bytes_pending)) {
+ res = -EFAULT;
+ goto out;
+ }
+
+ while (bytes_pending > 0) {
+ uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
+ ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+
+ res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo);
+ if (res <= 0) {
+ res = num_bytes_processed ? num_bytes_processed : res;
+ goto out;
+ }
+ if (__copy_to_user(buf + num_bytes_processed, localbuf, res)) {
+ res = num_bytes_processed ?
+ num_bytes_processed : -EFAULT;
+ goto out;
+ }
+ bytes_pending -= res;
+ *offset += res;
+ num_bytes_processed += res;
+ res = num_bytes_processed;
+ }
+
+out:
+ atomic_dec(&aux_dev->usecount);
+ wake_up_atomic_t(&aux_dev->usecount);
+ return res;
+}
+
+static ssize_t auxdev_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ size_t bytes_pending, num_bytes_processed = 0;
+ struct drm_dp_aux_dev *aux_dev = file->private_data;
+ ssize_t res = 0;
+
+ if (!atomic_inc_not_zero(&aux_dev->usecount))
+ return -ENODEV;
+
+ bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - *offset);
+
+ if (!access_ok(VERIFY_READ, buf, bytes_pending)) {
+ res = -EFAULT;
+ goto out;
+ }
+
+ while (bytes_pending > 0) {
+ uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
+ ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+
+ if (__copy_from_user(localbuf,
+ buf + num_bytes_processed, todo)) {
+ res = num_bytes_processed ?
+ num_bytes_processed : -EFAULT;
+ goto out;
+ }
+
+ res = drm_dp_dpcd_write(aux_dev->aux, *offset, localbuf, todo);
+ if (res <= 0) {
+ res = num_bytes_processed ? num_bytes_processed : res;
+ goto out;
+ }
+ bytes_pending -= res;
+ *offset += res;
+ num_bytes_processed += res;
+ res = num_bytes_processed;
+ }
+
+out:
+ atomic_dec(&aux_dev->usecount);
+ wake_up_atomic_t(&aux_dev->usecount);
+ return res;
+}
+
+static int auxdev_release(struct inode *inode, struct file *file)
+{
+ struct drm_dp_aux_dev *aux_dev = file->private_data;
+
+ kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
+ return 0;
+}
+
+static const struct file_operations auxdev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = auxdev_llseek,
+ .read = auxdev_read,
+ .write = auxdev_write,
+ .open = auxdev_open,
+ .release = auxdev_release,
+};
+
+#define to_auxdev(d) container_of(d, struct drm_dp_aux_dev, aux)
+
+static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux)
+{
+ struct drm_dp_aux_dev *iter, *aux_dev = NULL;
+ int id;
+
+ /* don't increase kref count here because this function should only be
+ * used by drm_dp_aux_unregister_devnode. Thus, it will always have at
+ * least one reference - the one that drm_dp_aux_register_devnode
+ * created
+ */
+ mutex_lock(&aux_idr_mutex);
+ idr_for_each_entry(&aux_idr, iter, id) {
+ if (iter->aux == aux) {
+ aux_dev = iter;
+ break;
+ }
+ }
+ mutex_unlock(&aux_idr_mutex);
+ return aux_dev;
+}
+
+static int auxdev_wait_atomic_t(atomic_t *p)
+{
+ schedule();
+ return 0;
+}
+
+/**
+ * drm_dp_aux_unregister_devnode() - unregister a devnode for this aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Waits for any outstanding users of the devnode to finish, then removes
+ * and frees it.
+ */
+void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
+{
+ struct drm_dp_aux_dev *aux_dev;
+ unsigned int minor;
+
+ aux_dev = drm_dp_aux_dev_get_by_aux(aux);
+ if (!aux_dev) /* attach must have failed */
+ return;
+
+ mutex_lock(&aux_idr_mutex);
+ idr_remove(&aux_idr, aux_dev->index);
+ mutex_unlock(&aux_idr_mutex);
+
+ atomic_dec(&aux_dev->usecount);
+ wait_on_atomic_t(&aux_dev->usecount, auxdev_wait_atomic_t,
+ TASK_UNINTERRUPTIBLE);
+
+ minor = aux_dev->index;
+ if (aux_dev->dev)
+ device_destroy(drm_dp_aux_dev_class,
+ MKDEV(drm_dev_major, minor));
+
+ DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name);
+ kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
+}
+EXPORT_SYMBOL(drm_dp_aux_unregister_devnode);
+
+/**
+ * drm_dp_aux_register_devnode() - register a devnode for this aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
+{
+ struct drm_dp_aux_dev *aux_dev;
+ int res;
+
+ aux_dev = alloc_drm_dp_aux_dev(aux);
+ if (IS_ERR(aux_dev))
+ return PTR_ERR(aux_dev);
+
+ aux_dev->dev = device_create(drm_dp_aux_dev_class, aux->dev,
+ MKDEV(drm_dev_major, aux_dev->index), NULL,
+ "drm_dp_aux%d", aux_dev->index);
+ if (IS_ERR(aux_dev->dev)) {
+ res = PTR_ERR(aux_dev->dev);
+ aux_dev->dev = NULL;
+ goto error;
+ }
+
+ DRM_DEBUG("drm_dp_aux_dev: aux [%s] registered as minor %d\n",
+ aux->name, aux_dev->index);
+ return 0;
+error:
+ drm_dp_aux_unregister_devnode(aux);
+ return res;
+}
+EXPORT_SYMBOL(drm_dp_aux_register_devnode);
+
+int drm_dp_aux_dev_init(void)
+{
+ int res;
+
+ drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
+ if (IS_ERR(drm_dp_aux_dev_class)) {
+ res = PTR_ERR(drm_dp_aux_dev_class);
+ goto out;
+ }
+ drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
+
+ res = register_chrdev(0, "aux", &auxdev_fops);
+ if (res < 0)
+ goto out;
+ drm_dev_major = res;
+
+ return 0;
+out:
+ class_destroy(drm_dp_aux_dev_class);
+ return res;
+}
+EXPORT_SYMBOL(drm_dp_aux_dev_init);
+
+void drm_dp_aux_dev_exit(void)
+{
+ unregister_chrdev(drm_dev_major, "aux");
+ class_destroy(drm_dp_aux_dev_class);
+}
+EXPORT_SYMBOL(drm_dp_aux_dev_exit);
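
Once registered, the node can be exercised directly from userspace; a hedged
sketch that reads the DPCD revision register (offset 0x0) through the new
character device, assuming the channel landed on minor 0.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint8_t rev;
        int fd = open("/dev/drm_dp_aux0", O_RDONLY);

        /* pread() works because the fops use fixed_size_llseek(). */
        if (fd < 0 || pread(fd, &rev, 1, 0x0) != 1)
            return 1;
        printf("DPCD rev %u.%u\n", rev >> 4, rev & 0xf);
        close(fd);
        return 0;
    }
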
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 9535c5b60387..df64ed1c0139 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/i2c.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_aux_dev.h>
#include <drm/drmP.h>
/**
@@ -178,7 +179,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
{
struct drm_dp_aux_msg msg;
unsigned int retry;
- int err;
+ int err = 0;
memset(&msg, 0, sizeof(msg));
msg.address = offset;
@@ -186,6 +187,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
msg.buffer = buffer;
msg.size = size;
+ mutex_lock(&aux->hw_mutex);
+
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
@@ -194,25 +197,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
*/
for (retry = 0; retry < 32; retry++) {
- mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
- mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
- return err;
+ goto unlock;
}
switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
if (err < size)
- return -EPROTO;
- return err;
+ err = -EPROTO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_NACK:
- return -EIO;
+ err = -EIO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_DEFER:
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -221,7 +223,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
DRM_DEBUG_KMS("too many retries, giving up\n");
- return -EIO;
+ err = -EIO;
+
+unlock:
+ mutex_unlock(&aux->hw_mutex);
+ return err;
}
/**
@@ -543,9 +549,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
- mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
- mutex_unlock(&aux->hw_mutex);
if (ret < 0) {
if (ret == -EBUSY)
continue;
@@ -684,6 +688,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
memset(&msg, 0, sizeof(msg));
+ mutex_lock(&aux->hw_mutex);
+
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -738,6 +744,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
+ mutex_unlock(&aux->hw_mutex);
+
return err;
}
@@ -754,6 +762,8 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
*/
int drm_dp_aux_register(struct drm_dp_aux *aux)
{
+ int ret;
+
mutex_init(&aux->hw_mutex);
aux->ddc.algo = &drm_dp_i2c_algo;
@@ -768,7 +778,17 @@ int drm_dp_aux_register(struct drm_dp_aux *aux)
strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
sizeof(aux->ddc.name));
- return i2c_add_adapter(&aux->ddc);
+ ret = drm_dp_aux_register_devnode(aux);
+ if (ret)
+ return ret;
+
+ ret = i2c_add_adapter(&aux->ddc);
+ if (ret) {
+ drm_dp_aux_unregister_devnode(aux);
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_dp_aux_register);
@@ -778,6 +798,7 @@ EXPORT_SYMBOL(drm_dp_aux_register);
*/
void drm_dp_aux_unregister(struct drm_dp_aux *aux)
{
+ drm_dp_aux_unregister_devnode(aux);
i2c_del_adapter(&aux->ddc);
}
EXPORT_SYMBOL(drm_dp_aux_unregister);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 27fbd79d0daf..71ea0521ea96 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return -EINVAL;
+
port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
- if (!mstb)
+ if (!mstb) {
+ drm_dp_put_port(port);
return -EINVAL;
+ }
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
+ drm_dp_put_port(port);
return ret;
}
@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
+
+ if (port)
+ drm_dp_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->mst_primary) {
int sret;
+ u8 guid[16];
+
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
ret = -1;
goto out_unlock;
}
+
+ /* Some hubs forget their guids after they resume */
+ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (sret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
ret = 0;
} else
ret = -1;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 04cb4877fabd..558ef9fc39e6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -32,6 +32,7 @@
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_displayid.h>
@@ -204,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0f - 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 0x10 - 1024x768@60Hz */
@@ -521,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
+ 704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -538,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -1395,6 +1396,31 @@ struct edid *drm_get_edid(struct drm_connector *connector,
EXPORT_SYMBOL(drm_get_edid);
/**
+ * drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
+ * @connector: connector we're probing
+ * @adapter: I2C adapter to use for DDC
+ *
+ * Wrapper around drm_get_edid() for laptops with dual GPUs using one set of
+ * outputs. The wrapper adds the requisite vga_switcheroo calls to temporarily
+ * switch DDC to the GPU which is retrieving EDID.
+ *
+ * Return: Pointer to valid EDID or %NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct pci_dev *pdev = connector->dev->pdev;
+ struct edid *edid;
+
+ vga_switcheroo_lock_ddc(pdev);
+ edid = drm_get_edid(connector, adapter);
+ vga_switcheroo_unlock_ddc(pdev);
+
+ return edid;
+}
+EXPORT_SYMBOL(drm_get_edid_switcheroo);
+
+/**
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
*
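
A hedged sketch of how a connector's ->get_modes() hook on a dual-GPU laptop
might use the switcheroo wrapper above; example_connector,
to_example_connector() and the ddc field are hypothetical driver names.

    static int example_connector_get_modes(struct drm_connector *connector)
    {
        struct example_connector *ec = to_example_connector(connector);
        struct edid *edid;
        int count;

        /* Switches DDC to this GPU for the duration of the EDID read. */
        edid = drm_get_edid_switcheroo(connector, ec->ddc);
        if (!edid)
            return 0;

        drm_mode_connector_update_edid_property(connector, edid);
        count = drm_add_edid_modes(connector, edid);
        kfree(edid);
        return count;
    }
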
@@ -2215,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
int i, j, m, modes = 0;
struct drm_display_mode *mode;
- u8 *est = ((u8 *)timing) + 5;
+ u8 *est = ((u8 *)timing) + 6;
for (i = 0; i < 6; i++) {
for (j = 7; j >= 0; j--) {
@@ -3282,7 +3308,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
u8 *cea;
u8 *name;
u8 *db;
- int sad_count = 0;
+ int total_sad_count = 0;
int mnl;
int dbl;
@@ -3296,6 +3322,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
name = NULL;
drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+ /* max: 13 bytes EDID, 16 bytes ELD */
for (mnl = 0; name && mnl < 13; mnl++) {
if (name[mnl] == 0x0a)
break;
@@ -3324,11 +3351,15 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
dbl = cea_db_payload_len(db);
switch (cea_db_tag(db)) {
+ int sad_count;
+
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
- sad_count = dbl / 3;
- if (dbl >= 1)
- memcpy(eld + 20 + mnl, &db[1], dbl);
+ sad_count = min(dbl / 3, 15 - total_sad_count);
+ if (sad_count >= 1)
+ memcpy(eld + 20 + mnl + total_sad_count * 3,
+ &db[1], sad_count * 3);
+ total_sad_count += sad_count;
break;
case SPEAKER_BLOCK:
/* Speaker Allocation Data Block */
@@ -3345,13 +3376,13 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
}
}
}
- eld[5] |= sad_count << 4;
+ eld[5] |= total_sad_count << 4;
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
- drm_eld_size(eld), sad_count);
+ drm_eld_size(eld), total_sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 698b8c3b09d9..9a401aed98e0 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -170,16 +170,11 @@ static void *edid_load(struct drm_connector *connector, const char *name,
int i, valid_extensions = 0;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
- builtin = 0;
- for (i = 0; i < GENERIC_EDIDS; i++) {
- if (strcmp(name, generic_edid_name[i]) == 0) {
- fwdata = generic_edid[i];
- fwsize = sizeof(generic_edid[i]);
- builtin = 1;
- break;
- }
- }
- if (!builtin) {
+ builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
+ if (builtin >= 0) {
+ fwdata = generic_edid[builtin];
+ fwsize = sizeof(generic_edid[builtin]);
+ } else {
struct platform_device *pdev;
int err;
@@ -252,7 +247,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
}
DRM_INFO("Got %s EDID base block and %d extension%s from "
- "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
+ "\"%s\" for connector \"%s\"\n", (builtin >= 0) ? "built-in" :
"external", valid_extensions, valid_extensions == 1 ? "" : "s",
name, connector_name);
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index e8629076de32..4484785cd9ac 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -140,6 +140,9 @@ bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ if (!get_slave_funcs(encoder)->mode_fixup)
+ return true;
+
return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
}
EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c895b6fddbd8..bb88e3df9257 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -74,7 +74,8 @@ static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
};
static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
- const const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_cma_object **obj,
unsigned int num_planes)
{
struct drm_fb_cma *fb_cma;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1e103c4c6ee0..855108e6e1bd 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -104,21 +104,17 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
- int i;
+ int i, ret;
if (!drm_fbdev_emulation)
return 0;
mutex_lock(&dev->mode_config.mutex);
drm_for_each_connector(connector, dev) {
- struct drm_fb_helper_connector *fb_helper_connector;
+ ret = drm_fb_helper_add_one_connector(fb_helper, connector);
- fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
- if (!fb_helper_connector)
+ if (ret)
goto fail;
-
- fb_helper_connector->connector = connector;
- fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -130,7 +126,7 @@ fail:
fb_helper->connector_count = 0;
mutex_unlock(&dev->mode_config.mutex);
- return -ENOMEM;
+ return ret;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
@@ -1989,13 +1985,13 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
- crtcs = kcalloc(dev->mode_config.num_connector,
+ crtcs = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
- modes = kcalloc(dev->mode_config.num_connector,
+ modes = kcalloc(fb_helper->connector_count,
sizeof(struct drm_display_mode *), GFP_KERNEL);
- offsets = kcalloc(dev->mode_config.num_connector,
+ offsets = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_offset), GFP_KERNEL);
- enabled = kcalloc(dev->mode_config.num_connector,
+ enabled = kcalloc(fb_helper->connector_count,
sizeof(bool), GFP_KERNEL);
if (!crtcs || !modes || !enabled || !offsets) {
DRM_ERROR("Memory allocation failed\n");
@@ -2009,9 +2005,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
offsets,
enabled, width, height))) {
- memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
- memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
- memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
+ memset(modes, 0, fb_helper->connector_count*sizeof(modes[0]));
+ memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0]));
+ memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0]));
if (!drm_target_cloned(fb_helper, modes, offsets,
enabled, width, height) &&
@@ -2091,6 +2087,27 @@ out:
* drm_fb_helper_fill_fix() are provided as helpers to setup simple default
* values for the fbdev info structure.
*
+ * HANG DEBUGGING:
+ *
+ * When you have fbcon support built-in or already loaded, this function will do
+ * a full modeset to set up the fbdev console. Due to locking misdesign in the
+ * VT/fbdev subsystem that entire modeset sequence has to be done while holding
+ * console_lock. Until console_unlock is called no dmesg lines will be sent out
+ * to consoles, not even serial console. This means when your driver crashes,
+ * you will see absolutely nothing else but a system stuck in this function,
+ * with no further output. Any kind of printk() you place within your own driver
+ * or in the drm core modeset code will also never show up.
+ *
+ * Standard debug practice is to run the fbcon setup without taking the
+ * console_lock as a hack, to be able to see backtraces and crashes on the
+ * serial line. This can be done by setting the fb.lockless_register_fb=1 kernel
+ * cmdline option.
+ *
+ * The other option is to just disable fbdev emulation since very likely the
+ * first modeset from userspace will crash in the same way, and is even easier to
+ * debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
+ * kernel cmdline option.
+ *
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
@@ -2175,9 +2192,9 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
*/
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
-static int __init drm_fb_helper_modinit(void)
+int __init drm_fb_helper_modinit(void)
{
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
const char *name = "fbcon";
struct module *fbcon;
@@ -2187,8 +2204,7 @@ static int __init drm_fb_helper_modinit(void)
if (!fbcon)
request_module_nowait(name);
+#endif
return 0;
}
-
-module_init(drm_fb_helper_modinit);
-#endif
+EXPORT_SYMBOL(drm_fb_helper_modinit);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 1ea8790e5090..aeef58ed359b 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_fops.c
* File operations for DRM
*
@@ -44,6 +44,46 @@
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
+/**
+ * DOC: file operations
+ *
+ * Drivers must define the file operations structure that forms the DRM
+ * userspace API entry point, even though most of those operations are
+ * implemented in the DRM core. The mandatory functions are drm_open(),
+ * drm_read(), drm_ioctl() and drm_compat_ioctl if CONFIG_COMPAT is enabled.
+ * Drivers which implement private ioctls that require 32/64 bit compatibility
+ * support must provided their onw .compat_ioctl() handler that processes
+ * private ioctls and calls drm_compat_ioctl() for core ioctls.
+ *
+ * In addition drm_read() and drm_poll() provide support for DRM events. DRM
+ * events are a generic and extensible means to send asynchronous events to
+ * userspace through the file descriptor. They are used to send vblank event and
+ * page flip completions by the KMS API. But drivers can also use it for their
+ * own needs, e.g. to signal completion of rendering.
+ *
+ * The memory mapping implementation will vary depending on how the driver
+ * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
+ * function, modern drivers should use one of the provided memory-manager
+ * specific implementations. For GEM-based drivers this is drm_gem_mmap().
+ *
+ * No other file operations are supported by the DRM userspace API. Overall the
+ * following is an example #file_operations structure:
+ *
+ * static const struct file_operations example_drm_fops = {
+ * .owner = THIS_MODULE,
+ * .open = drm_open,
+ * .release = drm_release,
+ * .unlocked_ioctl = drm_ioctl,
+ * #ifdef CONFIG_COMPAT
+ * .compat_ioctl = drm_compat_ioctl,
+ * #endif
+ * .poll = drm_poll,
+ * .read = drm_read,
+ * .llseek = no_llseek,
+ * .mmap = drm_gem_mmap,
+ * };
+ */
+
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
static int drm_setup(struct drm_device * dev)
@@ -67,15 +107,17 @@ static int drm_setup(struct drm_device * dev)
}
/**
- * Open file.
+ * drm_open - open method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
*
- * \param inode device inode
- * \param filp file pointer.
- * \return zero on success or a negative number on failure.
+ * This function must be used by drivers as their .open() #file_operations
+ * method. It looks up the correct DRM device and instantiates all the per-file
+ * resources for it.
+ *
+ * RETURNS:
*
- * Searches the DRM device with the same minor number, calls open_helper(), and
- * increments the device open count. If the open count was previous at zero,
- * i.e., it's the first that the device is open, then calls setup().
+ * 0 on success or a negative errno value on failure.
*/
int drm_open(struct inode *inode, struct file *filp)
{
@@ -112,7 +154,7 @@ err_undo:
}
EXPORT_SYMBOL(drm_open);
-/**
+/*
* Check whether DRI will run on this CPU.
*
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
@@ -125,7 +167,7 @@ static int drm_cpu_valid(void)
return 1;
}
-/**
+/*
* drm_new_set_master - Allocate a new master object and become master for the
* associated master realm.
*
@@ -179,7 +221,7 @@ out_err:
return ret;
}
-/**
+/*
* Called whenever a process opens /dev/drm.
*
* \param filp file pointer.
@@ -222,6 +264,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
INIT_LIST_HEAD(&priv->fbs);
mutex_init(&priv->fbs_lock);
INIT_LIST_HEAD(&priv->blobs);
+ INIT_LIST_HEAD(&priv->pending_event_list);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
@@ -311,18 +354,16 @@ static void drm_events_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_pending_event *e, *et;
- struct drm_pending_vblank_event *v, *vt;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- /* Remove pending flips */
- list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
- if (v->base.file_priv == file_priv) {
- list_del(&v->base.link);
- drm_vblank_put(dev, v->pipe);
- v->base.destroy(&v->base);
- }
+ /* Unlink pending events */
+ list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+ pending_link) {
+ list_del(&e->pending_link);
+ e->file_priv = NULL;
+ }
/* Remove unconsumed events */
list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
@@ -333,7 +374,7 @@ static void drm_events_release(struct drm_file *file_priv)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-/**
+/*
* drm_legacy_dev_reinit
*
* Reinitializes a legacy/ums drm device in it's lastclose function.
@@ -350,7 +391,7 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
dev->if_version = 0;
}
-/**
+/*
* Take down the DRM device.
*
* \param dev DRM device structure.
@@ -387,16 +428,17 @@ int drm_lastclose(struct drm_device * dev)
}
/**
- * Release file.
+ * drm_release - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
*
- * \param inode device inode
- * \param file_priv DRM file private.
- * \return zero on success or a negative number on failure.
+ * This function must be used by drivers as their .release() #file_operations
+ * method. It frees any resources associated with the open file, and if this is
+ * the last open file for the DRM device also proceeds to call drm_lastclose().
*
- * If the hardware lock is held then free it, and take it again for the kernel
- * context since it's necessary to reclaim buffers. Unlink the file private
- * data from its list and free it. Decreases the open count and if it reaches
- * zero calls drm_lastclose().
+ * RETURNS:
+ *
+ * Always succeeds and returns 0.
*/
int drm_release(struct inode *inode, struct file *filp)
{
@@ -451,7 +493,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->is_master) {
struct drm_master *master = file_priv->master;
- /**
+ /*
* Since the master is disappearing, so is the
* possibility to lock.
*/
@@ -508,6 +550,32 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
+/**
+ * drm_read - read method for DRM file
+ * @filp: file pointer
+ * @buffer: userspace destination pointer for the read
+ * @count: count in bytes to read
+ * @offset: offset to read
+ *
+ * This function must be used by drivers as their .read() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion this
+ * means all modern display drivers must use it.
+ *
+ * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
+ * must set the .llseek() #file_operation to no_llseek(). Polling support is
+ * provided by drm_poll().
+ *
+ * This function will only ever read a full event. Therefore userspace must
+ * supply a big enough buffer to fit any event to ensure forward progress. Since
+ * the maximum event space is currently 4K it's recommended to just use that for
+ * safety.
+ *
+ * RETURNS:
+ *
+ * Number of bytes read (always aligned to full events, and can be 0) or a
+ * negative error code on failure.
+ */
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset)
{
@@ -578,6 +646,22 @@ put_back_event:
}
EXPORT_SYMBOL(drm_read);
+/**
+ * drm_poll - poll method for DRM file
+ * @filp: file pointer
+ * @wait: poll waiter table
+ *
+ * This function must be used by drivers as their .poll() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion this
+ * means all modern display drivers must use it.
+ *
+ * See also drm_read().
+ *
+ * RETURNS:
+ *
+ * Mask of POLL flags indicating the current status of the file.
+ */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
@@ -591,3 +675,164 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
return mask;
}
EXPORT_SYMBOL(drm_poll);
+
+/**
+ * drm_event_reserve_init_locked - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed in event for eventual delivery. If the event
+ * doesn't get delivered (because the IOCTL fails later on, before queuing up
+ * anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embedded @p into a larger structure it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * This is the locked version of drm_event_reserve_init() for callers which
+ * already hold dev->event_lock.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init_locked(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_pending_event *p,
+ struct drm_event *e)
+{
+ if (file_priv->event_space < e->length)
+ return -ENOMEM;
+
+ file_priv->event_space -= e->length;
+
+ p->event = e;
+ list_add(&p->pending_link, &file_priv->pending_event_list);
+ p->file_priv = file_priv;
+
+ /* we *could* pass this in as arg, but everyone uses kfree: */
+ p->destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_event_reserve_init_locked);
+
+/**
+ * drm_event_reserve_init - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed in event for eventual delivery. If the event
+ * doesn't get delivered (because the IOCTL fails later on, before queuing up
+ * anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embedded @p into a larger structure it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * Callers which already hold dev->event_lock should use
+ * drm_event_reserve_init_locked() instead.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_pending_event *p,
+ struct drm_event *e)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_event_reserve_init);
+
+/**
+ * drm_event_cancel_free - free a DRM event and release its space
+ * @dev: DRM device
+ * @p: tracking structure for the pending event
+ *
+ * This function frees the event @p initialized with drm_event_reserve_init()
+ * and releases any allocated space.
+ */
+void drm_event_cancel_free(struct drm_device *dev,
+ struct drm_pending_event *p)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (p->file_priv) {
+ p->file_priv->event_space += p->event->length;
+ list_del(&p->pending_link);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ p->destroy(p);
+}
+EXPORT_SYMBOL(drm_event_cancel_free);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * dev->event_lock, see drm_send_event() for the unlocked version.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+{
+ assert_spin_locked(&dev->event_lock);
+
+ if (!e->file_priv) {
+ e->destroy(e);
+ return;
+ }
+
+ list_del(&e->pending_link);
+ list_add_tail(&e->link,
+ &e->file_priv->event_list);
+ wake_up_interruptible(&e->file_priv->event_wait);
+}
+EXPORT_SYMBOL(drm_send_event_locked);
+
+/**
+ * drm_send_event - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. This function acquires dev->event_lock,
+ * see drm_send_event_locked() for callers which already hold this lock.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+ drm_send_event_locked(dev, e);
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_send_event);
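
Taken together, these helpers replace the open-coded event_space accounting
seen in the page-flip hunk above. A hedged sketch of a driver-private event
using the new API; example_event, example_arm_event() and the event type
value are hypothetical.

    struct example_event {
        struct drm_pending_event base;  /* must be the first member */
        struct drm_event event;
    };

    static int example_arm_event(struct drm_device *dev,
                                 struct drm_file *file_priv)
    {
        struct example_event *e;
        int ret;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
            return -ENOMEM;

        e->event.type = 0x80000000;     /* hypothetical private type */
        e->event.length = sizeof(e->event);

        ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
        if (ret) {
            kfree(e);
            return ret;
        }

        /* ... later, from the completion path (the core disarms the
         * event if the DRM file is closed in the meantime): */
        drm_send_event(dev, &e->base);
        return 0;
    }
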
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e8c77e71e1f..da0c5320789f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
fail:
while (i--)
- page_cache_release(pages[i]);
+ put_page(pages[i]);
drm_free_large(pages);
return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
mark_page_accessed(pages[i]);
/* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
drm_free_large(pages);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e5df53b6e229..1f500a1b9969 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -109,8 +109,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
if (IS_ERR(cma_obj))
return cma_obj;
- cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
- &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+ cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
if (!cma_obj->vaddr) {
dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
size);
@@ -192,8 +192,8 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
cma_obj = to_drm_gem_cma_obj(gem_obj);
if (cma_obj->vaddr) {
- dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
+ dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
} else if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
}
@@ -324,9 +324,8 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
- cma_obj->vaddr, cma_obj->paddr,
- vma->vm_end - vma->vm_start);
+ ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
+ cma_obj->paddr, vma->vm_end - vma->vm_start);
if (ret)
drm_gem_vm_close(vma);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 1fe14579e8c9..881c5a6c180c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1041,15 +1041,12 @@ static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
unsigned long seq, struct timeval *now)
{
- assert_spin_locked(&dev->event_lock);
-
e->event.sequence = seq;
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
- list_add_tail(&e->base.link,
- &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ drm_send_event_locked(dev, &e->base);
+
trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
e->event.sequence);
}
@@ -1668,9 +1665,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
e->event.base.type = DRM_EVENT_VBLANK;
e->event.base.length = sizeof(e->event);
e->event.user_data = vblwait->request.signal;
- e->base.event = &e->event.base;
- e->base.file_priv = file_priv;
- e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -1686,12 +1680,12 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
goto err_unlock;
}
- if (file_priv->event_space < sizeof(e->event)) {
- ret = -EBUSY;
+ ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
+ &e->event.base);
+
+ if (ret)
goto err_unlock;
- }
- file_priv->event_space -= sizeof(e->event);
seq = drm_vblank_count_and_time(dev, pipe, &now);
if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
new file mode 100644
index 000000000000..3187c4bb01cb
--- /dev/null
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rafael Antognolli <rafael.antognolli@intel.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_aux_dev.h>
+
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
+static int __init drm_kms_helper_init(void)
+{
+ int ret;
+
+ /* Call init functions from specific kms helpers here */
+ ret = drm_fb_helper_modinit();
+ if (ret < 0)
+ goto out;
+
+ ret = drm_dp_aux_dev_init();
+ if (ret < 0)
+ goto out;
+
+out:
+ return ret;
+}
+
+static void __exit drm_kms_helper_exit(void)
+{
+ /* Call exit functions from specific kms helpers here */
+ drm_dp_aux_dev_exit();
+}
+
+module_init(drm_kms_helper_init);
+module_exit(drm_kms_helper_exit);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 6e6a9c58d404..f5d80839a90c 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -47,7 +47,17 @@
static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
{
- return of_driver_match_device(dev, drv);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ /* attempt OF style match */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ /* compare DSI device and driver names */
+ if (!strcmp(dsi->name, drv->name))
+ return 1;
+
+ return 0;
}
static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
@@ -129,14 +139,20 @@ static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
return device_add(&dsi->dev);
}
+#if IS_ENABLED(CONFIG_OF)
static struct mipi_dsi_device *
of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
{
- struct mipi_dsi_device *dsi;
struct device *dev = host->dev;
+ struct mipi_dsi_device_info info = { };
int ret;
u32 reg;
+ if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+ dev_err(dev, "modalias failure on %s\n", node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
+
ret = of_property_read_u32(node, "reg", &reg);
if (ret) {
dev_err(dev, "device node %s has no valid reg property: %d\n",
@@ -144,32 +160,111 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
return ERR_PTR(-EINVAL);
}
- if (reg > 3) {
- dev_err(dev, "device node %s has invalid reg property: %u\n",
- node->full_name, reg);
+ info.channel = reg;
+ info.node = of_node_get(node);
+
+ return mipi_dsi_device_register_full(host, &info);
+}
+#else
+static struct mipi_dsi_device *
+of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+/**
+ * mipi_dsi_device_register_full - create a MIPI DSI device
+ * @host: DSI host to which this device is connected
+ * @info: pointer to template containing DSI device information
+ *
+ * Create a MIPI DSI device by using the device information provided by
+ * the mipi_dsi_device_info template.
+ *
+ * Returns:
+ * A pointer to the newly created MIPI DSI device, or an ERR_PTR()-encoded
+ * error on failure.
+ */
+struct mipi_dsi_device *
+mipi_dsi_device_register_full(struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info)
+{
+ struct mipi_dsi_device *dsi;
+ struct device *dev = host->dev;
+ int ret;
+
+ if (!info) {
+ dev_err(dev, "invalid mipi_dsi_device_info pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (info->channel > 3) {
+ dev_err(dev, "invalid virtual channel: %u\n", info->channel);
return ERR_PTR(-EINVAL);
}
dsi = mipi_dsi_device_alloc(host);
if (IS_ERR(dsi)) {
- dev_err(dev, "failed to allocate DSI device %s: %ld\n",
- node->full_name, PTR_ERR(dsi));
+ dev_err(dev, "failed to allocate DSI device: %ld\n",
+ PTR_ERR(dsi));
return dsi;
}
- dsi->dev.of_node = of_node_get(node);
- dsi->channel = reg;
+ dsi->dev.of_node = info->node;
+ dsi->channel = info->channel;
+ strlcpy(dsi->name, info->type, sizeof(dsi->name));
ret = mipi_dsi_device_add(dsi);
if (ret) {
- dev_err(dev, "failed to add DSI device %s: %d\n",
- node->full_name, ret);
+ dev_err(dev, "failed to add DSI device: %d\n", ret);
kfree(dsi);
return ERR_PTR(ret);
}
return dsi;
}
+EXPORT_SYMBOL(mipi_dsi_device_register_full);
+
+/**
+ * mipi_dsi_device_unregister - unregister MIPI DSI device
+ * @dsi: DSI peripheral device
+ */
+void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi)
+{
+ device_unregister(&dsi->dev);
+}
+EXPORT_SYMBOL(mipi_dsi_device_unregister);
+
+static DEFINE_MUTEX(host_lock);
+static LIST_HEAD(host_list);
+
+/**
+ * of_find_mipi_dsi_host_by_node() - find the MIPI DSI host matching a
+ * device tree node
+ * @node: device tree node
+ *
+ * Returns:
+ * A pointer to the MIPI DSI host corresponding to @node or NULL if no
+ * such device exists (or has not been registered yet).
+ */
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node)
+{
+ struct mipi_dsi_host *host;
+
+ mutex_lock(&host_lock);
+
+ list_for_each_entry(host, &host_list, list) {
+ if (host->dev->of_node == node) {
+ mutex_unlock(&host_lock);
+ return host;
+ }
+ }
+
+ mutex_unlock(&host_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_mipi_dsi_host_by_node);
int mipi_dsi_host_register(struct mipi_dsi_host *host)
{
@@ -182,6 +277,10 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host)
of_mipi_dsi_device_add(host, node);
}
+ mutex_lock(&host_lock);
+ list_add_tail(&host->list, &host_list);
+ mutex_unlock(&host_lock);
+
return 0;
}
EXPORT_SYMBOL(mipi_dsi_host_register);
@@ -190,7 +289,7 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
- device_unregister(&dsi->dev);
+ mipi_dsi_device_unregister(dsi);
return 0;
}
@@ -198,6 +297,10 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
{
device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+
+ mutex_lock(&host_lock);
+ list_del_init(&host->list);
+ mutex_unlock(&host_lock);
}
EXPORT_SYMBOL(mipi_dsi_host_unregister);
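
A sketch of the new non-DT registration path; the host pointer and the
"my-dsi-panel" name are assumptions for illustration. With no OF node,
mipi_dsi_device_match() falls back to comparing dsi->name against the
driver name:

	static struct mipi_dsi_device *
	my_attach_panel(struct mipi_dsi_host *host)
	{
		struct mipi_dsi_device_info info = {
			.type = "my-dsi-panel",	/* matched against drv->name */
			.channel = 0,		/* virtual channel, 0..3 */
			.node = NULL,		/* no device tree node */
		};

		return mipi_dsi_device_register_full(host, &info);
	}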
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 20775c05235a..f7448a5e95a9 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1371,8 +1371,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
}
done:
if (i >= 0) {
- printk(KERN_WARNING
- "parse error at position %i in video mode '%s'\n",
+ pr_warn("[drm] parse error at position %i in video mode '%s'\n",
i, name);
mode->specified = false;
return false;
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 493c05c9ce4f..bc98bb94264d 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -149,3 +149,37 @@ int drm_of_component_probe(struct device *dev,
return component_master_add_with_match(dev, m_ops, match);
}
EXPORT_SYMBOL(drm_of_component_probe);
+
+/**
+ * drm_of_encoder_active_endpoint - return the active encoder endpoint
+ * @node: device tree node containing encoder input ports
+ * @encoder: drm_encoder
+ * @endpoint: result, the parsed endpoint of the active encoder input
+ *
+ * Given an encoder device node and a drm_encoder with a connected crtc,
+ * parse the encoder endpoint connecting to the crtc port.
+ */
+int drm_of_encoder_active_endpoint(struct device_node *node,
+ struct drm_encoder *encoder,
+ struct of_endpoint *endpoint)
+{
+ struct device_node *ep;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct device_node *port;
+ int ret;
+
+ if (!node || !crtc)
+ return -EINVAL;
+
+ for_each_endpoint_of_node(node, ep) {
+ port = of_graph_get_remote_port(ep);
+ of_node_put(port);
+ if (port == crtc->port) {
+ ret = of_graph_parse_endpoint(ep, endpoint);
+ of_node_put(ep);
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
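
A hedged caller sketch (the mux helper is hypothetical): an encoder with
several input ports can use the parsed endpoint to find which input the
active CRTC drives:

	static int my_encoder_update(struct device_node *np,
				     struct drm_encoder *encoder)
	{
		struct of_endpoint endpoint;
		int ret;

		ret = drm_of_encoder_active_endpoint(np, encoder, &endpoint);
		if (ret)
			return ret;

		/* endpoint.port/endpoint.id identify the active input */
		return my_select_input_mux(endpoint.port); /* hypothetical */
	}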
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 27aa7183b20b..df6cdc76a16e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -329,7 +329,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
* drm_gem_prime_export - helper library implementation of the export callback
* @dev: drm_device to export from
* @obj: GEM object to export
- * @flags: flags like DRM_CLOEXEC
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
*
* This is the implementation of the gem_prime_export functions for GEM drivers
* using the PRIME helpers.
@@ -628,7 +628,6 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_prime_handle *args = data;
- uint32_t flags;
if (!drm_core_check_feature(dev, DRIVER_PRIME))
return -EINVAL;
@@ -637,14 +636,11 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
return -ENOSYS;
/* check flags are valid */
- if (args->flags & ~DRM_CLOEXEC)
+ if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
return -EINVAL;
- /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
- flags = args->flags & DRM_CLOEXEC;
-
return dev->driver->prime_handle_to_fd(dev, file_priv,
- args->handle, flags, &args->fd);
+ args->handle, args->flags, &args->fd);
}
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
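
From userspace, the relaxed flag check means a writable dma-buf can now be
requested at export time; a libdrm sketch, assuming headers new enough to
define DRM_RDWR:

	#include <xf86drm.h>

	int export_writable(int drm_fd, uint32_t handle)
	{
		int prime_fd, err;

		err = drmPrimeHandleToFD(drm_fd, handle,
					 DRM_CLOEXEC | DRM_RDWR, &prime_fd);
		return err ? err : prime_fd;
	}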
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 332c55ebba6d..d8d556457427 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -21,6 +21,7 @@
#include "common.xml.h"
#include "state.xml.h"
+#include "state_3d.xml.h"
#include "cmdstream.xml.h"
/*
@@ -85,10 +86,17 @@ static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}
-static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
+static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
- u32 flush;
- u32 stall;
+ CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
+ VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
+ VIVS_GL_SEMAPHORE_TOKEN_TO(to));
+}
+
+static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buffer, u8 pipe)
+{
+ u32 flush = 0;
/*
* This assumes that if we're switching to 2D, we're switching
@@ -96,17 +104,13 @@ static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
* the 2D core, we need to flush the 3D depth and color caches,
* otherwise we need to flush the 2D pixel engine cache.
*/
- if (pipe == ETNA_PIPE_2D)
- flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
- else
+ if (gpu->exec_state == ETNA_PIPE_2D)
flush = VIVS_GL_FLUSH_CACHE_PE2D;
-
- stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
- VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
+ else if (gpu->exec_state == ETNA_PIPE_3D)
+ flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
- CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
-
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
@@ -131,6 +135,36 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
ptr, len * 4, 0);
}
+/*
+ * Safely replace the WAIT of a waitlink with a new command and argument.
+ * The GPU may be executing this WAIT while we're modifying it, so we have
+ * to write it in a specific order to avoid the GPU branching to somewhere
+ * else. 'wl_offset' is the offset to the first byte of the WAIT command.
+ */
+static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
+ unsigned int wl_offset, u32 cmd, u32 arg)
+{
+ u32 *lw = buffer->vaddr + wl_offset;
+
+ lw[1] = arg;
+ mb();
+ lw[0] = cmd;
+ mb();
+}
+
+/*
+ * Ensure that there is space in the command buffer to contiguously write
+ * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
+ */
+static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
+{
+ if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
+ buffer->user_size = 0;
+
+ return gpu_va(gpu, buffer) + buffer->user_size;
+}
+
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
@@ -147,81 +181,79 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ unsigned int waitlink_offset = buffer->user_size - 16;
+ u32 link_target, flush = 0;
- /* Replace the last WAIT with an END */
- buffer->user_size -= 16;
-
- CMD_END(buffer);
- mb();
+ if (gpu->exec_state == ETNA_PIPE_2D)
+ flush = VIVS_GL_FLUSH_CACHE_PE2D;
+ else if (gpu->exec_state == ETNA_PIPE_3D)
+ flush = VIVS_GL_FLUSH_CACHE_DEPTH |
+ VIVS_GL_FLUSH_CACHE_COLOR |
+ VIVS_GL_FLUSH_CACHE_TEXTURE |
+ VIVS_GL_FLUSH_CACHE_TEXTUREVS |
+ VIVS_GL_FLUSH_CACHE_SHADER_L2;
+
+ if (flush) {
+ unsigned int dwords = 7;
+
+ link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
+
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
+ if (gpu->exec_state == ETNA_PIPE_3D)
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_END(buffer);
+
+ etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+ VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(dwords),
+ link_target);
+ } else {
+ /* Replace the last link-wait with an "END" command */
+ etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+ VIV_FE_END_HEADER_OP_END, 0);
+ }
}
+/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf)
{
struct etnaviv_cmdbuf *buffer = gpu->buffer;
- u32 *lw = buffer->vaddr + buffer->user_size - 16;
- u32 back, link_target, link_size, reserve_size, extra_size = 0;
+ unsigned int waitlink_offset = buffer->user_size - 16;
+ u32 return_target, return_dwords;
+ u32 link_target, link_dwords;
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+ link_target = gpu_va(gpu, cmdbuf);
+ link_dwords = cmdbuf->size / 8;
+
/*
- * If we need to flush the MMU prior to submitting this buffer, we
- * will need to append a mmu flush load state, followed by a new
+ * If we need maintenance prior to submitting this buffer, we will
+ * need to append an MMU flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
if (gpu->mmu->need_flush || gpu->switch_context) {
+ u32 target, extra_dwords;
+
/* link command */
- extra_size += 2;
+ extra_dwords = 1;
+
/* flush command */
if (gpu->mmu->need_flush)
- extra_size += 2;
+ extra_dwords += 1;
+
/* pipe switch commands */
if (gpu->switch_context)
- extra_size += 8;
- }
+ extra_dwords += 4;
- reserve_size = (6 + extra_size) * 4;
-
- /*
- * if we are going to completely overflow the buffer, we need to wrap.
- */
- if (buffer->user_size + reserve_size > buffer->size)
- buffer->user_size = 0;
-
- /* save offset back into main buffer */
- back = buffer->user_size + reserve_size - 6 * 4;
- link_target = gpu_va(gpu, buffer) + buffer->user_size;
- link_size = 6;
-
- /* Skip over any extra instructions */
- link_target += extra_size * sizeof(u32);
-
- if (drm_debug & DRM_UT_DRIVER)
- pr_info("stream link to 0x%08x @ 0x%08x %p\n",
- link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
-
- /* jump back from cmd to main buffer */
- CMD_LINK(cmdbuf, link_size, link_target);
-
- link_target = gpu_va(gpu, cmdbuf);
- link_size = cmdbuf->size / 8;
-
-
-
- if (drm_debug & DRM_UT_DRIVER) {
- print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
- cmdbuf->vaddr, cmdbuf->size, 0);
-
- pr_info("link op: %p\n", lw);
- pr_info("link addr: %p\n", lw + 1);
- pr_info("addr: 0x%08x\n", link_target);
- pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
- pr_info("event: %d\n", event);
- }
-
- if (gpu->mmu->need_flush || gpu->switch_context) {
- u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;
+ target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
if (gpu->mmu->need_flush) {
/* Add the MMU flush */
@@ -236,32 +268,59 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
}
if (gpu->switch_context) {
- etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
+ etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
+ gpu->exec_state = cmdbuf->exec_state;
gpu->switch_context = false;
}
- /* And the link to the first buffer */
- CMD_LINK(buffer, link_size, link_target);
+ /* And the link to the submitted buffer */
+ CMD_LINK(buffer, link_dwords, link_target);
/* Update the link target to point to above instructions */
- link_target = new_target;
- link_size = extra_size;
+ link_target = target;
+ link_dwords = extra_dwords;
}
- /* trigger event */
+ /*
+ * Append a LINK to the submitted command buffer to return to
+ * the ring buffer. return_target is the ring target address.
+ * We need three dwords: event, wait, link.
+ */
+ return_dwords = 3;
+ return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
+ CMD_LINK(cmdbuf, return_dwords, return_target);
+
+ /*
+ * Append the event, the wait, and a link pointing back to the
+ * wait command to the ring buffer.
+ */
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
-
- /* append WAIT/LINK to main buffer */
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));
+ CMD_LINK(buffer, 2, return_target + 8);
- /* Change WAIT into a LINK command; write the address first. */
- *(lw + 1) = link_target;
- mb();
- *(lw) = VIV_FE_LINK_HEADER_OP_LINK |
- VIV_FE_LINK_HEADER_PREFETCH(link_size);
- mb();
+ if (drm_debug & DRM_UT_DRIVER)
+ pr_info("stream link to 0x%08x @ 0x%08x %p\n",
+ return_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
+
+ if (drm_debug & DRM_UT_DRIVER) {
+ print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+ cmdbuf->vaddr, cmdbuf->size, 0);
+
+ pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
+ pr_info("addr: 0x%08x\n", link_target);
+ pr_info("back: 0x%08x\n", return_target);
+ pr_info("event: %d\n", event);
+ }
+
+ /*
+ * Kick off the submitted command by replacing the previous
+ * WAIT with a link to the address in the ring buffer.
+ */
+ etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+ VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
+ link_target);
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
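
The reservation rule above is compact enough to restate standalone; a sketch
(names invented here, kernel types assumed) of how the write offset wraps
when the requested 64-bit command words would overrun the ring:

	static unsigned int ring_reserve_offset(unsigned int user_size,
						unsigned int ring_size,
						unsigned int cmd_dwords)
	{
		if (user_size + cmd_dwords * sizeof(u64) > ring_size)
			return 0;	/* wrap: restart at the buffer start */

		return user_size;	/* append at the current tail */
	}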
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 1cd6046e76b1..115c5bc6d7c8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -75,9 +75,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
-int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
- struct drm_gem_object *obj, u32 *iova);
-void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 4b519e4309b2..281c6eca20a8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -260,8 +260,32 @@ etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
return NULL;
}
-int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
- struct drm_gem_object *obj, u32 *iova)
+void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
+{
+ struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+ drm_gem_object_reference(&etnaviv_obj->base);
+
+ mutex_lock(&etnaviv_obj->lock);
+ WARN_ON(mapping->use == 0);
+ mapping->use += 1;
+ mutex_unlock(&etnaviv_obj->lock);
+}
+
+void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
+{
+ struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+ mutex_lock(&etnaviv_obj->lock);
+ WARN_ON(mapping->use == 0);
+ mapping->use -= 1;
+ mutex_unlock(&etnaviv_obj->lock);
+
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+}
+
+struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+ struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct etnaviv_vram_mapping *mapping;
@@ -329,28 +353,12 @@ int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
out:
mutex_unlock(&etnaviv_obj->lock);
- if (!ret) {
- /* Take a reference on the object */
- drm_gem_object_reference(obj);
- *iova = mapping->iova;
- }
-
- return ret;
-}
-
-void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
-{
- struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- struct etnaviv_vram_mapping *mapping;
-
- mutex_lock(&etnaviv_obj->lock);
- mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
-
- WARN_ON(mapping->use == 0);
- mapping->use -= 1;
- mutex_unlock(&etnaviv_obj->lock);
+ if (ret)
+ return ERR_PTR(ret);
- drm_gem_object_unreference_unlocked(obj);
+ /* Take a reference on the object */
+ drm_gem_object_reference(obj);
+ return mapping;
}
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
@@ -753,9 +761,9 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
down_read(&mm->mmap_sem);
while (pinned < npages) {
- ret = get_user_pages(task, mm, ptr, npages - pinned,
- !etnaviv_obj->userptr.ro, 0,
- pvec + pinned, NULL);
+ ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
+ !etnaviv_obj->userptr.ro, 0,
+ pvec + pinned, NULL);
if (ret < 0)
break;
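
A lifecycle sketch for the new mapping API from a hypothetical caller;
etnaviv_gem_mapping_get() pins both an object reference and the mapping use
count, and etnaviv_gem_mapping_unreference() drops both:

	struct etnaviv_vram_mapping *mapping;

	mapping = etnaviv_gem_mapping_get(obj, gpu);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	emit_address(stream, mapping->iova);	/* hypothetical consumer */

	etnaviv_gem_mapping_unreference(mapping);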
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index ab5df8147a5f..02665d8c10ee 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -88,6 +88,12 @@ static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
#define MAX_CMDS 4
+struct etnaviv_gem_submit_bo {
+ u32 flags;
+ struct etnaviv_gem_object *obj;
+ struct etnaviv_vram_mapping *mapping;
+};
+
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
* make it easier to unwind when things go wrong, etc). This only
@@ -99,11 +105,7 @@ struct etnaviv_gem_submit {
struct ww_acquire_ctx ticket;
u32 fence;
unsigned int nr_bos;
- struct {
- u32 flags;
- struct etnaviv_gem_object *obj;
- u32 iova;
- } bos[0];
+ struct etnaviv_gem_submit_bo bos[0];
};
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
@@ -115,4 +117,9 @@ int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
+struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+ struct drm_gem_object *obj, struct etnaviv_gpu *gpu);
+void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping);
+void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
+
#endif /* __ETNAVIV_GEM_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1aba01a999df..236ada93df53 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -187,12 +187,10 @@ static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
int i;
for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-
if (submit->bos[i].flags & BO_PINNED)
- etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base);
+ etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
- submit->bos[i].iova = 0;
+ submit->bos[i].mapping = NULL;
submit->bos[i].flags &= ~BO_PINNED;
}
}
@@ -203,22 +201,24 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
- u32 iova;
+ struct etnaviv_vram_mapping *mapping;
- ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base,
- &iova);
- if (ret)
+ mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
+ submit->gpu);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
break;
+ }
submit->bos[i].flags |= BO_PINNED;
- submit->bos[i].iova = iova;
+ submit->bos[i].mapping = mapping;
}
return ret;
}
static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
- struct etnaviv_gem_object **obj, u32 *iova)
+ struct etnaviv_gem_submit_bo **bo)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -226,10 +226,7 @@ static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
return -EINVAL;
}
- if (obj)
- *obj = submit->bos[idx].obj;
- if (iova)
- *iova = submit->bos[idx].iova;
+ *bo = &submit->bos[idx];
return 0;
}
@@ -245,8 +242,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
for (i = 0; i < nr_relocs; i++) {
const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
- struct etnaviv_gem_object *bobj;
- u32 iova, off;
+ struct etnaviv_gem_submit_bo *bo;
+ u32 off;
if (unlikely(r->flags)) {
DRM_ERROR("invalid reloc flags\n");
@@ -268,17 +265,16 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
return -EINVAL;
}
- ret = submit_bo(submit, r->reloc_idx, &bobj, &iova);
+ ret = submit_bo(submit, r->reloc_idx, &bo);
if (ret)
return ret;
- if (r->reloc_offset >=
- bobj->base.size - sizeof(*ptr)) {
+ if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
DRM_ERROR("relocation %u outside object", i);
return -EINVAL;
}
- ptr[off] = iova + r->reloc_offset;
+ ptr[off] = bo->mapping->iova + r->reloc_offset;
last_offset = off;
}
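
For reference, the reloc entries patched above come from userspace; a sketch
of a single entry (values invented; submit_offset is assumed to be the byte
offset of the stream word to patch):

	struct drm_etnaviv_gem_submit_reloc reloc = {
		.submit_offset = 8,	/* byte offset into the stream */
		.reloc_idx = 0,		/* index into the submit's bo list */
		.reloc_offset = 0,	/* byte offset inside that bo */
		.flags = 0,		/* must be zero, checked above */
	};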
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index a33162cf4f4c..306dde18a94a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
+ /*
+ * Set the GPU linear window to be at the end of the DMA window, where
+ * the CMA area is likely to reside. This ensures that we are able to
+ * map the command buffers while having the linear window overlap as
+ * much RAM as possible, so we can optimize mappings for other buffers.
+ *
+ * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
+ * to different views of the memory on the individual engines.
+ */
+ if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+ (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+ u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+ if (dma_mask < PHYS_OFFSET + SZ_2G)
+ gpu->memory_base = PHYS_OFFSET;
+ else
+ gpu->memory_base = dma_mask - SZ_2G + 1;
+ }
+
ret = etnaviv_hw_reset(gpu);
if (ret)
goto fail;
@@ -628,6 +646,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
/* Now program the hardware */
mutex_lock(&gpu->lock);
etnaviv_gpu_hw_init(gpu);
+ gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -871,17 +890,13 @@ static void recover_worker(struct work_struct *work)
gpu->event[i].fence = NULL;
gpu->event[i].used = false;
complete(&gpu->event_free);
- /*
- * Decrement the PM count for each stuck event. This is safe
- * even in atomic context as we use ASYNC RPM here.
- */
- pm_runtime_put_autosuspend(gpu->dev);
}
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
gpu->completed_fence = gpu->active_fence;
etnaviv_gpu_hw_init(gpu);
gpu->switch_context = true;
+ gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1106,15 +1121,15 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
size_t nr_bos)
{
struct etnaviv_cmdbuf *cmdbuf;
- size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
+ size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
sizeof(*cmdbuf));
cmdbuf = kzalloc(sz, GFP_KERNEL);
if (!cmdbuf)
return NULL;
- cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
- GFP_KERNEL);
+ cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
+ GFP_KERNEL);
if (!cmdbuf->vaddr) {
kfree(cmdbuf);
return NULL;
@@ -1128,8 +1143,8 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
- dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
- cmdbuf->vaddr, cmdbuf->paddr);
+ dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
+ cmdbuf->paddr);
kfree(cmdbuf);
}
@@ -1150,14 +1165,23 @@ static void retire_worker(struct work_struct *work)
fence_put(cmdbuf->fence);
for (i = 0; i < cmdbuf->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];
+ struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
+ struct etnaviv_gem_object *etnaviv_obj = mapping->object;
atomic_dec(&etnaviv_obj->gpu_active);
/* drop the refcount taken in etnaviv_gpu_submit */
- etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
+ etnaviv_gem_mapping_unreference(mapping);
}
etnaviv_gpu_cmdbuf_free(cmdbuf);
+ /*
+ * We need to balance the runtime PM count caused by
+ * each submission. Upon submission, we increment
+ * the runtime PM counter, and allocate one event.
+ * So here, we put the runtime PM count for each
+ * completed event.
+ */
+ pm_runtime_put_autosuspend(gpu->dev);
}
gpu->retired_fence = fence;
@@ -1304,11 +1328,10 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
- u32 iova;
- /* Each cmdbuf takes a refcount on the iova */
- etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
- cmdbuf->bo[i] = etnaviv_obj;
+ /* Each cmdbuf takes a refcount on the mapping */
+ etnaviv_gem_mapping_reference(submit->bos[i].mapping);
+ cmdbuf->bo_map[i] = submit->bos[i].mapping;
atomic_inc(&etnaviv_obj->gpu_active);
if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
@@ -1378,15 +1401,6 @@ static irqreturn_t irq_handler(int irq, void *data)
gpu->completed_fence = fence->seqno;
event_free(gpu, event);
-
- /*
- * We need to balance the runtime PM count caused by
- * each submission. Upon submission, we increment
- * the runtime PM counter, and allocate one event.
- * So here, we put the runtime PM count for each
- * completed event.
- */
- pm_runtime_put_autosuspend(gpu->dev);
}
/* Retire the buffer objects in a work */
@@ -1481,6 +1495,7 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
etnaviv_gpu_hw_init(gpu);
gpu->switch_context = true;
+ gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@@ -1578,14 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- /*
- * Set the GPU base address to the start of physical memory. This
- * ensures that if we have up to 2GB, the v1 MMU can address the
- * highest memory. This is important as command buffers may be
- * allocated outside of this limit.
- */
- gpu->memory_base = PHYS_OFFSET;
-
/* Map registers: */
gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
if (IS_ERR(gpu->mmio))
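
The linear-window placement above can be sanity-checked with concrete
numbers; a sketch assuming PHYS_OFFSET = 0x40000000 and a 32-bit required
DMA mask:

	/*
	 * dma_mask = 0xffffffff and PHYS_OFFSET + SZ_2G = 0xc0000000, so
	 * the mask is not below the limit and the window is moved up:
	 *
	 *	memory_base = 0xffffffff - SZ_2G + 1 = 0x80000000
	 *
	 * i.e. the 2 GiB linear window covers the top of the DMA window,
	 * where the CMA area (and the command buffers) usually lives.
	 */
	static u32 linear_window_base(u32 dma_mask, u32 phys_offset)
	{
		if (dma_mask < phys_offset + SZ_2G)
			return phys_offset;

		return dma_mask - SZ_2G + 1;
	}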
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f233ac4c7c1c..f5321e2f25ff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -23,6 +23,7 @@
#include "etnaviv_drv.h"
struct etnaviv_gem_submit;
+struct etnaviv_vram_mapping;
struct etnaviv_chip_identity {
/* Chip model. */
@@ -103,6 +104,7 @@ struct etnaviv_gpu {
/* 'ring'-buffer: */
struct etnaviv_cmdbuf *buffer;
+ int exec_state;
/* bus base address of memory */
u32 memory_base;
@@ -166,7 +168,7 @@ struct etnaviv_cmdbuf {
struct list_head node;
/* BOs attached to this command buffer */
unsigned int nr_bos;
- struct etnaviv_gem_object *bo[0];
+ struct etnaviv_vram_mapping *bo_map[0];
};
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 6743bc648dc8..29a723fabc17 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -193,7 +193,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
/*
* Unmap the blocks which need to be reaped from the MMU.
- * Clear the mmu pointer to prevent the get_iova finding
+ * Clear the mmu pointer to prevent mapping_get from finding
* this mapping.
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
diff --git a/drivers/gpu/drm/etnaviv/state_3d.xml.h b/drivers/gpu/drm/etnaviv/state_3d.xml.h
new file mode 100644
index 000000000000..d7146fd13943
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_3d.xml.h
@@ -0,0 +1,9 @@
+#ifndef STATE_3D_XML
+#define STATE_3D_XML
+
+/* This is a cut-down version of the state_3d.xml.h file */
+
+#define VIVS_TS_FLUSH_CACHE 0x00001650
+#define VIVS_TS_FLUSH_CACHE_FLUSH 0x00000001
+
+#endif /* STATE_3D_XML */
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f17d39279596..baddf33fb475 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -94,7 +94,7 @@ comment "Sub-drivers"
config DRM_EXYNOS_G2D
bool "G2D"
- depends on !VIDEO_SAMSUNG_S5P_G2D
+ depends on VIDEO_SAMSUNG_S5P_G2D=n
select FRAME_VECTOR
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 6496532aaa91..23d2f958739b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -2,11 +2,10 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
- exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
- exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+ exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 162ab93e99cb..5245bc5e82e9 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -429,7 +429,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
set_bit(BIT_SUSPENDED, &ctx->flags);
}
-void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
+static void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 52bda3b42fe0..93361073af9a 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -60,7 +60,6 @@ struct decon_context {
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
- struct exynos_drm_panel_info panel;
struct drm_encoder *encoder;
};
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 673164b331c8..cff8dc788820 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -977,9 +977,7 @@ static int exynos_dp_get_modes(struct drm_connector *connector)
return 0;
}
- drm_display_mode_from_videomode(&dp->priv.vm, mode);
- mode->width_mm = dp->priv.width_mm;
- mode->height_mm = dp->priv.height_mm;
+ drm_display_mode_from_videomode(&dp->vm, mode);
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
@@ -1155,13 +1153,6 @@ static int exynos_dp_create_connector(struct drm_encoder *encoder)
return 0;
}
-static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void exynos_dp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -1177,7 +1168,6 @@ static void exynos_dp_disable(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
- .mode_fixup = exynos_dp_mode_fixup,
.mode_set = exynos_dp_mode_set,
.enable = exynos_dp_enable,
.disable = exynos_dp_disable,
@@ -1249,8 +1239,7 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
- ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm,
- OF_USE_NATIVE_MODE);
+ ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
if (ret) {
DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 66eec4b2d5c6..b5c2d8f47f9c 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -16,6 +16,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/exynos_drm.h>
+#include <video/videomode.h>
#include "exynos_drm_drv.h"
@@ -164,8 +165,7 @@ struct exynos_dp_device {
struct phy *phy;
int dpms_mode;
int hpd_gpio;
-
- struct exynos_drm_panel_info priv;
+ struct videomode vm;
};
/* exynos_dp_reg.c */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 7f55ba6771c6..011211e4167d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
return 0;
err:
- list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+ list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
if (subdrv->close)
subdrv->close(dev, subdrv->dev, file);
}
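
A generic sketch of the unwind pattern this one-liner restores (names
invented): on partial failure the walk must resume from the failing entry
and run back over the real list head, which is what
list_for_each_entry_continue_reverse() does; the old
list_for_each_entry_reverse(subdrv, &subdrv->list, list) wrongly anchored
the walk on the failing entry's own list_head:

	list_for_each_entry(item, &item_list, list) {
		err = item->open(item);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* close only the entries opened before the failure */
	list_for_each_entry_continue_reverse(item, &item_list, list)
		item->close(item);
	return err;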
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 05350ae0785b..75e570f45259 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -128,13 +128,6 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
return 0;
}
-static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void exynos_dpi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -162,7 +155,6 @@ static void exynos_dpi_disable(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
- .mode_fixup = exynos_dpi_mode_fixup,
.mode_set = exynos_dpi_mode_set,
.enable = exynos_dpi_enable,
.disable = exynos_dpi_disable,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 68f0f36f6e7e..5344940c8a07 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -130,6 +130,8 @@ static void exynos_drm_atomic_work(struct work_struct *work)
exynos_atomic_commit_complete(commit);
}
+static struct device *exynos_drm_get_dma_device(void);
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -147,6 +149,16 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
dev_set_drvdata(dev->dev, dev);
dev->dev_private = (void *)private;
+ /* the first real CRTC device is used for all dma mapping operations */
+ private->dma_dev = exynos_drm_get_dma_device();
+ if (!private->dma_dev) {
+ DRM_ERROR("no device found for DMA mapping operations.\n");
+ ret = -ENODEV;
+ goto err_free_private;
+ }
+ DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
+ dev_name(private->dma_dev));
+
/*
* create mapping to manage iommu table and set a pointer to iommu
* mapping structure to iommu_mapping of private data.
@@ -340,20 +352,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct drm_pending_event *e, *et;
- unsigned long flags;
-
- if (!file->driver_priv)
- return;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- /* Release all events handled by page flip handler but not freed. */
- list_for_each_entry_safe(e, et, &file->event_list, link) {
- list_del(&e->link);
- e->destroy(e);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
kfree(file->driver_priv);
file->driver_priv = NULL;
}
@@ -372,6 +370,8 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
+ DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
@@ -495,69 +495,65 @@ static const struct dev_pm_ops exynos_drm_pm_ops = {
/* forward declaration */
static struct platform_driver exynos_drm_platform_driver;
+struct exynos_drm_driver_info {
+ struct platform_driver *driver;
+ unsigned int flags;
+};
+
+#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
+#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
+#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */
+
+#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
+
/*
* Connector drivers should not be placed before associated crtc drivers,
* because connector requires pipe number of its crtc during initialization.
*/
-static struct platform_driver *const exynos_drm_kms_drivers[] = {
-#ifdef CONFIG_DRM_EXYNOS_FIMD
- &fimd_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS5433_DECON
- &exynos5433_decon_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS7_DECON
- &decon_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_MIC
- &mic_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_DP
- &dp_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_DSI
- &dsi_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_MIXER
- &mixer_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_HDMI
- &hdmi_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_VIDI
- &vidi_driver,
-#endif
-};
-
-static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
-#ifdef CONFIG_DRM_EXYNOS_G2D
- &g2d_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_FIMC
- &fimc_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_ROTATOR
- &rotator_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_GSC
- &gsc_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_IPP
- &ipp_driver,
-#endif
- &exynos_drm_platform_driver,
-};
-
-static struct platform_driver *const exynos_drm_drv_with_simple_dev[] = {
-#ifdef CONFIG_DRM_EXYNOS_VIDI
- &vidi_driver,
-#endif
-#ifdef CONFIG_DRM_EXYNOS_IPP
- &ipp_driver,
-#endif
- &exynos_drm_platform_driver,
+static struct exynos_drm_driver_info exynos_drm_drivers[] = {
+ {
+ DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
+ DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ }, {
+ DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
+ DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ }, {
+ DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
+ DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ }, {
+ DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
+ DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ }, {
+ DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
+ DRM_COMPONENT_DRIVER
+ }, {
+ DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
+ DRM_COMPONENT_DRIVER
+ }, {
+ DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
+ DRM_COMPONENT_DRIVER
+ }, {
+ DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
+ DRM_COMPONENT_DRIVER
+ }, {
+ DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI),
+ DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
+ }, {
+ DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
+ }, {
+ DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
+ }, {
+ DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
+ }, {
+ DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
+ }, {
+ DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP),
+ DRM_VIRTUAL_DEVICE
+ }, {
+ &exynos_drm_platform_driver,
+ DRM_VIRTUAL_DEVICE
+ }
};
-#define PDEV_COUNT ARRAY_SIZE(exynos_drm_drv_with_simple_dev)
static int compare_dev(struct device *dev, void *data)
{
@@ -569,11 +565,15 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
struct component_match *match = NULL;
int i;
- for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) {
- struct device_driver *drv = &exynos_drm_kms_drivers[i]->driver;
+ for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+ struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
struct device *p = NULL, *d;
- while ((d = bus_find_device(&platform_bus_type, p, drv,
+ if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
+ continue;
+
+ while ((d = bus_find_device(&platform_bus_type, p,
+ &info->driver->driver,
(void *)platform_bus_type.match))) {
put_device(p);
component_match_add(dev, &match, compare_dev, d);
@@ -630,91 +630,102 @@ static struct platform_driver exynos_drm_platform_driver = {
},
};
-static struct platform_device *exynos_drm_pdevs[PDEV_COUNT];
+static struct device *exynos_drm_get_dma_device(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+ struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+ struct device *dev;
+
+ if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
+ continue;
+
+ while ((dev = bus_find_device(&platform_bus_type, NULL,
+ &info->driver->driver,
+ (void *)platform_bus_type.match))) {
+ put_device(dev);
+ return dev;
+ }
+ }
+ return NULL;
+}
static void exynos_drm_unregister_devices(void)
{
- int i = PDEV_COUNT;
+ int i;
+
+ for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
+ struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+ struct device *dev;
+
+ if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
+ continue;
- while (--i >= 0) {
- platform_device_unregister(exynos_drm_pdevs[i]);
- exynos_drm_pdevs[i] = NULL;
+ while ((dev = bus_find_device(&platform_bus_type, NULL,
+ &info->driver->driver,
+ (void *)platform_bus_type.match))) {
+ put_device(dev);
+ platform_device_unregister(to_platform_device(dev));
+ }
}
}
static int exynos_drm_register_devices(void)
{
+ struct platform_device *pdev;
int i;
- for (i = 0; i < PDEV_COUNT; ++i) {
- struct platform_driver *d = exynos_drm_drv_with_simple_dev[i];
- struct platform_device *pdev =
- platform_device_register_simple(d->driver.name, -1,
- NULL, 0);
+ for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+ struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
- if (!IS_ERR(pdev)) {
- exynos_drm_pdevs[i] = pdev;
+ if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
continue;
- }
- while (--i >= 0) {
- platform_device_unregister(exynos_drm_pdevs[i]);
- exynos_drm_pdevs[i] = NULL;
- }
- return PTR_ERR(pdev);
+ pdev = platform_device_register_simple(
+ info->driver->driver.name, -1, NULL, 0);
+ if (IS_ERR(pdev))
+ goto fail;
}
return 0;
+fail:
+ exynos_drm_unregister_devices();
+ return PTR_ERR(pdev);
}
-static void exynos_drm_unregister_drivers(struct platform_driver * const *drv,
- int count)
+static void exynos_drm_unregister_drivers(void)
{
- while (--count >= 0)
- platform_driver_unregister(drv[count]);
-}
+ int i;
-static int exynos_drm_register_drivers(struct platform_driver * const *drv,
- int count)
-{
- int i, ret;
+ for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
+ struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
- for (i = 0; i < count; ++i) {
- ret = platform_driver_register(drv[i]);
- if (!ret)
+ if (!info->driver)
continue;
- while (--i >= 0)
- platform_driver_unregister(drv[i]);
-
- return ret;
+ platform_driver_unregister(info->driver);
}
-
- return 0;
}
-static inline int exynos_drm_register_kms_drivers(void)
+static int exynos_drm_register_drivers(void)
{
- return exynos_drm_register_drivers(exynos_drm_kms_drivers,
- ARRAY_SIZE(exynos_drm_kms_drivers));
-}
+ int i, ret;
-static inline int exynos_drm_register_non_kms_drivers(void)
-{
- return exynos_drm_register_drivers(exynos_drm_non_kms_drivers,
- ARRAY_SIZE(exynos_drm_non_kms_drivers));
-}
+ for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+ struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
-static inline void exynos_drm_unregister_kms_drivers(void)
-{
- exynos_drm_unregister_drivers(exynos_drm_kms_drivers,
- ARRAY_SIZE(exynos_drm_kms_drivers));
-}
+ if (!info->driver)
+ continue;
-static inline void exynos_drm_unregister_non_kms_drivers(void)
-{
- exynos_drm_unregister_drivers(exynos_drm_non_kms_drivers,
- ARRAY_SIZE(exynos_drm_non_kms_drivers));
+ ret = platform_driver_register(info->driver);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+fail:
+ exynos_drm_unregister_drivers();
+ return ret;
}
static int exynos_drm_init(void)
@@ -725,19 +736,12 @@ static int exynos_drm_init(void)
if (ret)
return ret;
- ret = exynos_drm_register_kms_drivers();
+ ret = exynos_drm_register_drivers();
if (ret)
goto err_unregister_pdevs;
- ret = exynos_drm_register_non_kms_drivers();
- if (ret)
- goto err_unregister_kms_drivers;
-
return 0;
-err_unregister_kms_drivers:
- exynos_drm_unregister_kms_drivers();
-
err_unregister_pdevs:
exynos_drm_unregister_devices();
@@ -746,8 +750,7 @@ err_unregister_pdevs:
static void exynos_drm_exit(void)
{
- exynos_drm_unregister_non_kms_drivers();
- exynos_drm_unregister_kms_drivers();
+ exynos_drm_unregister_drivers();
exynos_drm_unregister_devices();
}
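
How the table collapses the per-driver #ifdef blocks, shown for one entry
(a sketch of the macro's effect, following from IS_ENABLED()):

	/*
	 * CONFIG_DRM_EXYNOS_FIMD=y:
	 *	DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD) -> &fimd_driver
	 * option unset:
	 *	DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD) -> NULL
	 *
	 * so the register/unregister/match loops only test info->driver
	 * (plus the flag bits) instead of repeating #ifdefs per driver.
	 */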
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 17b5ded72ff1..502f750bad2a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -219,8 +219,10 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
+ struct device *dma_dev;
unsigned long da_start;
unsigned long da_space_size;
+ void *mapping;
unsigned int pipe;
@@ -230,6 +232,13 @@ struct exynos_drm_private {
wait_queue_head_t wait;
};
+static inline struct device *to_dma_dev(struct drm_device *dev)
+{
+ struct exynos_drm_private *priv = dev->dev_private;
+
+ return priv->dma_dev;
+}
+
/*
* Exynos drm sub driver structure.
*
@@ -297,7 +306,6 @@ extern struct platform_driver dp_driver;
extern struct platform_driver dsi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver hdmi_driver;
-extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
extern struct platform_driver fimc_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 26e81d191f56..63c84a106c0b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#include <asm/unaligned.h>
+
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_mipi_dsi.h>
@@ -209,12 +211,6 @@
#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-#define REG_ADDR(dsi, reg_idx) ((dsi)->reg_base + \
- dsi->driver_data->reg_ofs[(reg_idx)])
-#define DSI_WRITE(dsi, reg_idx, val) writel((val), \
- REG_ADDR((dsi), (reg_idx)))
-#define DSI_READ(dsi, reg_idx) readl(REG_ADDR((dsi), (reg_idx)))
-
static char *clk_names[5] = { "bus_clk", "sclk_mipi",
"phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
"sclk_rgb_vclk_to_dsim0" };
@@ -228,12 +224,8 @@ struct exynos_dsi_transfer {
struct list_head list;
struct completion completed;
int result;
- u8 data_id;
- u8 data[2];
+ struct mipi_dsi_packet packet;
u16 flags;
-
- const u8 *tx_payload;
- u16 tx_len;
u16 tx_done;
u8 *rx_payload;
@@ -247,7 +239,7 @@ struct exynos_dsi_transfer {
#define DSIM_STATE_VIDOUT_AVAILABLE BIT(3)
struct exynos_dsi_driver_data {
- unsigned int *reg_ofs;
+ const unsigned int *reg_ofs;
unsigned int plltmr_reg;
unsigned int has_freqband:1;
unsigned int has_clklane_stop:1;
@@ -255,7 +247,7 @@ struct exynos_dsi_driver_data {
unsigned int max_freq;
unsigned int wait_for_reset;
unsigned int num_bits_resol;
- unsigned int *reg_values;
+ const unsigned int *reg_values;
};
struct exynos_dsi {
@@ -324,7 +316,20 @@ enum reg_idx {
DSIM_PHYTIMING2_REG,
NUM_REGS
};
-static unsigned int exynos_reg_ofs[] = {
+
+static inline void exynos_dsi_write(struct exynos_dsi *dsi, enum reg_idx idx,
+ u32 val)
+{
+ writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static inline u32 exynos_dsi_read(struct exynos_dsi *dsi, enum reg_idx idx)
+{
+ return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]);
+}
+
+static const unsigned int exynos_reg_ofs[] = {
[DSIM_STATUS_REG] = 0x00,
[DSIM_SWRST_REG] = 0x04,
[DSIM_CLKCTRL_REG] = 0x08,
@@ -348,7 +353,7 @@ static unsigned int exynos_reg_ofs[] = {
[DSIM_PHYTIMING2_REG] = 0x6c,
};
-static unsigned int exynos5433_reg_ofs[] = {
+static const unsigned int exynos5433_reg_ofs[] = {
[DSIM_STATUS_REG] = 0x04,
[DSIM_SWRST_REG] = 0x0C,
[DSIM_CLKCTRL_REG] = 0x10,
@@ -390,7 +395,7 @@ enum reg_value_idx {
PHYTIMING_HS_TRAIL
};
-static unsigned int reg_values[] = {
+static const unsigned int reg_values[] = {
[RESET_TYPE] = DSIM_SWRST,
[PLL_TIMER] = 500,
[STOP_STATE_CNT] = 0xf,
@@ -408,7 +413,25 @@ static unsigned int reg_values[] = {
[PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
};
-static unsigned int exynos5433_reg_values[] = {
+static const unsigned int exynos5422_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 500,
+ [STOP_STATE_CNT] = 0xf,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d),
+};
+
+static const unsigned int exynos5433_reg_values[] = {
[RESET_TYPE] = DSIM_FUNCRST,
[PLL_TIMER] = 22200,
[STOP_STATE_CNT] = 0xa,
@@ -426,7 +449,7 @@ static unsigned int exynos5433_reg_values[] = {
[PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
};
-static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
.has_freqband = 1,
@@ -438,7 +461,7 @@ static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
.reg_values = reg_values,
};
-static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
.has_freqband = 1,
@@ -450,7 +473,7 @@ static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
.reg_values = reg_values,
};
-static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x58,
.has_clklane_stop = 1,
@@ -461,7 +484,7 @@ static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
.reg_values = reg_values,
};
-static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x58,
.num_clks = 2,
@@ -471,7 +494,7 @@ static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
.reg_values = reg_values,
};
-static struct exynos_dsi_driver_data exynos5433_dsi_driver_data = {
+static const struct exynos_dsi_driver_data exynos5433_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
.has_clklane_stop = 1,
@@ -482,7 +505,18 @@ static struct exynos_dsi_driver_data exynos5433_dsi_driver_data = {
.reg_values = exynos5433_reg_values,
};
-static struct of_device_id exynos_dsi_of_match[] = {
+static const struct exynos_dsi_driver_data exynos5422_dsi_driver_data = {
+ .reg_ofs = exynos5433_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .num_clks = 2,
+ .max_freq = 1500,
+ .wait_for_reset = 1,
+ .num_bits_resol = 12,
+ .reg_values = exynos5422_reg_values,
+};
+
+static const struct of_device_id exynos_dsi_of_match[] = {
{ .compatible = "samsung,exynos3250-mipi-dsi",
.data = &exynos3_dsi_driver_data },
{ .compatible = "samsung,exynos4210-mipi-dsi",
@@ -491,6 +525,8 @@ static struct of_device_id exynos_dsi_of_match[] = {
.data = &exynos4415_dsi_driver_data },
{ .compatible = "samsung,exynos5410-mipi-dsi",
.data = &exynos5_dsi_driver_data },
+ { .compatible = "samsung,exynos5422-mipi-dsi",
+ .data = &exynos5422_dsi_driver_data },
{ .compatible = "samsung,exynos5433-mipi-dsi",
.data = &exynos5433_dsi_driver_data },
{ }
@@ -515,10 +551,10 @@ static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
static void exynos_dsi_reset(struct exynos_dsi *dsi)
{
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE];
reinit_completion(&dsi->completed);
- DSI_WRITE(dsi, DSIM_SWRST_REG, driver_data->reg_values[RESET_TYPE]);
+ exynos_dsi_write(dsi, DSIM_SWRST_REG, reset_val);
}
#ifndef MHZ
@@ -621,7 +657,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
reg |= DSIM_FREQ_BAND(band);
}
- DSI_WRITE(dsi, DSIM_PLLCTRL_REG, reg);
+ exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
timeout = 1000;
do {
@@ -629,7 +665,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
dev_err(dsi->dev, "PLL failed to stabilize\n");
return 0;
}
- reg = DSI_READ(dsi, DSIM_STATUS_REG);
+ reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
} while ((reg & DSIM_PLL_STABLE) == 0);
return fout;
@@ -659,7 +695,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
hs_clk, byte_clk, esc_clk);
- reg = DSI_READ(dsi, DSIM_CLKCTRL_REG);
+ reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
| DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
| DSIM_BYTE_CLK_SRC_MASK);
@@ -669,7 +705,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
| DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
| DSIM_BYTE_CLK_SRC(0)
| DSIM_TX_REQUEST_HSCLK;
- DSI_WRITE(dsi, DSIM_CLKCTRL_REG, reg);
+ exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
return 0;
}
@@ -677,7 +713,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
{
struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
- unsigned int *reg_values = driver_data->reg_values;
+ const unsigned int *reg_values = driver_data->reg_values;
u32 reg;
if (driver_data->has_freqband)
@@ -686,7 +722,7 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
/* B D-PHY: D-PHY Master & Slave Analog Block control */
reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
reg_values[PHYCTRL_SLEW_UP];
- DSI_WRITE(dsi, DSIM_PHYCTRL_REG, reg);
+ exynos_dsi_write(dsi, DSIM_PHYCTRL_REG, reg);
/*
* T LPX: Transmitted length of any Low-Power state period
@@ -694,7 +730,7 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
* burst
*/
reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
- DSI_WRITE(dsi, DSIM_PHYTIMING_REG, reg);
+ exynos_dsi_write(dsi, DSIM_PHYTIMING_REG, reg);
/*
* T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
@@ -714,7 +750,7 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
reg_values[PHYTIMING_CLK_POST] |
reg_values[PHYTIMING_CLK_TRAIL];
- DSI_WRITE(dsi, DSIM_PHYTIMING1_REG, reg);
+ exynos_dsi_write(dsi, DSIM_PHYTIMING1_REG, reg);
/*
* T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
@@ -727,29 +763,29 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
*/
reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
reg_values[PHYTIMING_HS_TRAIL];
- DSI_WRITE(dsi, DSIM_PHYTIMING2_REG, reg);
+ exynos_dsi_write(dsi, DSIM_PHYTIMING2_REG, reg);
}
static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
{
u32 reg;
- reg = DSI_READ(dsi, DSIM_CLKCTRL_REG);
+ reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG);
reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
| DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
- DSI_WRITE(dsi, DSIM_CLKCTRL_REG, reg);
+ exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg);
- reg = DSI_READ(dsi, DSIM_PLLCTRL_REG);
+ reg = exynos_dsi_read(dsi, DSIM_PLLCTRL_REG);
reg &= ~DSIM_PLL_EN;
- DSI_WRITE(dsi, DSIM_PLLCTRL_REG, reg);
+ exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg);
}
static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane)
{
- u32 reg = DSI_READ(dsi, DSIM_CONFIG_REG);
+ u32 reg = exynos_dsi_read(dsi, DSIM_CONFIG_REG);
reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
DSIM_LANE_EN(lane));
- DSI_WRITE(dsi, DSIM_CONFIG_REG, reg);
+ exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
}
static int exynos_dsi_init_link(struct exynos_dsi *dsi)
@@ -760,14 +796,14 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
u32 lanes_mask;
/* Initialize FIFO pointers */
- reg = DSI_READ(dsi, DSIM_FIFOCTRL_REG);
+ reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
reg &= ~0x1f;
- DSI_WRITE(dsi, DSIM_FIFOCTRL_REG, reg);
+ exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
usleep_range(9000, 11000);
reg |= 0x1f;
- DSI_WRITE(dsi, DSIM_FIFOCTRL_REG, reg);
+ exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg);
usleep_range(9000, 11000);
/* DSI configuration */
@@ -836,7 +872,7 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
reg |= DSIM_CLKLANE_STOP;
}
- DSI_WRITE(dsi, DSIM_CONFIG_REG, reg);
+ exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg);
lanes_mask = BIT(dsi->lanes) - 1;
exynos_dsi_enable_lane(dsi, lanes_mask);
@@ -849,19 +885,19 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
return -EFAULT;
}
- reg = DSI_READ(dsi, DSIM_STATUS_REG);
+ reg = exynos_dsi_read(dsi, DSIM_STATUS_REG);
if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
!= DSIM_STOP_STATE_DAT(lanes_mask))
continue;
} while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
- reg = DSI_READ(dsi, DSIM_ESCMODE_REG);
+ reg = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
reg &= ~DSIM_STOP_STATE_CNT_MASK;
reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
- DSI_WRITE(dsi, DSIM_ESCMODE_REG, reg);
+ exynos_dsi_write(dsi, DSIM_ESCMODE_REG, reg);
reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
- DSI_WRITE(dsi, DSIM_TIMEOUT_REG, reg);
+ exynos_dsi_write(dsi, DSIM_TIMEOUT_REG, reg);
return 0;
}
@@ -876,20 +912,20 @@ static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
reg = DSIM_CMD_ALLOW(0xf)
| DSIM_STABLE_VFP(vm->vfront_porch)
| DSIM_MAIN_VBP(vm->vback_porch);
- DSI_WRITE(dsi, DSIM_MVPORCH_REG, reg);
+ exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg);
reg = DSIM_MAIN_HFP(vm->hfront_porch)
| DSIM_MAIN_HBP(vm->hback_porch);
- DSI_WRITE(dsi, DSIM_MHPORCH_REG, reg);
+ exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg);
reg = DSIM_MAIN_VSA(vm->vsync_len)
| DSIM_MAIN_HSA(vm->hsync_len);
- DSI_WRITE(dsi, DSIM_MSYNC_REG, reg);
+ exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg);
}
reg = DSIM_MAIN_HRESOL(vm->hactive, num_bits_resol) |
DSIM_MAIN_VRESOL(vm->vactive, num_bits_resol);
- DSI_WRITE(dsi, DSIM_MDRESOL_REG, reg);
+ exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
}
@@ -898,12 +934,12 @@ static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
{
u32 reg;
- reg = DSI_READ(dsi, DSIM_MDRESOL_REG);
+ reg = exynos_dsi_read(dsi, DSIM_MDRESOL_REG);
if (enable)
reg |= DSIM_MAIN_STAND_BY;
else
reg &= ~DSIM_MAIN_STAND_BY;
- DSI_WRITE(dsi, DSIM_MDRESOL_REG, reg);
+ exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
}
static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
@@ -911,7 +947,7 @@ static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
int timeout = 2000;
do {
- u32 reg = DSI_READ(dsi, DSIM_FIFOCTRL_REG);
+ u32 reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG);
if (!(reg & DSIM_SFR_HEADER_FULL))
return 0;
@@ -925,34 +961,35 @@ static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm)
{
- u32 v = DSI_READ(dsi, DSIM_ESCMODE_REG);
+ u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
if (lpm)
v |= DSIM_CMD_LPDT_LP;
else
v &= ~DSIM_CMD_LPDT_LP;
- DSI_WRITE(dsi, DSIM_ESCMODE_REG, v);
+ exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
}
static void exynos_dsi_force_bta(struct exynos_dsi *dsi)
{
- u32 v = DSI_READ(dsi, DSIM_ESCMODE_REG);
+ u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG);
v |= DSIM_FORCE_BTA;
- DSI_WRITE(dsi, DSIM_ESCMODE_REG, v);
+ exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v);
}
static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
struct exynos_dsi_transfer *xfer)
{
struct device *dev = dsi->dev;
- const u8 *payload = xfer->tx_payload + xfer->tx_done;
- u16 length = xfer->tx_len - xfer->tx_done;
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+ const u8 *payload = pkt->payload + xfer->tx_done;
+ u16 length = pkt->payload_length - xfer->tx_done;
bool first = !xfer->tx_done;
u32 reg;
dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
- xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+ xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
if (length > DSI_TX_FIFO_SIZE)
length = DSI_TX_FIFO_SIZE;
@@ -961,9 +998,8 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
/* Send payload */
while (length >= 4) {
- reg = (payload[3] << 24) | (payload[2] << 16)
- | (payload[1] << 8) | payload[0];
- DSI_WRITE(dsi, DSIM_PAYLOAD_REG, reg);
+ reg = get_unaligned_le32(payload);
+ exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
payload += 4;
length -= 4;
}
@@ -978,10 +1014,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
/* Fall through */
case 1:
reg |= payload[0];
- DSI_WRITE(dsi, DSIM_PAYLOAD_REG, reg);
- break;
- case 0:
- /* Do nothing */
+ exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg);
break;
}
@@ -989,7 +1022,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
if (!first)
return;
- reg = (xfer->data[1] << 16) | (xfer->data[0] << 8) | xfer->data_id;
+ reg = get_unaligned_le32(pkt->header);
if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
dev_err(dev, "waiting for header FIFO timed out\n");
return;
@@ -1001,7 +1034,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
dsi->state ^= DSIM_STATE_CMD_LPM;
}
- DSI_WRITE(dsi, DSIM_PKTHDR_REG, reg);
+ exynos_dsi_write(dsi, DSIM_PKTHDR_REG, reg);
if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
exynos_dsi_force_bta(dsi);
@@ -1017,7 +1050,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
u32 reg;
if (first) {
- reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
+ reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
switch (reg & 0x3f) {
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
@@ -1056,7 +1089,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
/* Receive payload */
while (length >= 4) {
- reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
+ reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
payload[0] = (reg >> 0) & 0xff;
payload[1] = (reg >> 8) & 0xff;
payload[2] = (reg >> 16) & 0xff;
@@ -1066,7 +1099,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
}
if (length) {
- reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
+ reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
switch (length) {
case 3:
payload[2] = (reg >> 16) & 0xff;
@@ -1085,7 +1118,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
clear_fifo:
length = DSI_RX_FIFO_SIZE / 4;
do {
- reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
+ reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG);
if (reg == DSI_RX_FIFO_EMPTY)
break;
} while (--length);
@@ -1110,13 +1143,14 @@ again:
spin_unlock_irqrestore(&dsi->transfer_lock, flags);
- if (xfer->tx_len && xfer->tx_done == xfer->tx_len)
+ if (xfer->packet.payload_length &&
+ xfer->tx_done == xfer->packet.payload_length)
/* waiting for RX */
return;
exynos_dsi_send_to_fifo(dsi, xfer);
- if (xfer->tx_len || xfer->rx_len)
+ if (xfer->packet.payload_length || xfer->rx_len)
return;
xfer->result = 0;
@@ -1152,10 +1186,11 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
spin_unlock_irqrestore(&dsi->transfer_lock, flags);
dev_dbg(dsi->dev,
- "> xfer %p, tx_len %u, tx_done %u, rx_len %u, rx_done %u\n",
- xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+ "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+ xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
+ xfer->rx_done);
- if (xfer->tx_done != xfer->tx_len)
+ if (xfer->tx_done != xfer->packet.payload_length)
return true;
if (xfer->rx_done != xfer->rx_len)
@@ -1226,9 +1261,10 @@ static int exynos_dsi_transfer(struct exynos_dsi *dsi,
wait_for_completion_timeout(&xfer->completed,
msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
if (xfer->result == -ETIMEDOUT) {
+ struct mipi_dsi_packet *pkt = &xfer->packet;
exynos_dsi_remove_transfer(dsi, xfer);
- dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 2, xfer->data,
- xfer->tx_len, xfer->tx_payload);
+ dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header,
+ (int)pkt->payload_length, pkt->payload);
return -ETIMEDOUT;
}
@@ -1241,20 +1277,20 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
struct exynos_dsi *dsi = dev_id;
u32 status;
- status = DSI_READ(dsi, DSIM_INTSRC_REG);
+ status = exynos_dsi_read(dsi, DSIM_INTSRC_REG);
if (!status) {
static unsigned long j;
if (printk_timed_ratelimit(&j, 500))
dev_warn(dsi->dev, "spurious interrupt\n");
return IRQ_HANDLED;
}
- DSI_WRITE(dsi, DSIM_INTSRC_REG, status);
+ exynos_dsi_write(dsi, DSIM_INTSRC_REG, status);
if (status & DSIM_INT_SW_RST_RELEASE) {
u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_FRAME_DONE |
DSIM_INT_RX_ECC_ERR | DSIM_INT_SW_RST_RELEASE);
- DSI_WRITE(dsi, DSIM_INTMSK_REG, mask);
+ exynos_dsi_write(dsi, DSIM_INTMSK_REG, mask);
complete(&dsi->completed);
return IRQ_HANDLED;
}
@@ -1401,12 +1437,6 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
-/* distinguish between short and long DSI packet types */
-static bool exynos_dsi_is_short_dsi_type(u8 type)
-{
- return (type & 0x0f) <= 8;
-}
-
static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
@@ -1424,25 +1454,9 @@ static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
dsi->state |= DSIM_STATE_INITIALIZED;
}
- if (msg->tx_len == 0)
- return -EINVAL;
-
- xfer.data_id = msg->type | (msg->channel << 6);
-
- if (exynos_dsi_is_short_dsi_type(msg->type)) {
- const char *tx_buf = msg->tx_buf;
-
- if (msg->tx_len > 2)
- return -EINVAL;
- xfer.tx_len = 0;
- xfer.data[0] = tx_buf[0];
- xfer.data[1] = (msg->tx_len == 2) ? tx_buf[1] : 0;
- } else {
- xfer.tx_len = msg->tx_len;
- xfer.data[0] = msg->tx_len & 0xff;
- xfer.data[1] = msg->tx_len >> 8;
- xfer.tx_payload = msg->tx_buf;
- }
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
xfer.rx_len = msg->rx_len;
xfer.rx_payload = msg->rx_buf;
@@ -1597,13 +1611,6 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return 0;
}
-static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void exynos_dsi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -1623,7 +1630,6 @@ static void exynos_dsi_mode_set(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
- .mode_fixup = exynos_dsi_mode_fixup,
.mode_set = exynos_dsi_mode_set,
.enable = exynos_dsi_enable,
.disable = exynos_dsi_disable,
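
A minimal sketch of the packet path the exynos_drm_dsi.c hunks above switch to, assuming nothing beyond the drm_mipi_dsi.h helpers visible in the diff; example_queue_msg() is a hypothetical name used only for illustration:

#include <asm/unaligned.h>
#include <drm/drm_mipi_dsi.h>

static int example_queue_msg(struct mipi_dsi_host *host,
                             const struct mipi_dsi_msg *msg)
{
        struct mipi_dsi_packet pkt;
        int ret;

        /*
         * Validates the message and fills pkt.header[4], pkt.payload and
         * pkt.payload_length, so the driver no longer has to tell short
         * and long packets apart itself.
         */
        ret = mipi_dsi_create_packet(&pkt, msg);
        if (ret < 0)
                return ret;

        /*
         * The 4-byte header is pushed to the FIFO as one little-endian
         * word; get_unaligned_le32() keeps the access safe regardless of
         * the buffer's alignment.
         */
        pr_debug("hdr 0x%08x, payload %zu bytes\n",
                 get_unaligned_le32(pkt.header), pkt.payload_length);
        return 0;
}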
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d614194644c8..81cc5537cf25 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
return exynos_fb->dma_addr[index];
}
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *fb_helper = private->fb_helper;
-
- if (fb_helper)
- drm_fb_helper_hotplug_event(fb_helper);
- else
- exynos_drm_fbdev_init(dev);
-}
-
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 8baabd813ff5..72d7c0b7c216 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -50,7 +50,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
if (vm_size > exynos_gem->size)
return -EINVAL;
- ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie,
+ ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
&exynos_gem->dma_attrs);
if (ret < 0) {
@@ -317,3 +317,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
}
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
+ else
+ exynos_drm_fbdev_init(dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index e16d7f0ae192..330eef87f718 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -15,9 +15,30 @@
#ifndef _EXYNOS_DRM_FBDEV_H_
#define _EXYNOS_DRM_FBDEV_H_
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
#endif
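
Assigning (NULL) to the hook when fbdev emulation is compiled out is safe because the DRM core tests the pointer before calling it; a hedged sketch, close to what drm_kms_helper_hotplug_event() does in this kernel generation (example_hotplug_event() is a made-up name):

void example_hotplug_event(struct drm_device *dev)
{
        /* notify userspace first, then the optional fbdev hook */
        drm_sysfs_hotplug_event(dev);

        if (dev->mode_config.funcs->output_poll_changed)
                dev->mode_config.funcs->output_poll_changed(dev);
}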
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 8a4f4a0211d0..0525c56145db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -163,7 +163,6 @@ struct fimc_context {
u32 clk_frequency;
struct regmap *sysreg;
struct fimc_scaler sc;
- struct exynos_drm_ipp_pol pol;
int id;
int irq;
bool suspended;
@@ -260,32 +259,6 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
}
-static void fimc_set_polarity(struct fimc_context *ctx,
- struct exynos_drm_ipp_pol *pol)
-{
- u32 cfg;
-
- DRM_DEBUG_KMS("inv_pclk[%d]inv_vsync[%d]\n",
- pol->inv_pclk, pol->inv_vsync);
- DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n",
- pol->inv_href, pol->inv_hsync);
-
- cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
- cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
- EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
-
- if (pol->inv_pclk)
- cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
- if (pol->inv_vsync)
- cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
- if (pol->inv_href)
- cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
- if (pol->inv_hsync)
- cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
-
- fimc_write(ctx, cfg, EXYNOS_CIGCTRL);
-}
-
static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
{
u32 cfg;
@@ -1467,7 +1440,6 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
/* If set true, jpeg data of the screen can be saved */
fimc_handle_jpeg(ctx, false);
fimc_set_scaler(ctx, &ctx->sc);
- fimc_set_polarity(ctx, &ctx->pol);
switch (cmd) {
case IPP_CMD_M2M:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 70194d0e4fe4..018449f8d557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -94,12 +94,14 @@ struct fimd_driver_data {
unsigned int lcdblk_offset;
unsigned int lcdblk_vt_shift;
unsigned int lcdblk_bypass_shift;
+ unsigned int lcdblk_mic_bypass_shift;
unsigned int has_shadowcon:1;
unsigned int has_clksel:1;
unsigned int has_limited_fmt:1;
unsigned int has_vidoutcon:1;
unsigned int has_vtsel:1;
+ unsigned int has_mic_bypass:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -145,6 +147,18 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
.has_vtsel = 1,
};
+static struct fimd_driver_data exynos5420_fimd_driver_data = {
+ .timing_base = 0x20000,
+ .lcdblk_offset = 0x214,
+ .lcdblk_vt_shift = 24,
+ .lcdblk_bypass_shift = 15,
+ .lcdblk_mic_bypass_shift = 11,
+ .has_shadowcon = 1,
+ .has_vidoutcon = 1,
+ .has_vtsel = 1,
+ .has_mic_bypass = 1,
+};
+
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
@@ -168,7 +182,6 @@ struct fimd_context {
atomic_t win_updated;
atomic_t triggering;
- struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
struct drm_encoder *encoder;
};
@@ -184,6 +197,8 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos4415_fimd_driver_data },
{ .compatible = "samsung,exynos5250-fimd",
.data = &exynos5_fimd_driver_data },
+ { .compatible = "samsung,exynos5420-fimd",
+ .data = &exynos5420_fimd_driver_data },
{},
};
MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
@@ -380,7 +395,7 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
}
/* Find the clock divider value that gets us closest to ideal_clk */
- clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
+ clkdiv = DIV_ROUND_CLOSEST(clk_get_rate(ctx->lcd_clk), ideal_clk);
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
@@ -461,6 +476,18 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
return;
}
+ /* TODO: When MIC is enabled for display path, the lcdblk_mic_bypass
+ * bit should be cleared.
+ */
+ if (driver_data->has_mic_bypass && ctx->sysreg &&
+ regmap_update_bits(ctx->sysreg,
+ driver_data->lcdblk_offset,
+ 0x1 << driver_data->lcdblk_mic_bypass_shift,
+ 0x1 << driver_data->lcdblk_mic_bypass_shift)) {
+ DRM_ERROR("Failed to update sysreg for bypass mic.\n");
+ return;
+ }
+
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
VIDTCON2_HOZVAL(mode->hdisplay - 1) |
@@ -861,7 +888,8 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
* clock. On these SoCs the bootloader may enable it but any
* power domain off/on will reset it to disable state.
*/
- if (ctx->driver_data != &exynos5_fimd_driver_data)
+ if (ctx->driver_data != &exynos5_fimd_driver_data &&
+ ctx->driver_data != &exynos5420_fimd_driver_data)
return;
val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
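
A worked example of the fimd_calc_clkdiv() rounding change, using hypothetical rates of lcd_clk = 133 MHz and ideal_clk = 60 MHz:

        DIV_ROUND_UP(133, 60)      = 3  ->  133 / 3 = 44.3 MHz (15.7 MHz below ideal)
        DIV_ROUND_CLOSEST(133, 60) = 2  ->  133 / 2 = 66.5 MHz  (6.5 MHz above ideal)

Rounding to the closest divider yields the pixel clock nearer to ideal_clk, which is what the comment above the call already promises.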
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 8dfe6e113a88..193d3602dffb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -259,7 +259,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
init_dma_attrs(&g2d->cmdlist_dma_attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
- g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
&g2d->cmdlist_dma_attrs);
@@ -293,7 +293,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
return ret;
@@ -306,7 +306,8 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d)
kfree(g2d->cmdlist_node);
if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
- dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ dma_free_attrs(to_dma_dev(subdrv->drm_dev),
+ G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
}
@@ -880,7 +881,6 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
struct timeval now;
- unsigned long flags;
if (list_empty(&runqueue_node->event_list))
return;
@@ -893,10 +893,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
e->event.tv_usec = now.tv_usec;
e->event.cmdlist_no = cmdlist_no;
- spin_lock_irqsave(&drm_dev->event_lock, flags);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ drm_send_event(drm_dev, &e->base);
}
static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
@@ -1072,7 +1069,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_exynos_pending_g2d_event *e;
struct g2d_cmdlist_node *node;
struct g2d_cmdlist *cmdlist;
- unsigned long flags;
int size;
int ret;
@@ -1094,21 +1090,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
node->event = NULL;
if (req->event_type != G2D_EVENT_NOT) {
- spin_lock_irqsave(&drm_dev->event_lock, flags);
- if (file->event_space < sizeof(e->event)) {
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
- ret = -ENOMEM;
- goto err;
- }
- file->event_space -= sizeof(e->event);
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-
e = kzalloc(sizeof(*node->event), GFP_KERNEL);
if (!e) {
- spin_lock_irqsave(&drm_dev->event_lock, flags);
- file->event_space += sizeof(e->event);
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-
ret = -ENOMEM;
goto err;
}
@@ -1116,9 +1099,12 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
e->event.base.type = DRM_EXYNOS_G2D_EVENT;
e->event.base.length = sizeof(e->event);
e->event.user_data = req->user_data;
- e->base.event = &e->event.base;
- e->base.file_priv = file;
- e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
+ if (ret) {
+ kfree(e);
+ goto err;
+ }
node->event = e;
}
@@ -1220,12 +1206,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
err_unmap:
g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
- if (node->event) {
- spin_lock_irqsave(&drm_dev->event_lock, flags);
- file->event_space += sizeof(e->event);
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
- kfree(node->event);
- }
+ if (node->event)
+ drm_event_cancel_free(drm_dev, &node->event->base);
err:
g2d_put_cmdlist(g2d, node);
return ret;
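
The g2d hunks (and the ipp ones further down) move to the generic DRM event helpers. A hedged sketch of that pattern, with my_event/my_queue as hypothetical names and a made-up event type; drm_event_reserve_init() charges file->event_space and binds the pending event to the file, replacing the hand-rolled accounting deleted above, while drm_send_event() queues the event, wakes readers and owns the freeing:

#include <drm/drmP.h>

struct my_event {
        struct drm_pending_event base;
        struct drm_event payload;
};

static int my_queue(struct drm_device *dev, struct drm_file *file)
{
        struct my_event *e;
        int ret;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->payload.type = 0x80000001;   /* hypothetical event type */
        e->payload.length = sizeof(e->payload);

        ret = drm_event_reserve_init(dev, file, &e->base, &e->payload);
        if (ret) {
                kfree(e);               /* reservation failed, nothing queued */
                return ret;
        }

        drm_send_event(dev, &e->base);  /* freed by the core after delivery */
        return 0;
}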
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 26b5e4bd55b6..2914d62d0d80 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -65,7 +65,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
return -ENOMEM;
}
- exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
+ exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
&exynos_gem->dma_addr, GFP_KERNEL,
&exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
@@ -73,7 +73,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
goto err_free;
}
- ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie,
+ ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
&exynos_gem->dma_attrs);
if (ret < 0) {
@@ -98,7 +98,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
err_sgt_free:
sg_free_table(&sgt);
err_dma_free:
- dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
+ dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
exynos_gem->dma_addr, &exynos_gem->dma_attrs);
err_free:
drm_free_large(exynos_gem->pages);
@@ -118,7 +118,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
- dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
+ dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
(dma_addr_t)exynos_gem->dma_addr,
&exynos_gem->dma_attrs);
@@ -280,6 +280,15 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_gem_map *args = data;
+
+ return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
+}
+
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp)
@@ -335,7 +344,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
if (vm_size > exynos_gem->size)
return -EINVAL;
- ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
+ ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
&exynos_gem->dma_attrs);
if (ret < 0) {
@@ -381,7 +390,7 @@ int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
mutex_lock(&drm_dev->struct_mutex);
- nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+ nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
if (!nents) {
DRM_ERROR("failed to map sgl with dma.\n");
mutex_unlock(&drm_dev->struct_mutex);
@@ -396,7 +405,7 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+ dma_unmap_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 9ca5047959ec..00223052b87b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -71,6 +71,10 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* get the fake mmap offset of a gem object for use with mmap(). */
+int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/*
* get dma address from gem handle and this function could be used for
* other drivers such as 2d/3d acceleration drivers.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index d73b9ad35b7a..7ca09ee19656 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -9,7 +9,7 @@
* option) any later version.
*/
-#include <drmP.h>
+#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include <linux/dma-mapping.h>
@@ -30,7 +30,6 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
struct dma_iommu_mapping *mapping = NULL;
struct exynos_drm_private *priv = drm_dev->dev_private;
- struct device *dev = drm_dev->dev;
if (!priv->da_start)
priv->da_start = EXYNOS_DEV_ADDR_START;
@@ -43,18 +42,9 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
if (IS_ERR(mapping))
return PTR_ERR(mapping);
- dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
- GFP_KERNEL);
- if (!dev->dma_parms)
- goto error;
-
- dma_set_max_seg_size(dev, 0xffffffffu);
- dev->archdata.mapping = mapping;
+ priv->mapping = mapping;
return 0;
-error:
- arm_iommu_release_mapping(mapping);
- return -ENOMEM;
}
/*
@@ -67,9 +57,9 @@ error:
*/
void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
- struct device *dev = drm_dev->dev;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
- arm_iommu_release_mapping(dev->archdata.mapping);
+ arm_iommu_release_mapping(priv->mapping);
}
/*
@@ -84,10 +74,10 @@ void drm_release_iommu_mapping(struct drm_device *drm_dev)
int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
- struct device *dev = drm_dev->dev;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
- if (!dev->archdata.mapping)
+ if (!priv->mapping)
return 0;
subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
@@ -101,23 +91,12 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
if (subdrv_dev->archdata.mapping)
arm_iommu_detach_device(subdrv_dev);
- ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+ ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
if (ret < 0) {
DRM_DEBUG_KMS("failed iommu attach.\n");
return ret;
}
- /*
- * Set dma_ops to drm_device just one time.
- *
- * The dma mapping api needs device object and the api is used
- * to allocate physial memory and map it with iommu table.
- * If iommu attach succeeded, the sub driver would have dma_ops
- * for iommu and also all sub drivers have same dma_ops.
- */
- if (get_dma_ops(dev) == get_dma_ops(NULL))
- set_dma_ops(dev, get_dma_ops(subdrv_dev));
-
return 0;
}
@@ -133,8 +112,8 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
- struct device *dev = drm_dev->dev;
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct dma_iommu_mapping *mapping = priv->mapping;
if (!mapping || !mapping->domain)
return;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index dc1b5441f491..5ffebe02ee4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -29,9 +29,9 @@ void drm_iommu_detach_device(struct drm_device *dev_dev,
static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
{
- struct device *dev = drm_dev->dev;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
- return dev->archdata.mapping ? true : false;
+ return priv->mapping ? true : false;
}
#else
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 95eeb9116f10..9c84ee76f18a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -618,27 +618,18 @@ static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
mutex_unlock(&c_node->mem_lock);
}
-static void ipp_free_event(struct drm_pending_event *event)
-{
- kfree(event);
-}
-
static int ipp_get_event(struct drm_device *drm_dev,
struct drm_exynos_ipp_cmd_node *c_node,
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_send_event *e;
- unsigned long flags;
+ int ret;
DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e) {
- spin_lock_irqsave(&drm_dev->event_lock, flags);
- c_node->filp->event_space += sizeof(e->event);
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ if (!e)
return -ENOMEM;
- }
/* make event */
e->event.base.type = DRM_EXYNOS_IPP_EVENT;
@@ -646,9 +637,13 @@ static int ipp_get_event(struct drm_device *drm_dev,
e->event.user_data = qbuf->user_data;
e->event.prop_id = qbuf->prop_id;
e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
- e->base.event = &e->event.base;
- e->base.file_priv = c_node->filp;
- e->base.destroy = ipp_free_event;
+
+ ret = drm_event_reserve_init(drm_dev, c_node->filp, &e->base, &e->event.base);
+ if (ret) {
+ kfree(e);
+ return ret;
+ }
+
mutex_lock(&c_node->event_lock);
list_add_tail(&e->base.link, &c_node->event_list);
mutex_unlock(&c_node->event_lock);
@@ -1412,7 +1407,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_send_event *e;
struct list_head *head;
struct timeval now;
- unsigned long flags;
u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
int ret, i;
@@ -1525,10 +1519,7 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
for_each_ipp_ops(i)
e->event.buf_id[i] = tbuf_id[i];
- spin_lock_irqsave(&drm_dev->event_lock, flags);
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ drm_send_event(drm_dev, &e->base);
mutex_unlock(&c_node->event_lock);
DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9869d70e9e54..a0def0be6d65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
} else
val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
- regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+ ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
if (ret)
DRM_ERROR("mic: Failed to read system register\n");
}
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
"samsung,disp-syscon");
if (IS_ERR(mic->sysreg)) {
DRM_ERROR("mic: Failed to get system register.\n");
+ ret = PTR_ERR(mic->sysreg);
goto err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index d86227236f55..50185ac347b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -11,9 +11,10 @@
#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
}
static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
{
struct drm_plane_state *state = &exynos_state->base;
- struct drm_crtc *crtc = exynos_state->base.crtc;
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
unsigned int src_x, src_y;
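
The exynos_drm_plane.c hunk above stops reading crtc->state directly because, in the middle of an atomic check, that pointer may still be the old state; the state under construction is reachable through state->state instead. A short sketch of the idiom, using only the helper visible in the diff (example_mode_set() is a hypothetical name):

static void example_mode_set(struct drm_plane_state *state)
{
        /* fetch the CRTC state belonging to this atomic update, if any */
        struct drm_crtc_state *crtc_state =
                drm_atomic_get_existing_crtc_state(state->state, state->crtc);
        const struct drm_display_mode *mode;

        if (!crtc_state)
                return;

        mode = &crtc_state->adjusted_mode;      /* new state, not stale */
        pr_debug("using %dx%d\n", mode->hdisplay, mode->vdisplay);
}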
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index ce59f4443394..f18fbe43f55f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -20,7 +20,6 @@
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
-#include "exynos_drm.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index b605bd7395ec..608b0afa337f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -412,13 +412,6 @@ static int vidi_create_connector(struct drm_encoder *encoder)
return 0;
}
-static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void exynos_vidi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -434,7 +427,6 @@ static void exynos_vidi_disable(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
- .mode_fixup = exynos_vidi_mode_fixup,
.mode_set = exynos_vidi_mode_set,
.enable = exynos_vidi_enable,
.disable = exynos_vidi_disable,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 21a29dbce18c..e148d728e28c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -867,10 +867,8 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
{
u32 hdr_sum;
u8 chksum;
- u32 mod;
u8 ar;
- mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
if (hdata->dvi_mode) {
hdmi_reg_writeb(hdata, HDMI_VSI_CON,
HDMI_VSI_CON_DO_NOT_TRANSMIT);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index d8ab8f0af10c..4ed7798533f9 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -42,41 +42,24 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
- int ret;
- ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- if (ret)
- dev_err(fsl_dev->dev, "Disable CRTC failed\n");
- ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
- if (ret)
- dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+ regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+ regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
}
static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
- int ret;
-
- ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
- if (ret)
- dev_err(fsl_dev->dev, "Enable CRTC failed\n");
- ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
- if (ret)
- dev_err(fsl_dev->dev, "Enable CRTC failed\n");
-}
-static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
+ regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+ regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
}
static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
@@ -84,9 +67,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
struct drm_display_mode *mode = &crtc->state->mode;
- unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index;
+ unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index, pol = 0;
unsigned long dcuclk;
- int ret;
index = drm_crtc_index(crtc);
dcuclk = clk_get_rate(fsl_dev->clk);
@@ -100,51 +82,36 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
vfp = mode->vsync_start - mode->vdisplay;
vsw = mode->vsync_end - mode->vsync_start;
- ret = regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
- DCU_HSYN_PARA_BP(hbp) |
- DCU_HSYN_PARA_PW(hsw) |
- DCU_HSYN_PARA_FP(hfp));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
- DCU_VSYN_PARA_BP(vbp) |
- DCU_VSYN_PARA_PW(vsw) |
- DCU_VSYN_PARA_FP(vfp));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
- DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
- DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_SYN_POL,
- DCU_SYN_POL_INV_VS_LOW | DCU_SYN_POL_INV_HS_LOW);
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
- DCU_BGND_G(0) | DCU_BGND_B(0));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
- DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
- DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
- DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
- if (ret)
- goto set_failed;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ pol |= DCU_SYN_POL_INV_HS_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ pol |= DCU_SYN_POL_INV_VS_LOW;
+
+ regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
+ DCU_HSYN_PARA_BP(hbp) |
+ DCU_HSYN_PARA_PW(hsw) |
+ DCU_HSYN_PARA_FP(hfp));
+ regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
+ DCU_VSYN_PARA_BP(vbp) |
+ DCU_VSYN_PARA_PW(vsw) |
+ DCU_VSYN_PARA_FP(vfp));
+ regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
+ DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
+ DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
+ regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
+ regmap_write(fsl_dev->regmap, DCU_SYN_POL, pol);
+ regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
+ DCU_BGND_G(0) | DCU_BGND_B(0));
+ regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
+ regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
+ DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
+ DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
+ DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
+ regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
return;
-set_failed:
- dev_err(dev->dev, "set DCU register failed\n");
}
static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
@@ -153,7 +120,6 @@ static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
.disable = fsl_dcu_drm_disable_crtc,
.enable = fsl_dcu_drm_crtc_enable,
- .mode_fixup = fsl_dcu_drm_crtc_mode_fixup,
.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
};
@@ -174,10 +140,15 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
int ret;
primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
+ if (!primary)
+ return -ENOMEM;
+
ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
&fsl_dcu_drm_crtc_funcs, NULL);
- if (ret < 0)
+ if (ret) {
+ primary->funcs->destroy(primary);
return ret;
+ }
drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
@@ -185,26 +156,15 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
reg_num = LS1021A_LAYER_REG_NUM;
else
reg_num = VF610_LAYER_REG_NUM;
- for (i = 0; i <= fsl_dev->soc->total_layer; i++) {
- for (j = 0; j < reg_num; j++) {
- ret = regmap_write(fsl_dev->regmap,
- DCU_CTRLDESCLN(i, j), 0);
- if (ret)
- goto init_failed;
- }
+ for (i = 0; i < fsl_dev->soc->total_layer; i++) {
+ for (j = 1; j <= reg_num; j++)
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
}
- ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- if (ret)
- goto init_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
- if (ret)
- goto init_failed;
+ regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+ regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
return 0;
-init_failed:
- dev_err(fsl_dev->dev, "init DCU register failed\n");
- return ret;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index fca97d3fc846..e8d9337a66d8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -28,37 +28,36 @@
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
+static bool fsl_dcu_drm_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == DCU_INT_STATUS || reg == DCU_UPDATE_MODE)
+ return true;
+
+ return false;
+}
+
static const struct regmap_config fsl_dcu_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = fsl_dcu_drm_is_volatile_reg,
};
static int fsl_dcu_drm_irq_init(struct drm_device *dev)
{
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
- unsigned int value;
int ret;
ret = drm_irq_install(dev, fsl_dev->irq);
if (ret < 0)
dev_err(dev->dev, "failed to install IRQ handler\n");
- ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
- if (ret)
- dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
- ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
- if (ret)
- dev_err(dev->dev, "read DCU_INT_MASK failed\n");
- value &= DCU_INT_MASK_VBLANK;
- ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
- if (ret)
- dev_err(dev->dev, "set DCU_INT_MASK failed\n");
- ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
- if (ret)
- dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+ regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
+ regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0);
+ regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
return ret;
}
@@ -112,10 +111,6 @@ static int fsl_dcu_unload(struct drm_device *dev)
return 0;
}
-static void fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file)
-{
-}
-
static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -124,18 +119,17 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
int ret;
ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status);
- if (ret)
- dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+ if (ret) {
+ dev_err(dev->dev, "read DCU_INT_STATUS failed\n");
+ return IRQ_NONE;
+ }
+
if (int_status & DCU_INT_STATUS_VBLANK)
drm_handle_vblank(dev, 0);
- ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0xffffffff);
- if (ret)
- dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
- ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
- if (ret)
- dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+ regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status);
+ regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
return IRQ_HANDLED;
}
@@ -144,15 +138,11 @@ static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
unsigned int value;
- int ret;
- ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
- if (ret)
- dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+ regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
value &= ~DCU_INT_MASK_VBLANK;
- ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
- if (ret)
- dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+ regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+
return 0;
}
@@ -161,15 +151,10 @@ static void fsl_dcu_drm_disable_vblank(struct drm_device *dev,
{
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
unsigned int value;
- int ret;
- ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
- if (ret)
- dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+ regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
value |= DCU_INT_MASK_VBLANK;
- ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
- if (ret)
- dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+ regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
}
static const struct file_operations fsl_dcu_drm_fops = {
@@ -191,7 +176,6 @@ static struct drm_driver fsl_dcu_drm_driver = {
| DRIVER_PRIME | DRIVER_ATOMIC,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
- .preclose = fsl_dcu_drm_preclose,
.irq_handler = fsl_dcu_drm_irq,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = fsl_dcu_drm_enable_vblank,
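
The new .volatile_reg callback matters because the config keeps .cache_type = REGCACHE_RBTREE: regmap serves reads of cached registers without touching the bus, so registers the hardware changes on its own must opt out. A sketch of the resulting behaviour, with register names from the diff and map standing in for fsl_dev->regmap:

        unsigned int v;

        regmap_read(map, DCU_INT_STATUS, &v);   /* volatile: always a bus read */
        regmap_read(map, DCU_DIV_RATIO, &v);    /* may be served from the cache */

Without marking DCU_INT_STATUS (and the self-clearing DCU_UPDATE_MODE trigger) volatile, the IRQ handler above could keep seeing a stale status word and miss or repeat vblank events.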
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index 579b9e44e764..6413ac9e4769 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -133,7 +133,9 @@
#define DCU_LAYER_RLE_EN BIT(15)
#define DCU_LAYER_LUOFFS(x) ((x) << 4)
#define DCU_LAYER_BB_ON BIT(2)
-#define DCU_LAYER_AB(x) (x)
+#define DCU_LAYER_AB_NONE 0
+#define DCU_LAYER_AB_CHROMA_KEYING 1
+#define DCU_LAYER_AB_WHOLE_FRAME 2
#define DCU_LAYER_CKMAX_R(x) ((x) << 16)
#define DCU_LAYER_CKMAX_G(x) ((x) << 8)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index 0ef5959710e7..c564ec612b59 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -25,6 +25,8 @@ static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = {
int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
{
+ int ret;
+
drm_mode_config_init(fsl_dev->drm);
fsl_dev->drm->mode_config.min_width = 0;
@@ -33,11 +35,25 @@ int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
fsl_dev->drm->mode_config.max_height = 2047;
fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;
- drm_kms_helper_poll_init(fsl_dev->drm);
- fsl_dcu_drm_crtc_create(fsl_dev);
- fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
- fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+ ret = fsl_dcu_drm_crtc_create(fsl_dev);
+ if (ret)
+ return ret;
+
+ ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
+ if (ret)
+ goto fail_encoder;
+
+ ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+ if (ret)
+ goto fail_connector;
+
drm_mode_config_reset(fsl_dev->drm);
+ drm_kms_helper_poll_init(fsl_dev->drm);
return 0;
+fail_connector:
+ fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder);
+fail_encoder:
+ fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc);
+ return ret;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 4b13cf919575..274558b3b32b 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -41,11 +41,17 @@ static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
{
struct drm_framebuffer *fb = state->fb;
+ if (!state->fb || !state->crtc)
+ return 0;
+
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_YUV422:
return 0;
@@ -59,19 +65,15 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
{
struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
unsigned int value;
- int index, ret;
+ int index;
index = fsl_dcu_drm_plane_index(plane);
if (index < 0)
return;
- ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
- if (ret)
- dev_err(fsl_dev->dev, "read DCU_INT_MASK failed\n");
+ regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
value &= ~DCU_LAYER_EN;
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
- if (ret)
- dev_err(fsl_dev->dev, "set DCU register failed\n");
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
}
static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
@@ -82,8 +84,8 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = plane->state->fb;
struct drm_gem_cma_object *gem;
- unsigned int alpha, bpp;
- int index, ret;
+ unsigned int alpha = DCU_LAYER_AB_NONE, bpp;
+ int index;
if (!fb)
return;
@@ -97,96 +99,74 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
bpp = FSL_DCU_RGB565;
- alpha = 0xff;
break;
case DRM_FORMAT_RGB888:
bpp = FSL_DCU_RGB888;
- alpha = 0xff;
break;
case DRM_FORMAT_ARGB8888:
+ alpha = DCU_LAYER_AB_WHOLE_FRAME;
+ /* fall-through */
+ case DRM_FORMAT_XRGB8888:
bpp = FSL_DCU_ARGB8888;
- alpha = 0xff;
break;
- case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_ARGB4444:
+ alpha = DCU_LAYER_AB_WHOLE_FRAME;
+ /* fall-through */
+ case DRM_FORMAT_XRGB4444:
bpp = FSL_DCU_ARGB4444;
- alpha = 0xff;
break;
case DRM_FORMAT_ARGB1555:
+ alpha = DCU_LAYER_AB_WHOLE_FRAME;
+ /* fall-through */
+ case DRM_FORMAT_XRGB1555:
bpp = FSL_DCU_ARGB1555;
- alpha = 0xff;
break;
case DRM_FORMAT_YUV422:
bpp = FSL_DCU_YUV422;
- alpha = 0xff;
break;
default:
return;
}
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
- DCU_LAYER_HEIGHT(state->crtc_h) |
- DCU_LAYER_WIDTH(state->crtc_w));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
- DCU_LAYER_POSY(state->crtc_y) |
- DCU_LAYER_POSX(state->crtc_x));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap,
- DCU_CTRLDESCLN(index, 3), gem->paddr);
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
- DCU_LAYER_EN |
- DCU_LAYER_TRANS(alpha) |
- DCU_LAYER_BPP(bpp) |
- DCU_LAYER_AB(0));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
- DCU_LAYER_CKMAX_R(0xFF) |
- DCU_LAYER_CKMAX_G(0xFF) |
- DCU_LAYER_CKMAX_B(0xFF));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
- DCU_LAYER_CKMIN_R(0) |
- DCU_LAYER_CKMIN_G(0) |
- DCU_LAYER_CKMIN_B(0));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
- DCU_LAYER_FG_FCOLOR(0));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
- DCU_LAYER_BG_BCOLOR(0));
- if (ret)
- goto set_failed;
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
+ DCU_LAYER_HEIGHT(state->crtc_h) |
+ DCU_LAYER_WIDTH(state->crtc_w));
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
+ DCU_LAYER_POSY(state->crtc_y) |
+ DCU_LAYER_POSX(state->crtc_x));
+ regmap_write(fsl_dev->regmap,
+ DCU_CTRLDESCLN(index, 3), gem->paddr);
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
+ DCU_LAYER_EN |
+ DCU_LAYER_TRANS(0xff) |
+ DCU_LAYER_BPP(bpp) |
+ alpha);
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
+ DCU_LAYER_CKMAX_R(0xFF) |
+ DCU_LAYER_CKMAX_G(0xFF) |
+ DCU_LAYER_CKMAX_B(0xFF));
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
+ DCU_LAYER_CKMIN_R(0) |
+ DCU_LAYER_CKMIN_G(0) |
+ DCU_LAYER_CKMIN_B(0));
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
+ DCU_LAYER_FG_FCOLOR(0));
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
+ DCU_LAYER_BG_BCOLOR(0));
+
if (!strcmp(fsl_dev->soc->name, "ls1021a")) {
- ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
- DCU_LAYER_POST_SKIP(0) |
- DCU_LAYER_PRE_SKIP(0));
- if (ret)
- goto set_failed;
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
+ DCU_LAYER_POST_SKIP(0) |
+ DCU_LAYER_PRE_SKIP(0));
}
- ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
- if (ret)
- goto set_failed;
- ret = regmap_write(fsl_dev->regmap,
- DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
- if (ret)
- goto set_failed;
- return;
+ regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+ regmap_write(fsl_dev->regmap,
+ DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
-set_failed:
- dev_err(fsl_dev->dev, "set DCU register failed\n");
+ return;
}
static void
@@ -213,6 +193,7 @@ static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
{
drm_plane_cleanup(plane);
+ kfree(plane);
}
static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
@@ -227,8 +208,11 @@ static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
static const u32 fsl_dcu_drm_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB1555,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_YUV422,
};
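The new DCU_LAYER_AB_* values are OR-ed directly into control-descriptor word 4, replacing the old parameterized DCU_LAYER_AB(x). A hedged sketch of composing that word, assuming fsl_dev and index as in the update path above:

u32 ctrl = DCU_LAYER_EN |
	   DCU_LAYER_TRANS(0xff) |		/* global transparency value */
	   DCU_LAYER_BPP(FSL_DCU_ARGB8888) |
	   DCU_LAYER_AB_WHOLE_FRAME;		/* blend using per-pixel alpha */

regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), ctrl);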
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index d0717a85c7ec..b837e7a92196 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -217,7 +217,6 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
.dpms = cdv_intel_crt_dpms,
- .mode_fixup = gma_encoder_mode_fixup,
.prepare = gma_encoder_prepare,
.commit = gma_encoder_commit,
.mode_set = cdv_intel_crt_mode_set,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 6126546295e9..17db4b4749d5 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -116,7 +116,7 @@ static const struct gma_limit_t cdv_intel_limits[] = {
.p1 = {.min = 1, .max = 10},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
.find_pll = cdv_intel_find_dp_pll,
- }
+ }
};
#define _wait_for(COND, MS, W) ({ \
@@ -245,7 +245,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
/* We don't know what the other fields of these regs are, so
* leave them in place.
*/
- /*
+ /*
* The BIT 14:13 of 0x8010/0x8030 is used to select the ref clk
* for the pipe A/B. Display spec 1.06 has wrong definition.
* Correct definition is like below:
@@ -256,7 +256,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
*
* if DPLLA sets 01 and DPLLB sets 02, both use clk from DPLLA
*
- */
+ */
ret = cdv_sb_read(dev, ref_sfr, &ref_value);
if (ret)
return ret;
@@ -646,7 +646,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
* for DP/eDP. When using SSC clock, the ref clk is 100MHz.Otherwise
* it will be 27MHz. From the VBIOS code it seems that the pipe A choose
* 27MHz for DP/eDP while the Pipe B chooses the 100MHz.
- */
+ */
if (pipe == 0)
refclk = 27000;
else
@@ -659,7 +659,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
drm_mode_debug_printmodeline(adjusted_mode);
-
+
limit = gma_crtc->clock_funcs->limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
@@ -721,7 +721,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf |= PIPE_6BPC;
} else
pipeconf |= PIPE_8BPC;
-
+
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -974,7 +974,6 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.dpms = gma_crtc_dpms,
- .mode_fixup = gma_crtc_mode_fixup,
.mode_set = cdv_intel_crtc_mode_set,
.mode_set_base = gma_pipe_set_base,
.prepare = gma_crtc_prepare,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 7bb1f1aff932..c52f9adf5e04 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
* FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
* be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
*/
-static int __deprecated
+static int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index ddf2d7700759..28f9d90988ff 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -255,7 +255,6 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
.dpms = cdv_hdmi_dpms,
- .mode_fixup = gma_encoder_mode_fixup,
.prepare = gma_encoder_prepare,
.mode_set = cdv_hdmi_mode_set,
.commit = gma_encoder_commit,
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index cb95765050cc..033d894d030e 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -674,29 +674,17 @@ static const struct drm_mode_config_funcs psb_mode_funcs = {
.output_poll_changed = psbfb_output_poll_changed,
};
-static int psb_create_backlight_property(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_property *backlight;
-
- if (dev_priv->backlight_property)
- return 0;
-
- backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
-
- dev_priv->backlight_property = backlight;
-
- return 0;
-}
-
static void psb_setup_outputs(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
drm_mode_create_scaling_mode_property(dev);
- psb_create_backlight_property(dev);
+ /* It is ok for this to fail - we just don't get backlight control */
+ if (!dev_priv->backlight_property)
+ dev_priv->backlight_property = drm_property_create_range(dev, 0,
+ "backlight", 0, 100);
dev_priv->ops->output_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list,
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index ff17af4cfc64..5bf765de2517 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -478,20 +478,6 @@ int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
void gma_crtc_prepare(struct drm_crtc *crtc)
{
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index ed569d8a6af3..b2491c65f053 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -75,9 +75,6 @@ extern void gma_crtc_load_lut(struct drm_crtc *crtc);
extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, u32 start, u32 size);
extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
-extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
extern void gma_crtc_prepare(struct drm_crtc *crtc);
extern void gma_crtc_commit(struct drm_crtc *crtc);
extern void gma_crtc_disable(struct drm_crtc *crtc);
@@ -90,9 +87,6 @@ extern void gma_crtc_restore(struct drm_crtc *crtc);
extern void gma_encoder_prepare(struct drm_encoder *encoder);
extern void gma_encoder_commit(struct drm_encoder *encoder);
extern void gma_encoder_destroy(struct drm_encoder *encoder);
-extern bool gma_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
/* Common clock related functions */
extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 566d330aaeea..e7e22187c539 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -436,7 +436,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
return 0;
err:
- while (--i) {
+ while (i--) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
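The one-character fix above is easy to miss: `while (--i)` skips index 0 and, if the failure happened at i == 0, underflows and walks far out of bounds, whereas `while (i--)` unwinds exactly the entries that were set up. A sketch of the idiom, with register_item()/unregister_item() hypothetical:

static int register_all(struct item *items, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = register_item(&items[i]);
		if (ret)
			goto err;
	}
	return 0;

err:
	while (i--)	/* visits i-1 .. 0; "--i" would skip items[0] */
		unregister_item(&items[i]);
	return ret;
}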
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index d758f4cc6805..907cb51795c3 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -382,16 +382,6 @@ static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
- if (mode == connector->dpms)
- return;
-
- /*first, execute dpms*/
-
- drm_helper_connector_dpms(connector, mode);
-}
-
static struct drm_encoder *mdfld_dsi_connector_best_encoder(
struct drm_connector *connector)
{
@@ -404,7 +394,7 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder(
/*DSI connector funcs*/
static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
- .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.detect = mdfld_dsi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = mdfld_dsi_connector_set_property,
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index acd38344b302..92e3f93ee682 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -1026,10 +1026,8 @@ mrst_crtc_mode_set_exit:
const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
.dpms = mdfld_crtc_dpms,
- .mode_fixup = gma_crtc_mode_fixup,
.mode_set = mdfld_crtc_mode_set,
.mode_set_base = mdfld__intel_pipe_set_base,
.prepare = gma_crtc_prepare,
.commit = gma_crtc_commit,
};
-
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 1048f0c7c6ce..da9fd34b9550 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -657,7 +657,6 @@ pipe_set_base_exit:
const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.dpms = oaktrail_crtc_dpms,
- .mode_fixup = gma_crtc_mode_fixup,
.mode_set = oaktrail_crtc_mode_set,
.mode_set_base = oaktrail_pipe_set_base,
.prepare = gma_crtc_prepare,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2d18499d6060..8b2eb32ee988 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -601,7 +601,6 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
.dpms = oaktrail_hdmi_dpms,
- .mode_fixup = gma_encoder_mode_fixup,
.prepare = gma_encoder_prepare,
.mode_set = oaktrail_hdmi_mode_set,
.commit = gma_encoder_commit,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 92e7e5795398..4e1c6850520e 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -442,14 +442,6 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
/* FIXME: do we need to wrap the other side of this */
}
-/*
- * When a client dies:
- * - Check for and clean up flipped page state
- */
-static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
-{
-}
-
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
@@ -495,7 +487,6 @@ static struct drm_driver driver = {
.load = psb_driver_load,
.unload = psb_driver_unload,
.lastclose = psb_driver_lastclose,
- .preclose = psb_driver_preclose,
.set_busid = drm_pci_set_busid,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index dcdbc37e55e1..398015be87e4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -430,7 +430,6 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.dpms = gma_crtc_dpms,
- .mode_fixup = gma_crtc_mode_fixup,
.mode_set = psb_intel_crtc_mode_set,
.mode_set_base = gma_pipe_set_base,
.prepare = gma_crtc_prepare,
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 90db5f4dcce5..0594c45f7164 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -253,6 +253,8 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
+ if (!priv->scale_property)
+ return -ENOMEM;
drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
priv->select_subconnector);
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index c400428f6c8c..db0b03fb0ff1 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -252,14 +252,6 @@ sil164_encoder_restore(struct drm_encoder *encoder)
priv->saved_slave_state);
}
-static bool
-sil164_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static int
sil164_encoder_mode_valid(struct drm_encoder *encoder,
struct drm_display_mode *mode)
@@ -347,7 +339,6 @@ static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
.dpms = sil164_encoder_dpms,
.save = sil164_encoder_save,
.restore = sil164_encoder_restore,
- .mode_fixup = sil164_encoder_mode_fixup,
.mode_valid = sil164_encoder_mode_valid,
.mode_set = sil164_encoder_mode_set,
.detect = sil164_encoder_detect,
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 34e38749a817..f4315bc8d471 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -856,14 +856,6 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
priv->dpms = mode;
}
-static bool
-tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static int tda998x_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -1343,7 +1335,6 @@ static void tda998x_encoder_commit(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
.dpms = tda998x_encoder_dpms,
- .mode_fixup = tda998x_encoder_mode_fixup,
.prepare = tda998x_encoder_prepare,
.commit = tda998x_encoder_commit,
.mode_set = tda998x_encoder_mode_set,
@@ -1382,8 +1373,16 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
+static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
+{
+ if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
+ return drm_atomic_helper_connector_dpms(connector, mode);
+ else
+ return drm_helper_connector_dpms(connector, mode);
+}
+
static const struct drm_connector_funcs tda998x_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
+ .dpms = tda998x_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = tda998x_connector_detect,
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 051eab33e4c7..20a5d0455e19 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -2,9 +2,7 @@ config DRM_I915
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
depends on X86 && PCI
- depends on (AGP || AGP=n)
select INTEL_GTT
- select AGP_INTEL if AGP
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -47,3 +45,14 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
option changes the default for that module option.
If in doubt, say "N".
+
+config DRM_I915_USERPTR
+ bool "Always enable userptr support"
+ depends on DRM_I915
+ select MMU_NOTIFIER
+ default y
+ help
+ This option selects CONFIG_MMU_NOTIFIER if it isn't already
+ selected, to enable full userptr support.
+
+ If in doubt, say "Y".
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index cf39ed3133d6..e3f4c725a1c6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,9 +117,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
u64 size = 0;
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
- drm_mm_node_allocated(&vma->node))
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
size += vma->node.size;
}
@@ -155,7 +154,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->pin_count > 0)
pin_count++;
}
@@ -164,14 +163,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
- i915_is_ggtt(vma->vm) ? "g" : "pp",
+ vma->is_ggtt ? "g" : "pp",
vma->node.start, vma->node.size);
- if (i915_is_ggtt(vma->vm))
- seq_printf(m, ", type: %u)", vma->ggtt_view.type);
- else
- seq_puts(m, ")");
+ if (vma->is_ggtt)
+ seq_printf(m, ", type: %u", vma->ggtt_view.type);
+ seq_puts(m, ")");
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -230,7 +228,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(vma, head, mm_list) {
+ list_for_each_entry(vma, head, vm_link) {
seq_printf(m, " ");
describe_obj(m, vma->obj);
seq_printf(m, "\n");
@@ -342,13 +340,13 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->shared += obj->base.size;
if (USES_FULL_PPGTT(obj->base.dev)) {
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
struct i915_hw_ppgtt *ppgtt;
if (!drm_mm_node_allocated(&vma->node))
continue;
- if (i915_is_ggtt(vma->vm)) {
+ if (vma->is_ggtt) {
stats->global += obj->base.size;
continue;
}
@@ -454,12 +452,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->active_list, mm_list);
+ count_vmas(&vm->active_list, vm_link);
seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->inactive_list, mm_list);
+ count_vmas(&vm->inactive_list, vm_link);
seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -1336,7 +1334,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
struct intel_engine_cs *ring;
u64 acthd[I915_NUM_RINGS];
u32 seqno[I915_NUM_RINGS];
- int i;
+ u32 instdone[I915_NUM_INSTDONE_REG];
+ int i, j;
if (!i915.enable_hangcheck) {
seq_printf(m, "Hangcheck disabled\n");
@@ -1350,6 +1349,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
acthd[i] = intel_ring_get_active_head(ring);
}
+ i915_get_extra_instdone(dev, instdone);
+
intel_runtime_pm_put(dev_priv);
if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
@@ -1370,6 +1371,21 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
(long long)ring->hangcheck.max_acthd);
seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+
+ if (ring->id == RCS) {
+ seq_puts(m, "\tinstdone read =");
+
+ for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+ seq_printf(m, " 0x%08x", instdone[j]);
+
+ seq_puts(m, "\n\tinstdone accu =");
+
+ for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+ seq_printf(m, " 0x%08x",
+ ring->hangcheck.instdone[j]);
+
+ seq_puts(m, "\n");
+ }
}
return 0;
@@ -1947,11 +1963,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
- for_each_ring(ring, dev_priv, i) {
- if (ring->default_context == ctx)
- seq_printf(m, "(default context %s) ",
- ring->name);
- }
+ if (ctx == dev_priv->kernel_context)
+ seq_printf(m, "(kernel context) ");
if (i915.enable_execlists) {
seq_putc(m, '\n');
@@ -1981,12 +1994,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
static void i915_dump_lrc_obj(struct seq_file *m,
- struct intel_engine_cs *ring,
- struct drm_i915_gem_object *ctx_obj)
+ struct intel_context *ctx,
+ struct intel_engine_cs *ring)
{
struct page *page;
uint32_t *reg_state;
int j;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
unsigned long ggtt_offset = 0;
if (ctx_obj == NULL) {
@@ -1996,7 +2010,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
}
seq_printf(m, "CONTEXT: %s %u\n", ring->name,
- intel_execlists_ctx_id(ctx_obj));
+ intel_execlists_ctx_id(ctx, ring));
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
@@ -2042,13 +2056,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
- for_each_ring(ring, dev_priv, i) {
- if (ring->default_context != ctx)
- i915_dump_lrc_obj(m, ring,
- ctx->engine[i].state);
- }
- }
+ list_for_each_entry(ctx, &dev_priv->context_list, link)
+ if (ctx != dev_priv->kernel_context)
+ for_each_ring(ring, dev_priv, i)
+ i915_dump_lrc_obj(m, ctx, ring);
mutex_unlock(&dev->struct_mutex);
@@ -2097,13 +2108,13 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
read_pointer = ring->next_context_status_buffer;
- write_pointer = status_pointer & 0x07;
+ write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
- write_pointer += 6;
+ write_pointer += GEN8_CSB_ENTRIES;
seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
read_pointer, write_pointer);
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
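GEN8_CSB_ENTRIES is the depth of the hardware context-status buffer, so the adjustment above keeps the read/write delta non-negative when the write pointer has wrapped. A minimal sketch of the same computation:

static unsigned int csb_pending(unsigned int read, unsigned int write,
				unsigned int entries)
{
	if (read > write)	/* the write pointer has wrapped around */
		write += entries;

	return write - read;	/* number of unread entries */
}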
@@ -2120,11 +2131,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
- struct drm_i915_gem_object *ctx_obj;
-
- ctx_obj = head_req->ctx->engine[ring_id].state;
seq_printf(m, "\tHead request id: %u\n",
- intel_execlists_ctx_id(ctx_obj));
+ intel_execlists_ctx_id(head_req->ctx, ring));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
@@ -2458,9 +2466,9 @@ static void i915_guc_client_info(struct seq_file *m,
for_each_ring(ring, dev_priv, i) {
seq_printf(m, "\tSubmissions: %llu %s\n",
- client->submissions[i],
+ client->submissions[ring->guc_id],
ring->name);
- tot += client->submissions[i];
+ tot += client->submissions[ring->guc_id];
}
seq_printf(m, "\tTotal: %llu\n", tot);
}
@@ -2497,10 +2505,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC submissions:\n");
for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
- ring->name, guc.submissions[i],
- guc.last_seqno[i], guc.last_seqno[i]);
- total += guc.submissions[i];
+ seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
+ ring->name, guc.submissions[ring->guc_id],
+ guc.last_seqno[ring->guc_id]);
+ total += guc.submissions[ring->guc_id];
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
@@ -2578,6 +2586,10 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enabled = true;
}
}
+
+ seq_printf(m, "Main link in standby mode: %s\n",
+ yesno(dev_priv->psr.link_standby));
+
seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
if (!HAS_DDI(dev))
@@ -2860,20 +2872,6 @@ static void intel_dp_info(struct seq_file *m,
intel_panel_info(m, &intel_connector->panel);
}
-static void intel_dp_mst_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(&intel_encoder->base);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
- intel_connector->port);
-
- seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
-}
-
static void intel_hdmi_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
@@ -2917,8 +2915,6 @@ static void intel_connector_info(struct seq_file *m,
intel_hdmi_info(m, intel_connector);
else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
intel_lvds_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
- intel_dp_mst_info(m, intel_connector);
}
seq_printf(m, "\tmodes:\n");
@@ -3216,9 +3212,11 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
{
int i;
int ret;
+ struct intel_engine_cs *ring;
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_workarounds *workarounds = &dev_priv->workarounds;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -3226,15 +3224,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
- for (i = 0; i < dev_priv->workarounds.count; ++i) {
+ seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
+ for_each_ring(ring, dev_priv, i)
+ seq_printf(m, "HW whitelist count for %s: %d\n",
+ ring->name, workarounds->hw_whitelist_count[i]);
+ for (i = 0; i < workarounds->count; ++i) {
i915_reg_t addr;
u32 mask, value, read;
bool ok;
- addr = dev_priv->workarounds.reg[i].addr;
- mask = dev_priv->workarounds.reg[i].mask;
- value = dev_priv->workarounds.reg[i].value;
+ addr = workarounds->reg[i].addr;
+ mask = workarounds->reg[i].mask;
+ value = workarounds->reg[i].value;
read = I915_READ(addr);
ok = (value & mask) == (read & mask);
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d70d96fe553b..1c6d227aae7c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -391,20 +391,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_vga_client;
- /* Initialise stolen first so that we may reserve preallocated
- * objects for the BIOS to KMS transition.
- */
- ret = i915_gem_init_stolen(dev);
- if (ret)
- goto cleanup_vga_switcheroo;
-
intel_power_domains_init_hw(dev_priv, false);
intel_csr_ucode_init(dev_priv);
ret = intel_irq_install(dev_priv);
if (ret)
- goto cleanup_gem_stolen;
+ goto cleanup_csr;
intel_setup_gmbus(dev);
@@ -458,9 +451,8 @@ cleanup_irq:
intel_guc_ucode_fini(dev);
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev);
-cleanup_gem_stolen:
- i915_gem_cleanup_stolen(dev);
-cleanup_vga_switcheroo:
+cleanup_csr:
+ intel_csr_ucode_fini(dev_priv);
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -816,7 +808,41 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n");
info->num_pipes = 0;
+ } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+ DRM_INFO("PipeC fused off\n");
+ info->num_pipes -= 1;
}
+ } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+ u32 dfsm = I915_READ(SKL_DFSM);
+ u8 disabled_mask = 0;
+ bool invalid;
+ int num_bits;
+
+ if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+ disabled_mask |= BIT(PIPE_A);
+ if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+ disabled_mask |= BIT(PIPE_B);
+ if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+ disabled_mask |= BIT(PIPE_C);
+
+ num_bits = hweight8(disabled_mask);
+
+ switch (disabled_mask) {
+ case BIT(PIPE_A):
+ case BIT(PIPE_B):
+ case BIT(PIPE_A) | BIT(PIPE_B):
+ case BIT(PIPE_A) | BIT(PIPE_C):
+ invalid = true;
+ break;
+ default:
+ invalid = false;
+ }
+
+ if (num_bits > info->num_pipes || invalid)
+ DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+ disabled_mask);
+ else
+ info->num_pipes -= num_bits;
}
/* Initialize slice/subslice/EU info */
@@ -855,6 +881,94 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv)
}
}
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_gem_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and recording error state.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time. Use an ordered one.
+ */
+ dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+ if (dev_priv->wq == NULL)
+ goto out_err;
+
+ dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->hotplug.dp_wq == NULL)
+ goto out_free_wq;
+
+ dev_priv->gpu_error.hangcheck_wq =
+ alloc_ordered_workqueue("i915-hangcheck", 0);
+ if (dev_priv->gpu_error.hangcheck_wq == NULL)
+ goto out_free_dp_wq;
+
+ return 0;
+
+out_free_dp_wq:
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
+out_free_wq:
+ destroy_workqueue(dev_priv->wq);
+out_err:
+ DRM_ERROR("Failed to allocate workqueues.\n");
+
+ return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+ destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
+ destroy_workqueue(dev_priv->wq);
+}
+
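As the comment block explains, every work item ends up taking the same mutex, so an ordered workqueue (at most one item in flight, executed in queueing order) is the natural choice. A minimal sketch, with work_a and work_b as hypothetical work items:

static int example_init(struct work_struct *work_a, struct work_struct *work_b)
{
	struct workqueue_struct *wq;

	wq = alloc_ordered_workqueue("example", 0);
	if (!wq)
		return -ENOMEM;

	queue_work(wq, work_a);	/* executes first */
	queue_work(wq, work_b);	/* starts only after work_a completes */

	destroy_workqueue(wq);	/* drains remaining work before freeing */
	return 0;
}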
+static int i915_mmio_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int mmio_bar;
+ int mmio_size;
+
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ /*
+ * Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap so that it
+ * doesn't clobber the GTT, which we map with ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (INTEL_INFO(dev)->gen < 5)
+ mmio_size = 512 * 1024;
+ else
+ mmio_size = 2 * 1024 * 1024;
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+ if (dev_priv->regs == NULL) {
+ DRM_ERROR("failed to map registers\n");
+
+ return -EIO;
+ }
+
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev);
+
+ return 0;
+}
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ intel_teardown_mchbar(dev);
+ pci_iounmap(dev->pdev, dev_priv->regs);
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -870,7 +984,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
struct intel_device_info *info, *device_info;
- int ret = 0, mmio_bar, mmio_size;
+ int ret = 0;
uint32_t aperture_size;
info = (struct intel_device_info *) flags;
@@ -897,6 +1011,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
+ ret = i915_workqueues_init(dev_priv);
+ if (ret < 0)
+ goto out_free_priv;
+
intel_pm_setup(dev);
intel_runtime_pm_get(dev_priv);
@@ -915,28 +1033,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
- goto free_priv;
+ goto out_runtime_pm_put;
}
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
- /* Before gen4, the registers and the GTT are behind different BARs.
- * However, from gen4 onwards, the registers and the GTT are shared
- * in the same BAR, so we want to restrict this ioremap from
- * clobbering the GTT which we want ioremap_wc instead. Fortunately,
- * the register BAR remains the same size for all the earlier
- * generations up to Ironlake.
- */
- if (info->gen < 5)
- mmio_size = 512*1024;
- else
- mmio_size = 2*1024*1024;
-
- dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
- if (!dev_priv->regs) {
- DRM_ERROR("failed to map registers\n");
- ret = -EIO;
+ ret = i915_mmio_setup(dev);
+ if (ret < 0)
goto put_bridge;
- }
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
@@ -945,7 +1047,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
ret = i915_gem_gtt_init(dev);
if (ret)
- goto out_freecsr;
+ goto out_uncore_fini;
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
@@ -991,49 +1093,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
aperture_size);
- /* The i915 workqueue is primarily used for batched retirement of
- * requests (and thus managing bo) once the task has been completed
- * by the GPU. i915_gem_retire_requests() is called directly when we
- * need high-priority retirement, such as waiting for an explicit
- * bo.
- *
- * It is also used for periodic low-priority events, such as
- * idle-timers and recording error state.
- *
- * All tasks on the workqueue are expected to acquire the dev mutex
- * so there is no point in running more than one instance of the
- * workqueue at any time. Use an ordered one.
- */
- dev_priv->wq = alloc_ordered_workqueue("i915", 0);
- if (dev_priv->wq == NULL) {
- DRM_ERROR("Failed to create our workqueue.\n");
- ret = -ENOMEM;
- goto out_mtrrfree;
- }
-
- dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->hotplug.dp_wq == NULL) {
- DRM_ERROR("Failed to create our dp workqueue.\n");
- ret = -ENOMEM;
- goto out_freewq;
- }
-
- dev_priv->gpu_error.hangcheck_wq =
- alloc_ordered_workqueue("i915-hangcheck", 0);
- if (dev_priv->gpu_error.hangcheck_wq == NULL) {
- DRM_ERROR("Failed to create our hangcheck workqueue.\n");
- ret = -ENOMEM;
- goto out_freedpwq;
- }
-
intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
- /* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev);
intel_opregion_setup(dev);
- i915_gem_load(dev);
+ i915_gem_load_init(dev);
+ i915_gem_shrinker_init(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
@@ -1046,8 +1112,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* be lost or delayed, but we use them anyways to avoid
* stuck interrupts on some machines.
*/
- if (!IS_I945G(dev) && !IS_I945GM(dev))
- pci_enable_msi(dev->pdev);
+ if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+ if (pci_enable_msi(dev->pdev) < 0)
+ DRM_DEBUG_DRIVER("can't enable MSI\n");
+ }
intel_device_info_runtime_init(dev);
@@ -1097,38 +1165,29 @@ out_power_well:
intel_power_domains_fini(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
- WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
- unregister_shrinker(&dev_priv->mm.shrinker);
+ i915_gem_shrinker_cleanup(dev_priv);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
- destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
-out_freedpwq:
- destroy_workqueue(dev_priv->hotplug.dp_wq);
-out_freewq:
- destroy_workqueue(dev_priv->wq);
-out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
i915_global_gtt_cleanup(dev);
-out_freecsr:
- intel_csr_ucode_fini(dev_priv);
+out_uncore_fini:
intel_uncore_fini(dev);
- pci_iounmap(dev->pdev, dev_priv->regs);
+ i915_mmio_cleanup(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
-free_priv:
- kmem_cache_destroy(dev_priv->requests);
- kmem_cache_destroy(dev_priv->vmas);
- kmem_cache_destroy(dev_priv->objects);
-
+ i915_gem_load_cleanup(dev);
+out_runtime_pm_put:
intel_runtime_pm_put(dev_priv);
-
+ i915_workqueues_cleanup(dev_priv);
+out_free_priv:
kfree(dev_priv);
+
return ret;
}
@@ -1153,8 +1212,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_teardown_sysfs(dev);
- WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
- unregister_shrinker(&dev_priv->mm.shrinker);
+ i915_gem_shrinker_cleanup(dev_priv);
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1182,6 +1240,8 @@ int i915_driver_unload(struct drm_device *dev)
vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
+ intel_csr_ucode_fini(dev_priv);
+
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
@@ -1200,27 +1260,17 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv);
- i915_gem_cleanup_stolen(dev);
- intel_csr_ucode_fini(dev_priv);
-
- intel_teardown_mchbar(dev);
-
- destroy_workqueue(dev_priv->hotplug.dp_wq);
- destroy_workqueue(dev_priv->wq);
- destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
pm_qos_remove_request(&dev_priv->pm_qos);
i915_global_gtt_cleanup(dev);
intel_uncore_fini(dev);
- if (dev_priv->regs != NULL)
- pci_iounmap(dev->pdev, dev_priv->regs);
+ i915_mmio_cleanup(dev);
- kmem_cache_destroy(dev_priv->requests);
- kmem_cache_destroy(dev_priv->vmas);
- kmem_cache_destroy(dev_priv->objects);
+ i915_gem_load_cleanup(dev);
pci_dev_put(dev_priv->bridge_dev);
+ i915_workqueues_cleanup(dev_priv);
kfree(dev_priv);
return 0;
@@ -1261,8 +1311,6 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
i915_gem_context_close(dev, file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
-
- intel_modeset_preclose(dev, file);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f357058c74d9..6d2fb3f4ac62 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -35,9 +35,12 @@
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>
static struct drm_driver driver;
@@ -600,13 +603,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_gt_powersave(dev);
- /*
- * Disable CRTCs directly since we want to preserve sw state
- * for _thaw. Also, power gate the CRTC power wells.
- */
- drm_modeset_lock_all(dev);
intel_display_suspend(dev);
- drm_modeset_unlock_all(dev);
intel_dp_mst_suspend(dev);
@@ -761,12 +758,10 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
- drm_modeset_lock_all(dev);
- intel_display_resume(dev);
- drm_modeset_unlock_all(dev);
-
intel_dp_mst_resume(dev);
+ intel_display_resume(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
@@ -797,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
+ int ret;
/*
* We have a resume ordering issue with the snd-hda driver also
@@ -808,6 +803,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
+
+ /*
+ * Note that we need to set the power state explicitly, since we
+ * powered off the device during freeze and the PCI core won't power
+ * it back up for us during thaw. Powering off the device during
+ * freeze is not a hard requirement though, and during the
+ * suspend/resume phases the PCI core makes sure we get here with the
+ * device powered on. So in case we change our freeze logic and keep
+ * the device powered we can also remove the following set power state
+ * call.
+ */
+ ret = pci_set_power_state(dev->pdev, PCI_D0);
+ if (ret) {
+ DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+ goto out;
+ }
+
+ /*
+ * Note that pci_enable_device() first enables any parent bridge
+ * device and only then sets the power state for this device. The
+ * bridge enabling is a nop though, since bridge devices are resumed
+ * first. The order of enabling power and enabling the device is
+ * imposed by the PCI core as described above, so here we preserve the
+ * same order for the freeze/thaw phases.
+ *
+ * TODO: eventually we should remove pci_disable_device() /
+ * pci_enable_device() from suspend/resume. Due to how they
+ * depend on the device enable refcount we can't anyway depend on them
+ * disabling/enabling the device.
+ */
if (pci_enable_device(dev->pdev)) {
ret = -EIO;
goto out;
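In outline, the sequence the two comments above describe is: restore D0 first (the PCI core won't, because the driver powered the device down itself during freeze), then enable the device and bus mastering. A condensed sketch of that ordering:

ret = pci_set_power_state(dev->pdev, PCI_D0);	/* freeze left the device in D3 */
if (ret)
	goto out;

ret = pci_enable_device(dev->pdev);	/* parent bridges are already resumed */
if (ret)
	goto out;

pci_set_master(dev->pdev);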
@@ -969,6 +994,15 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
+ /*
+ * apple-gmux is needed on dual GPU MacBook Pro
+ * to probe the panel if we're the inactive GPU.
+ */
+ if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
+ apple_gmux_present() && pdev != vga_default_device() &&
+ !vga_switcheroo_handler_flags())
+ return -EPROBE_DEFER;
+
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -1079,7 +1113,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
*/
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
- intel_prepare_ddi(dev);
return 0;
}
@@ -1338,8 +1371,8 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
return 0;
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
- wait_for_on ? "on" : "off",
- I915_READ(VLV_GTLC_PW_STATUS));
+ onoff(wait_for_on),
+ I915_READ(VLV_GTLC_PW_STATUS));
/*
* RC6 transitioning can be delayed up to 2 msec (see
@@ -1348,7 +1381,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
err = wait_for(COND, 3);
if (err)
DRM_ERROR("timeout waiting for GT wells to go %s\n",
- wait_for_on ? "on" : "off");
+ onoff(wait_for_on));
return err;
#undef COND
@@ -1359,7 +1392,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
return;
- DRM_ERROR("GT register access while GT waking disabled\n");
+ DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
@@ -1503,6 +1536,10 @@ static int intel_runtime_suspend(struct device *device)
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+
+ if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+ DRM_ERROR("Unclaimed access detected prior to suspending\n");
+
dev_priv->pm.suspended = true;
/*
@@ -1551,6 +1588,8 @@ static int intel_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
+ if (intel_uncore_unclaimed_mmio(dev_priv))
+ DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
intel_guc_resume(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b0847b915545..daba7ebb9699 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,6 +34,7 @@
#include <uapi/drm/drm_fourcc.h>
#include <drm/drmP.h>
+#include "i915_params.h"
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
@@ -58,7 +59,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20151218"
+#define DRIVER_DATE "20160229"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -69,11 +70,11 @@
BUILD_BUG_ON(__i915_warn_cond); \
WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
-#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
+#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif
#undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
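Routing the stringified condition through a "%s" argument means the condition text is no longer interpreted as a printk format string, so a '%' inside the expression cannot be misparsed. A small sketch, with count as a hypothetical variable:

/* Old expansion: the condition text itself is the format string. */
WARN(count % 8, "WARN_ON(count % 8)");		/* "% 8)" parsed as a conversion */

/* New expansion: the text is passed as data and printed verbatim. */
WARN(count % 8, "%s", "WARN_ON(count % 8)");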
@@ -87,31 +88,25 @@
*/
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) { \
- if (i915.verbose_state_checks) \
- WARN(1, format); \
- else \
+ if (unlikely(__ret_warn_on)) \
+ if (!WARN(i915.verbose_state_checks, format)) \
DRM_ERROR(format); \
- } \
unlikely(__ret_warn_on); \
})
-#define I915_STATE_WARN_ON(condition) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) { \
- if (i915.verbose_state_checks) \
- WARN(1, "WARN_ON(" #condition ")\n"); \
- else \
- DRM_ERROR("WARN_ON(" #condition ")\n"); \
- } \
- unlikely(__ret_warn_on); \
-})
+#define I915_STATE_WARN_ON(x) \
+ I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
}
+static inline const char *onoff(bool v)
+{
+ return v ? "on" : "off";
+}
+
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
@@ -266,6 +261,9 @@ struct i915_hotplug {
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for_each_if ((__mask) & (1 << (__p)))
#define for_each_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
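A short usage sketch of the new for_each_pipe_masked() iterator; frob_pipe() is a hypothetical per-pipe helper:

enum pipe pipe;
u8 mask = BIT(PIPE_B) | BIT(PIPE_C);

for_each_pipe_masked(dev_priv, pipe, mask)
	frob_pipe(dev_priv, pipe);	/* runs for PIPE_B and PIPE_C only */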
@@ -339,7 +337,7 @@ struct drm_i915_file_private {
unsigned boosts;
} rps;
- struct intel_engine_cs *bsd_ring;
+ unsigned int bsd_ring;
};
enum intel_dpll_id {
@@ -633,6 +631,7 @@ struct drm_i915_display_funcs {
struct dpll *best_clock);
int (*compute_pipe_wm)(struct intel_crtc *crtc,
struct drm_atomic_state *state);
+ void (*program_watermarks)(struct intel_crtc_state *cstate);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -657,9 +656,6 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
- void (*update_primary_plane)(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
@@ -726,6 +722,8 @@ struct intel_uncore {
i915_reg_t reg_post;
u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
+
+ int unclaimed_mmio_check;
};
/* Iterate over initialised fw domains */
@@ -890,6 +888,9 @@ struct intel_context {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int pin_count;
+ struct i915_vma *lrc_vma;
+ u64 lrc_desc;
+ uint32_t *lrc_reg_state;
} engine[I915_NUM_RINGS];
struct list_head link;
@@ -903,16 +904,15 @@ enum fb_op_origin {
ORIGIN_DIRTYFB,
};
-struct i915_fbc {
+struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
struct mutex lock;
unsigned threshold;
- unsigned int fb_id;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
+ unsigned int visible_pipes_mask;
struct intel_crtc *crtc;
- int y;
struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
@@ -922,18 +922,52 @@ struct i915_fbc {
bool enabled;
bool active;
+ struct intel_fbc_state_cache {
+ struct {
+ unsigned int mode_flags;
+ uint32_t hsw_bdw_pixel_rate;
+ } crtc;
+
+ struct {
+ unsigned int rotation;
+ int src_w;
+ int src_h;
+ bool visible;
+ } plane;
+
+ struct {
+ u64 ilk_ggtt_offset;
+ uint32_t pixel_format;
+ unsigned int stride;
+ int fence_reg;
+ unsigned int tiling_mode;
+ } fb;
+ } state_cache;
+
+ struct intel_fbc_reg_params {
+ struct {
+ enum pipe pipe;
+ enum plane plane;
+ unsigned int fence_y_offset;
+ } crtc;
+
+ struct {
+ u64 ggtt_offset;
+ uint32_t pixel_format;
+ unsigned int stride;
+ int fence_reg;
+ } fb;
+
+ int cfb_size;
+ } params;
+
struct intel_fbc_work {
bool scheduled;
+ u32 scheduled_vblank;
struct work_struct work;
- struct drm_framebuffer *fb;
- unsigned long enable_jiffies;
} work;
const char *no_fbc_reason;
-
- bool (*is_active)(struct drm_i915_private *dev_priv);
- void (*activate)(struct intel_crtc *crtc);
- void (*deactivate)(struct drm_i915_private *dev_priv);
};
/**
@@ -973,6 +1007,7 @@ struct i915_psr {
unsigned busy_frontbuffer_bits;
bool psr2_support;
bool aux_frame_sync;
+ bool link_standby;
};
enum intel_pch {
@@ -1302,7 +1337,7 @@ struct i915_gem_mm {
bool busy;
/* the indicator for dispatch video commands on two BSD rings */
- int bsd_ring_dispatch_index;
+ unsigned int bsd_ring_dispatch_index;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
@@ -1488,7 +1523,7 @@ struct intel_vbt_data {
u8 seq_version;
u32 size;
u8 *data;
- u8 *sequence[MIPI_SEQ_MAX];
+ const u8 *sequence[MIPI_SEQ_MAX];
} dsi;
int crt_ddc_pin;
@@ -1660,11 +1695,18 @@ struct i915_wa_reg {
u32 mask;
};
-#define I915_MAX_WA_REGS 16
+/*
+ * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
+ * allowing it for RCS, as we don't foresee any need for a whitelist
+ * on other engines. If other engines ever require one, the limit
+ * will need to be increased.
+ */
+#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
u32 count;
+ u32 hw_whitelist_count[I915_NUM_RINGS];
};
struct i915_virtual_gpu {
@@ -1761,7 +1803,7 @@ struct drm_i915_private {
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
- struct i915_fbc fbc;
+ struct intel_fbc fbc;
struct i915_drrs drrs;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@@ -1785,7 +1827,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_boot_cdclk;
- unsigned int cdclk_freq, max_cdclk_freq;
+ unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
@@ -1810,6 +1852,7 @@ struct drm_i915_private {
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
+ struct drm_atomic_state *modeset_restore_state;
struct list_head vm_list; /* Global list of all address spaces */
struct i915_gtt gtt; /* VM representing the global address space */
@@ -1830,8 +1873,13 @@ struct drm_i915_private {
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
+ /* dpll and cdclk state is protected by connection_mutex */
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+
+ unsigned int active_crtcs;
+ unsigned int min_pixclk[I915_MAX_PIPES];
+
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
struct i915_workarounds workarounds;
@@ -1946,6 +1994,8 @@ struct drm_i915_private {
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
+ struct intel_context *kernel_context;
+
bool edp_low_vswing;
/* perform PHY state sanity checks? */
@@ -2270,9 +2320,9 @@ struct drm_i915_gem_request {
};
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
- struct intel_context *ctx,
- struct drm_i915_gem_request **req_out);
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+ struct intel_context *ctx);
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -2581,6 +2631,12 @@ struct drm_i915_cmd_table {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
+/* WaRsDisableCoarsePowerGating:skl,bxt */
+#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
+ IS_SKL_GT3(dev) || \
+ IS_SKL_GT4(dev))
+
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
@@ -2670,44 +2726,7 @@ extern int i915_max_ioctl;
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
-/* i915_params.c */
-struct i915_params {
- int modeset;
- int panel_ignore_lid;
- int semaphores;
- int lvds_channel_mode;
- int panel_use_ssc;
- int vbt_sdvo_panel_type;
- int enable_rc6;
- int enable_dc;
- int enable_fbc;
- int enable_ppgtt;
- int enable_execlists;
- int enable_psr;
- unsigned int preliminary_hw_support;
- int disable_power_well;
- int enable_ips;
- int invert_brightness;
- int enable_cmd_parser;
- /* leave bools at the end to not create holes */
- bool enable_hangcheck;
- bool fastboot;
- bool prefault_disable;
- bool load_detect_test;
- bool reset;
- bool disable_display;
- bool disable_vtd_wa;
- bool enable_guc_submission;
- int guc_log_level;
- int use_mmio_flip;
- int mmio_debug;
- bool verbose_state_checks;
- bool nuclear_pageflip;
- int edp_vswing;
-};
-extern struct i915_params i915 __read_mostly;
-
- /* i915_dma.c */
+/* i915_dma.c */
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2750,7 +2769,8 @@ extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_check_errors(struct drm_device *dev);
+extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
+extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
@@ -2872,7 +2892,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_gem_load(struct drm_device *dev);
+void i915_gem_load_init(struct drm_device *dev);
+void i915_gem_load_cleanup(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -3136,18 +3157,11 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-static inline bool i915_is_ggtt(struct i915_address_space *vm)
-{
- struct i915_address_space *ggtt =
- &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
- return vm == ggtt;
-}
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
WARN_ON(i915_is_ggtt(vm));
-
return container_of(vm, struct i915_hw_ppgtt, base);
}
@@ -3285,6 +3299,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
#define I915_SHRINK_ACTIVE 0x8
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
/* i915_gem_tiling.c */
@@ -3455,16 +3470,14 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
+void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
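The dedicated VLV sideband wrappers (vlv_gpio_nc_* and vlv_gps_core_*) are folded into the generic vlv_iosf_sb_read/write pair, which name the IOSF port explicitly. A minimal caller sketch, assuming the existing IOSF_PORT_GPIO_NC define from i915_reg.h and an illustrative register bit:

        /* Before: v = vlv_gpio_nc_read(dev_priv, reg); */
        u32 v = vlv_iosf_sb_read(dev_priv, IOSF_PORT_GPIO_NC, reg);
        v |= BIT(0); /* illustrative bit, not taken from this patch */
        vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, reg, v);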
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bb44bad15403..dabc08987b5e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -138,10 +138,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+ list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
- list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+ list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(src);
- page_cache_release(page);
+ put_page(page);
vaddr += PAGE_SIZE;
}
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
vaddr += PAGE_SIZE;
}
obj->dirty = 0;
@@ -272,7 +272,7 @@ drop_pages(struct drm_i915_gem_object *obj)
int ret;
drm_gem_object_reference(&obj->base);
- list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+ list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
if (i915_vma_unbind(vma))
break;
@@ -489,7 +489,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
*needs_clflush = 0;
- if (!obj->base.filp)
+ if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
return -EINVAL;
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -1251,7 +1251,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
- s64 before, now;
+ s64 before = 0; /* Only to silence a compiler warning. */
int ret;
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1271,14 +1271,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
return -ETIME;
timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+
+ /*
+ * Record current time in case interrupted by signal, or wedged.
+ */
+ before = ktime_get_raw_ns();
}
if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
- /* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
- before = ktime_get_raw_ns();
/* Optimistic spin for the next jiffie before touching IRQs */
ret = __i915_spin_request(req, state);
@@ -1343,11 +1346,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
finish_wait(&ring->irq_queue, &wait);
out:
- now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(req);
if (timeout) {
- s64 tres = *timeout - (now - before);
+ s64 tres = *timeout - (ktime_get_raw_ns() - before);
*timeout = tres < 0 ? 0 : tres;
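The residual-timeout bookkeeping in one place, as a sketch: `before` is sampled only when the caller supplied a timeout (the `= 0` initialiser merely silences the maybe-uninitialised warning), and the remaining budget is clamped so callers never see a negative value:

        s64 before = ktime_get_raw_ns(); /* sampled only if timeout != NULL */
        /* ... spin and/or sleep on the request ... */
        s64 tres = *timeout - (ktime_get_raw_ns() - before);
        *timeout = tres < 0 ? 0 : tres; /* e.g. 5 ms budget, 2 ms spent -> 3 ms left */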
@@ -2204,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
obj->dirty = 0;
@@ -2344,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
sg_mark_end(sg);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- page_cache_release(sg_page_iter_page(&sg_iter));
+ put_page(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);
@@ -2414,7 +2416,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
i915_gem_request_assign(&obj->last_read_req[ring->id], req);
- list_move_tail(&vma->mm_list, &vma->vm->active_list);
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void
@@ -2452,9 +2454,9 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
list_move_tail(&obj->global_list,
&to_i915(obj->base.dev)->mm.bound_list);
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (!list_empty(&vma->mm_list))
- list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!list_empty(&vma->vm_link))
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}
i915_gem_request_assign(&obj->last_fenced_req, NULL);
@@ -2677,10 +2679,8 @@ void i915_gem_request_free(struct kref *req_ref)
i915_gem_request_remove_from_client(req);
if (ctx) {
- if (i915.enable_execlists) {
- if (ctx != req->ring->default_context)
- intel_lr_context_unpin(req);
- }
+ if (i915.enable_execlists && ctx != req->i915->kernel_context)
+ intel_lr_context_unpin(ctx, req->ring);
i915_gem_context_unreference(ctx);
}
@@ -2688,9 +2688,10 @@ void i915_gem_request_free(struct kref *req_ref)
kmem_cache_free(req->i915->requests, req);
}
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
- struct intel_context *ctx,
- struct drm_i915_gem_request **req_out)
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_gem_request *req;
@@ -2753,6 +2754,31 @@ err:
return ret;
}
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ * This can be NULL if the request is not directly related to
+ * any specific user context, in which case this function will
+ * choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+ struct intel_context *ctx)
+{
+ struct drm_i915_gem_request *req;
+ int err;
+
+ if (ctx == NULL)
+ ctx = to_i915(engine->dev)->kernel_context;
+ err = __i915_gem_request_alloc(engine, ctx, &req);
+ return err ? ERR_PTR(err) : req;
+}
+
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);
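i915_gem_request_alloc() now follows the usual kernel ERR_PTR convention instead of returning an int through an out-parameter, and passing a NULL context selects the new kernel_context. A minimal caller sketch, mirroring the call sites converted later in this patch:

        struct drm_i915_gem_request *req;

        req = i915_gem_request_alloc(ring, NULL); /* NULL -> kernel_context */
        if (IS_ERR(req))
                return PTR_ERR(req);
        /* ... emit commands; on a later error path: */
        i915_gem_request_cancel(req);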
@@ -2944,11 +2970,9 @@ i915_gem_retire_requests(struct drm_device *dev)
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
if (i915.enable_execlists) {
- unsigned long flags;
-
- spin_lock_irqsave(&ring->execlist_lock, flags);
+ spin_lock_irq(&ring->execlist_lock);
idle &= list_empty(&ring->execlist_queue);
- spin_unlock_irqrestore(&ring->execlist_lock, flags);
+ spin_unlock_irq(&ring->execlist_lock);
intel_execlists_retire_requests(ring);
}
@@ -3170,9 +3194,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;
if (*to_req == NULL) {
- ret = i915_gem_request_alloc(to, to->default_context, to_req);
- if (ret)
- return ret;
+ struct drm_i915_gem_request *req;
+
+ req = i915_gem_request_alloc(to, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ *to_req = req;
}
trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@@ -3289,7 +3317,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (list_empty(&vma->vma_link))
+ if (list_empty(&vma->obj_link))
return 0;
if (!drm_mm_node_allocated(&vma->node)) {
@@ -3308,8 +3336,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
return ret;
}
- if (i915_is_ggtt(vma->vm) &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+ if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);
/* release the fence reg _after_ flushing */
@@ -3323,8 +3350,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
vma->vm->unbind_vma(vma);
vma->bound = 0;
- list_del_init(&vma->mm_list);
- if (i915_is_ggtt(vma->vm)) {
+ list_del_init(&vma->vm_link);
+ if (vma->is_ggtt) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
obj->map_and_fenceable = false;
} else if (vma->ggtt_view.pages) {
@@ -3372,9 +3399,9 @@ int i915_gpu_idle(struct drm_device *dev)
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = i915_switch_context(req);
if (ret) {
@@ -3581,7 +3608,7 @@ search_free:
goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&vma->mm_list, &vm->inactive_list);
+ list_add_tail(&vma->vm_link, &vm->inactive_list);
return vma;
@@ -3746,7 +3773,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* And bump the LRU for this access */
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
- list_move_tail(&vma->mm_list,
+ list_move_tail(&vma->vm_link,
&to_i915(obj->base.dev)->gtt.base.inactive_list);
return 0;
@@ -3781,7 +3808,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* catch the issue of the CS prefetch crossing page boundaries and
* reading an invalid PTE on older architectures.
*/
- list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -3844,7 +3871,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
*/
}
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -3854,7 +3881,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
}
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
@@ -4328,10 +4355,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (ret)
goto unref;
- BUILD_BUG_ON(I915_NUM_RINGS > 16);
- args->busy = obj->active << 16;
- if (obj->last_write_req)
- args->busy |= obj->last_write_req->ring->id;
+ args->busy = 0;
+ if (obj->active) {
+ int i;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_read_req[i];
+ if (req)
+ args->busy |= 1 << (16 + req->ring->exec_id);
+ }
+ if (obj->last_write_req)
+ args->busy |= obj->last_write_req->ring->exec_id;
+ }
unref:
drm_gem_object_unreference(&obj->base);
@@ -4518,7 +4555,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
- list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
int ret;
vma->pin_count = 0;
@@ -4575,7 +4612,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
vma->vm == vm)
return vma;
@@ -4592,7 +4629,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
if (WARN_ONCE(!view, "no view specified"))
return ERR_PTR(-EINVAL);
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
@@ -4601,19 +4638,16 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
void i915_gem_vma_destroy(struct i915_vma *vma)
{
- struct i915_address_space *vm = NULL;
WARN_ON(vma->node.allocated);
/* Keep the vma as a placeholder in the execbuffer reservation lists */
if (!list_empty(&vma->exec_list))
return;
- vm = vma->vm;
-
- if (!i915_is_ggtt(vm))
- i915_ppgtt_put(i915_vm_to_ppgtt(vm));
+ if (!vma->is_ggtt)
+ i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
- list_del(&vma->vma_link);
+ list_del(&vma->obj_link);
kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
@@ -4833,7 +4867,7 @@ i915_gem_init_hw(struct drm_device *dev)
*/
init_unused_rings(dev);
- BUG_ON(!dev_priv->ring[RCS].default_context);
+ BUG_ON(!dev_priv->kernel_context);
ret = i915_ppgtt_init_hw(dev);
if (ret) {
@@ -4870,10 +4904,9 @@ i915_gem_init_hw(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_request *req;
- WARN_ON(!ring->default_context);
-
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret) {
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
@@ -4996,7 +5029,7 @@ init_ring_lists(struct intel_engine_cs *ring)
}
void
-i915_gem_load(struct drm_device *dev)
+i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
@@ -5062,11 +5095,18 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- i915_gem_shrinker_init(dev_priv);
-
mutex_init(&dev_priv->fb_tracking.lock);
}
+void i915_gem_load_cleanup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ kmem_cache_destroy(dev_priv->requests);
+ kmem_cache_destroy(dev_priv->vmas);
+ kmem_cache_destroy(dev_priv->objects);
+}
+
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5113,6 +5153,8 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
+ file_priv->bsd_ring = -1;
+
ret = i915_gem_context_open(dev, file);
if (ret)
kfree(file_priv);
@@ -5155,8 +5197,8 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
- list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
+ list_for_each_entry(vma, &o->vma_list, obj_link) {
+ if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
@@ -5174,7 +5216,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
@@ -5188,8 +5230,8 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
{
struct i915_vma *vma;
- list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
+ list_for_each_entry(vma, &o->vma_list, obj_link) {
+ if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@@ -5205,7 +5247,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
@@ -5218,7 +5260,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &o->vma_list, vma_link)
+ list_for_each_entry(vma, &o->vma_list, obj_link)
if (drm_mm_node_allocated(&vma->node))
return true;
@@ -5235,8 +5277,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
BUG_ON(list_empty(&o->vma_list));
- list_for_each_entry(vma, &o->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
+ list_for_each_entry(vma, &o->vma_list, obj_link) {
+ if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
@@ -5248,7 +5290,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->pin_count > 0)
return true;
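The reworked busy ioctl reports readers as a per-engine bitmask in the upper half-word (one bit per exec_id) and the last writer's exec_id in the lower half. A hedged userspace-style decode, assuming a struct drm_i915_gem_busy result named args:

        __u32 busy = args.busy;
        bool active = busy != 0;
        __u16 readers = busy >> 16;    /* bit N set => engine with exec_id N is reading */
        __u16 writer = busy & 0xffff;  /* exec_id of last writer; meaningful only if active */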
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c25083c78ba7..5dd84e148bba 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -142,7 +142,7 @@ static void i915_gem_context_clean(struct intel_context *ctx)
return;
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
- mm_list) {
+ vm_link) {
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
break;
}
@@ -321,6 +321,18 @@ err_destroy:
return ERR_PTR(ret);
}
+static void i915_gem_context_unpin(struct intel_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ if (i915.enable_execlists) {
+ intel_lr_context_unpin(ctx, engine);
+ } else {
+ if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
+ i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+ i915_gem_context_unreference(ctx);
+ }
+}
+
void i915_gem_context_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -329,40 +341,31 @@ void i915_gem_context_reset(struct drm_device *dev)
if (i915.enable_execlists) {
struct intel_context *ctx;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->context_list, link)
intel_lr_context_reset(dev, ctx);
- }
-
- return;
}
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
- struct intel_context *lctx = ring->last_context;
-
- if (lctx) {
- if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
- i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
- i915_gem_context_unreference(lctx);
+ if (ring->last_context) {
+ i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
}
-
- /* Force the GPU state to be reinitialised on enabling */
- if (ring->default_context)
- ring->default_context->legacy_hw_ctx.initialized = false;
}
+
+ /* Force the GPU state to be reinitialised on enabling */
+ dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
- int i;
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
- if (WARN_ON(dev_priv->ring[RCS].default_context))
+ if (WARN_ON(dev_priv->kernel_context))
return 0;
if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@@ -392,12 +395,7 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
-
- /* NB: RCS will hold a ref for all rings */
- ring->default_context = ctx;
- }
+ dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
@@ -408,7 +406,7 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+ struct intel_context *dctx = dev_priv->kernel_context;
int i;
if (dctx->legacy_hw_ctx.rcs_state) {
@@ -424,28 +422,21 @@ void i915_gem_context_fini(struct drm_device *dev)
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
WARN_ON(!dev_priv->ring[RCS].last_context);
- if (dev_priv->ring[RCS].last_context == dctx) {
- /* Fake switch to NULL context */
- WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
- i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
- i915_gem_context_unreference(dctx);
- dev_priv->ring[RCS].last_context = NULL;
- }
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = I915_NUM_RINGS; --i >= 0;) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
- if (ring->last_context)
- i915_gem_context_unreference(ring->last_context);
-
- ring->default_context = NULL;
- ring->last_context = NULL;
+ if (ring->last_context) {
+ i915_gem_context_unpin(ring->last_context, ring);
+ ring->last_context = NULL;
+ }
}
i915_gem_context_unreference(dctx);
+ dev_priv->kernel_context = NULL;
}
int i915_gem_context_enable(struct drm_i915_gem_request *req)
@@ -864,6 +855,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (!contexts_enabled(dev))
return -ENODEV;
+ if (args->pad != 0)
+ return -EINVAL;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -887,6 +881,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct intel_context *ctx;
int ret;
+ if (args->pad != 0)
+ return -EINVAL;
+
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
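With the new pad checks, userspace must zero the padding field before calling the context ioctls. A minimal libdrm-style sketch using the existing uapi struct (fd is an assumption):

        struct drm_i915_gem_context_create create = { 0 }; /* .pad must stay 0 */

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
                return -errno; /* -EINVAL once pad is non-zero */
        __u32 ctx_id = create.ctx_id;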
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e9c2bfd85b52..0506016e18e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -193,10 +193,26 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
- return -EINVAL;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ int ret;
+
+ if (obj->base.size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (!obj->base.filp)
+ return -ENODEV;
+
+ ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
+ if (ret)
+ return ret;
+
+ fput(vma->vm_file);
+ vma->vm_file = get_file(obj->base.filp);
+
+ return 0;
}
-static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
@@ -212,6 +228,22 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
return ret;
}
+static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -224,6 +256,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
.begin_cpu_access = i915_gem_begin_cpu_access,
+ .end_cpu_access = i915_gem_end_cpu_access,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
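The new mmap hook forwards to the backing shmem file, so a PRIME fd exported from a shmem-backed object can now be mapped directly. A hypothetical userspace sketch (dmabuf_fd and size are assumptions):

        #include <sys/mman.h>

        void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                         dmabuf_fd, 0);
        if (ptr == MAP_FAILED)
                return -errno; /* -ENODEV for objects without a shmem filp */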
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 07c6e4d320c9..ea1f8d1bd228 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -116,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
search_again:
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+ list_for_each_entry(vma, &vm->inactive_list, vm_link) {
if (mark_free(vma, &unwind_list))
goto found;
}
@@ -125,7 +125,7 @@ search_again:
goto none;
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(vma, &vm->active_list, mm_list) {
+ list_for_each_entry(vma, &vm->active_list, vm_link) {
if (mark_free(vma, &unwind_list))
goto found;
}
@@ -270,7 +270,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
WARN_ON(!list_empty(&vm->active_list));
}
- list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+ list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
if (vma->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dccb517361b3..1328bc5021b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -193,13 +193,10 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
return eb->lut[handle];
} else {
struct hlist_head *head;
- struct hlist_node *node;
+ struct i915_vma *vma;
head = &eb->buckets[handle & eb->and];
- hlist_for_each(node, head) {
- struct i915_vma *vma;
-
- vma = hlist_entry(node, struct i915_vma, exec_node);
+ hlist_for_each_entry(vma, head, exec_node) {
if (vma->exec_handle == handle)
return vma;
}
@@ -671,7 +668,7 @@ need_reloc_mappable(struct i915_vma *vma)
if (entry->relocation_count == 0)
return false;
- if (!i915_is_ggtt(vma->vm))
+ if (!vma->is_ggtt)
return false;
/* See also use_cpu_reloc() */
@@ -690,8 +687,7 @@ eb_vma_misplaced(struct i915_vma *vma)
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
- WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
- !i915_is_ggtt(vma->vm));
+ WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
@@ -1309,6 +1305,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
exec_start = params->batch_obj_vm_offset +
params->args_batch_start_offset;
+ if (exec_len == 0)
+ exec_len = params->batch_obj->base.size;
+
ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
@@ -1325,33 +1324,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
/**
* Find one BSD ring to dispatch the corresponding BSD command.
- * The Ring ID is returned.
+ * The ring index is returned.
*/
-static int gen8_dispatch_bsd_ring(struct drm_device *dev,
- struct drm_file *file)
+static unsigned int
+gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
- /* Check whether the file_priv is using one ring */
- if (file_priv->bsd_ring)
- return file_priv->bsd_ring->id;
- else {
- /* If no, use the ping-pong mechanism to select one ring */
- int ring_id;
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
- ring_id = VCS;
- dev_priv->mm.bsd_ring_dispatch_index = 1;
- } else {
- ring_id = VCS2;
- dev_priv->mm.bsd_ring_dispatch_index = 0;
- }
- file_priv->bsd_ring = &dev_priv->ring[ring_id];
- mutex_unlock(&dev->struct_mutex);
- return ring_id;
+ /* Check whether the file_priv has already selected one ring. */
+ if ((int)file_priv->bsd_ring < 0) {
+ /* If not, use the ping-pong mechanism to select one. */
+ mutex_lock(&dev_priv->dev->struct_mutex);
+ file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
+ dev_priv->mm.bsd_ring_dispatch_index ^= 1;
+ mutex_unlock(&dev_priv->dev->struct_mutex);
}
+
+ return file_priv->bsd_ring;
}
static struct drm_i915_gem_object *
@@ -1374,6 +1363,64 @@ eb_get_batch(struct eb_vmas *eb)
return vma->obj;
}
+#define I915_USER_RINGS (4)
+
+static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+ [I915_EXEC_DEFAULT] = RCS,
+ [I915_EXEC_RENDER] = RCS,
+ [I915_EXEC_BLT] = BCS,
+ [I915_EXEC_BSD] = VCS,
+ [I915_EXEC_VEBOX] = VECS
+};
+
+static int
+eb_select_ring(struct drm_i915_private *dev_priv,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct intel_engine_cs **ring)
+{
+ unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+
+ if (user_ring_id > I915_USER_RINGS) {
+ DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+ return -EINVAL;
+ }
+
+ if ((user_ring_id != I915_EXEC_BSD) &&
+ ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+ DRM_DEBUG("execbuf with non bsd ring but with invalid "
+ "bsd dispatch flags: %d\n", (int)(args->flags));
+ return -EINVAL;
+ }
+
+ if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+ unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
+
+ if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
+ bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+ } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
+ bsd_idx <= I915_EXEC_BSD_RING2) {
+ bsd_idx >>= I915_EXEC_BSD_SHIFT;
+ bsd_idx--;
+ } else {
+ DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
+ bsd_idx);
+ return -EINVAL;
+ }
+
+ *ring = &dev_priv->ring[_VCS(bsd_idx)];
+ } else {
+ *ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+ }
+
+ if (!intel_ring_initialized(*ring)) {
+ DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
@@ -1381,6 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1411,51 +1459,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
- if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
- DRM_DEBUG("execbuf with unknown ring: %d\n",
- (int)(args->flags & I915_EXEC_RING_MASK));
- return -EINVAL;
- }
-
- if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
- ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
- DRM_DEBUG("execbuf with non bsd ring but with invalid "
- "bsd dispatch flags: %d\n", (int)(args->flags));
- return -EINVAL;
- }
-
- if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
- ring = &dev_priv->ring[RCS];
- else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
- if (HAS_BSD2(dev)) {
- int ring_id;
-
- switch (args->flags & I915_EXEC_BSD_MASK) {
- case I915_EXEC_BSD_DEFAULT:
- ring_id = gen8_dispatch_bsd_ring(dev, file);
- ring = &dev_priv->ring[ring_id];
- break;
- case I915_EXEC_BSD_RING1:
- ring = &dev_priv->ring[VCS];
- break;
- case I915_EXEC_BSD_RING2:
- ring = &dev_priv->ring[VCS2];
- break;
- default:
- DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
- (int)(args->flags & I915_EXEC_BSD_MASK));
- return -EINVAL;
- }
- } else
- ring = &dev_priv->ring[VCS];
- } else
- ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
-
- if (!intel_ring_initialized(ring)) {
- DRM_DEBUG("execbuf with invalid ring: %d\n",
- (int)(args->flags & I915_EXEC_RING_MASK));
- return -EINVAL;
- }
+ ret = eb_select_ring(dev_priv, file, args, &ring);
+ if (ret)
+ return ret;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@@ -1602,11 +1608,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
- ret = i915_gem_request_alloc(ring, ctx, &params->request);
- if (ret)
+ req = i915_gem_request_alloc(ring, ctx);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
goto err_batch_unpin;
+ }
- ret = i915_gem_request_add_to_client(params->request, file);
+ ret = i915_gem_request_add_to_client(req, file);
if (ret)
goto err_batch_unpin;
@@ -1622,6 +1630,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
+ params->request = req;
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
@@ -1645,8 +1654,8 @@ err:
* must be freed again. If it was submitted then it is being tracked
* on the active request list and no clean up is required here.
*/
- if (ret && params->request)
- i915_gem_request_cancel(params->request);
+ if (ret && !IS_ERR_OR_NULL(req))
+ i915_gem_request_cancel(req);
mutex_unlock(&dev->struct_mutex);
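A worked decode of the BSD selection flags handled by eb_select_ring(), assuming the uapi values of the time (I915_EXEC_BSD_SHIFT = 13, so I915_EXEC_BSD_RING2 = 2 << 13); I915_EXEC_BSD_DEFAULT (0) instead falls back to the per-file ping-pong in gen8_dispatch_bsd_ring():

        unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; /* 2 << 13 */
        bsd_idx >>= I915_EXEC_BSD_SHIFT;                         /* 2 */
        bsd_idx--;                                               /* 1 */
        ring = &dev_priv->ring[_VCS(bsd_idx)];                   /* -> VCS2 */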
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 598198543dcd..a2b938ec01a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -34,8 +34,8 @@
* set of these objects.
*
* Fences are used to detile GTT memory mappings. They're also connected to the
- * hardware frontbuffer render tracking and hence interract with frontbuffer
- * conmpression. Furthermore on older platforms fences are required for tiled
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
* objects used by the display engine. They can also be used by the render
* engine - they're required for blitter commands and are optional for render
* commands. But on gen4+ both display (with the exception of fbc) and rendering
@@ -46,8 +46,8 @@
*
* Finally note that because fences are such a restricted resource they're
* dynamically associated with objects. Furthermore fence state is committed to
- * the hardware lazily to avoid unecessary stalls on gen2/3. Therefore code must
- * explictly call i915_gem_object_get_fence() to synchronize fencing status
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
* for cpu access. Also note that some code wants an unfenced view, for those
* cases the fence can be removed forcefully with i915_gem_object_put_fence().
*
@@ -527,7 +527,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
- * 17 is not just a page offset, so as we page an objet out and back in,
+ * 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56f4f2e58d53..49e4f26b79d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,9 +96,11 @@
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
-const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_normal = {
+ .type = I915_GGTT_VIEW_NORMAL,
+};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
- .type = I915_GGTT_VIEW_ROTATED
+ .type = I915_GGTT_VIEW_ROTATED,
};
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
@@ -2130,6 +2132,25 @@ static void i915_address_space_init(struct i915_address_space *vm,
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
+static void gtt_write_workarounds(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* This function applies GTT-related workarounds. It runs on driver
+ * load and again after a GPU reset, so workarounds placed here are
+ * re-applied even if a reset clobbers them.
+ */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
+ if (IS_BROADWELL(dev))
+ I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+ else if (IS_CHERRYVIEW(dev))
+ I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+ else if (IS_SKYLAKE(dev))
+ I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+ else if (IS_BROXTON(dev))
+ I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+}
+
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2146,6 +2167,8 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
int i915_ppgtt_init_hw(struct drm_device *dev)
{
+ gtt_write_workarounds(dev);
+
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
@@ -2735,7 +2758,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
}
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+ list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
}
/* Clear any non-preallocated blocks */
@@ -2807,6 +2830,8 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
ppgtt->base.cleanup(&ppgtt->base);
}
+ i915_gem_cleanup_stolen(dev);
+
if (drm_mm_initialized(&vm->mm)) {
if (intel_vgpu_active(dev))
intel_vgt_deballoon();
@@ -3173,12 +3198,21 @@ int i915_gem_gtt_init(struct drm_device *dev)
}
gtt->base.dev = dev;
+ gtt->base.is_ggtt = true;
ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
&gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;
+ /*
+ * Initialise stolen early so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ ret = i915_gem_init_stolen(dev);
+ if (ret)
+ goto out_gtt_cleanup;
+
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
gtt->base.total >> 20);
@@ -3198,6 +3232,11 @@ int i915_gem_gtt_init(struct drm_device *dev)
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
return 0;
+
+out_gtt_cleanup:
+ gtt->base.cleanup(&dev_priv->gtt.base);
+
+ return ret;
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -3220,7 +3259,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != vm)
continue;
@@ -3276,19 +3315,20 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&vma->vma_link);
- INIT_LIST_HEAD(&vma->mm_list);
+ INIT_LIST_HEAD(&vma->vm_link);
+ INIT_LIST_HEAD(&vma->obj_link);
INIT_LIST_HEAD(&vma->exec_list);
vma->vm = vm;
vma->obj = obj;
+ vma->is_ggtt = i915_is_ggtt(vm);
if (i915_is_ggtt(vm))
vma->ggtt_view = *ggtt_view;
-
- list_add_tail(&vma->vma_link, &obj->vma_list);
- if (!i915_is_ggtt(vm))
+ else
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+ list_add_tail(&vma->obj_link, &obj->vma_list);
+
return vma;
}
@@ -3329,8 +3369,9 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
}
static struct scatterlist *
-rotate_pages(dma_addr_t *in, unsigned int offset,
+rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int width, unsigned int height,
+ unsigned int stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int column, row;
@@ -3342,7 +3383,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
}
for (column = 0; column < width; column++) {
- src_idx = width * (height - 1) + column;
+ src_idx = stride * (height - 1) + column;
for (row = 0; row < height; row++) {
st->nents++;
/* We don't need the pages, but need to initialize
@@ -3353,7 +3394,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
sg_dma_address(sg) = in[offset + src_idx];
sg_dma_len(sg) = PAGE_SIZE;
sg = sg_next(sg);
- src_idx -= width;
+ src_idx -= stride;
}
}
@@ -3361,10 +3402,9 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
}
static struct sg_table *
-intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
- struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter;
@@ -3406,6 +3446,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
/* Rotate the pages. */
sg = rotate_pages(page_addr_list, 0,
rot_info->width_pages, rot_info->height_pages,
+ rot_info->width_pages,
st, NULL);
/* Append the UV plane if NV12. */
@@ -3421,6 +3462,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
rotate_pages(page_addr_list, uv_start_page,
rot_info->width_pages_uv,
rot_info->height_pages_uv,
+ rot_info->width_pages_uv,
st, sg);
}
@@ -3502,7 +3544,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
vma->ggtt_view.pages = vma->obj->pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->ggtt_view.pages =
- intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
+ intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
vma->ggtt_view.pages =
intel_partial_pages(&vma->ggtt_view, vma->obj);
@@ -3558,13 +3600,9 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return 0;
if (vma->bound == 0 && vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma->vm,
- vma->node.start,
- vma->node.size,
- VM_TO_TRACE_NAME(vma->vm));
-
/* XXX: i915_vma_pin() will fix this +- hack */
vma->pin_count++;
+ trace_i915_va_alloc(vma);
ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start,
vma->node.size);
@@ -3596,7 +3634,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
- return view->params.rotation_info.size;
+ return view->params.rotated.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
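The new stride parameter lets rotate_pages() walk a source grid whose row pitch can differ from the emitted width (both callers above still pass stride == width, so this is groundwork). A standalone sketch of the index walk for an illustrative 3-wide, 2-high view with stride 3:

        /* Visits source indices column by column, bottom row first:
         * column 0: 3, 0;  column 1: 4, 1;  column 2: 5, 2.
         */
        for (column = 0; column < width; column++) {
                src_idx = stride * (height - 1) + column;
                for (row = 0; row < height; row++) {
                        use_page(in[offset + src_idx]); /* hypothetical sink */
                        src_idx -= stride;
                }
        }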
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b448ad832dcf..8774f1ba46e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
@@ -156,7 +155,7 @@ struct i915_ggtt_view {
u64 offset;
unsigned int size;
} partial;
- struct intel_rotation_info rotation_info;
+ struct intel_rotation_info rotated;
} params;
struct sg_table *pages;
@@ -184,6 +183,7 @@ struct i915_vma {
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
unsigned int bound : 4;
+ bool is_ggtt : 1;
/**
* Support different GGTT views into the same object.
@@ -195,9 +195,9 @@ struct i915_vma {
struct i915_ggtt_view ggtt_view;
/** This object's place on the active/inactive lists */
- struct list_head mm_list;
+ struct list_head vm_link;
- struct list_head vma_link; /* Link in the object's VMA list */
+ struct list_head obj_link; /* Link in the object's VMA list */
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
@@ -276,6 +276,8 @@ struct i915_address_space {
u64 start; /* Start offset always 0 for dri2 */
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
+ bool is_ggtt;
+
struct i915_page_scratch *scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -331,6 +333,8 @@ struct i915_address_space {
u32 flags);
};
+#define i915_is_ggtt(V) ((V)->is_ggtt)
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -343,6 +347,8 @@ struct i915_gtt {
size_t stolen_size; /* Total size of stolen memory */
size_t stolen_usable_size; /* Total size minus BIOS reserved */
+ size_t stolen_reserved_base;
+ size_t stolen_reserved_size;
u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
@@ -417,7 +423,7 @@ static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
uint32_t pde_shift)
{
- const uint64_t mask = ~((1 << pde_shift) - 1);
+ const uint64_t mask = ~((1ULL << pde_shift) - 1);
uint64_t end;
WARN_ON(length == 0);
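The 1ULL fix matters once pde_shift approaches or exceeds the width of int: with gen8's PML4 granularity (shift 39, per the GEN8_PML4E_SHIFT definition elsewhere in this header), `1 << 39` shifts a 32-bit int out of range, which is undefined behaviour. A worked value with the corrected expression:

        const uint64_t mask = ~((1ULL << 39) - 1); /* 0xffffff8000000000 */
        /* The mask isolates the pde-aligned base, letting i915_pte_count()
         * detect whether [addr, addr + length) crosses a PDE boundary. */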
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index f7df54a8ee2b..d3c473ffb90a 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -47,6 +47,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+ int count = 0;
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (drm_mm_node_allocated(&vma->node))
+ count++;
+ if (vma->pin_count)
+ count++;
+ }
+
+ return count;
+}
+
+static bool swap_available(void)
+{
+ return get_nr_swap_pages() > 0;
+}
+
+static bool can_release_pages(struct drm_i915_gem_object *obj)
+{
+ /* Only report true if by unbinding the object and putting its pages
+ * we can actually make forward progress towards freeing physical
+ * pages.
+ *
+ * If the pages are pinned for any other reason than being bound
+ * to the GPU, simply unbinding from the GPU is not going to succeed
+ * in releasing our pin count on the pages themselves.
+ */
+ if (obj->pages_pin_count != num_vma_bound(obj))
+ return false;
+
+ /* We can only return physical pages to the system if we can either
+ * discard the contents (because the user has marked them as being
+ * purgeable) or if we can move their contents out to swap.
+ */
+ return swap_available() || obj->madv == I915_MADV_DONTNEED;
+}
+
/**
* i915_gem_shrink - Shrink buffer object caches
* @dev_priv: i915 device
@@ -129,11 +169,14 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
continue;
+ if (!can_release_pages(obj))
+ continue;
+
drm_gem_object_reference(&obj->base);
/* For the unbound phase, this should be a no-op! */
list_for_each_entry_safe(vma, v,
- &obj->vma_list, vma_link)
+ &obj->vma_list, obj_link)
if (i915_vma_unbind(vma))
break;
@@ -188,21 +231,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
return true;
}
-static int num_vma_bound(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
- int count = 0;
-
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (drm_mm_node_allocated(&vma->node))
- count++;
- if (vma->pin_count)
- count++;
- }
-
- return count;
-}
-
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -222,7 +250,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
+ if (!obj->active && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -339,8 +367,20 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&dev_priv->mm.shrinker);
+ WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- register_oom_notifier(&dev_priv->mm.oom_notifier);
+ WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+}
+
+/**
+ * i915_gem_shrinker_cleanup - Clean up i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function unregisters the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
+{
+ WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+ unregister_shrinker(&dev_priv->mm.shrinker);
}
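The pin-count heuristic behind can_release_pages(), as a worked example under the assumption that each bound VMA holds one pages_pin_count reference and each pinned VMA one more:

        /* obj with 2 bound VMAs, 1 of them pinned => num_vma_bound() == 3
         *   pages_pin_count == 3 -> unbinding can actually free the pages
         *   pages_pin_count == 4 -> skip: an extra pin exists elsewhere
         */
        if (obj->pages_pin_count != num_vma_bound(obj))
                return false;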
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 3476877fc0d6..2e6e9fb6f80d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -458,6 +458,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
return 0;
}
+ dev_priv->gtt.stolen_reserved_base = reserved_base;
+ dev_priv->gtt.stolen_reserved_size = reserved_size;
+
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
@@ -569,6 +572,9 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj->pages == NULL)
goto cleanup;
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
@@ -632,6 +638,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
+ lockdep_assert_held(&dev->struct_mutex);
+
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
stolen_offset, gtt_offset, size);
@@ -689,7 +697,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+ list_add_tail(&vma->vm_link, &ggtt->inactive_list);
}
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 59e45b3a6937..4d30b60defda 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -49,21 +49,18 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
- struct list_head linear;
- bool has_linear;
};
struct i915_mmu_object {
struct i915_mmu_notifier *mn;
+ struct drm_i915_gem_object *obj;
struct interval_tree_node it;
struct list_head link;
- struct drm_i915_gem_object *obj;
struct work_struct work;
- bool active;
- bool is_linear;
+ bool attached;
};
-static void __cancel_userptr__worker(struct work_struct *work)
+static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
struct drm_i915_gem_object *obj = mo->obj;
@@ -81,7 +78,7 @@ static void __cancel_userptr__worker(struct work_struct *work)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
int ret = i915_vma_unbind(vma);
WARN_ON(ret && ret != -EIO);
}
@@ -94,24 +91,22 @@ static void __cancel_userptr__worker(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-static unsigned long cancel_userptr(struct i915_mmu_object *mo)
+static void add_object(struct i915_mmu_object *mo)
{
- unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
-
- /* The mmu_object is released late when destroying the
- * GEM object so it is entirely possible to gain a
- * reference on an object in the process of being freed
- * since our serialisation is via the spinlock and not
- * the struct_mutex - and consequently use it after it
- * is freed and then double free it.
- */
- if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
- schedule_work(&mo->work);
- /* only schedule one work packet to avoid the refleak */
- mo->active = false;
- }
+ if (mo->attached)
+ return;
+
+ interval_tree_insert(&mo->it, &mo->mn->objects);
+ mo->attached = true;
+}
- return end;
+static void del_object(struct i915_mmu_object *mo)
+{
+ if (!mo->attached)
+ return;
+
+ interval_tree_remove(&mo->it, &mo->mn->objects);
+ mo->attached = false;
}
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
@@ -122,28 +117,36 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct i915_mmu_object *mo;
+ struct interval_tree_node *it;
+ LIST_HEAD(cancelled);
+
+ if (RB_EMPTY_ROOT(&mn->objects))
+ return;
/* interval ranges are inclusive, but invalidate range is exclusive */
end--;
spin_lock(&mn->lock);
- if (mn->has_linear) {
- list_for_each_entry(mo, &mn->linear, link) {
- if (mo->it.last < start || mo->it.start > end)
- continue;
-
- cancel_userptr(mo);
- }
- } else {
- struct interval_tree_node *it;
+ it = interval_tree_iter_first(&mn->objects, start, end);
+ while (it) {
+ /* The mmu_object is released late when destroying the
+ * GEM object so it is entirely possible to gain a
+ * reference on an object in the process of being freed
+ * since our serialisation is via the spinlock and not
+ * the struct_mutex - and consequently use it after it
+ * is freed and then double free it. To prevent that
+ * use-after-free we only acquire a reference on the
+ * object if it is not in the process of being destroyed.
+ */
+ mo = container_of(it, struct i915_mmu_object, it);
+ if (kref_get_unless_zero(&mo->obj->base.refcount))
+ schedule_work(&mo->work);
- it = interval_tree_iter_first(&mn->objects, start, end);
- while (it) {
- mo = container_of(it, struct i915_mmu_object, it);
- start = cancel_userptr(mo);
- it = interval_tree_iter_next(it, start, end);
- }
+ list_add(&mo->link, &cancelled);
+ it = interval_tree_iter_next(it, start, end);
}
+ list_for_each_entry(mo, &cancelled, link)
+ del_object(mo);
spin_unlock(&mn->lock);
}
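
The "end--" above converts the mmu notifier's exclusive range end into the
inclusive "last" bound that the interval tree expects. A minimal standalone
sketch of that inclusive-end overlap test (plain C, synthetic ranges; not the
kernel's interval-tree implementation):

#include <stdio.h>
#include <stdbool.h>

/* Inclusive-end overlap: [a_start, a_last] and [b_start, b_last] overlap
 * iff neither interval lies wholly before the other.
 */
static bool overlaps(unsigned long a_start, unsigned long a_last,
		     unsigned long b_start, unsigned long b_last)
{
	return a_start <= b_last && b_start <= a_last;
}

int main(void)
{
	/* Invalidate [0x1000, 0x2000) -- exclusive end, so last = 0x1fff. */
	unsigned long start = 0x1000, last = 0x2000 - 1;

	printf("%d\n", overlaps(start, last, 0x2000, 0x2fff)); /* 0: no hit */
	printf("%d\n", overlaps(start, last, 0x1fff, 0x2fff)); /* 1: hit */
	return 0;
}
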
@@ -164,8 +167,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT;
- INIT_LIST_HEAD(&mn->linear);
- mn->has_linear = false;
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mn->mn, mm);
@@ -177,85 +178,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
return mn;
}
-static int
-i915_mmu_notifier_add(struct drm_device *dev,
- struct i915_mmu_notifier *mn,
- struct i915_mmu_object *mo)
-{
- struct interval_tree_node *it;
- int ret = 0;
-
- /* By this point we have already done a lot of expensive setup that
- * we do not want to repeat just because the caller (e.g. X) has a
- * signal pending (and partly because of that expensive setup, X
- * using an interrupt timer is likely to get stuck in an EINTR loop).
- */
- mutex_lock(&dev->struct_mutex);
-
- /* Make sure we drop the final active reference (and thereby
- * remove the objects from the interval tree) before we do
- * the check for overlapping objects.
- */
- i915_gem_retire_requests(dev);
-
- spin_lock(&mn->lock);
- it = interval_tree_iter_first(&mn->objects,
- mo->it.start, mo->it.last);
- if (it) {
- struct drm_i915_gem_object *obj;
-
- /* We only need to check the first object in the range as it
- * either has cancelled gup work queued and we need to
- * return back to the user to give time for the gup-workers
- * to flush their object references upon which the object will
- * be removed from the interval-tree, or the the range is
- * still in use by another client and the overlap is invalid.
- *
- * If we do have an overlap, we cannot use the interval tree
- * for fast range invalidation.
- */
-
- obj = container_of(it, struct i915_mmu_object, it)->obj;
- if (!obj->userptr.workers)
- mn->has_linear = mo->is_linear = true;
- else
- ret = -EAGAIN;
- } else
- interval_tree_insert(&mo->it, &mn->objects);
-
- if (ret == 0)
- list_add(&mo->link, &mn->linear);
-
- spin_unlock(&mn->lock);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
-{
- struct i915_mmu_object *mo;
-
- list_for_each_entry(mo, &mn->linear, link)
- if (mo->is_linear)
- return true;
-
- return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
- struct i915_mmu_object *mo)
-{
- spin_lock(&mn->lock);
- list_del(&mo->link);
- if (mo->is_linear)
- mn->has_linear = i915_mmu_notifier_has_linear(mn);
- else
- interval_tree_remove(&mo->it, &mn->objects);
- spin_unlock(&mn->lock);
-}
-
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
@@ -265,7 +187,9 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
if (mo == NULL)
return;
- i915_mmu_notifier_del(mo->mn, mo);
+ spin_lock(&mo->mn->lock);
+ del_object(mo);
+ spin_unlock(&mo->mn->lock);
kfree(mo);
obj->userptr.mmu_object = NULL;
@@ -299,7 +223,6 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
{
struct i915_mmu_notifier *mn;
struct i915_mmu_object *mo;
- int ret;
if (flags & I915_USERPTR_UNSYNCHRONIZED)
return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
@@ -316,16 +239,10 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
return -ENOMEM;
mo->mn = mn;
- mo->it.start = obj->userptr.ptr;
- mo->it.last = mo->it.start + obj->base.size - 1;
mo->obj = obj;
- INIT_WORK(&mo->work, __cancel_userptr__worker);
-
- ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
- if (ret) {
- kfree(mo);
- return ret;
- }
+ mo->it.start = obj->userptr.ptr;
+ mo->it.last = obj->userptr.ptr + obj->base.size - 1;
+ INIT_WORK(&mo->work, cancel_userptr);
obj->userptr.mmu_object = mo;
return 0;
@@ -552,8 +469,10 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
/* In order to serialise get_pages with an outstanding
* cancel_userptr, we must drop the struct_mutex and try again.
*/
- if (!value || !work_pending(&obj->userptr.mmu_object->work))
- obj->userptr.mmu_object->active = value;
+ if (!value)
+ del_object(obj->userptr.mmu_object);
+ else if (!work_pending(&obj->userptr.mmu_object->work))
+ add_object(obj->userptr.mmu_object);
else
ret = -EAGAIN;
spin_unlock(&obj->userptr.mmu_object->mn->lock);
@@ -582,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
- down_read(&mm->mmap_sem);
- while (pinned < npages) {
- ret = get_user_pages(work->task, mm,
- obj->userptr.ptr + pinned * PAGE_SIZE,
- npages - pinned,
- !obj->userptr.read_only, 0,
- pvec + pinned, NULL);
- if (ret < 0)
- break;
-
- pinned += ret;
+ ret = -EFAULT;
+ if (atomic_inc_not_zero(&mm->mm_users)) {
+ down_read(&mm->mmap_sem);
+ while (pinned < npages) {
+ ret = get_user_pages_remote
+ (work->task, mm,
+ obj->userptr.ptr + pinned * PAGE_SIZE,
+ npages - pinned,
+ !obj->userptr.read_only, 0,
+ pvec + pinned, NULL);
+ if (ret < 0)
+ break;
+
+ pinned += ret;
+ }
+ up_read(&mm->mmap_sem);
+ mmput(mm);
}
- up_read(&mm->mmap_sem);
}
mutex_lock(&dev->struct_mutex);
@@ -764,7 +688,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
obj->dirty = 0;
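
The atomic_inc_not_zero(&mm->mm_users) guard in the pages worker above is the
usual "take a reference only if the object is still alive" pattern. A rough
user-space sketch of that primitive (C11 atomics on a hypothetical refcount;
not the kernel's mm_users handling):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the count is still non-zero. */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == 0)
			return false;	/* already dying; don't revive it */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));

	return true;
}

int main(void)
{
	atomic_int users = 1;

	printf("%d\n", inc_not_zero(&users)); /* 1: ref taken, users == 2 */
	atomic_store(&users, 0);	      /* last reference dropped */
	printf("%d\n", inc_not_zero(&users)); /* 0: too late */
	return 0;
}
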
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 06ca4082735b..831895b8cb75 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -365,6 +365,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+ err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+ err_printf(m, "PCI Subsystem: %04x:%04x\n",
+ dev->pdev->subsystem_vendor,
+ dev->pdev->subsystem_device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
if (HAS_CSR(dev)) {
@@ -732,7 +736,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma;
int i = 0;
- list_for_each_entry(vma, head, mm_list) {
+ list_for_each_entry(vma, head, vm_link) {
capture_bo(err++, vma);
if (++i == count)
break;
@@ -755,7 +759,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
if (err == last)
break;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == vm && vma->pin_count > 0)
capture_bo(err++, vma);
}
@@ -1050,7 +1054,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
if (request)
rbuf = request->ctx->engine[ring->id].ringbuf;
else
- rbuf = ring->default_context->engine[ring->id].ringbuf;
+ rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
} else
rbuf = ring->buffer;
@@ -1123,12 +1127,12 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
int i;
i = 0;
- list_for_each_entry(vma, &vm->active_list, mm_list)
+ list_for_each_entry(vma, &vm->active_list, vm_link)
i++;
error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == vm && vma->pin_count > 0)
i++;
}
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 685c7991e24f..e4ba5822289b 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -40,6 +40,7 @@
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
+#define SOFT_SCRATCH_COUNT 16
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define UOS_RSA_SCRATCH_MAX_COUNT 64
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 05aa7e61cbe0..d7543efc8a5e 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -158,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_enable_rc6(dev_priv->dev) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
- (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
- (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
+ if (!intel_enable_rc6(dev) ||
+ NEEDS_WaRsDisableCoarsePowerGating(dev))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
@@ -246,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
}
+ /* Finally, update the cached copy of the GuC's WQ head */
+ gc->wq_head = desc->head;
+
kunmap_atomic(base);
return ret;
}
@@ -375,6 +376,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_engine_cs *ring;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
@@ -387,10 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct guc_execlist_context *lrc = &desc.lrc[i];
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
- struct intel_engine_cs *ring;
+ for_each_ring(ring, dev_priv, i) {
+ struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
@@ -405,7 +406,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
if (!obj)
break; /* XXX: continue? */
- ring = ringbuf->ring;
ctx_desc = intel_lr_context_descriptor(ctx, ring);
lrc->context_desc = (u32)ctx_desc;
@@ -413,16 +413,16 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
- (ring->id << GUC_ELC_ENGINE_OFFSET);
+ (ring->guc_id << GUC_ELC_ENGINE_OFFSET);
- obj = ringbuf->obj;
+ obj = ctx->engine[i].ringbuf->obj;
lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << ring->id);
+ desc.engines_used |= (1 << ring->guc_id);
}
WARN_ON(desc.engines_used == 0);
@@ -471,28 +471,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
sizeof(desc) * client->ctx_index);
}
-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
+ if (!gc)
+ return 0;
+
+	/* Quickly return if wq space is available according to the cached
+	 * head position. */
+ if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
+ return 0;
+
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
- if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
- *offset = gc->wq_tail;
+ gc->wq_head = desc->head;
- /* advance the tail for next workqueue item */
- gc->wq_tail += size;
- gc->wq_tail &= gc->wq_size - 1;
-
- /* this will break the loop */
- timeout_counter = 0;
+ if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
ret = 0;
+ break;
}
if (timeout_counter)
@@ -507,15 +509,18 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct drm_i915_gem_request *rq)
{
- enum intel_ring_id ring_id = rq->ring->id;
struct guc_wq_item *wqi;
void *base;
- u32 tail, wq_len, wq_off = 0;
- int ret;
+ u32 tail, wq_len, wq_off, space;
+
+ space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+ if (WARN_ON(space < sizeof(struct guc_wq_item)))
+ return -ENOSPC; /* shouldn't happen */
- ret = guc_get_workqueue_space(gc, &wq_off);
- if (ret)
- return ret;
+ /* postincrement WQ tail for next time */
+ wq_off = gc->wq_tail;
+ gc->wq_tail += sizeof(struct guc_wq_item);
+ gc->wq_tail &= gc->wq_size - 1;
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
@@ -537,7 +542,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
wqi->header = WQ_TYPE_INORDER |
(wq_len << WQ_LEN_SHIFT) |
- (ring_id << WQ_TARGET_SHIFT) |
+ (rq->ring->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
/* The GuC wants only the low-order word of the context descriptor */
@@ -553,29 +558,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
return 0;
}
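
The wq_head/wq_tail bookkeeping above relies on the CIRC_SPACE() arithmetic
from <linux/circ_buf.h>, which needs a power-of-two ring size and always keeps
one byte free. A standalone sketch with a hypothetical two-page work queue
(just the arithmetic, not the driver code):

#include <stdio.h>

/* As in <linux/circ_buf.h>: power-of-two ring-buffer arithmetic. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int wq_size = 8192;	/* two pages, power of two */
	unsigned int wq_head = 0;	/* cached GuC read pointer */
	unsigned int wq_tail = 0;	/* driver write pointer */

	/* Empty ring: one slot stays free, so space is size - 1. */
	printf("%u\n", CIRC_SPACE(wq_tail, wq_head, wq_size)); /* 8191 */

	/* Post-increment and wrap the tail, as guc_add_workqueue_item does. */
	wq_tail += 16;			/* one 4-DW guc_wq_item */
	wq_tail &= wq_size - 1;
	printf("%u\n", CIRC_SPACE(wq_tail, wq_head, wq_size)); /* 8175 */
	return 0;
}
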
-#define CTX_RING_BUFFER_START 0x08
-
-/* Update the ringbuffer pointer in a saved context image */
-static void lr_context_update(struct drm_i915_gem_request *rq)
-{
- enum intel_ring_id ring_id = rq->ring->id;
- struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
- struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
- struct page *page;
- uint32_t *reg_state;
-
- BUG_ON(!ctx_obj);
- WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
- WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- reg_state = kmap_atomic(page);
-
- reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
-
- kunmap_atomic(reg_state);
-}
-
/**
* i915_guc_submit() - Submit commands through GuC
* @client: the guc client where commands will go through
@@ -587,18 +569,14 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
struct intel_guc *guc = client->guc;
- enum intel_ring_id ring_id = rq->ring->id;
+ unsigned int engine_id = rq->ring->guc_id;
int q_ret, b_ret;
- /* Need this because of the deferred pin ctx and ring */
- /* Shall we move this right after ring is pinned? */
- lr_context_update(rq);
-
q_ret = guc_add_workqueue_item(client, rq);
if (q_ret == 0)
b_ret = guc_ring_doorbell(client);
- client->submissions[ring_id] += 1;
+ client->submissions[engine_id] += 1;
if (q_ret) {
client->q_fail += 1;
client->retcode = q_ret;
@@ -608,8 +586,8 @@ int i915_guc_submit(struct i915_guc_client *client,
} else {
client->retcode = 0;
}
- guc->submissions[ring_id] += 1;
- guc->last_seqno[ring_id] = rq->seqno;
+ guc->submissions[engine_id] += 1;
+ guc->last_seqno[engine_id] = rq->seqno;
return q_ret;
}
@@ -832,6 +810,96 @@ static void guc_create_log(struct intel_guc *guc)
guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
+static void init_guc_policies(struct guc_policies *policies)
+{
+ struct guc_policy *policy;
+ u32 p, i;
+
+ policies->dpc_promote_time = 500000;
+ policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+ for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+ for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+ policy = &policies->policy[p][i];
+
+ policy->execution_quantum = 1000000;
+ policy->preemption_time = 500000;
+ policy->fault_time = 250000;
+ policy->policy_flags = 0;
+ }
+ }
+
+ policies->is_valid = 1;
+}
+
+static void guc_create_ads(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_gem_object *obj;
+ struct guc_ads *ads;
+ struct guc_policies *policies;
+ struct guc_mmio_reg_state *reg_state;
+ struct intel_engine_cs *ring;
+ struct page *page;
+ u32 size, i;
+
+ /* The ads obj includes the struct itself and buffers passed to GuC */
+ size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
+ sizeof(struct guc_mmio_reg_state) +
+ GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+
+ obj = guc->ads_obj;
+ if (!obj) {
+ obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+ if (!obj)
+ return;
+
+ guc->ads_obj = obj;
+ }
+
+ page = i915_gem_object_get_page(obj, 0);
+ ads = kmap(page);
+
+ /*
+ * The GuC requires a "Golden Context" when it reinitialises
+ * engines after a reset. Here we use the Render ring default
+ * context, which must already exist and be pinned in the GGTT,
+ * so its address won't change after we've told the GuC where
+ * to find it.
+ */
+ ring = &dev_priv->ring[RCS];
+ ads->golden_context_lrca = ring->status_page.gfx_addr;
+
+ for_each_ring(ring, dev_priv, i)
+ ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+
+ /* GuC scheduling policies */
+ policies = (void *)ads + sizeof(struct guc_ads);
+ init_guc_policies(policies);
+
+ ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
+ sizeof(struct guc_ads);
+
+ /* MMIO reg state */
+ reg_state = (void *)policies + sizeof(struct guc_policies);
+
+ for_each_ring(ring, dev_priv, i) {
+ reg_state->mmio_white_list[ring->guc_id].mmio_start =
+ ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+ /* Nothing to be saved or restored for now. */
+ reg_state->mmio_white_list[ring->guc_id].count = 0;
+ }
+
+ ads->reg_state_addr = ads->scheduler_policies +
+ sizeof(struct guc_policies);
+
+ ads->reg_state_buffer = ads->reg_state_addr +
+ sizeof(struct guc_mmio_reg_state);
+
+ kunmap(page);
+}
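
The ADS object above is one allocation with the four regions packed back to
back, and each pointer handed to the GuC is simply the object's GGTT base plus
a running offset. A standalone sketch of that offset arithmetic (the sizes are
hypothetical stand-ins for the real guc_* struct sizes):

#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical stand-ins for sizeof(struct guc_ads) and friends. */
	unsigned int sz_ads = 256, sz_policies = 1024, sz_reg_state = 2048;
	unsigned int save_space = 10 * PAGE_SIZE; /* GUC_S3_SAVE_SPACE_PAGES */
	unsigned long ggtt_base = 0x100000;	/* i915_gem_obj_ggtt_offset() */

	unsigned long policies  = ggtt_base + sz_ads;
	unsigned long reg_state = policies + sz_policies;
	unsigned long reg_buf   = reg_state + sz_reg_state;

	printf("alloc %u bytes\n",
	       PAGE_ALIGN(sz_ads + sz_policies + sz_reg_state + save_space));
	printf("policies %#lx, reg_state %#lx, reg_buffer %#lx\n",
	       policies, reg_state, reg_buf);
	return 0;
}
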
+
/*
* Set up the memory resources to be shared with the GuC. At this point,
* we require just one object that can be mapped through the GGTT.
@@ -858,6 +926,8 @@ int i915_guc_submission_init(struct drm_device *dev)
guc_create_log(guc);
+ guc_create_ads(guc);
+
return 0;
}
@@ -865,7 +935,7 @@ int i915_guc_submission_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+ struct intel_context *ctx = dev_priv->kernel_context;
struct i915_guc_client *client;
/* client for execbuf submission */
@@ -896,6 +966,9 @@ void i915_guc_submission_fini(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
+ gem_release_guc_obj(dev_priv->guc.ads_obj);
+ guc->ads_obj = NULL;
+
gem_release_guc_obj(dev_priv->guc.log_obj);
guc->log_obj = NULL;
@@ -919,7 +992,7 @@ int intel_guc_suspend(struct drm_device *dev)
if (!i915.enable_guc_submission)
return 0;
- ctx = dev_priv->ring[RCS].default_context;
+ ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
@@ -945,7 +1018,7 @@ int intel_guc_resume(struct drm_device *dev)
if (!i915.enable_guc_submission)
return 0;
- ctx = dev_priv->ring[RCS].default_context;
+ ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fa8afa7860ae..1c212205d0e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1651,6 +1651,12 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
int pipe;
spin_lock(&dev_priv->irq_lock);
+
+ if (!dev_priv->display_irqs_enabled) {
+ spin_unlock(&dev_priv->irq_lock);
+ return;
+ }
+
for_each_pipe(dev_priv, pipe) {
i915_reg_t reg;
u32 mask, iir_bit = 0;
@@ -1823,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- for (;;) {
+ do {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@@ -1851,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
- }
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2188,10 +2194,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- /* We get interrupts on unclaimed registers, so check for this before we
- * do any I915_{READ,WRITE}. */
- intel_uncore_check_errors(dev);
-
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -2268,43 +2270,20 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
-static irqreturn_t gen8_irq_handler(int irq, void *arg)
+static irqreturn_t
+gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 master_ctl;
+ struct drm_device *dev = dev_priv->dev;
irqreturn_t ret = IRQ_NONE;
- uint32_t tmp = 0;
+ u32 iir;
enum pipe pipe;
- u32 aux_mask = GEN8_AUX_CHANNEL_A;
-
- if (!intel_irqs_enabled(dev_priv))
- return IRQ_NONE;
-
- /* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
-
- if (INTEL_INFO(dev_priv)->gen >= 9)
- aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
- GEN9_AUX_CHANNEL_D;
-
- master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
- master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
- if (!master_ctl)
- goto out;
-
- I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
-
- /* Find, clear, then process each source of interrupt */
-
- ret = gen8_gt_irq_handler(dev_priv, master_ctl);
if (master_ctl & GEN8_DE_MISC_IRQ) {
- tmp = I915_READ(GEN8_DE_MISC_IIR);
- if (tmp) {
- I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+ iir = I915_READ(GEN8_DE_MISC_IIR);
+ if (iir) {
+ I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
- if (tmp & GEN8_DE_MISC_GSE)
+ if (iir & GEN8_DE_MISC_GSE)
intel_opregion_asle_intr(dev);
else
DRM_ERROR("Unexpected DE Misc interrupt\n");
@@ -2314,33 +2293,40 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
- tmp = I915_READ(GEN8_DE_PORT_IIR);
- if (tmp) {
+ iir = I915_READ(GEN8_DE_PORT_IIR);
+ if (iir) {
+ u32 tmp_mask;
bool found = false;
- u32 hotplug_trigger = 0;
-
- if (IS_BROXTON(dev_priv))
- hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
- else if (IS_BROADWELL(dev_priv))
- hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
- I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+ I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
- if (tmp & aux_mask) {
+ tmp_mask = GEN8_AUX_CHANNEL_A;
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ tmp_mask |= GEN9_AUX_CHANNEL_B |
+ GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
+
+ if (iir & tmp_mask) {
dp_aux_irq_handler(dev);
found = true;
}
- if (hotplug_trigger) {
- if (IS_BROXTON(dev))
- bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
- else
- ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
- found = true;
+ if (IS_BROXTON(dev_priv)) {
+ tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
+ if (tmp_mask) {
+ bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+ found = true;
+ }
+ } else if (IS_BROADWELL(dev_priv)) {
+ tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
+ if (tmp_mask) {
+ ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+ found = true;
+ }
}
- if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
+ if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev);
found = true;
}
@@ -2353,49 +2339,51 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
for_each_pipe(dev_priv, pipe) {
- uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
+ u32 flip_done, fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
- pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
- if (pipe_iir) {
- ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+ if (!iir) {
+ DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+ continue;
+ }
- if (pipe_iir & GEN8_PIPE_VBLANK &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ ret = IRQ_HANDLED;
+ I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
- if (INTEL_INFO(dev_priv)->gen >= 9)
- flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
- else
- flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+ if (iir & GEN8_PIPE_VBLANK &&
+ intel_pipe_handle_vblank(dev, pipe))
+ intel_check_page_flip(dev, pipe);
- if (flip_done) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ flip_done = iir;
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
+ else
+ flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
- if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev, pipe);
+ if (flip_done) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
- if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
- intel_cpu_fifo_underrun_irq_handler(dev_priv,
- pipe);
+ if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
+ hsw_pipe_crc_irq_handler(dev, pipe);
+ if (iir & GEN8_PIPE_FIFO_UNDERRUN)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
- if (INTEL_INFO(dev_priv)->gen >= 9)
- fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
- else
- fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+ fault_errors = iir;
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ else
+ fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
- pipe_name(pipe),
- pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
- } else
- DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+ if (fault_errors)
+ DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ pipe_name(pipe),
+ fault_errors);
}
if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
@@ -2405,15 +2393,15 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- u32 pch_iir = I915_READ(SDEIIR);
- if (pch_iir) {
- I915_WRITE(SDEIIR, pch_iir);
+ iir = I915_READ(SDEIIR);
+ if (iir) {
+ I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
if (HAS_PCH_SPT(dev_priv))
- spt_irq_handler(dev, pch_iir);
+ spt_irq_handler(dev, iir);
else
- cpt_irq_handler(dev, pch_iir);
+ cpt_irq_handler(dev, iir);
} else {
/*
* Like on previous PCH there seems to be something
@@ -2423,10 +2411,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
}
+ return ret;
+}
+
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 master_ctl;
+ irqreturn_t ret;
+
+ if (!intel_irqs_enabled(dev_priv))
+ return IRQ_NONE;
+
+ master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
+ master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+ if (!master_ctl)
+ return IRQ_NONE;
+
+ I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+
+ /* IRQs are synced during runtime_suspend, we don't require a wakeref */
+ disable_rpm_wakeref_asserts(dev_priv);
+
+ /* Find, clear, then process each source of interrupt */
+ ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+ ret |= gen8_de_irq_handler(dev_priv, master_ctl);
+
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ_FW(GEN8_MASTER_IRQ);
-out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
@@ -2949,14 +2963,44 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
ring->hangcheck.deadlock = 0;
}
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+static bool subunits_stuck(struct intel_engine_cs *ring)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 tmp;
+ u32 instdone[I915_NUM_INSTDONE_REG];
+ bool stuck;
+ int i;
+
+ if (ring->id != RCS)
+ return true;
+
+ i915_get_extra_instdone(ring->dev, instdone);
+
+ /* There might be unstable subunit states even when
+	 * the actual head is not moving. Filter out the unstable ones by
+ * accumulating the undone -> done transitions and only
+ * consider those as progress.
+ */
+ stuck = true;
+ for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
+ const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+
+ if (tmp != ring->hangcheck.instdone[i])
+ stuck = false;
+ ring->hangcheck.instdone[i] |= tmp;
+ }
+
+ return stuck;
+}
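
subunits_stuck() treats an INSTDONE sample as progress only if it sets a bit
the accumulator has not seen yet; repeated identical samples therefore read as
stuck. A small standalone trace of that accumulation (INSTDONE values made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t accum = 0;	/* plays ring->hangcheck.instdone[i] */
	uint32_t samples[] = { 0x5, 0x5, 0x7 }; /* fake INSTDONE reads */
	int i;

	for (i = 0; i < 3; i++) {
		uint32_t tmp = samples[i] | accum;
		int progress = (tmp != accum);	/* a new unit went done */

		accum |= tmp;
		printf("sample %#x: %s\n", (unsigned int)samples[i],
		       progress ? "progress" : "stuck");
	}
	/* prints: progress, stuck, progress */
	return 0;
}
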
+
+static enum intel_ring_hangcheck_action
+head_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
if (acthd != ring->hangcheck.acthd) {
+
+ /* Clear subunit states on head movement */
+ memset(ring->hangcheck.instdone, 0,
+ sizeof(ring->hangcheck.instdone));
+
if (acthd > ring->hangcheck.max_acthd) {
ring->hangcheck.max_acthd = acthd;
return HANGCHECK_ACTIVE;
@@ -2965,6 +3009,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
return HANGCHECK_ACTIVE_LOOP;
}
+ if (!subunits_stuck(ring))
+ return HANGCHECK_ACTIVE;
+
+ return HANGCHECK_HUNG;
+}
+
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_ring_hangcheck_action ha;
+ u32 tmp;
+
+ ha = head_stuck(ring, acthd);
+ if (ha != HANGCHECK_HUNG)
+ return ha;
+
if (IS_GEN2(dev))
return HANGCHECK_HUNG;
@@ -3032,6 +3094,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
+ /* As enabling the GPU requires fairly extensive mmio access,
+ * periodically arm the mmio checker to see if we are triggering
+ * any invalid access.
+ */
+ intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
for_each_ring(ring, dev_priv, i) {
u64 acthd;
u32 seqno;
@@ -3106,7 +3174,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
+ /* Clear head and subunit states on seqno movement */
ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+
+ memset(ring->hangcheck.instdone, 0,
+ sizeof(ring->hangcheck.instdone));
}
ring->hangcheck.seqno = seqno;
@@ -3277,23 +3349,30 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+ enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
- if (pipe_mask & 1 << PIPE_A)
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
- dev_priv->de_irq_mask[PIPE_A],
- ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
- if (pipe_mask & 1 << PIPE_B)
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
- dev_priv->de_irq_mask[PIPE_B],
- ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
- if (pipe_mask & 1 << PIPE_C)
- GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
- dev_priv->de_irq_mask[PIPE_C],
- ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+ for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ dev_priv->de_irq_mask[pipe],
+ ~dev_priv->de_irq_mask[pipe] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask)
+{
+ enum pipe pipe;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* make sure we're done processing display irqs */
+ synchronize_irq(dev_priv->dev->irq);
+}
+
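
for_each_pipe_masked() used in both functions above simply visits the pipes
whose bit is set in pipe_mask. A trivial standalone sketch of that pattern
(three hypothetical pipes):

#include <stdio.h>

int main(void)
{
	unsigned int pipe_mask = (1 << 0) | (1 << 2);	/* pipes A and C */
	int pipe;

	/* The for_each_pipe_masked() idea: iterate set bits only. */
	for (pipe = 0; pipe < 3; pipe++) {
		if (!(pipe_mask & (1 << pipe)))
			continue;
		printf("reset pipe %c\n", 'A' + pipe);
	}
	return 0;
}
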
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 835d6099c769..278c9c40c2e0 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -22,6 +22,7 @@
* IN THE SOFTWARE.
*/
+#include "i915_params.h"
#include "i915_drv.h"
struct i915_params i915 __read_mostly = {
@@ -37,7 +38,7 @@ struct i915_params i915 __read_mostly = {
.enable_execlists = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
- .enable_psr = 0,
+ .enable_psr = -1,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = -1,
.enable_ips = 1,
@@ -48,7 +49,6 @@ struct i915_params i915 __read_mostly = {
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 1,
- .disable_vtd_wa = 0,
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
@@ -91,7 +91,7 @@ MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
-module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
@@ -101,7 +101,7 @@ MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
-module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
@@ -126,9 +126,11 @@ MODULE_PARM_DESC(enable_execlists,
"(-1=auto [default], 0=disabled, 1=enabled)");
module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR "
+ "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+ "Default: -1 (use per-chip default)");
-module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support.");
@@ -162,12 +164,9 @@ MODULE_PARM_DESC(invert_brightness,
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
-module_param_named(disable_display, i915.disable_display, bool, 0600);
+module_param_named(disable_display, i915.disable_display, bool, 0400);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
-module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
-MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
-
module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
new file mode 100644
index 000000000000..bd5026b15d3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_PARAMS_H_
+#define _I915_PARAMS_H_
+
+#include <linux/cache.h> /* for __read_mostly */
+
+struct i915_params {
+ int modeset;
+ int panel_ignore_lid;
+ int semaphores;
+ int lvds_channel_mode;
+ int panel_use_ssc;
+ int vbt_sdvo_panel_type;
+ int enable_rc6;
+ int enable_dc;
+ int enable_fbc;
+ int enable_ppgtt;
+ int enable_execlists;
+ int enable_psr;
+ unsigned int preliminary_hw_support;
+ int disable_power_well;
+ int enable_ips;
+ int invert_brightness;
+ int enable_cmd_parser;
+ int guc_log_level;
+ int use_mmio_flip;
+ int mmio_debug;
+ int edp_vswing;
+ /* leave bools at the end to not create holes */
+ bool enable_hangcheck;
+ bool fastboot;
+ bool prefault_disable;
+ bool load_detect_test;
+ bool reset;
+ bool disable_display;
+ bool enable_guc_submission;
+ bool verbose_state_checks;
+ bool nuclear_pageflip;
+};
+
+extern struct i915_params i915 __read_mostly;
+
+#endif
+
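
The "leave bools at the end" comment in the struct above is about padding:
interleaving 1-byte bools between ints creates alignment holes. A standalone
sketch showing the effect (sizes assume a typical ABI with 4-byte int
alignment):

#include <stdio.h>
#include <stdbool.h>

struct interleaved {		/* bools scattered between ints */
	int a;
	bool x;
	int b;
	bool y;
	int c;
};

struct packed_tail {		/* same members, bools grouped at the end */
	int a, b, c;
	bool x, y;
};

int main(void)
{
	/* Typically prints "20 16": the padding holes add up. */
	printf("%zu %zu\n", sizeof(struct interleaved),
	       sizeof(struct packed_tail));
	return 0;
}
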
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4897728713f6..363bd79dea2e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -610,16 +610,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_BYTE_ENABLES_SHIFT 4
#define IOSF_BAR_SHIFT 1
#define IOSF_SB_BUSY (1<<0)
-#define IOSF_PORT_BUNIT 0x3
-#define IOSF_PORT_PUNIT 0x4
+#define IOSF_PORT_BUNIT 0x03
+#define IOSF_PORT_PUNIT 0x04
#define IOSF_PORT_NC 0x11
#define IOSF_PORT_DPIO 0x12
-#define IOSF_PORT_DPIO_2 0x1a
#define IOSF_PORT_GPIO_NC 0x13
#define IOSF_PORT_CCK 0x14
-#define IOSF_PORT_CCU 0xA9
-#define IOSF_PORT_GPS_CORE 0x48
-#define IOSF_PORT_FLISDSI 0x1B
+#define IOSF_PORT_DPIO_2 0x1a
+#define IOSF_PORT_FLISDSI 0x1b
+#define IOSF_PORT_GPIO_SC 0x48
+#define IOSF_PORT_GPIO_SUS 0xa8
+#define IOSF_PORT_CCU 0xa9
#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)
@@ -1635,6 +1636,9 @@ enum skl_disp_power_wells {
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
+#define RING_MAX_NONPRIV_SLOTS 12
+
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
#if 0
@@ -1711,6 +1715,11 @@ enum skl_disp_power_wells {
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1<<31)
+#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
+#define CLAIM_ER_CLR (1 << 31)
+#define CLAIM_ER_OVERFLOW (1 << 16)
+#define CLAIM_ER_CTR_MASK 0xffff
+
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
#define DERRMR_PIPEA_SCANLINE (1<<0)
@@ -2898,7 +2907,14 @@ enum skl_disp_power_wells {
#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
-#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
+/*
+ * Make these a multiple of the magic number 25 to avoid SNB (e.g. Dell XPS
+ * 8300) freezing up around GPU hangs. It looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
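
A quick worked example of the new INTERVAL_1_28_US() rounding, using the
kernel's roundup() definition (standalone; the input values are illustrative):

#include <stdio.h>

#define roundup(x, y)		((((x) + (y) - 1) / (y)) * (y))
#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)

int main(void)
{
	/* 10 us in 1.28 us units: (10 * 100) >> 7 = 7, rounded up to 25. */
	printf("%d\n", INTERVAL_1_28_US(10));	/* 25 */
	/* 160 us: (16000 >> 7) = 125, already a multiple of 25. */
	printf("%d\n", INTERVAL_1_28_US(160));	/* 125 */
	return 0;
}
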
@@ -5941,6 +5957,7 @@ enum skl_disp_power_wells {
#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
+#define IVB_PIPE_C_DISABLE (1 << 28)
#define ILK_HDCP_DISABLE (1 << 25)
#define ILK_eDP_A_DISABLE (1 << 24)
#define HSW_CDCLK_LIMIT (1 << 24)
@@ -5987,10 +6004,19 @@ enum skl_disp_power_wells {
#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+
+#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0)
+#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14)
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
+#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
+
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
@@ -6036,6 +6062,8 @@ enum skl_disp_power_wells {
#define HDC_FORCE_NON_COHERENT (1<<4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
+#define GEN8_HDC_CHICKEN1 _MMIO(0x7304)
+
/* GEN9 chicken */
#define SLICE_ECO_CHICKEN0 _MMIO(0x7308)
#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
@@ -6766,6 +6794,16 @@ enum skl_disp_power_wells {
#define VLV_PMWGICZ _MMIO(0x1300a4)
+#define RC6_LOCATION _MMIO(0xD40)
+#define RC6_CTX_IN_DRAM (1 << 0)
+#define RC6_CTX_BASE _MMIO(0xD48)
+#define RC6_CTX_BASE_MASK 0xFFFFFFF0
+#define PWRCTX_MAXCNT_RCSUNIT _MMIO(0x2054)
+#define PWRCTX_MAXCNT_VCSUNIT0 _MMIO(0x12054)
+#define PWRCTX_MAXCNT_BCSUNIT _MMIO(0x22054)
+#define PWRCTX_MAXCNT_VECSUNIT _MMIO(0x1A054)
+#define PWRCTX_MAXCNT_VCSUNIT1 _MMIO(0x1C054)
+#define IDLE_TIME_MASK 0xFFFFF
#define FORCEWAKE _MMIO(0xA18C)
#define FORCEWAKE_VLV _MMIO(0x1300b0)
#define FORCEWAKE_ACK_VLV _MMIO(0x1300b4)
@@ -6904,6 +6942,7 @@ enum skl_disp_power_wells {
#define GEN6_RPDEUC _MMIO(0xA084)
#define GEN6_RPDEUCSW _MMIO(0xA088)
#define GEN6_RC_STATE _MMIO(0xA094)
+#define RC6_STATE (1 << 18)
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
@@ -7405,6 +7444,8 @@ enum skl_disp_power_wells {
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
+#define CDCLK_FREQ _MMIO(0x46200)
+
#define _TRANSA_MSA_MISC 0x60410
#define _TRANSB_MSA_MISC 0x61410
#define _TRANSC_MSA_MISC 0x62410
@@ -7536,6 +7577,7 @@ enum skl_disp_power_wells {
#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
#define DC_STATE_DEBUG _MMIO(0x45520)
+#define DC_STATE_DEBUG_MASK_CORES (1<<0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
@@ -8155,4 +8197,11 @@ enum skl_disp_power_wells {
#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
+/* gamt regs */
+#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
+#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
+#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */
+#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
+#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a8af594fbd00..34e061a9ef06 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev)
}
/* only restore FBC info on the platform that supports FBC*/
- intel_fbc_disable(dev_priv);
+ intel_fbc_global_disable(dev_priv);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 37e3f0ddf8e0..c6188dddb341 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -164,7 +164,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -200,7 +200,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -521,7 +521,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
loff_t off, size_t count)
{
- struct device *kdev = container_of(kobj, struct device, kobj);
+ struct device *kdev = kobj_to_dev(kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct i915_error_state_file_priv error_priv;
@@ -556,7 +556,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- struct device *kdev = container_of(kobj, struct device, kobj);
+ struct device *kdev = kobj_to_dev(kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
int ret;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 52b2d409945d..fa09e5581137 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -175,35 +175,24 @@ TRACE_EVENT(i915_vma_unbind,
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
-#define VM_TO_TRACE_NAME(vm) \
- (i915_is_ggtt(vm) ? "G" : \
- "P")
-
-DECLARE_EVENT_CLASS(i915_va,
- TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
- TP_ARGS(vm, start, length, name),
+TRACE_EVENT(i915_va_alloc,
+ TP_PROTO(struct i915_vma *vma),
+ TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct i915_address_space *, vm)
__field(u64, start)
__field(u64, end)
- __string(name, name)
),
TP_fast_assign(
- __entry->vm = vm;
- __entry->start = start;
- __entry->end = start + length - 1;
- __assign_str(name, name);
+ __entry->vm = vma->vm;
+ __entry->start = vma->node.start;
+ __entry->end = vma->node.start + vma->node.size - 1;
),
- TP_printk("vm=%p (%s), 0x%llx-0x%llx",
- __entry->vm, __get_str(name), __entry->start, __entry->end)
-);
-
-DEFINE_EVENT(i915_va, i915_va_alloc,
- TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name),
- TP_ARGS(vm, start, length, name)
+ TP_printk("vm=%p (%c), 0x%llx-0x%llx",
+ __entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P', __entry->start, __entry->end)
);
DECLARE_EVENT_CLASS(i915_px_entry,
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index d0b1c9afa35e..8e579a8505ac 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -97,6 +97,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
crtc_state->wm_changed = false;
+ crtc_state->fb_changed = false;
return &crtc_state->base;
}
@@ -308,5 +309,5 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
- state->dpll_set = false;
+ state->dpll_set = state->modeset = false;
}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index c6bb0fc1edfb..e0b851a0004a 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -152,9 +152,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
- crtc_state->base.active ? crtc_state->pipe_src_w : 0;
+ crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 =
- crtc_state->base.active ? crtc_state->pipe_src_h : 0;
+ crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -194,8 +194,16 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
+ struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_existing_crtc_state(old_state->state, crtc);
- intel_plane->commit_plane(plane, intel_state);
+ if (intel_state->visible)
+ intel_plane->update_plane(plane,
+ to_intel_crtc_state(crtc_state),
+ intel_state);
+ else
+ intel_plane->disable_plane(plane, crtc);
}
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 31f6d212fb1b..7d281b40064a 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -262,8 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
@@ -476,8 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
+ if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -515,8 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
connector->eld[5] |= (1 << 2);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -527,6 +524,8 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = connector;
+ /* referred in audio callbacks */
+ dev_priv->dig_port_map[port] = intel_encoder;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
@@ -554,6 +553,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
mutex_lock(&dev_priv->av_mutex);
intel_dig_port->audio_connector = NULL;
+ dev_priv->dig_port_map[port] = NULL;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index eba3e0f87181..bf62a19c8f69 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -31,11 +31,49 @@
#include "i915_drv.h"
#include "intel_bios.h"
+/**
+ * DOC: Video BIOS Table (VBT)
+ *
+ * The Video BIOS Table, or VBT, provides platform and board specific
+ * configuration information to the driver that is not discoverable or available
+ * through other means. The configuration is mostly related to display
+ * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
+ * the PCI ROM.
+ *
+ * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
+ * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
+ * contain the actual configuration information. The VBT Header, and thus the
+ * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the
+ * BDB Header. The data blocks are concatenated after the BDB Header. The data
+ * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of
+ * data. (Block 53, the MIPI Sequence Block, is an exception.)
+ *
+ * The driver parses the VBT during load. The relevant information is stored in
+ * driver private data for ease of use, and the actual VBT is not read after
+ * that.
+ */
+
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
static int panel_type;
+/* Get BDB block size given a pointer to Block ID. */
+static u32 _get_blocksize(const u8 *block_base)
+{
+ /* The MIPI Sequence Block v3+ has a separate size field. */
+ if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
+ return *((const u32 *)(block_base + 4));
+ else
+ return *((const u16 *)(block_base + 1));
+}
+
+/* Get BDB block size given a pointer to data after Block ID and Block Size. */
+static u32 get_blocksize(const void *block_data)
+{
+ return _get_blocksize(block_data - 3);
+}
+
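
find_section() below walks the blocks using exactly this ID/size framing. A
standalone sketch of that walk over a synthetic block stream (little-endian
host assumed; the MIPI Sequence v3+ size special case is omitted):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Walk a synthetic BDB block stream: 1-byte ID, 2-byte size, payload. */
static const uint8_t *find_block(const uint8_t *base, size_t total,
				 uint8_t id, uint16_t *size)
{
	size_t index = 0;

	while (index + 3 <= total) {
		uint8_t cur_id = base[index];
		uint16_t cur_size;

		memcpy(&cur_size, base + index + 1, 2); /* unaligned-safe */
		index += 3;
		if (index + cur_size > total)
			return NULL;		/* truncated block */
		if (cur_id == id) {
			*size = cur_size;
			return base + index;	/* points at the payload */
		}
		index += cur_size;
	}
	return NULL;
}

int main(void)
{
	/* Block 1 (2 bytes: aa bb), then block 9 (1 byte: cc). */
	const uint8_t bdb[] = { 1, 2, 0, 0xaa, 0xbb, 9, 1, 0, 0xcc };
	uint16_t size;
	const uint8_t *blk = find_block(bdb, sizeof(bdb), 9, &size);

	if (blk)
		printf("block 9: size %u, first byte %#x\n",
		       (unsigned int)size, blk[0]);
	return 0;
}
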
static const void *
find_section(const void *_bdb, int section_id)
{
@@ -52,14 +90,8 @@ find_section(const void *_bdb, int section_id)
/* walk the sections looking for section_id */
while (index + 3 < total) {
current_id = *(base + index);
- index++;
-
- current_size = *((const u16 *)(base + index));
- index += 2;
-
- /* The MIPI Sequence Block v3+ has a separate size field. */
- if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
- current_size = *((const u32 *)(base + index + 1));
+ current_size = _get_blocksize(base + index);
+ index += 3;
if (index + current_size > total)
return NULL;
@@ -73,16 +105,6 @@ find_section(const void *_bdb, int section_id)
return NULL;
}
-static u16
-get_blocksize(const void *p)
-{
- u16 *block_ptr, block_size;
-
- block_ptr = (u16 *)((char *)p - 2);
- block_size = *block_ptr;
- return block_size;
-}
-
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
const struct lvds_dvo_timing *dvo_timing)
@@ -675,84 +697,13 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
}
-static u8 *goto_next_sequence(u8 *data, int *size)
-{
- u16 len;
- int tmp = *size;
-
- if (--tmp < 0)
- return NULL;
-
- /* goto first element */
- data++;
- while (1) {
- switch (*data) {
- case MIPI_SEQ_ELEM_SEND_PKT:
- /*
- * skip by this element payload size
- * skip elem id, command flag and data type
- */
- tmp -= 5;
- if (tmp < 0)
- return NULL;
-
- data += 3;
- len = *((u16 *)data);
-
- tmp -= len;
- if (tmp < 0)
- return NULL;
-
- /* skip by len */
- data = data + 2 + len;
- break;
- case MIPI_SEQ_ELEM_DELAY:
- /* skip by elem id, and delay is 4 bytes */
- tmp -= 5;
- if (tmp < 0)
- return NULL;
-
- data += 5;
- break;
- case MIPI_SEQ_ELEM_GPIO:
- tmp -= 3;
- if (tmp < 0)
- return NULL;
-
- data += 3;
- break;
- default:
- DRM_ERROR("Unknown element\n");
- return NULL;
- }
-
- /* end of sequence ? */
- if (*data == 0)
- break;
- }
-
- /* goto next sequence or end of block byte */
- if (--tmp < 0)
- return NULL;
-
- data++;
-
- /* update amount of data left for the sequence block to be parsed */
- *size = tmp;
- return data;
-}
-
static void
-parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+parse_mipi_config(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
{
const struct bdb_mipi_config *start;
- const struct bdb_mipi_sequence *sequence;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
- u8 *data;
- const u8 *seq_data;
- int i, panel_id, seq_size;
- u16 block_size;
/* parse MIPI blocks only if LFP type is MIPI */
if (!dev_priv->vbt.has_mipi)
@@ -798,104 +749,233 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+}
- /* Check if we have sequence block as well */
- sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
- if (!sequence) {
- DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
- return;
+/* Find the sequence block and size for the given panel. */
+static const u8 *
+find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
+ u16 panel_id, u32 *seq_size)
+{
+ u32 total = get_blocksize(sequence);
+ const u8 *data = &sequence->data[0];
+ u8 current_id;
+ u32 current_size;
+ int header_size = sequence->version >= 3 ? 5 : 3;
+ int index = 0;
+ int i;
+
+ /* skip new block size */
+ if (sequence->version >= 3)
+ data += 4;
+
+ for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
+ if (index + header_size > total) {
+ DRM_ERROR("Invalid sequence block (header)\n");
+ return NULL;
+ }
+
+ current_id = *(data + index);
+ if (sequence->version >= 3)
+ current_size = *((const u32 *)(data + index + 1));
+ else
+ current_size = *((const u16 *)(data + index + 1));
+
+ index += header_size;
+
+ if (index + current_size > total) {
+ DRM_ERROR("Invalid sequence block\n");
+ return NULL;
+ }
+
+ if (current_id == panel_id) {
+ *seq_size = current_size;
+ return data + index;
+ }
+
+ index += current_size;
}
- /* Fail gracefully for forward incompatible sequence block. */
- if (sequence->version >= 3) {
- DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
- return;
+ DRM_ERROR("Sequence block detected but no valid configuration\n");
+
+ return NULL;
+}
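A minimal, illustrative walker over the per-panel records, matching the v1/v2 layout only (for v3+ the header grows to 5 bytes and the size field becomes a u32, as handled above); example_find_panel is a hypothetical name:

static const u8 *example_find_panel(const u8 *data, u32 total, u8 wanted)
{
	u32 index = 0;

	while (index + 3 <= total) {
		u8 id = data[index];
		u16 size = data[index + 1] | (data[index + 2] << 8);

		index += 3;
		if (index + size > total)
			return NULL;	/* truncated record */
		if (id == wanted)
			return data + index;
		index += size;
	}

	return NULL;
}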
+
+static int goto_next_sequence(const u8 *data, int index, int total)
+{
+ u16 len;
+
+ /* Skip Sequence Byte. */
+ for (index = index + 1; index < total; index += len) {
+ u8 operation_byte = *(data + index);
+ index++;
+
+ switch (operation_byte) {
+ case MIPI_SEQ_ELEM_END:
+ return index;
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ if (index + 4 > total)
+ return 0;
+
+ len = *((const u16 *)(data + index + 2)) + 4;
+ break;
+ case MIPI_SEQ_ELEM_DELAY:
+ len = 4;
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ len = 2;
+ break;
+ case MIPI_SEQ_ELEM_I2C:
+ if (index + 7 > total)
+ return 0;
+ len = *(data + index + 6) + 7;
+ break;
+ default:
+ DRM_ERROR("Unknown operation byte\n");
+ return 0;
+ }
}
- DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+ return 0;
+}
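The magic lengths above follow from the v1/v2 element encodings; a sketch of the assumed layouts (payload bytes after the 1-byte operation byte):

/*
 * MIPI_SEQ_ELEM_SEND_PKT: flags(1) type(1) len(2, LE) payload(len)
 *                         => read the u16 at index + 2, skip len + 4
 * MIPI_SEQ_ELEM_DELAY:    delay(4)
 * MIPI_SEQ_ELEM_GPIO:     gpio(1) value(1)
 * MIPI_SEQ_ELEM_I2C:      7 fixed bytes with the payload byte count
 *                         at offset 6 => len = data[index + 6] + 7
 */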
- block_size = get_blocksize(sequence);
+static int goto_next_sequence_v3(const u8 *data, int index, int total)
+{
+ int seq_end;
+ u16 len;
+ u32 size_of_sequence;
/*
- * parse the sequence block for individual sequences
+ * Could skip sequence based on Size of Sequence alone, but also do some
+ * checking on the structure.
*/
- dev_priv->vbt.dsi.seq_version = sequence->version;
+ if (total < 5) {
+ DRM_ERROR("Too small sequence size\n");
+ return 0;
+ }
- seq_data = &sequence->data[0];
+ /* Skip Sequence Byte. */
+ index++;
/*
- * sequence block is variable length and hence we need to parse and
- * get the sequence data for specific panel id
+ * Size of Sequence. Excludes the Sequence Byte and the size itself,
+ * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
+ * byte.
*/
- for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
- panel_id = *seq_data;
- seq_size = *((u16 *) (seq_data + 1));
- if (panel_id == panel_type)
- break;
+ size_of_sequence = *((const uint32_t *)(data + index));
+ index += 4;
- /* skip the sequence including seq header of 3 bytes */
- seq_data = seq_data + 3 + seq_size;
- if ((seq_data - &sequence->data[0]) > block_size) {
- DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
- return;
+ seq_end = index + size_of_sequence;
+ if (seq_end > total) {
+ DRM_ERROR("Invalid sequence size\n");
+ return 0;
+ }
+
+ for (; index < total; index += len) {
+ u8 operation_byte = *(data + index);
+ index++;
+
+ if (operation_byte == MIPI_SEQ_ELEM_END) {
+ if (index != seq_end) {
+ DRM_ERROR("Invalid element structure\n");
+ return 0;
+ }
+ return index;
+ }
+
+ len = *(data + index);
+ index++;
+
+ /*
+ * FIXME: Would be nice to check elements like for v1/v2 in
+ * goto_next_sequence() above.
+ */
+ switch (operation_byte) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ case MIPI_SEQ_ELEM_DELAY:
+ case MIPI_SEQ_ELEM_GPIO:
+ case MIPI_SEQ_ELEM_I2C:
+ case MIPI_SEQ_ELEM_SPI:
+ case MIPI_SEQ_ELEM_PMIC:
+ break;
+ default:
+ DRM_ERROR("Unknown operation byte %u\n",
+ operation_byte);
+ break;
}
}
- if (i == MAX_MIPI_CONFIGURATIONS) {
- DRM_ERROR("Sequence block detected but no valid configuration\n");
+ return 0;
+}
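In v3+ every element is self-describing, which is why the default: case above only warns instead of bailing out; a sketch of the assumed framing:

/*
 * element:  op(1) len(1) payload(len)
 * sequence: seq_byte(1) size_of_sequence(4, LE) elements...
 *           MIPI_SEQ_ELEM_END
 *
 * Unknown operation bytes can therefore be skipped by length alone.
 */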
+
+static void
+parse_mipi_sequence(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_mipi_sequence *sequence;
+ const u8 *seq_data;
+ u32 seq_size;
+ u8 *data;
+ int index = 0;
+
+ /* Only our generic panel driver uses the sequence block. */
+ if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+ return;
+
+ sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+ if (!sequence) {
+ DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
return;
}
- /* check if found sequence is completely within the sequence block
- * just being paranoid */
- if (seq_size > block_size) {
- DRM_ERROR("Corrupted sequence/size, bailing out\n");
+ /* Fail gracefully for forward incompatible sequence block. */
+ if (sequence->version >= 4) {
+ DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
+ sequence->version);
return;
}
- /* skip the panel id(1 byte) and seq size(2 bytes) */
- dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
- if (!dev_priv->vbt.dsi.data)
+ DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
+
+ seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
+ if (!seq_data)
return;
- /*
- * loop into the sequence data and split into multiple sequneces
- * There are only 5 types of sequences as of now
- */
- data = dev_priv->vbt.dsi.data;
- dev_priv->vbt.dsi.size = seq_size;
+ data = kmemdup(seq_data, seq_size, GFP_KERNEL);
+ if (!data)
+ return;
- /* two consecutive 0x00 indicate end of all sequences */
- while (1) {
- int seq_id = *data;
- if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
- dev_priv->vbt.dsi.sequence[seq_id] = data;
- DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
- } else {
- DRM_ERROR("undefined sequence\n");
+ /* Parse the sequences, store pointers to each sequence. */
+ for (;;) {
+ u8 seq_id = *(data + index);
+ if (seq_id == MIPI_SEQ_END)
+ break;
+
+ if (seq_id >= MIPI_SEQ_MAX) {
+ DRM_ERROR("Unknown sequence %u\n", seq_id);
goto err;
}
- /* partial parsing to skip elements */
- data = goto_next_sequence(data, &seq_size);
+ dev_priv->vbt.dsi.sequence[seq_id] = data + index;
- if (data == NULL) {
- DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
+ if (sequence->version >= 3)
+ index = goto_next_sequence_v3(data, index, seq_size);
+ else
+ index = goto_next_sequence(data, index, seq_size);
+ if (!index) {
+ DRM_ERROR("Invalid sequence %u\n", seq_id);
goto err;
}
-
- if (*data == 0)
- break; /* end of sequence reached */
}
- DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
+ dev_priv->vbt.dsi.data = data;
+ dev_priv->vbt.dsi.size = seq_size;
+ dev_priv->vbt.dsi.seq_version = sequence->version;
+
+ DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
return;
-err:
- kfree(dev_priv->vbt.dsi.data);
- dev_priv->vbt.dsi.data = NULL;
- /* error during parsing so set all pointers to null
- * because of partial parsing */
+err:
+ kfree(data);
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
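Sketch of how a stored sequence pointer would be consumed later; example_exec_sequence is a hypothetical helper (the real generic panel driver dispatches per operation byte):

static void example_exec_sequence(struct drm_i915_private *dev_priv,
				  enum mipi_seq seq_id)
{
	const u8 *data = dev_priv->vbt.dsi.sequence[seq_id];

	if (!data)
		return;	/* this panel does not define the sequence */

	/* data[0] == seq_id; elements follow until MIPI_SEQ_ELEM_END */
}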
@@ -1088,7 +1168,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
- if (bdb->version < 195) {
+ if (bdb->version < 106) {
+ expected_size = 22;
+ } else if (bdb->version < 109) {
+ expected_size = 27;
+ } else if (bdb->version < 195) {
+ BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
expected_size = sizeof(struct old_child_dev_config);
} else if (bdb->version == 195) {
expected_size = 37;
@@ -1101,18 +1186,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
bdb->version, expected_size);
}
- /* The legacy sized child device config is the minimum we need. */
- if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
- DRM_ERROR("Child device config size %u is too small.\n",
- p_defs->child_dev_size);
- return;
- }
-
/* Flag an error for unexpected size, but continue anyway. */
if (p_defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
p_defs->child_dev_size, expected_size, bdb->version);
+ /* The legacy sized child device config is the minimum we need. */
+ if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+ DRM_DEBUG_KMS("Child device config size %u is too small.\n",
+ p_defs->child_dev_size);
+ return;
+ }
+
/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
/* get the number of child device */
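For reference, the expected child device config sizes encoded above (later VBT versions are handled in the unchanged context that follows):

/*
 * version < 106:   22 bytes
 * version < 109:   27 bytes
 * version < 195:   33 bytes (struct old_child_dev_config)
 * version == 195:  37 bytes
 */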
@@ -1285,7 +1370,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
- * @dev: DRM device
+ * @dev_priv: i915 device instance
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
@@ -1337,7 +1422,8 @@ intel_bios_init(struct drm_i915_private *dev_priv)
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
- parse_mipi(dev_priv, bdb);
+ parse_mipi_config(dev_priv, bdb);
+ parse_mipi_sequence(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
if (bios)
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 54eac1003a1e..350d4e0f75a4 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -25,25 +25,43 @@
*
*/
-#ifndef _I830_BIOS_H_
-#define _I830_BIOS_H_
-
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
+
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature: VBT signature, always starts with "$VBT"
+ * @version: Version of this structure
+ * @header_size: Size of this structure
+ * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum: Checksum
+ * @reserved0: Reserved
+ * @bdb_offset: Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset: Offsets of add-in data blocks from beginning of VBT
+ */
struct vbt_header {
- u8 signature[20]; /**< Always starts with 'VBT$' */
- u16 version; /**< decimal */
- u16 header_size; /**< in bytes */
- u16 vbt_size; /**< in bytes */
+ u8 signature[20];
+ u16 version;
+ u16 header_size;
+ u16 vbt_size;
u8 vbt_checksum;
u8 reserved0;
- u32 bdb_offset; /**< from beginning of VBT */
- u32 aim_offset[4]; /**< from beginning of VBT */
+ u32 bdb_offset;
+ u32 aim_offset[4];
} __packed;
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature: BDB signature "BIOS_DATA_BLOCK"
+ * @version: Version of the data block definitions
+ * @header_size: Size of this structure
+ * @bdb_size: Size of BDB (BDB Header and data blocks)
+ */
struct bdb_header {
- u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
- u16 version; /**< decimal */
- u16 header_size; /**< in bytes */
- u16 bdb_size; /**< in bytes */
+ u8 signature[16];
+ u16 version;
+ u16 header_size;
+ u16 bdb_size;
} __packed;
/* strictly speaking, this is a "skip" block, but it has interesting info */
@@ -936,21 +954,29 @@ struct bdb_mipi_sequence {
/* MIPI Sequence Block definitions */
enum mipi_seq {
- MIPI_SEQ_UNDEFINED = 0,
+ MIPI_SEQ_END = 0,
MIPI_SEQ_ASSERT_RESET,
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
+ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
+ MIPI_SEQ_POWER_ON, /* sequence block v3+ */
+ MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
MIPI_SEQ_MAX
};
enum mipi_seq_element {
- MIPI_SEQ_ELEM_UNDEFINED = 0,
+ MIPI_SEQ_ELEM_END = 0,
MIPI_SEQ_ELEM_SEND_PKT,
MIPI_SEQ_ELEM_DELAY,
MIPI_SEQ_ELEM_GPIO,
- MIPI_SEQ_ELEM_STATUS,
+ MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
+ MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
MIPI_SEQ_ELEM_MAX
};
@@ -965,4 +991,4 @@ enum mipi_gpio_pin_index {
MIPI_GPIO_MAX
};
-#endif /* _I830_BIOS_H_ */
+#endif /* _INTEL_BIOS_H_ */
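Sketch of how the two headers chain together when locating the BDB; example_get_bdb is a hypothetical helper (find_vbt() in intel_bios.c does this for real, with full signature and offset validation):

static const struct bdb_header *example_get_bdb(const void *vbt_base)
{
	const struct vbt_header *vbt = vbt_base;

	if (memcmp(vbt->signature, "$VBT", 4) != 0)
		return NULL;

	return (const void *)((const u8 *)vbt + vbt->bdb_offset);
}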
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index a7b4a524fadd..0364292367b1 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -213,9 +213,7 @@ static void pch_post_disable_crt(struct intel_encoder *encoder)
static void intel_enable_crt(struct intel_encoder *encoder)
{
- struct intel_crt *crt = intel_encoder_to_crt(encoder);
-
- intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+ intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
}
static enum drm_mode_status
@@ -223,6 +221,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
+ int max_dotclk = to_i915(dev)->max_dotclk_freq;
int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -238,6 +237,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
+ if (mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
if (HAS_PCH_LPT(dev) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
@@ -255,8 +257,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev))
+ if (HAS_PCH_LPT(dev)) {
+ if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+ DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+ return false;
+ }
+
pipe_config->pipe_bpp = 24;
+ }
/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) {
@@ -476,11 +484,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt)
+intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
uint32_t save_bclrpat;
uint32_t save_vtotal;
uint32_t vtotal, vactive;
@@ -649,7 +656,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else if (INTEL_INFO(dev)->gen < 4)
- status = intel_crt_load_detect(crt);
+ status = intel_crt_load_detect(crt,
+ to_intel_crtc(connector->state->crtc)->pipe);
else
status = connector_status_unknown;
intel_release_load_detect_pipe(connector, &tmp, &ctx);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 647d85e77c2f..902054efb902 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -44,6 +44,8 @@
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
+
MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
@@ -177,7 +179,8 @@ static const struct stepping_info kbl_stepping_info[] = {
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
- {'G', '0'}, {'H', '0'}, {'I', '0'}
+ {'G', '0'}, {'H', '0'}, {'I', '0'},
+ {'J', '0'}, {'K', '0'}
};
static const struct stepping_info bxt_stepping_info[] = {
@@ -217,19 +220,19 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de
* Every time the display comes back from a low power state, this function is
* called to copy the firmware from internal memory to registers.
*/
-void intel_csr_load_program(struct drm_i915_private *dev_priv)
+bool intel_csr_load_program(struct drm_i915_private *dev_priv)
{
u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
if (!IS_GEN9(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
- return;
+ return false;
}
if (!dev_priv->csr.dmc_payload) {
DRM_ERROR("Tried to program CSR with empty payload\n");
- return;
+ return false;
}
fw_size = dev_priv->csr.dmc_fw_size;
@@ -242,6 +245,8 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
}
dev_priv->csr.dc_state = 0;
+
+ return true;
}
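With the new bool return, callers can react to a failed DMC reload instead of continuing blindly; a hypothetical call site:

	if (!intel_csr_load_program(dev_priv))
		DRM_DEBUG_KMS("DMC reload failed, leaving DC states disabled\n");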
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
@@ -280,10 +285,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
- if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
+ if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+ csr->version < SKL_CSR_VERSION_REQUIRED) {
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
- " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
+ " [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
@@ -401,7 +407,10 @@ out:
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
- DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
+ dev_notice(dev_priv->dev->dev,
+ "Failed to load DMC firmware"
+ " [" FIRMWARE_URL "],"
+ " disabling runtime power management.\n");
}
release_firmware(fw);
@@ -423,7 +432,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 0f3df2c39f7c..96ffcc541e17 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -133,38 +133,38 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 },
{ 0x00007011, 0x00000088, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80009010, 0x000000C0, 0x1 },
{ 0x00002016, 0x0000009B, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80007011, 0x000000C0, 0x1 },
{ 0x00002016, 0x000000DF, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80005012, 0x000000C0, 0x1 },
};
/* Skylake U */
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x00007011, 0x00000087, 0x0 },
- { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80009010, 0x000000C0, 0x1 },
{ 0x0000201B, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
- { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80005012, 0x000000C0, 0x1 },
+ { 0x80007011, 0x000000C0, 0x1 },
{ 0x00002016, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x80005012, 0x000000C0, 0x1 },
};
/* Skylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x00007011, 0x00000087, 0x0 },
- { 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
+ { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80009010, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000009D, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
- { 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
+ { 0x80005012, 0x000000C0, 0x3 },
+ { 0x80007011, 0x000000C0, 0x3 },
{ 0x00000018, 0x00000088, 0x0 },
- { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
+ { 0x80005012, 0x000000C0, 0x3 },
};
/*
@@ -226,26 +226,26 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00000018, 0x00000098, 0x0 },
{ 0x00004013, 0x00000088, 0x0 },
- { 0x00006012, 0x00000087, 0x0 },
+ { 0x80006012, 0x000000CD, 0x1 },
{ 0x00000018, 0x000000DF, 0x0 },
- { 0x00003015, 0x00000087, 0x0 }, /* Default */
- { 0x00003015, 0x000000C7, 0x0 },
- { 0x00000018, 0x000000C7, 0x0 },
+ { 0x80003015, 0x000000CD, 0x1 }, /* Default */
+ { 0x80003015, 0x000000C0, 0x1 },
+ { 0x80000018, 0x000000C0, 0x1 },
};
/* Skylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
- { 0x00007011, 0x00000084, 0x0 },
+ { 0x80007011, 0x000000CB, 0x3 },
{ 0x00000018, 0x000000A4, 0x0 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x00004013, 0x00000080, 0x0 },
- { 0x00006013, 0x000000C7, 0x0 },
+ { 0x80006013, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000008A, 0x0 },
- { 0x00003015, 0x000000C7, 0x0 }, /* Default */
- { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */
- { 0x00000018, 0x000000C7, 0x0 },
+ { 0x80003015, 0x000000C0, 0x3 }, /* Default */
+ { 0x80003015, 0x000000C0, 0x3 },
+ { 0x80000018, 0x000000C0, 0x3 },
};
struct bxt_ddi_buf_trans {
@@ -301,8 +301,8 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
};
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
- enum port port, int type);
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
+ u32 level, enum port port, int type);
static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
struct intel_digital_port **dig_port,
@@ -342,81 +342,50 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
return port;
}
-static bool
-intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
-{
- return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
-}
-
-static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
- int *n_entries)
+static const struct ddi_buf_trans *
+skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- const struct ddi_buf_trans *ddi_translations;
-
- if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
- ddi_translations = skl_y_ddi_translations_dp;
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
- } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
- ddi_translations = skl_u_ddi_translations_dp;
+ return skl_y_ddi_translations_dp;
+ } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+ return skl_u_ddi_translations_dp;
} else {
- ddi_translations = skl_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ return skl_ddi_translations_dp;
}
-
- return ddi_translations;
}
-static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
- int *n_entries)
+static const struct ddi_buf_trans *
+skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- const struct ddi_buf_trans *ddi_translations;
-
- if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
- if (dev_priv->edp_low_vswing) {
- ddi_translations = skl_y_ddi_translations_edp;
+ if (dev_priv->edp_low_vswing) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
- } else {
- ddi_translations = skl_y_ddi_translations_dp;
- *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
- }
- } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
- if (dev_priv->edp_low_vswing) {
- ddi_translations = skl_u_ddi_translations_edp;
+ return skl_y_ddi_translations_edp;
+ } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+ return skl_u_ddi_translations_edp;
} else {
- ddi_translations = skl_u_ddi_translations_dp;
- *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
- }
- } else {
- if (dev_priv->edp_low_vswing) {
- ddi_translations = skl_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
- } else {
- ddi_translations = skl_ddi_translations_dp;
- *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+ return skl_ddi_translations_edp;
}
}
- return ddi_translations;
+ return skl_get_buf_trans_dp(dev_priv, n_entries);
}
static const struct ddi_buf_trans *
-skl_get_buf_trans_hdmi(struct drm_device *dev,
- int *n_entries)
+skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- const struct ddi_buf_trans *ddi_translations;
-
- if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
- ddi_translations = skl_y_ddi_translations_hdmi;
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+ return skl_y_ddi_translations_hdmi;
} else {
- ddi_translations = skl_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+ return skl_ddi_translations_hdmi;
}
-
- return ddi_translations;
}
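A hypothetical caller of the refactored getters, showing the intended pattern (level and dev_priv are assumed locals):

	int n_entries;
	const struct ddi_buf_trans *trans =
		skl_get_buf_trans_hdmi(dev_priv, &n_entries);

	if (level >= n_entries)
		level = n_entries - 1;	/* clamp to the table */
	/* then program DDI_BUF_TRANS with trans[level] */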
/*
@@ -426,51 +395,69 @@ skl_get_buf_trans_hdmi(struct drm_device *dev,
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
- bool supports_hdmi)
+void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size;
- int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+ int hdmi_level;
+ enum port port;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
const struct ddi_buf_trans *ddi_translations_edp;
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
- if (IS_BROXTON(dev)) {
- if (!supports_hdmi)
+ port = intel_ddi_get_encoder_port(encoder);
+ hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+ if (IS_BROXTON(dev_priv)) {
+ if (encoder->type != INTEL_OUTPUT_HDMI)
return;
/* Vswing programming for HDMI */
- bxt_ddi_vswing_sequence(dev, hdmi_level, port,
+ bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
INTEL_OUTPUT_HDMI);
return;
- } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ }
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
- skl_get_buf_trans_dp(dev, &n_dp_entries);
+ skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
- skl_get_buf_trans_edp(dev, &n_edp_entries);
+ skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
ddi_translations_hdmi =
- skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+ skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
hdmi_default_entry = 8;
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
iboost_bit = 1<<31;
- } else if (IS_BROADWELL(dev)) {
+
+ if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+ port != PORT_A && port != PORT_E &&
+ n_edp_entries > 9))
+ n_edp_entries = 9;
+ } else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
- ddi_translations_edp = bdw_ddi_translations_edp;
+
+ if (dev_priv->edp_low_vswing) {
+ ddi_translations_edp = bdw_ddi_translations_edp;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ } else {
+ ddi_translations_edp = bdw_ddi_translations_dp;
+ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ }
+
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
@@ -490,30 +477,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
hdmi_default_entry = 7;
}
- switch (port) {
- case PORT_A:
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
break;
- case PORT_B:
- case PORT_C:
+ case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_HDMI:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
- case PORT_D:
- if (intel_dp_is_edp(dev, PORT_D)) {
- ddi_translations = ddi_translations_edp;
- size = n_edp_entries;
- } else {
- ddi_translations = ddi_translations_dp;
- size = n_dp_entries;
- }
- break;
- case PORT_E:
- if (ddi_translations_fdi)
- ddi_translations = ddi_translations_fdi;
- else
- ddi_translations = ddi_translations_dp;
+ case INTEL_OUTPUT_ANALOG:
+ ddi_translations = ddi_translations_fdi;
size = n_dp_entries;
break;
default:
@@ -527,7 +502,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
ddi_translations[i].trans2);
}
- if (!supports_hdmi)
+ if (encoder->type != INTEL_OUTPUT_HDMI)
return;
/* Choose a good default if VBT is badly populated */
@@ -542,37 +517,6 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
ddi_translations_hdmi[hdmi_level].trans2);
}
-/* Program DDI buffers translations for DP. By default, program ports A-D in DP
- * mode and port E for FDI.
- */
-void intel_prepare_ddi(struct drm_device *dev)
-{
- struct intel_encoder *intel_encoder;
- bool visited[I915_MAX_PORTS] = { 0, };
-
- if (!HAS_DDI(dev))
- return;
-
- for_each_intel_encoder(dev, intel_encoder) {
- struct intel_digital_port *intel_dig_port;
- enum port port;
- bool supports_hdmi;
-
- if (intel_encoder->type == INTEL_OUTPUT_DSI)
- continue;
-
- ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
- if (visited[port])
- continue;
-
- supports_hdmi = intel_dig_port &&
- intel_dig_port_supports_hdmi(intel_dig_port);
-
- intel_prepare_ddi_buffers(dev, port, supports_hdmi);
- visited[port] = true;
- }
-}
-
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -601,8 +545,14 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
u32 temp, i, rx_ctl_val;
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
+ intel_prepare_ddi_buffer(encoder);
+ }
+
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
* mode set "sequence for CRT port" document:
* - TP1 to TP2 time with the default value
@@ -1604,8 +1554,10 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
}
cfgcr1 = cfgcr2 = 0;
- } else /* eDP */
+ } else if (intel_encoder->type == INTEL_OUTPUT_EDP) {
return true;
+ } else
+ return false;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -2109,10 +2061,9 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
TRANS_CLK_SEL_DISABLED);
}
-static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
- enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+ u32 level, enum port port, int type)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
uint8_t dp_iboost, hdmi_iboost;
@@ -2127,21 +2078,26 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
if (dp_iboost) {
iboost = dp_iboost;
} else {
- ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+ ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
if (dp_iboost) {
iboost = dp_iboost;
} else {
- ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+ ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
+
+ if (WARN_ON(port != PORT_A &&
+ port != PORT_E && n_entries > 9))
+ n_entries = 9;
+
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_HDMI) {
if (hdmi_iboost) {
iboost = hdmi_iboost;
} else {
- ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+ ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else {
@@ -2166,10 +2122,9 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
}
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
- enum port port, int type)
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
+ u32 level, enum port port, int type)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
uint32_t val;
@@ -2284,7 +2239,7 @@ static uint32_t translate_signal_level(int signal_levels)
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dport->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
@@ -2294,10 +2249,10 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
level = translate_signal_level(signal_levels);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
- skl_ddi_set_iboost(dev, level, port, encoder->type);
- else if (IS_BROXTON(dev))
- bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+ else if (IS_BROXTON(dev_priv))
+ bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
return DDI_BUF_TRANS_SELECT(level);
}
@@ -2349,12 +2304,12 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
- int hdmi_level;
+
+ intel_prepare_ddi_buffer(intel_encoder);
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2372,17 +2327,11 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
- if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
+ if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- if (IS_BROXTON(dev)) {
- hdmi_level = dev_priv->vbt.
- ddi_port_info[port].hdmi_level_shift;
- bxt_ddi_vswing_sequence(dev, hdmi_level, port,
- INTEL_OUTPUT_HDMI);
- }
intel_hdmi->set_infoframes(encoder,
crtc->config->has_hdmi_sink,
&crtc->config->base.adjusted_mode);
@@ -3157,23 +3106,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
}
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc)
-{
- u32 temp;
-
- if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
- temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
- if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
- return true;
- }
-
- return false;
-}
-
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -3234,8 +3166,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
+ if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+ pipe_config->has_audio = true;
+ }
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
@@ -3260,12 +3195,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
}
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
- /* HDMI has nothing special to destroy, so we can go with this. */
- intel_dp_encoder_destroy(encoder);
-}
-
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -3284,7 +3213,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .destroy = intel_ddi_destroy,
+ .reset = intel_dp_encoder_reset,
+ .destroy = intel_dp_encoder_destroy,
};
static struct intel_connector *
@@ -3329,6 +3259,33 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp;
+ int max_lanes;
+
+ if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
+ switch (port) {
+ case PORT_A:
+ max_lanes = 4;
+ break;
+ case PORT_E:
+ max_lanes = 0;
+ break;
+ default:
+ max_lanes = 4;
+ break;
+ }
+ } else {
+ switch (port) {
+ case PORT_A:
+ max_lanes = 2;
+ break;
+ case PORT_E:
+ max_lanes = 2;
+ break;
+ default:
+ max_lanes = 4;
+ break;
+ }
+ }
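A note on the asymmetry encoded above (editor's reading of the lane sharing, not from the patch text):

	/*
	 * When DDI_A_4_LANES is set, port A owns all four shared A/E
	 * lanes and port E gets none; otherwise A and E get two each.
	 * Ports B-D always have four lanes of their own.
	 */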
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
@@ -3356,9 +3313,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
intel_dig_port->port = port;
- dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
@@ -3374,9 +3331,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
+ max_lanes = 4;
}
}
+ intel_dig_port->max_lanes = max_lanes;
+
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 46947fffd599..0104a06d01fd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -85,8 +85,6 @@ static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
-
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
@@ -1152,11 +1150,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
}
}
-static const char *state_string(bool enabled)
-{
- return enabled ? "on" : "off";
-}
-
/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -1168,7 +1161,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
cur_state = !!(val & DPLL_VCO_ENABLE);
I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
/* XXX: the dsi pll is shared between MIPI DSI ports */
@@ -1184,7 +1177,7 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
cur_state = val & DSI_PLL_VCO_EN;
I915_STATE_WARN(cur_state != state,
"DSI PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
@@ -1208,14 +1201,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (WARN (!pll,
- "asserting DPLL %s with no DPLL\n", state_string(state)))
+ if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
return;
cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
I915_STATE_WARN(cur_state != state,
"%s assertion failure (expected %s, current %s)\n",
- pll->name, state_string(state), state_string(cur_state));
+ pll->name, onoff(state), onoff(cur_state));
}
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
@@ -1235,7 +1227,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
}
I915_STATE_WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
@@ -1250,7 +1242,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
cur_state = !!(val & FDI_RX_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
@@ -1282,7 +1274,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
cur_state = !!(val & FDI_RX_PLL_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1340,7 +1332,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
I915_STATE_WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), state_string(state), state_string(cur_state));
+ pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
@@ -1370,7 +1362,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
I915_STATE_WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), state_string(state), state_string(cur_state));
+ pipe_name(pipe), onoff(state), onoff(cur_state));
}
static void assert_plane(struct drm_i915_private *dev_priv,
@@ -1383,7 +1375,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
I915_STATE_WARN(cur_state != state,
"plane %c assertion failure (expected %s, current %s)\n",
- plane_name(plane), state_string(state), state_string(cur_state));
+ plane_name(plane), onoff(state), onoff(cur_state));
}
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
@@ -2156,6 +2148,17 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
I915_WRITE(reg, val | PIPECONF_ENABLE);
POSTING_READ(reg);
+
+ /*
+ * Until the pipe starts DSL will read as 0, which would cause
+ * an apparent vblank timestamp jump, which messes up also the
+ * frame count when it's derived from the timestamps. So let's
+ * wait for the pipe to start properly before we call
+ * drm_crtc_vblank_on()
+ */
+ if (dev->max_vblank_count == 0 &&
+ wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
+ DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}
/**
@@ -2217,67 +2220,75 @@ static bool need_vtd_wa(struct drm_device *dev)
return false;
}
-unsigned int
-intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
- uint64_t fb_format_modifier, unsigned int plane)
+static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
- unsigned int tile_height;
- uint32_t pixel_bytes;
+ return IS_GEN2(dev_priv) ? 2048 : 4096;
+}
- switch (fb_format_modifier) {
+static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, unsigned int cpp)
+{
+ switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
- tile_height = 1;
- break;
+ return cpp;
case I915_FORMAT_MOD_X_TILED:
- tile_height = IS_GEN2(dev) ? 16 : 8;
- break;
+ if (IS_GEN2(dev_priv))
+ return 128;
+ else
+ return 512;
case I915_FORMAT_MOD_Y_TILED:
- tile_height = 32;
- break;
+ if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
+ return 128;
+ else
+ return 512;
case I915_FORMAT_MOD_Yf_TILED:
- pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
- switch (pixel_bytes) {
- default:
+ switch (cpp) {
case 1:
- tile_height = 64;
- break;
+ return 64;
case 2:
case 4:
- tile_height = 32;
- break;
+ return 128;
case 8:
- tile_height = 16;
- break;
case 16:
- WARN_ONCE(1,
- "128-bit pixels are not supported for display!");
- tile_height = 16;
- break;
+ return 256;
+ default:
+ MISSING_CASE(cpp);
+ return cpp;
}
break;
default:
- MISSING_CASE(fb_format_modifier);
- tile_height = 1;
- break;
+ MISSING_CASE(fb_modifier);
+ return cpp;
}
+}
- return tile_height;
+unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, unsigned int cpp)
+{
+ if (fb_modifier == DRM_FORMAT_MOD_NONE)
+ return 1;
+ else
+ return intel_tile_size(dev_priv) /
+ intel_tile_width(dev_priv, fb_modifier, cpp);
}
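A worked example of the tile geometry helpers, assuming gen9, X-tiling and 32bpp (cpp = 4):

/*
 * intel_tile_size()   = 4096 bytes
 * intel_tile_width()  = 512 bytes  (128 pixels per tile row)
 * intel_tile_height() = 4096 / 512 = 8 rows
 */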
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
- uint32_t pixel_format, uint64_t fb_format_modifier)
+ uint32_t pixel_format, uint64_t fb_modifier)
{
- return ALIGN(height, intel_tile_height(dev, pixel_format,
- fb_format_modifier, 0));
+ unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
+ unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
+
+ return ALIGN(height, tile_height);
}
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
const struct drm_plane_state *plane_state)
{
- struct intel_rotation_info *info = &view->params.rotation_info;
- unsigned int tile_height, tile_pitch;
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ struct intel_rotation_info *info = &view->params.rotated;
+ unsigned int tile_size, tile_width, tile_height, cpp;
*view = i915_ggtt_view_normal;
@@ -2295,26 +2306,28 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
info->uv_offset = fb->offsets[1];
info->fb_modifier = fb->modifier[0];
- tile_height = intel_tile_height(fb->dev, fb->pixel_format,
- fb->modifier[0], 0);
- tile_pitch = PAGE_SIZE / tile_height;
- info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+ tile_size = intel_tile_size(dev_priv);
+
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
+ tile_height = tile_size / tile_width;
+
+ info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
- info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+ info->size = info->width_pages * info->height_pages * tile_size;
if (info->pixel_format == DRM_FORMAT_NV12) {
- tile_height = intel_tile_height(fb->dev, fb->pixel_format,
- fb->modifier[0], 1);
- tile_pitch = PAGE_SIZE / tile_height;
- info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
- info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
- tile_height);
- info->size_uv = info->width_pages_uv * info->height_pages_uv *
- PAGE_SIZE;
+ cpp = drm_format_plane_cpp(fb->pixel_format, 1);
+ tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
+ tile_height = tile_size / tile_width;
+
+ info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
+ info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
+ info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
}
}
-static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return 256 * 1024;
@@ -2327,6 +2340,25 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
return 0;
}
+static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier)
+{
+ switch (fb_modifier) {
+ case DRM_FORMAT_MOD_NONE:
+ return intel_linear_alignment(dev_priv);
+ case I915_FORMAT_MOD_X_TILED:
+ if (INTEL_INFO(dev_priv)->gen >= 9)
+ return 256 * 1024;
+ return 0;
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return 1 * 1024 * 1024;
+ default:
+ MISSING_CASE(fb_modifier);
+ return 0;
+ }
+}
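For orientation, the alignments this helper returns (per the cases above):

/*
 * gen9:        linear / X-tiled: 256 KiB,  Y / Yf-tiled: 1 MiB
 * older gens:  X-tiled: 0 (pin() aligns as required by the fence)
 */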
+
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
@@ -2341,29 +2373,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- switch (fb->modifier[0]) {
- case DRM_FORMAT_MOD_NONE:
- alignment = intel_linear_alignment(dev_priv);
- break;
- case I915_FORMAT_MOD_X_TILED:
- if (INTEL_INFO(dev)->gen >= 9)
- alignment = 256 * 1024;
- else {
- /* pin() will align the object as required by fence */
- alignment = 0;
- }
- break;
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
- "Y tiling bo slipped through, driver bug!\n"))
- return -EINVAL;
- alignment = 1 * 1024 * 1024;
- break;
- default:
- MISSING_CASE(fb->modifier[0]);
- return -EINVAL;
- }
+ alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
intel_fill_fb_ggtt_view(&view, fb, plane_state);
@@ -2441,22 +2451,27 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
- int *x, int *y,
- unsigned int tiling_mode,
- unsigned int cpp,
- unsigned int pitch)
-{
- if (tiling_mode != I915_TILING_NONE) {
+u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
+ int *x, int *y,
+ uint64_t fb_modifier,
+ unsigned int cpp,
+ unsigned int pitch)
+{
+ if (fb_modifier != DRM_FORMAT_MOD_NONE) {
+ unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles;
- tile_rows = *y / 8;
- *y %= 8;
+ tile_size = intel_tile_size(dev_priv);
+ tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
+ tile_height = tile_size / tile_width;
- tiles = *x / (512/cpp);
- *x %= 512/cpp;
+ tile_rows = *y / tile_height;
+ *y %= tile_height;
- return tile_rows * pitch * 8 + tiles * 4096;
+ tiles = *x / (tile_width/cpp);
+ *x %= tile_width/cpp;
+
+ return tile_rows * pitch * tile_height + tiles * tile_size;
} else {
unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
unsigned int offset;
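A worked example of the tiled branch above, assuming a 4096-byte tile, 512-byte tile width, cpp = 4, pitch = 8192, x = 200, y = 30:

/*
 * tile_height = 4096 / 512              = 8
 * tile_rows   = 30 / 8                  = 3,  y %= 8   -> y = 6
 * tiles       = 200 / (512 / 4)         = 1,  x %= 128 -> x = 72
 * offset      = 3 * 8192 * 8 + 1 * 4096 = 200704 bytes
 */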
@@ -2539,12 +2554,16 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
return false;
+ mutex_lock(&dev->struct_mutex);
+
obj = i915_gem_object_create_stolen_for_preallocated(dev,
base_aligned,
base_aligned,
size_aligned);
- if (!obj)
+ if (!obj) {
+ mutex_unlock(&dev->struct_mutex);
return false;
+ }
obj->tiling_mode = plane_config->tiling;
if (obj->tiling_mode == I915_TILING_X)
@@ -2557,12 +2576,12 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.modifier[0] = fb->modifier[0];
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
- mutex_lock(&dev->struct_mutex);
if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
&mode_cmd, obj)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
+
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
@@ -2601,6 +2620,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_plane_state *plane_state = primary->state;
struct drm_crtc_state *crtc_state = intel_crtc->base.state;
struct intel_plane *intel_plane = to_intel_plane(primary);
+ struct intel_plane_state *intel_state =
+ to_intel_plane_state(plane_state);
struct drm_framebuffer *fb;
if (!plane_config->fb)
@@ -2662,6 +2683,15 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
+ intel_state->src.x1 = plane_state->src_x;
+ intel_state->src.y1 = plane_state->src_y;
+ intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
+ intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
+ intel_state->dst.x1 = plane_state->crtc_x;
+ intel_state->dst.y1 = plane_state->crtc_y;
+ intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
+ intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+
obj = intel_fb_obj(fb);
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
@@ -2673,37 +2703,22 @@ valid_fb:
obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
-static void i9xx_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void i9xx_update_primary_plane(struct drm_plane *primary,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = primary->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *primary = crtc->primary;
- bool visible = to_intel_plane_state(primary->state)->visible;
- struct drm_i915_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
- unsigned long linear_offset;
+ u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
- int pixel_size;
-
- if (!visible || !fb) {
- I915_WRITE(reg, 0);
- if (INTEL_INFO(dev)->gen >= 4)
- I915_WRITE(DSPSURF(plane), 0);
- else
- I915_WRITE(DSPADDR(plane), 0);
- POSTING_READ(reg);
- return;
- }
-
- obj = intel_fb_obj(fb);
- if (WARN_ON(obj == NULL))
- return;
-
- pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int x = plane_state->src.x1 >> 16;
+ int y = plane_state->src.y1 >> 16;
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -2717,13 +2732,13 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config->pipe_src_h - 1) << 16) |
- (intel_crtc->config->pipe_src_w - 1));
+ ((crtc_state->pipe_src_h - 1) << 16) |
+ (crtc_state->pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
- ((intel_crtc->config->pipe_src_h - 1) << 16) |
- (intel_crtc->config->pipe_src_w - 1));
+ ((crtc_state->pipe_src_h - 1) << 16) |
+ (crtc_state->pipe_src_w - 1));
I915_WRITE(PRIMPOS(plane), 0);
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
@@ -2761,30 +2776,29 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- linear_offset = y * fb->pitches[0] + x * pixel_size;
+ linear_offset = y * fb->pitches[0] + x * cpp;
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_gen4_compute_page_offset(dev_priv,
- &x, &y, obj->tiling_mode,
- pixel_size,
- fb->pitches[0]);
+ intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0], cpp,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
}
- if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
- x += (intel_crtc->config->pipe_src_w - 1);
- y += (intel_crtc->config->pipe_src_h - 1);
+ x += (crtc_state->pipe_src_w - 1);
+ y += (crtc_state->pipe_src_h - 1);
/* Finding the last pixel of the last line of the display
data and adding to linear_offset*/
linear_offset +=
- (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config->pipe_src_w - 1) * pixel_size;
+ (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
+ (crtc_state->pipe_src_w - 1) * cpp;
}
intel_crtc->adjusted_x = x;
@@ -2803,37 +2817,40 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
POSTING_READ(reg);
}
-static void ironlake_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void i9xx_disable_primary_plane(struct drm_plane *primary,
+ struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *primary = crtc->primary;
- bool visible = to_intel_plane_state(primary->state)->visible;
- struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
- unsigned long linear_offset;
- u32 dspcntr;
- i915_reg_t reg = DSPCNTR(plane);
- int pixel_size;
- if (!visible || !fb) {
- I915_WRITE(reg, 0);
+ I915_WRITE(DSPCNTR(plane), 0);
+ if (INTEL_INFO(dev_priv)->gen >= 4)
I915_WRITE(DSPSURF(plane), 0);
- POSTING_READ(reg);
- return;
- }
-
- obj = intel_fb_obj(fb);
- if (WARN_ON(obj == NULL))
- return;
+ else
+ I915_WRITE(DSPADDR(plane), 0);
+ POSTING_READ(DSPCNTR(plane));
+}
- pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+static void ironlake_update_primary_plane(struct drm_plane *primary,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = primary->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ int plane = intel_crtc->plane;
+ u32 linear_offset;
+ u32 dspcntr;
+ i915_reg_t reg = DSPCNTR(plane);
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int x = plane_state->src.x1 >> 16;
+ int y = plane_state->src.y1 >> 16;
dspcntr = DISPPLANE_GAMMA_ENABLE;
-
dspcntr |= DISPLAY_PLANE_ENABLE;
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -2868,25 +2885,24 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- linear_offset = y * fb->pitches[0] + x * pixel_size;
+ linear_offset = y * fb->pitches[0] + x * cpp;
intel_crtc->dspaddr_offset =
- intel_gen4_compute_page_offset(dev_priv,
- &x, &y, obj->tiling_mode,
- pixel_size,
- fb->pitches[0]);
+ intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0], cpp,
+ fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
- if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
- x += (intel_crtc->config->pipe_src_w - 1);
- y += (intel_crtc->config->pipe_src_h - 1);
+ x += (crtc_state->pipe_src_w - 1);
+ y += (crtc_state->pipe_src_h - 1);
			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset */
linear_offset +=
- (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
- (intel_crtc->config->pipe_src_w - 1) * pixel_size;
+ (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
+ (crtc_state->pipe_src_w - 1) * cpp;
}
}
@@ -2907,37 +2923,15 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
POSTING_READ(reg);
}
-u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
- uint32_t pixel_format)
+u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, uint32_t pixel_format)
{
- u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
-
- /*
- * The stride is either expressed as a multiple of 64 bytes
- * chunks for linear buffers or in number of tiles for tiled
- * buffers.
- */
- switch (fb_modifier) {
- case DRM_FORMAT_MOD_NONE:
- return 64;
- case I915_FORMAT_MOD_X_TILED:
- if (INTEL_INFO(dev)->gen == 2)
- return 128;
- return 512;
- case I915_FORMAT_MOD_Y_TILED:
- /* No need to check for old gens and Y tiling since this is
- * about the display engine and those will be blocked before
- * we get here.
- */
- return 128;
- case I915_FORMAT_MOD_Yf_TILED:
- if (bits_per_pixel == 8)
- return 64;
- else
- return 128;
- default:
- MISSING_CASE(fb_modifier);
+ if (fb_modifier == DRM_FORMAT_MOD_NONE) {
return 64;
+ } else {
+ int cpp = drm_format_plane_cpp(pixel_format, 0);
+
+ return intel_tile_width(dev_priv, fb_modifier, cpp);
}
}
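
The rewrite collapses the old per-modifier switch into intel_tile_width(): for tiled buffers the stride alignment is simply the tile row width in bytes. A hedged sketch of the equivalent lookup; sketch_tile_width and the enum tokens are placeholders, but the byte widths are exactly what the deleted switch encoded:

#include <stdio.h>

/* Placeholder modifier tokens; the real ones are DRM fourcc defines. */
enum mod { MOD_LINEAR, MOD_X_TILED, MOD_Y_TILED, MOD_Yf_TILED };

static unsigned int sketch_tile_width(unsigned int gen, enum mod m, int cpp)
{
	switch (m) {
	case MOD_X_TILED:
		return gen == 2 ? 128 : 512;   /* X-tile row: 512B (128B on gen2) */
	case MOD_Y_TILED:
		return 128;                    /* Y-tile row: 128B */
	case MOD_Yf_TILED:
		return cpp == 1 ? 64 : 128;    /* Yf row width depends on pixel size */
	default:
		return 64;                     /* linear: 64-byte chunks */
	}
}

int main(void)
{
	printf("X-tiled on gen9: %u bytes\n", sketch_tile_width(9, MOD_X_TILED, 4));
	return 0;
}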
@@ -2960,7 +2954,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
offset = vma->node.start;
if (plane == 1) {
- offset += vma->ggtt_view.params.rotation_info.uv_start_page *
+ offset += vma->ggtt_view.params.rotated.uv_start_page *
PAGE_SIZE;
}
@@ -3077,36 +3071,30 @@ u32 skl_plane_ctl_rotation(unsigned int rotation)
return 0;
}
-static void skylake_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+static void skylake_update_primary_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *plane = crtc->primary;
- bool visible = to_intel_plane_state(plane->state)->visible;
- struct drm_i915_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_crtc->pipe;
u32 plane_ctl, stride_div, stride;
u32 tile_height, plane_offset, plane_size;
- unsigned int rotation;
+ unsigned int rotation = plane_state->base.rotation;
int x_offset, y_offset;
u32 surf_addr;
- struct intel_crtc_state *crtc_state = intel_crtc->config;
- struct intel_plane_state *plane_state;
- int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
- int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
- int scaler_id = -1;
-
- plane_state = to_intel_plane_state(plane->state);
-
- if (!visible || !fb) {
- I915_WRITE(PLANE_CTL(pipe, 0), 0);
- I915_WRITE(PLANE_SURF(pipe, 0), 0);
- POSTING_READ(PLANE_CTL(pipe, 0));
- return;
- }
+ int scaler_id = plane_state->scaler_id;
+ int src_x = plane_state->src.x1 >> 16;
+ int src_y = plane_state->src.y1 >> 16;
+ int src_w = drm_rect_width(&plane_state->src) >> 16;
+ int src_h = drm_rect_height(&plane_state->src) >> 16;
+ int dst_x = plane_state->dst.x1;
+ int dst_y = plane_state->dst.y1;
+ int dst_w = drm_rect_width(&plane_state->dst);
+ int dst_h = drm_rect_height(&plane_state->dst);
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -3115,41 +3103,27 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-
- rotation = plane->state->rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
- obj = intel_fb_obj(fb);
- stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
WARN_ON(drm_rect_width(&plane_state->src) == 0);
- scaler_id = plane_state->scaler_id;
- src_x = plane_state->src.x1 >> 16;
- src_y = plane_state->src.y1 >> 16;
- src_w = drm_rect_width(&plane_state->src) >> 16;
- src_h = drm_rect_height(&plane_state->src) >> 16;
- dst_x = plane_state->dst.x1;
- dst_y = plane_state->dst.y1;
- dst_w = drm_rect_width(&plane_state->dst);
- dst_h = drm_rect_height(&plane_state->dst);
-
- WARN_ON(x != src_x || y != src_y);
-
if (intel_rotation_90_or_270(rotation)) {
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
/* stride = Surface height in tiles */
- tile_height = intel_tile_height(dev, fb->pixel_format,
- fb->modifier[0], 0);
+ tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
stride = DIV_ROUND_UP(fb->height, tile_height);
- x_offset = stride * tile_height - y - src_h;
- y_offset = x;
+ x_offset = stride * tile_height - src_y - src_h;
+ y_offset = src_x;
plane_size = (src_w - 1) << 16 | (src_h - 1);
} else {
stride = fb->pitches[0] / stride_div;
- x_offset = x;
- y_offset = y;
+ x_offset = src_x;
+ y_offset = src_y;
plane_size = (src_h - 1) << 16 | (src_w - 1);
}
plane_offset = y_offset << 16 | x_offset;
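
In the 90/270 case the hardware scans the rotated GGTT view, so the stride is re-expressed as the surface height in tiles and the source x/y swap roles in PLANE_OFFSET. A worked example with illustrative numbers (the 128-row tile height is an assumption, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int fb_height = 4096, tile_height = 128;
	unsigned int src_x = 0, src_y = 0, src_h = 1920;

	/* stride = surface height in tiles (DIV_ROUND_UP) */
	unsigned int stride = (fb_height + tile_height - 1) / tile_height;
	unsigned int x_offset = stride * tile_height - src_y - src_h;
	unsigned int y_offset = src_x;
	unsigned int plane_offset = y_offset << 16 | x_offset;

	printf("stride=%u tiles, PLANE_OFFSET=0x%08x\n", stride, plane_offset);
	return 0;
}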
@@ -3182,20 +3156,27 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
POSTING_READ(PLANE_SURF(pipe, 0));
}
-/* Assume fb object is pinned & idle & fenced and just update base pointers */
-static int
-intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
+static void skylake_disable_primary_plane(struct drm_plane *primary,
+ struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(crtc)->pipe;
- if (dev_priv->fbc.deactivate)
- dev_priv->fbc.deactivate(dev_priv);
+ I915_WRITE(PLANE_CTL(pipe, 0), 0);
+ I915_WRITE(PLANE_SURF(pipe, 0), 0);
+ POSTING_READ(PLANE_SURF(pipe, 0));
+}
- dev_priv->display.update_primary_plane(crtc, fb, x, y);
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ /* Support for kgdboc is disabled, this needs a major rework. */
+ DRM_ERROR("legacy panic handler not supported any more.\n");
- return 0;
+ return -ENODEV;
}
static void intel_complete_page_flips(struct drm_device *dev)
@@ -3222,8 +3203,10 @@ static void intel_update_primary_planes(struct drm_device *dev)
drm_modeset_lock_crtc(crtc, &plane->base);
plane_state = to_intel_plane_state(plane->base.state);
- if (crtc->state->active && plane_state->base.fb)
- plane->commit_plane(&plane->base, plane_state);
+ if (plane_state->visible)
+ plane->update_plane(&plane->base,
+ to_intel_crtc_state(crtc->state),
+ plane_state);
drm_modeset_unlock_crtc(crtc);
}
@@ -4455,7 +4438,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
@@ -4809,9 +4792,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
- if (atomic->wait_vblank)
- intel_wait_for_vblank(dev, crtc->pipe);
-
intel_frontbuffer_flip(dev, atomic->fb_bits);
crtc->wm.cxsr_allowed = true;
@@ -4820,7 +4800,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
intel_update_watermarks(&crtc->base);
if (atomic->update_fbc)
- intel_fbc_update(crtc);
+ intel_fbc_post_update(crtc);
if (atomic->post_enable_primary)
intel_post_enable_primary(&crtc->base);
@@ -4828,26 +4808,39 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
memset(atomic, 0, sizeof(*atomic));
}
-static void intel_pre_plane_update(struct intel_crtc *crtc)
+static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
+ struct drm_atomic_state *old_state = old_crtc_state->base.state;
+ struct drm_plane *primary = crtc->base.primary;
+ struct drm_plane_state *old_pri_state =
+ drm_atomic_get_existing_plane_state(old_state, primary);
+ bool modeset = needs_modeset(&pipe_config->base);
- if (atomic->disable_fbc)
- intel_fbc_deactivate(crtc);
+ if (atomic->update_fbc)
+ intel_fbc_pre_update(crtc);
- if (crtc->atomic.disable_ips)
- hsw_disable_ips(crtc);
+ if (old_pri_state) {
+ struct intel_plane_state *primary_state =
+ to_intel_plane_state(primary->state);
+ struct intel_plane_state *old_primary_state =
+ to_intel_plane_state(old_pri_state);
- if (atomic->pre_disable_primary)
- intel_pre_disable_primary(&crtc->base);
+ if (old_primary_state->visible &&
+ (modeset || !primary_state->visible))
+ intel_pre_disable_primary(&crtc->base);
+ }
if (pipe_config->disable_cxsr) {
crtc->wm.cxsr_allowed = false;
- intel_set_memory_cxsr(dev_priv, false);
+
+ if (old_crtc_state->base.active)
+ intel_set_memory_cxsr(dev_priv, false);
}
if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
@@ -4948,8 +4941,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
intel_wait_for_vblank(dev, pipe);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
-
- intel_fbc_enable(intel_crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -5062,8 +5053,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_wait_for_vblank(dev, hsw_workaround_pipe);
intel_wait_for_vblank(dev, hsw_workaround_pipe);
}
-
- intel_fbc_enable(intel_crtc);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
@@ -5144,8 +5133,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
-
- intel_fbc_disable_crtc(intel_crtc);
}
static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5196,8 +5183,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
}
-
- intel_fbc_disable_crtc(intel_crtc);
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5320,31 +5305,37 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
}
}
-static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
+static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
- struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
unsigned long mask;
- enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
- if (!crtc->state->active)
+ if (!crtc_state->base.active)
return 0;
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (intel_crtc->config->pch_pfit.enabled ||
- intel_crtc->config->pch_pfit.force_thru)
+ if (crtc_state->pch_pfit.enabled ||
+ crtc_state->pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
- for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
mask |= BIT(intel_display_port_power_domain(intel_encoder));
+ }
return mask;
}
-static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+static unsigned long
+modeset_get_crtc_power_domains(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5352,7 +5343,8 @@ static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
unsigned long domains, new_domains, old_domains;
old_domains = intel_crtc->enabled_power_domains;
- intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+ intel_crtc->enabled_power_domains = new_domains =
+ get_crtc_power_domains(crtc, crtc_state);
domains = new_domains & ~old_domains;
@@ -5371,34 +5363,6 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
intel_display_power_put(dev_priv, domain);
}
-static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long put_domains[I915_MAX_PIPES] = {};
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- int i;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (needs_modeset(crtc->state))
- put_domains[to_intel_crtc(crtc)->pipe] =
- modeset_get_crtc_power_domains(crtc);
- }
-
- if (dev_priv->display.modeset_commit_cdclk) {
- unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
-
- if (cdclk != dev_priv->cdclk_freq &&
- !WARN_ON(!state->allow_modeset))
- dev_priv->display.modeset_commit_cdclk(state);
- }
-
- for (i = 0; i < I915_MAX_PIPES; i++)
- if (put_domains[i])
- modeset_put_power_domains(dev_priv, put_domains[i]);
-}
-
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
@@ -6061,27 +6025,32 @@ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
return 144000;
}
-/* Compute the max pixel clock for new configuration. Uses atomic state if
- * that's non-NULL, look at current state otherwise. */
+/* Compute the max pixel clock for new configuration. */
static int intel_mode_max_pixclk(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *crtc_state;
- int max_pixclk = 0;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ unsigned max_pixclk = 0, i;
+ enum pipe pipe;
- for_each_intel_crtc(dev, intel_crtc) {
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
+ sizeof(intel_state->min_pixclk));
- if (!crtc_state->base.enable)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ int pixclk = 0;
+
+ if (crtc_state->enable)
+ pixclk = crtc_state->adjusted_mode.crtc_clock;
- max_pixclk = max(max_pixclk,
- crtc_state->base.adjusted_mode.crtc_clock);
+ intel_state->min_pixclk[i] = pixclk;
}
+ for_each_pipe(dev_priv, pipe)
+ max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
+
return max_pixclk;
}
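
The new bookkeeping keeps a per-pipe minimum pixel clock: copy the committed values, overwrite only the pipes this atomic state touches, then take the maximum over all pipes. A small sketch of that flow with invented clock values:

#include <stdio.h>

#define MAX_PIPES 3

int main(void)
{
	int committed[MAX_PIPES] = { 148500, 0, 297000 }; /* dev_priv->min_pixclk */
	int state[MAX_PIPES];
	int i, max_pixclk = 0;

	for (i = 0; i < MAX_PIPES; i++)
		state[i] = committed[i];          /* the memcpy() in the driver */

	state[1] = 533250;                        /* pipe B enabled by this state */

	for (i = 0; i < MAX_PIPES; i++)
		if (state[i] > max_pixclk)
			max_pixclk = state[i];

	printf("max_pixclk = %d kHz\n", max_pixclk);
	return 0;
}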
@@ -6090,13 +6059,18 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev, state);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
if (max_pixclk < 0)
return max_pixclk;
- to_intel_atomic_state(state)->cdclk =
+ intel_state->cdclk = intel_state->dev_cdclk =
valleyview_calc_cdclk(dev_priv, max_pixclk);
+ if (!intel_state->active_crtcs)
+ intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
+
return 0;
}
@@ -6105,13 +6079,18 @@ static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_pixclk = intel_mode_max_pixclk(dev, state);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
if (max_pixclk < 0)
return max_pixclk;
- to_intel_atomic_state(state)->cdclk =
+ intel_state->cdclk = intel_state->dev_cdclk =
broxton_calc_cdclk(dev_priv, max_pixclk);
+ if (!intel_state->active_crtcs)
+ intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+
return 0;
}
@@ -6154,8 +6133,10 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
+ unsigned req_cdclk = old_intel_state->dev_cdclk;
/*
* FIXME: We can end up here with all power domains off, yet
@@ -6290,8 +6271,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
-
- intel_fbc_enable(intel_crtc);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6354,8 +6333,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
- intel_fbc_disable_crtc(intel_crtc);
}
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@@ -6379,6 +6356,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
+ intel_fbc_disable(intel_crtc);
intel_update_watermarks(crtc);
intel_disable_shared_dpll(intel_crtc);
@@ -6386,6 +6364,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
for_each_power_domain(domain, domains)
intel_display_power_put(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
+
+ dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
+ dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}
/*
@@ -6394,55 +6375,16 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
*/
int intel_display_suspend(struct drm_device *dev)
{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
- struct drm_crtc *crtc;
- unsigned crtc_mask = 0;
- int ret = 0;
-
- if (WARN_ON(!ctx))
- return 0;
-
- lockdep_assert_held(&ctx->ww_ctx);
- state = drm_atomic_state_alloc(dev);
- if (WARN_ON(!state))
- return -ENOMEM;
-
- state->acquire_ctx = ctx;
- state->allow_modeset = true;
-
- for_each_crtc(dev, crtc) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_crtc_state(state, crtc);
-
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
- goto free;
-
- if (!crtc_state->active)
- continue;
-
- crtc_state->active = false;
- crtc_mask |= 1 << drm_crtc_index(crtc);
- }
-
- if (crtc_mask) {
- ret = drm_atomic_commit(state);
-
- if (!ret) {
- for_each_crtc(dev, crtc)
- if (crtc_mask & (1 << drm_crtc_index(crtc)))
- crtc->state->active = true;
-
- return ret;
- }
- }
+ int ret;
-free:
+ state = drm_atomic_helper_suspend(dev);
+ ret = PTR_ERR_OR_ZERO(state);
if (ret)
DRM_ERROR("Suspending crtc's failed with %i\n", ret);
- drm_atomic_state_free(state);
+ else
+ dev_priv->modeset_restore_state = state;
return ret;
}
@@ -7596,26 +7538,34 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
- const struct dpll *dpll)
+int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll)
{
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
- struct intel_crtc_state pipe_config = {
- .base.crtc = &crtc->base,
- .pixel_multiplier = 1,
- .dpll = *dpll,
- };
+ struct intel_crtc_state *pipe_config;
+
+ pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ if (!pipe_config)
+ return -ENOMEM;
+
+ pipe_config->base.crtc = &crtc->base;
+ pipe_config->pixel_multiplier = 1;
+ pipe_config->dpll = *dpll;
if (IS_CHERRYVIEW(dev)) {
- chv_compute_dpll(crtc, &pipe_config);
- chv_prepare_pll(crtc, &pipe_config);
- chv_enable_pll(crtc, &pipe_config);
+ chv_compute_dpll(crtc, pipe_config);
+ chv_prepare_pll(crtc, pipe_config);
+ chv_enable_pll(crtc, pipe_config);
} else {
- vlv_compute_dpll(crtc, &pipe_config);
- vlv_prepare_pll(crtc, &pipe_config);
- vlv_enable_pll(crtc, &pipe_config);
+ vlv_compute_dpll(crtc, pipe_config);
+ vlv_prepare_pll(crtc, pipe_config);
+ vlv_enable_pll(crtc, pipe_config);
}
+
+ kfree(pipe_config);
+
+ return 0;
}
/**
@@ -8038,9 +7988,6 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
pipe_config->gmch_pfit.control = tmp;
pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
- if (INTEL_INFO(dev)->gen < 5)
- pipe_config->gmch_pfit.lvds_border_bits =
- I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9258,7 +9205,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, 0));
- stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+ stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
@@ -9682,14 +9629,14 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
-
- intel_prepare_ddi(dev);
}
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
+ unsigned int req_cdclk = old_intel_state->dev_cdclk;
broxton_set_cdclk(dev, req_cdclk);
}
@@ -9697,29 +9644,38 @@ static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
- struct intel_crtc *intel_crtc;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = state->dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
struct intel_crtc_state *crtc_state;
- int max_pixel_rate = 0;
+ unsigned max_pixel_rate = 0, i;
+ enum pipe pipe;
- for_each_intel_crtc(state->dev, intel_crtc) {
- int pixel_rate;
+ memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
+ sizeof(intel_state->min_pixclk));
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ int pixel_rate;
- if (!crtc_state->base.enable)
+ crtc_state = to_intel_crtc_state(cstate);
+ if (!crtc_state->base.enable) {
+ intel_state->min_pixclk[i] = 0;
continue;
+ }
pixel_rate = ilk_pipe_pixel_rate(crtc_state);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
- max_pixel_rate = max(max_pixel_rate, pixel_rate);
+ intel_state->min_pixclk[i] = pixel_rate;
}
+ for_each_pipe(dev_priv, pipe)
+ max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
+
return max_pixel_rate;
}
@@ -9793,6 +9749,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
mutex_unlock(&dev_priv->rps.hw_lock);
+ I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+
intel_update_cdclk(dev);
WARN(cdclk != dev_priv->cdclk_freq,
@@ -9803,6 +9761,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int max_pixclk = ilk_max_pixel_rate(state);
int cdclk;
@@ -9825,7 +9784,9 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
return -EINVAL;
}
- to_intel_atomic_state(state)->cdclk = cdclk;
+ intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+ if (!intel_state->active_crtcs)
+ intel_state->dev_cdclk = 337500;
return 0;
}
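
This introduces the cdclk/dev_cdclk split: cdclk is the frequency the new state must be validated against, while dev_cdclk is what actually gets programmed, and with no active crtcs the device may drop to a fixed idle frequency (337500 kHz here). A sketch under those assumptions, with an invented required clock:

#include <stdio.h>

int main(void)
{
	unsigned int active_crtcs = 0;     /* bitmask of enabled pipes */
	unsigned int cdclk = 450000;       /* required by the new state, kHz */
	unsigned int dev_cdclk = cdclk;    /* what gets programmed */

	if (!active_crtcs)
		dev_cdclk = 337500;        /* idle minimum */

	printf("validate against %u kHz, program %u kHz\n", cdclk, dev_cdclk);
	return 0;
}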
@@ -9833,7 +9794,9 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
+ unsigned req_cdclk = old_intel_state->dev_cdclk;
broadwell_set_cdclk(dev, req_cdclk);
}
@@ -9841,8 +9804,13 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- if (!intel_ddi_pll_select(crtc, crtc_state))
- return -EINVAL;
+ struct intel_encoder *intel_encoder =
+ intel_ddi_get_crtc_new_encoder(crtc_state);
+
+ if (intel_encoder->type != INTEL_OUTPUT_DSI) {
+ if (!intel_ddi_pll_select(crtc, crtc_state))
+ return -EINVAL;
+ }
crtc->lowfreq_avail = false;
@@ -10058,16 +10026,17 @@ out:
return ret;
}
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
- if (on) {
- unsigned int width = intel_crtc->base.cursor->state->crtc_w;
- unsigned int height = intel_crtc->base.cursor->state->crtc_h;
+ if (plane_state && plane_state->visible) {
+ unsigned int width = plane_state->base.crtc_w;
+ unsigned int height = plane_state->base.crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
switch (stride) {
@@ -10120,7 +10089,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
}
}
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10128,9 +10098,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (on) {
+ if (plane_state && plane_state->visible) {
cntl = MCURSOR_GAMMA_ENABLE;
- switch (intel_crtc->base.cursor->state->crtc_w) {
+ switch (plane_state->base.crtc_w) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
@@ -10141,17 +10111,17 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
+ MISSING_CASE(plane_state->base.crtc_w);
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
if (HAS_DDI(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- }
- if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
- cntl |= CURSOR_ROTATE_180;
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
+ cntl |= CURSOR_ROTATE_180;
+ }
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
@@ -10168,56 +10138,45 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
- bool on)
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- struct drm_plane_state *cursor_state = crtc->cursor->state;
- int x = cursor_state->crtc_x;
- int y = cursor_state->crtc_y;
- u32 base = 0, pos = 0;
-
- base = intel_crtc->cursor_addr;
+ u32 base = intel_crtc->cursor_addr;
+ u32 pos = 0;
- if (x >= intel_crtc->config->pipe_src_w)
- on = false;
+ if (plane_state) {
+ int x = plane_state->base.crtc_x;
+ int y = plane_state->base.crtc_y;
- if (y >= intel_crtc->config->pipe_src_h)
- on = false;
-
- if (x < 0) {
- if (x + cursor_state->crtc_w <= 0)
- on = false;
-
- pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
- }
- pos |= x << CURSOR_X_SHIFT;
+ if (x < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
- if (y < 0) {
- if (y + cursor_state->crtc_h <= 0)
- on = false;
+ if (y < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
- pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
+ /* ILK+ do this automagically */
+ if (HAS_GMCH_DISPLAY(dev) &&
+ plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ base += (plane_state->base.crtc_h *
+ plane_state->base.crtc_w - 1) * 4;
+ }
}
- pos |= y << CURSOR_Y_SHIFT;
I915_WRITE(CURPOS(pipe), pos);
- /* ILK+ do this automagically */
- if (HAS_GMCH_DISPLAY(dev) &&
- crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
- base += (cursor_state->crtc_h *
- cursor_state->crtc_w - 1) * 4;
- }
-
if (IS_845G(dev) || IS_I865G(dev))
- i845_update_cursor(crtc, base, on);
+ i845_update_cursor(crtc, base, plane_state);
else
- i9xx_update_cursor(crtc, base, on);
+ i9xx_update_cursor(crtc, base, plane_state);
}
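
CURPOS takes sign/magnitude coordinates, which is why the rewritten helper strips the sign into CURSOR_POS_SIGN before packing the absolute value. A standalone sketch of the packing; the shift and sign-bit constants mirror the register layout used above but should be read as illustrative:

#include <stdint.h>
#include <stdio.h>

#define CURSOR_POS_SIGN  0x8000u
#define CURSOR_X_SHIFT   0
#define CURSOR_Y_SHIFT   16

static uint32_t pack_curpos(int x, int y)
{
	uint32_t pos = 0;

	if (x < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= (uint32_t)x << CURSOR_X_SHIFT;

	if (y < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= (uint32_t)y << CURSOR_Y_SHIFT;

	return pos;
}

int main(void)
{
	printf("(-10, 20) -> 0x%08x\n", pack_curpos(-10, 20));
	return 0;
}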
static bool cursor_size_ok(struct drm_device *dev,
@@ -10385,6 +10344,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL;
+ drm_framebuffer_reference(fb);
return fb;
#else
return NULL;
@@ -10440,7 +10400,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
- struct drm_atomic_state *state = NULL;
+ struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
int ret, i = -1;
@@ -10449,6 +10409,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
connector->base.id, connector->name,
encoder->base.id, encoder->name);
+ old->restore_state = NULL;
+
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
@@ -10465,24 +10427,15 @@ retry:
*/
/* See if we already have a CRTC for this connector */
- if (encoder->crtc) {
- crtc = encoder->crtc;
+ if (connector->state->crtc) {
+ crtc = connector->state->crtc;
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail;
- ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
- if (ret)
- goto fail;
-
- old->dpms_mode = connector->dpms;
- old->load_detect_temp = false;
/* Make sure the crtc and connector are running */
- if (connector->dpms != DRM_MODE_DPMS_ON)
- connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
-
- return true;
+ goto found;
}
/* Find an unused one (if possible) */
@@ -10490,8 +10443,15 @@ retry:
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
- if (possible_crtc->state->enable)
+
+ ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
+ if (ret)
+ goto fail;
+
+ if (possible_crtc->state->enable) {
+ drm_modeset_unlock(&possible_crtc->mutex);
continue;
+ }
crtc = possible_crtc;
break;
@@ -10505,23 +10465,22 @@ retry:
goto fail;
}
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- goto fail;
+found:
+ intel_crtc = to_intel_crtc(crtc);
+
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
goto fail;
- intel_crtc = to_intel_crtc(crtc);
- old->dpms_mode = connector->dpms;
- old->load_detect_temp = true;
- old->release_fb = NULL;
-
state = drm_atomic_state_alloc(dev);
- if (!state)
- return false;
+ restore_state = drm_atomic_state_alloc(dev);
+ if (!state || !restore_state) {
+ ret = -ENOMEM;
+ goto fail;
+ }
state->acquire_ctx = ctx;
+ restore_state->acquire_ctx = ctx;
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state)) {
@@ -10529,8 +10488,9 @@ retry:
goto fail;
}
- connector_state->crtc = crtc;
- connector_state->best_encoder = &intel_encoder->base;
+ ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
+ if (ret)
+ goto fail;
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state)) {
@@ -10554,7 +10514,6 @@ retry:
if (fb == NULL) {
DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
- old->release_fb = fb;
} else
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
@@ -10566,15 +10525,29 @@ retry:
if (ret)
goto fail;
- drm_mode_copy(&crtc_state->base.mode, mode);
+ drm_framebuffer_unreference(fb);
+
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ if (ret)
+ goto fail;
+
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
+ if (!ret)
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+ if (!ret)
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
+ goto fail;
+ }
- if (drm_atomic_commit(state)) {
+ ret = drm_atomic_commit(state);
+ if (ret) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
- if (old->release_fb)
- old->release_fb->funcs->destroy(old->release_fb);
goto fail;
}
- crtc->primary->crtc = crtc;
+
+ old->restore_state = restore_state;
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -10582,7 +10555,8 @@ retry:
fail:
drm_atomic_state_free(state);
- state = NULL;
+ drm_atomic_state_free(restore_state);
+ restore_state = state = NULL;
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
@@ -10596,66 +10570,24 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_atomic_state *state;
- struct drm_connector_state *connector_state;
- struct intel_crtc_state *crtc_state;
+ struct drm_atomic_state *state = old->restore_state;
int ret;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.id, encoder->name);
- if (old->load_detect_temp) {
- state = drm_atomic_state_alloc(dev);
- if (!state)
- goto fail;
-
- state->acquire_ctx = ctx;
-
- connector_state = drm_atomic_get_connector_state(state, connector);
- if (IS_ERR(connector_state))
- goto fail;
-
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(crtc_state))
- goto fail;
-
- connector_state->best_encoder = NULL;
- connector_state->crtc = NULL;
-
- crtc_state->base.enable = crtc_state->base.active = false;
-
- ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
- 0, 0);
- if (ret)
- goto fail;
-
- ret = drm_atomic_commit(state);
- if (ret)
- goto fail;
-
- if (old->release_fb) {
- drm_framebuffer_unregister_private(old->release_fb);
- drm_framebuffer_unreference(old->release_fb);
- }
-
+ if (!state)
return;
- }
-
- /* Switch crtc and encoder back off if necessary */
- if (old->dpms_mode != DRM_MODE_DPMS_ON)
- connector->funcs->dpms(connector, old->dpms_mode);
- return;
-fail:
- DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
- drm_atomic_state_free(state);
+ ret = drm_atomic_commit(state);
+ if (ret) {
+ DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
+ drm_atomic_state_free(state);
+ }
}
static int i9xx_pll_refclk(struct drm_device *dev,
@@ -10810,7 +10742,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *mode;
- struct intel_crtc_state pipe_config;
+ struct intel_crtc_state *pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -10821,6 +10753,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
if (!mode)
return NULL;
+ pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ if (!pipe_config) {
+ kfree(mode);
+ return NULL;
+ }
+
/*
* Construct a pipe_config sufficient for getting the clock info
* back out of crtc_clock_get.
@@ -10828,14 +10766,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
* Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
* to use a real value here instead.
*/
- pipe_config.cpu_transcoder = (enum transcoder) pipe;
- pipe_config.pixel_multiplier = 1;
- pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
- pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
- pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
- i9xx_crtc_clock_get(intel_crtc, &pipe_config);
-
- mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
+ pipe_config->cpu_transcoder = (enum transcoder) pipe;
+ pipe_config->pixel_multiplier = 1;
+ pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+ pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+ pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
+ i9xx_crtc_clock_get(intel_crtc, pipe_config);
+
+ mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
@@ -10847,6 +10785,8 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
drm_mode_set_name(mode);
+ kfree(pipe_config);
+
return mode;
}
@@ -10917,6 +10857,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_unlock(&dev->struct_mutex);
intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+ intel_fbc_post_update(crtc);
drm_framebuffer_unreference(work->old_fb);
BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
@@ -10998,6 +10939,12 @@ static bool page_flip_finished(struct intel_crtc *crtc)
return true;
/*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+
+ /*
* A DSPSURFLIVE check isn't enough in case the mmio and CS flips
* used the same base address. In that case the mmio flip might
* have completed, but the CS hasn't even executed the flip yet.
@@ -11351,13 +11298,12 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
*/
if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */
- tile_height = intel_tile_height(dev, fb->pixel_format,
- fb->modifier[0], 0);
+ tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
stride = DIV_ROUND_UP(fb->height, tile_height);
} else {
stride = fb->pitches[0] /
- intel_fb_stride_alignment(dev, fb->modifier[0],
- fb->pixel_format);
+ intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+ fb->pixel_format);
}
/*
@@ -11633,6 +11579,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->primary->fb = fb;
update_state_fb(crtc->primary);
+ intel_fbc_pre_update(intel_crtc);
work->pending_flip_obj = obj;
@@ -11692,9 +11639,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj->last_write_req);
} else {
if (!request) {
- ret = i915_gem_request_alloc(ring, ring->default_context, &request);
- if (ret)
+ request = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(request)) {
+ ret = PTR_ERR(request);
goto cleanup_unpin;
+ }
}
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
@@ -11715,7 +11664,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
- intel_fbc_deactivate(intel_crtc);
intel_frontbuffer_flip_prepare(dev,
to_intel_plane(primary)->frontbuffer_bit);
@@ -11726,7 +11674,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
- if (request)
+ if (!IS_ERR_OR_NULL(request))
i915_gem_request_cancel(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
@@ -11837,11 +11785,9 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *plane = plane_state->plane;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
int idx = intel_crtc->base.base.id, ret;
- int i = drm_plane_index(plane);
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
@@ -11863,12 +11809,20 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
- if (!is_crtc_enabled && WARN_ON(visible))
- visible = false;
+ /*
+ * Visibility is calculated as if the crtc was on, but
+ * after scaler setup everything depends on it being off
+ * when the crtc isn't active.
+ */
+ if (!is_crtc_enabled)
+ to_intel_plane_state(plane_state)->visible = visible = false;
if (!was_visible && !visible)
return 0;
+ if (fb != old_plane_state->base.fb)
+ pipe_config->fb_changed = true;
+
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
@@ -11883,11 +11837,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
pipe_config->wm_changed = true;
/* must disable cxsr around plane enable/disable */
- if (plane->type != DRM_PLANE_TYPE_CURSOR) {
- if (is_crtc_enabled)
- intel_crtc->atomic.wait_vblank = true;
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
pipe_config->disable_cxsr = true;
- }
} else if (intel_wm_need_update(plane, plane_state)) {
pipe_config->wm_changed = true;
}
@@ -11898,49 +11849,9 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
- intel_crtc->atomic.pre_disable_primary = turn_off;
intel_crtc->atomic.post_enable_primary = turn_on;
+ intel_crtc->atomic.update_fbc = true;
- if (turn_off) {
- /*
- * FIXME: Actually if we will still have any other
- * plane enabled on the pipe we could let IPS enabled
- * still, but for now lets consider that when we make
- * primary invisible by setting DSPCNTR to 0 on
- * update_primary_plane function IPS needs to be
- * disable.
- */
- intel_crtc->atomic.disable_ips = true;
-
- intel_crtc->atomic.disable_fbc = true;
- }
-
- /*
- * FBC does not work on some platforms for rotated
- * planes, so disable it when rotation is not 0 and
- * update it when rotation is set back to 0.
- *
- * FIXME: This is redundant with the fbc update done in
- * the primary plane enable function except that that
- * one is done too late. We eventually need to unify
- * this.
- */
-
- if (visible &&
- INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
- dev_priv->fbc.crtc == intel_crtc &&
- plane_state->rotation != BIT(DRM_ROTATE_0))
- intel_crtc->atomic.disable_fbc = true;
-
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (turn_on && IS_BROADWELL(dev))
- intel_crtc->atomic.wait_vblank = true;
-
- intel_crtc->atomic.update_fbc |= visible || mode_changed;
break;
case DRM_PLANE_TYPE_CURSOR:
break;
@@ -11953,13 +11864,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
*/
if (IS_IVYBRIDGE(dev) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
- !needs_scaling(old_plane_state)) {
- to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
- } else if (turn_off && !mode_changed) {
- intel_crtc->atomic.wait_vblank = true;
- intel_crtc->atomic.update_sprite_watermarks |=
- 1 << i;
- }
+ !needs_scaling(old_plane_state))
+ pipe_config->disable_lp_wm = true;
break;
}
@@ -12561,19 +12467,22 @@ intel_compare_m_n(unsigned int m, unsigned int n,
BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
- if (m > m2) {
- while (m > m2) {
+ if (n > n2) {
+ while (n > n2) {
m2 <<= 1;
n2 <<= 1;
}
- } else if (m < m2) {
- while (m < m2) {
+ } else if (n < n2) {
+ while (n < n2) {
m <<= 1;
n <<= 1;
}
}
- return m == m2 && n == n2;
+ if (n != n2)
+ return false;
+
+ return intel_fuzzy_clock_check(m, m2);
}
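
The fixed comparison normalizes both M/N ratios to a common N by doubling, then demands an exact N match and only a fuzzy match on M, since link M values are derived from clocks that may differ by rounding. A sketch of the logic; the ~5% window in fuzzy_check is an assumption standing in for intel_fuzzy_clock_check():

#include <stdbool.h>
#include <stdio.h>

static bool fuzzy_check(unsigned int a, unsigned int b)
{
	unsigned int diff = a > b ? a - b : b - a;

	return 20ULL * diff <= a;  /* within ~5%, illustrative */
}

static bool compare_m_n(unsigned int m, unsigned int n,
			unsigned int m2, unsigned int n2)
{
	/* Scale the pair with the smaller N up until the Ns align. */
	while (n > n2) {
		m2 <<= 1;
		n2 <<= 1;
	}
	while (n < n2) {
		m <<= 1;
		n <<= 1;
	}

	if (n != n2)
		return false;

	return fuzzy_check(m, m2);
}

int main(void)
{
	/* Same ratio expressed at different scales now compares equal. */
	printf("%d\n", compare_m_n(123456, 524288, 61728, 262144));
	return 0;
}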
static bool
@@ -13124,8 +13033,6 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll_config *shared_dpll = NULL;
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *intel_crtc_state;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
@@ -13134,21 +13041,21 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
return;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- int dpll;
-
- intel_crtc = to_intel_crtc(crtc);
- intel_crtc_state = to_intel_crtc_state(crtc_state);
- dpll = intel_crtc_state->shared_dpll;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
- if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
+ if (!needs_modeset(crtc_state))
continue;
- intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
+ to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
+
+ if (old_dpll == DPLL_ID_PRIVATE)
+ continue;
if (!shared_dpll)
shared_dpll = intel_atomic_get_shared_dpll_state(state);
- shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
+ shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
}
}
@@ -13248,15 +13155,27 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
static int intel_modeset_checks(struct drm_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = state->dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0, i;
if (!check_digital_port_conflicts(state)) {
DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
return -EINVAL;
}
+ intel_state->modeset = true;
+ intel_state->active_crtcs = dev_priv->active_crtcs;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->active)
+ intel_state->active_crtcs |= 1 << i;
+ else
+ intel_state->active_crtcs &= ~(1 << i);
+ }
+
/*
* See if the config requires any additional preparation, e.g.
* to adjust global state with pipes off. We need to do this
@@ -13265,22 +13184,22 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
- unsigned int cdclk;
-
ret = dev_priv->display.modeset_calc_cdclk(state);
- cdclk = to_intel_atomic_state(state)->cdclk;
- if (!ret && cdclk != dev_priv->cdclk_freq)
+ if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
+
+		DRM_DEBUG_KMS("New cdclk calculated: atomic %u, device %u\n",
+ intel_state->cdclk, intel_state->dev_cdclk);
} else
- to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
+ to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
intel_modeset_clear_plls(state);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
return haswell_mode_set_planes_workaround(state);
return 0;
@@ -13333,6 +13252,7 @@ static void calc_watermark_data(struct drm_atomic_state *state)
static int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -13375,7 +13295,7 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
if (i915.fastboot &&
- intel_pipe_config_compare(state->dev,
+ intel_pipe_config_compare(dev,
to_intel_crtc_state(crtc->state),
pipe_config, true)) {
crtc_state->mode_changed = false;
@@ -13401,12 +13321,13 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
return ret;
} else
- intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
+ intel_state->cdclk = dev_priv->cdclk_freq;
- ret = drm_atomic_helper_check_planes(state->dev, state);
+ ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
+ intel_fbc_choose_crtc(dev_priv, state);
calc_watermark_data(state);
return 0;
@@ -13429,6 +13350,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (state->legacy_cursor_update)
+ continue;
+
ret = intel_crtc_wait_for_pending_flips(crtc);
if (ret)
return ret;
@@ -13478,6 +13402,71 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
return ret;
}
+static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ unsigned crtc_mask)
+{
+ unsigned last_vblank_count[I915_MAX_PIPES];
+ enum pipe pipe;
+ int ret;
+
+ if (!crtc_mask)
+ return;
+
+ for_each_pipe(dev_priv, pipe) {
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (!((1 << pipe) & crtc_mask))
+ continue;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (WARN_ON(ret != 0)) {
+ crtc_mask &= ~(1 << pipe);
+ continue;
+ }
+
+ last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
+ }
+
+ for_each_pipe(dev_priv, pipe) {
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ long lret;
+
+ if (!((1 << pipe) & crtc_mask))
+ continue;
+
+ lret = wait_event_timeout(dev->vblank[pipe].queue,
+ last_vblank_count[pipe] !=
+ drm_crtc_vblank_count(crtc),
+ msecs_to_jiffies(50));
+
+ WARN_ON(!lret);
+
+ drm_crtc_vblank_put(crtc);
+ }
+}
+
+static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
+{
+ /* fb updated, need to unpin old fb */
+ if (crtc_state->fb_changed)
+ return true;
+
+ /* wm changes, need vblank before final wm's */
+ if (crtc_state->wm_changed)
+ return true;
+
+ /*
+ * cxsr is re-enabled after vblank.
+ * This is already handled by crtc_state->wm_changed,
+ * but added for clarity.
+ */
+ if (crtc_state->disable_cxsr)
+ return true;
+
+ return false;
+}
+
/**
* intel_atomic_commit - commit validated state object
* @dev: DRM device
@@ -13498,12 +13487,14 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
- int ret = 0;
- int i;
- bool any_ms = false;
+ int ret = 0, i;
+ bool hw_check = intel_state->modeset;
+ unsigned long put_domains[I915_MAX_PIPES] = {};
+ unsigned crtc_vblank_mask = 0;
ret = intel_atomic_prepare_commit(dev, state, async);
if (ret) {
@@ -13514,19 +13505,37 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(dev, state);
dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
+ if (intel_state->modeset) {
+ memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+ sizeof(intel_state->min_pixclk));
+ dev_priv->active_crtcs = intel_state->active_crtcs;
+ dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ }
+
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ if (needs_modeset(crtc->state) ||
+ to_intel_crtc_state(crtc->state)->update_pipe) {
+ hw_check = true;
+
+ put_domains[to_intel_crtc(crtc)->pipe] =
+ modeset_get_crtc_power_domains(crtc,
+ to_intel_crtc_state(crtc->state));
+ }
+
if (!needs_modeset(crtc->state))
continue;
- any_ms = true;
- intel_pre_plane_update(intel_crtc);
+ intel_pre_plane_update(to_intel_crtc_state(crtc_state));
if (crtc_state->active) {
intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
+ intel_fbc_disable(intel_crtc);
intel_disable_shared_dpll(intel_crtc);
/*
@@ -13545,65 +13554,80 @@ static int intel_atomic_commit(struct drm_device *dev,
	 * update the output configuration. */
intel_modeset_update_crtc_state(state);
- if (any_ms) {
+ if (intel_state->modeset) {
intel_shared_dpll_commit(state);
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
- modeset_update_crtc_power_domains(state);
+
+ if (dev_priv->display.modeset_commit_cdclk &&
+ intel_state->dev_cdclk != dev_priv->cdclk_freq)
+ dev_priv->display.modeset_commit_cdclk(state);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool modeset = needs_modeset(crtc->state);
- bool update_pipe = !modeset &&
- to_intel_crtc_state(crtc->state)->update_pipe;
- unsigned long put_domains = 0;
-
- if (modeset)
- intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
+ bool update_pipe = !modeset && pipe_config->update_pipe;
if (modeset && crtc->state->active) {
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
}
- if (update_pipe) {
- put_domains = modeset_get_crtc_power_domains(crtc);
-
- /* make sure intel_modeset_check_state runs */
- any_ms = true;
- }
-
if (!modeset)
- intel_pre_plane_update(intel_crtc);
+ intel_pre_plane_update(to_intel_crtc_state(crtc_state));
+
+ if (crtc->state->active && intel_crtc->atomic.update_fbc)
+ intel_fbc_enable(intel_crtc);
if (crtc->state->active &&
(crtc->state->planes_changed || update_pipe))
drm_atomic_helper_commit_planes_on_crtc(crtc_state);
- if (put_domains)
- modeset_put_power_domains(dev_priv, put_domains);
-
- intel_post_plane_update(intel_crtc);
-
- if (modeset)
- intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
+ if (pipe_config->base.active && needs_vblank_wait(pipe_config))
+ crtc_vblank_mask |= 1 << i;
}
/* FIXME: add subpixel order */
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ if (!state->legacy_cursor_update)
+ intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ intel_post_plane_update(to_intel_crtc(crtc));
+
+ if (put_domains[i])
+ modeset_put_power_domains(dev_priv, put_domains[i]);
+ }
+
+ if (intel_state->modeset)
+ intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
- if (any_ms)
+ if (hw_check)
intel_modeset_check_state(dev, state);
drm_atomic_state_free(state);
+ /* As one of the primary mmio accessors, KMS has a high likelihood
+ * of triggering bugs in unclaimed access. After we finish
+ * modesetting, see if an error has been flagged, and if so
+ * enable debugging for the next modeset - and hope we catch
+ * the culprit.
+ *
+ * XXX note that we assume display power is on at this point.
+	 * This might hold true now, but we need to add a PM helper to check
+	 * unclaimed access only when the hardware is on, as atomic commits
+	 * can also happen when the device is completely off.
+ */
+ intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
return 0;
}
@@ -13894,7 +13918,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
struct drm_i915_private *dev_priv;
int crtc_clock, cdclk;
- if (!intel_crtc || !crtc_state)
+ if (!intel_crtc || !crtc_state->base.enable)
return DRM_PLANE_HELPER_NO_SCALING;
dev = intel_crtc->base.dev;
@@ -13943,32 +13967,6 @@ intel_check_primary_plane(struct drm_plane *plane,
&state->visible);
}
-static void
-intel_commit_primary_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_crtc *crtc = state->base.crtc;
- struct drm_framebuffer *fb = state->base.fb;
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- crtc = crtc ? crtc : plane->crtc;
-
- dev_priv->display.update_primary_plane(crtc, fb,
- state->src.x1 >> 16,
- state->src.y1 >> 16);
-}
-
-static void
-intel_disable_primary_plane(struct drm_plane *plane,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
-}
-
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -14053,20 +14051,33 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->plane = pipe;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- primary->commit_plane = intel_commit_primary_plane;
- primary->disable_plane = intel_disable_primary_plane;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
if (INTEL_INFO(dev)->gen >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
+
+ primary->update_plane = skylake_update_primary_plane;
+ primary->disable_plane = skylake_disable_primary_plane;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ intel_primary_formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+
+ primary->update_plane = ironlake_update_primary_plane;
+ primary->disable_plane = i9xx_disable_primary_plane;
} else if (INTEL_INFO(dev)->gen >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
+
+ primary->update_plane = i9xx_update_primary_plane;
+ primary->disable_plane = i9xx_disable_primary_plane;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
+
+ primary->update_plane = i9xx_update_primary_plane;
+ primary->disable_plane = i9xx_disable_primary_plane;
}
drm_universal_plane_init(dev, &primary->base, 0,
@@ -14165,22 +14176,23 @@ static void
intel_disable_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc)
{
- intel_crtc_update_cursor(crtc, false);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->cursor_addr = 0;
+ intel_crtc_update_cursor(crtc, NULL);
}
static void
-intel_commit_cursor_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
+intel_update_cursor_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *state)
{
- struct drm_crtc *crtc = state->base.crtc;
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = plane->dev;
- struct intel_crtc *intel_crtc;
struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
uint32_t addr;
- crtc = crtc ? crtc : plane->crtc;
- intel_crtc = to_intel_crtc(crtc);
-
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -14189,9 +14201,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
addr = obj->phys_handle->busaddr;
intel_crtc->cursor_addr = addr;
-
- if (crtc->state->active)
- intel_crtc_update_cursor(crtc, state->visible);
+ intel_crtc_update_cursor(crtc, state);
}
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -14217,7 +14227,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->plane = pipe;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
cursor->check_plane = intel_check_cursor_plane;
- cursor->commit_plane = intel_commit_cursor_plane;
+ cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
drm_universal_plane_init(dev, &cursor->base, 0,
@@ -14664,10 +14674,12 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
u32 gen = INTEL_INFO(dev)->gen;
if (gen >= 9) {
+ int cpp = drm_format_plane_cpp(pixel_format, 0);
+
/* "The stride in bytes must not exceed the of the size of 8K
* pixels and 32K bytes."
*/
- return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
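+		/* e.g. with cpp = 2 this caps the stride at 16384 bytes (the
+		 * 8K pixel limit); the 32K byte limit governs only for cpp > 4 */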
+ return min(8192 * cpp, 32768);
} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
return 32*1024;
} else if (gen >= 4) {
@@ -14691,6 +14703,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned int aligned_height;
int ret;
u32 pitch_limit, stride_alignment;
@@ -14732,7 +14745,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
}
- stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
+ stride_alignment = intel_fb_stride_alignment(dev_priv,
+ mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
@@ -14824,7 +14838,6 @@ static int intel_framebuffer_init(struct drm_device *dev,
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
- intel_fb->obj->framebuffer_references++;
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
@@ -14832,6 +14845,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
return ret;
}
+ intel_fb->obj->framebuffer_references++;
+
return 0;
}
@@ -14895,8 +14910,6 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.update_primary_plane =
- skylake_update_primary_plane;
} else if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
@@ -14905,8 +14918,6 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_initial_plane_config =
@@ -14915,8 +14926,6 @@ static void intel_init_display(struct drm_device *dev)
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
- dev_priv->display.update_primary_plane =
- ironlake_update_primary_plane;
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
@@ -14924,8 +14933,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- dev_priv->display.update_primary_plane =
- i9xx_update_primary_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
@@ -14933,8 +14940,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- dev_priv->display.update_primary_plane =
- i9xx_update_primary_plane;
}
/* Returns the core display clock speed */
@@ -15240,12 +15245,89 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
intel_update_cdclk(dev);
- intel_prepare_ddi(dev);
+
+ dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
+
intel_init_clock_gating(dev);
intel_enable_gt_powersave(dev);
}
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WMs should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+static void sanitize_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+ int i;
+
+ /* Only supported on platforms that use atomic watermark design */
+ if (!dev_priv->display.program_watermarks)
+ return;
+
+ /*
+ * We need to hold connection_mutex before calling duplicate_state so
+ * that the connector loop is protected.
+ */
+ drm_modeset_acquire_init(&ctx, 0);
+retry:
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ } else if (WARN_ON(ret)) {
+ goto fail;
+ }
+
+ state = drm_atomic_helper_duplicate_state(dev, &ctx);
+ if (WARN_ON(IS_ERR(state)))
+ goto fail;
+
+ ret = intel_atomic_check(dev, state);
+ if (ret) {
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ WARN(true, "Could not determine valid watermarks for inherited state\n");
+ goto fail;
+ }
+
+ /* Write calculated watermark values back */
+ to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
+
+ dev_priv->display.program_watermarks(cs);
+ }
+
+ drm_atomic_state_free(state);
+fail:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
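+
+/*
+ * For reference, a minimal sketch of the acquire-context retry dance used
+ * above (and again in intel_display_resume() further down). The DRM core
+ * calls are the ones this patch itself uses; the locked section is a
+ * placeholder, not part of the patch.
+ */
+static void modeset_lock_retry_sketch(struct drm_device *dev)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	int ret;
+
+	drm_modeset_acquire_init(&ctx, 0);
+retry:
+	ret = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (ret == -EDEADLK) {
+		/* A contended lock was hit: drop what we hold, wait, retry. */
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	/* ... do the locked work here ... */
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+}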
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -15366,6 +15448,13 @@ void intel_modeset_init(struct drm_device *dev)
*/
intel_find_initial_plane_obj(crtc, &plane_config);
}
+
+ /*
+ * Make sure hardware watermarks really match the state we read out.
+	 * Note that we need to do this after reconstructing the BIOS fbs,
+ * since the watermark calculation done here will use pstate->fb.
+ */
+ sanitize_watermarks(dev);
}
static void intel_enable_pipe_a(struct drm_device *dev)
@@ -15422,6 +15511,17 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
return false;
}
+static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_connector *connector;
+
+ for_each_connector_on_encoder(dev, &encoder->base, connector)
+ return true;
+
+ return false;
+}
+
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -15496,6 +15596,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
crtc->base.state->connector_mask = 0;
+ crtc->base.state->encoder_mask = 0;
/* Because we only establish the connector -> encoder ->
* crtc links if something is active, this means the
@@ -15531,7 +15632,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_connector *connector;
struct drm_device *dev = encoder->base.dev;
- bool active = false;
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
@@ -15539,15 +15639,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
bool has_active_crtc = encoder->base.crtc &&
to_intel_crtc(encoder->base.crtc)->active;
- for_each_intel_connector(dev, connector) {
- if (connector->base.encoder != &encoder->base)
- continue;
-
- active = true;
- break;
- }
-
- if (active && !has_active_crtc) {
+ if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
@@ -15640,16 +15732,40 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_connector *connector;
int i;
+ dev_priv->active_crtcs = 0;
+
for_each_intel_crtc(dev, crtc) {
- __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
- memset(crtc->config, 0, sizeof(*crtc->config));
- crtc->config->base.crtc = &crtc->base;
+ struct intel_crtc_state *crtc_state = crtc->config;
+ int pixclk = 0;
- crtc->active = dev_priv->display.get_pipe_config(crtc,
- crtc->config);
+ __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
+ memset(crtc_state, 0, sizeof(*crtc_state));
+ crtc_state->base.crtc = &crtc->base;
- crtc->base.state->active = crtc->active;
- crtc->base.enabled = crtc->active;
+ crtc_state->base.active = crtc_state->base.enable =
+ dev_priv->display.get_pipe_config(crtc, crtc_state);
+
+ crtc->base.enabled = crtc_state->base.enable;
+ crtc->active = crtc_state->base.active;
+
+ if (crtc_state->base.active) {
+ dev_priv->active_crtcs |= 1 << crtc->pipe;
+
+ if (IS_BROADWELL(dev_priv)) {
+ pixclk = ilk_pipe_pixel_rate(crtc_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (crtc_state->ips_enabled)
+ pixclk = DIV_ROUND_UP(pixclk * 100, 95);
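+					/* e.g. a 400000 kHz pipe clock with IPS needs
+					 * cdclk >= DIV_ROUND_UP(400000 * 100, 95) = 421053 kHz */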
+ } else if (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_BROXTON(dev_priv))
+ pixclk = crtc_state->base.adjusted_mode.crtc_clock;
+ else
+ WARN_ON(dev_priv->display.modeset_calc_cdclk);
+ }
+
+ dev_priv->min_pixclk[crtc->pipe] = pixclk;
readout_plane_state(crtc);
@@ -15713,6 +15829,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
*/
encoder->base.crtc->state->connector_mask |=
1 << drm_connector_index(&connector->base);
+ encoder->base.crtc->state->encoder_mask |=
+ 1 << drm_encoder_index(&encoder->base);
}
} else {
@@ -15809,63 +15927,76 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
unsigned long put_domains;
- put_domains = modeset_get_crtc_power_domains(&crtc->base);
+ put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
intel_display_set_init_power(dev_priv, false);
+
+ intel_fbc_init_pipe_state(dev_priv);
}
void intel_display_resume(struct drm_device *dev)
{
- struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
- struct intel_connector *conn;
- struct intel_plane *plane;
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+ struct drm_modeset_acquire_ctx ctx;
int ret;
+ bool setup = false;
- if (!state)
- return;
-
- state->acquire_ctx = dev->mode_config.acquire_ctx;
+ dev_priv->modeset_restore_state = NULL;
- /* preserve complete old state, including dpll */
- intel_atomic_get_shared_dpll_state(state);
+ /*
+	 * This is a kludge because with real atomic modeset mode_config.mutex
+ * won't be taken. Unfortunately some probed state like
+ * audio_codec_enable is still protected by mode_config.mutex, so lock
+ * it here for now.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(&ctx, 0);
- for_each_crtc(dev, crtc) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_crtc_state(state, crtc);
+retry:
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
- goto err;
+ if (ret == 0 && !setup) {
+ setup = true;
- /* force a restore */
- crtc_state->mode_changed = true;
+ intel_modeset_setup_hw_state(dev);
+ i915_redisable_vga(dev);
}
- for_each_intel_plane(dev, plane) {
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
- if (ret)
- goto err;
- }
+ if (ret == 0 && state) {
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i;
- for_each_intel_connector(dev, conn) {
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
- if (ret)
- goto err;
+ state->acquire_ctx = &ctx;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * Force recalculation even if we restore
+ * current state. With fast modeset this may not result
+ * in a modeset when the state is compatible.
+ */
+ crtc_state->mode_changed = true;
+ }
+
+ ret = drm_atomic_commit(state);
}
- intel_modeset_setup_hw_state(dev);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
- i915_redisable_vga(dev);
- ret = drm_atomic_commit(state);
- if (!ret)
- return;
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ mutex_unlock(&dev->mode_config.mutex);
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_free(state);
+ if (ret) {
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_atomic_state_free(state);
+ }
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -15874,9 +16005,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
struct drm_i915_gem_object *obj;
int ret;
- mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
- mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
@@ -15943,7 +16072,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_unregister_dsm_handler();
- intel_fbc_disable(dev_priv);
+ intel_fbc_global_disable(dev_priv);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
@@ -15956,9 +16085,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_overlay(dev);
- mutex_lock(&dev->struct_mutex);
intel_cleanup_gt_powersave(dev);
- mutex_unlock(&dev->struct_mutex);
intel_teardown_gmbus(dev);
}
@@ -16153,7 +16280,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
for_each_pipe(dev_priv, i) {
err_printf(m, "Pipe [%d]:\n", i);
err_printf(m, " Power: %s\n",
- error->pipe[i].power_domain_on ? "on" : "off");
+ onoff(error->pipe[i].power_domain_on));
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
@@ -16181,7 +16308,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, "CPU transcoder: %c\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",
- error->transcoder[i].power_domain_on ? "on" : "off");
+ onoff(error->transcoder[i].power_domain_on));
err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
@@ -16191,24 +16318,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
-
-void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
-{
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(dev, crtc) {
- struct intel_unpin_work *work;
-
- spin_lock_irq(&dev->event_lock);
-
- work = crtc->unpin_work;
-
- if (work && work->event &&
- work->event->base.file_priv == file) {
- kfree(work->event);
- work->event = NULL;
- }
-
- spin_unlock_irq(&dev->event_lock);
- }
-}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1d8de43bed56..412a34c39522 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -157,14 +157,9 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
u8 source_max, sink_max;
- source_max = 4;
- if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
- (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
- source_max = 2;
-
+ source_max = intel_dig_port->max_lanes;
sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
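+	/* e.g. a port wired for 2 lanes (max_lanes = 2) driving a 4-lane
+	 * capable sink negotiates 2 lanes */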
return min(source_max, sink_max);
@@ -208,6 +203,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
if (is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
@@ -225,7 +221,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
- if (mode_rate > max_rate)
+ if (mode_rate > max_rate || target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
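+	/*
+	 * Worked example, assuming the usual formulas behind
+	 * intel_dp_max_data_rate() and intel_dp_link_required(): 4 lanes at
+	 * HBR (270000 kHz link clock) give 270000 * 4 * 8 / 10 = 864000 kB/s
+	 * after 8b/10b coding, while a 148500 kHz mode at 18 bpp needs
+	 * 148500 * 18 / 8 = 334125 kB/s, so that mode passes this check.
+	 */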
if (mode->clock < 10000)
@@ -340,8 +336,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = IS_CHERRYVIEW(dev) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
- vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
- &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+ if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+ &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
+ DRM_ERROR("Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
+ return;
+ }
}
/*
@@ -980,7 +980,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (WARN_ON(txsize > 20))
return -E2BIG;
- memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+ if (msg->buffer)
+ memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+ else
+ WARN_ON(msg->size);
ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
if (ret > 0) {
@@ -1189,7 +1192,6 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
int ret;
@@ -1200,7 +1202,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
if (!intel_dp->aux.name)
return -ENOMEM;
- intel_dp->aux.dev = dev->dev;
+ intel_dp->aux.dev = connector->base.kdev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
DRM_DEBUG_KMS("registering %s bus for %s\n",
@@ -1215,16 +1217,6 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
return ret;
}
- ret = sysfs_create_link(&connector->base.kdev->kobj,
- &intel_dp->aux.ddc.dev.kobj,
- intel_dp->aux.ddc.dev.kobj.name);
- if (ret < 0) {
- DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
- intel_dp->aux.name, ret);
- intel_dp_aux_fini(intel_dp);
- return ret;
- }
-
return 0;
}
@@ -1233,9 +1225,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
- if (!intel_connector->mst_port)
- sysfs_remove_link(&intel_connector->base.kdev->kobj,
- intel_dp->aux.ddc.dev.kobj.name);
+ intel_dp_aux_fini(intel_dp);
intel_connector_unregister(intel_connector);
}
@@ -1812,12 +1802,21 @@ static void wait_panel_off(struct intel_dp *intel_dp)
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
+ ktime_t panel_power_on_time;
+ s64 panel_power_off_duration;
+
DRM_DEBUG_KMS("Wait for panel power cycle\n");
+	/* Take the difference between the current time and the panel power off
+	 * time, then make the panel wait out the rest of t11_t12 if needed. */
+ panel_power_on_time = ktime_get_boottime();
+ panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
+
/* When we disable the VDD override bit last we have to do the manual
* wait. */
- wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
- intel_dp->panel_power_cycle_delay);
+ if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
+ wait_remaining_ms_from_jiffies(jiffies,
+ intel_dp->panel_power_cycle_delay - panel_power_off_duration);
wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
@@ -1969,7 +1968,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
if ((pp & POWER_TARGET_ON) == 0)
- intel_dp->last_power_cycle = jiffies;
+ intel_dp->panel_power_off_time = ktime_get_boottime();
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
@@ -2118,7 +2117,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- intel_dp->last_power_cycle = jiffies;
+ intel_dp->panel_power_off_time = ktime_get_boottime();
wait_panel_off(intel_dp);
/* We got a reference when we enabled the VDD. */
@@ -2243,11 +2242,6 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
_intel_edp_backlight_off(intel_dp);
}
-static const char *state_string(bool enabled)
-{
- return enabled ? "on" : "off";
-}
-
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -2257,7 +2251,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
port_name(dig_port->port),
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@ -2267,7 +2261,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
@@ -4024,7 +4018,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
} while (--attempts && count);
if (attempts == 0) {
- DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
+ DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
ret = -ETIMEDOUT;
}
@@ -4564,7 +4558,7 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
{
if (HAS_PCH_IBX(dev_priv))
return ibx_digital_port_connected(dev_priv, port);
- if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(dev_priv))
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
@@ -4884,7 +4878,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- intel_dp_aux_fini(intel_dp);
intel_dp_mst_encoder_cleanup(intel_dig_port);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -4905,7 +4898,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -4947,7 +4940,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
@@ -5132,7 +5125,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
- intel_dp->last_power_cycle = jiffies;
+ intel_dp->panel_power_off_time = ktime_get_boottime();
intel_dp->last_power_on = jiffies;
intel_dp->last_backlight_off = jiffies;
}
@@ -5849,6 +5842,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
int type, ret;
+ if (WARN(intel_dig_port->max_lanes < 1,
+ "Not enough lanes (%d) for DP on port %c\n",
+ intel_dig_port->max_lanes, port_name(port)))
+ return false;
+
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
@@ -6045,8 +6043,8 @@ intel_dp_init(struct drm_device *dev,
}
intel_dig_port->port = port;
- dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->dp.output_reg = output_reg;
+ intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (IS_CHERRYVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index fa0dabf578dc..2c999725b3d4 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -78,8 +78,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
return false;
}
- if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
- pipe_config->has_audio = true;
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn;
@@ -104,11 +102,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@@ -119,10 +112,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
if (ret) {
DRM_ERROR("failed to update payload %d\n", ret);
}
- if (intel_crtc->config->has_audio) {
- intel_audio_codec_disable(encoder);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
- }
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
@@ -184,7 +173,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
intel_mst->port = found->port;
if (intel_dp->active_mst_links == 0) {
- intel_ddi_clk_select(encoder, intel_crtc->config);
+ intel_prepare_ddi_buffer(&intel_dig_port->base);
+
+ intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
intel_dp_set_link_params(intel_dp, intel_crtc->config);
@@ -219,7 +210,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port = intel_dig_port->port;
int ret;
@@ -232,13 +222,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-
- if (crtc->config->has_audio) {
- DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
- pipe_name(crtc->pipe));
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- intel_audio_codec_enable(encoder);
- }
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -264,9 +247,6 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
pipe_config->has_dp_encoder = true;
- pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, crtc);
-
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -369,6 +349,8 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@@ -376,6 +358,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
+ if (mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -499,6 +484,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ intel_connector->unregister(intel_connector);
+
/* need to nuke the connector */
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
@@ -512,11 +499,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
- drm_modeset_unlock_all(dev);
-
- intel_connector->unregister(intel_connector);
- drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index df7f3cb66056..9d0770c23fde 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -246,7 +246,18 @@ struct intel_atomic_state {
struct drm_atomic_state base;
unsigned int cdclk;
- bool dpll_set;
+
+ /*
+	 * The calculated device cdclk; it can differ from cdclk
+	 * only when all crtcs are DPMS off.
+ */
+ unsigned int dev_cdclk;
+
+ bool dpll_set, modeset;
+
+ unsigned int active_crtcs;
+ unsigned int min_pixclk[I915_MAX_PIPES];
+
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
};
@@ -368,6 +379,7 @@ struct intel_crtc_state {
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool wm_changed; /* watermarks are updated */
+ bool fb_changed; /* fb on any of the planes is changed */
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
@@ -481,6 +493,8 @@ struct intel_crtc_state {
bool ips_enabled;
+ bool enable_fbc;
+
bool double_wide;
bool dp_encoder_is_mst;
@@ -531,16 +545,13 @@ struct intel_mmio_flip {
*/
struct intel_crtc_atomic_commit {
/* Sleepable operations to perform before commit */
- bool disable_fbc;
- bool disable_ips;
- bool pre_disable_primary;
/* Sleepable operations to perform after commit */
unsigned fb_bits;
- bool wait_vblank;
- bool update_fbc;
bool post_enable_primary;
- unsigned update_sprite_watermarks;
+
+ /* Sleepable operations to perform before and after commit */
+ bool update_fbc;
};
struct intel_crtc {
@@ -564,7 +575,7 @@ struct intel_crtc {
/* Display surface base address adjustment for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
- unsigned long dspaddr_offset;
+ u32 dspaddr_offset;
int adjusted_x;
int adjusted_y;
@@ -647,23 +658,17 @@ struct intel_plane {
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
- * the intel_plane_state structure and accessed via drm_plane->state.
+ * the intel_plane_state structure and accessed via plane_state.
*/
void (*update_plane)(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h);
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
- void (*commit_plane)(struct drm_plane *plane,
- struct intel_plane_state *state);
};
struct intel_watermark_params {
@@ -765,9 +770,9 @@ struct intel_dp {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
- unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
+ ktime_t panel_power_off_time;
struct notifier_block edp_notifier;
@@ -817,6 +822,7 @@ struct intel_digital_port {
struct intel_hdmi hdmi;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
+ uint8_t max_lanes;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
@@ -903,9 +909,7 @@ struct intel_unpin_work {
};
struct intel_load_detect_pipe {
- struct drm_framebuffer *release_fb;
- bool load_detect_temp;
- int dpms_mode;
+ struct drm_atomic_state *restore_state;
};
static inline struct intel_encoder *
@@ -988,6 +992,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
unsigned int pipe_mask);
+void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ unsigned int pipe_mask);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -996,7 +1002,7 @@ void intel_crt_init(struct drm_device *dev);
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
-void intel_prepare_ddi(struct drm_device *dev);
+void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
@@ -1013,8 +1019,6 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
struct intel_encoder *
@@ -1041,8 +1045,8 @@ unsigned int intel_fb_align_height(struct drm_device *dev,
uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
enum fb_op_origin origin);
-u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
- uint32_t pixel_format);
+u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, uint32_t pixel_format);
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
@@ -1126,9 +1130,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
-unsigned int
-intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
- uint64_t fb_format_modifier, unsigned int plane);
+unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, unsigned int cpp);
static inline bool
intel_rotation_90_or_270(unsigned int rotation)
@@ -1149,8 +1152,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
-void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
- const struct dpll *dpll);
+int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+ const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
/* modesetting asserts */
@@ -1167,11 +1170,11 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
- int *x, int *y,
- unsigned int tiling_mode,
- unsigned int bpp,
- unsigned int pitch);
+u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
+ int *x, int *y,
+ uint64_t fb_modifier,
+ unsigned int cpp,
+ unsigned int pitch);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@@ -1207,7 +1210,6 @@ enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
-void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
@@ -1222,7 +1224,7 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
-void intel_csr_load_program(struct drm_i915_private *);
+bool intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);
/* intel_dp.c */
@@ -1234,6 +1236,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
@@ -1323,13 +1327,16 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
#endif
/* intel_fbc.c */
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+ struct drm_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_deactivate(struct intel_crtc *crtc);
-void intel_fbc_update(struct intel_crtc *crtc);
+void intel_fbc_pre_update(struct intel_crtc *crtc);
+void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc);
-void intel_fbc_disable(struct drm_i915_private *dev_priv);
-void intel_fbc_disable_crtc(struct intel_crtc *crtc);
+void intel_fbc_disable(struct intel_crtc *crtc);
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
@@ -1558,6 +1565,7 @@ void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
+int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 0193c62a53ef..01b8e9f4c272 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -478,8 +478,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- intel_dsi_prepare(encoder);
intel_enable_dsi_pll(encoder);
+ intel_dsi_prepare(encoder);
/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
@@ -634,7 +634,6 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u32 val;
DRM_DEBUG_KMS("\n");
@@ -642,9 +641,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
intel_dsi_clear_device_ready(encoder);
- val = I915_READ(DSPCLK_GATE_D);
- val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ if (!IS_BROXTON(dev_priv)) {
+ u32 val;
+
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
+ }
drm_panel_unprepare(intel_dsi->panel);
@@ -709,7 +712,7 @@ out:
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- u32 pclk = 0;
+ u32 pclk;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
@@ -720,12 +723,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
*/
pipe_config->dpll_hw_state.dpll_md = 0;
- if (IS_BROXTON(encoder->base.dev))
- pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
- else if (IS_VALLEYVIEW(encoder->base.dev) ||
- IS_CHERRYVIEW(encoder->base.dev))
- pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
-
+ pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
if (!pclk)
return;
@@ -787,10 +785,9 @@ static void set_dsi_timings(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- unsigned int bpp = intel_crtc->config->pipe_bpp;
+ unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -861,7 +858,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum port port;
- unsigned int bpp = intel_crtc->config->pipe_bpp;
+ unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 02551ff228c2..92f39227b361 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -34,6 +34,8 @@
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2
+int dsi_pixel_format_bpp(int pixel_format);
+
struct intel_dsi_host;
struct intel_dsi {
@@ -126,8 +128,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
-extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
+extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index e8113ad65477..7f145b4fec6a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -234,28 +234,33 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
if (!gtable[gpio].init) {
/* program the function */
/* FIXME: remove constant below */
- vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
+ vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
+ 0x2000CC00);
gtable[gpio].init = 1;
}
val = 0x4 | action;
/* pull up/down */
- vlv_gpio_nc_write(dev_priv, pad, val);
+ vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
mutex_unlock(&dev_priv->sb_lock);
out:
return data;
}
+static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
+{
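+	/* Byte 6 of the element holds the payload length, so skip the
+	 * 7 byte header plus the payload. */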
+ return data + *(data + 6) + 7;
+}
+
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
- NULL, /* reserved */
- mipi_exec_send_packet,
- mipi_exec_delay,
- mipi_exec_gpio,
- NULL, /* status read; later */
+ [MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
+ [MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
+ [MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
+ [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
};
/*
@@ -265,107 +270,114 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/
static const char * const seq_name[] = {
- "UNDEFINED",
- "MIPI_SEQ_ASSERT_RESET",
- "MIPI_SEQ_INIT_OTP",
- "MIPI_SEQ_DISPLAY_ON",
- "MIPI_SEQ_DISPLAY_OFF",
- "MIPI_SEQ_DEASSERT_RESET"
+ [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+ [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
+ [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
+ [MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
+ [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+ [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
+ [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
+ [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
+ [MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF",
+ [MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON",
+ [MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF",
};
-static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
+static const char *sequence_name(enum mipi_seq seq_id)
+{
+ if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
+ return seq_name[seq_id];
+ else
+ return "(unknown)";
+}
+
+static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
{
+ struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+ struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- int index;
- if (!data)
+ if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
return;
- DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
+ data = dev_priv->vbt.dsi.sequence[seq_id];
+ if (!data) {
+ DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
+ seq_id, sequence_name(seq_id));
+ return;
+ }
- /* go to the first element of the sequence */
- data++;
+ WARN_ON(*data != seq_id);
- /* parse each byte till we reach end of sequence byte - 0x00 */
- while (1) {
- index = *data;
- mipi_elem_exec = exec_elem[index];
- if (!mipi_elem_exec) {
- DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
- return;
- }
+ DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
+ seq_id, sequence_name(seq_id));
- /* goto element payload */
- data++;
+ /* Skip Sequence Byte. */
+ data++;
- /* execute the element specific rotines */
- data = mipi_elem_exec(intel_dsi, data);
+ /* Skip Size of Sequence. */
+ if (dev_priv->vbt.dsi.seq_version >= 3)
+ data += 4;
- /*
- * After processing the element, data should point to
- * next element or end of sequence
- * check if have we reached end of sequence
- */
- if (*data == 0x00)
+ while (1) {
+ u8 operation_byte = *data++;
+ u8 operation_size = 0;
+
+ if (operation_byte == MIPI_SEQ_ELEM_END)
break;
+
+ if (operation_byte < ARRAY_SIZE(exec_elem))
+ mipi_elem_exec = exec_elem[operation_byte];
+ else
+ mipi_elem_exec = NULL;
+
+ /* Size of Operation. */
+ if (dev_priv->vbt.dsi.seq_version >= 3)
+ operation_size = *data++;
+
+ if (mipi_elem_exec) {
+ data = mipi_elem_exec(intel_dsi, data);
+ } else if (operation_size) {
+ /* We have size, skip. */
+ DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
+ operation_byte);
+ data += operation_size;
+ } else {
+ /* No size, can't skip without parsing. */
+ DRM_ERROR("Unsupported MIPI operation byte %u\n",
+ operation_byte);
+ return;
+ }
}
}
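+
+/*
+ * The seq_version >= 3 layout the loop above walks, inferred from the
+ * parser itself (payload semantics are element specific):
+ *
+ *   [sequence byte][4-byte size of sequence]
+ *   repeated: [operation byte][1-byte operation size][payload ...]
+ *   [MIPI_SEQ_ELEM_END]
+ *
+ * A skip-only walker over that layout, as a sketch:
+ */
+static const u8 *skip_v3_sequence_sketch(const u8 *data)
+{
+	data += 1 + 4;			/* sequence byte + size of sequence */
+
+	while (*data != MIPI_SEQ_ELEM_END) {
+		u8 op_size;
+
+		data++;			/* operation byte */
+		op_size = *data++;	/* size of the payload */
+		data += op_size;	/* payload itself */
+	}
+
+	return data + 1;		/* consume the END marker */
+}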
static int vbt_panel_prepare(struct drm_panel *panel)
{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const u8 *sequence;
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
- generic_exec_sequence(intel_dsi, sequence);
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
- generic_exec_sequence(intel_dsi, sequence);
+ generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+ generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
return 0;
}
static int vbt_panel_unprepare(struct drm_panel *panel)
{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const u8 *sequence;
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
- generic_exec_sequence(intel_dsi, sequence);
+ generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
return 0;
}
static int vbt_panel_enable(struct drm_panel *panel)
{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const u8 *sequence;
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
- generic_exec_sequence(intel_dsi, sequence);
+ generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
return 0;
}
static int vbt_panel_disable(struct drm_panel *panel)
{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const u8 *sequence;
-
- sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
- generic_exec_sequence(intel_dsi, sequence);
+ generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
return 0;
}
@@ -428,10 +440,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
- if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
- bits_per_pixel = 18;
- else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
- bits_per_pixel = 16;
+ bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
@@ -685,6 +694,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* This is cheating a bit with the cleanup. */
vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
+ if (!vbt_panel)
+ return NULL;
vbt_panel->intel_dsi = intel_dsi;
drm_panel_init(&vbt_panel->panel);
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index fbd2b51810ca..70883c54cb0a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -30,15 +30,7 @@
#include "i915_drv.h"
#include "intel_dsi.h"
-#define DSI_HSS_PACKET_SIZE 4
-#define DSI_HSE_PACKET_SIZE 4
-#define DSI_HSA_PACKET_EXTRA_SIZE 6
-#define DSI_HBP_PACKET_EXTRA_SIZE 6
-#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
-#define DSI_HFP_PACKET_EXTRA_SIZE 6
-#define DSI_EOTP_PACKET_SIZE 4
-
-static int dsi_pixel_format_bpp(int pixel_format)
+int dsi_pixel_format_bpp(int pixel_format)
{
int bpp;
@@ -71,77 +63,6 @@ static const u32 lfsr_converts[] = {
71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
};
-#ifdef DSI_CLK_FROM_RR
-
-static u32 dsi_rr_formula(const struct drm_display_mode *mode,
- int pixel_format, int video_mode_format,
- int lane_count, bool eotp)
-{
- u32 bpp;
- u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
- u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
- u32 bytes_per_line, bytes_per_frame;
- u32 num_frames;
- u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
- u32 dsi_bit_clock_hz;
- u32 dsi_clk;
-
- bpp = dsi_pixel_format_bpp(pixel_format);
-
- hactive = mode->hdisplay;
- vactive = mode->vdisplay;
- hfp = mode->hsync_start - mode->hdisplay;
- hsync = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
-
- vfp = mode->vsync_start - mode->vdisplay;
- vsync = mode->vsync_end - mode->vsync_start;
- vbp = mode->vtotal - mode->vsync_end;
-
- hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
- hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
- hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
- hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
-
- bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
- DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
- hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
- hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
- hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
-
- /*
- * XXX: Need to accurately calculate LP to HS transition timeout and add
- * it to bytes_per_line/bytes_per_frame.
- */
-
- if (eotp && video_mode_format == VIDEO_MODE_BURST)
- bytes_per_line += DSI_EOTP_PACKET_SIZE;
-
- bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
- vactive * bytes_per_line + vfp * bytes_per_line;
-
- if (eotp &&
- (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
- video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
- bytes_per_frame += DSI_EOTP_PACKET_SIZE;
-
- num_frames = drm_mode_vrefresh(mode);
- bytes_per_x_frames = num_frames * bytes_per_frame;
-
- bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
-
- /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
- dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
- dsi_clk = dsi_bit_clock_hz / 1000;
-
- if (eotp && video_mode_format == VIDEO_MODE_BURST)
- dsi_clk *= 2;
-
- return dsi_clk;
-}
-
-#else
-
/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
@@ -155,8 +76,6 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
return dsi_clk_khz;
}
-#endif
-
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct dsi_mnp *dsi_mnp, int target_dsi_clk)
{
@@ -322,7 +241,7 @@ static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
bpp, pipe_bpp);
}
-u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -384,7 +303,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
-u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
u32 pclk;
u32 dsi_clk;
@@ -419,6 +338,14 @@ u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+{
+ if (IS_BROXTON(encoder->base.dev))
+ return bxt_dsi_get_pclk(encoder, pipe_bpp);
+ else
+ return vlv_dsi_get_pclk(encoder, pipe_bpp);
+}
+
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
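
The hunks above drop the never-compiled DSI_CLK_FROM_RR path and keep only the pixel-clock derivation. For reference, dsi_clk_from_pclk() computes the DSI link rate as pixel clock * bits per pixel / lane count; a hedged sketch of the body elided from the hunk:

    /* sketch; the real body is not shown in the hunk above */
    static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
    {
            u32 bpp = dsi_pixel_format_bpp(pixel_format);

            /* e.g. 1080p: 148500 kHz * 24 bpp / 4 lanes = 891000 kHz */
            return DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
    }

The new intel_dsi_get_pclk() wrapper then frees callers from caring whether they run on Broxton or Valleyview.
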
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index a1988a486b92..0f0492f4a357 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -43,7 +43,7 @@
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
- return dev_priv->fbc.activate != NULL;
+ return HAS_FBC(dev_priv);
}
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
@@ -56,6 +56,11 @@ static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
return INTEL_INFO(dev_priv)->gen < 4;
}
+static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
+{
+ return INTEL_INFO(dev_priv)->gen <= 3;
+}
+
/*
* In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -74,19 +79,17 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
*/
-static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
+static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
int *width, int *height)
{
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->base.primary->state);
int w, h;
- if (intel_rotation_90_or_270(plane_state->base.rotation)) {
- w = drm_rect_height(&plane_state->src) >> 16;
- h = drm_rect_width(&plane_state->src) >> 16;
+ if (intel_rotation_90_or_270(cache->plane.rotation)) {
+ w = cache->plane.src_h;
+ h = cache->plane.src_w;
} else {
- w = drm_rect_width(&plane_state->src) >> 16;
- h = drm_rect_height(&plane_state->src) >> 16;
+ w = cache->plane.src_w;
+ h = cache->plane.src_h;
}
if (width)
@@ -95,26 +98,23 @@ static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
*height = h;
}
-static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
- struct drm_framebuffer *fb)
+static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
+ struct intel_fbc_state_cache *cache)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
int lines;
- intel_fbc_get_plane_source_size(crtc, NULL, &lines);
+ intel_fbc_get_plane_source_size(cache, NULL, &lines);
if (INTEL_INFO(dev_priv)->gen >= 7)
lines = min(lines, 2048);
/* Hardware needs the full buffer stride, not just the active area. */
- return lines * fb->pitches[0];
+ return lines * cache->fb.stride;
}
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
u32 fbc_ctl;
- dev_priv->fbc.active = false;
-
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -130,21 +130,17 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
}
}
-static void i8xx_fbc_activate(struct intel_crtc *crtc)
+static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_framebuffer *fb = crtc->base.primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
int cfb_pitch;
int i;
u32 fbc_ctl;
- dev_priv->fbc.active = true;
-
/* Note: fbc.threshold == 1 for i8xx */
- cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
- if (fb->pitches[0] < cfb_pitch)
- cfb_pitch = fb->pitches[0];
+ cfb_pitch = params->cfb_size / FBC_LL_SIZE;
+ if (params->fb.stride < cfb_pitch)
+ cfb_pitch = params->fb.stride;
/* FBC_CTL wants 32B or 64B units */
if (IS_GEN2(dev_priv))
@@ -161,9 +157,9 @@ static void i8xx_fbc_activate(struct intel_crtc *crtc)
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
+ fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
+ I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
}
/* enable it... */
@@ -173,7 +169,7 @@ static void i8xx_fbc_activate(struct intel_crtc *crtc)
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= obj->fence_reg;
+ fbc_ctl |= params->fb.fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
@@ -182,23 +178,19 @@ static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
-static void g4x_fbc_activate(struct intel_crtc *crtc)
+static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_framebuffer *fb = crtc->base.primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
- dev_priv->fbc.active = true;
-
- dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
+ if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
- I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
+ I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -208,8 +200,6 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
u32 dpfc_ctl;
- dev_priv->fbc.active = false;
-
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -230,19 +220,14 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
POSTING_READ(MSG_FBC_REND_STATE);
}
-static void ilk_fbc_activate(struct intel_crtc *crtc)
+static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_framebuffer *fb = crtc->base.primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
- unsigned int y_offset;
- dev_priv->fbc.active = true;
-
- dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
+ if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
threshold++;
switch (threshold) {
@@ -259,18 +244,17 @@ static void ilk_fbc_activate(struct intel_crtc *crtc)
}
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
- dpfc_ctl |= obj->fence_reg;
+ dpfc_ctl |= params->fb.fence_reg;
- y_offset = get_crtc_fence_y_offset(crtc);
- I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
- I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+ I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
+ SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
}
intel_fbc_recompress(dev_priv);
@@ -280,8 +264,6 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
u32 dpfc_ctl;
- dev_priv->fbc.active = false;
-
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
@@ -295,21 +277,17 @@ static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
-static void gen7_fbc_activate(struct intel_crtc *crtc)
+static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_framebuffer *fb = crtc->base.primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
- dev_priv->fbc.active = true;
-
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
- dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
+ dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
- if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
threshold++;
switch (threshold) {
@@ -337,20 +315,60 @@ static void gen7_fbc_activate(struct intel_crtc *crtc)
ILK_FBCQ_DIS);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
- I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
+ I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
+ I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
HSW_FBCQ_DIS);
}
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
+ SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
intel_fbc_recompress(dev_priv);
}
+static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_INFO(dev_priv)->gen >= 5)
+ return ilk_fbc_is_active(dev_priv);
+ else if (IS_GM45(dev_priv))
+ return g4x_fbc_is_active(dev_priv);
+ else
+ return i8xx_fbc_is_active(dev_priv);
+}
+
+static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->active = true;
+
+ if (INTEL_INFO(dev_priv)->gen >= 7)
+ gen7_fbc_activate(dev_priv);
+ else if (INTEL_INFO(dev_priv)->gen >= 5)
+ ilk_fbc_activate(dev_priv);
+ else if (IS_GM45(dev_priv))
+ g4x_fbc_activate(dev_priv);
+ else
+ i8xx_fbc_activate(dev_priv);
+}
+
+static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->active = false;
+
+ if (INTEL_INFO(dev_priv)->gen >= 5)
+ ilk_fbc_deactivate(dev_priv);
+ else if (IS_GM45(dev_priv))
+ g4x_fbc_deactivate(dev_priv);
+ else
+ i8xx_fbc_deactivate(dev_priv);
+}
+
/**
* intel_fbc_is_active - Is FBC active?
* @dev_priv: i915 device instance
@@ -364,24 +382,24 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
return dev_priv->fbc.active;
}
-static void intel_fbc_activate(const struct drm_framebuffer *fb)
-{
- struct drm_i915_private *dev_priv = fb->dev->dev_private;
- struct intel_crtc *crtc = dev_priv->fbc.crtc;
-
- dev_priv->fbc.activate(crtc);
-
- dev_priv->fbc.fb_id = fb->base.id;
- dev_priv->fbc.y = crtc->base.y;
-}
-
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct drm_i915_private *dev_priv =
container_of(__work, struct drm_i915_private, fbc.work.work);
- struct intel_fbc_work *work = &dev_priv->fbc.work;
- struct intel_crtc *crtc = dev_priv->fbc.crtc;
- int delay_ms = 50;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_work *work = &fbc->work;
+ struct intel_crtc *crtc = fbc->crtc;
+ struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
+
+ if (drm_crtc_vblank_get(&crtc->base)) {
+ DRM_ERROR("vblank not available for FBC on pipe %c\n",
+ pipe_name(crtc->pipe));
+
+ mutex_lock(&fbc->lock);
+ work->scheduled = false;
+ mutex_unlock(&fbc->lock);
+ return;
+ }
retry:
/* Delay the actual enabling to let pageflipping cease and the
@@ -390,142 +408,97 @@ retry:
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- *
* WaFbcWaitForVBlankBeforeEnable:ilk,snb
+ *
+ * It is also worth mentioning that since work->scheduled_vblank can be
+ * updated multiple times by other threads, hitting the timeout is
+ * not an error condition. We'll just end up hitting the "goto retry"
+ * case below.
*/
- wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
+ wait_event_timeout(vblank->queue,
+ drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
+ msecs_to_jiffies(50));
- mutex_lock(&dev_priv->fbc.lock);
+ mutex_lock(&fbc->lock);
/* Were we cancelled? */
if (!work->scheduled)
goto out;
/* Were we delayed again while this function was sleeping? */
- if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
- jiffies)) {
- mutex_unlock(&dev_priv->fbc.lock);
+ if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
+ mutex_unlock(&fbc->lock);
goto retry;
}
- if (crtc->base.primary->fb == work->fb)
- intel_fbc_activate(work->fb);
+ intel_fbc_hw_activate(dev_priv);
work->scheduled = false;
out:
- mutex_unlock(&dev_priv->fbc.lock);
-}
-
-static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
-{
- WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
- dev_priv->fbc.work.scheduled = false;
+ mutex_unlock(&fbc->lock);
+ drm_crtc_vblank_put(&crtc->base);
}
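
The reworked worker above swaps the jiffies-based delay for a wait on the CRTC's vblank counter. The reference pattern it depends on, roughly:

    /* sketch of the vblank-reference pattern used by the work function:
     *   drm_crtc_vblank_get()  - enables vblank interrupts and takes a
     *                            reference so the counter keeps ticking
     *   wait_event_timeout()   - sleeps until drm_crtc_vblank_count()
     *                            advances past work->scheduled_vblank
     *   drm_crtc_vblank_put()  - drops the reference on every exit path
     */

Note the error path clears work->scheduled under fbc->lock, so a concurrent scheduler always sees a consistent state.
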
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct intel_fbc_work *work = &dev_priv->fbc.work;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_work *work = &fbc->work;
+
+ WARN_ON(!mutex_is_locked(&fbc->lock));
- WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+ if (drm_crtc_vblank_get(&crtc->base)) {
+ DRM_ERROR("vblank not available for FBC on pipe %c\n",
+ pipe_name(crtc->pipe));
+ return;
+ }
- /* It is useless to call intel_fbc_cancel_work() in this function since
- * we're not releasing fbc.lock, so it won't have an opportunity to grab
- * it to discover that it was cancelled. So we just update the expected
- * jiffy count. */
- work->fb = crtc->base.primary->fb;
+ /* It is useless to call intel_fbc_cancel_work() or cancel_work() in
+ * this function since we're not releasing fbc.lock, so it won't have an
+ * opportunity to grab it to discover that it was cancelled. So we just
+ * update the expected vblank count. */
work->scheduled = true;
- work->enable_jiffies = jiffies;
+ work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
+ drm_crtc_vblank_put(&crtc->base);
schedule_work(&work->work);
}
-static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
- WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-
- intel_fbc_cancel_work(dev_priv);
-
- if (dev_priv->fbc.active)
- dev_priv->fbc.deactivate(dev_priv);
-}
-
-/*
- * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
- * @crtc: the CRTC
- *
- * This function deactivates FBC if it's associated with the provided CRTC.
- */
-void intel_fbc_deactivate(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
- if (!fbc_supported(dev_priv))
- return;
+ struct intel_fbc *fbc = &dev_priv->fbc;
- mutex_lock(&dev_priv->fbc.lock);
- if (dev_priv->fbc.crtc == crtc)
- __intel_fbc_deactivate(dev_priv);
- mutex_unlock(&dev_priv->fbc.lock);
-}
+ WARN_ON(!mutex_is_locked(&fbc->lock));
-static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
- const char *reason)
-{
- if (dev_priv->fbc.no_fbc_reason == reason)
- return;
+ /* Calling cancel_work() here won't help because the work function
+ * grabs fbc->lock. Just set scheduled to false so the work function
+ * can know it was cancelled. */
+ fbc->work.scheduled = false;
- dev_priv->fbc.no_fbc_reason = reason;
- DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
+ if (fbc->active)
+ intel_fbc_hw_deactivate(dev_priv);
}
-static bool crtc_can_fbc(struct intel_crtc *crtc)
+static bool multiple_pipes_ok(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_plane *primary = crtc->base.primary;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ enum pipe pipe = crtc->pipe;
- if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
- return false;
-
- if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
- return false;
-
- return true;
-}
-
-static bool crtc_is_valid(struct intel_crtc *crtc)
-{
- if (!intel_crtc_active(&crtc->base))
- return false;
-
- if (!to_intel_plane_state(crtc->base.primary->state)->visible)
- return false;
-
- return true;
-}
-
-static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
-{
- enum pipe pipe;
- int n_pipes = 0;
- struct drm_crtc *crtc;
-
- if (INTEL_INFO(dev_priv)->gen > 4)
+ /* Don't even bother tracking anything we don't need. */
+ if (!no_fbc_on_multiple_pipes(dev_priv))
return true;
- for_each_pipe(dev_priv, pipe) {
- crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ WARN_ON(!drm_modeset_is_locked(&primary->mutex));
- if (intel_crtc_active(crtc) &&
- to_intel_plane_state(crtc->primary->state)->visible)
- n_pipes++;
- }
+ if (to_intel_plane_state(primary->state)->visible)
+ fbc->visible_pipes_mask |= (1 << pipe);
+ else
+ fbc->visible_pipes_mask &= ~(1 << pipe);
- return (n_pipes < 2);
+ return (fbc->visible_pipes_mask & ~(1 << pipe)) == 0;
}
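
A quick worked case for the mask logic above: with the primary planes of pipes A and B both visible, visible_pipes_mask is 0x3; evaluating pipe A masks out bit 0, leaving 0x2, so the function returns false ("not ok") and FBC stays off until only one pipe is visible again.
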
static int find_compression_threshold(struct drm_i915_private *dev_priv,
@@ -581,16 +554,16 @@ again:
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_framebuffer *fb = crtc->base.primary->state->fb;
+ struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_mm_node *uninitialized_var(compressed_llb);
int size, fb_cpp, ret;
- WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
+ WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
- size = intel_fbc_calculate_cfb_size(crtc, fb);
- fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
+ fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);
- ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+ ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
@@ -599,12 +572,12 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
}
- dev_priv->fbc.threshold = ret;
+ fbc->threshold = ret;
if (INTEL_INFO(dev_priv)->gen >= 5)
- I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+ I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
- I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+ I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
@@ -615,23 +588,22 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
if (ret)
goto err_fb;
- dev_priv->fbc.compressed_llb = compressed_llb;
+ fbc->compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+ dev_priv->mm.stolen_base + fbc->compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
- dev_priv->fbc.compressed_fb.size,
- dev_priv->fbc.threshold);
+ fbc->compressed_fb.size, fbc->threshold);
return 0;
err_fb:
kfree(compressed_llb);
- i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+ i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
@@ -639,25 +611,27 @@ err_llb:
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
- if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
- i915_gem_stolen_remove_node(dev_priv,
- &dev_priv->fbc.compressed_fb);
-
- if (dev_priv->fbc.compressed_llb) {
- i915_gem_stolen_remove_node(dev_priv,
- dev_priv->fbc.compressed_llb);
- kfree(dev_priv->fbc.compressed_llb);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (drm_mm_node_allocated(&fbc->compressed_fb))
+ i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+
+ if (fbc->compressed_llb) {
+ i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
+ kfree(fbc->compressed_llb);
}
}
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
if (!fbc_supported(dev_priv))
return;
- mutex_lock(&dev_priv->fbc.lock);
+ mutex_lock(&fbc->lock);
__intel_fbc_cleanup_cfb(dev_priv);
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_unlock(&fbc->lock);
}
static bool stride_is_valid(struct drm_i915_private *dev_priv,
@@ -681,19 +655,17 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
return true;
}
-static bool pixel_format_is_valid(struct drm_framebuffer *fb)
+static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
+ uint32_t pixel_format)
{
- struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- switch (fb->pixel_format) {
+ switch (pixel_format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
return true;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return false;
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
@@ -713,6 +685,7 @@ static bool pixel_format_is_valid(struct drm_framebuffer *fb)
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
@@ -726,87 +699,105 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
max_h = 1536;
}
- intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
+ intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
+ &effective_h);
effective_w += crtc->adjusted_x;
effective_h += crtc->adjusted_y;
return effective_w <= max_w && effective_h <= max_h;
}
-/**
- * __intel_fbc_update - activate/deactivate FBC as needed, unlocked
- * @crtc: the CRTC that triggered the update
- *
- * This function completely reevaluates the status of FBC, then activates,
- * deactivates or maintains it on the same state.
- */
-static void __intel_fbc_update(struct intel_crtc *crtc)
+static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_framebuffer *fb;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(crtc->base.primary->state);
+ struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj;
- const struct drm_display_mode *adjusted_mode;
- WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+ WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
- if (!multiple_pipes_ok(dev_priv)) {
- set_no_fbc_reason(dev_priv, "more than one pipe active");
- goto out_disable;
- }
+ cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ cache->crtc.hsw_bdw_pixel_rate =
+ ilk_pipe_pixel_rate(crtc_state);
- if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
- return;
+ cache->plane.rotation = plane_state->base.rotation;
+ cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
+ cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
+ cache->plane.visible = plane_state->visible;
- if (!crtc_is_valid(crtc)) {
- set_no_fbc_reason(dev_priv, "no output");
- goto out_disable;
- }
+ if (!cache->plane.visible)
+ return;
- fb = crtc->base.primary->fb;
obj = intel_fb_obj(fb);
- adjusted_mode = &crtc->config->base.adjusted_mode;
- if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- set_no_fbc_reason(dev_priv, "incompatible mode");
- goto out_disable;
+ /* FIXME: We lack the proper locking here, so only run this on the
+ * platforms that need it. */
+ if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
+ cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
+ cache->fb.pixel_format = fb->pixel_format;
+ cache->fb.stride = fb->pitches[0];
+ cache->fb.fence_reg = obj->fence_reg;
+ cache->fb.tiling_mode = obj->tiling_mode;
+}
+
+static bool intel_fbc_can_activate(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+ if (!cache->plane.visible) {
+ fbc->no_fbc_reason = "primary plane not visible";
+ return false;
+ }
+
+ if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
+ (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
+ fbc->no_fbc_reason = "incompatible mode";
+ return false;
}
if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
- set_no_fbc_reason(dev_priv, "mode too large for compression");
- goto out_disable;
+ fbc->no_fbc_reason = "mode too large for compression";
+ return false;
}
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
- goto out_disable;
+ if (cache->fb.tiling_mode != I915_TILING_X ||
+ cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+ fbc->no_fbc_reason = "framebuffer not tiled or fenced";
+ return false;
}
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
- crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
- set_no_fbc_reason(dev_priv, "rotation unsupported");
- goto out_disable;
+ cache->plane.rotation != BIT(DRM_ROTATE_0)) {
+ fbc->no_fbc_reason = "rotation unsupported";
+ return false;
}
- if (!stride_is_valid(dev_priv, fb->pitches[0])) {
- set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
- goto out_disable;
+ if (!stride_is_valid(dev_priv, cache->fb.stride)) {
+ fbc->no_fbc_reason = "framebuffer stride not supported";
+ return false;
}
- if (!pixel_format_is_valid(fb)) {
- set_no_fbc_reason(dev_priv, "pixel format is invalid");
- goto out_disable;
+ if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
+ fbc->no_fbc_reason = "pixel format is invalid";
+ return false;
}
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
- ilk_pipe_pixel_rate(crtc->config) >=
- dev_priv->cdclk_freq * 95 / 100) {
- set_no_fbc_reason(dev_priv, "pixel rate is too big");
- goto out_disable;
+ cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
+ fbc->no_fbc_reason = "pixel rate is too big";
+ return false;
}
/* It is possible for the required CFB size change without a
@@ -819,189 +810,322 @@ static void __intel_fbc_update(struct intel_crtc *crtc)
* we didn't get any invalidate/deactivate calls, but this would require
* a lot of tracking just for a specific case. If we conclude it's an
* important case, we can implement it later. */
- if (intel_fbc_calculate_cfb_size(crtc, fb) >
- dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
- set_no_fbc_reason(dev_priv, "CFB requirements changed");
- goto out_disable;
+ if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+ fbc->compressed_fb.size * fbc->threshold) {
+ fbc->no_fbc_reason = "CFB requirements changed";
+ return false;
}
+ return true;
+}
+
+static bool intel_fbc_can_choose(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ bool enable_by_default = IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv);
+
+ if (intel_vgpu_active(dev_priv->dev)) {
+ fbc->no_fbc_reason = "VGPU is active";
+ return false;
+ }
+
+ if (i915.enable_fbc < 0 && !enable_by_default) {
+ fbc->no_fbc_reason = "disabled per chip default";
+ return false;
+ }
+
+ if (!i915.enable_fbc) {
+ fbc->no_fbc_reason = "disabled per module param";
+ return false;
+ }
+
+ if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
+ fbc->no_fbc_reason = "no enabled pipes can have FBC";
+ return false;
+ }
+
+ if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
+ fbc->no_fbc_reason = "no enabled planes can have FBC";
+ return false;
+ }
+
+ return true;
+}
+
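
Spelled out, the module-parameter handling above means: the default i915.enable_fbc = -1 leaves FBC on only where it is enabled by default (Haswell and Broadwell here), 0 forces it off everywhere, and a positive value lets any platform proceed to the remaining pipe/plane checks.
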
+static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
+ struct intel_fbc_reg_params *params)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+ /* Since all our fields are integer types, use memset here so the
+ * comparison function can rely on memcmp because the padding will be
+ * zero. */
+ memset(params, 0, sizeof(*params));
+
+ params->crtc.pipe = crtc->pipe;
+ params->crtc.plane = crtc->plane;
+ params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
+
+ params->fb.pixel_format = cache->fb.pixel_format;
+ params->fb.stride = cache->fb.stride;
+ params->fb.fence_reg = cache->fb.fence_reg;
+
+ params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
+
+ params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
+}
+
+static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
+ struct intel_fbc_reg_params *params2)
+{
+ /* We can use this since intel_fbc_get_reg_params() does a memset. */
+ return memcmp(params1, params2, sizeof(*params1)) == 0;
+}
+
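
The memset()/memcmp() pairing above deserves a note: struct padding bytes have indeterminate values, so comparing two structs bytewise is only sound when both were zero-filled before their fields were set. A minimal standalone illustration (names hypothetical):

    #include <string.h>

    struct params {
            unsigned char a;        /* typically 3 padding bytes follow */
            unsigned int b;
    };

    static int params_equal(const struct params *x, const struct params *y)
    {
            /* only valid because callers memset() both structs first,
             * which zeroes the padding as well as the fields */
            return memcmp(x, y, sizeof(*x)) == 0;
    }
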
+void intel_fbc_pre_update(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (!fbc_supported(dev_priv))
+ return;
+
+ mutex_lock(&fbc->lock);
+
+ if (!multiple_pipes_ok(crtc)) {
+ fbc->no_fbc_reason = "more than one pipe active";
+ goto deactivate;
+ }
+
+ if (!fbc->enabled || fbc->crtc != crtc)
+ goto unlock;
+
+ intel_fbc_update_state_cache(crtc);
+
+deactivate:
+ intel_fbc_deactivate(dev_priv);
+unlock:
+ mutex_unlock(&fbc->lock);
+}
+
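
The new pre/post split is meant to bracket an atomic plane update; a hedged sketch of the intended call ordering (call sites assumed, not shown in this patch):

    /*
     *  intel_fbc_pre_update(crtc);   - refresh state cache, deactivate
     *  ... commit the new plane/CRTC registers ...
     *  intel_fbc_post_update(crtc);  - re-check and maybe reactivate
     */

Deactivating before the registers change guarantees the hardware never compresses a half-updated scanout.
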
+static void __intel_fbc_post_update(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_reg_params old_params;
+
+ WARN_ON(!mutex_is_locked(&fbc->lock));
+
+ if (!fbc->enabled || fbc->crtc != crtc)
+ return;
+
+ if (!intel_fbc_can_activate(crtc)) {
+ WARN_ON(fbc->active);
+ return;
+ }
+
+ old_params = fbc->params;
+ intel_fbc_get_reg_params(crtc, &fbc->params);
+
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
- if (dev_priv->fbc.crtc == crtc &&
- dev_priv->fbc.fb_id == fb->base.id &&
- dev_priv->fbc.y == crtc->base.y &&
- dev_priv->fbc.active)
+ if (fbc->active &&
+ intel_fbc_reg_params_equal(&old_params, &fbc->params))
return;
- if (intel_fbc_is_active(dev_priv)) {
- /* We update FBC along two paths, after changing fb/crtc
- * configuration (modeswitching) and after page-flipping
- * finishes. For the latter, we know that not only did
- * we disable the FBC at the start of the page-flip
- * sequence, but also more than one vblank has passed.
- *
- * For the former case of modeswitching, it is possible
- * to switch between two FBC valid configurations
- * instantaneously so we do need to disable the FBC
- * before we can modify its control registers. We also
- * have to wait for the next vblank for that to take
- * effect. However, since we delay enabling FBC we can
- * assume that a vblank has passed since disabling and
- * that we can safely alter the registers in the deferred
- * callback.
- *
- * In the scenario that we go from a valid to invalid
- * and then back to valid FBC configuration we have
- * no strict enforcement that a vblank occurred since
- * disabling the FBC. However, along all current pipe
- * disabling paths we do need to wait for a vblank at
- * some point. And we wait before enabling FBC anyway.
- */
- DRM_DEBUG_KMS("deactivating FBC for update\n");
- __intel_fbc_deactivate(dev_priv);
- }
-
+ intel_fbc_deactivate(dev_priv);
intel_fbc_schedule_activation(crtc);
- dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
- return;
-
-out_disable:
- /* Multiple disables should be harmless */
- if (intel_fbc_is_active(dev_priv)) {
- DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
- __intel_fbc_deactivate(dev_priv);
- }
+ fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}
-/*
- * intel_fbc_update - activate/deactivate FBC as needed
- * @crtc: the CRTC that triggered the update
- *
- * This function reevaluates the overall state and activates or deactivates FBC.
- */
-void intel_fbc_update(struct intel_crtc *crtc)
+void intel_fbc_post_update(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
return;
- mutex_lock(&dev_priv->fbc.lock);
- __intel_fbc_update(crtc);
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_lock(&fbc->lock);
+ __intel_fbc_post_update(crtc);
+ mutex_unlock(&fbc->lock);
+}
+
+static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
+{
+ if (fbc->enabled)
+ return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
+ else
+ return fbc->possible_framebuffer_bits;
}
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- unsigned int fbc_bits;
+ struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
return;
- if (origin == ORIGIN_GTT)
+ if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
return;
- mutex_lock(&dev_priv->fbc.lock);
+ mutex_lock(&fbc->lock);
- if (dev_priv->fbc.enabled)
- fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
- else
- fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
-
- dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
+ fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
- if (dev_priv->fbc.busy_bits)
- __intel_fbc_deactivate(dev_priv);
+ if (fbc->enabled && fbc->busy_bits)
+ intel_fbc_deactivate(dev_priv);
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_unlock(&fbc->lock);
}
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
if (!fbc_supported(dev_priv))
return;
- if (origin == ORIGIN_GTT)
+ if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
return;
- mutex_lock(&dev_priv->fbc.lock);
+ mutex_lock(&fbc->lock);
- dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
+ fbc->busy_bits &= ~frontbuffer_bits;
- if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
- if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
+ if (!fbc->busy_bits && fbc->enabled &&
+ (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
+ if (fbc->active)
intel_fbc_recompress(dev_priv);
- } else {
- __intel_fbc_deactivate(dev_priv);
- __intel_fbc_update(dev_priv->fbc.crtc);
+ else
+ __intel_fbc_post_update(fbc->crtc);
+ }
+
+ mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_choose_crtc - select a CRTC to enable FBC on
+ * @dev_priv: i915 device instance
+ * @state: the atomic state structure
+ *
+ * This function looks at the proposed state for CRTCs and planes, then chooses
+ * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
+ * true.
+ *
+ * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
+ * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
+ */
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+ struct drm_atomic_state *state)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ bool fbc_crtc_present = false;
+ int i, j;
+
+ mutex_lock(&fbc->lock);
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (fbc->crtc == to_intel_crtc(crtc)) {
+ fbc_crtc_present = true;
+ break;
+ }
+ }
+ /* This atomic commit doesn't involve the CRTC currently tied to FBC. */
+ if (!fbc_crtc_present && fbc->crtc != NULL)
+ goto out;
+
+ /* Simply choose the first CRTC that is compatible and has a visible
+ * plane. We could go for fancier schemes such as checking the plane
+ * size, but this would just affect the few platforms that don't tie FBC
+ * to pipe or plane A. */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane_state *intel_plane_state =
+ to_intel_plane_state(plane_state);
+
+ if (!intel_plane_state->visible)
+ continue;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, j) {
+ struct intel_crtc_state *intel_crtc_state =
+ to_intel_crtc_state(crtc_state);
+
+ if (plane_state->crtc != crtc)
+ continue;
+
+ if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
+ break;
+
+ intel_crtc_state->enable_fbc = true;
+ goto out;
}
}
- mutex_unlock(&dev_priv->fbc.lock);
+out:
+ mutex_unlock(&fbc->lock);
}
/**
* intel_fbc_enable: tries to enable FBC on the CRTC
* @crtc: the CRTC
*
- * This function checks if it's possible to enable FBC on the following CRTC,
- * then enables it. Notice that it doesn't activate FBC.
+ * This function checks if the given CRTC was chosen for FBC, then enables it if
+ * possible. Notice that it doesn't activate FBC. It is valid to call
+ * intel_fbc_enable multiple times for the same pipe without an
+ * intel_fbc_disable in the middle, as long as it is deactivated.
*/
void intel_fbc_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
return;
- mutex_lock(&dev_priv->fbc.lock);
-
- if (dev_priv->fbc.enabled) {
- WARN_ON(dev_priv->fbc.crtc == crtc);
- goto out;
- }
-
- WARN_ON(dev_priv->fbc.active);
- WARN_ON(dev_priv->fbc.crtc != NULL);
+ mutex_lock(&fbc->lock);
- if (intel_vgpu_active(dev_priv->dev)) {
- set_no_fbc_reason(dev_priv, "VGPU is active");
- goto out;
- }
-
- if (i915.enable_fbc < 0) {
- set_no_fbc_reason(dev_priv, "disabled per chip default");
+ if (fbc->enabled) {
+ WARN_ON(fbc->crtc == NULL);
+ if (fbc->crtc == crtc) {
+ WARN_ON(!crtc->config->enable_fbc);
+ WARN_ON(fbc->active);
+ }
goto out;
}
- if (!i915.enable_fbc) {
- set_no_fbc_reason(dev_priv, "disabled per module param");
+ if (!crtc->config->enable_fbc)
goto out;
- }
- if (!crtc_can_fbc(crtc)) {
- set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
- goto out;
- }
+ WARN_ON(fbc->active);
+ WARN_ON(fbc->crtc != NULL);
+ intel_fbc_update_state_cache(crtc);
if (intel_fbc_alloc_cfb(crtc)) {
- set_no_fbc_reason(dev_priv, "not enough stolen memory");
+ fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
- dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n";
+ fbc->no_fbc_reason = "FBC enabled but not active yet\n";
- dev_priv->fbc.enabled = true;
- dev_priv->fbc.crtc = crtc;
+ fbc->enabled = true;
+ fbc->crtc = crtc;
out:
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_unlock(&fbc->lock);
}
/**
@@ -1013,58 +1137,88 @@ out:
*/
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
- struct intel_crtc *crtc = dev_priv->fbc.crtc;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_crtc *crtc = fbc->crtc;
- WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
- WARN_ON(!dev_priv->fbc.enabled);
- WARN_ON(dev_priv->fbc.active);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ WARN_ON(!mutex_is_locked(&fbc->lock));
+ WARN_ON(!fbc->enabled);
+ WARN_ON(fbc->active);
+ WARN_ON(crtc->active);
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
__intel_fbc_cleanup_cfb(dev_priv);
- dev_priv->fbc.enabled = false;
- dev_priv->fbc.crtc = NULL;
+ fbc->enabled = false;
+ fbc->crtc = NULL;
}
/**
- * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * intel_fbc_disable - disable FBC if it's associated with crtc
* @crtc: the CRTC
*
* This function disables FBC if it's associated with the provided CRTC.
*/
-void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+void intel_fbc_disable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
return;
- mutex_lock(&dev_priv->fbc.lock);
- if (dev_priv->fbc.crtc == crtc) {
- WARN_ON(!dev_priv->fbc.enabled);
- WARN_ON(dev_priv->fbc.active);
+ mutex_lock(&fbc->lock);
+ if (fbc->crtc == crtc) {
+ WARN_ON(!fbc->enabled);
+ WARN_ON(fbc->active);
__intel_fbc_disable(dev_priv);
}
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_unlock(&fbc->lock);
+
+ cancel_work_sync(&fbc->work.work);
}
/**
- * intel_fbc_disable - globally disable FBC
+ * intel_fbc_global_disable - globally disable FBC
* @dev_priv: i915 device instance
*
* This function disables FBC regardless of which CRTC is associated with it.
*/
-void intel_fbc_disable(struct drm_i915_private *dev_priv)
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
if (!fbc_supported(dev_priv))
return;
- mutex_lock(&dev_priv->fbc.lock);
- if (dev_priv->fbc.enabled)
+ mutex_lock(&fbc->lock);
+ if (fbc->enabled)
__intel_fbc_disable(dev_priv);
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_unlock(&fbc->lock);
+
+ cancel_work_sync(&fbc->work.work);
+}
+
+/**
+ * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
+ * @dev_priv: i915 device instance
+ *
+ * The FBC code needs to track CRTC visibility since the older platforms can't
+ * have FBC enabled while multiple pipes are used. This function does the
+ * initial setup at driver load to make sure FBC is matching the real hardware.
+ */
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ /* Don't even bother tracking anything if we don't need to. */
+ if (!no_fbc_on_multiple_pipes(dev_priv))
+ return;
+
+ for_each_intel_crtc(dev_priv->dev, crtc)
+ if (intel_crtc_active(&crtc->base) &&
+ to_intel_plane_state(crtc->base.primary->state)->visible)
+ dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
/**
@@ -1075,51 +1229,35 @@ void intel_fbc_disable(struct drm_i915_private *dev_priv)
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
+ struct intel_fbc *fbc = &dev_priv->fbc;
enum pipe pipe;
- INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
- mutex_init(&dev_priv->fbc.lock);
- dev_priv->fbc.enabled = false;
- dev_priv->fbc.active = false;
- dev_priv->fbc.work.scheduled = false;
+ INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
+ mutex_init(&fbc->lock);
+ fbc->enabled = false;
+ fbc->active = false;
+ fbc->work.scheduled = false;
if (!HAS_FBC(dev_priv)) {
- dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
+ fbc->no_fbc_reason = "unsupported by this chipset";
return;
}
for_each_pipe(dev_priv, pipe) {
- dev_priv->fbc.possible_framebuffer_bits |=
+ fbc->possible_framebuffer_bits |=
INTEL_FRONTBUFFER_PRIMARY(pipe);
if (fbc_on_pipe_a_only(dev_priv))
break;
}
- if (INTEL_INFO(dev_priv)->gen >= 7) {
- dev_priv->fbc.is_active = ilk_fbc_is_active;
- dev_priv->fbc.activate = gen7_fbc_activate;
- dev_priv->fbc.deactivate = ilk_fbc_deactivate;
- } else if (INTEL_INFO(dev_priv)->gen >= 5) {
- dev_priv->fbc.is_active = ilk_fbc_is_active;
- dev_priv->fbc.activate = ilk_fbc_activate;
- dev_priv->fbc.deactivate = ilk_fbc_deactivate;
- } else if (IS_GM45(dev_priv)) {
- dev_priv->fbc.is_active = g4x_fbc_is_active;
- dev_priv->fbc.activate = g4x_fbc_activate;
- dev_priv->fbc.deactivate = g4x_fbc_deactivate;
- } else {
- dev_priv->fbc.is_active = i8xx_fbc_is_active;
- dev_priv->fbc.activate = i8xx_fbc_activate;
- dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
-
- /* This value was pulled out of someone's hat */
+ /* This value was pulled out of someone's hat */
+ if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
- }
/* We still don't have any sort of hardware state readout for FBC, so
* deactivate it in case the BIOS activated it to make sure software
* matches the hardware state. */
- if (dev_priv->fbc.is_active(dev_priv))
- dev_priv->fbc.deactivate(dev_priv);
+ if (intel_fbc_hw_is_active(dev_priv))
+ intel_fbc_hw_deactivate(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index bea75cafc623..97a91e631915 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
- struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
@@ -171,8 +171,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
out:
mutex_unlock(&dev->struct_mutex);
- if (!IS_ERR_OR_NULL(fb))
- drm_framebuffer_unreference(fb);
return ret;
}
@@ -408,8 +406,8 @@ retry:
continue;
}
- encoder = connector->encoder;
- if (!encoder || WARN_ON(!encoder->crtc)) {
+ encoder = connector->state->best_encoder;
+ if (!encoder || WARN_ON(!connector->state->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;
@@ -422,7 +420,7 @@ retry:
num_connectors_enabled++;
- new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
+ new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
/*
* Make sure we're not trying to drive multiple connectors
@@ -468,17 +466,22 @@ retry:
* usually contains. But since our current
* code puts a mode derived from the post-pfit timings
* into crtc->mode this works out correctly.
+ *
+ * This is crtc->mode and not crtc->state->mode for the
+ * fastboot check to work correctly. crtc_state->mode has
+ * I915_MODE_FLAG_INHERITED, which we clear to force check
+ * state.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
- modes[i] = &encoder->crtc->mode;
+ modes[i] = &connector->state->crtc->mode;
}
crtcs[i] = new_crtc;
DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
- pipe_name(to_intel_crtc(encoder->crtc)->pipe),
- encoder->crtc->base.id,
+ pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
+ connector->state->crtc->base.id,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 822952235dcf..73002e901ff2 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -43,9 +43,10 @@ struct i915_guc_client {
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
+ uint32_t wq_head;
/* GuC submission statistics & status */
- uint64_t submissions[I915_NUM_RINGS];
+ uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
@@ -88,6 +89,8 @@ struct intel_guc {
uint32_t log_flags;
struct drm_i915_gem_object *log_obj;
+ struct drm_i915_gem_object *ads_obj;
+
struct drm_i915_gem_object *ctx_pool_obj;
struct ida ctx_ids;
@@ -103,8 +106,8 @@ struct intel_guc {
uint32_t action_fail; /* Total number of failures */
int32_t action_err; /* Last error code */
- uint64_t submissions[I915_NUM_RINGS];
- uint32_t last_seqno[I915_NUM_RINGS];
+ uint64_t submissions[GUC_MAX_ENGINES_NUM];
+ uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
};
/* intel_guc_loader.c */
@@ -122,5 +125,6 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
+int i915_guc_wq_check_space(struct i915_guc_client *client);
#endif
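
The new wq_head field plus i915_guc_wq_check_space() imply the work queue is treated as a ring buffer: the driver produces at wq_tail while the GuC consumes at wq_head. A hedged sketch of the free-space math such a check would perform (helper name hypothetical):

    #include <linux/circ_buf.h>

    /* wq_size must be a power of two for CIRC_SPACE()'s masking to work */
    static u32 guc_wq_space(u32 wq_tail, u32 wq_head, u32 wq_size)
    {
            return CIRC_SPACE(wq_tail, wq_head, wq_size);
    }
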
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 40b2ea572e16..2de57ffe5e18 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,10 +39,18 @@
#define GUC_CTX_PRIORITY_HIGH 1
#define GUC_CTX_PRIORITY_KMD_NORMAL 2
#define GUC_CTX_PRIORITY_NORMAL 3
+#define GUC_CTX_PRIORITY_NUM 4
#define GUC_MAX_GPU_CONTEXTS 1024
#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
+#define GUC_RENDER_ENGINE 0
+#define GUC_VIDEO_ENGINE 1
+#define GUC_BLITTER_ENGINE 2
+#define GUC_VIDEOENHANCE_ENGINE 3
+#define GUC_VIDEO_ENGINE2 4
+#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+
/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
@@ -81,11 +89,14 @@
#define GUC_CTL_CTXINFO 0
#define GUC_CTL_CTXNUM_IN16_SHIFT 0
#define GUC_CTL_BASE_ADDR_SHIFT 12
+
#define GUC_CTL_ARAT_HIGH 1
#define GUC_CTL_ARAT_LOW 2
+
#define GUC_CTL_DEVICE_INFO 3
#define GUC_CTL_GTTYPE_SHIFT 0
#define GUC_CTL_COREFAMILY_SHIFT 7
+
#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
@@ -97,9 +108,12 @@
#define GUC_LOG_ISR_PAGES 3
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
+
#define GUC_CTL_PAGE_FAULT_CONTROL 5
+
#define GUC_CTL_WA 6
#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
+
#define GUC_CTL_FEATURE 7
#define GUC_CTL_VCS2_ENABLED (1 << 0)
#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
@@ -109,6 +123,7 @@
#define GUC_CTL_PREEMPTION_LOG (1 << 5)
#define GUC_CTL_ENABLE_SLPC (1 << 7)
#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)
+
#define GUC_CTL_DEBUG 8
#define GUC_LOG_VERBOSITY_SHIFT 0
#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
@@ -118,9 +133,19 @@
/* Verbosity range-check limits, without the shift */
#define GUC_LOG_VERBOSITY_MIN 0
#define GUC_LOG_VERBOSITY_MAX 3
+#define GUC_LOG_VERBOSITY_MASK 0x0000000f
+#define GUC_LOG_DESTINATION_MASK (3 << 4)
+#define GUC_LOG_DISABLED (1 << 6)
+#define GUC_PROFILE_ENABLED (1 << 7)
+#define GUC_WQ_TRACK_ENABLED (1 << 8)
+#define GUC_ADS_ENABLED (1 << 9)
+#define GUC_DEBUG_RESERVED (1 << 10)
+#define GUC_ADS_ADDR_SHIFT 11
+#define GUC_ADS_ADDR_MASK 0xfffff800
+
#define GUC_CTL_RSRVD 9
-#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
+#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
/**
* DOC: GuC Firmware Layout
@@ -267,7 +292,7 @@ struct guc_context_desc {
u64 db_trigger_phy;
u16 db_id;
- struct guc_execlist_context lrc[I915_NUM_RINGS];
+ struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
u8 attribute;
@@ -299,6 +324,99 @@ struct guc_context_desc {
#define GUC_POWER_D2 3
#define GUC_POWER_D3 4
+/* Scheduling policy settings */
+
+/* Reset engine upon preempt failure */
+#define POLICY_RESET_ENGINE (1<<0)
+/* Preempt to idle on quantum expiry */
+#define POLICY_PREEMPT_TO_IDLE (1<<1)
+
+#define POLICY_MAX_NUM_WI 15
+
+struct guc_policy {
+ /* Time for one workload to execute (in microseconds). */
+ u32 execution_quantum;
+ u32 reserved1;
+
+ /* Time to wait for a preemption request to complete before issuing a
+ * reset (in microseconds). */
+ u32 preemption_time;
+
+ /* How much time to allow the workload to run after the first fault is
+ * observed, before preempting it (in microseconds). */
+ u32 fault_time;
+
+ u32 policy_flags;
+ u32 reserved[2];
+} __packed;
+
+struct guc_policies {
+ struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
+
+ /* How much time, in microseconds, to allow before DPC processing is
+ * called back via interrupt (to prevent DPC queue drain starving).
+ * Typically 1000s of microseconds (example only, not granularity). */
+ u32 dpc_promote_time;
+
+ /* Must be set to take these new values. */
+ u32 is_valid;
+
+ /* Max number of WIs to process per call. A large value may keep CS
+ * idle. */
+ u32 max_num_work_items;
+
+ u32 reserved[19];
+} __packed;
+
+/* GuC MMIO reg state struct */
+
+#define GUC_REGSET_FLAGS_NONE 0x0
+#define GUC_REGSET_POWERCYCLE 0x1
+#define GUC_REGSET_MASKED 0x2
+#define GUC_REGSET_ENGINERESET 0x4
+#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8
+#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10
+
+#define GUC_REGSET_MAX_REGISTERS 25
+#define GUC_MMIO_WHITE_LIST_START 0x24d0
+#define GUC_MMIO_WHITE_LIST_MAX 12
+#define GUC_S3_SAVE_SPACE_PAGES 10
+
+struct guc_mmio_regset {
+ struct __packed {
+ u32 offset;
+ u32 value;
+ u32 flags;
+ } registers[GUC_REGSET_MAX_REGISTERS];
+
+ u32 values_valid;
+ u32 number_of_registers;
+} __packed;
+
+struct guc_mmio_reg_state {
+ struct guc_mmio_regset global_reg;
+ struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
+
+ /* MMIO registers that are set as non privileged */
+ struct __packed {
+ u32 mmio_start;
+ u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
+ u32 count;
+ } mmio_white_list[GUC_MAX_ENGINES_NUM];
+} __packed;
+
+/* GuC Additional Data Struct */
+
+struct guc_ads {
+ u32 reg_state_addr;
+ u32 reg_state_buffer;
+ u32 golden_context_lrca;
+ u32 scheduler_policies;
+ u32 reserved0[3];
+ u32 eng_state_size[GUC_MAX_ENGINES_NUM];
+ u32 reserved2[4];
+} __packed;
+
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 550921f2ef7d..82a3c03fbc0e 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -165,6 +165,13 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
}
+ if (guc->ads_obj) {
+ u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
+ >> PAGE_SHIFT;
+ params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
+ params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
+ }
+
/* If GuC submission is enabled, set up additional parameters here */
if (i915.enable_guc_submission) {
u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
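
To make the ADS address packing a few lines up concrete (offset value hypothetical): a GGTT offset of 0x00345000 with PAGE_SHIFT = 12 gives page number 0x345; shifted left by GUC_ADS_ADDR_SHIFT (11) that becomes 0x1a2800, which sits inside GUC_ADS_ADDR_MASK (0xfffff800) and leaves bits 10:0 free for flags such as GUC_ADS_ENABLED (bit 9).
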
@@ -192,7 +199,7 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
* the value matches either of two values representing completion
* of the GuC boot process.
*
- * This is used for polling the GuC status in a wait_for_atomic()
+ * This is used for polling the GuC status in a wait_for()
* loop below.
*/
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
@@ -252,14 +259,14 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
/*
- * Spin-wait for the DMA to complete & the GuC to start up.
+ * Wait for the DMA to complete & the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver will attempt to fall back to
* execlist mode if this happens.)
*/
- ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);
+ ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
I915_READ(DMA_CTRL), status);
@@ -438,6 +445,7 @@ fail:
direct_interrupts_to_host(dev_priv);
i915_guc_submission_disable(dev);
+ i915_guc_submission_fini(dev);
return err;
}
@@ -554,10 +562,12 @@ fail:
DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
guc_fw->guc_fw_path, err);
+ mutex_lock(&dev->struct_mutex);
obj = guc_fw->guc_fw_obj;
if (obj)
drm_gem_object_unreference(&obj->base);
guc_fw->guc_fw_obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
release_firmware(fw); /* OK even if fw is NULL */
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
@@ -624,10 +634,11 @@ void intel_guc_ucode_fini(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ mutex_lock(&dev->struct_mutex);
direct_interrupts_to_host(dev_priv);
+ i915_guc_submission_disable(dev);
i915_guc_submission_fini(dev);
- mutex_lock(&dev->struct_mutex);
if (guc_fw->guc_fw_obj)
drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
guc_fw->guc_fw_obj = NULL;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index cb5d1b15755c..1ab6f687f640 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1210,11 +1210,19 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
enum drm_mode_status status;
int clock;
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
clock = mode->clock;
+
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+ if (clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
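To make the arithmetic concrete (numbers assumed for illustration): a 1920x1080p24 mode has a nominal dotclock of 74250 kHz, so with DRM_MODE_FLAG_3D_FRAME_PACKING set the effective clock becomes 148500 kHz; on a platform whose max_dotclk_freq is below that, the mode is now correctly rejected with MODE_CLOCK_HIGH instead of being accepted and failing later.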
@@ -1407,8 +1415,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
hdmi_to_dig_port(intel_hdmi));
}
- if (!live_status)
- DRM_DEBUG_KMS("Live status not up!");
+ if (!live_status) {
+ DRM_DEBUG_KMS("HDMI live status down\n");
+ /*
+		 * The live status register is not reliable on all Intel
+		 * platforms. So trust live_status only on certain platforms;
+		 * for the others, read the EDID to determine the presence of a sink.
+ */
+ if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+ live_status = true;
+ }
intel_hdmi_unset_edid(connector);
@@ -2041,6 +2057,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
+ if (WARN(intel_dig_port->max_lanes < 4,
+ "Not enough lanes (%d) for HDMI on port %c\n",
+ intel_dig_port->max_lanes, port_name(port)))
+ return;
+
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
@@ -2154,7 +2175,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
void intel_hdmi_init(struct drm_device *dev,
i915_reg_t hdmi_reg, enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
@@ -2223,9 +2243,9 @@ void intel_hdmi_init(struct drm_device *dev,
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port;
- dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
+ intel_dig_port->max_lanes = 4;
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index deb8282c26d8..52fbe530fc9e 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -664,6 +664,12 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->adapter.algo = &gmbus_algorithm;
+ /*
+	 * We wish to retry with bit banging
+	 * after a timed-out GMBUS attempt.
+ */
+ bus->adapter.retries = 1;
+
/* By default use a conservative clock rate */
bus->reg0 = pin | GMBUS_RATE_100KHZ;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f1fa756c5d5d..5c6080fd0968 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -223,9 +223,11 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
-#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
+#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
+#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static int intel_lr_context_pin(struct intel_context *ctx,
+ struct intel_engine_cs *engine);
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
struct drm_i915_gem_object *default_ctx_obj);
@@ -263,65 +265,92 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
return 0;
}
+static void
+logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+ (ring->id == VCS || ring->id == VCS2);
+
+ ring->ctx_desc_template = GEN8_CTX_VALID;
+ ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+ if (IS_GEN8(dev))
+ ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+ ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+
+ /* TODO: WaDisableLiteRestore when we start using semaphore
+ * signalling between Command Streamers */
+ /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
+
+ /* WaEnableForceRestoreInCtxtDescForVCS:skl */
+ /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
+ if (ring->disable_lite_restore_wa)
+ ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+}
+
/**
- * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx_obj: Logical Ring Context backing object.
+ * intel_lr_context_descriptor_update() - calculate & cache the descriptor
+ * for a pinned context
*
- * Do not confuse with ctx->id! Unfortunately we have a name overload
- * here: the old context ID we pass to userspace as a handler so that
- * they can refer to a context, and the new context ID we pass to the
- * ELSP so that the GPU can inform us of the context status via
- * interrupts.
+ * @ctx: Context to work on
+ * @ring: Engine the descriptor will be used with
*
- * Return: 20-bits globally unique context ID.
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB:
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
+ * bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ * bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
+ * bits 52-63: reserved, may encode the engine ID (for GuC)
*/
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
+static void
+intel_lr_context_descriptor_update(struct intel_context *ctx,
+ struct intel_engine_cs *ring)
{
- u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
- LRC_PPHWSP_PN * PAGE_SIZE;
+ uint64_t lrca, desc;
- /* LRCA is required to be 4K aligned so the more significant 20 bits
- * are globally unique */
- return lrca >> 12;
-}
+ lrca = ctx->engine[ring->id].lrc_vma->node.start +
+ LRC_PPHWSP_PN * PAGE_SIZE;
-static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
-{
- struct drm_device *dev = ring->dev;
+ desc = ring->ctx_desc_template; /* bits 0-11 */
+ desc |= lrca; /* bits 12-31 */
+ desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
- return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
- (ring->id == VCS || ring->id == VCS2);
+ ctx->engine[ring->id].lrc_desc = desc;
}
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
- uint64_t desc;
- uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
- LRC_PPHWSP_PN * PAGE_SIZE;
-
- WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
-
- desc = GEN8_CTX_VALID;
- desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (IS_GEN8(ctx_obj->base.dev))
- desc |= GEN8_CTX_L3LLC_COHERENT;
- desc |= GEN8_CTX_PRIVILEGE;
- desc |= lrca;
- desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
-
- /* TODO: WaDisableLiteRestore when we start using semaphore
- * signalling between Command Streamers */
- /* desc |= GEN8_CTX_FORCE_RESTORE; */
-
- /* WaEnableForceRestoreInCtxtDescForVCS:skl */
- /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
- if (disable_lite_restore_wa(ring))
- desc |= GEN8_CTX_FORCE_RESTORE;
+ return ctx->engine[ring->id].lrc_desc;
+}
- return desc;
+/**
+ * intel_execlists_ctx_id() - get the Execlists Context ID
+ * @ctx: Context to get the ID for
+ * @ring: Engine to get the ID for
+ *
+ * Do not confuse with ctx->id! Unfortunately we have a name overload
+ * here: the old context ID we pass to userspace as a handler so that
+ * they can refer to a context, and the new context ID we pass to the
+ * ELSP so that the GPU can inform us of the context status via
+ * interrupts.
+ *
+ * The context ID is a portion of the context descriptor, so we can
+ * just extract the required part from the cached descriptor.
+ *
+ * Return: 20-bits globally unique context ID.
+ */
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+ struct intel_engine_cs *ring)
+{
+ return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
}
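A compact sketch of the packing described above (illustrative only: the real code uses the per-engine ctx_desc_template plus PAGE_SHIFT and GEN8_CTX_ID_SHIFT rather than bare literals):

	/* Illustrative packing of a context descriptor; assumes lrca is the
	 * 4K-aligned GTT address of the context, so its low 12 bits are free
	 * for the flag template and its page frame number doubles as the
	 * globally unique 20-bit context ID. */
	static u64 pack_ctx_desc(u64 template, u64 lrca)
	{
		u64 desc = template;		/* bits 0-11: GEN8_CTX_* flags */

		desc |= lrca;			/* bits 12-31: GTT address */
		desc |= (lrca >> 12) << 32;	/* bits 32-51: context ID */

		return desc;			/* bits 52-63: left zero here */
	}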
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
@@ -363,20 +392,9 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
{
struct intel_engine_cs *ring = rq->ring;
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
- struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
- struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
- struct page *page;
- uint32_t *reg_state;
-
- BUG_ON(!ctx_obj);
- WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
- WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- reg_state = kmap_atomic(page);
+ uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = rq->tail;
- reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* True 32b PPGTT with dynamic page allocation: update PDP
@@ -390,8 +408,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
- kunmap_atomic(reg_state);
-
return 0;
}
@@ -431,9 +447,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
- list_del(&req0->execlist_link);
- list_add_tail(&req0->execlist_link,
- &ring->execlist_retired_req_list);
+ list_move_tail(&req0->execlist_link,
+ &ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
@@ -478,16 +493,13 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
execlist_link);
if (head_req != NULL) {
- struct drm_i915_gem_object *ctx_obj =
- head_req->ctx->engine[ring->id].state;
- if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+ if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
WARN(head_req->elsp_submitted == 0,
"Never submitted head request\n");
if (--head_req->elsp_submitted <= 0) {
- list_del(&head_req->execlist_link);
- list_add_tail(&head_req->execlist_link,
- &ring->execlist_retired_req_list);
+ list_move_tail(&head_req->execlist_link,
+ &ring->execlist_retired_req_list);
return true;
}
}
@@ -496,6 +508,19 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
return false;
}
+static void get_context_status(struct intel_engine_cs *ring,
+ u8 read_pointer,
+ u32 *status, u32 *context_id)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
+ return;
+
+ *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
+ *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+}
+
/**
* intel_lrc_irq_handler() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
@@ -516,16 +541,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
read_pointer = ring->next_context_status_buffer;
- write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
+ write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
spin_lock(&ring->execlist_lock);
while (read_pointer < write_pointer) {
- read_pointer++;
- status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
- status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
+
+ get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
+ &status, &status_id);
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue;
@@ -538,14 +563,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
WARN(1, "Preemption without Lite Restore\n");
}
- if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
- (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
+ if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
+ (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
if (execlists_check_remove_request(ring, status_id))
submit_contexts++;
}
}
- if (disable_lite_restore_wa(ring)) {
+ if (ring->disable_lite_restore_wa) {
/* Prevent a ctx to preempt itself */
if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
(submit_contexts != 0))
@@ -556,13 +581,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
spin_unlock(&ring->execlist_lock);
- WARN(submit_contexts > 2, "More than two context complete events?\n");
+ if (unlikely(submit_contexts > 2))
+ DRM_ERROR("More than two context complete events?\n");
+
ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
+ /* Update the read pointer to the old write pointer. Manual ringbuffer
+ * management ftw </sarcasm> */
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
- _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
- ((u32)ring->next_context_status_buffer &
- GEN8_CSB_PTR_MASK) << 8));
+ _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+ ring->next_context_status_buffer << 8));
}
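The wrap handling above is easier to see in isolation. A self-contained sketch (assumed values and plain userspace C, not kernel code) of how the software read pointer chases the hardware write pointer around the six-entry status buffer:

	#include <stdio.h>

	#define GEN8_CSB_ENTRIES 6

	/* Walk the context-status buffer exactly like the handler above:
	 * unwrap the write pointer, then consume entries modulo the buffer
	 * size until the read pointer catches up. */
	static void walk_csb(unsigned int read_pointer, unsigned int write_pointer)
	{
		if (read_pointer > write_pointer)
			write_pointer += GEN8_CSB_ENTRIES;	/* unwrap */

		while (read_pointer < write_pointer) {
			unsigned int entry = ++read_pointer % GEN8_CSB_ENTRIES;

			printf("process CSB entry %u\n", entry);
		}
	}

	int main(void)
	{
		walk_csb(4, 1);	/* wrapped case: processes entries 5, 0, 1 */
		return 0;
	}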
static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -571,8 +599,8 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
struct drm_i915_gem_request *cursor;
int num_elements = 0;
- if (request->ctx != ring->default_context)
- intel_lr_context_pin(request);
+ if (request->ctx != request->i915->kernel_context)
+ intel_lr_context_pin(request->ctx, ring);
i915_gem_request_reference(request);
@@ -592,9 +620,8 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
- list_del(&tail_req->execlist_link);
- list_add_tail(&tail_req->execlist_link,
- &ring->execlist_retired_req_list);
+ list_move_tail(&tail_req->execlist_link,
+ &ring->execlist_retired_req_list);
}
}
@@ -660,17 +687,27 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- int ret;
+ int ret = 0;
request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
- if (request->ctx != request->ring->default_context) {
- ret = intel_lr_context_pin(request);
+ if (i915.enable_guc_submission) {
+ /*
+ * Check that the GuC has space for the request before
+ * going any further, as the i915_add_request() call
+ * later on mustn't fail ...
+ */
+ struct intel_guc *guc = &request->i915->guc;
+
+ ret = i915_guc_wq_check_space(guc->execbuf_client);
if (ret)
return ret;
}
- return 0;
+ if (request->ctx != request->i915->kernel_context)
+ ret = intel_lr_context_pin(request->ctx, request->ring);
+
+ return ret;
}
static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
@@ -724,23 +761,46 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
-static void
+static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
- struct intel_engine_cs *ring = request->ring;
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
struct drm_i915_private *dev_priv = request->i915;
+ struct intel_engine_cs *engine = request->ring;
+
+ intel_logical_ring_advance(ringbuf);
+ request->tail = ringbuf->tail;
- intel_logical_ring_advance(request->ringbuf);
+ /*
+ * Here we add two extra NOOPs as padding to avoid
+ * lite restore of a context with HEAD==TAIL.
+ *
+ * Caller must reserve WA_TAIL_DWORDS for us!
+ */
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
- request->tail = request->ringbuf->tail;
+ if (intel_ring_stopped(engine))
+ return 0;
- if (intel_ring_stopped(ring))
- return;
+ if (engine->last_context != request->ctx) {
+ if (engine->last_context)
+ intel_lr_context_unpin(engine->last_context, engine);
+ if (request->ctx != request->i915->kernel_context) {
+ intel_lr_context_pin(request->ctx, engine);
+ engine->last_context = request->ctx;
+ } else {
+ engine->last_context = NULL;
+ }
+ }
if (dev_priv->guc.execbuf_client)
i915_guc_submit(dev_priv->guc.execbuf_client, request);
else
execlists_context_queue(request);
+
+ return 0;
}
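The subtle point above is the ordering: the tail stored into the request (and later written to the context image) is sampled before the padding, so the two NOOPs always lie beyond what the hardware will execute. Reduced to a sketch (not the patch's code; the submission path is elided):

	/* Sketch: sample the tail first, then pad, so ELSP never submits a
	 * context whose HEAD equals its TAIL (WaIdleLiteRestore). */
	static void advance_and_pad(struct intel_ringbuffer *rb,
				    struct drm_i915_gem_request *rq)
	{
		intel_logical_ring_advance(rb);
		rq->tail = rb->tail;			/* hardware stops here */

		intel_logical_ring_emit(rb, MI_NOOP);	/* WA_TAIL_DWORDS of */
		intel_logical_ring_emit(rb, MI_NOOP);	/* padding past the tail */
		intel_logical_ring_advance(rb);
	}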
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
@@ -781,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+		 * falls off the end. So no immediate wrap is needed;
+		 * we only have to wait for the reserved size to become
+		 * free at the start of the ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
@@ -967,8 +1027,9 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
- if (ctx_obj && (ctx != ring->default_context))
- intel_lr_context_unpin(req);
+ if (ctx_obj && (ctx != req->i915->kernel_context))
+ intel_lr_context_unpin(ctx, ring);
+
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
@@ -1012,24 +1073,39 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
return 0;
}
-static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *ctx_obj,
- struct intel_ringbuffer *ringbuf)
+static int intel_lr_context_do_pin(struct intel_context *ctx,
+ struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct page *lrc_state_page;
+ uint32_t *lrc_reg_state;
+ int ret;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
return ret;
+ lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+ if (WARN_ON(!lrc_state_page)) {
+ ret = -ENODEV;
+ goto unpin_ctx_obj;
+ }
+
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
+ ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+ intel_lr_context_descriptor_update(ctx, ring);
+ lrc_reg_state = kmap(lrc_state_page);
+ lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
+ ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
ctx_obj->dirty = true;
/* Invalidate GuC TLB. */
@@ -1044,37 +1120,40 @@ unpin_ctx_obj:
return ret;
}
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
+static int intel_lr_context_pin(struct intel_context *ctx,
+ struct intel_engine_cs *engine)
{
int ret = 0;
- struct intel_engine_cs *ring = rq->ring;
- struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf = rq->ringbuf;
- if (rq->ctx->engine[ring->id].pin_count++ == 0) {
- ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+ if (ctx->engine[engine->id].pin_count++ == 0) {
+ ret = intel_lr_context_do_pin(ctx, engine);
if (ret)
goto reset_pin_count;
+
+ i915_gem_context_reference(ctx);
}
return ret;
reset_pin_count:
- rq->ctx->engine[ring->id].pin_count = 0;
+ ctx->engine[engine->id].pin_count = 0;
return ret;
}
-void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
+void intel_lr_context_unpin(struct intel_context *ctx,
+ struct intel_engine_cs *engine)
{
- struct intel_engine_cs *ring = rq->ring;
- struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf = rq->ringbuf;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
- if (ctx_obj) {
- WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (--rq->ctx->engine[ring->id].pin_count == 0) {
- intel_unpin_ringbuffer_obj(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- }
+ WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
+ if (--ctx->engine[engine->id].pin_count == 0) {
+ kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+ intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ ctx->engine[engine->id].lrc_vma = NULL;
+ ctx->engine[engine->id].lrc_desc = 0;
+ ctx->engine[engine->id].lrc_reg_state = NULL;
+
+ i915_gem_context_unreference(ctx);
}
}
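The pin/unpin pair above follows a refcount-like pattern: only the 0 -> 1 transition does the expensive work (pin, map, reference the context) and only the 1 -> 0 transition undoes it. The skeleton of that pattern, as a standalone sketch rather than kernel code:

	struct pinnable {
		int pin_count;
	};

	static int obj_pin(struct pinnable *p, int (*do_pin)(void))
	{
		int ret = 0;

		if (p->pin_count++ == 0) {	/* first user does the real work */
			ret = do_pin();
			if (ret)
				p->pin_count = 0;	/* reset on failure */
		}

		return ret;
	}

	static void obj_unpin(struct pinnable *p, void (*do_unpin)(void))
	{
		if (--p->pin_count == 0)	/* last user undoes it */
			do_unpin();
	}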
@@ -1087,7 +1166,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON_ONCE(w->count == 0))
+ if (w->count == 0)
return 0;
ring->gpu_caches_dirty = true;
@@ -1474,7 +1553,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
u8 next_context_status_buffer_hw;
lrc_setup_hardware_status_page(ring,
- ring->default_context->engine[ring->id].state);
+ dev_priv->kernel_context->engine[ring->id].state);
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1493,9 +1572,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
* | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
* BDW | CSB regs not reset | CSB regs reset |
* CHT | CSB regs not reset | CSB regs not reset |
+ * SKL | ? | ? |
+ * BXT | ? | ? |
*/
- next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
- & GEN8_CSB_PTR_MASK);
+ next_context_status_buffer_hw =
+ GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
/*
* When the CSB registers are reset (also after power-up / gpu reset),
@@ -1698,7 +1779,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- bool vf_flush_wa;
+ bool vf_flush_wa = false;
u32 flags = 0;
int ret;
@@ -1720,14 +1801,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
- }
- /*
- * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
- * control.
- */
- vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
- flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ /*
+ * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+ * pipe control.
+ */
+ if (IS_GEN9(ring->dev))
+ vf_flush_wa = true;
+ }
ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
if (ret)
@@ -1791,44 +1872,71 @@ static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
}
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+#define WA_TAIL_DWORDS 2
+
+static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
+{
+ return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
+
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
- u32 cmd;
int ret;
- /*
- * Reserve space for 2 NOOPs at the end of each request to be
- * used as a workaround for not being allowed to do lite
- * restore with HEAD==TAIL (WaIdleLiteRestore).
- */
- ret = intel_logical_ring_begin(request, 8);
+ ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
- cmd = MI_STORE_DWORD_IMM_GEN4;
- cmd |= MI_GLOBAL_GTT;
+ /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+ BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- intel_logical_ring_emit(ringbuf, cmd);
intel_logical_ring_emit(ringbuf,
- (ring->status_page.gfx_addr +
- (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+ (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+ intel_logical_ring_emit(ringbuf,
+ hws_seqno_address(request->ring) |
+ MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance_and_submit(request);
+ return intel_logical_ring_advance_and_submit(request);
+}
- /*
- * Here we add two extra NOOPs as padding to avoid
- * lite restore of a context with HEAD==TAIL.
+static int gen8_emit_request_render(struct drm_i915_gem_request *request)
+{
+ struct intel_ringbuffer *ringbuf = request->ringbuf;
+ int ret;
+
+ ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
+ if (ret)
+ return ret;
+
+	/* We're using a qword write; the seqno should be aligned to 8 bytes. */
+ BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
+	/* w/a: post-sync ops following a GPGPU operation require
+	 * a prior CS_STALL, which is emitted by the flush
+	 * following the batch.
*/
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf,
+ (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+ /* We're thrashing one dword of HWS. */
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance(ringbuf);
-
- return 0;
+ return intel_logical_ring_advance_and_submit(request);
}
static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
@@ -1911,12 +2019,44 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
ring->status_page.obj = NULL;
}
+ ring->disable_lite_restore_wa = false;
+ ring->ctx_desc_template = 0;
+
lrc_destroy_wa_ctx_obj(ring);
ring->dev = NULL;
}
-static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+static void
+logical_ring_default_vfuncs(struct drm_device *dev,
+ struct intel_engine_cs *ring)
+{
+	/* Default vfuncs which can be overridden by each engine. */
+ ring->init_hw = gen8_init_common_ring;
+ ring->emit_request = gen8_emit_request;
+ ring->emit_flush = gen8_emit_flush;
+ ring->irq_get = gen8_logical_ring_get_irq;
+ ring->irq_put = gen8_logical_ring_put_irq;
+ ring->emit_bb_start = gen8_emit_bb_start;
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ ring->get_seqno = bxt_a_get_seqno;
+ ring->set_seqno = bxt_a_set_seqno;
+ } else {
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ }
+}
+
+static inline void
+logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
+{
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+ ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+}
+
+static int
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
+ struct intel_context *dctx = to_i915(dev)->kernel_context;
int ret;
/* Intentionally left blank. */
@@ -1933,19 +2073,18 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
+ logical_ring_init_platform_invariants(ring);
+
ret = i915_cmd_parser_init_ring(ring);
if (ret)
goto error;
- ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+ ret = intel_lr_context_deferred_alloc(dctx, ring);
if (ret)
goto error;
/* As this is the default context, always pin it */
- ret = intel_lr_context_do_pin(
- ring,
- ring->default_context->engine[ring->id].state,
- ring->default_context->engine[ring->id].ringbuf);
+ ret = intel_lr_context_do_pin(dctx, ring);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
@@ -1968,32 +2107,25 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->name = "render ring";
ring->id = RCS;
+ ring->exec_id = I915_EXEC_RENDER;
+ ring->guc_id = GUC_RENDER_ENGINE;
ring->mmio_base = RENDER_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+
+ logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ logical_ring_default_vfuncs(dev, ring);
+
+ /* Override some for render ring. */
if (INTEL_INFO(dev)->gen >= 9)
ring->init_hw = gen9_init_render_ring;
else
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- }
- ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush_render;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ ring->emit_request = gen8_emit_request_render;
ring->dev = dev;
@@ -2027,25 +2159,12 @@ static int logical_bsd_ring_init(struct drm_device *dev)
ring->name = "bsd ring";
ring->id = VCS;
+ ring->exec_id = I915_EXEC_BSD;
+ ring->guc_id = GUC_VIDEO_ENGINE;
ring->mmio_base = GEN6_BSD_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->init_hw = gen8_init_common_ring;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- }
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
@@ -2055,22 +2174,14 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
- ring->name = "bds2 ring";
+ ring->name = "bsd2 ring";
ring->id = VCS2;
+ ring->exec_id = I915_EXEC_BSD;
+ ring->guc_id = GUC_VIDEO_ENGINE2;
ring->mmio_base = GEN8_BSD2_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->init_hw = gen8_init_common_ring;
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
@@ -2082,25 +2193,12 @@ static int logical_blt_ring_init(struct drm_device *dev)
ring->name = "blitter ring";
ring->id = BCS;
+ ring->exec_id = I915_EXEC_BLT;
+ ring->guc_id = GUC_BLITTER_ENGINE;
ring->mmio_base = BLT_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->init_hw = gen8_init_common_ring;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- }
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
@@ -2112,25 +2210,12 @@ static int logical_vebox_ring_init(struct drm_device *dev)
ring->name = "video enhancement ring";
ring->id = VECS;
+ ring->exec_id = I915_EXEC_VEBOX;
+ ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
ring->mmio_base = VEBOX_RING_BASE;
- ring->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->irq_keep_mask =
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->init_hw = gen8_init_common_ring;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
- }
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
@@ -2235,6 +2320,27 @@ make_rpcs(struct drm_device *dev)
return rpcs;
}
+static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
+{
+ u32 indirect_ctx_offset;
+
+ switch (INTEL_INFO(ring->dev)->gen) {
+ default:
+ MISSING_CASE(INTEL_INFO(ring->dev)->gen);
+ /* fall through */
+ case 9:
+ indirect_ctx_offset =
+ GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ break;
+ case 8:
+ indirect_ctx_offset =
+ GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ break;
+ }
+
+ return indirect_ctx_offset;
+}
+
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
@@ -2278,7 +2384,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE));
+ (HAS_RESOURCE_STREAMER(dev) ?
+ CTX_CTRL_RS_CTX_ENABLE : 0)));
ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
/* Ring buffer start address is not known until the buffer is pinned.
@@ -2307,7 +2414,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
- CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+ intel_lr_indirect_ctx_offset(ring) << 6;
reg_state[CTX_BB_PER_CTX_PTR+1] =
(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
@@ -2368,26 +2475,39 @@ void intel_lr_context_free(struct intel_context *ctx)
{
int i;
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = I915_NUM_RINGS; --i >= 0; ) {
+ struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
- if (ctx_obj) {
- struct intel_ringbuffer *ringbuf =
- ctx->engine[i].ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
+ if (!ctx_obj)
+ continue;
- if (ctx == ring->default_context) {
- intel_unpin_ringbuffer_obj(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- }
- WARN_ON(ctx->engine[ring->id].pin_count);
- intel_ringbuffer_free(ringbuf);
- drm_gem_object_unreference(&ctx_obj->base);
+ if (ctx == ctx->i915->kernel_context) {
+ intel_unpin_ringbuffer_obj(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
}
+
+ WARN_ON(ctx->engine[i].pin_count);
+ intel_ringbuffer_free(ringbuf);
+ drm_gem_object_unreference(&ctx_obj->base);
}
}
-static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
+/**
+ * intel_lr_context_size() - return the size of the context for an engine
+ * @ring: which engine to find the context size for
+ *
+ * Each engine may require a different amount of space for a context image,
+ * so when allocating (or copying) an image, this function can be used to
+ * find the right size for the specific engine.
+ *
+ * Return: size (in bytes) of an engine-specific context image
+ *
+ * Note: this size includes the HWSP, which is part of the context image
+ * in LRC mode, but does not include the "shared data page" used with
+ * GuC submission. The caller should account for this if using the GuC.
+ */
+uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
{
int ret = 0;
@@ -2444,7 +2564,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
*/
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
@@ -2455,7 +2575,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
WARN_ON(ctx->engine[ring->id].state);
- context_size = round_up(get_lr_context_size(ring), 4096);
+ context_size = round_up(intel_lr_context_size(ring), 4096);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2481,14 +2601,13 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
- if (ctx != ring->default_context && ring->init_context) {
+ if (ctx != ctx->i915->kernel_context && ring->init_context) {
struct drm_i915_gem_request *req;
- ret = i915_gem_request_alloc(ring,
- ctx, &req);
- if (ret) {
- DRM_ERROR("ring create req: %d\n",
- ret);
+ req = i915_gem_request_alloc(ring, ctx);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ DRM_ERROR("ring create req: %d\n", ret);
goto error_ringbuf;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 0b821b91723a..e6cda3e225d0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,8 +25,6 @@
#define _INTEL_LRC_H_
#define GEN8_LR_CONTEXT_ALIGN 4096
-#define GEN8_CSB_ENTRIES 6
-#define GEN8_CSB_PTR_MASK 0x07
/* Execlists regs */
#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
@@ -40,6 +38,22 @@
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
+/* The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
+#define GEN8_CSB_WRITE_PTR(csb_status) \
+ (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
+#define GEN8_CSB_READ_PTR(csb_status) \
+ (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
@@ -84,21 +98,25 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
void intel_lr_context_free(struct intel_context *ctx);
+uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct drm_i915_gem_request *req);
+void intel_lr_context_unpin(struct intel_context *ctx,
+ struct intel_engine_cs *engine);
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring);
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+ struct intel_engine_cs *ring);
+
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bc04d8d29acb..10dc3517b63b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -31,6 +31,7 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -122,6 +123,10 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.flags |= flags;
+ if (INTEL_INFO(dev)->gen < 5)
+ pipe_config->gmch_pfit.lvds_border_bits =
+ tmp & LVDS_BORDER_ENABLE;
+
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_INFO(dev)->gen < 4) {
tmp = I915_READ(PFIT_CONTROL);
@@ -477,11 +482,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
- if (!HAS_PCH_SPLIT(dev)) {
- drm_modeset_lock_all(dev);
+ if (!HAS_PCH_SPLIT(dev))
intel_display_resume(dev);
- drm_modeset_unlock_all(dev);
- }
dev_priv->modeset_restore = MODESET_DONE;
@@ -1088,7 +1090,12 @@ void intel_lvds_init(struct drm_device *dev)
* preferred mode is the right one.
*/
mutex_lock(&dev->mode_config.mutex);
- edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+ if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
+ edid = drm_get_edid_switcheroo(connector,
+ intel_gmbus_get_adapter(dev_priv, pin));
+ else
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv, pin));
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 76f1980a7541..9168413fe204 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -240,9 +240,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = intel_ring_begin(req, 4);
if (ret) {
@@ -283,9 +283,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
@@ -349,9 +349,9 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = intel_ring_begin(req, 6);
if (ret) {
@@ -423,9 +423,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
/* synchronous slowpath */
struct drm_i915_gem_request *req;
- ret = i915_gem_request_alloc(ring, ring->default_context, &req);
- if (ret)
- return ret;
+ req = i915_gem_request_alloc(ring, NULL);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b28c29f20e75..3425d8e737b3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,8 @@
#include <linux/module.h>
/**
+ * DOC: RC6
+ *
 * RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while at this stage. This
* stage is entered automatically when the GPU is idle when RC6 support is
@@ -546,7 +548,7 @@ static const struct intel_watermark_params i845_wm_info = {
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
* @wm: chip FIFO params
- * @pixel_size: display pixel size
+ * @cpp: bytes per pixel
* @latency_ns: memory latency for the platform
*
* Calculate the watermark level (the level at which the display plane will
@@ -562,8 +564,7 @@ static const struct intel_watermark_params i845_wm_info = {
*/
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
const struct intel_watermark_params *wm,
- int fifo_size,
- int pixel_size,
+ int fifo_size, int cpp,
unsigned long latency_ns)
{
long entries_required, wm_size;
@@ -574,7 +575,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
* clocks go from a few thousand to several hundred thousand.
* latency is usually a few thousand
*/
- entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
1000;
entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
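A worked example with assumed numbers: with clock_in_khz = 148500 (a 1080p60 dotclock), cpp = 4 and latency_ns = 5000, the integer arithmetic gives entries_required = (148500 / 1000) * 4 * 5000 / 1000 = 2960 bytes, which DIV_ROUND_UP turns into 47 cachelines of 64 bytes; the watermark level is then whatever portion of the FIFO remains once those entries are accounted for.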
@@ -638,13 +639,13 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
crtc = single_enabled_crtc(dev);
if (crtc) {
const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
- int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+ int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
int clock = adjusted_mode->crtc_clock;
/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
pineview_display_wm.fifo_size,
- pixel_size, latency->display_sr);
+ cpp, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
@@ -654,7 +655,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
/* cursor SR */
wm = intel_calculate_wm(clock, &pineview_cursor_wm,
pineview_display_wm.fifo_size,
- pixel_size, latency->cursor_sr);
+ cpp, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= FW_WM(wm, CURSOR_SR);
@@ -663,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->display_hpll_disable);
+ cpp, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= FW_WM(wm, HPLL_SR);
@@ -672,7 +673,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->cursor_hpll_disable);
+ cpp, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
@@ -696,7 +697,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
{
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
- int htotal, hdisplay, clock, pixel_size;
+ int htotal, hdisplay, clock, cpp;
int line_time_us, line_count;
int entries, tlb_miss;
@@ -711,10 +712,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+ cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
/* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -726,7 +727,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
/* Use the large buffer method to calculate cursor watermark */
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
+ entries = line_count * crtc->cursor->state->crtc_w * cpp;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
@@ -782,7 +783,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
{
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
- int hdisplay, htotal, pixel_size, clock;
+ int hdisplay, htotal, cpp, clock;
unsigned long line_time_us;
int line_count, line_size;
int small, large;
@@ -798,21 +799,21 @@ static bool g4x_compute_srwm(struct drm_device *dev,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+ cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
+ line_size = hdisplay * cpp;
/* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ small = ((clock * cpp / 1000) * latency_ns) / 1000;
large = line_count * line_size;
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
+ entries = line_count * cpp * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
@@ -904,13 +905,13 @@ enum vlv_wm_level {
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
unsigned int pipe_htotal,
unsigned int horiz_pixels,
- unsigned int bytes_per_pixel,
+ unsigned int cpp,
unsigned int latency)
{
unsigned int ret;
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
- ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+ ret = (ret + 1) * horiz_pixels * cpp;
ret = DIV_ROUND_UP(ret, 64);
return ret;
@@ -939,7 +940,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
int level)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int clock, htotal, pixel_size, width, wm;
+ int clock, htotal, cpp, width, wm;
if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
@@ -947,7 +948,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
if (!state->visible)
return 0;
- pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+ cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
clock = crtc->config->base.adjusted_mode.crtc_clock;
htotal = crtc->config->base.adjusted_mode.crtc_htotal;
width = crtc->config->pipe_src_w;
@@ -963,7 +964,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
*/
wm = 63;
} else {
- wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+ wm = vlv_wm_method2(clock, htotal, width, cpp,
dev_priv->wm.pri_latency[level] * 10);
}
@@ -1437,7 +1438,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
- int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+ int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
unsigned long line_time_us;
int entries;
@@ -1445,7 +1446,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
+ cpp * hdisplay;
entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
@@ -1455,7 +1456,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * crtc->cursor->state->crtc_w;
+ cpp * crtc->cursor->state->crtc_w;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1516,7 +1517,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 0);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+ int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
if (IS_GEN2(dev))
cpp = 4;
@@ -1538,7 +1539,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode;
- int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+ int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
if (IS_GEN2(dev))
cpp = 4;
@@ -1584,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
- int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
+ int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
unsigned long line_time_us;
int entries;
@@ -1592,7 +1593,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
+ cpp * hdisplay;
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
@@ -1672,6 +1673,9 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
if (pipe_h < pfit_h)
pipe_h = pfit_h;
+ if (WARN_ON(!pfit_w || !pfit_h))
+ return pixel_rate;
+
pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
pfit_w * pfit_h);
}
@@ -1680,15 +1684,14 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
}
/* latency must be in 0.1us units. */
-static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
- uint32_t latency)
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
uint64_t ret;
if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;
- ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
+ ret = (uint64_t) pixel_rate * cpp * latency;
ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
return ret;
@@ -1696,24 +1699,37 @@ static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
- uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+ uint32_t horiz_pixels, uint8_t cpp,
uint32_t latency)
{
uint32_t ret;
if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;
+ if (WARN_ON(!pipe_htotal))
+ return UINT_MAX;
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
- ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+ ret = (ret + 1) * horiz_pixels * cpp;
ret = DIV_ROUND_UP(ret, 64) + 2;
return ret;
}
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
- uint8_t bytes_per_pixel)
+ uint8_t cpp)
{
- return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
+ /*
+ * Neither of these should be possible since this function shouldn't be
+ * called if the CRTC is off or the plane is invisible. But let's be
+ * extra paranoid to avoid a potential divide-by-zero if we screw up
+ * elsewhere in the driver.
+ */
+ if (WARN_ON(!cpp))
+ return 0;
+ if (WARN_ON(!horiz_pixels))
+ return 0;
+
+ return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
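To make the units in ilk_wm_method2() concrete, a standalone sketch running the same arithmetic on illustrative numbers (pixel clock 148500 kHz, htotal 2200, width 1920, cpp 4, latency 50 in 0.1 us units, i.e. 5 us):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int clock = 148500, htotal = 2200, width = 1920, cpp = 4;
            unsigned int latency = 50; /* 0.1 us units */
            unsigned int ret;

            /* latency * clock / 10000 = pixels elapsed; / htotal = scanlines */
            ret = (latency * clock) / (htotal * 10000);   /* 0 here */
            ret = (ret + 1) * width * cpp;                /* 7680 bytes */
            ret = DIV_ROUND_UP(ret, 64) + 2;              /* 122 64-byte blocks */
            printf("method2 watermark: %u blocks\n", ret);
            return 0;
    }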
struct ilk_wm_maximums {
@@ -1732,13 +1748,14 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
uint32_t mem_value,
bool is_lp)
{
- int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+ int cpp = pstate->base.fb ?
+ drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
if (!cstate->base.active || !pstate->visible)
return 0;
- method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+ method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
if (!is_lp)
return method1;
@@ -1746,8 +1763,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
- bpp,
- mem_value);
+ cpp, mem_value);
return min(method1, method2);
}
@@ -1760,18 +1776,18 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
- int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+ int cpp = pstate->base.fb ?
+ drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
if (!cstate->base.active || !pstate->visible)
return 0;
- method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+ method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
- bpp,
- mem_value);
+ cpp, mem_value);
return min(method1, method2);
}
@@ -1804,12 +1820,13 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t pri_val)
{
- int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+ int cpp = pstate->base.fb ?
+ drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
if (!cstate->base.active || !pstate->visible)
return 0;
- return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
+ return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
@@ -2002,14 +2019,19 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
static uint32_t
-hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
+hsw_compute_linetime_wm(struct drm_device *dev,
+ struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &cstate->base.adjusted_mode;
u32 linetime, ips_linetime;
- if (!intel_crtc->active)
+ if (!cstate->base.active)
+ return 0;
+ if (WARN_ON(adjusted_mode->crtc_clock == 0))
+ return 0;
+ if (WARN_ON(dev_priv->cdclk_freq == 0))
return 0;
 /* The WM are computed based on how long it takes to fill a single
@@ -2281,6 +2303,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
return PTR_ERR(cstate);
pipe_wm = &cstate->wm.optimal.ilk;
+ memset(pipe_wm, 0, sizeof(*pipe_wm));
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
ps = drm_atomic_get_plane_state(state,
@@ -2317,8 +2340,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- pipe_wm->linetime = hsw_compute_linetime_wm(dev,
- &intel_crtc->base);
+ pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
@@ -2854,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
int y)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
+ uint32_t width = 0, height = 0;
+
+ width = drm_rect_width(&intel_pstate->src) >> 16;
+ height = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(pstate->rotation))
+ swap(width, height);
/* for planar format */
if (fb->pixel_format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
- return intel_crtc->config->pipe_src_w *
- intel_crtc->config->pipe_src_h *
+ return width * height *
drm_format_plane_cpp(fb->pixel_format, 0);
else /* uv-plane data rate */
- return (intel_crtc->config->pipe_src_w/2) *
- (intel_crtc->config->pipe_src_h/2) *
+ return (width / 2) * (height / 2) *
drm_format_plane_cpp(fb->pixel_format, 1);
}
/* for packed formats */
- return intel_crtc->config->pipe_src_w *
- intel_crtc->config->pipe_src_h *
- drm_format_plane_cpp(fb->pixel_format, 0);
+ return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
}
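The planar branch above encodes NV12's layout: the Y plane covers the full source rectangle at one byte per pixel, the CbCr plane has a quarter as many samples at two bytes each, i.e. 1.5 bytes per displayed pixel overall. A standalone sketch with an illustrative 1920x1080 source:

    #include <stdio.h>

    int main(void)
    {
            unsigned int width = 1920, height = 1080;
            unsigned int y_rate  = width * height * 1;             /* 2073600 */
            unsigned int uv_rate = (width / 2) * (height / 2) * 2; /* 1036800 */

            /* Total equals width * height * 3 / 2. */
            printf("y=%u uv=%u total=%u\n", y_rate, uv_rate, y_rate + uv_rate);
            return 0;
    }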
/*
@@ -2951,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_framebuffer *fb = plane->state->fb;
int id = skl_wm_plane_id(intel_plane);
- if (fb == NULL)
+ if (!to_intel_plane_state(plane->state)->visible)
continue;
+
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -2978,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
- if (pstate->fb == NULL)
+ if (!to_intel_plane_state(pstate)->visible)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -3028,26 +3054,25 @@ static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
/*
* The max latency should be 257 (max the punit can code is 255 and we add 2us
- * for the read latency) and bytes_per_pixel should always be <= 8, so that
+ * for the read latency) and cpp should always be <= 8, so that
* should allow pixel_rate up to ~2 GHz which seems sufficient since max
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
-static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
- uint32_t latency)
+static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
uint32_t wm_intermediate_val, ret;
if (latency == 0)
return UINT_MAX;
- wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
+ wm_intermediate_val = latency * pixel_rate * cpp / 512;
ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
return ret;
}
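The bound in the comment above is quickly verified: even at the stated worst case (latency 257, pixel rate ~2 GHz = 2,000,000 kHz, cpp 8) the uint32_t intermediate stays below UINT_MAX, with roughly 4% headroom:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t worst = (uint64_t)257 * 2000000 * 8; /* 4,112,000,000 */

            assert(worst <= UINT32_MAX); /* UINT32_MAX is 4,294,967,295 */
            return 0;
    }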
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
- uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+ uint32_t horiz_pixels, uint8_t cpp,
uint64_t tiling, uint32_t latency)
{
uint32_t ret;
@@ -3057,7 +3082,7 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
if (latency == 0)
return UINT_MAX;
- plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+ plane_bytes_per_line = horiz_pixels * cpp;
if (tiling == I915_FORMAT_MOD_Y_TILED ||
tiling == I915_FORMAT_MOD_Yf_TILED) {
@@ -3102,28 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
{
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
+ struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(plane->state);
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines;
uint32_t selected_result;
- uint8_t bytes_per_pixel;
+ uint8_t cpp;
+ uint32_t width = 0, height = 0;
- if (latency == 0 || !cstate->base.active || !fb)
+ if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
return false;
- bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
+ width = drm_rect_width(&intel_pstate->src) >> 16;
+ height = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(plane->state->rotation))
+ swap(width, height);
+
+ cpp = drm_format_plane_cpp(fb->pixel_format, 0);
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
- bytes_per_pixel,
- latency);
+ cpp, latency);
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- cstate->pipe_src_w,
- bytes_per_pixel,
+ width,
+ cpp,
fb->modifier[0],
latency);
- plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
+ plane_bytes_per_line = width * cpp;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -3131,11 +3164,11 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t min_scanlines = 4;
uint32_t y_tile_minimum;
if (intel_rotation_90_or_270(plane->state->rotation)) {
- int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
+ int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
- switch (bpp) {
+ switch (cpp) {
case 1:
min_scanlines = 16;
break;
@@ -3606,23 +3639,45 @@ static void skl_update_wm(struct drm_crtc *crtc)
dev_priv->wm.skl_hw = *results;
}
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_compute_wm_config(struct drm_device *dev,
+ struct intel_wm_config *config)
{
- struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+
+ /* Compute the currently _active_ config */
+ for_each_intel_crtc(dev, crtc) {
+ const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
+
+ if (!wm->pipe_enabled)
+ continue;
+
+ config->sprites_enabled |= wm->sprites_enabled;
+ config->sprites_scaled |= wm->sprites_scaled;
+ config->num_pipes_active++;
+ }
+}
+
+static void ilk_program_watermarks(struct intel_crtc_state *cstate)
+{
+ struct drm_crtc *crtc = cstate->base.crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
- struct intel_wm_config *config = &dev_priv->wm.config;
+ struct intel_wm_config config = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
+ ilk_compute_wm_config(dev, &config);
+
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_INFO(dev)->gen >= 7 &&
- config->num_pipes_active == 1 && config->sprites_enabled) {
- ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
+ config.num_pipes_active == 1 && config.sprites_enabled) {
+ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
} else {
@@ -3639,7 +3694,6 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
static void ilk_update_wm(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
@@ -3659,7 +3713,7 @@ static void ilk_update_wm(struct drm_crtc *crtc)
intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
- ilk_program_watermarks(dev_priv);
+ ilk_program_watermarks(cstate);
}
static void skl_pipe_wm_active_state(uint32_t val,
@@ -4045,7 +4099,7 @@ void intel_update_watermarks(struct drm_crtc *crtc)
dev_priv->display.update_wm(crtc);
}
-/**
+/*
* Lock protecting IPS related data structures
*/
DEFINE_SPINLOCK(mchdev_lock);
@@ -4081,11 +4135,13 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
static void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;
spin_lock_irq(&mchdev_lock);
+ rgvmodectl = I915_READ(MEMMODECTL);
+
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -4518,21 +4574,71 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
}
if (HAS_RC6p(dev))
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+ onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+ onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
else
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+ onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
-static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable_rc6 = true;
+ unsigned long rc6_ctx_base;
+
+ if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+ DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ /*
+ * The exact context size is not known for BXT, so assume a page size
+ * for this check.
+ */
+ rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+ if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
+ (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
+ dev_priv->gtt.stolen_reserved_size))) {
+ DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+ enable_rc6 = false;
+ }
+
+ if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
+ ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
+ ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
+ ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
+ DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_HW_ENABLE)) &&
+ ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
+ !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
+ DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+ enable_rc6 = false;
+ }
+
+ return enable_rc6;
+}
+
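The stolen-range test above is an ordinary closed-open containment check; isolated as a sketch, keeping the patch's assumption of a page-sized context:

    #include <stdbool.h>
    #include <stdint.h>

    /* Is [addr, addr + len) fully inside [base, base + size)? */
    bool range_contains(uint64_t base, uint64_t size,
                        uint64_t addr, uint64_t len)
    {
            return addr >= base && addr + len <= base + size;
    }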
+int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
/* No RC6 before Ironlake and code is gone for ilk. */
if (INTEL_INFO(dev)->gen < 6)
return 0;
+ if (!enable_rc6)
+ return 0;
+
+ if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
+ DRM_INFO("RC6 disabled by BIOS\n");
+ return 0;
+ }
+
/* Respect the kernel parameter if it is set */
if (enable_rc6 >= 0) {
int mask;
@@ -4702,8 +4808,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
- "on" : "off");
+ DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
@@ -4722,8 +4827,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
- if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
- ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
+ if (NEEDS_WaRsDisableCoarsePowerGating(dev))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@@ -5146,8 +5250,6 @@ static void cherryview_setup_pctx(struct drm_device *dev)
u32 pcbr;
int pctx_size = 32*1024;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
@@ -5169,7 +5271,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
u32 pcbr;
int pctx_size = 24*1024;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ mutex_lock(&dev->struct_mutex);
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
@@ -5197,7 +5299,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
pctx = i915_gem_object_create_stolen(dev, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
- return;
+ goto out;
}
pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
@@ -5206,6 +5308,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
+ mutex_unlock(&dev->struct_mutex);
}
static void valleyview_cleanup_pctx(struct drm_device *dev)
@@ -5215,7 +5318,7 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
if (WARN_ON(!dev_priv->vlv_pctx))
return;
- drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
+ drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
dev_priv->vlv_pctx = NULL;
}
@@ -6024,7 +6127,6 @@ void intel_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
/*
 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
* requirement.
@@ -6159,8 +6261,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
return;
if (IS_IRONLAKE_M(dev)) {
- mutex_lock(&dev->struct_mutex);
ironlake_enable_drps(dev);
+ mutex_lock(&dev->struct_mutex);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
} else if (INTEL_INFO(dev)->gen >= 6) {
@@ -6544,6 +6646,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+ /*
+ * Wait at least 100 clocks before re-enabling clock gating. See
+ * the definition of L3SQCREG1 in BSpec.
+ */
+ POSTING_READ(GEN8_L3SQCREG1);
+ udelay(1);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
/*
@@ -6990,6 +7098,7 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
+ dev_priv->display.program_watermarks = ilk_program_watermarks;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
@@ -7155,9 +7264,10 @@ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
- div = vlv_gpu_freq_div(czclk_freq) / 2;
+ div = vlv_gpu_freq_div(czclk_freq);
if (div < 0)
return div;
+ div /= 2;
return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
@@ -7166,9 +7276,10 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
- mul = vlv_gpu_freq_div(czclk_freq) / 2;
+ mul = vlv_gpu_freq_div(czclk_freq);
if (mul < 0)
return mul;
+ mul /= 2;
/* CHV needs even values */
return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
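Both hunks move the halving after the error check because C integer division truncates toward zero: dividing a negative error code by two first can turn it into 0, which then slips past the < 0 test. A two-line demonstration (the -1 is an arbitrary stand-in for an error return):

    #include <stdio.h>

    int main(void)
    {
            int err = -1;            /* stand-in for a negative error code */

            printf("%d\n", err / 2); /* prints 0: a "< 0" check now misses it */
            return 0;
    }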
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 9ccff3011523..0b42ada338c8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -225,7 +225,12 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
+ if (dev_priv->psr.link_standby)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -280,6 +285,9 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
if (IS_HASWELL(dev))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+ if (dev_priv->psr.link_standby)
+ val |= EDP_PSR_LINK_STANDBY;
+
I915_WRITE(EDP_PSR_CTL, val |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -304,8 +312,15 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
dev_priv->psr.source_ok = false;
- if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
- DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+ /*
+ * HSW spec explicitly says PSR is tied to port A.
+ * BDW+ platforms with DDI implementation of PSR have different
+ * PSR registers per transcoder and we only implement transcoder EDP
+ * ones. Since by display design transcoder EDP is tied to port A,
+ * we can safely bail out based on port A.
+ */
+ if (HAS_DDI(dev) && dig_port->port != PORT_A) {
+ DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return false;
}
@@ -314,6 +329,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+ !dev_priv->psr.link_standby) {
+ DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
+ return false;
+ }
+
if (IS_HASWELL(dev) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
S3D_ENABLE) {
@@ -327,12 +348,6 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
- ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
- DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
- return false;
- }
-
dev_priv->psr.source_ok = true;
return true;
}
@@ -763,6 +778,36 @@ void intel_psr_init(struct drm_device *dev)
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
+ /* Per platform default */
+ if (i915.enable_psr == -1) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ i915.enable_psr = 1;
+ else
+ i915.enable_psr = 0;
+ }
+
+ /* Set link_standby vs. link_off defaults */
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ /* HSW and BDW require workarounds that we don't implement. */
+ dev_priv->psr.link_standby = false;
+ else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ /* On VLV and CHV only standby mode is supported. */
+ dev_priv->psr.link_standby = true;
+ else
+ /* For new platforms let's respect VBT back again */
+ dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+
+ /* Override link_standby vs. link_off defaults */
+ if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
+ DRM_DEBUG_KMS("PSR: Forcing link standby\n");
+ dev_priv->psr.link_standby = true;
+ }
+ if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
+ DRM_DEBUG_KMS("PSR: Forcing main link off\n");
+ dev_priv->psr.link_standby = false;
+ }
+
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
}
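Summarizing the parameter handling above as a sketch: once the -1 default has been resolved per platform, 0 disables PSR, 1 enables it with the platform's link setting, and 2/3 force link standby/off. The struct and helper here are illustrative, not driver code:

    #include <stdbool.h>

    struct psr_cfg {
            bool enabled;
            bool link_standby;
    };

    /* platform_standby is whatever the per-platform rules above selected. */
    struct psr_cfg decode_enable_psr(int param, bool platform_standby)
    {
            struct psr_cfg c = {
                    .enabled = param != 0,
                    .link_standby = platform_standby,
            };

            if (param == 2)
                    c.link_standby = true;   /* force main link standby */
            if (param == 3)
                    c.link_standby = false;  /* force main link off */
            return c;
    }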
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 40c6aff57256..9121646d7c4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -746,9 +746,9 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
ret = i915_gem_render_state_init(req);
if (ret)
- DRM_ERROR("init render state: %d\n", ret);
+ return ret;
- return ret;
+ return 0;
}
static int wa_add(struct drm_i915_private *dev_priv,
@@ -789,6 +789,22 @@ static int wa_add(struct drm_i915_private *dev_priv,
#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
+static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_workarounds *wa = &dev_priv->workarounds;
+ const uint32_t index = wa->hw_whitelist_count[ring->id];
+
+ if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
+ return -EINVAL;
+
+ WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
+ i915_mmio_reg_offset(reg));
+ wa->hw_whitelist_count[ring->id]++;
+
+ return 0;
+}
+
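wa_ring_whitelist_reg() follows a bounded-slot pattern: each engine exposes a fixed number of hardware whitelist registers and the helper refuses loudly once they are spent. Reduced to a sketch (MAX_SLOTS stands in for RING_MAX_NONPRIV_SLOTS):

    #define MAX_SLOTS 12 /* assumed stand-in for RING_MAX_NONPRIV_SLOTS */

    int claim_slot(unsigned int *count)
    {
            if (*count >= MAX_SLOTS)
                    return -1;      /* the driver returns -EINVAL here */
            return (*count)++;      /* index of the slot just claimed */
    }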
static int gen8_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -894,6 +910,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ int ret;
/* WaEnableLbsSlaRetryTimerDecrement:skl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
@@ -951,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+ if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -964,6 +981,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
/* WaDisableSTUnitPowerOptimization:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+ /* WaOCLCoherentLineFlush:skl,bxt */
+ I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_FLUSH_COHERENT_LINES));
+
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+ ret = wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
+ if (ret)
+ return ret;
+
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+ ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -1019,6 +1050,16 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
if (ret)
return ret;
+ /*
+ * The actual WA is to disable per-context preemption granularity control
+ * until D0, which is the default case, so this is equivalent to
+ * !WaDisablePerCtxtPreemptionGranularityControl:skl
+ */
+ if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ }
+
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
@@ -1044,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+ /* This is tied to WaForceContextSaveRestoreNonCoherent */
+ if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
/*
 * Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
@@ -1071,6 +1113,11 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ /* WaDisableLSQCROPERFforOCL:skl */
+ ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
return skl_tune_iz_hashing(ring);
}
@@ -1106,6 +1153,20 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
+ /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
+ /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
+ /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
+ /* WaDisableLSQCROPERFforOCL:bxt */
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
+ if (ret)
+ return ret;
+
+ ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1117,6 +1178,7 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
WARN_ON(ring->id != RCS);
dev_priv->workarounds.count = 0;
+ dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
if (IS_BROADWELL(dev))
return bdw_init_workarounds(ring);
@@ -1867,15 +1929,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
offset = cs_offset;
}
- ret = intel_ring_begin(req, 4);
+ ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
@@ -1901,6 +1961,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
return 0;
}
+static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+ if (!dev_priv->status_page_dmah)
+ return;
+
+ drm_pci_free(ring->dev, dev_priv->status_page_dmah);
+ ring->status_page.page_addr = NULL;
+}
+
static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
@@ -1917,9 +1988,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
static int init_status_page(struct intel_engine_cs *ring)
{
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = ring->status_page.obj;
- if ((obj = ring->status_page.obj) == NULL) {
+ if (obj == NULL) {
unsigned flags;
int ret;
@@ -1990,6 +2061,7 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
else
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
+ ringbuf->vma = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
@@ -2019,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = ringbuf->obj;
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ unsigned flags = PIN_OFFSET_BIAS | 4096;
int ret;
if (HAS_LLC(dev_priv) && !obj->stolen) {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
if (ret)
return ret;
@@ -2038,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return -ENOMEM;
}
} else {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+ flags | PIN_MAPPABLE);
if (ret)
return ret;
@@ -2048,6 +2123,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return ret;
}
+ /* Access through the GTT requires the device to be awake. */
+ assert_rpm_wakelock_held(dev_priv);
+
ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
@@ -2056,6 +2134,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
}
}
+ ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+
return 0;
}
@@ -2164,7 +2244,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
} else {
- BUG_ON(ring->id != RCS);
+ WARN_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
goto error;
@@ -2210,7 +2290,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
if (ring->cleanup)
ring->cleanup(ring);
- cleanup_status_page(ring);
+ if (I915_NEED_GFX_HWS(ring->dev)) {
+ cleanup_status_page(ring);
+ } else {
+ WARN_ON(ring->id != RCS);
+ cleanup_phys_status_page(ring);
+ }
i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
@@ -2373,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end. So we don't need an immediate wrap,
+ * only a wait until the reserved size is available from
+ * the start of the ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
@@ -2666,6 +2751,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->name = "render ring";
ring->id = RCS;
+ ring->exec_id = I915_EXEC_RENDER;
ring->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 8) {
@@ -2814,6 +2900,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->name = "bsd ring";
ring->id = VCS;
+ ring->exec_id = I915_EXEC_BSD;
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
@@ -2890,6 +2977,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->name = "bsd2 ring";
ring->id = VCS2;
+ ring->exec_id = I915_EXEC_BSD;
ring->write_tail = ring_write_tail;
ring->mmio_base = GEN8_BSD2_RING_BASE;
@@ -2920,6 +3008,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->name = "blitter ring";
ring->id = BCS;
+ ring->exec_id = I915_EXEC_BLT;
ring->mmio_base = BLT_RING_BASE;
ring->write_tail = ring_write_tail;
@@ -2977,6 +3066,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->name = "video enhancement ring";
ring->id = VECS;
+ ring->exec_id = I915_EXEC_VEBOX;
ring->mmio_base = VEBOX_RING_BASE;
ring->write_tail = ring_write_tail;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 49574ffe54bc..566b0ae10ce0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -93,11 +93,13 @@ struct intel_ring_hangcheck {
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
+ u32 instdone[I915_NUM_INSTDONE_REG];
};
struct intel_ringbuffer {
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
+ struct i915_vma *vma;
struct intel_engine_cs *ring;
struct list_head link;
@@ -147,14 +149,16 @@ struct i915_ctx_workarounds {
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
- RCS = 0x0,
- VCS,
+ RCS = 0,
BCS,
- VECS,
- VCS2
+ VCS,
+ VCS2, /* Keep instances of the same type engine together. */
+ VECS
} id;
#define I915_NUM_RINGS 5
-#define LAST_USER_RING (VECS + 1)
+#define _VCS(n) (VCS + (n))
+ unsigned int exec_id;
+ unsigned int guc_id;
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
@@ -268,6 +272,8 @@ struct intel_engine_cs {
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
+ bool disable_lite_restore_wa;
+ u32 ctx_desc_template;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct drm_i915_gem_request *request);
int (*emit_flush)(struct drm_i915_gem_request *request,
@@ -305,7 +311,6 @@ struct intel_engine_cs {
wait_queue_head_t irq_queue;
- struct intel_context *default_context;
struct intel_context *last_context;
struct intel_ring_hangcheck hangcheck;
@@ -406,7 +411,7 @@ intel_write_status_page(struct intel_engine_cs *ring,
ring->status_page.page_addr[reg] = value;
}
-/**
+/*
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
@@ -423,6 +428,7 @@ intel_write_status_page(struct intel_engine_cs *ring,
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x30
+#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
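The new _ADDR macro converts a dword index within the hardware status page into the byte offset the MI commands expect; assuming MI_STORE_DWORD_INDEX_SHIFT is 2 (dwords are four bytes), I915_GEM_HWS_INDEX_ADDR works out to 0x30 << 2 = 0xc0:

    #include <stdio.h>

    int main(void)
    {
            /* dword index -> byte offset, assuming a shift of 2 */
            printf("0x%x\n", 0x30 << 2); /* prints 0xc0 */
            return 0;
    }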
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 4f43d9b32e66..6e54d978d9d4 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -284,6 +284,13 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
1 << PIPE_C | 1 << PIPE_B);
}
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
+{
+ if (IS_BROADWELL(dev_priv))
+ gen8_irq_power_well_pre_disable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+}
+
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -309,6 +316,14 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
}
}
+static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (power_well->data == SKL_DISP_PW_2)
+ gen8_irq_power_well_pre_disable(dev_priv,
+ 1 << PIPE_C | 1 << PIPE_B);
+}
+
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
@@ -334,6 +349,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
} else {
if (enable_requested) {
+ hsw_power_well_pre_disable(dev_priv);
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
@@ -456,15 +472,19 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
*/
}
-static void gen9_set_dc_state_debugmask_memory_up(
- struct drm_i915_private *dev_priv)
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ uint32_t val, mask;
+
+ mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
+
+ if (IS_BROXTON(dev_priv))
+ mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */
val = I915_READ(DC_STATE_DEBUG);
- if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
- val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
+ if ((val & mask) != mask) {
+ val |= mask;
I915_WRITE(DC_STATE_DEBUG, val);
POSTING_READ(DC_STATE_DEBUG);
}
@@ -525,9 +545,6 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
state = DC_STATE_EN_UPTO_DC5;
- if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
- gen9_set_dc_state_debugmask_memory_up(dev_priv);
-
val = I915_READ(DC_STATE_EN);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
@@ -577,7 +594,8 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
- WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+ WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+ "Platform doesn't support DC5.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
@@ -613,7 +631,8 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+ WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+ "Platform doesn't support DC6.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
@@ -640,7 +659,8 @@ static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc5(dev_priv);
- if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ i915.enable_dc != 0 && i915.enable_dc != 1)
assert_can_disable_dc6(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -668,7 +688,6 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- struct drm_device *dev = dev_priv->dev;
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
bool is_enabled, enable_requested, check_fuse_status = false;
@@ -706,23 +725,15 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
state_mask = SKL_POWER_WELL_STATE(power_well->data);
is_enabled = tmp & state_mask;
+ if (!enable && enable_requested)
+ skl_power_well_pre_disable(dev_priv, power_well);
+
if (enable) {
if (!enable_requested) {
WARN((tmp & state_mask) &&
!I915_READ(HSW_PWR_WELL_BIOS),
"Invalid for power well status to be enabled, unless done by the BIOS, \
when request is to disable!\n");
- if (power_well->data == SKL_DISP_PW_2) {
- /*
- * DDI buffer programming unnecessary during
- * driver-load/resume as it's already done
- * during modeset initialization then. It's
- * also invalid here as encoder list is still
- * uninitialized.
- */
- if (!dev_priv->power_domains.initializing)
- intel_prepare_ddi(dev);
- }
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}
@@ -828,7 +839,8 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ i915.enable_dc != 0 && i915.enable_dc != 1)
skl_enable_dc6(dev_priv);
else
gen9_enable_dc5(dev_priv);
@@ -840,7 +852,8 @@ static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
if (power_well->count > 0) {
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
} else {
- if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ i915.enable_dc != 0 &&
i915.enable_dc != 1)
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
else
@@ -993,6 +1006,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
+ /* make sure we're done processing display irqs */
+ synchronize_irq(dev_priv->dev->irq);
+
vlv_power_sequencer_reset(dev_priv);
}
@@ -1941,7 +1957,7 @@ void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;
- if (!IS_SKYLAKE(dev_priv))
+ if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
@@ -1955,7 +1971,7 @@ void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;
- if (!IS_SKYLAKE(dev_priv))
+ if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
@@ -2125,8 +2141,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
skl_init_cdclk(dev_priv);
- if (dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
+ if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
+ gen9_set_dc_state_debugmask(dev_priv);
}
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2e1da060b0e1..4ecc076c4041 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1527,6 +1527,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -1537,6 +1538,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
if (intel_sdvo->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;
+ if (mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
if (intel_sdvo->is_lvds) {
if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
return MODE_PANEL;
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 2e2d4eb4a00d..db0ed499268a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -24,8 +24,8 @@
* Eric Anholt <eric@anholt.net>
*/
-/**
- * @file SDVO command definitions and structures.
+/*
+ * SDVO command definitions and structures.
*/
#define SDVO_OUTPUT_FIRST (0)
@@ -66,39 +66,39 @@ struct intel_sdvo_caps {
#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
#define DTD_FLAG_INTERLACE (1 << 7)
-/** This matches the EDID DTD structure, more or less */
+/* This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
- u16 clock; /**< pixel clock, in 10kHz units */
- u8 h_active; /**< lower 8 bits (pixels) */
- u8 h_blank; /**< lower 8 bits (pixels) */
- u8 h_high; /**< upper 4 bits each h_active, h_blank */
- u8 v_active; /**< lower 8 bits (lines) */
- u8 v_blank; /**< lower 8 bits (lines) */
- u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ u16 clock; /* pixel clock, in 10kHz units */
+ u8 h_active; /* lower 8 bits (pixels) */
+ u8 h_blank; /* lower 8 bits (pixels) */
+ u8 h_high; /* upper 4 bits each h_active, h_blank */
+ u8 v_active; /* lower 8 bits (lines) */
+ u8 v_blank; /* lower 8 bits (lines) */
+ u8 v_high; /* upper 4 bits each v_active, v_blank */
} part1;
struct {
- u8 h_sync_off; /**< lower 8 bits, from hblank start */
- u8 h_sync_width; /**< lower 8 bits (pixels) */
- /** lower 4 bits each vsync offset, vsync width */
+ u8 h_sync_off; /* lower 8 bits, from hblank start */
+ u8 h_sync_width; /* lower 8 bits (pixels) */
+ /* lower 4 bits each vsync offset, vsync width */
u8 v_sync_off_width;
- /**
+ /*
* 2 high bits of hsync offset, 2 high bits of hsync width,
* bits 4-5 of vsync offset, and 2 high bits of vsync width.
*/
u8 sync_off_width_high;
u8 dtd_flags;
u8 sdvo_flags;
- /** bits 6-7 of vsync offset at bits 6-7 */
+ /* bits 6-7 of vsync offset at bits 6-7 */
u8 v_sync_off_high;
u8 reserved;
} part2;
} __packed;
struct intel_sdvo_pixel_clock_range {
- u16 min; /**< pixel clock, in 10kHz units */
- u16 max; /**< pixel clock, in 10kHz units */
+ u16 min; /* pixel clock, in 10kHz units */
+ u16 max; /* pixel clock, in 10kHz units */
} __packed;
struct intel_sdvo_preferred_input_timing_args {
@@ -144,7 +144,7 @@ struct intel_sdvo_preferred_input_timing_args {
#define SDVO_CMD_RESET 0x01
-/** Returns a struct intel_sdvo_caps */
+/* Returns a struct intel_sdvo_caps */
#define SDVO_CMD_GET_DEVICE_CAPS 0x02
#define SDVO_CMD_GET_FIRMWARE_REV 0x86
@@ -152,7 +152,7 @@ struct intel_sdvo_preferred_input_timing_args {
# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
-/**
+/*
* Reports which inputs are trained (managed to sync).
*
* Devices must have trained within 2 vsyncs of a mode change.
@@ -164,10 +164,10 @@ struct intel_sdvo_get_trained_inputs_response {
unsigned int pad:6;
} __packed;
-/** Returns a struct intel_sdvo_output_flags of active outputs. */
+/* Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
-/**
+/*
* Sets the current set of active outputs.
*
* Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
@@ -175,7 +175,7 @@ struct intel_sdvo_get_trained_inputs_response {
*/
#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
-/**
+/*
* Returns the current mapping of SDVO inputs to outputs on the device.
*
* Returns two struct intel_sdvo_output_flags structures.
@@ -185,29 +185,29 @@ struct intel_sdvo_in_out_map {
u16 in0, in1;
};
-/**
+/*
* Sets the current mapping of SDVO inputs to outputs on the device.
*
* Takes two struct i380_sdvo_output_flags structures.
*/
#define SDVO_CMD_SET_IN_OUT_MAP 0x07
-/**
+/*
* Returns a struct intel_sdvo_output_flags of attached displays.
*/
#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
-/**
+/*
 * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
*/
#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
-/**
+/*
* Takes a struct intel_sdvo_output_flags.
*/
#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
-/**
+/*
* Returns a struct intel_sdvo_output_flags of displays with hot plug
* interrupts enabled.
*/
@@ -221,7 +221,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
unsigned int pad:6;
} __packed;
-/**
+/*
* Selects which input is affected by future input commands.
*
* Commands affected include SET_INPUT_TIMINGS_PART[12],
@@ -234,7 +234,7 @@ struct intel_sdvo_set_target_input_args {
unsigned int pad:7;
} __packed;
-/**
+/*
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
@@ -280,7 +280,7 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
-/**
+/*
* Generates a DTD based on the given width, height, and flags.
*
* This will be supported by any device supporting scaling or interlaced
@@ -300,24 +300,24 @@ struct intel_sdvo_set_target_input_args {
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
-/** Returns a struct intel_sdvo_pixel_clock_range */
+/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
-/** Returns a struct intel_sdvo_pixel_clock_range */
+/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
-/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
-/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
-/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+/* 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
@@ -376,7 +376,7 @@ struct intel_sdvo_tv_format {
#define SDVO_CMD_SET_TV_FORMAT 0x29
-/** Returns the resolutiosn that can be used with the given TV format */
+/* Returns the resolutions that can be used with the given TV format */
#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
struct intel_sdvo_sdtv_resolution_request {
unsigned int ntsc_m:1;
@@ -539,7 +539,7 @@ struct intel_sdvo_hdtv_resolution_reply {
#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
-/**
+/*
* The panel power sequencing parameters are in units of milliseconds.
* The high fields are bits 8:9 of the 10-bit values.
*/
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 8831fc579ade..c3998188cf35 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -129,17 +129,18 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
return val;
}
-u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
SB_CRRDDA_NP, reg, &val);
return val;
}
-void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_iosf_sb_write(struct drm_i915_private *dev_priv,
+ u8 port, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
SB_CRWRDA_NP, reg, &val);
}
@@ -171,20 +172,6 @@ void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
SB_CRWRDA_NP, reg, &val);
}
-u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
-{
- u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
- SB_CRRDDA_NP, reg, &val);
- return val;
-}
-
-void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
-{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
- SB_CRWRDA_NP, reg, &val);
-}
-
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
{
u32 val = 0;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4ff7a1f4183e..a2582c455b36 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -178,28 +178,33 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
}
static void
-skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
+skl_update_plane(struct drm_plane *drm_plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
- const struct drm_intel_sprite_colorkey *key =
- &to_intel_plane_state(drm_plane->state)->ckey;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
- struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
- int scaler_id;
+ int crtc_x = plane_state->dst.x1;
+ int crtc_y = plane_state->dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+ uint32_t x = plane_state->src.x1 >> 16;
+ uint32_t y = plane_state->src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[plane_state->scaler_id];
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -208,14 +213,12 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
- rotation = drm_plane->state->rotation;
+ rotation = plane_state->base.rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
- stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+ stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
- scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
-
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -236,9 +239,10 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
if (intel_rotation_90_or_270(rotation)) {
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
/* stride: Surface height in tiles */
- tile_height = intel_tile_height(dev, fb->pixel_format,
- fb->modifier[0], 0);
+ tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
stride = DIV_ROUND_UP(fb->height, tile_height);
plane_size = (src_w << 16) | src_h;
x_offset = stride * tile_height - y - (src_h + 1);
@@ -256,13 +260,13 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
/* program plane scaler */
- if (scaler_id >= 0) {
+ if (plane_state->scaler_id >= 0) {
uint32_t ps_ctrl = 0;
+ int scaler_id = plane_state->scaler_id;
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
- ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
- crtc_state->scaler_state.scalers[scaler_id].mode;
+ ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
@@ -334,24 +338,29 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
}
static void
-vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
+vlv_update_plane(struct drm_plane *dplane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
- unsigned long sprsurf_offset, linear_offset;
- int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key =
- &to_intel_plane_state(dplane->state)->ckey;
+ u32 sprsurf_offset, linear_offset;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->dst.x1;
+ int crtc_y = plane_state->dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+ uint32_t x = plane_state->src.x1 >> 16;
+ uint32_t y = plane_state->src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
sprctl = SP_ENABLE;
@@ -413,20 +422,18 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
crtc_w--;
crtc_h--;
- linear_offset = y * fb->pitches[0] + x * pixel_size;
- sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
- &x, &y,
- obj->tiling_mode,
- pixel_size,
- fb->pitches[0]);
+ linear_offset = y * fb->pitches[0] + x * cpp;
+ sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0], cpp,
+ fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
x += src_w;
y += src_h;
- linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+ linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
if (key->flags) {
@@ -474,23 +481,28 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
}
static void
-ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
+ivb_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
- unsigned long sprsurf_offset, linear_offset;
- int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key =
- &to_intel_plane_state(plane->state)->ckey;
+ u32 sprsurf_offset, linear_offset;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->dst.x1;
+ int crtc_y = plane_state->dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+ uint32_t x = plane_state->src.x1 >> 16;
+ uint32_t y = plane_state->src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
sprctl = SPRITE_ENABLE;
@@ -543,22 +555,20 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- linear_offset = y * fb->pitches[0] + x * pixel_size;
- sprsurf_offset =
- intel_gen4_compute_page_offset(dev_priv,
- &x, &y, obj->tiling_mode,
- pixel_size, fb->pitches[0]);
+ linear_offset = y * fb->pitches[0] + x * cpp;
+ sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0], cpp,
+ fb->pitches[0]);
linear_offset -= sprsurf_offset;
- if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
		/* HSW and BDW do this automagically in hardware */
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
x += src_w;
y += src_h;
- linear_offset += src_h * fb->pitches[0] +
- src_w * pixel_size;
+ linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
}
@@ -612,23 +622,28 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
}
static void
-ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t x, uint32_t y,
- uint32_t src_w, uint32_t src_h)
+ilk_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
- unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
- int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- const struct drm_intel_sprite_colorkey *key =
- &to_intel_plane_state(plane->state)->ckey;
+ u32 dvssurf_offset, linear_offset;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->dst.x1;
+ int crtc_y = plane_state->dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+ uint32_t x = plane_state->src.x1 >> 16;
+ uint32_t y = plane_state->src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
dvscntr = DVS_ENABLE;
@@ -677,19 +692,18 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
- linear_offset = y * fb->pitches[0] + x * pixel_size;
- dvssurf_offset =
- intel_gen4_compute_page_offset(dev_priv,
- &x, &y, obj->tiling_mode,
- pixel_size, fb->pitches[0]);
+ linear_offset = y * fb->pitches[0] + x * cpp;
+ dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+ fb->modifier[0], cpp,
+ fb->pitches[0]);
linear_offset -= dvssurf_offset;
- if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+ if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
x += src_w;
y += src_h;
- linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+ linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
if (key->flags) {
@@ -754,7 +768,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
int hscale, vscale;
int max_scale, min_scale;
bool can_scale;
- int pixel_size;
if (!fb) {
state->visible = false;
@@ -876,6 +889,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
/* Check size restrictions when scaling */
if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
WARN_ON(!can_scale);
@@ -887,9 +901,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
if (src_w < 3 || src_h < 3)
state->visible = false;
- pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
- width_bytes = ((src_x * pixel_size) & 63) +
- src_w * pixel_size;
+ width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096)) {
@@ -913,30 +925,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
return 0;
}
-static void
-intel_commit_sprite_plane(struct drm_plane *plane,
- struct intel_plane_state *state)
-{
- struct drm_crtc *crtc = state->base.crtc;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = state->base.fb;
-
- crtc = crtc ? crtc : plane->crtc;
-
- if (state->visible) {
- intel_plane->update_plane(plane, crtc, fb,
- state->dst.x1, state->dst.y1,
- drm_rect_width(&state->dst),
- drm_rect_height(&state->dst),
- state->src.x1 >> 16,
- state->src.y1 >> 16,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16);
- } else {
- intel_plane->disable_plane(plane, crtc);
- }
-}
-
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1118,7 +1106,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
- intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 948cbff6c62e..6745bad5bff0 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -897,6 +897,10 @@ intel_tv_mode_valid(struct drm_connector *connector,
{
struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
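+	/* Reject modes above the platform's maximum dot clock. */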
+ if (mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -1178,10 +1182,9 @@ static int
intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
{
- struct drm_encoder *encoder = &intel_tv->base.base;
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc *crtc = connector->state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
@@ -1230,8 +1233,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
- intel_wait_for_vblank(intel_tv->base.base.dev,
- to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
type = -1;
tv_dac = I915_READ(TV_DAC);
@@ -1261,8 +1263,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
POSTING_READ(TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
- intel_wait_for_vblank(intel_tv->base.base.dev,
- to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1420,6 +1421,7 @@ intel_tv_get_modes(struct drm_connector *connector)
if (!mode_ptr)
continue;
strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+ mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
mode_ptr->hdisplay = hactive_s;
mode_ptr->hsync_start = hactive_s + 1;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 277e60ae0e47..68b6f69aa682 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -327,13 +327,54 @@ static void intel_uncore_ellc_detect(struct drm_device *dev)
}
}
+static bool
+fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+ u32 dbg;
+
+ dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
+ if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
+ return false;
+
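+	/* The unclaimed-access bit is sticky; write it back to clear it. */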
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+ return true;
+}
+
+static bool
+vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+ u32 cer;
+
+ cer = __raw_i915_read32(dev_priv, CLAIM_ER);
+ if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
+ return false;
+
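+	/* Writing CLAIM_ER_CLR resets the claim counter and overflow flag. */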
+ __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
+
+ return true;
+}
+
+static bool
+check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
+ return fpga_check_for_unclaimed_mmio(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return vlv_check_for_unclaimed_mmio(dev_priv);
+
+ return false;
+}
+
static void __intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ /* clear out unclaimed reg detection bit */
+ if (check_for_unclaimed_mmio(dev_priv))
+ DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
@@ -359,6 +400,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
void intel_uncore_sanitize(struct drm_device *dev)
{
+ i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
}
@@ -585,38 +628,38 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
}
static void
-hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
- i915_reg_t reg, bool read, bool before)
+__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+ const i915_reg_t reg,
+ const bool read,
+ const bool before)
{
- const char *op = read ? "reading" : "writing to";
- const char *when = before ? "before" : "after";
-
- if (!i915.mmio_debug)
+	/* XXX: We limit the automatic arming of mmio debug traces on
+	 * these platforms; they reveal far too many unclaimed registers
+	 * and CI/BAT suffers from the noise.
+	 * Please fix and then re-enable the automatic traces.
+	 */
+ if (i915.mmio_debug < 2 &&
+ (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;
- if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
- when, op, i915_mmio_reg_offset(reg));
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ if (WARN(check_for_unclaimed_mmio(dev_priv),
+ "Unclaimed register detected %s %s register 0x%x\n",
+ before ? "before" : "after",
+ read ? "reading" : "writing to",
+ i915_mmio_reg_offset(reg)))
i915.mmio_debug--; /* Only report the first N failures */
- }
}
-static void
-hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
+static inline void
+unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+ const i915_reg_t reg,
+ const bool read,
+ const bool before)
{
- static bool mmio_debug_once = true;
-
- if (i915.mmio_debug || !mmio_debug_once)
+ if (likely(!i915.mmio_debug))
return;
- if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_DEBUG("Unclaimed register detected, "
- "enabling oneshot unclaimed register reporting. "
- "Please use i915.mmio_debug=N for more information.\n");
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- i915.mmio_debug = mmio_debug_once--;
- }
+ __unclaimed_reg_debug(dev_priv, reg, read, before);
}
#define GEN2_READ_HEADER(x) \
@@ -664,9 +707,11 @@ __gen2_read(64)
unsigned long irqflags; \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ unclaimed_reg_debug(dev_priv, reg, true, true)
#define GEN6_READ_FOOTER \
+ unclaimed_reg_debug(dev_priv, reg, true, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
@@ -699,11 +744,9 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
- hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (NEEDS_FORCE_WAKE(offset)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
val = __raw_i915_read##x(dev_priv, reg); \
- hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
@@ -751,7 +794,6 @@ static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
@@ -765,7 +807,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
- hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
@@ -864,9 +905,11 @@ __gen2_write(64)
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ unclaimed_reg_debug(dev_priv, reg, false, true)
#define GEN6_WRITE_FOOTER \
+ unclaimed_reg_debug(dev_priv, reg, false, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
#define __gen6_write(x) \
@@ -892,13 +935,10 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t
if (NEEDS_FORCE_WAKE(offset)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
- hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
@@ -928,12 +968,9 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
__raw_i915_write##x(dev_priv, reg, val); \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
- hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
@@ -987,7 +1024,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset) || \
is_gen9_shadowed(dev_priv, reg)) \
fw_engine = 0; \
@@ -1002,8 +1038,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
- hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
- hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
@@ -1155,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
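+		/* Haswell still needs the GT FIFO check on forcewake release. */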
+ if (IS_HASWELL(dev))
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+ else
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
@@ -1223,6 +1261,8 @@ void intel_uncore_init(struct drm_device *dev)
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
+ dev_priv->uncore.unclaimed_mmio_check = 1;
+
switch (INTEL_INFO(dev)->gen) {
default:
case 9:
@@ -1580,13 +1620,26 @@ bool intel_has_gpu_reset(struct drm_device *dev)
return intel_get_gpu_reset(dev) != NULL;
}
-void intel_uncore_check_errors(struct drm_device *dev)
+bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ return check_for_unclaimed_mmio(dev_priv);
+}
+
+bool
+intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
+{
+ if (unlikely(i915.mmio_debug ||
+ dev_priv->uncore.unclaimed_mmio_check <= 0))
+ return false;
- if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
- (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unclaimed register before interrupt\n");
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
+ DRM_DEBUG("Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
+ i915.mmio_debug++;
+ dev_priv->uncore.unclaimed_mmio_check--;
+ return true;
}
+
+ return false;
}
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 063825fecbe2..a24631fdf4ad 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -109,13 +109,6 @@ static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
{
}
-static bool dw_hdmi_imx_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- return true;
-}
-
static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -125,7 +118,7 @@ static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder,
static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
{
struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
- int mux = imx_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
+ int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder);
regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
IMX6Q_GPR3_HDMI_MUX_CTL_MASK,
@@ -138,7 +131,6 @@ static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
- .mode_fixup = dw_hdmi_imx_encoder_mode_fixup,
.mode_set = dw_hdmi_imx_encoder_mode_set,
.prepare = dw_hdmi_imx_encoder_prepare,
.commit = dw_hdmi_imx_encoder_commit,
@@ -233,8 +225,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
if (!iores)
return -ENXIO;
- platform_set_drvdata(pdev, hdmi);
-
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
@@ -253,7 +243,16 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+ ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+ /*
+ * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+ * which would have called the encoder cleanup. Do it manually.
+ */
+ if (ret)
+ drm_encoder_cleanup(encoder);
+
+ return ret;
}
static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 2f57d7967417..e26dcdec2aba 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/module.h>
-#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
@@ -171,18 +170,6 @@ static void imx_drm_disable_vblank(struct drm_device *drm, unsigned int pipe)
imx_drm_crtc->imx_drm_helper_funcs.disable_vblank(imx_drm_crtc->crtc);
}
-static void imx_drm_driver_preclose(struct drm_device *drm,
- struct drm_file *file)
-{
- int i;
-
- if (!file->is_master)
- return;
-
- for (i = 0; i < MAX_CRTC; i++)
- imx_drm_disable_vblank(drm, i);
-}
-
static const struct file_operations imx_drm_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -339,7 +326,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
{
struct imx_drm_device *imxdrm = drm->dev_private;
struct imx_drm_crtc *imx_drm_crtc;
- int ret;
/*
* The vblank arrays are dimensioned by MAX_CRTC - we can't
@@ -364,10 +350,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
*new_crtc = imx_drm_crtc;
- ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
- if (ret)
- goto err_register;
-
drm_crtc_helper_add(crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
@@ -375,11 +357,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
return 0;
-
-err_register:
- imxdrm->crtc[--imxdrm->pipes] = NULL;
- kfree(imx_drm_crtc);
- return ret;
}
EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
@@ -424,36 +401,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
}
EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of);
-/*
- * @node: device tree node containing encoder input ports
- * @encoder: drm_encoder
- */
-int imx_drm_encoder_get_mux_id(struct device_node *node,
- struct drm_encoder *encoder)
-{
- struct imx_drm_crtc *imx_crtc = imx_drm_find_crtc(encoder->crtc);
- struct device_node *ep;
- struct of_endpoint endpoint;
- struct device_node *port;
- int ret;
-
- if (!node || !imx_crtc)
- return -EINVAL;
-
- for_each_endpoint_of_node(node, ep) {
- port = of_graph_get_remote_port(ep);
- of_node_put(port);
- if (port == imx_crtc->crtc->port) {
- ret = of_graph_parse_endpoint(ep, &endpoint);
- of_node_put(ep);
- return ret ? ret : endpoint.port;
- }
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
-
static const struct drm_ioctl_desc imx_drm_ioctls[] = {
/* none so far */
};
@@ -463,7 +410,6 @@ static struct drm_driver imx_drm_driver = {
.load = imx_drm_driver_load,
.unload = imx_drm_driver_unload,
.lastclose = imx_drm_driver_lastclose,
- .preclose = imx_drm_driver_preclose,
.set_busid = drm_platform_set_busid,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 71cf6d9c714f..b0241b9d1334 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -46,8 +46,6 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder,
int imx_drm_set_bus_format(struct drm_encoder *encoder,
u32 bus_format);
-int imx_drm_encoder_get_mux_id(struct device_node *node,
- struct drm_encoder *encoder);
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 22ac482231ed..a58eee59550a 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -19,6 +19,7 @@
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -139,13 +140,6 @@ static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}
-static bool imx_ldb_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
unsigned long serial_clk, unsigned long di_clk)
{
@@ -215,7 +209,7 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
- int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
+ int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
drm_panel_prepare(imx_ldb_ch->panel);
@@ -265,7 +259,7 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
unsigned long serial_clk;
unsigned long di_clk = mode->clock * 1000;
- int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
+ int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
if (mode->clock > 170000) {
dev_warn(ldb->dev,
@@ -376,7 +370,6 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
.dpms = imx_ldb_encoder_dpms,
- .mode_fixup = imx_ldb_encoder_mode_fixup,
.prepare = imx_ldb_encoder_prepare,
.commit = imx_ldb_encoder_commit,
.mode_set = imx_ldb_encoder_mode_set,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 292349f0b132..ae7a9fb3b8a2 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -286,13 +286,6 @@ static void imx_tve_encoder_dpms(struct drm_encoder *encoder, int mode)
dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret);
}
-static bool imx_tve_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_tve *tve = enc_to_tve(encoder);
@@ -379,7 +372,6 @@ static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
.dpms = imx_tve_encoder_dpms,
- .mode_fixup = imx_tve_encoder_mode_fixup,
.prepare = imx_tve_encoder_prepare,
.mode_set = imx_tve_encoder_mode_set,
.commit = imx_tve_encoder_commit,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 30a57185bdb4..dee8e8b3523b 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -22,6 +22,8 @@
#include <linux/fb.h>
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/reservation.h>
+#include <linux/dma-buf.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -31,6 +33,23 @@
#define DRIVER_DESC "i.MX IPUv3 Graphics"
+enum ipu_flip_status {
+ IPU_FLIP_NONE,
+ IPU_FLIP_PENDING,
+ IPU_FLIP_SUBMITTED,
+};
+
+struct ipu_flip_work {
+ struct work_struct unref_work;
+ struct drm_gem_object *bo;
+ struct drm_pending_vblank_event *page_flip_event;
+ struct work_struct fence_work;
+ struct ipu_crtc *crtc;
+ struct fence *excl;
+ unsigned shared_count;
+ struct fence **shared;
+};
+
struct ipu_crtc {
struct device *dev;
struct drm_crtc base;
@@ -42,8 +61,9 @@ struct ipu_crtc {
struct ipu_dc *dc;
struct ipu_di *di;
int enabled;
- struct drm_pending_vblank_event *page_flip_event;
- struct drm_framebuffer *newfb;
+ enum ipu_flip_status flip_state;
+ struct workqueue_struct *flip_queue;
+ struct ipu_flip_work *flip_work;
int irq;
u32 bus_format;
int di_hsync_pin;
@@ -64,6 +84,7 @@ static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
/* Start DC channel and DI after IDMAC */
ipu_dc_enable_channel(ipu_crtc->dc);
ipu_di_enable(ipu_crtc->di);
+ drm_crtc_vblank_on(&ipu_crtc->base);
ipu_crtc->enabled = 1;
}
@@ -80,6 +101,7 @@ static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
ipu_di_disable(ipu_crtc->di);
ipu_plane_disable(ipu_crtc->plane[0]);
ipu_dc_disable(ipu);
+ drm_crtc_vblank_off(&ipu_crtc->base);
ipu_crtc->enabled = 0;
}
@@ -102,15 +124,45 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static void ipu_flip_unref_work_func(struct work_struct *__work)
+{
+ struct ipu_flip_work *work =
+ container_of(__work, struct ipu_flip_work, unref_work);
+
+ drm_gem_object_unreference_unlocked(work->bo);
+ kfree(work);
+}
+
+static void ipu_flip_fence_work_func(struct work_struct *__work)
+{
+ struct ipu_flip_work *work =
+ container_of(__work, struct ipu_flip_work, fence_work);
+ int i;
+
+ /* wait for all fences attached to the FB obj to signal */
+ if (work->excl) {
+ fence_wait(work->excl, false);
+ fence_put(work->excl);
+ }
+ for (i = 0; i < work->shared_count; i++) {
+ fence_wait(work->shared[i], false);
+ fence_put(work->shared[i]);
+ }
+
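+	/* All fences have signaled; the IRQ handler may submit the flip. */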
+ work->crtc->flip_state = IPU_FLIP_SUBMITTED;
+}
+
static int ipu_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
{
+ struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct ipu_flip_work *flip_work;
int ret;
- if (ipu_crtc->newfb)
+ if (ipu_crtc->flip_state != IPU_FLIP_NONE)
return -EBUSY;
ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
@@ -121,11 +173,58 @@ static int ipu_page_flip(struct drm_crtc *crtc,
return ret;
}
- ipu_crtc->newfb = fb;
- ipu_crtc->page_flip_event = event;
- crtc->primary->fb = fb;
+ flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
+ if (!flip_work) {
+ ret = -ENOMEM;
+ goto put_vblank;
+ }
+ INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
+ flip_work->page_flip_event = event;
+
+ /* get BO backing the old framebuffer and take a reference */
+ flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
+ drm_gem_object_reference(flip_work->bo);
+
+ ipu_crtc->flip_work = flip_work;
+ /*
+ * If the object has a DMABUF attached, we need to wait on its fences
+ * if there are any.
+ */
+ if (cma_obj->base.dma_buf) {
+ INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
+ flip_work->crtc = ipu_crtc;
+
+ ret = reservation_object_get_fences_rcu(
+ cma_obj->base.dma_buf->resv, &flip_work->excl,
+ &flip_work->shared_count, &flip_work->shared);
+
+ if (unlikely(ret)) {
+ DRM_ERROR("failed to get fences for buffer\n");
+ goto free_flip_work;
+ }
+
+		/* No need to queue the worker if there are no fences */
+ if (!flip_work->excl && !flip_work->shared_count) {
+ ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
+ } else {
+ ipu_crtc->flip_state = IPU_FLIP_PENDING;
+ queue_work(ipu_crtc->flip_queue,
+ &flip_work->fence_work);
+ }
+ } else {
+ ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
+ }
return 0;
+
+free_flip_work:
+ drm_gem_object_unreference_unlocked(flip_work->bo);
+ kfree(flip_work);
+ ipu_crtc->flip_work = NULL;
+put_vblank:
+ imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
+
+ return ret;
}
static const struct drm_crtc_funcs ipu_crtc_funcs = {
@@ -209,12 +308,12 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
{
unsigned long flags;
struct drm_device *drm = ipu_crtc->base.dev;
+ struct ipu_flip_work *work = ipu_crtc->flip_work;
spin_lock_irqsave(&drm->event_lock, flags);
- if (ipu_crtc->page_flip_event)
+ if (work->page_flip_event)
drm_crtc_send_vblank_event(&ipu_crtc->base,
- ipu_crtc->page_flip_event);
- ipu_crtc->page_flip_event = NULL;
+ work->page_flip_event);
imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
spin_unlock_irqrestore(&drm->event_lock, flags);
}
@@ -225,13 +324,15 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
imx_drm_handle_vblank(ipu_crtc->imx_crtc);
- if (ipu_crtc->newfb) {
+ if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
struct ipu_plane *plane = ipu_crtc->plane[0];
- ipu_crtc->newfb = NULL;
ipu_plane_set_base(plane, ipu_crtc->base.primary->fb,
plane->x, plane->y);
ipu_crtc_handle_pageflip(ipu_crtc);
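+		/* Drop the old BO reference outside of IRQ context. */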
+ queue_work(ipu_crtc->flip_queue,
+ &ipu_crtc->flip_work->unref_work);
+ ipu_crtc->flip_state = IPU_FLIP_NONE;
}
return IRQ_HANDLED;
@@ -280,6 +381,10 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
static int ipu_enable_vblank(struct drm_crtc *crtc)
{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+
+ enable_irq(ipu_crtc->irq);
+
return 0;
}
@@ -287,8 +392,7 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- ipu_crtc->page_flip_event = NULL;
- ipu_crtc->newfb = NULL;
+ disable_irq_nosync(ipu_crtc->irq);
}
static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
@@ -399,6 +503,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
goto err_put_plane_res;
}
+ /* Only enable IRQ when we actually need it to trigger work. */
+ disable_irq(ipu_crtc->irq);
+
+ ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
return 0;
@@ -441,6 +549,7 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
+ destroy_workqueue(ipu_crtc->flip_queue);
ipu_plane_put_resources(ipu_crtc->plane[0]);
ipu_put_resources(ipu_crtc);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 591ba2f1ae03..681ec6eb77d9 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -42,6 +42,7 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420,
DRM_FORMAT_YVU420,
+ DRM_FORMAT_RGB565,
};
int ipu_plane_irq(struct ipu_plane *ipu_plane)
@@ -71,22 +72,101 @@ static inline int calc_bandwidth(int width, int height, unsigned int vref)
int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_cma_object *cma_obj;
- unsigned long eba;
- int active;
-
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_DEBUG_KMS("entry is null.\n");
- return -EFAULT;
+ struct drm_gem_cma_object *cma_obj[3];
+ unsigned long eba, ubo, vbo;
+ int active, i;
+
+ for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+ cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
+ if (!cma_obj[i]) {
+ DRM_DEBUG_KMS("plane %d entry is null.\n", i);
+ return -EFAULT;
+ }
}
- dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
- &cma_obj->paddr, x, y);
-
- eba = cma_obj->paddr + fb->offsets[0] +
+ eba = cma_obj[0]->paddr + fb->offsets[0] +
fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+ if (eba & 0x7) {
+ DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
+ return -EINVAL;
+ }
+
+ if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
+ DRM_DEBUG_KMS("pitches out of range.\n");
+ return -EINVAL;
+ }
+
+ if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
+ DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
+ return -EINVAL;
+ }
+
+ ipu_plane->stride[0] = fb->pitches[0];
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ /*
+ * Multiplanar formats have to meet the following restrictions:
+ * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+ * - EBA, UBO and VBO are a multiple of 8
+ * - UBO and VBO are unsigned and not larger than 0xfffff8
+ * - Only EBA may be changed while scanout is active
+ * - The strides of U and V planes must be identical.
+ */
+ ubo = cma_obj[1]->paddr + fb->offsets[1] +
+ fb->pitches[1] * y / 2 + x / 2 - eba;
+ vbo = cma_obj[2]->paddr + fb->offsets[2] +
+ fb->pitches[2] * y / 2 + x / 2 - eba;
+
+ if ((ubo & 0x7) || (vbo & 0x7)) {
+ DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
+ return -EINVAL;
+ }
+
+ if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
+ DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
+ return -EINVAL;
+ }
+
+ if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
+ (ipu_plane->v_offset != vbo))) {
+ DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
+ return -EINVAL;
+ }
+
+ if (fb->pitches[1] != fb->pitches[2]) {
+ DRM_DEBUG_KMS("U/V pitches must be identical.\n");
+ return -EINVAL;
+ }
+
+ if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
+ DRM_DEBUG_KMS("U/V pitches out of range.\n");
+ return -EINVAL;
+ }
+
+ if (ipu_plane->enabled &&
+ (ipu_plane->stride[1] != fb->pitches[1])) {
+ DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
+ return -EINVAL;
+ }
+
+ ipu_plane->u_offset = ubo;
+ ipu_plane->v_offset = vbo;
+ ipu_plane->stride[1] = fb->pitches[1];
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phys = %pad %pad %pad, x = %d, y = %d",
+ &cma_obj[0]->paddr, &cma_obj[1]->paddr,
+ &cma_obj[2]->paddr, x, y);
+ break;
+ default:
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
+ &cma_obj[0]->paddr, x, y);
+ break;
+ }
+
if (ipu_plane->enabled) {
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
@@ -200,12 +280,6 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
}
}
- ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
- if (ret) {
- dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
- return ret;
- }
-
ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
calc_bandwidth(crtc_w, crtc_h,
calc_vref(mode)), 64);
@@ -214,6 +288,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
return ret;
}
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
+
ipu_cpmem_zero(ipu_plane->ipu_ch);
ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
@@ -232,6 +308,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
if (interlaced)
ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
+ if (fb->pixel_format == DRM_FORMAT_YUV420) {
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ ipu_plane->stride[1],
+ ipu_plane->u_offset,
+ ipu_plane->v_offset);
+ } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ ipu_plane->stride[1],
+ ipu_plane->v_offset,
+ ipu_plane->u_offset);
+ }
+
ipu_plane->w = src_w;
ipu_plane->h = src_h;
@@ -338,7 +426,7 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
if (crtc != plane->crtc)
- dev_info(plane->dev->dev, "crtc change: %p -> %p\n",
+ dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
plane->crtc, crtc);
plane->crtc = crtc;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 3a443b413c60..4448fd4ad4eb 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -29,6 +29,10 @@ struct ipu_plane {
int w;
int h;
+ unsigned int u_offset;
+ unsigned int v_offset;
+ unsigned int stride[2];
+
bool enabled;
};
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 0ffef172afb4..363e2c7741e2 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -112,13 +112,6 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
drm_panel_enable(imxpd->panel);
}
-static bool imx_pd_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
@@ -166,7 +159,6 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
.dpms = imx_pd_encoder_dpms,
- .mode_fixup = imx_pd_encoder_mode_fixup,
.prepare = imx_pd_encoder_prepare,
.commit = imx_pd_encoder_commit,
.mode_set = imx_pd_encoder_mode_set,
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index dc13c4857e6f..14e64e08909e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -92,18 +92,6 @@ static inline void mga_wait_busy(struct mga_device *mdev)
} while ((status & 0x01) && time_before(jiffies, timeout));
}
-/*
- * The core passes the desired mode to the CRTC code to see whether any
- * CRTC-specific modifications need to be made to it. We're in a position
- * to just pass that straight through, so this does nothing
- */
-static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
#define P_ARRAY_SIZE 9
static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
@@ -1410,7 +1398,6 @@ static const struct drm_crtc_funcs mga_crtc_funcs = {
static const struct drm_crtc_helper_funcs mga_helper_funcs = {
.disable = mga_crtc_disable,
.dpms = mga_crtc_dpms,
- .mode_fixup = mga_crtc_mode_fixup,
.mode_set = mga_crtc_mode_set,
.mode_set_base = mga_crtc_mode_set_base,
.prepare = mga_crtc_prepare,
@@ -1479,13 +1466,6 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 * These functions are analogous to those in the CRTC code, but are intended
* to handle any encoder-specific limitations
*/
-static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void mga_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -1515,7 +1495,6 @@ static void mga_encoder_destroy(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
.dpms = mga_encoder_dpms,
- .mode_fixup = mga_encoder_mode_fixup,
.mode_set = mga_encoder_mode_set,
.prepare = mga_encoder_prepare,
.commit = mga_encoder_commit,
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 065ad4138799..ddb4c9d097e4 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -12,6 +12,7 @@ msm-y := \
hdmi/hdmi_connector.o \
hdmi/hdmi_hdcp.o \
hdmi/hdmi_i2c.o \
+ hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
@@ -52,6 +53,8 @@ msm-y := \
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
+msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
mdp/mdp4/mdp4_dsi_encoder.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 9e2aceb4ffe6..fee24297fb92 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 97dc1c6ec107..27dabd5e57fb 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -111,10 +112,14 @@ enum a3xx_vtx_fmt {
VFMT_8_8_SNORM = 53,
VFMT_8_8_8_SNORM = 54,
VFMT_8_8_8_8_SNORM = 55,
- VFMT_10_10_10_2_UINT = 60,
- VFMT_10_10_10_2_UNORM = 61,
- VFMT_10_10_10_2_SINT = 62,
- VFMT_10_10_10_2_SNORM = 63,
+ VFMT_10_10_10_2_UINT = 56,
+ VFMT_10_10_10_2_UNORM = 57,
+ VFMT_10_10_10_2_SINT = 58,
+ VFMT_10_10_10_2_SNORM = 59,
+ VFMT_2_10_10_10_UINT = 60,
+ VFMT_2_10_10_10_UNORM = 61,
+ VFMT_2_10_10_10_SINT = 62,
+ VFMT_2_10_10_10_SNORM = 63,
};
enum a3xx_tex_fmt {
@@ -138,10 +143,12 @@ enum a3xx_tex_fmt {
TFMT_DXT1 = 36,
TFMT_DXT3 = 37,
TFMT_DXT5 = 38,
+ TFMT_2_10_10_10_UNORM = 40,
TFMT_10_10_10_2_UNORM = 41,
TFMT_9_9_9_E5_FLOAT = 42,
TFMT_11_11_10_FLOAT = 43,
TFMT_A8_UNORM = 44,
+ TFMT_L8_UNORM = 45,
TFMT_L8_A8_UNORM = 47,
TFMT_8_UNORM = 48,
TFMT_8_8_UNORM = 49,
@@ -183,6 +190,8 @@ enum a3xx_tex_fmt {
TFMT_32_SINT = 92,
TFMT_32_32_SINT = 93,
TFMT_32_32_32_32_SINT = 95,
+ TFMT_2_10_10_10_UINT = 96,
+ TFMT_10_10_10_2_UINT = 97,
TFMT_ETC2_RG11_SNORM = 112,
TFMT_ETC2_RG11_UNORM = 113,
TFMT_ETC2_R11_SNORM = 114,
@@ -215,6 +224,9 @@ enum a3xx_color_fmt {
RB_R8_UINT = 14,
RB_R8_SINT = 15,
RB_R10G10B10A2_UNORM = 16,
+ RB_A2R10G10B10_UNORM = 17,
+ RB_R10G10B10A2_UINT = 18,
+ RB_A2R10G10B10_UINT = 19,
RB_A8_UNORM = 20,
RB_R8_UNORM = 21,
RB_R16_FLOAT = 24,
@@ -244,30 +256,273 @@ enum a3xx_color_fmt {
RB_R32G32B32A32_UINT = 59,
};
+enum a3xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_AHB_PFPTRANS_WAIT = 3,
+ CP_AHB_NRTTRANS_WAIT = 6,
+ CP_CSF_NRT_READ_WAIT = 8,
+ CP_CSF_I1_FIFO_FULL = 9,
+ CP_CSF_I2_FIFO_FULL = 10,
+ CP_CSF_ST_FIFO_FULL = 11,
+ CP_RESERVED_12 = 12,
+ CP_CSF_RING_ROQ_FULL = 13,
+ CP_CSF_I1_ROQ_FULL = 14,
+ CP_CSF_I2_ROQ_FULL = 15,
+ CP_CSF_ST_ROQ_FULL = 16,
+ CP_RESERVED_17 = 17,
+ CP_MIU_TAG_MEM_FULL = 18,
+ CP_MIU_NRT_WRITE_STALLED = 22,
+ CP_MIU_NRT_READ_STALLED = 23,
+ CP_ME_REGS_RB_DONE_FIFO_FULL = 26,
+ CP_ME_REGS_VS_EVENT_FIFO_FULL = 27,
+ CP_ME_REGS_PS_EVENT_FIFO_FULL = 28,
+ CP_ME_REGS_CF_EVENT_FIFO_FULL = 29,
+ CP_ME_MICRO_RB_STARVED = 30,
+ CP_AHB_RBBM_DWORD_SENT = 40,
+ CP_ME_BUSY_CLOCKS = 41,
+ CP_ME_WAIT_CONTEXT_AVAIL = 42,
+ CP_PFP_TYPE0_PACKET = 43,
+ CP_PFP_TYPE3_PACKET = 44,
+ CP_CSF_RB_WPTR_NEQ_RPTR = 45,
+ CP_CSF_I1_SIZE_NEQ_ZERO = 46,
+ CP_CSF_I2_SIZE_NEQ_ZERO = 47,
+ CP_CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a3xx_gras_tse_perfcounter_select {
+ GRAS_TSEPERF_INPUT_PRIM = 0,
+ GRAS_TSEPERF_INPUT_NULL_PRIM = 1,
+ GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2,
+ GRAS_TSEPERF_CLIPPED_PRIM = 3,
+ GRAS_TSEPERF_NEW_PRIM = 4,
+ GRAS_TSEPERF_ZERO_AREA_PRIM = 5,
+ GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6,
+ GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7,
+ GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8,
+ GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9,
+ GRAS_TSEPERF_PRE_CLIP_PRIM = 10,
+ GRAS_TSEPERF_POST_CLIP_PRIM = 11,
+ GRAS_TSEPERF_WORKING_CYCLES = 12,
+ GRAS_TSEPERF_PC_STARVE = 13,
+ GRAS_TSERASPERF_STALL = 14,
+};
+
+enum a3xx_gras_ras_perfcounter_select {
+ GRAS_RASPERF_16X16_TILES = 0,
+ GRAS_RASPERF_8X8_TILES = 1,
+ GRAS_RASPERF_4X4_TILES = 2,
+ GRAS_RASPERF_WORKING_CYCLES = 3,
+ GRAS_RASPERF_STALL_CYCLES_BY_RB = 4,
+ GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5,
+ GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6,
+};
+
+enum a3xx_hlsq_perfcounter_select {
+ HLSQ_PERF_SP_VS_CONSTANT = 0,
+ HLSQ_PERF_SP_VS_INSTRUCTIONS = 1,
+ HLSQ_PERF_SP_FS_CONSTANT = 2,
+ HLSQ_PERF_SP_FS_INSTRUCTIONS = 3,
+ HLSQ_PERF_TP_STATE = 4,
+ HLSQ_PERF_QUADS = 5,
+ HLSQ_PERF_PIXELS = 6,
+ HLSQ_PERF_VERTICES = 7,
+ HLSQ_PERF_FS8_THREADS = 8,
+ HLSQ_PERF_FS16_THREADS = 9,
+ HLSQ_PERF_FS32_THREADS = 10,
+ HLSQ_PERF_VS8_THREADS = 11,
+ HLSQ_PERF_VS16_THREADS = 12,
+ HLSQ_PERF_SP_VS_DATA_BYTES = 13,
+ HLSQ_PERF_SP_FS_DATA_BYTES = 14,
+ HLSQ_PERF_ACTIVE_CYCLES = 15,
+ HLSQ_PERF_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_PERF_STALL_CYCLES_SP_VS = 17,
+ HLSQ_PERF_STALL_CYCLES_SP_FS = 18,
+ HLSQ_PERF_STALL_CYCLES_UCHE = 19,
+ HLSQ_PERF_RBBM_LOAD_CYCLES = 20,
+ HLSQ_PERF_DI_TO_VS_START_SP0 = 21,
+ HLSQ_PERF_DI_TO_FS_START_SP0 = 22,
+ HLSQ_PERF_VS_START_TO_DONE_SP0 = 23,
+ HLSQ_PERF_FS_START_TO_DONE_SP0 = 24,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26,
+ HLSQ_PERF_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_PERF_UCHE_LATENCY_COUNT = 28,
+};
+
+enum a3xx_pc_perfcounter_select {
+ PC_PCPERF_VISIBILITY_STREAMS = 0,
+ PC_PCPERF_TOTAL_INSTANCES = 1,
+ PC_PCPERF_PRIMITIVES_PC_VPC = 2,
+ PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3,
+ PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4,
+ PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5,
+ PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6,
+ PC_PCPERF_VERTICES_TO_VFD = 7,
+ PC_PCPERF_REUSED_VERTICES = 8,
+ PC_PCPERF_CYCLES_STALLED_BY_VFD = 9,
+ PC_PCPERF_CYCLES_STALLED_BY_TSE = 10,
+ PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11,
+ PC_PCPERF_CYCLES_IS_WORKING = 12,
+};
+
+enum a3xx_rb_perfcounter_select {
+ RB_RBPERF_ACTIVE_CYCLES_ANY = 0,
+ RB_RBPERF_ACTIVE_CYCLES_ALL = 1,
+ RB_RBPERF_STARVE_CYCLES_BY_SP = 2,
+ RB_RBPERF_STARVE_CYCLES_BY_RAS = 3,
+ RB_RBPERF_STARVE_CYCLES_BY_MARB = 4,
+ RB_RBPERF_STALL_CYCLES_BY_MARB = 5,
+ RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6,
+ RB_RBPERF_RB_MARB_DATA = 7,
+ RB_RBPERF_SP_RB_QUAD = 8,
+ RB_RBPERF_RAS_EARLY_Z_QUADS = 9,
+ RB_RBPERF_GMEM_CH0_READ = 10,
+ RB_RBPERF_GMEM_CH1_READ = 11,
+ RB_RBPERF_GMEM_CH0_WRITE = 12,
+ RB_RBPERF_GMEM_CH1_WRITE = 13,
+ RB_RBPERF_CP_CONTEXT_DONE = 14,
+ RB_RBPERF_CP_CACHE_FLUSH = 15,
+ RB_RBPERF_CP_ZPASS_DONE = 16,
+};
+
+enum a3xx_rbbm_perfcounter_select {
+ RBBM_ALAWYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TEX_BUSY = 12,
+ RBBM_ANY_USP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_RBBM_STATUS_MASKED = 22,
+};
+
enum a3xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_UCHE_LOAD_INSTRUCTIONS = 3,
+ SP_UCHE_STORE_INSTRUCTIONS = 4,
+ SP_UCHE_ATOMICS = 5,
+ SP_VS_TEX_INSTRUCTIONS = 6,
+ SP_VS_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_EFU_INSTRUCTIONS = 8,
+ SP_VS_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_TEX_INSTRUCTIONS = 11,
SP_FS_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_EFU_INSTRUCTIONS = 13,
SP_FS_FULL_ALU_INSTRUCTIONS = 14,
- SP0_ICL1_MISSES = 26,
+ SP_FS_HALF_ALU_INSTRUCTIONS = 15,
+ SP_FS_BARY_INSTRUCTIONS = 16,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
SP_ALU_ACTIVE_CYCLES = 29,
+ SP_EFU_ACTIVE_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_ACTIVE_CYCLES_ANY = 35,
+ SP_ACTIVE_CYCLES_ALL = 36,
+};
+
+enum a3xx_tp_perfcounter_select {
+ TPL1_TPPERF_L1_REQUESTS = 0,
+ TPL1_TPPERF_TP0_L1_REQUESTS = 1,
+ TPL1_TPPERF_TP0_L1_MISSES = 2,
+ TPL1_TPPERF_TP1_L1_REQUESTS = 3,
+ TPL1_TPPERF_TP1_L1_MISSES = 4,
+ TPL1_TPPERF_TP2_L1_REQUESTS = 5,
+ TPL1_TPPERF_TP2_L1_MISSES = 6,
+ TPL1_TPPERF_TP3_L1_REQUESTS = 7,
+ TPL1_TPPERF_TP3_L1_MISSES = 8,
+ TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9,
+ TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10,
+ TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11,
+ TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12,
+ TPL1_TPPERF_BILINEAR_OPS = 13,
+ TPL1_TPPERF_QUADSQUADS_OFFSET = 14,
+ TPL1_TPPERF_QUADQUADS_SHADOW = 15,
+ TPL1_TPPERF_QUADS_ARRAY = 16,
+ TPL1_TPPERF_QUADS_PROJECTION = 17,
+ TPL1_TPPERF_QUADS_GRADIENT = 18,
+ TPL1_TPPERF_QUADS_1D2D = 19,
+ TPL1_TPPERF_QUADS_3DCUBE = 20,
+ TPL1_TPPERF_ZERO_LOD = 21,
+ TPL1_TPPERF_OUTPUT_TEXELS = 22,
+ TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23,
+ TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24,
+ TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25,
+ TPL1_TPPERF_LATENCY = 26,
+ TPL1_TPPERF_LATENCY_TRANS = 27,
};
-enum a3xx_rop_code {
- ROP_CLEAR = 0,
- ROP_NOR = 1,
- ROP_AND_INVERTED = 2,
- ROP_COPY_INVERTED = 3,
- ROP_AND_REVERSE = 4,
- ROP_INVERT = 5,
- ROP_XOR = 6,
- ROP_NAND = 7,
- ROP_AND = 8,
- ROP_EQUIV = 9,
- ROP_NOOP = 10,
- ROP_OR_INVERTED = 11,
- ROP_COPY = 12,
- ROP_OR_REVERSE = 13,
- ROP_OR = 14,
- ROP_SET = 15,
+enum a3xx_vfd_perfcounter_select {
+ VFD_PERF_UCHE_BYTE_FETCHED = 0,
+ VFD_PERF_UCHE_TRANS = 1,
+ VFD_PERF_VPC_BYPASS_COMPONENTS = 2,
+ VFD_PERF_FETCH_INSTRUCTIONS = 3,
+ VFD_PERF_DECODE_INSTRUCTIONS = 4,
+ VFD_PERF_ACTIVE_CYCLES = 5,
+ VFD_PERF_STALL_CYCLES_UCHE = 6,
+ VFD_PERF_STALL_CYCLES_HLSQ = 7,
+ VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9,
+};
+
+enum a3xx_vpc_perfcounter_select {
+ VPC_PERF_SP_LM_PRIMITIVES = 0,
+ VPC_PERF_COMPONENTS_FROM_SP = 1,
+ VPC_PERF_SP_LM_COMPONENTS = 2,
+ VPC_PERF_ACTIVE_CYCLES = 3,
+ VPC_PERF_STALL_CYCLES_LM = 4,
+ VPC_PERF_STALL_CYCLES_RAS = 5,
+};
+
+enum a3xx_uche_perfcounter_select {
+ UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4,
+ UCHE_UCHEPERF_READ_REQUESTS_TP = 8,
+ UCHE_UCHEPERF_READ_REQUESTS_VFD = 9,
+ UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10,
+ UCHE_UCHEPERF_READ_REQUESTS_MARB = 11,
+ UCHE_UCHEPERF_READ_REQUESTS_SP = 12,
+ UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13,
+ UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14,
+ UCHE_UCHEPERF_TAG_CHECK_FAILS = 15,
+ UCHE_UCHEPERF_EVICTS = 16,
+ UCHE_UCHEPERF_FLUSHES = 17,
+ UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18,
+ UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19,
+ UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
};
enum a3xx_rb_blend_opcode {
@@ -1429,15 +1684,23 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
-#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030
#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
{
return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100
#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000
#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
@@ -1451,17 +1714,39 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
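All of these generated packers follow one pattern: shift the value into its field, then mask off anything that overflows it, so out-of-range input is truncated rather than corrupting neighboring bits. A full register value is composed by OR-ing packed fields together — a minimal sketch, assuming FOUR_QUADS is one of the a3xx_threadsize enumerators defined elsewhere in this header:

static inline uint32_t example_hlsq_control_0(void)
{
	/* a 2-bit thread size, a flag bit, and the 12-bit VPC timeout field */
	return A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(FOUR_QUADS) |
	       A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE |
	       A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(0xfff);
}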
#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
-#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0
#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
{
return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
-#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
-#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
+}
#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
+}
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
@@ -1478,13 +1763,13 @@ static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
}
#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
{
@@ -1498,13 +1783,13 @@ static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
}
#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
{
@@ -1518,13 +1803,13 @@ static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
}
#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
{
return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
}
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
{
@@ -1532,13 +1817,13 @@ static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
}
#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
{
return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
}
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
{
@@ -1620,12 +1905,24 @@ static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
}
#define REG_A3XX_VFD_CONTROL_1 0x00002241
-#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f
#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
{
return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
}
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8
+static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
+}
#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
@@ -2008,24 +2305,19 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe
return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2033,8 +2325,6 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
-#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
-#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -2075,7 +2365,8 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
}
-#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000
#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
{
@@ -2085,24 +2376,26 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
-#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
}
+#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
}
-#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
}
+#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
@@ -2113,25 +2406,25 @@ static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
{
@@ -2139,6 +2432,12 @@ static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
}
#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2155,8 +2454,38 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
@@ -2182,24 +2511,22 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe
return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
+#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000
+#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2235,7 +2562,7 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
}
-#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000
#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
{
@@ -2243,6 +2570,12 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
}
#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2259,8 +2592,38 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
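The SHADERSTARTADDRESS packers above are the one deviation from the usual shift-and-mask pattern: they shift val right by 5 before shifting it back into place, so the low five bits of the address are discarded and only a 32-byte-aligned value survives (BURSTLEN occupies those low bits of the same register). A sketch under that reading — addr is a hypothetical byte address of the shader object:

static inline uint32_t example_encode_shader_start(uint32_t addr)
{
	/* ((addr >> 5) << 5) & 0xffffffe0 — identical to addr & ~0x1f */
	return A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(addr);
}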
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 99de8271dba8..3220b91f559a 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -47,11 +48,13 @@ enum a4xx_color_fmt {
RB4_R8_UNORM = 2,
RB4_R4G4B4A4_UNORM = 8,
RB4_R5G5B5A1_UNORM = 10,
- RB4_R5G6R5_UNORM = 14,
+ RB4_R5G6B5_UNORM = 14,
RB4_R8G8_UNORM = 15,
RB4_R8G8_SNORM = 16,
RB4_R8G8_UINT = 17,
RB4_R8G8_SINT = 18,
+ RB4_R16_UNORM = 19,
+ RB4_R16_SNORM = 20,
RB4_R16_FLOAT = 21,
RB4_R16_UINT = 22,
RB4_R16_SINT = 23,
@@ -63,12 +66,16 @@ enum a4xx_color_fmt {
RB4_R10G10B10A2_UNORM = 31,
RB4_R10G10B10A2_UINT = 34,
RB4_R11G11B10_FLOAT = 39,
+ RB4_R16G16_UNORM = 40,
+ RB4_R16G16_SNORM = 41,
RB4_R16G16_FLOAT = 42,
RB4_R16G16_UINT = 43,
RB4_R16G16_SINT = 44,
RB4_R32_FLOAT = 45,
RB4_R32_UINT = 46,
RB4_R32_SINT = 47,
+ RB4_R16G16B16A16_UNORM = 52,
+ RB4_R16G16B16A16_SNORM = 53,
RB4_R16G16B16A16_FLOAT = 54,
RB4_R16G16B16A16_UINT = 55,
RB4_R16G16B16A16_SINT = 56,
@@ -106,6 +113,7 @@ enum a4xx_vtx_fmt {
VFMT4_32_32_FIXED = 10,
VFMT4_32_32_32_FIXED = 11,
VFMT4_32_32_32_32_FIXED = 12,
+ VFMT4_11_11_10_FLOAT = 13,
VFMT4_16_SINT = 16,
VFMT4_16_16_SINT = 17,
VFMT4_16_16_16_SINT = 18,
@@ -146,52 +154,76 @@ enum a4xx_vtx_fmt {
VFMT4_8_8_SNORM = 53,
VFMT4_8_8_8_SNORM = 54,
VFMT4_8_8_8_8_SNORM = 55,
- VFMT4_10_10_10_2_UINT = 60,
- VFMT4_10_10_10_2_UNORM = 61,
- VFMT4_10_10_10_2_SINT = 62,
- VFMT4_10_10_10_2_SNORM = 63,
+ VFMT4_10_10_10_2_UINT = 56,
+ VFMT4_10_10_10_2_UNORM = 57,
+ VFMT4_10_10_10_2_SINT = 58,
+ VFMT4_10_10_10_2_SNORM = 59,
+ VFMT4_2_10_10_10_UINT = 60,
+ VFMT4_2_10_10_10_UNORM = 61,
+ VFMT4_2_10_10_10_SINT = 62,
+ VFMT4_2_10_10_10_SNORM = 63,
};
enum a4xx_tex_fmt {
- TFMT4_5_6_5_UNORM = 11,
- TFMT4_5_5_5_1_UNORM = 10,
- TFMT4_4_4_4_4_UNORM = 8,
- TFMT4_X8Z24_UNORM = 71,
- TFMT4_10_10_10_2_UNORM = 33,
TFMT4_A8_UNORM = 3,
- TFMT4_L8_A8_UNORM = 13,
TFMT4_8_UNORM = 4,
- TFMT4_8_8_UNORM = 14,
- TFMT4_8_8_8_8_UNORM = 28,
TFMT4_8_SNORM = 5,
- TFMT4_8_8_SNORM = 15,
- TFMT4_8_8_8_8_SNORM = 29,
TFMT4_8_UINT = 6,
- TFMT4_8_8_UINT = 16,
- TFMT4_8_8_8_8_UINT = 30,
TFMT4_8_SINT = 7,
+ TFMT4_4_4_4_4_UNORM = 8,
+ TFMT4_5_5_5_1_UNORM = 9,
+ TFMT4_5_6_5_UNORM = 11,
+ TFMT4_L8_A8_UNORM = 13,
+ TFMT4_8_8_UNORM = 14,
+ TFMT4_8_8_SNORM = 15,
+ TFMT4_8_8_UINT = 16,
TFMT4_8_8_SINT = 17,
- TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_16_UNORM = 18,
+ TFMT4_16_SNORM = 19,
+ TFMT4_16_FLOAT = 20,
TFMT4_16_UINT = 21,
- TFMT4_16_16_UINT = 41,
- TFMT4_16_16_16_16_UINT = 54,
TFMT4_16_SINT = 22,
+ TFMT4_8_8_8_8_UNORM = 28,
+ TFMT4_8_8_8_8_SNORM = 29,
+ TFMT4_8_8_8_8_UINT = 30,
+ TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_9_9_9_E5_FLOAT = 32,
+ TFMT4_10_10_10_2_UNORM = 33,
+ TFMT4_10_10_10_2_UINT = 34,
+ TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_16_16_UNORM = 38,
+ TFMT4_16_16_SNORM = 39,
+ TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_UINT = 41,
TFMT4_16_16_SINT = 42,
- TFMT4_16_16_16_16_SINT = 55,
+ TFMT4_32_FLOAT = 43,
TFMT4_32_UINT = 44,
- TFMT4_32_32_UINT = 57,
- TFMT4_32_32_32_32_UINT = 64,
TFMT4_32_SINT = 45,
- TFMT4_32_32_SINT = 58,
- TFMT4_32_32_32_32_SINT = 65,
- TFMT4_16_FLOAT = 20,
- TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_16_16_UNORM = 51,
+ TFMT4_16_16_16_16_SNORM = 52,
TFMT4_16_16_16_16_FLOAT = 53,
- TFMT4_32_FLOAT = 43,
+ TFMT4_16_16_16_16_UINT = 54,
+ TFMT4_16_16_16_16_SINT = 55,
TFMT4_32_32_FLOAT = 56,
+ TFMT4_32_32_UINT = 57,
+ TFMT4_32_32_SINT = 58,
+ TFMT4_32_32_32_FLOAT = 59,
+ TFMT4_32_32_32_UINT = 60,
+ TFMT4_32_32_32_SINT = 61,
TFMT4_32_32_32_32_FLOAT = 63,
- TFMT4_9_9_9_E5_FLOAT = 32,
- TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_32_32_32_32_UINT = 64,
+ TFMT4_32_32_32_32_SINT = 65,
+ TFMT4_X8Z24_UNORM = 71,
+ TFMT4_DXT1 = 86,
+ TFMT4_DXT3 = 87,
+ TFMT4_DXT5 = 88,
+ TFMT4_RGTC1_UNORM = 90,
+ TFMT4_RGTC1_SNORM = 91,
+ TFMT4_RGTC2_UNORM = 94,
+ TFMT4_RGTC2_SNORM = 95,
+ TFMT4_BPTC_UFLOAT = 97,
+ TFMT4_BPTC_FLOAT = 98,
+ TFMT4_BPTC = 99,
TFMT4_ATC_RGB = 100,
TFMT4_ATC_RGBA_EXPLICIT = 101,
TFMT4_ATC_RGBA_INTERPOLATED = 102,
@@ -240,6 +272,545 @@ enum a4xx_tess_spacing {
EVEN_SPACING = 3,
};
+enum a4xx_ccu_perfcounter_select {
+ CCU_BUSY_CYCLES = 0,
+ CCU_RB_DEPTH_RETURN_STALL = 2,
+ CCU_RB_COLOR_RETURN_STALL = 3,
+ CCU_DEPTH_BLOCKS = 6,
+ CCU_COLOR_BLOCKS = 7,
+ CCU_DEPTH_BLOCK_HIT = 8,
+ CCU_COLOR_BLOCK_HIT = 9,
+ CCU_DEPTH_FLAG1_COUNT = 10,
+ CCU_DEPTH_FLAG2_COUNT = 11,
+ CCU_DEPTH_FLAG3_COUNT = 12,
+ CCU_DEPTH_FLAG4_COUNT = 13,
+ CCU_COLOR_FLAG1_COUNT = 14,
+ CCU_COLOR_FLAG2_COUNT = 15,
+ CCU_COLOR_FLAG3_COUNT = 16,
+ CCU_COLOR_FLAG4_COUNT = 17,
+ CCU_PARTIAL_BLOCK_READ = 18,
+};
+
+enum a4xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_BUSY = 1,
+ CP_PFP_IDLE = 2,
+ CP_PFP_BUSY_WORKING = 3,
+ CP_PFP_STALL_CYCLES_ANY = 4,
+ CP_PFP_STARVE_CYCLES_ANY = 5,
+ CP_PFP_STARVED_PER_LOAD_ADDR = 6,
+ CP_PFP_STALLED_PER_STORE_ADDR = 7,
+ CP_PFP_PC_PROFILE = 8,
+ CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+ CP_PFP_COND_INDIRECT_DISCARDED = 10,
+ CP_LONG_RESUMPTIONS = 11,
+ CP_RESUME_CYCLES = 12,
+ CP_RESUME_TO_BOUNDARY_CYCLES = 13,
+ CP_LONG_PREEMPTIONS = 14,
+ CP_PREEMPT_CYCLES = 15,
+ CP_PREEMPT_TO_BOUNDARY_CYCLES = 16,
+ CP_ME_FIFO_EMPTY_PFP_IDLE = 17,
+ CP_ME_FIFO_EMPTY_PFP_BUSY = 18,
+ CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19,
+ CP_ME_FIFO_FULL_ME_BUSY = 20,
+ CP_ME_FIFO_FULL_ME_NON_WORKING = 21,
+ CP_ME_WAITING_FOR_PACKETS = 22,
+ CP_ME_BUSY_WORKING = 23,
+ CP_ME_STARVE_CYCLES_ANY = 24,
+ CP_ME_STARVE_CYCLES_PER_PROFILE = 25,
+ CP_ME_STALL_CYCLES_PER_PROFILE = 26,
+ CP_ME_PC_PROFILE = 27,
+ CP_RCIU_FIFO_EMPTY = 28,
+ CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29,
+ CP_RCIU_FIFO_FULL = 30,
+ CP_RCIU_FIFO_FULL_NO_CONTEXT = 31,
+ CP_RCIU_FIFO_FULL_AHB_MASTER = 32,
+ CP_RCIU_FIFO_FULL_OTHER = 33,
+ CP_AHB_IDLE = 34,
+ CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35,
+ CP_AHB_STALL_ON_GRANT_SPLIT = 36,
+ CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37,
+ CP_AHB_BUSY_WORKING = 38,
+ CP_AHB_BUSY_STALL_ON_HRDY = 39,
+ CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40,
+};
+
+enum a4xx_gras_ras_perfcounter_select {
+ RAS_SUPER_TILES = 0,
+ RAS_8X8_TILES = 1,
+ RAS_4X4_TILES = 2,
+ RAS_BUSY_CYCLES = 3,
+ RAS_STALL_CYCLES_BY_RB = 4,
+ RAS_STALL_CYCLES_BY_VSC = 5,
+ RAS_STARVE_CYCLES_BY_TSE = 6,
+ RAS_SUPERTILE_CYCLES = 7,
+ RAS_TILE_CYCLES = 8,
+ RAS_FULLY_COVERED_SUPER_TILES = 9,
+ RAS_FULLY_COVERED_8X8_TILES = 10,
+ RAS_4X4_PRIM = 11,
+ RAS_8X4_4X8_PRIM = 12,
+ RAS_8X8_PRIM = 13,
+};
+
+enum a4xx_gras_tse_perfcounter_select {
+ TSE_INPUT_PRIM = 0,
+ TSE_INPUT_NULL_PRIM = 1,
+	TSE_TRIVIAL_REJ_PRIM = 2,
+ TSE_CLIPPED_PRIM = 3,
+ TSE_NEW_PRIM = 4,
+ TSE_ZERO_AREA_PRIM = 5,
+ TSE_FACENESS_CULLED_PRIM = 6,
+ TSE_ZERO_PIXEL_PRIM = 7,
+ TSE_OUTPUT_NULL_PRIM = 8,
+ TSE_OUTPUT_VISIBLE_PRIM = 9,
+ TSE_PRE_CLIP_PRIM = 10,
+ TSE_POST_CLIP_PRIM = 11,
+ TSE_BUSY_CYCLES = 12,
+ TSE_PC_STARVE = 13,
+ TSE_RAS_STALL = 14,
+ TSE_STALL_BARYPLANE_FIFO_FULL = 15,
+ TSE_STALL_ZPLANE_FIFO_FULL = 16,
+};
+
+enum a4xx_hlsq_perfcounter_select {
+ HLSQ_SP_VS_STAGE_CONSTANT = 0,
+ HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1,
+ HLSQ_SP_FS_STAGE_CONSTANT = 2,
+ HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3,
+ HLSQ_TP_STATE = 4,
+ HLSQ_QUADS = 5,
+ HLSQ_PIXELS = 6,
+ HLSQ_VERTICES = 7,
+ HLSQ_SP_VS_STAGE_DATA_BYTES = 13,
+ HLSQ_SP_FS_STAGE_DATA_BYTES = 14,
+ HLSQ_BUSY_CYCLES = 15,
+ HLSQ_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_STALL_CYCLES_SP_VS_STAGE = 17,
+ HLSQ_STALL_CYCLES_SP_FS_STAGE = 18,
+ HLSQ_STALL_CYCLES_UCHE = 19,
+ HLSQ_RBBM_LOAD_CYCLES = 20,
+ HLSQ_DI_TO_VS_START_SP = 21,
+ HLSQ_DI_TO_FS_START_SP = 22,
+ HLSQ_VS_STAGE_START_TO_DONE_SP = 23,
+ HLSQ_FS_STAGE_START_TO_DONE_SP = 24,
+ HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25,
+ HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26,
+ HLSQ_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_UCHE_LATENCY_COUNT = 28,
+ HLSQ_STARVE_CYCLES_VFD = 29,
+};
+
+enum a4xx_pc_perfcounter_select {
+ PC_VIS_STREAMS_LOADED = 0,
+ PC_VPC_PRIMITIVES = 2,
+ PC_DEAD_PRIM = 3,
+ PC_LIVE_PRIM = 4,
+ PC_DEAD_DRAWCALLS = 5,
+ PC_LIVE_DRAWCALLS = 6,
+ PC_VERTEX_MISSES = 7,
+ PC_STALL_CYCLES_VFD = 9,
+ PC_STALL_CYCLES_TSE = 10,
+ PC_STALL_CYCLES_UCHE = 11,
+ PC_WORKING_CYCLES = 12,
+ PC_IA_VERTICES = 13,
+ PC_GS_PRIMITIVES = 14,
+ PC_HS_INVOCATIONS = 15,
+ PC_DS_INVOCATIONS = 16,
+ PC_DS_PRIMITIVES = 17,
+ PC_STARVE_CYCLES_FOR_INDEX = 20,
+ PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21,
+ PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22,
+ PC_STALL_CYCLES_TESS = 23,
+ PC_STARVE_CYCLES_FOR_POSITION = 24,
+ PC_MODE0_DRAWCALL = 25,
+ PC_MODE1_DRAWCALL = 26,
+ PC_MODE2_DRAWCALL = 27,
+ PC_MODE3_DRAWCALL = 28,
+ PC_MODE4_DRAWCALL = 29,
+ PC_PREDICATED_DEAD_DRAWCALL = 30,
+ PC_STALL_CYCLES_BY_TSE_ONLY = 31,
+ PC_STALL_CYCLES_BY_VPC_ONLY = 32,
+ PC_VPC_POS_DATA_TRANSACTION = 33,
+ PC_BUSY_CYCLES = 34,
+ PC_STARVE_CYCLES_DI = 35,
+ PC_STALL_CYCLES_VPC = 36,
+ TESS_WORKING_CYCLES = 37,
+ TESS_NUM_CYCLES_SETUP_WORKING = 38,
+ TESS_NUM_CYCLES_PTGEN_WORKING = 39,
+ TESS_NUM_CYCLES_CONNGEN_WORKING = 40,
+ TESS_BUSY_CYCLES = 41,
+ TESS_STARVE_CYCLES_PC = 42,
+ TESS_STALL_CYCLES_PC = 43,
+};
+
+enum a4xx_pwr_perfcounter_select {
+ PWR_CORE_CLOCK_CYCLES = 0,
+ PWR_BUSY_CLOCK_CYCLES = 1,
+};
+
+enum a4xx_rb_perfcounter_select {
+ RB_BUSY_CYCLES = 0,
+ RB_BUSY_CYCLES_BINNING = 1,
+ RB_BUSY_CYCLES_RENDERING = 2,
+ RB_BUSY_CYCLES_RESOLVE = 3,
+ RB_STARVE_CYCLES_BY_SP = 4,
+ RB_STARVE_CYCLES_BY_RAS = 5,
+ RB_STARVE_CYCLES_BY_MARB = 6,
+ RB_STALL_CYCLES_BY_MARB = 7,
+ RB_STALL_CYCLES_BY_HLSQ = 8,
+ RB_RB_RB_MARB_DATA = 9,
+ RB_SP_RB_QUAD = 10,
+ RB_RAS_RB_Z_QUADS = 11,
+ RB_GMEM_CH0_READ = 12,
+ RB_GMEM_CH1_READ = 13,
+ RB_GMEM_CH0_WRITE = 14,
+ RB_GMEM_CH1_WRITE = 15,
+ RB_CP_CONTEXT_DONE = 16,
+ RB_CP_CACHE_FLUSH = 17,
+ RB_CP_ZPASS_DONE = 18,
+ RB_STALL_FIFO0_FULL = 19,
+ RB_STALL_FIFO1_FULL = 20,
+ RB_STALL_FIFO2_FULL = 21,
+ RB_STALL_FIFO3_FULL = 22,
+ RB_RB_HLSQ_TRANSACTIONS = 23,
+ RB_Z_READ = 24,
+ RB_Z_WRITE = 25,
+ RB_C_READ = 26,
+ RB_C_WRITE = 27,
+ RB_C_READ_LATENCY = 28,
+ RB_Z_READ_LATENCY = 29,
+ RB_STALL_BY_UCHE = 30,
+ RB_MARB_UCHE_TRANSACTIONS = 31,
+ RB_CACHE_STALL_MISS = 32,
+ RB_CACHE_STALL_FIFO_FULL = 33,
+ RB_8BIT_BLENDER_UNITS_ACTIVE = 34,
+ RB_16BIT_BLENDER_UNITS_ACTIVE = 35,
+ RB_SAMPLER_UNITS_ACTIVE = 36,
+ RB_TOTAL_PASS = 38,
+ RB_Z_PASS = 39,
+ RB_Z_FAIL = 40,
+ RB_S_FAIL = 41,
+ RB_POWER0 = 42,
+ RB_POWER1 = 43,
+ RB_POWER2 = 44,
+ RB_POWER3 = 45,
+ RB_POWER4 = 46,
+ RB_POWER5 = 47,
+ RB_POWER6 = 48,
+ RB_POWER7 = 49,
+};
+
+enum a4xx_rbbm_perfcounter_select {
+ RBBM_ALWAYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TPL1_BUSY = 12,
+ RBBM_ANY_SP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_STATUS_MASKED = 22,
+ RBBM_CP_BUSY_GFX_CORE_IDLE = 23,
+ RBBM_TESS_BUSY = 24,
+ RBBM_COM_BUSY = 25,
+ RBBM_DCOM_BUSY = 32,
+ RBBM_ANY_CCU_BUSY = 33,
+ RBBM_DPM_BUSY = 34,
+};
+
+enum a4xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_GM_LOAD_INSTRUCTIONS = 3,
+ SP_GM_STORE_INSTRUCTIONS = 4,
+ SP_GM_ATOMICS = 5,
+ SP_VS_STAGE_TEX_INSTRUCTIONS = 6,
+ SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_STAGE_EFU_INSTRUCTIONS = 8,
+ SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_STAGE_TEX_INSTRUCTIONS = 11,
+ SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_STAGE_EFU_INSTRUCTIONS = 13,
+ SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14,
+ SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
+ SP_ALU_WORKING_CYCLES = 29,
+ SP_EFU_WORKING_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_BUSY_CYCLES = 35,
+ SP_HS_INSTRUCTIONS = 36,
+ SP_DS_INSTRUCTIONS = 37,
+ SP_GS_INSTRUCTIONS = 38,
+ SP_CS_INSTRUCTIONS = 39,
+ SP_SCHEDULER_NON_WORKING = 40,
+ SP_WAVE_CONTEXTS = 41,
+ SP_WAVE_CONTEXT_CYCLES = 42,
+ SP_POWER0 = 43,
+ SP_POWER1 = 44,
+ SP_POWER2 = 45,
+ SP_POWER3 = 46,
+ SP_POWER4 = 47,
+ SP_POWER5 = 48,
+ SP_POWER6 = 49,
+ SP_POWER7 = 50,
+ SP_POWER8 = 51,
+ SP_POWER9 = 52,
+ SP_POWER10 = 53,
+ SP_POWER11 = 54,
+ SP_POWER12 = 55,
+ SP_POWER13 = 56,
+ SP_POWER14 = 57,
+ SP_POWER15 = 58,
+};
+
+enum a4xx_tp_perfcounter_select {
+ TP_L1_REQUESTS = 0,
+ TP_L1_MISSES = 1,
+ TP_QUADS_OFFSET = 8,
+ TP_QUAD_SHADOW = 9,
+ TP_QUADS_ARRAY = 10,
+ TP_QUADS_GRADIENT = 11,
+ TP_QUADS_1D2D = 12,
+ TP_QUADS_3DCUBE = 13,
+ TP_BUSY_CYCLES = 16,
+ TP_STALL_CYCLES_BY_ARB = 17,
+ TP_STATE_CACHE_REQUESTS = 20,
+ TP_STATE_CACHE_MISSES = 21,
+ TP_POWER0 = 22,
+ TP_POWER1 = 23,
+ TP_POWER2 = 24,
+ TP_POWER3 = 25,
+ TP_POWER4 = 26,
+ TP_POWER5 = 27,
+ TP_POWER6 = 28,
+ TP_POWER7 = 29,
+};
+
+enum a4xx_uche_perfcounter_select {
+ UCHE_VBIF_READ_BEATS_TP = 0,
+ UCHE_VBIF_READ_BEATS_VFD = 1,
+ UCHE_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_VBIF_READ_BEATS_MARB = 3,
+ UCHE_VBIF_READ_BEATS_SP = 4,
+ UCHE_READ_REQUESTS_TP = 5,
+ UCHE_READ_REQUESTS_VFD = 6,
+ UCHE_READ_REQUESTS_HLSQ = 7,
+ UCHE_READ_REQUESTS_MARB = 8,
+ UCHE_READ_REQUESTS_SP = 9,
+ UCHE_WRITE_REQUESTS_MARB = 10,
+ UCHE_WRITE_REQUESTS_SP = 11,
+ UCHE_TAG_CHECK_FAILS = 12,
+ UCHE_EVICTS = 13,
+ UCHE_FLUSHES = 14,
+ UCHE_VBIF_LATENCY_CYCLES = 15,
+ UCHE_VBIF_LATENCY_SAMPLES = 16,
+ UCHE_BUSY_CYCLES = 17,
+ UCHE_VBIF_READ_BEATS_PC = 18,
+ UCHE_READ_REQUESTS_PC = 19,
+ UCHE_WRITE_REQUESTS_VPC = 20,
+ UCHE_STALL_BY_VBIF = 21,
+ UCHE_WRITE_REQUESTS_VSC = 22,
+ UCHE_POWER0 = 23,
+ UCHE_POWER1 = 24,
+ UCHE_POWER2 = 25,
+ UCHE_POWER3 = 26,
+ UCHE_POWER4 = 27,
+ UCHE_POWER5 = 28,
+ UCHE_POWER6 = 29,
+ UCHE_POWER7 = 30,
+};
+
+enum a4xx_vbif_perfcounter_select {
+ AXI_READ_REQUESTS_ID_0 = 0,
+ AXI_READ_REQUESTS_ID_1 = 1,
+ AXI_READ_REQUESTS_ID_2 = 2,
+ AXI_READ_REQUESTS_ID_3 = 3,
+ AXI_READ_REQUESTS_ID_4 = 4,
+ AXI_READ_REQUESTS_ID_5 = 5,
+ AXI_READ_REQUESTS_ID_6 = 6,
+ AXI_READ_REQUESTS_ID_7 = 7,
+ AXI_READ_REQUESTS_ID_8 = 8,
+ AXI_READ_REQUESTS_ID_9 = 9,
+ AXI_READ_REQUESTS_ID_10 = 10,
+ AXI_READ_REQUESTS_ID_11 = 11,
+ AXI_READ_REQUESTS_ID_12 = 12,
+ AXI_READ_REQUESTS_ID_13 = 13,
+ AXI_READ_REQUESTS_ID_14 = 14,
+ AXI_READ_REQUESTS_ID_15 = 15,
+ AXI0_READ_REQUESTS_TOTAL = 16,
+ AXI1_READ_REQUESTS_TOTAL = 17,
+ AXI2_READ_REQUESTS_TOTAL = 18,
+ AXI3_READ_REQUESTS_TOTAL = 19,
+ AXI_READ_REQUESTS_TOTAL = 20,
+ AXI_WRITE_REQUESTS_ID_0 = 21,
+ AXI_WRITE_REQUESTS_ID_1 = 22,
+ AXI_WRITE_REQUESTS_ID_2 = 23,
+ AXI_WRITE_REQUESTS_ID_3 = 24,
+ AXI_WRITE_REQUESTS_ID_4 = 25,
+ AXI_WRITE_REQUESTS_ID_5 = 26,
+ AXI_WRITE_REQUESTS_ID_6 = 27,
+ AXI_WRITE_REQUESTS_ID_7 = 28,
+ AXI_WRITE_REQUESTS_ID_8 = 29,
+ AXI_WRITE_REQUESTS_ID_9 = 30,
+ AXI_WRITE_REQUESTS_ID_10 = 31,
+ AXI_WRITE_REQUESTS_ID_11 = 32,
+ AXI_WRITE_REQUESTS_ID_12 = 33,
+ AXI_WRITE_REQUESTS_ID_13 = 34,
+ AXI_WRITE_REQUESTS_ID_14 = 35,
+ AXI_WRITE_REQUESTS_ID_15 = 36,
+ AXI0_WRITE_REQUESTS_TOTAL = 37,
+ AXI1_WRITE_REQUESTS_TOTAL = 38,
+ AXI2_WRITE_REQUESTS_TOTAL = 39,
+ AXI3_WRITE_REQUESTS_TOTAL = 40,
+ AXI_WRITE_REQUESTS_TOTAL = 41,
+ AXI_TOTAL_REQUESTS = 42,
+ AXI_READ_DATA_BEATS_ID_0 = 43,
+ AXI_READ_DATA_BEATS_ID_1 = 44,
+ AXI_READ_DATA_BEATS_ID_2 = 45,
+ AXI_READ_DATA_BEATS_ID_3 = 46,
+ AXI_READ_DATA_BEATS_ID_4 = 47,
+ AXI_READ_DATA_BEATS_ID_5 = 48,
+ AXI_READ_DATA_BEATS_ID_6 = 49,
+ AXI_READ_DATA_BEATS_ID_7 = 50,
+ AXI_READ_DATA_BEATS_ID_8 = 51,
+ AXI_READ_DATA_BEATS_ID_9 = 52,
+ AXI_READ_DATA_BEATS_ID_10 = 53,
+ AXI_READ_DATA_BEATS_ID_11 = 54,
+ AXI_READ_DATA_BEATS_ID_12 = 55,
+ AXI_READ_DATA_BEATS_ID_13 = 56,
+ AXI_READ_DATA_BEATS_ID_14 = 57,
+ AXI_READ_DATA_BEATS_ID_15 = 58,
+ AXI0_READ_DATA_BEATS_TOTAL = 59,
+ AXI1_READ_DATA_BEATS_TOTAL = 60,
+ AXI2_READ_DATA_BEATS_TOTAL = 61,
+ AXI3_READ_DATA_BEATS_TOTAL = 62,
+ AXI_READ_DATA_BEATS_TOTAL = 63,
+ AXI_WRITE_DATA_BEATS_ID_0 = 64,
+ AXI_WRITE_DATA_BEATS_ID_1 = 65,
+ AXI_WRITE_DATA_BEATS_ID_2 = 66,
+ AXI_WRITE_DATA_BEATS_ID_3 = 67,
+ AXI_WRITE_DATA_BEATS_ID_4 = 68,
+ AXI_WRITE_DATA_BEATS_ID_5 = 69,
+ AXI_WRITE_DATA_BEATS_ID_6 = 70,
+ AXI_WRITE_DATA_BEATS_ID_7 = 71,
+ AXI_WRITE_DATA_BEATS_ID_8 = 72,
+ AXI_WRITE_DATA_BEATS_ID_9 = 73,
+ AXI_WRITE_DATA_BEATS_ID_10 = 74,
+ AXI_WRITE_DATA_BEATS_ID_11 = 75,
+ AXI_WRITE_DATA_BEATS_ID_12 = 76,
+ AXI_WRITE_DATA_BEATS_ID_13 = 77,
+ AXI_WRITE_DATA_BEATS_ID_14 = 78,
+ AXI_WRITE_DATA_BEATS_ID_15 = 79,
+ AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+ AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+ AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+ AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+ AXI_WRITE_DATA_BEATS_TOTAL = 84,
+ AXI_DATA_BEATS_TOTAL = 85,
+ CYCLES_HELD_OFF_ID_0 = 86,
+ CYCLES_HELD_OFF_ID_1 = 87,
+ CYCLES_HELD_OFF_ID_2 = 88,
+ CYCLES_HELD_OFF_ID_3 = 89,
+ CYCLES_HELD_OFF_ID_4 = 90,
+ CYCLES_HELD_OFF_ID_5 = 91,
+ CYCLES_HELD_OFF_ID_6 = 92,
+ CYCLES_HELD_OFF_ID_7 = 93,
+ CYCLES_HELD_OFF_ID_8 = 94,
+ CYCLES_HELD_OFF_ID_9 = 95,
+ CYCLES_HELD_OFF_ID_10 = 96,
+ CYCLES_HELD_OFF_ID_11 = 97,
+ CYCLES_HELD_OFF_ID_12 = 98,
+ CYCLES_HELD_OFF_ID_13 = 99,
+ CYCLES_HELD_OFF_ID_14 = 100,
+ CYCLES_HELD_OFF_ID_15 = 101,
+ AXI_READ_REQUEST_HELD_OFF = 102,
+ AXI_WRITE_REQUEST_HELD_OFF = 103,
+ AXI_REQUEST_HELD_OFF = 104,
+ AXI_WRITE_DATA_HELD_OFF = 105,
+ OCMEM_AXI_READ_REQUEST_HELD_OFF = 106,
+ OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107,
+ OCMEM_AXI_REQUEST_HELD_OFF = 108,
+ OCMEM_AXI_WRITE_DATA_HELD_OFF = 109,
+ ELAPSED_CYCLES_DDR = 110,
+ ELAPSED_CYCLES_OCMEM = 111,
+};
+
+enum a4xx_vfd_perfcounter_select {
+ VFD_UCHE_BYTE_FETCHED = 0,
+ VFD_UCHE_TRANS = 1,
+ VFD_FETCH_INSTRUCTIONS = 3,
+ VFD_BUSY_CYCLES = 5,
+ VFD_STALL_CYCLES_UCHE = 6,
+ VFD_STALL_CYCLES_HLSQ = 7,
+ VFD_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_STALL_CYCLES_VPC_ALLOC = 9,
+ VFD_MODE_0_FIBERS = 13,
+ VFD_MODE_1_FIBERS = 14,
+ VFD_MODE_2_FIBERS = 15,
+ VFD_MODE_3_FIBERS = 16,
+ VFD_MODE_4_FIBERS = 17,
+ VFD_BFIFO_STALL = 18,
+ VFD_NUM_VERTICES_TOTAL = 19,
+ VFD_PACKER_FULL = 20,
+ VFD_UCHE_REQUEST_FIFO_FULL = 21,
+ VFD_STARVE_CYCLES_PC = 22,
+ VFD_STARVE_CYCLES_UCHE = 23,
+};
+
+enum a4xx_vpc_perfcounter_select {
+ VPC_SP_LM_COMPONENTS = 2,
+ VPC_SP0_LM_BYTES = 3,
+ VPC_SP1_LM_BYTES = 4,
+ VPC_SP2_LM_BYTES = 5,
+ VPC_SP3_LM_BYTES = 6,
+ VPC_WORKING_CYCLES = 7,
+ VPC_STALL_CYCLES_LM = 8,
+ VPC_STARVE_CYCLES_RAS = 9,
+ VPC_STREAMOUT_CYCLES = 10,
+ VPC_UCHE_TRANSACTIONS = 12,
+ VPC_STALL_CYCLES_UCHE = 13,
+ VPC_BUSY_CYCLES = 14,
+ VPC_STARVE_CYCLES_SP = 15,
+};
+
+enum a4xx_vsc_perfcounter_select {
+ VSC_BUSY_CYCLES = 0,
+ VSC_WORKING_CYCLES = 1,
+ VSC_STALL_CYCLES_UCHE = 2,
+ VSC_STARVE_CYCLES_RAS = 3,
+ VSC_EOT_NUM = 4,
+};
+
enum a4xx_tex_filter {
A4XX_TEX_NEAREST = 0,
A4XX_TEX_LINEAR = 1,
@@ -326,6 +897,12 @@ static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 0x00000cd0
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1
+
#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
@@ -400,8 +977,13 @@ static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4
#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
-#define A4XX_RB_MRT_CONTROL_FASTCLEAR 0x00000400
-#define A4XX_RB_MRT_CONTROL_B11 0x00000800
+#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
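The old FASTCLEAR/B11 bits give way to an explicit ROP field here. Its packer takes enum a3xx_rop_code — removed from a3xx.xml.h earlier in this diff, so presumably relocated to a shared header — whose values (ROP_COPY = 12, ROP_XOR = 6, and so on) are the standard 4-bit logic-op codes. A hedged sketch of requesting a raw-copy logic op on a render target:

static inline uint32_t example_mrt_rop_copy(void)
{
	/* enable the logic op and select the COPY code for this MRT */
	return A4XX_RB_MRT_CONTROL_ROP_ENABLE |
	       A4XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
}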
#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -490,8 +1072,8 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
}
-#define REG_A4XX_RB_BLEND_RED 0x000020f3
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_RED 0x000020f0
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x0000ffff
#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
{
@@ -504,8 +1086,16 @@ static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
}
-#define REG_A4XX_RB_BLEND_GREEN 0x000020f4
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1
+#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x0000ffff
#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
{
@@ -518,8 +1108,16 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
}
-#define REG_A4XX_RB_BLEND_BLUE 0x000020f5
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3
+#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x0000ffff
#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
{
@@ -532,8 +1130,16 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
}
+#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5
+#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK;
+}
+
#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x00007fff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x0000ffff
#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
{
@@ -546,6 +1152,14 @@ static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
}
+#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7
+#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
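With the renumbering above, each blend-color channel now occupies a register pair: the legacy register at the even offset (0x20f0 through 0x20f6, carrying the 16-bit UINT and half-float fields) and a full-precision float at the odd offset beside it. fui() is presumably a float-to-raw-bits cast, just as util_float_to_half() is a float-to-half conversion; a sketch of packing one channel both ways:

static inline void example_blend_red(float red, uint32_t *half_bits, uint32_t *full_bits)
{
	*half_bits = A4XX_RB_BLEND_RED_FLOAT(red);	/* half-float field at 0x20f0 */
	*full_bits = A4XX_RB_BLEND_RED_F32(red);	/* raw IEEE-754 bits at 0x20f1 */
}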
#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
@@ -568,7 +1182,7 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val)
{
return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
}
-#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
+#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
@@ -736,6 +1350,7 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
}
#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
+#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
@@ -996,8 +1611,386 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x
#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
+#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098
+#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001
+#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000
+
#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
+#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 0x000000b8
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f
+
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
@@ -1046,6 +2039,10 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { r
static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a
+
#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
@@ -1060,6 +2057,14 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179
+
#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
@@ -1099,6 +2104,11 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
+#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0
+#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8
+
#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
@@ -1191,6 +2201,20 @@ static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240
#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
+#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507
+
#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
@@ -1201,6 +2225,28 @@ static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578
#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3
+#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece
+
#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
@@ -1699,6 +2745,12 @@ static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67
+
#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
#define REG_A4XX_VPC_ATTR 0x00002140
@@ -1811,6 +2863,20 @@ static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0
#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49
+
#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0
@@ -1967,6 +3033,20 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a
+
#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
@@ -2021,9 +3101,23 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a
+
#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f
+
#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
+#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
@@ -2114,6 +3208,7 @@ static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
+#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008
#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
@@ -2285,6 +3380,20 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94
+
#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
@@ -2295,6 +3404,22 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d
+
#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
@@ -2549,6 +3674,18 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
#define REG_A4XX_PC_BIN_BASE 0x000021c0
@@ -2564,7 +3701,20 @@ static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val)
#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
-#define REG_A4XX_UNKNOWN_21C5 0x000021c5
+#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040
#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
@@ -2646,20 +3796,6 @@ static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
#define REG_A4XX_UNKNOWN_20EF 0x000020ef
-#define REG_A4XX_UNKNOWN_20F0 0x000020f0
-
-#define REG_A4XX_UNKNOWN_20F1 0x000020f1
-
-#define REG_A4XX_UNKNOWN_20F2 0x000020f2
-
-#define REG_A4XX_UNKNOWN_20F7 0x000020f7
-#define A4XX_UNKNOWN_20F7__MASK 0xffffffff
-#define A4XX_UNKNOWN_20F7__SHIFT 0
-static inline uint32_t A4XX_UNKNOWN_20F7(float val)
-{
- return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK;
-}
-
#define REG_A4XX_UNKNOWN_2152 0x00002152
#define REG_A4XX_UNKNOWN_2153 0x00002153
@@ -2720,6 +3856,12 @@ static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val)
{
return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
}
+#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
#define REG_A4XX_TEX_SAMP_1 0x00000001
#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
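The new LOD bias helper above packs a float into signed fixed point with 8 fractional bits before shifting it into bits 19..31 of TEX_SAMP_0. A minimal standalone sketch of the same encoding (constants copied from the masks above; the test values are illustrative, and negative biases are avoided here since left-shifting a negative int is undefined in C):

#include <assert.h>
#include <stdint.h>

#define LOD_BIAS_MASK  0xfff80000
#define LOD_BIAS_SHIFT 19

/* Same math as A4XX_TEX_SAMP_0_LOD_BIAS: val * 256 gives signed fixed
 * point with 8 fractional bits, shifted into bits 19..31 of the dword. */
static uint32_t lod_bias(float val)
{
	return (((int32_t)(val * 256.0)) << LOD_BIAS_SHIFT) & LOD_BIAS_MASK;
}

int main(void)
{
	assert(lod_bias(1.5f) == (384u << LOD_BIAS_SHIFT));	/* 1.5 * 256 = 384 */
	assert(lod_bias(0.0f) == 0);
	return 0;
}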
@@ -2728,6 +3870,7 @@ static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val
{
return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
}
+#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
@@ -2796,7 +3939,7 @@ static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
{
return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
}
-#define A4XX_TEX_CONST_1_WIDTH__MASK 0x1fff8000
+#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000
#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
{
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index a53f1be05f75..d0d3c7baa8fe 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -102,11 +102,17 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
+ /*
+  * Early A430s have a timing issue with SP/TP power collapse;
+  * disabling HW clock gating prevents it.
+  */
+ if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
+ else
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}
+
static void a4xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb;
@@ -141,7 +147,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- if (adreno_is_a4xx(adreno_gpu)) {
+ if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -150,6 +156,13 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
} else {
BUG();
}
@@ -161,6 +174,10 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+ if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
+ }
+
/* Enable the RBBM error reporting bits */
gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
@@ -183,6 +200,14 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* Turn on performance counters: */
gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
+ /* Use the first CP counter for timestamp queries; userspace may set
+ * this as well, but it selects the same counter/countable:
+ */
+ gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
+
+ if (adreno_is_a430(adreno_gpu))
+ gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
+
/* Disable L2 bypass to avoid UCHE out of bounds errors */
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
@@ -190,6 +215,15 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
+ /* On A430 enable SP regfile sleep for power savings */
+ /* TODO downstream does this for !420, so maybe applies for 405 too? */
+ if (!adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
+ 0x00000441);
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
+ 0x00000441);
+ }
+
a4xx_enable_hwcg(gpu);
/*
@@ -204,10 +238,6 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
}
- ret = adreno_hw_init(gpu);
- if (ret)
- return ret;
-
/* setup access protection: */
gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
@@ -263,6 +293,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
a4xx_me_init(gpu);
+
return 0;
}
@@ -317,6 +348,13 @@ static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
DBG("%s: Int status %08x", gpu->name, status);
+ if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) {
+ uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS);
+ printk("CP | Protected mode error| %s | addr=%x\n",
+ reg & (1 << 24) ? "WRITE" : "READ",
+ (reg & 0xFFFFF) >> 2);
+ }
+
gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
msm_gpu_retire(gpu);
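The fault report above treats bit 24 of CP_PROTECT_STATUS as the read/write flag and the low 20 bits as the faulting byte offset, dividing by four to get a register index. The same decode in isolation (field layout taken from the code above; the test value is illustrative):

#include <assert.h>
#include <stdint.h>

/* Decode CP_PROTECT_STATUS the same way a4xx_irq() does: bit 24 is the
 * write flag, bits 0..19 hold the faulting byte offset, and dividing by
 * four turns that offset into a register index. */
static void decode_protect_fault(uint32_t status, int *is_write,
				 uint32_t *reg_index)
{
	*is_write = !!(status & (1 << 24));
	*reg_index = (status & 0xFFFFF) >> 2;
}

int main(void)
{
	int w;
	uint32_t reg;

	decode_protect_fault((1u << 24) | (0x22c0 << 2), &w, &reg);
	assert(w == 1 && reg == 0x22c0);
	return 0;
}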
@@ -512,12 +550,63 @@ static void a4xx_dump(struct msm_gpu *gpu)
adreno_dump(gpu);
}
+static int a4xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ unsigned int reg;
+ /* Set the default register values; set SW_COLLAPSE to 0 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
+ do {
+ udelay(5);
+ reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
+ } while (!(reg & A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON));
+ }
+ return 0;
+}
+
+static int a4xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_suspend(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ /* Set the default register values; set SW_COLLAPSE to 1 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
+ }
+ return 0;
+}
+
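Resume clears SW_COLLAPSE (bit 0 of RBBM_POWER_CNTL_IP, hence 0x778000 versus 0x778001 on suspend) and then polls RBBM_POWER_STATUS until the SP/TP rail reports up. The kernel loop above polls unconditionally; a bounded variant of the same handshake might look like this (the accessor names and retry cap are assumptions, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for gpu_write()/gpu_read()/udelay() in the sketch. */
extern void reg_write(uint32_t reg, uint32_t val);
extern uint32_t reg_read(uint32_t reg);
extern void delay_us(unsigned int us);

/* Write a power-control value, then poll a status bit until the rail
 * reports up, in the style of the a430 SP/TP handshake above. */
static bool power_up(uint32_t cntl_reg, uint32_t cntl_val,
		     uint32_t status_reg, uint32_t ready_bit)
{
	int tries = 1000;	/* assumed cap: ~5ms at 5us per poll */

	reg_write(cntl_reg, cntl_val);	/* e.g. 0x778000: SW_COLLAPSE = 0 */
	while (tries--) {
		delay_us(5);
		if (reg_read(status_reg) & ready_bit)
			return true;
	}
	return false;			/* rail never came up */
}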
+static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ uint32_t hi, lo, tmp;
+
+ tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+ do {
+ hi = tmp;
+ lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
+ tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+ } while (tmp != hi);
+
+ *value = (((uint64_t)hi) << 32) | lo;
+
+ return 0;
+}
+
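a4xx_get_timestamp() reads a 64-bit counter that is exposed as two 32-bit registers, so it re-reads the high word until it is stable; otherwise a low-word rollover between the two reads could produce a torn value. The same pattern in isolation (the accessors are stand-ins for gpu_read(), not part of the patch):

#include <stdint.h>

/* Stand-ins for gpu_read() of the CP counter's two halves. */
extern uint32_t read_counter_lo(void);
extern uint32_t read_counter_hi(void);

/* Re-read the high word until it is stable so a low-word rollover
 * between the two 32-bit reads cannot yield a torn 64-bit value. */
static uint64_t read_counter64(void)
{
	uint32_t hi, lo, tmp;

	tmp = read_counter_hi();
	do {
		hi = tmp;
		lo = read_counter_lo();
		tmp = read_counter_hi();
	} while (tmp != hi);

	return ((uint64_t)hi << 32) | lo;
}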
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.hw_init = a4xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
+ .pm_suspend = a4xx_pm_suspend,
+ .pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
.last_fence = adreno_last_fence,
.submit = adreno_submit,
@@ -529,6 +618,7 @@ static const struct adreno_gpu_funcs funcs = {
.show = a4xx_show,
#endif
},
+ .get_timestamp = a4xx_get_timestamp,
};
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index c304468cf2bd..e81481d1b7df 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -119,6 +120,23 @@ enum adreno_rb_copy_control_mode {
RB_COPY_DEPTH_STENCIL = 5,
};
+enum a3xx_rop_code {
+ ROP_CLEAR = 0,
+ ROP_NOR = 1,
+ ROP_AND_INVERTED = 2,
+ ROP_COPY_INVERTED = 3,
+ ROP_AND_REVERSE = 4,
+ ROP_INVERT = 5,
+ ROP_NAND = 7,
+ ROP_AND = 8,
+ ROP_EQUIV = 9,
+ ROP_NOOP = 10,
+ ROP_OR_INVERTED = 11,
+ ROP_OR_REVERSE = 13,
+ ROP_OR = 14,
+ ROP_SET = 15,
+};
+
enum a3xx_render_mode {
RB_RENDERING_PASS = 0,
RB_TILING_PASS = 1,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 950d27d26b30..5127b75dbf40 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -69,6 +69,14 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a420_pfp.fw",
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(4, 3, 0, ANY_ID),
+ .revn = 430,
+ .name = "A430",
+ .pm4fw = "a420_pm4.fw",
+ .pfpfw = "a420_pfp.fw",
+ .gmem = (SZ_1M + SZ_512K),
+ .init = a4xx_gpu_init,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a3b54cc76495..4951172ede06 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -41,6 +41,13 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
(adreno_gpu->rev.major << 16) |
(adreno_gpu->rev.core << 24);
return 0;
+ case MSM_PARAM_MAX_FREQ:
+ *value = adreno_gpu->base.fast_rate;
+ return 0;
+ case MSM_PARAM_TIMESTAMP:
+ if (adreno_gpu->funcs->get_timestamp)
+ return adreno_gpu->funcs->get_timestamp(gpu, value);
+ return -EINVAL;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
@@ -68,18 +75,15 @@ int adreno_hw_init(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
/* size is log2(quad-words): */
AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
- AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
+ (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
/* Setup ringbuffer address: */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- rbmemptr(adreno_gpu, rptr));
-
- /* Setup scratch/timestamp: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
- rbmemptr(adreno_gpu, fence));
- adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
+ if (!adreno_is_a430(adreno_gpu))
+ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ rbmemptr(adreno_gpu, rptr));
return 0;
}
@@ -89,6 +93,16 @@ static uint32_t get_wptr(struct msm_ringbuffer *ring)
return ring->cur - ring->start;
}
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
+{
+ if (adreno_is_a430(adreno_gpu))
+ return adreno_gpu->memptrs->rptr = adreno_gpu_read(
+ adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+ else
+ return adreno_gpu->memptrs->rptr;
+}
+
uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -137,7 +151,8 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
if (priv->lastctx == ctx)
break;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+ CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, submit->cmd[i].iova);
OUT_RING(ring, submit->cmd[i].size);
ibs++;
@@ -216,9 +231,12 @@ void adreno_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr = get_wptr(gpu->rb);
+ int ret;
/* wait for CP to drain ringbuffer: */
- if (spin_until(adreno_gpu->memptrs->rptr == wptr))
+ ret = spin_until(get_rptr(adreno_gpu) == wptr);
+
+ if (ret)
DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
@@ -237,7 +255,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
gpu->submitted_fence);
- seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
+ seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
@@ -278,7 +296,7 @@ void adreno_dump_info(struct msm_gpu *gpu)
printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
gpu->submitted_fence);
- printk("rptr: %d\n", adreno_gpu->memptrs->rptr);
+ printk("rptr: %d\n", get_rptr(adreno_gpu));
printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
printk("rb wptr: %d\n", get_wptr(gpu->rb));
@@ -313,7 +331,7 @@ static uint32_t ring_freewords(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t size = gpu->rb->size / 4;
uint32_t wptr = get_wptr(gpu->rb);
- uint32_t rptr = adreno_gpu->memptrs->rptr;
+ uint32_t rptr = get_rptr(adreno_gpu);
return (rptr + (size - 1) - wptr) % size;
}
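ring_freewords() computes free space with one word held back, so rptr == wptr can only mean "empty", never "full". A worked standalone version of the same formula:

#include <assert.h>
#include <stdint.h>

/* Free words in a ring of `size` words, holding one word back so that
 * rptr == wptr unambiguously means "empty" (same formula as
 * ring_freewords() above). */
static uint32_t ring_free(uint32_t rptr, uint32_t wptr, uint32_t size)
{
	return (rptr + (size - 1) - wptr) % size;
}

int main(void)
{
	assert(ring_free(0, 0, 1024) == 1023);	/* empty: all but one word */
	assert(ring_free(0, 1023, 1024) == 0);	/* one behind rptr: full */
	assert(ring_free(100, 90, 1024) == 9);	/* writer trailing reader */
	return 0;
}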
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 0a312e9d3afd..1d07511f4d22 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -114,6 +114,7 @@ struct adreno_rev {
struct adreno_gpu_funcs {
struct msm_gpu_funcs base;
+ int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};
struct adreno_info {
@@ -228,6 +229,11 @@ static inline int adreno_is_a420(struct adreno_gpu *gpu)
return gpu->revn == 420;
}
+static inline int adreno_is_a430(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 430;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index a22fef569499..d7477ff867c9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -9,16 +9,17 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -172,6 +173,11 @@ enum adreno_pm4_type3_packets {
CP_UNKNOWN_1A = 26,
CP_UNKNOWN_4E = 78,
CP_WIDE_REG_WRITE = 116,
+ CP_SCRATCH_TO_REG = 77,
+ CP_REG_TO_SCRATCH = 74,
+ CP_WAIT_MEM_WRITES = 18,
+ CP_COND_REG_EXEC = 71,
+ CP_MEM_TO_REG = 66,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -199,7 +205,11 @@ enum adreno_state_type {
enum adreno_state_src {
SS_DIRECT = 0,
+ SS_INVALID_ALL_IC = 2,
+ SS_INVALID_PART_IC = 3,
SS_INDIRECT = 4,
+ SS_INDIRECT_TCM = 5,
+ SS_INDIRECT_STM = 6,
};
enum a4xx_index_size {
@@ -227,7 +237,7 @@ static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
{
return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
}
-#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000
#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
{
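Widening the NUM_UNIT mask from 0x7fc00000 to 0xffc00000 grows the field from 9 to 10 bits, so unit counts of 512..1023 now encode without truncation. A quick check of the encoding (test values illustrative):

#include <assert.h>
#include <stdint.h>

#define NUM_UNIT_SHIFT 22
#define NUM_UNIT_MASK  0xffc00000	/* 10 bits; the old 0x7fc00000 held 9 */

static uint32_t num_unit(uint32_t val)
{
	return (val << NUM_UNIT_SHIFT) & NUM_UNIT_MASK;
}

int main(void)
{
	/* 512..1023 only fit with the widened mask */
	assert(num_unit(512) == 0x80000000);
	assert(num_unit(1023) == 0xffc00000);
	return 0;
}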
@@ -499,5 +509,29 @@ static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
}
+#define REG_CP_REG_TO_MEM_0 0x00000000
+#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
+#define CP_REG_TO_MEM_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_0_CNT__MASK 0x3ff80000
+#define CP_REG_TO_MEM_0_CNT__SHIFT 19
+static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_0_64B 0x40000000
+#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_1 0x00000001
+#define CP_REG_TO_MEM_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
+}
+
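The new CP_REG_TO_MEM definitions pack the source register index into bits 0..15 of dword 0 and a count field into bits 19..28, with 64B and ACCUMULATE as flag bits; dword 1 carries the destination address. A sketch of building dword 0 from the masks above (the example register number is arbitrary, and the exact meaning of the count field is not asserted here):

#include <assert.h>
#include <stdint.h>

#define REG_MASK  0x0000ffff
#define CNT_SHIFT 19
#define CNT_MASK  0x3ff80000
#define FLAG_64B  0x40000000

/* Build dword 0 of a CP_REG_TO_MEM packet from the masks above. */
static uint32_t reg_to_mem_0(uint32_t reg, uint32_t cnt, int is_64b)
{
	uint32_t dw = (reg & REG_MASK) | ((cnt << CNT_SHIFT) & CNT_MASK);

	if (is_64b)
		dw |= FLAG_64B;
	return dw;
}

int main(void)
{
	assert(reg_to_mem_0(0x012c, 1, 0) == ((1u << CNT_SHIFT) | 0x012c));
	return 0;
}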
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index b2b5f3dd1b4c..4958594d5266 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 2a827d8093a2..e58e9b91b34d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -57,10 +57,9 @@ static const char * const dsi_8916_bus_clk_names[] = {
static const struct msm_dsi_config msm8916_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 4,
+ .num = 3,
.regs = {
{"gdsc", -1, -1, -1, -1},
- {"vdd", 2850000, 2850000, 100000, 100},
{"vdda", 1200000, 1200000, 100000, 100},
{"vddio", 1800000, 1800000, 100000, 100},
},
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 48f9967b4a1b..4282ec6bbaaf 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -163,6 +163,10 @@ struct msm_dsi_host {
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
+ /* lane data parsed from DT */
+ int dlane_swap;
+ int num_data_lanes;
+
u32 dma_cmd_ctrl_restore;
bool registered;
@@ -845,19 +849,10 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
data = DSI_CTRL_CLK_EN;
DBG("lane number=%d", msm_host->lanes);
- if (msm_host->lanes == 2) {
- data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
- /* swap lanes for 2-lane panel for better performance */
- dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
- DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
- } else {
- /* Take 4 lanes as default */
- data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
- DSI_CTRL_LANE3;
- /* Do not swap lanes for 4-lane panel */
- dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
- DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
- }
+ data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
+
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
dsi_write(msm_host, REG_DSI_LANE_CTRL,
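The replacement derives the lane-enable bits arithmetically: if DSI_CTRL_LANE0 is a single bit B, then (B << n) - B equals B * (2^n - 1), i.e. n consecutive bits starting at B, so any lane count from 1 to 4 is handled without the old hard-coded 2- and 4-lane branches. A standalone check (the bit position of LANE0 is an assumption, not taken from dsi.xml.h):

#include <assert.h>
#include <stdint.h>

#define LANE0_BIT 0x00000010	/* assumed position of the first lane bit */

/* Enable bits for lanes 0..n-1: (B << n) - B == B * (2^n - 1). */
static uint32_t lane_mask(unsigned int n)
{
	return (LANE0_BIT << n) - LANE0_BIT;
}

int main(void)
{
	assert(lane_mask(1) == 0x010);	/* lane 0 only */
	assert(lane_mask(2) == 0x030);	/* lanes 0-1 */
	assert(lane_mask(4) == 0x0f0);	/* lanes 0-3 */
	return 0;
}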
@@ -1479,13 +1474,14 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
int ret;
+ if (dsi->lanes > msm_host->num_data_lanes)
+ return -EINVAL;
+
msm_host->channel = dsi->channel;
msm_host->lanes = dsi->lanes;
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
- WARN_ON(dsi->dev.of_node != msm_host->device_node);
-
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
if (ret)
@@ -1534,6 +1530,75 @@ static struct mipi_dsi_host_ops dsi_host_ops = {
.transfer = dsi_host_transfer,
};
+/*
+ * List of supported physical to logical lane mappings.
+ * For example, the 2nd entry represents the following mapping:
+ *
+ * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
+ */
+static const int supported_data_lane_swaps[][4] = {
+ { 0, 1, 2, 3 },
+ { 3, 0, 1, 2 },
+ { 2, 3, 0, 1 },
+ { 1, 2, 3, 0 },
+ { 0, 3, 2, 1 },
+ { 1, 0, 3, 2 },
+ { 2, 1, 0, 3 },
+ { 3, 2, 1, 0 },
+};
+
+static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
+ struct device_node *ep)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ struct property *prop;
+ u32 lane_map[4];
+ int ret, i, len, num_lanes;
+
+ prop = of_find_property(ep, "qcom,data-lane-map", &len);
+ if (!prop) {
+ dev_dbg(dev, "failed to find data lane mapping\n");
+ return -EINVAL;
+ }
+
+ num_lanes = len / sizeof(u32);
+
+ if (num_lanes < 1 || num_lanes > 4) {
+ dev_err(dev, "bad number of data lanes\n");
+ return -EINVAL;
+ }
+
+ msm_host->num_data_lanes = num_lanes;
+
+ ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map,
+ num_lanes);
+ if (ret) {
+ dev_err(dev, "failed to read lane data\n");
+ return ret;
+ }
+
+ /*
+ * compare DT specified physical-logical lane mappings with the ones
+ * supported by hardware
+ */
+ for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
+ const int *swap = supported_data_lane_swaps[i];
+ int j;
+
+ for (j = 0; j < num_lanes; j++) {
+ if (swap[j] != lane_map[j])
+ break;
+ }
+
+ if (j == num_lanes) {
+ msm_host->dlane_swap = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
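dsi_host_parse_lane_data() accepts only the eight physical-to-logical permutations the controller can realize; the index of the matching row in supported_data_lane_swaps becomes the DLN_SWAP_SEL value programmed in dsi_ctrl_config(). The search step in isolation (table abbreviated; the helper name is illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static const int supported_swaps[][4] = {
	{ 0, 1, 2, 3 },		/* DLN_SWAP_SEL 0: identity */
	{ 3, 0, 1, 2 },		/* DLN_SWAP_SEL 1: "3012" */
	/* ... the remaining six rows as in the table above */
};

/* Return the swap-select index for a DT-supplied lane map, or -1 when
 * the permutation cannot be realized in hardware. */
static int find_lane_swap(const uint32_t *lane_map, int num_lanes)
{
	size_t i;

	for (i = 0; i < sizeof(supported_swaps) / sizeof(supported_swaps[0]); i++) {
		int j;

		for (j = 0; j < num_lanes; j++)
			if (supported_swaps[i][j] != (int)lane_map[j])
				break;
		if (j == num_lanes)
			return (int)i;
	}
	return -1;
}

int main(void)
{
	const uint32_t map[4] = { 3, 0, 1, 2 };	/* qcom,data-lane-map = <3 0 1 2> */

	assert(find_lane_swap(map, 4) == 1);
	return 0;
}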
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
@@ -1560,17 +1625,21 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
return 0;
}
+ ret = dsi_host_parse_lane_data(msm_host, endpoint);
+ if (ret) {
+ dev_err(dev, "%s: invalid lane configuration %d\n",
+ __func__, ret);
+ goto err;
+ }
+
/* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_port_parent(endpoint);
if (!device_node) {
dev_err(dev, "%s: no valid device\n", __func__);
- of_node_put(endpoint);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
- of_node_put(endpoint);
- of_node_put(device_node);
-
msm_host->device_node = device_node;
if (of_property_read_bool(np, "syscon-sfpb")) {
@@ -1579,11 +1648,16 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
if (IS_ERR(msm_host->sfpb)) {
dev_err(dev, "%s: failed to get sfpb regmap\n",
__func__);
- return PTR_ERR(msm_host->sfpb);
+ ret = PTR_ERR(msm_host->sfpb);
}
}
- return 0;
+ of_node_put(device_node);
+
+err:
+ of_node_put(endpoint);
+
+ return ret;
}
int msm_dsi_host_init(struct msm_dsi *msm_dsi)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 80ec65e47468..2d999494cdea 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 80b6038334a6..2cf1664723e8 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -97,8 +97,8 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
int id);
#else
-struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
- int id)
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(
+ struct platform_device *pdev, int id)
{
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 7d7662e69e11..506434fac993 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index 90bf5ed46746..f1072c18c81e 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 9a0989c0b4de..51b9ea552f97 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -21,7 +21,7 @@
#include "hdmi.h"
-void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
{
uint32_t ctrl = 0;
unsigned long flags;
@@ -46,29 +46,27 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
power_on ? "Enable" : "Disable", ctrl);
}
-static irqreturn_t hdmi_irq(int irq, void *dev_id)
+static irqreturn_t msm_hdmi_irq(int irq, void *dev_id)
{
struct hdmi *hdmi = dev_id;
/* Process HPD: */
- hdmi_connector_irq(hdmi->connector);
+ msm_hdmi_connector_irq(hdmi->connector);
/* Process DDC: */
- hdmi_i2c_irq(hdmi->i2c);
+ msm_hdmi_i2c_irq(hdmi->i2c);
/* Process HDCP: */
if (hdmi->hdcp_ctrl)
- hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+ msm_hdmi_hdcp_irq(hdmi->hdcp_ctrl);
/* TODO audio.. */
return IRQ_HANDLED;
}
-static void hdmi_destroy(struct hdmi *hdmi)
+static void msm_hdmi_destroy(struct hdmi *hdmi)
{
- struct hdmi_phy *phy = hdmi->phy;
-
/*
* at this point, hpd has been disabled,
* after flush workq, it's safe to deinit hdcp
@@ -77,21 +75,53 @@ static void hdmi_destroy(struct hdmi *hdmi)
flush_workqueue(hdmi->workq);
destroy_workqueue(hdmi->workq);
}
- hdmi_hdcp_destroy(hdmi);
- if (phy)
- phy->funcs->destroy(phy);
+ msm_hdmi_hdcp_destroy(hdmi);
+
+ if (hdmi->phy_dev) {
+ put_device(hdmi->phy_dev);
+ hdmi->phy = NULL;
+ hdmi->phy_dev = NULL;
+ }
if (hdmi->i2c)
- hdmi_i2c_destroy(hdmi->i2c);
+ msm_hdmi_i2c_destroy(hdmi->i2c);
platform_set_drvdata(hdmi->pdev, NULL);
}
+static int msm_hdmi_get_phy(struct hdmi *hdmi)
+{
+ struct platform_device *pdev = hdmi->pdev;
+ struct platform_device *phy_pdev;
+ struct device_node *phy_node;
+
+ phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
+ if (!phy_node) {
+ dev_err(&pdev->dev, "cannot find phy device\n");
+ return -ENXIO;
+ }
+
+ phy_pdev = of_find_device_by_node(phy_node);
+ if (phy_pdev)
+ hdmi->phy = platform_get_drvdata(phy_pdev);
+
+ of_node_put(phy_node);
+
+ if (!phy_pdev || !hdmi->phy) {
+ dev_err(&pdev->dev, "phy driver is not ready\n");
+ return -EPROBE_DEFER;
+ }
+
+ hdmi->phy_dev = get_device(&phy_pdev->dev);
+
+ return 0;
+}
+
/* construct hdmi at bind/probe time, grab all the resources. If
* we are to EPROBE_DEFER we want to do it here, rather than later
* at modeset_init() time
*/
-static struct hdmi *hdmi_init(struct platform_device *pdev)
+static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
{
struct hdmi_platform_config *config = pdev->dev.platform_data;
struct hdmi *hdmi = NULL;
@@ -108,18 +138,6 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
hdmi->config = config;
spin_lock_init(&hdmi->reg_lock);
- /* not sure about which phy maps to which msm.. probably I miss some */
- if (config->phy_init) {
- hdmi->phy = config->phy_init(hdmi);
-
- if (IS_ERR(hdmi->phy)) {
- ret = PTR_ERR(hdmi->phy);
- dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
- hdmi->phy = NULL;
- goto fail;
- }
- }
-
hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
if (IS_ERR(hdmi->mmio)) {
ret = PTR_ERR(hdmi->mmio);
@@ -222,7 +240,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
- hdmi->i2c = hdmi_i2c_init(hdmi);
+ hdmi->i2c = msm_hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
@@ -230,7 +248,13 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
goto fail;
}
- hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+ ret = msm_hdmi_get_phy(hdmi);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get phy\n");
+ goto fail;
+ }
+
+ hdmi->hdcp_ctrl = msm_hdmi_hdcp_init(hdmi);
if (IS_ERR(hdmi->hdcp_ctrl)) {
dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
hdmi->hdcp_ctrl = NULL;
@@ -240,7 +264,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
fail:
if (hdmi)
- hdmi_destroy(hdmi);
+ msm_hdmi_destroy(hdmi);
return ERR_PTR(ret);
}
@@ -250,10 +274,10 @@ fail:
* driver (not hdmi sub-device's probe/bind!)
*
* Any resource (regulator/clk/etc) which could be missing at boot
- * should be handled in hdmi_init() so that failure happens from
+ * should be handled in msm_hdmi_init() so that failure happens from
* hdmi sub-device's probe.
*/
-int hdmi_modeset_init(struct hdmi *hdmi,
+int msm_hdmi_modeset_init(struct hdmi *hdmi,
struct drm_device *dev, struct drm_encoder *encoder)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -265,7 +289,7 @@ int hdmi_modeset_init(struct hdmi *hdmi,
hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
- hdmi->bridge = hdmi_bridge_init(hdmi);
+ hdmi->bridge = msm_hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
@@ -273,7 +297,7 @@ int hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- hdmi->connector = hdmi_connector_init(hdmi);
+ hdmi->connector = msm_hdmi_connector_init(hdmi);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
@@ -289,7 +313,7 @@ int hdmi_modeset_init(struct hdmi *hdmi,
}
ret = devm_request_irq(&pdev->dev, hdmi->irq,
- hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"hdmi_isr", hdmi);
if (ret < 0) {
dev_err(dev->dev, "failed to request IRQ%u: %d\n",
@@ -309,7 +333,7 @@ int hdmi_modeset_init(struct hdmi *hdmi,
fail:
/* bridge is normally destroyed by drm: */
if (hdmi->bridge) {
- hdmi_bridge_destroy(hdmi->bridge);
+ msm_hdmi_bridge_destroy(hdmi->bridge);
hdmi->bridge = NULL;
}
if (hdmi->connector) {
@@ -331,15 +355,12 @@ fail:
static const char *pwr_reg_names_none[] = {};
static const char *hpd_reg_names_none[] = {};
-static struct hdmi_platform_config hdmi_tx_8660_config = {
- .phy_init = hdmi_phy_8x60_init,
-};
+static struct hdmi_platform_config hdmi_tx_8660_config;
static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"};
static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
static struct hdmi_platform_config hdmi_tx_8960_config = {
- .phy_init = hdmi_phy_8960_init,
HDMI_CFG(hpd_reg, 8960),
HDMI_CFG(hpd_clk, 8960),
};
@@ -351,7 +372,6 @@ static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_cl
static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
static struct hdmi_platform_config hdmi_tx_8974_config = {
- .phy_init = hdmi_phy_8x74_init,
HDMI_CFG(pwr_reg, 8x74),
HDMI_CFG(hpd_reg, 8x74),
HDMI_CFG(pwr_clk, 8x74),
@@ -362,7 +382,6 @@ static struct hdmi_platform_config hdmi_tx_8974_config = {
static const char *hpd_reg_names_8084[] = {"hpd-gdsc", "hpd-5v", "hpd-5v-en"};
static struct hdmi_platform_config hdmi_tx_8084_config = {
- .phy_init = hdmi_phy_8x74_init,
HDMI_CFG(pwr_reg, 8x74),
HDMI_CFG(hpd_reg, 8084),
HDMI_CFG(pwr_clk, 8x74),
@@ -371,7 +390,6 @@ static struct hdmi_platform_config hdmi_tx_8084_config = {
};
static struct hdmi_platform_config hdmi_tx_8994_config = {
- .phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */
HDMI_CFG(pwr_reg, 8x74),
HDMI_CFG(hpd_reg, none),
HDMI_CFG(pwr_clk, 8x74),
@@ -380,7 +398,6 @@ static struct hdmi_platform_config hdmi_tx_8994_config = {
};
static struct hdmi_platform_config hdmi_tx_8996_config = {
- .phy_init = NULL,
HDMI_CFG(pwr_reg, none),
HDMI_CFG(hpd_reg, none),
HDMI_CFG(pwr_clk, 8x74),
@@ -388,7 +405,21 @@ static struct hdmi_platform_config hdmi_tx_8996_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
-static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
+static const struct {
+ const char *name;
+ const bool output;
+ const int value;
+ const char *label;
+} msm_hdmi_gpio_pdata[] = {
+ { "qcom,hdmi-tx-ddc-clk", true, 1, "HDMI_DDC_CLK" },
+ { "qcom,hdmi-tx-ddc-data", true, 1, "HDMI_DDC_DATA" },
+ { "qcom,hdmi-tx-hpd", false, 1, "HDMI_HPD" },
+ { "qcom,hdmi-tx-mux-en", true, 1, "HDMI_MUX_EN" },
+ { "qcom,hdmi-tx-mux-sel", true, 0, "HDMI_MUX_SEL" },
+ { "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" },
+};
+
+static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
{
int gpio = of_get_named_gpio(of_node, name, 0);
if (gpio < 0) {
@@ -403,13 +434,14 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
return gpio;
}
-static int hdmi_bind(struct device *dev, struct device *master, void *data)
+static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
static struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
+ int i;
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
@@ -420,16 +452,18 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg->mmio_name = "core_physical";
hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
- hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
- hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
- hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
- hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
- hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
- hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
+
+ for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
+ hdmi_cfg->gpios[i].num = msm_hdmi_get_gpio(of_node,
+ msm_hdmi_gpio_pdata[i].name);
+ hdmi_cfg->gpios[i].output = msm_hdmi_gpio_pdata[i].output;
+ hdmi_cfg->gpios[i].value = msm_hdmi_gpio_pdata[i].value;
+ hdmi_cfg->gpios[i].label = msm_hdmi_gpio_pdata[i].label;
+ }
dev->platform_data = hdmi_cfg;
- hdmi = hdmi_init(to_platform_device(dev));
+ hdmi = msm_hdmi_init(to_platform_device(dev));
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
priv->hdmi = hdmi;
@@ -437,34 +471,34 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
return 0;
}
-static void hdmi_unbind(struct device *dev, struct device *master,
+static void msm_hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
if (priv->hdmi) {
- hdmi_destroy(priv->hdmi);
+ msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
}
-static const struct component_ops hdmi_ops = {
- .bind = hdmi_bind,
- .unbind = hdmi_unbind,
+static const struct component_ops msm_hdmi_ops = {
+ .bind = msm_hdmi_bind,
+ .unbind = msm_hdmi_unbind,
};
-static int hdmi_dev_probe(struct platform_device *pdev)
+static int msm_hdmi_dev_probe(struct platform_device *pdev)
{
- return component_add(&pdev->dev, &hdmi_ops);
+ return component_add(&pdev->dev, &msm_hdmi_ops);
}
-static int hdmi_dev_remove(struct platform_device *pdev)
+static int msm_hdmi_dev_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &hdmi_ops);
+ component_del(&pdev->dev, &msm_hdmi_ops);
return 0;
}
-static const struct of_device_id dt_match[] = {
+static const struct of_device_id msm_hdmi_dt_match[] = {
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
@@ -474,21 +508,23 @@ static const struct of_device_id dt_match[] = {
{}
};
-static struct platform_driver hdmi_driver = {
- .probe = hdmi_dev_probe,
- .remove = hdmi_dev_remove,
+static struct platform_driver msm_hdmi_driver = {
+ .probe = msm_hdmi_dev_probe,
+ .remove = msm_hdmi_dev_remove,
.driver = {
.name = "hdmi_msm",
- .of_match_table = dt_match,
+ .of_match_table = msm_hdmi_dt_match,
},
};
-void __init hdmi_register(void)
+void __init msm_hdmi_register(void)
{
- platform_driver_register(&hdmi_driver);
+ msm_hdmi_phy_driver_register();
+ platform_driver_register(&msm_hdmi_driver);
}
-void __exit hdmi_unregister(void)
+void __exit msm_hdmi_unregister(void)
{
- platform_driver_unregister(&hdmi_driver);
+ platform_driver_unregister(&msm_hdmi_driver);
+ msm_hdmi_phy_driver_unregister();
}
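
Taken together, the hdmi.c hunks above replace six ad-hoc per-pin config fields with a descriptor table walked in a loop. A minimal sketch of that table-driven DT lookup, shown in isolation (the example_* names are hypothetical and not from the patch; of_get_named_gpio() and ARRAY_SIZE() are the real kernel APIs the patch uses):

/* Sketch only: resolve each optional GPIO from a static descriptor
 * table, recording -1 for pins a board does not wire up, as
 * msm_hdmi_get_gpio() does above. */
#include <linux/kernel.h>
#include <linux/of_gpio.h>

struct example_gpio {
	const char *name;	/* DT property, e.g. "qcom,hdmi-tx-hpd" */
	bool output;
	int value;
	const char *label;
	int num;		/* resolved GPIO number, -1 if absent */
};

static struct example_gpio example_gpios[] = {
	{ "qcom,hdmi-tx-hpd",    false, 1, "HDMI_HPD",    -1 },
	{ "qcom,hdmi-tx-mux-en", true,  1, "HDMI_MUX_EN", -1 },
};

static void example_resolve_gpios(struct device_node *of_node)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_gpios); i++) {
		int gpio = of_get_named_gpio(of_node,
					     example_gpios[i].name, 0);

		/* Optional pins simply stay at -1 on lookup failure */
		example_gpios[i].num = (gpio < 0) ? -1 : gpio;
	}
}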
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index d0e663192d01..65428cf233ce 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -27,10 +27,18 @@
#include "msm_drv.h"
#include "hdmi.xml.h"
+#define HDMI_MAX_NUM_GPIO 6
struct hdmi_phy;
struct hdmi_platform_config;
+struct hdmi_gpio_data {
+ int num;
+ bool output;
+ int value;
+ const char *label;
+};
+
struct hdmi_audio {
bool enabled;
struct hdmi_audio_infoframe infoframe;
@@ -62,6 +70,8 @@ struct hdmi {
struct clk **pwr_clks;
struct hdmi_phy *phy;
+ struct device *phy_dev;
+
struct i2c_adapter *i2c;
struct drm_connector *connector;
struct drm_bridge *bridge;
@@ -88,7 +98,6 @@ struct hdmi {
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
- struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
const char *mmio_name;
const char *qfprom_mmio_name;
@@ -110,11 +119,10 @@ struct hdmi_platform_config {
int pwr_clk_cnt;
/* gpio's: */
- int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
- int mux_lpm_gpio;
+ struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO];
};
-void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
{
@@ -132,65 +140,113 @@ static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
}
/*
- * The phy appears to be different, for example between 8960 and 8x60,
- * so split the phy related functions out and load the correct one at
- * runtime:
+ * hdmi phy:
*/
-struct hdmi_phy_funcs {
- void (*destroy)(struct hdmi_phy *phy);
+enum hdmi_phy_type {
+ MSM_HDMI_PHY_8x60,
+ MSM_HDMI_PHY_8960,
+ MSM_HDMI_PHY_8x74,
+ MSM_HDMI_PHY_8996,
+ MSM_HDMI_PHY_MAX,
+};
+
+struct hdmi_phy_cfg {
+ enum hdmi_phy_type type;
void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
void (*powerdown)(struct hdmi_phy *phy);
+ const char * const *reg_names;
+ int num_regs;
+ const char * const *clk_names;
+ int num_clks;
};
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg;
+
struct hdmi_phy {
+ struct platform_device *pdev;
+ void __iomem *mmio;
+ struct hdmi_phy_cfg *cfg;
const struct hdmi_phy_funcs *funcs;
+ struct regulator **regs;
+ struct clk **clks;
};
-struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
-struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
-struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi);
+static inline void hdmi_phy_write(struct hdmi_phy *phy, u32 reg, u32 data)
+{
+ msm_writel(data, phy->mmio + reg);
+}
+
+static inline u32 hdmi_phy_read(struct hdmi_phy *phy, u32 reg)
+{
+ return msm_readl(phy->mmio + reg);
+}
+
+int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy);
+void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy);
+void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock);
+void msm_hdmi_phy_powerdown(struct hdmi_phy *phy);
+void __init msm_hdmi_phy_driver_register(void);
+void __exit msm_hdmi_phy_driver_unregister(void);
+
+#ifdef CONFIG_COMMON_CLK
+int msm_hdmi_pll_8960_init(struct platform_device *pdev);
+int msm_hdmi_pll_8996_init(struct platform_device *pdev);
+#else
+static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
/*
* audio:
*/
-int hdmi_audio_update(struct hdmi *hdmi);
-int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+int msm_hdmi_audio_update(struct hdmi *hdmi);
+int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
uint32_t num_of_channels, uint32_t channel_allocation,
uint32_t level_shift, bool down_mix);
-void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
+void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
/*
* hdmi bridge:
*/
-struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi);
-void hdmi_bridge_destroy(struct drm_bridge *bridge);
+struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi);
+void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
/*
* hdmi connector:
*/
-void hdmi_connector_irq(struct drm_connector *connector);
-struct drm_connector *hdmi_connector_init(struct hdmi *hdmi);
+void msm_hdmi_connector_irq(struct drm_connector *connector);
+struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
/*
* i2c adapter for ddc:
*/
-void hdmi_i2c_irq(struct i2c_adapter *i2c);
-void hdmi_i2c_destroy(struct i2c_adapter *i2c);
-struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
+void msm_hdmi_i2c_irq(struct i2c_adapter *i2c);
+void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c);
+struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi);
/*
* hdcp
*/
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
-void hdmi_hdcp_destroy(struct hdmi *hdmi);
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi);
+void msm_hdmi_hdcp_destroy(struct hdmi *hdmi);
+void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
#endif /* __HDMI_CONNECTOR_H__ */
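
The CONFIG_COMMON_CLK guards above use a standard kernel idiom worth calling out: when the clock framework is compiled out, static inline stubs keep callers free of #ifdefs and let the link succeed. A generic sketch of the same pattern (example_pll_init() is a hypothetical name, not part of the patch):

#include <linux/errno.h>
#include <linux/platform_device.h>

#ifdef CONFIG_COMMON_CLK
int example_pll_init(struct platform_device *pdev);
#else
static inline int example_pll_init(struct platform_device *pdev)
{
	/* Builds without the common clock framework get a harmless
	 * -ENODEV instead of an unresolved symbol. */
	return -ENODEV;
}
#endif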
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 10c45700aefe..34c7df6549c1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -559,7 +560,7 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370
-#define REG_HDMI_8x60_PHY_REG0 0x00000300
+#define REG_HDMI_8x60_PHY_REG0 0x00000000
#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
@@ -567,7 +568,7 @@ static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
}
-#define REG_HDMI_8x60_PHY_REG1 0x00000304
+#define REG_HDMI_8x60_PHY_REG1 0x00000004
#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
@@ -581,7 +582,7 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
}
-#define REG_HDMI_8x60_PHY_REG2 0x00000308
+#define REG_HDMI_8x60_PHY_REG2 0x00000008
#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
@@ -591,152 +592,152 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
-#define REG_HDMI_8x60_PHY_REG3 0x0000030c
+#define REG_HDMI_8x60_PHY_REG3 0x0000000c
#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
-#define REG_HDMI_8x60_PHY_REG4 0x00000310
+#define REG_HDMI_8x60_PHY_REG4 0x00000010
-#define REG_HDMI_8x60_PHY_REG5 0x00000314
+#define REG_HDMI_8x60_PHY_REG5 0x00000014
-#define REG_HDMI_8x60_PHY_REG6 0x00000318
+#define REG_HDMI_8x60_PHY_REG6 0x00000018
-#define REG_HDMI_8x60_PHY_REG7 0x0000031c
+#define REG_HDMI_8x60_PHY_REG7 0x0000001c
-#define REG_HDMI_8x60_PHY_REG8 0x00000320
+#define REG_HDMI_8x60_PHY_REG8 0x00000020
-#define REG_HDMI_8x60_PHY_REG9 0x00000324
+#define REG_HDMI_8x60_PHY_REG9 0x00000024
-#define REG_HDMI_8x60_PHY_REG10 0x00000328
+#define REG_HDMI_8x60_PHY_REG10 0x00000028
-#define REG_HDMI_8x60_PHY_REG11 0x0000032c
+#define REG_HDMI_8x60_PHY_REG11 0x0000002c
-#define REG_HDMI_8x60_PHY_REG12 0x00000330
+#define REG_HDMI_8x60_PHY_REG12 0x00000030
#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
-#define REG_HDMI_8960_PHY_REG0 0x00000400
+#define REG_HDMI_8960_PHY_REG0 0x00000000
-#define REG_HDMI_8960_PHY_REG1 0x00000404
+#define REG_HDMI_8960_PHY_REG1 0x00000004
-#define REG_HDMI_8960_PHY_REG2 0x00000408
+#define REG_HDMI_8960_PHY_REG2 0x00000008
-#define REG_HDMI_8960_PHY_REG3 0x0000040c
+#define REG_HDMI_8960_PHY_REG3 0x0000000c
-#define REG_HDMI_8960_PHY_REG4 0x00000410
+#define REG_HDMI_8960_PHY_REG4 0x00000010
-#define REG_HDMI_8960_PHY_REG5 0x00000414
+#define REG_HDMI_8960_PHY_REG5 0x00000014
-#define REG_HDMI_8960_PHY_REG6 0x00000418
+#define REG_HDMI_8960_PHY_REG6 0x00000018
-#define REG_HDMI_8960_PHY_REG7 0x0000041c
+#define REG_HDMI_8960_PHY_REG7 0x0000001c
-#define REG_HDMI_8960_PHY_REG8 0x00000420
+#define REG_HDMI_8960_PHY_REG8 0x00000020
-#define REG_HDMI_8960_PHY_REG9 0x00000424
+#define REG_HDMI_8960_PHY_REG9 0x00000024
-#define REG_HDMI_8960_PHY_REG10 0x00000428
+#define REG_HDMI_8960_PHY_REG10 0x00000028
-#define REG_HDMI_8960_PHY_REG11 0x0000042c
+#define REG_HDMI_8960_PHY_REG11 0x0000002c
-#define REG_HDMI_8960_PHY_REG12 0x00000430
+#define REG_HDMI_8960_PHY_REG12 0x00000030
#define HDMI_8960_PHY_REG12_SW_RESET 0x00000020
#define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080
-#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000434
+#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000034
-#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000438
+#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000038
-#define REG_HDMI_8960_PHY_REG_MISC0 0x0000043c
+#define REG_HDMI_8960_PHY_REG_MISC0 0x0000003c
-#define REG_HDMI_8960_PHY_REG13 0x00000440
+#define REG_HDMI_8960_PHY_REG13 0x00000040
-#define REG_HDMI_8960_PHY_REG14 0x00000444
+#define REG_HDMI_8960_PHY_REG14 0x00000044
-#define REG_HDMI_8960_PHY_REG15 0x00000448
+#define REG_HDMI_8960_PHY_REG15 0x00000048
-#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000500
+#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000000
-#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000504
+#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000004
-#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000508
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000008
-#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000050c
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000000c
-#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000510
+#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000010
-#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000514
+#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000014
-#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000518
+#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000018
#define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002
#define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000051c
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000001c
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000520
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000020
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000524
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000024
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000528
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000028
-#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000052c
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000002c
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000530
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000030
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000534
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000034
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000538
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000038
-#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000053c
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000003c
-#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000540
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000040
-#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000544
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000044
-#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000548
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000048
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000054c
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000004c
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000550
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000050
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000554
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000054
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000558
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000058
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000055c
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000005c
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000560
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000060
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000564
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000064
-#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000568
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000068
-#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000056c
+#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000006c
-#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000570
+#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000070
-#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000574
+#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000074
-#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000578
+#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000078
-#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000057c
+#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000007c
-#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000580
+#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000080
-#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000584
+#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000084
-#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000588
+#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000088
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000058c
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000008c
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000590
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000090
-#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000594
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000094
-#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000598
+#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000098
#define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001
-#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000059c
+#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000009c
#define REG_HDMI_8x74_ANA_CFG0 0x00000000
@@ -843,5 +844,501 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
#define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
+#define REG_HDMI_8996_PHY_CFG 0x00000000
+
+#define REG_HDMI_8996_PHY_PD_CTL 0x00000004
+
+#define REG_HDMI_8996_PHY_MODE 0x00000008
+
+#define REG_HDMI_8996_PHY_MISR_CLEAR 0x0000000c
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG0 0x00000010
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG1 0x00000014
+
+#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE0 0x00000018
+
+#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE1 0x0000001c
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN0 0x00000020
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN1 0x00000024
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG0 0x00000028
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG1 0x0000002c
+
+#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE0 0x00000030
+
+#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE1 0x00000034
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN0 0x00000038
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN1 0x0000003c
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS_SEL 0x00000040
+
+#define REG_HDMI_8996_PHY_TXCAL_CFG0 0x00000044
+
+#define REG_HDMI_8996_PHY_TXCAL_CFG1 0x00000048
+
+#define REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL 0x0000004c
+
+#define REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL 0x00000050
+
+#define REG_HDMI_8996_PHY_LANE_BIST_CONFIG 0x00000054
+
+#define REG_HDMI_8996_PHY_CLOCK 0x00000058
+
+#define REG_HDMI_8996_PHY_MISC1 0x0000005c
+
+#define REG_HDMI_8996_PHY_MISC2 0x00000060
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS0 0x00000064
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS1 0x00000068
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS2 0x0000006c
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS0 0x00000070
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS1 0x00000074
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS2 0x00000078
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS0 0x0000007c
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS1 0x00000080
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS2 0x00000084
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS3 0x00000088
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS0 0x0000008c
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS1 0x00000090
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS2 0x00000094
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS3 0x00000098
+
+#define REG_HDMI_8996_PHY_STATUS 0x0000009c
+
+#define REG_HDMI_8996_PHY_MISC3_STATUS 0x000000a0
+
+#define REG_HDMI_8996_PHY_MISC4_STATUS 0x000000a4
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS0 0x000000a8
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS1 0x000000ac
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS2 0x000000b0
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS3 0x000000b4
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID0 0x000000b8
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID1 0x000000bc
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID2 0x000000c0
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID3 0x000000c4
+
+#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL1 0x00000000
+
+#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL2 0x00000004
+
+#define REG_HDMI_PHY_QSERDES_COM_FREQ_UPDATE 0x00000008
+
+#define REG_HDMI_PHY_QSERDES_COM_BG_TIMER 0x0000000c
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER 0x00000010
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER1 0x00000014
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER2 0x00000018
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_PER1 0x0000001c
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_PER2 0x00000020
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1 0x00000024
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2 0x00000028
+
+#define REG_HDMI_PHY_QSERDES_COM_POST_DIV 0x0000002c
+
+#define REG_HDMI_PHY_QSERDES_COM_POST_DIV_MUX 0x00000030
+
+#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x00000034
+
+#define REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1 0x00000038
+
+#define REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL 0x0000003c
+
+#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE 0x00000040
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_EN 0x00000044
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_IVCO 0x00000048
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0 0x0000004c
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0 0x00000050
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0 0x00000054
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE1 0x00000058
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE1 0x0000005c
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE1 0x00000060
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE2 0x00000064
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD0 0x00000064
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE2 0x00000068
+
+#define REG_HDMI_PHY_QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x00000068
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE2 0x0000006c
+
+#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x0000006c
+
+#define REG_HDMI_PHY_QSERDES_COM_BG_TRIM 0x00000070
+
+#define REG_HDMI_PHY_QSERDES_COM_CLK_EP_DIV 0x00000074
+
+#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0 0x00000078
+
+#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE1 0x0000007c
+
+#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE2 0x00000080
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD1 0x00000080
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0 0x00000084
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE1 0x00000088
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE2 0x0000008c
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD2 0x0000008c
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0 0x00000090
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE1 0x00000094
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE2 0x00000098
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD3 0x00000098
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CNTRL 0x0000009c
+
+#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_CTRL 0x000000a0
+
+#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_DC 0x000000a4
+
+#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x000000a8
+
+#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x000000a8
+
+#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL 0x000000ac
+
+#define REG_HDMI_PHY_QSERDES_COM_CML_SYSCLK_SEL 0x000000b0
+
+#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL 0x000000b4
+
+#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL2 0x000000b8
+
+#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL 0x000000bc
+
+#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL2 0x000000c0
+
+#define REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM 0x000000c4
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN 0x000000c8
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_CFG 0x000000cc
+
+#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0 0x000000d0
+
+#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE1 0x000000d4
+
+#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE2 0x000000d8
+
+#define REG_HDMI_PHY_QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x000000d8
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0 0x000000dc
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0 0x000000e0
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0 0x000000e4
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE1 0x000000e8
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE1 0x000000ec
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE1 0x000000f0
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE2 0x000000f4
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL1 0x000000f4
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE2 0x000000f8
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL2 0x000000f8
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE2 0x000000fc
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD4 0x000000fc
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_INITVAL 0x00000100
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_EN 0x00000104
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00000108
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x0000010c
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00000110
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00000114
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE2 0x00000118
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL1 0x00000118
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE2 0x0000011c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL2 0x0000011c
+
+#define REG_HDMI_PHY_QSERDES_COM_RES_TRIM_CONTROL2 0x00000120
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL 0x00000124
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP 0x00000128
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE0 0x0000012c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE0 0x00000130
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE1 0x00000134
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE1 0x00000138
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE2 0x0000013c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL1 0x0000013c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE2 0x00000140
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL2 0x00000140
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER1 0x00000144
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER2 0x00000148
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR 0x0000014c
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR_CLK 0x00000150
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_OUT_STATUS 0x00000154
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_READY_STATUS 0x00000158
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_STATUS 0x0000015c
+
+#define REG_HDMI_PHY_QSERDES_COM_RESET_SM_STATUS 0x00000160
+
+#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CODE_STATUS 0x00000164
+
+#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE1_STATUS 0x00000168
+
+#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE2_STATUS 0x0000016c
+
+#define REG_HDMI_PHY_QSERDES_COM_BG_CTRL 0x00000170
+
+#define REG_HDMI_PHY_QSERDES_COM_CLK_SELECT 0x00000174
+
+#define REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL 0x00000178
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x0000017c
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_ANALOG 0x00000180
+
+#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV 0x00000184
+
+#define REG_HDMI_PHY_QSERDES_COM_SW_RESET 0x00000188
+
+#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN 0x0000018c
+
+#define REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS 0x00000190
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG 0x00000194
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RATE_OVERRIDE 0x00000198
+
+#define REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL 0x0000019c
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS0 0x000001a0
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS1 0x000001a4
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS2 0x000001a8
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS3 0x000001ac
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS_SEL 0x000001b0
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC1 0x000001b4
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC2 0x000001b8
+
+#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE1 0x000001bc
+
+#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE2 0x000001c0
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD5 0x000001c4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_MODE_LANENO 0x00000000
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_INVERT 0x00000004
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE 0x00000008
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_ONE 0x0000000c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_TWO 0x00000010
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_THREE 0x00000014
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL 0x00000018
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POST2_EMPH 0x0000001c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BOOST_LVL_UP_DN 0x00000020
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES 0x00000024
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_IDLE_LVL_LARGE_AMP 0x00000028
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL 0x0000002c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET 0x00000030
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN 0x00000034
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRE_STALL_LDO_BOOST_EN 0x00000038
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND 0x0000003c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_SLEW_CNTL 0x00000040
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_INTERFACE_SELECT 0x00000044
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_LPB_EN 0x00000048
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_TX 0x0000004c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_RX 0x00000050
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET 0x00000054
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH1 0x00000058
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH2 0x0000005c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_SERDES_BYP_EN_OUT 0x00000060
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_DEBUG_BUS_SEL 0x00000064
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x00000068
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POL_INV 0x0000006c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN 0x00000070
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN1 0x00000074
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN2 0x00000078
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN3 0x0000007c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN4 0x00000080
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN5 0x00000084
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN6 0x00000088
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN7 0x0000008c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN8 0x00000090
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE 0x00000094
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE 0x00000098
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x0000009c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL1 0x000000a0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL2 0x000000a4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL 0x000000a8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL_2 0x000000ac
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED1 0x000000b0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED2 0x000000b4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED3 0x000000b8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED4 0x000000bc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN 0x000000c0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN_MUXES 0x000000c4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN 0x000000c8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_INTERFACE_MODE 0x000000cc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_CTRL 0x000000d0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_ENCODED_OR_DATA 0x000000d4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND2 0x000000d8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND2 0x000000dc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND2 0x000000e0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND2 0x000000e4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND0_1 0x000000e8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND0_1 0x000000ec
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND0_1 0x000000f0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND0_1 0x000000f4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1 0x000000f8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2 0x000000fc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV_CNTL 0x00000100
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_STATUS 0x00000104
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT1 0x00000108
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT2 0x0000010c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV 0x00000110
+
#endif /* HDMI_XML */
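
Note that the 8x60/8960 PHY register offsets above were rebased from core-relative addresses (the 0x300/0x400/0x500 blocks) to zero: the PHY now maps its own register space (phy->mmio) instead of poking through the core's. A hedged sketch of an access through the new helpers, assuming the hdmi.h definitions this patch adds (example_phy_sw_reset() is a hypothetical helper, not from the patch):

static void example_phy_sw_reset(struct hdmi_phy *phy)
{
	u32 val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);

	/* Pulse the PHY software reset bit defined above; offsets are
	 * now relative to the PHY's own mapping. */
	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12,
		       val | HDMI_8960_PHY_REG12_SW_RESET);
	hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12,
		       val & ~HDMI_8960_PHY_REG12_SW_RESET);
}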
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index df232e20c13e..a54d3bb5baad 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -89,7 +89,7 @@ static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
return NULL;
}
-int hdmi_audio_update(struct hdmi *hdmi)
+int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
struct hdmi_audio_infoframe *info = &audio->infoframe;
@@ -232,7 +232,7 @@ int hdmi_audio_update(struct hdmi *hdmi)
return 0;
}
-int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
uint32_t num_of_channels, uint32_t channel_allocation,
uint32_t level_shift, bool down_mix)
{
@@ -252,10 +252,10 @@ int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
audio->infoframe.level_shift_value = level_shift;
audio->infoframe.downmix_inhibit = down_mix;
- return hdmi_audio_update(hdmi);
+ return msm_hdmi_audio_update(hdmi);
}
-void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
+void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
{
struct hdmi_audio *audio;
@@ -268,5 +268,5 @@ void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
return;
audio->rate = rate;
- hdmi_audio_update(hdmi);
+ msm_hdmi_audio_update(hdmi);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 92b69ae8caf9..bacbd5d8df0e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -23,11 +23,11 @@ struct hdmi_bridge {
};
#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
-void hdmi_bridge_destroy(struct drm_bridge *bridge)
+void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
{
}
-static void power_on(struct drm_bridge *bridge)
+static void msm_hdmi_power_on(struct drm_bridge *bridge)
{
struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
@@ -86,7 +86,7 @@ static void power_off(struct drm_bridge *bridge)
}
}
-static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
@@ -95,51 +95,51 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
DBG("power up");
if (!hdmi->power_on) {
- power_on(bridge);
+ msm_hdmi_phy_resource_enable(phy);
+ msm_hdmi_power_on(bridge);
hdmi->power_on = true;
- hdmi_audio_update(hdmi);
+ msm_hdmi_audio_update(hdmi);
}
- if (phy)
- phy->funcs->powerup(phy, hdmi->pixclock);
+ msm_hdmi_phy_powerup(phy, hdmi->pixclock);
- hdmi_set_mode(hdmi, true);
+ msm_hdmi_set_mode(hdmi, true);
if (hdmi->hdcp_ctrl)
- hdmi_hdcp_on(hdmi->hdcp_ctrl);
+ msm_hdmi_hdcp_on(hdmi->hdcp_ctrl);
}
-static void hdmi_bridge_enable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_enable(struct drm_bridge *bridge)
{
}
-static void hdmi_bridge_disable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_disable(struct drm_bridge *bridge)
{
}
-static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
+static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
if (hdmi->hdcp_ctrl)
- hdmi_hdcp_off(hdmi->hdcp_ctrl);
+ msm_hdmi_hdcp_off(hdmi->hdcp_ctrl);
DBG("power down");
- hdmi_set_mode(hdmi, false);
+ msm_hdmi_set_mode(hdmi, false);
- if (phy)
- phy->funcs->powerdown(phy);
+ msm_hdmi_phy_powerdown(phy);
if (hdmi->power_on) {
power_off(bridge);
hdmi->power_on = false;
- hdmi_audio_update(hdmi);
+ msm_hdmi_audio_update(hdmi);
+ msm_hdmi_phy_resource_disable(phy);
}
}
-static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
+static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -196,20 +196,20 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
- hdmi_audio_update(hdmi);
+ msm_hdmi_audio_update(hdmi);
}
-static const struct drm_bridge_funcs hdmi_bridge_funcs = {
- .pre_enable = hdmi_bridge_pre_enable,
- .enable = hdmi_bridge_enable,
- .disable = hdmi_bridge_disable,
- .post_disable = hdmi_bridge_post_disable,
- .mode_set = hdmi_bridge_mode_set,
+static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
+ .pre_enable = msm_hdmi_bridge_pre_enable,
+ .enable = msm_hdmi_bridge_enable,
+ .disable = msm_hdmi_bridge_disable,
+ .post_disable = msm_hdmi_bridge_post_disable,
+ .mode_set = msm_hdmi_bridge_mode_set,
};
/* initialize bridge */
-struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
+struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
{
struct drm_bridge *bridge = NULL;
struct hdmi_bridge *hdmi_bridge;
@@ -225,7 +225,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
hdmi_bridge->hdmi = hdmi;
bridge = &hdmi_bridge->base;
- bridge->funcs = &hdmi_bridge_funcs;
+ bridge->funcs = &msm_hdmi_bridge_funcs;
ret = drm_bridge_attach(hdmi->dev, bridge);
if (ret)
@@ -235,7 +235,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
fail:
if (bridge)
- hdmi_bridge_destroy(bridge);
+ msm_hdmi_bridge_destroy(bridge);
return ERR_PTR(ret);
}
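
The bridge hunks above move PHY handling behind the new msm_hdmi_phy_* entry points and make the ordering explicit: PHY resources come up first and go down last. A condensed sketch of that sequencing, assuming the declarations this patch adds to hdmi.h (example_enable()/example_disable() are hypothetical wrappers; power_on()/power_off() remain static to hdmi_bridge.c, so they appear here only as comments):

static void example_enable(struct hdmi *hdmi)
{
	struct hdmi_phy *phy = hdmi->phy;

	msm_hdmi_phy_resource_enable(phy);	/* 1. PHY clocks/regulators */
	/* 2. controller rails/clocks (power_on() in the bridge) */
	msm_hdmi_phy_powerup(phy, hdmi->pixclock); /* 3. PHY at pixclock */
	msm_hdmi_set_mode(hdmi, true);		/* 4. finally enable the core */
}

static void example_disable(struct hdmi *hdmi)
{
	struct hdmi_phy *phy = hdmi->phy;

	msm_hdmi_set_mode(hdmi, false);		/* reverse order on the way down */
	msm_hdmi_phy_powerdown(phy);
	/* controller rails/clocks off (power_off() in the bridge) */
	msm_hdmi_phy_resource_disable(phy);
}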
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index a3b05ae52dae..26129bff2dd6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -28,7 +28,7 @@ struct hdmi_connector {
};
#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
-static void hdmi_phy_reset(struct hdmi *hdmi)
+static void msm_hdmi_phy_reset(struct hdmi *hdmi)
{
unsigned int val;
@@ -81,114 +81,54 @@ static int gpio_config(struct hdmi *hdmi, bool on)
{
struct device *dev = &hdmi->pdev->dev;
const struct hdmi_platform_config *config = hdmi->config;
- int ret;
+ int ret, i;
if (on) {
- if (config->ddc_clk_gpio != -1) {
- ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
- if (ret) {
- dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
- goto error1;
- }
- gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
- }
-
- if (config->ddc_data_gpio != -1) {
- ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
- if (ret) {
- dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
- goto error2;
- }
- gpio_set_value_cansleep(config->ddc_data_gpio, 1);
- }
-
- ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
- if (ret) {
- dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_HPD", config->hpd_gpio, ret);
- goto error3;
- }
- gpio_direction_input(config->hpd_gpio);
- gpio_set_value_cansleep(config->hpd_gpio, 1);
-
- if (config->mux_en_gpio != -1) {
- ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
- if (ret) {
- dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_MUX_EN", config->mux_en_gpio, ret);
- goto error4;
- }
- gpio_set_value_cansleep(config->mux_en_gpio, 1);
- }
-
- if (config->mux_sel_gpio != -1) {
- ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL");
- if (ret) {
- dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_MUX_SEL", config->mux_sel_gpio, ret);
- goto error5;
+ for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
+ struct hdmi_gpio_data gpio = config->gpios[i];
+
+ if (gpio.num != -1) {
+ ret = gpio_request(gpio.num, gpio.label);
+ if (ret) {
+ dev_err(dev,
+ "'%s'(%d) gpio_request failed: %d\n",
+ gpio.label, gpio.num, ret);
+ goto err;
+ }
+
+ if (gpio.output) {
+ gpio_direction_output(gpio.num,
+ gpio.value);
+ } else {
+ gpio_direction_input(gpio.num);
+ gpio_set_value_cansleep(gpio.num,
+ gpio.value);
+ }
}
- gpio_set_value_cansleep(config->mux_sel_gpio, 0);
}
- if (config->mux_lpm_gpio != -1) {
- ret = gpio_request(config->mux_lpm_gpio,
- "HDMI_MUX_LPM");
- if (ret) {
- dev_err(dev,
- "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_MUX_LPM",
- config->mux_lpm_gpio, ret);
- goto error6;
- }
- gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
- }
DBG("gpio on");
} else {
- if (config->ddc_clk_gpio != -1)
- gpio_free(config->ddc_clk_gpio);
-
- if (config->ddc_data_gpio != -1)
- gpio_free(config->ddc_data_gpio);
+ for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
+ struct hdmi_gpio_data gpio = config->gpios[i];
- gpio_free(config->hpd_gpio);
+ if (gpio.output) {
+ int value = gpio.value ? 0 : 1;
- if (config->mux_en_gpio != -1) {
- gpio_set_value_cansleep(config->mux_en_gpio, 0);
- gpio_free(config->mux_en_gpio);
- }
+ gpio_set_value_cansleep(gpio.num, value);
+ }
- if (config->mux_sel_gpio != -1) {
- gpio_set_value_cansleep(config->mux_sel_gpio, 1);
- gpio_free(config->mux_sel_gpio);
- }
+ gpio_free(gpio.num);
+		}
- if (config->mux_lpm_gpio != -1) {
- gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
- gpio_free(config->mux_lpm_gpio);
- }
DBG("gpio off");
}
return 0;
+err:
+ while (i--)
+ gpio_free(config->gpios[i].num);
-error6:
- if (config->mux_sel_gpio != -1)
- gpio_free(config->mux_sel_gpio);
-error5:
- if (config->mux_en_gpio != -1)
- gpio_free(config->mux_en_gpio);
-error4:
- gpio_free(config->hpd_gpio);
-error3:
- if (config->ddc_data_gpio != -1)
- gpio_free(config->ddc_data_gpio);
-error2:
- if (config->ddc_clk_gpio != -1)
- gpio_free(config->ddc_clk_gpio);
-error1:
return ret;
}
@@ -239,9 +179,9 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
}
}
- hdmi_set_mode(hdmi, false);
- hdmi_phy_reset(hdmi);
- hdmi_set_mode(hdmi, true);
+ msm_hdmi_set_mode(hdmi, false);
+ msm_hdmi_phy_reset(hdmi);
+ msm_hdmi_set_mode(hdmi, true);
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@@ -278,7 +218,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
- hdmi_set_mode(hdmi, false);
+ msm_hdmi_set_mode(hdmi, false);
for (i = 0; i < config->hpd_clk_cnt; i++)
clk_disable_unprepare(hdmi->hpd_clks[i]);
@@ -300,7 +240,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
}
static void
-hotplug_work(struct work_struct *work)
+msm_hdmi_hotplug_work(struct work_struct *work)
{
struct hdmi_connector *hdmi_connector =
container_of(work, struct hdmi_connector, hpd_work);
@@ -308,7 +248,7 @@ hotplug_work(struct work_struct *work)
drm_helper_hpd_irq_event(connector->dev);
}
-void hdmi_connector_irq(struct drm_connector *connector)
+void msm_hdmi_connector_irq(struct drm_connector *connector)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
@@ -345,10 +285,13 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi)
connector_status_connected : connector_status_disconnected;
}
+#define HPD_GPIO_INDEX 2
static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
{
const struct hdmi_platform_config *config = hdmi->config;
- return gpio_get_value(config->hpd_gpio) ?
+ struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
+
+ return gpio_get_value(hpd_gpio.num) ?
connector_status_connected :
connector_status_disconnected;
}
@@ -358,9 +301,18 @@ static enum drm_connector_status hdmi_connector_detect(
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
enum drm_connector_status stat_gpio, stat_reg;
int retry = 20;
+ /*
+	 * Some platforms may not have an HPD GPIO. Rely only on the status
+	 * provided by REG_HDMI_HPD_INT_STATUS in that case.

+ */
+ if (hpd_gpio.num == -1)
+ return detect_reg(hdmi);
+
do {
stat_gpio = detect_gpio(hdmi);
stat_reg = detect_reg(hdmi);
@@ -395,7 +347,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
kfree(hdmi_connector);
}
-static int hdmi_connector_get_modes(struct drm_connector *connector)
+static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
@@ -421,7 +373,7 @@ static int hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int hdmi_connector_mode_valid(struct drm_connector *connector,
+static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
@@ -451,7 +403,7 @@ static int hdmi_connector_mode_valid(struct drm_connector *connector,
}
static struct drm_encoder *
-hdmi_connector_best_encoder(struct drm_connector *connector)
+msm_hdmi_connector_best_encoder(struct drm_connector *connector)
{
struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
return hdmi_connector->hdmi->encoder;
@@ -467,14 +419,14 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
- .get_modes = hdmi_connector_get_modes,
- .mode_valid = hdmi_connector_mode_valid,
- .best_encoder = hdmi_connector_best_encoder,
+static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
+ .get_modes = msm_hdmi_connector_get_modes,
+ .mode_valid = msm_hdmi_connector_mode_valid,
+ .best_encoder = msm_hdmi_connector_best_encoder,
};
/* initialize connector */
-struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
+struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
{
struct drm_connector *connector = NULL;
struct hdmi_connector *hdmi_connector;
@@ -487,13 +439,13 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
}
hdmi_connector->hdmi = hdmi;
- INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
+ INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
connector = &hdmi_connector->base;
drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
- drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
+ drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
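
gpio_config() above trades six copy-pasted request blocks and a ladder of error labels for one loop with a 'while (i--)' unwind that releases exactly the pins already claimed. The core of that pattern, reduced to a sketch (example_request_all() is hypothetical; this version also skips -1 entries on the unwind path, which the patch itself does not):

#include <linux/gpio.h>

static int example_request_all(struct hdmi_gpio_data *descs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		if (descs[i].num == -1)	/* optional pin not wired up */
			continue;

		ret = gpio_request(descs[i].num, descs[i].label);
		if (ret)
			goto err;

		if (descs[i].output)
			gpio_direction_output(descs[i].num, descs[i].value);
		else
			gpio_direction_input(descs[i].num);
	}

	return 0;

err:
	/* i indexes the first failed pin; free everything before it */
	while (i--)
		if (descs[i].num != -1)
			gpio_free(descs[i].num);

	return ret;
}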
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 1dc9c34eb0df..0baaaaabd002 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -84,7 +84,7 @@ struct hdmi_hdcp_ctrl {
bool max_dev_exceeded;
};
-static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+static int msm_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
u8 *data, u16 data_len)
{
int rc;
@@ -122,7 +122,7 @@ retry:
#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
-static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+static int msm_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
u8 *data, u16 data_len)
{
int rc;
@@ -162,7 +162,7 @@ retry:
return rc;
}
-static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
+static int msm_hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
u32 *pdata, u32 count)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -202,7 +202,7 @@ static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
return ret;
}
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val, hdcp_int_status;
@@ -247,7 +247,7 @@ void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
}
}
-static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
+static int msm_hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
{
int rc;
@@ -264,7 +264,7 @@ static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
return 0;
}
-static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -287,7 +287,7 @@ static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return 0;
}
-static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val, failure, nack0;
@@ -337,7 +337,7 @@ static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
@@ -350,7 +350,7 @@ static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
/* If previous msleep is aborted, skip this msleep */
if (!rc)
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
@@ -362,7 +362,7 @@ static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return rc;
}
-static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
u32 hdcp_ddc_status, ddc_hw_status;
@@ -394,7 +394,7 @@ static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return -ETIMEDOUT;
}
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
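
These HDCP hunks lean repeatedly on one polling idiom: retry a bounded number of times, sleeping through a helper that an abort event can cut short. The idiom in isolation, as a hedged sketch (example_poll() and example_ready() are hypothetical; msm_hdmi_hdcp_msleep() and AUTH_ABORT_EV are the real names from this file):

static int example_poll(struct hdmi_hdcp_ctrl *hdcp_ctrl,
			bool (*example_ready)(struct hdmi_hdcp_ctrl *))
{
	u32 timeout_count = 100;
	int rc;

	do {
		if (example_ready(hdcp_ctrl))
			return 0;

		if (--timeout_count == 0)
			return -ETIMEDOUT;

		/* Sleep ~20ms, but bail out early if authentication is
		 * aborted (AUTH_ABORT_EV), as msm_hdmi_hdcp_msleep()
		 * does in the hunks above. */
		rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
		if (rc)
			return rc;
	} while (1);
}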
@@ -402,7 +402,7 @@ static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return 0;
}
-static void hdmi_hdcp_reauth_work(struct work_struct *work)
+static void msm_hdmi_hdcp_reauth_work(struct work_struct *work)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
struct hdmi_hdcp_ctrl, hdcp_reauth_work);
@@ -430,7 +430,7 @@ static void hdmi_hdcp_reauth_work(struct work_struct *work)
HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
/* Wait to be clean on DDC HW engine */
- if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
+ if (msm_hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
pr_info("%s: reauth work aborted\n", __func__);
return;
}
@@ -461,7 +461,7 @@ static void hdmi_hdcp_reauth_work(struct work_struct *work)
queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
}
-static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 link0_status;
@@ -470,7 +470,7 @@ static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
int rc;
if (!hdcp_ctrl->aksv_valid) {
- rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
if (rc) {
pr_err("%s: AKSV validation failed\n", __func__);
hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
@@ -538,12 +538,12 @@ static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
DBG("An not ready after enabling HDCP");
/* Clear any DDC failures from previous tries before enabling HDCP */
- rc = reset_hdcp_ddc_failures(hdcp_ctrl);
+ rc = msm_reset_hdcp_ddc_failures(hdcp_ctrl);
return rc;
}
-static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static void msm_hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val;
@@ -561,7 +561,7 @@ static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
}
-static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static void msm_hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val;
@@ -596,7 +596,7 @@ static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
* Write An and AKSV to sink
* Read BKSV from sink and write into HDCP engine
*/
-static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -621,7 +621,7 @@ static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return -ETIMEDOUT;
}
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
@@ -643,7 +643,7 @@ static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return -ETIMEDOUT;
}
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
@@ -651,7 +651,7 @@ static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return 0;
}
-static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc = 0;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -676,7 +676,7 @@ static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
aksv[4] = link0_aksv_1 & 0xFF;
/* Write An to offset 0x18 */
- rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
+ rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
(u16)sizeof(link0_an));
if (rc) {
pr_err("%s:An write failed\n", __func__);
@@ -685,7 +685,7 @@ static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
/* Write AKSV to offset 0x10 */
- rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
+ rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
if (rc) {
pr_err("%s:AKSV write failed\n", __func__);
return rc;
@@ -695,7 +695,7 @@ static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return 0;
}
-static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc = 0;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -703,7 +703,7 @@ static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
u32 reg[2], data[2];
/* Read BKSV at offset 0x00 */
- rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
if (rc) {
pr_err("%s:BKSV read failed\n", __func__);
return rc;
@@ -728,19 +728,19 @@ static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
data[0] = hdcp_ctrl->bksv_lsb;
reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
data[1] = hdcp_ctrl->bksv_msb;
- rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
return rc;
}
-static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc = 0;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg, data;
u8 bcaps;
- rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
if (rc) {
pr_err("%s:BCAPS read failed\n", __func__);
return rc;
@@ -753,26 +753,26 @@ static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
/* Write BCAPS to the hardware */
reg = REG_HDMI_HDCP_RCVPORT_DATA12;
data = (u32)bcaps;
- rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
return rc;
}
-static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
unsigned long flags;
int rc;
/* Wait for AKSV key and An ready */
- rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
if (rc) {
pr_err("%s: wait key and an ready failed\n", __func__);
return rc;
};
/* Read BCAPS and send to HDCP engine */
- rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl);
if (rc) {
pr_err("%s: read bcaps error, abort\n", __func__);
return rc;
@@ -785,14 +785,14 @@ static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
/* Send AKSV and An to sink */
- rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_send_aksv_an(hdcp_ctrl);
if (rc) {
pr_err("%s:An/Aksv write failed\n", __func__);
return rc;
}
/* Read BKSV and send to HDCP engine */
- rc = hdmi_hdcp_recv_bksv(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_recv_bksv(hdcp_ctrl);
if (rc) {
pr_err("%s:BKSV Process failed\n", __func__);
return rc;
@@ -812,7 +812,7 @@ static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
}
/* read R0' from sink and pass it to HDCP engine */
-static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
int rc = 0;
@@ -822,12 +822,12 @@ static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
* HDCP Compliance Test case 1A-01:
* Wait here at least 100ms before reading R0'
*/
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
if (rc)
return rc;
/* Read R0' at offset 0x08 */
- rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
if (rc) {
pr_err("%s:R0' read failed\n", __func__);
return rc;
@@ -842,14 +842,14 @@ static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
}
/* Wait for authenticating result: R0/R0' are matched or not */
-static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 link0_status;
int rc;
/* wait for hdcp irq, 10 sec should be long enough */
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
if (!rc) {
pr_err("%s: Wait Auth IRQ timeout\n", __func__);
return -ETIMEDOUT;
@@ -869,7 +869,7 @@ static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return 0;
}
-static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+static int msm_hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
u16 *pbstatus)
{
int rc;
@@ -880,7 +880,7 @@ static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
u8 buf[2];
/* Read BSTATUS at offset 0x41 */
- rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
if (rc) {
pr_err("%s: BSTATUS read failed\n", __func__);
goto error;
@@ -936,7 +936,7 @@ error:
return rc;
}
-static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
+static int msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
@@ -953,7 +953,7 @@ static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
timeout_count = 100;
do {
/* Read BCAPS at offset 0x40 */
- rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
if (rc) {
pr_err("%s: BCAPS read failed\n", __func__);
return rc;
@@ -968,12 +968,12 @@ static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
return -ETIMEDOUT;
}
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
- rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+ rc = msm_hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
if (rc) {
pr_err("%s: bstatus error\n", __func__);
return rc;
@@ -982,7 +982,7 @@ static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
/* Write BSTATUS and BCAPS to HDCP registers */
reg = REG_HDMI_HDCP_RCVPORT_DATA12;
data = bcaps | (bstatus << 8);
- rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
if (rc) {
pr_err("%s: BSTATUS write failed\n", __func__);
return rc;
@@ -997,7 +997,7 @@ static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
* transfer V' from sink to HDCP engine
* reset SHA engine
*/
-static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
int rc = 0;
@@ -1016,7 +1016,7 @@ static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
for (i = 0; i < size; i++) {
rd = &reg_data[i];
- rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
if (rc) {
pr_err("%s: Read %s failed\n", __func__, rd->name);
@@ -1027,13 +1027,13 @@ static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
reg[i] = reg_data[i].reg_id;
}
- rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
error:
return rc;
}
-static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -1041,7 +1041,7 @@ static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
ksv_bytes = 5 * hdcp_ctrl->dev_count;
- rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
hdcp_ctrl->ksv_list, ksv_bytes);
if (rc)
pr_err("%s: KSV FIFO read failed\n", __func__);
@@ -1049,7 +1049,7 @@ static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return rc;
}
-static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
u32 reg[2], data[2];
u32 rc = 0;
@@ -1059,12 +1059,12 @@ static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
reg[1] = REG_HDMI_HDCP_SHA_CTRL;
data[1] = HDCP_REG_DISABLE;
- rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
return rc;
}
-static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
+static int msm_hdmi_hdcp_auth_part2_recv_ksv_fifo(
struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
@@ -1081,7 +1081,7 @@ static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
*/
timeout_count = 100;
do {
- rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
if (!rc)
break;
@@ -1091,19 +1091,19 @@ static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
return -ETIMEDOUT;
}
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
- rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_transfer_v_h(hdcp_ctrl);
if (rc) {
pr_err("%s: transfer V failed\n", __func__);
return rc;
}
/* reset SHA engine before writing the ksv fifo */
- rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
if (rc) {
pr_err("%s: fail to reset sha engine\n", __func__);
return rc;
@@ -1120,7 +1120,7 @@ static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
* If the last byte is written, we need to poll for
* HDCP_SHA_COMP_DONE to wait until HW finish
*/
-static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int i;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -1169,7 +1169,7 @@ static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
reg = REG_HDMI_HDCP_SHA_DATA;
data = reg_val;
- rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
if (rc)
return rc;
@@ -1184,7 +1184,7 @@ static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
}
/* write ksv fifo into HDCP engine */
-static int hdmi_hdcp_auth_part2_write_ksv_fifo(
+static int msm_hdmi_hdcp_auth_part2_write_ksv_fifo(
struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc;
@@ -1193,7 +1193,7 @@ static int hdmi_hdcp_auth_part2_write_ksv_fifo(
hdcp_ctrl->ksv_fifo_w_index = 0;
timeout_count = 100;
do {
- rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
if (!rc)
break;
@@ -1206,7 +1206,7 @@ static int hdmi_hdcp_auth_part2_write_ksv_fifo(
return -ETIMEDOUT;
}
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
@@ -1214,7 +1214,7 @@ static int hdmi_hdcp_auth_part2_write_ksv_fifo(
return 0;
}
-static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+static int msm_hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
int rc = 0;
struct hdmi *hdmi = hdcp_ctrl->hdmi;
@@ -1232,7 +1232,7 @@ static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return -ETIMEDOUT;
}
- rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
if (rc)
return rc;
} while (1);
@@ -1240,32 +1240,32 @@ static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
return 0;
}
-static void hdmi_hdcp_auth_work(struct work_struct *work)
+static void msm_hdmi_hdcp_auth_work(struct work_struct *work)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
struct hdmi_hdcp_ctrl, hdcp_auth_work);
int rc;
- rc = hdmi_hdcp_auth_prepare(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_auth_prepare(hdcp_ctrl);
if (rc) {
pr_err("%s: auth prepare failed %d\n", __func__, rc);
goto end;
}
/* HDCP PartI */
- rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
if (rc) {
pr_err("%s: key exchange failed %d\n", __func__, rc);
goto end;
}
- rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
if (rc) {
pr_err("%s: receive r0 failed %d\n", __func__, rc);
goto end;
}
- rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
if (rc) {
pr_err("%s: verify r0 failed %d\n", __func__, rc);
goto end;
@@ -1275,25 +1275,25 @@ static void hdmi_hdcp_auth_work(struct work_struct *work)
goto end;
/* HDCP PartII */
- rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
if (rc) {
pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
goto end;
}
- rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
if (rc) {
pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
goto end;
}
- rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
if (rc) {
pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
goto end;
}
- rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
+ rc = msm_hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
if (rc)
pr_err("%s: check v match failed %d\n", __func__, rc);
@@ -1304,13 +1304,13 @@ end:
pr_info("%s: hdcp is not supported\n", __func__);
} else if (rc) {
pr_err("%s: hdcp authentication failed\n", __func__);
- hdmi_hdcp_auth_fail(hdcp_ctrl);
+ msm_hdmi_hdcp_auth_fail(hdcp_ctrl);
} else {
- hdmi_hdcp_auth_done(hdcp_ctrl);
+ msm_hdmi_hdcp_auth_done(hdcp_ctrl);
}
}
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val;
@@ -1335,7 +1335,7 @@ void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
}
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
unsigned long flags;
@@ -1399,7 +1399,7 @@ void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
DBG("HDCP: Off");
}
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
@@ -1413,8 +1413,8 @@ struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
if (!hdcp_ctrl)
return ERR_PTR(-ENOMEM);
- INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
- INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work);
+ INIT_WORK(&hdcp_ctrl->hdcp_auth_work, msm_hdmi_hdcp_auth_work);
+ INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, msm_hdmi_hdcp_reauth_work);
init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
hdcp_ctrl->hdmi = hdmi;
hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
@@ -1428,7 +1428,7 @@ struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
return hdcp_ctrl;
}
-void hdmi_hdcp_destroy(struct hdmi *hdmi)
+void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
{
if (hdmi && hdmi->hdcp_ctrl) {
kfree(hdmi->hdcp_ctrl);
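The auth work function above chains the HDCP part-I and part-II stages strictly in order and aborts on the first non-zero return. A minimal sketch of that staged pattern, with a hypothetical context pointer and stage table (not driver API):

/*
 * Illustrative staged-pipeline pattern, mirroring how
 * msm_hdmi_hdcp_auth_work() bails out on the first failing stage.
 */
typedef int (*auth_stage_fn)(void *ctx);

static int run_auth_stages(void *ctx, const auth_stage_fn *stages, int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = stages[i](ctx);
		if (rc)
			return rc;	/* first error aborts, as in auth_work */
	}
	return 0;
}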
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index f4ab7f70fed1..de9007e72f4e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -97,7 +97,7 @@ static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
return hdmi_i2c->sw_done;
}
-static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
+static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
struct i2c_msg *msgs, int num)
{
struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
@@ -216,17 +216,17 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
return i;
}
-static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
+static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static const struct i2c_algorithm hdmi_i2c_algorithm = {
- .master_xfer = hdmi_i2c_xfer,
- .functionality = hdmi_i2c_func,
+static const struct i2c_algorithm msm_hdmi_i2c_algorithm = {
+ .master_xfer = msm_hdmi_i2c_xfer,
+ .functionality = msm_hdmi_i2c_func,
};
-void hdmi_i2c_irq(struct i2c_adapter *i2c)
+void msm_hdmi_i2c_irq(struct i2c_adapter *i2c)
{
struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
@@ -234,14 +234,14 @@ void hdmi_i2c_irq(struct i2c_adapter *i2c)
wake_up_all(&hdmi_i2c->ddc_event);
}
-void hdmi_i2c_destroy(struct i2c_adapter *i2c)
+void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c)
{
struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
i2c_del_adapter(i2c);
kfree(hdmi_i2c);
}
-struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
+struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
{
struct drm_device *dev = hdmi->dev;
struct hdmi_i2c_adapter *hdmi_i2c;
@@ -264,7 +264,7 @@ struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
i2c->class = I2C_CLASS_DDC;
snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
i2c->dev.parent = &hdmi->pdev->dev;
- i2c->algo = &hdmi_i2c_algorithm;
+ i2c->algo = &msm_hdmi_i2c_algorithm;
ret = i2c_add_adapter(i2c);
if (ret) {
@@ -276,6 +276,6 @@ struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
fail:
if (i2c)
- hdmi_i2c_destroy(i2c);
+ msm_hdmi_i2c_destroy(i2c);
return ERR_PTR(ret);
}
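hdmi_i2c.c exposes the DDC bus through a standard struct i2c_algorithm vtable, so the rename only touches the two ops and the init/teardown paths. A minimal, self-contained sketch of that registration pattern (all names below are illustrative, not part of this patch):

#include <linux/i2c.h>

/* Pretend transfer: report every message as completed. */
static int sketch_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	return num;
}

static u32 sketch_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

/* The adapter points at this const vtable, as hdmi_i2c.c does. */
static const struct i2c_algorithm sketch_algo = {
	.master_xfer	= sketch_xfer,
	.functionality	= sketch_func,
};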
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
new file mode 100644
index 000000000000..534ce5b49781
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_device.h>
+
+#include "hdmi.h"
+
+static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_cfg *cfg = phy->cfg;
+ struct device *dev = &phy->pdev->dev;
+ int i, ret;
+
+ phy->regs = devm_kzalloc(dev, sizeof(phy->regs[0]) * cfg->num_regs,
+ GFP_KERNEL);
+ if (!phy->regs)
+ return -ENOMEM;
+
+ phy->clks = devm_kzalloc(dev, sizeof(phy->clks[0]) * cfg->num_clks,
+ GFP_KERNEL);
+ if (!phy->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->num_regs; i++) {
+ struct regulator *reg;
+
+ reg = devm_regulator_get(dev, cfg->reg_names[i]);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ dev_err(dev, "failed to get phy regulator: %s (%d)\n",
+ cfg->reg_names[i], ret);
+ return ret;
+ }
+
+ phy->regs[i] = reg;
+ }
+
+ for (i = 0; i < cfg->num_clks; i++) {
+ struct clk *clk;
+
+ clk = devm_clk_get(dev, cfg->clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "failed to get phy clock: %s (%d)\n",
+ cfg->clk_names[i], ret);
+ return ret;
+ }
+
+ phy->clks[i] = clk;
+ }
+
+ return 0;
+}
+
+int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_cfg *cfg = phy->cfg;
+ struct device *dev = &phy->pdev->dev;
+ int i, ret = 0;
+
+ pm_runtime_get_sync(dev);
+
+ for (i = 0; i < cfg->num_regs; i++) {
+ ret = regulator_enable(phy->regs[i]);
+ if (ret)
+ dev_err(dev, "failed to enable regulator: %s (%d)\n",
+ cfg->reg_names[i], ret);
+ }
+
+ for (i = 0; i < cfg->num_clks; i++) {
+ ret = clk_prepare_enable(phy->clks[i]);
+ if (ret)
+ dev_err(dev, "failed to enable clock: %s (%d)\n",
+ cfg->clk_names[i], ret);
+ }
+
+ return ret;
+}
+
+void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_cfg *cfg = phy->cfg;
+ struct device *dev = &phy->pdev->dev;
+ int i;
+
+ for (i = cfg->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(phy->clks[i]);
+
+ for (i = cfg->num_regs - 1; i >= 0; i--)
+ regulator_disable(phy->regs[i]);
+
+ pm_runtime_put_sync(dev);
+}
+
+void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock)
+{
+ if (!phy || !phy->cfg->powerup)
+ return;
+
+ phy->cfg->powerup(phy, pixclock);
+}
+
+void msm_hdmi_phy_powerdown(struct hdmi_phy *phy)
+{
+ if (!phy || !phy->cfg->powerdown)
+ return;
+
+ phy->cfg->powerdown(phy);
+}
+
+static int msm_hdmi_phy_pll_init(struct platform_device *pdev,
+ enum hdmi_phy_type type)
+{
+ int ret;
+
+ switch (type) {
+ case MSM_HDMI_PHY_8960:
+ ret = msm_hdmi_pll_8960_init(pdev);
+ break;
+ case MSM_HDMI_PHY_8996:
+ ret = msm_hdmi_pll_8996_init(pdev);
+ break;
+ /*
+ * we don't have PLL support for these, don't report an error for now
+ */
+ case MSM_HDMI_PHY_8x60:
+ case MSM_HDMI_PHY_8x74:
+ default:
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int msm_hdmi_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hdmi_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->cfg = (struct hdmi_phy_cfg *)of_device_get_match_data(dev);
+ if (!phy->cfg)
+ return -ENODEV;
+
+ phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
+ if (IS_ERR(phy->mmio)) {
+ dev_err(dev, "%s: failed to map phy base\n", __func__);
+ return -ENOMEM;
+ }
+
+ phy->pdev = pdev;
+
+ ret = msm_hdmi_phy_resource_init(phy);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = msm_hdmi_phy_resource_enable(phy);
+ if (ret)
+ return ret;
+
+ ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
+ if (ret) {
+ dev_err(dev, "couldn't init PLL\n");
+ msm_hdmi_phy_resource_disable(phy);
+ return ret;
+ }
+
+ msm_hdmi_phy_resource_disable(phy);
+
+ platform_set_drvdata(pdev, phy);
+
+ return 0;
+}
+
+static int msm_hdmi_phy_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id msm_hdmi_phy_dt_match[] = {
+ { .compatible = "qcom,hdmi-phy-8660",
+ .data = &msm_hdmi_phy_8x60_cfg },
+ { .compatible = "qcom,hdmi-phy-8960",
+ .data = &msm_hdmi_phy_8960_cfg },
+ { .compatible = "qcom,hdmi-phy-8974",
+ .data = &msm_hdmi_phy_8x74_cfg },
+ { .compatible = "qcom,hdmi-phy-8084",
+ .data = &msm_hdmi_phy_8x74_cfg },
+ { .compatible = "qcom,hdmi-phy-8996",
+ .data = &msm_hdmi_phy_8996_cfg },
+ {}
+};
+
+static struct platform_driver msm_hdmi_phy_platform_driver = {
+ .probe = msm_hdmi_phy_probe,
+ .remove = msm_hdmi_phy_remove,
+ .driver = {
+ .name = "msm_hdmi_phy",
+ .of_match_table = msm_hdmi_phy_dt_match,
+ },
+};
+
+void __init msm_hdmi_phy_driver_register(void)
+{
+ platform_driver_register(&msm_hdmi_phy_platform_driver);
+}
+
+void __exit msm_hdmi_phy_driver_unregister(void)
+{
+ platform_driver_unregister(&msm_hdmi_phy_platform_driver);
+}
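The new hdmi_phy.c selects a per-SoC config through of_device_get_match_data() and then fetches regulators and clocks generically from the name arrays in that config. A sketch of just the match-data step, under illustrative struct and field names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct sketch_cfg {
	const char * const *clk_names;
	int num_clks;
};

static int sketch_probe(struct platform_device *pdev)
{
	const struct sketch_cfg *cfg;

	/* .data from the matching of_device_id entry */
	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;

	/* cfg->clk_names / cfg->num_clks then drive generic setup */
	return 0;
}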
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 3a01cb5051e2..e6ee6b745ab7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,495 +15,48 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifdef CONFIG_COMMON_CLK
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#endif
-
#include "hdmi.h"
-struct hdmi_phy_8960 {
- struct hdmi_phy base;
- struct hdmi *hdmi;
-#ifdef CONFIG_COMMON_CLK
- struct clk_hw pll_hw;
- struct clk *pll;
- unsigned long pixclk;
-#endif
-};
-#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
-
-#ifdef CONFIG_COMMON_CLK
-#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
-
-/*
- * HDMI PLL:
- *
- * To get the parent clock setup properly, we need to plug in hdmi pll
- * configuration into common-clock-framework.
- */
-
-struct pll_rate {
- unsigned long rate;
- struct {
- uint32_t val;
- uint32_t reg;
- } conf[32];
-};
-
-/* NOTE: keep sorted highest freq to lowest: */
-static const struct pll_rate freqtbl[] = {
- { 154000000, {
- { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
- { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
- { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
- { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
- { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
- { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
- { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
- { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
- { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
- { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
- { 0, 0 } }
- },
- /* 1080p60/1080p50 case */
- { 148500000, {
- { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
- { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
- { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
- { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
- { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
- { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
- { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
- { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
- { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
- { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
- { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
- { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
- { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
- { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
- { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
- { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
- { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
- { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
- { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
- { 0, 0 } }
- },
- { 108000000, {
- { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
- { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
- { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
- { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
- { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
- { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
- { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
- { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
- { 0, 0 } }
- },
- /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
- { 74250000, {
- { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
- { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
- { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
- { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
- { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
- { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
- { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
- { 0, 0 } }
- },
- { 74176000, {
- { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
- { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
- { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
- { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
- { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
- { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
- { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
- { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
- { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
- { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
- { 0, 0 } }
- },
- { 65000000, {
- { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
- { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
- { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
- { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
- { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
- { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
- { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
- { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
- { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
- { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
- { 0, 0 } }
- },
- /* 480p60/480i60 */
- { 27030000, {
- { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
- { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
- { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
- { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
- { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
- { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
- { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
- { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
- { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
- { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
- { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
- { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
- { 0, 0 } }
- },
- /* 576p50/576i50 */
- { 27000000, {
- { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
- { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
- { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
- { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
- { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
- { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
- { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
- { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
- { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
- { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
- { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
- { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
- { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
- { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
- { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
- { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
- { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
- { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
- { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
- { 0, 0 } }
- },
- /* 640x480p60 */
- { 25200000, {
- { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
- { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
- { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
- { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
- { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
- { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
- { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
- { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
- { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
- { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
- { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
- { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
- { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
- { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
- { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
- { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
- { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
- { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
- { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
- { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
- { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
- { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
- { 0, 0 } }
- },
-};
-
-static int hdmi_pll_enable(struct clk_hw *hw)
-{
- struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
- struct hdmi *hdmi = phy_8960->hdmi;
- int timeout_count, pll_lock_retry = 10;
- unsigned int val;
-
- DBG("");
-
- /* Assert PLL S/W reset */
- hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
-
- /* Wait for a short time before de-asserting
- * to allow the hardware to complete its job.
- * This much of delay should be fine for hardware
- * to assert and de-assert.
- */
- udelay(10);
-
- /* De-assert PLL S/W reset */
- hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
-
- val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
- val |= HDMI_8960_PHY_REG12_SW_RESET;
- /* Assert PHY S/W reset */
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
- val &= ~HDMI_8960_PHY_REG12_SW_RESET;
- /* Wait for a short time before de-asserting
- to allow the hardware to complete its job.
- This much of delay should be fine for hardware
- to assert and de-assert. */
- udelay(10);
- /* De-assert PHY S/W reset */
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x3f);
-
- val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
- val |= HDMI_8960_PHY_REG12_PWRDN_B;
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
- /* Wait 10 us for enabling global power for PHY */
- mb();
- udelay(10);
-
- val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
- val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
- val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
- hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80);
-
- timeout_count = 1000;
- while (--pll_lock_retry > 0) {
-
- /* are we there yet? */
- val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0);
- if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
- break;
-
- udelay(1);
-
- if (--timeout_count > 0)
- continue;
-
- /*
- * PLL has still not locked.
- * Do a software reset and try again
- * Assert PLL S/W reset first
- */
- hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
- udelay(10);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
-
- /*
- * Wait for a short duration for the PLL calibration
- * before checking if the PLL gets locked
- */
- udelay(350);
-
- timeout_count = 1000;
- }
-
- return 0;
-}
-
-static void hdmi_pll_disable(struct clk_hw *hw)
-{
- struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
- struct hdmi *hdmi = phy_8960->hdmi;
- unsigned int val;
-
- DBG("");
-
- val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
- val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
-
- val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
- val |= HDMI_8960_PHY_REG12_SW_RESET;
- val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
- hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
- /* Make sure HDMI PHY/PLL are powered down */
- mb();
-}
-
-static const struct pll_rate *find_rate(unsigned long rate)
-{
- int i;
- for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
- if (rate > freqtbl[i].rate)
- return &freqtbl[i-1];
- return &freqtbl[i-1];
-}
-
-static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
- return phy_8960->pixclk;
-}
-
-static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- const struct pll_rate *pll_rate = find_rate(rate);
- return pll_rate->rate;
-}
-
-static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
- struct hdmi *hdmi = phy_8960->hdmi;
- const struct pll_rate *pll_rate = find_rate(rate);
- int i;
-
- DBG("rate=%lu", rate);
-
- for (i = 0; pll_rate->conf[i].reg; i++)
- hdmi_write(hdmi, pll_rate->conf[i].reg, pll_rate->conf[i].val);
-
- phy_8960->pixclk = rate;
-
- return 0;
-}
-
-
-static const struct clk_ops hdmi_pll_ops = {
- .enable = hdmi_pll_enable,
- .disable = hdmi_pll_disable,
- .recalc_rate = hdmi_pll_recalc_rate,
- .round_rate = hdmi_pll_round_rate,
- .set_rate = hdmi_pll_set_rate,
-};
-
-static const char *hdmi_pll_parents[] = {
- "pxo",
-};
-
-static struct clk_init_data pll_init = {
- .name = "hdmi_pll",
- .ops = &hdmi_pll_ops,
- .parent_names = hdmi_pll_parents,
- .num_parents = ARRAY_SIZE(hdmi_pll_parents),
-};
-#endif
-
-/*
- * HDMI Phy:
- */
-
-static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
-{
- struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
- kfree(phy_8960);
-}
-
static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
- unsigned long int pixclock)
+ unsigned long int pixclock)
{
- struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
- struct hdmi *hdmi = phy_8960->hdmi;
-
DBG("pixclock: %lu", pixclock);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x00);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00);
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG0, 0x1b);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG1, 0xf2);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG4, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG5, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG6, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG7, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG8, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG9, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG10, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG11, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG3, 0x20);
}
static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
{
- struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
- struct hdmi *hdmi = phy_8960->hdmi;
-
DBG("");
- hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x7f);
}
-static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
- .destroy = hdmi_phy_8960_destroy,
- .powerup = hdmi_phy_8960_powerup,
- .powerdown = hdmi_phy_8960_powerdown,
+static const char * const hdmi_phy_8960_reg_names[] = {
+ "core-vdda",
};
-struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
-{
- struct hdmi_phy_8960 *phy_8960;
- struct hdmi_phy *phy = NULL;
- int ret;
-#ifdef CONFIG_COMMON_CLK
- int i;
-
- /* sanity check: */
- for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
- if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
- return ERR_PTR(-EINVAL);
-#endif
-
- phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
- if (!phy_8960) {
- ret = -ENOMEM;
- goto fail;
- }
-
- phy = &phy_8960->base;
-
- phy->funcs = &hdmi_phy_8960_funcs;
-
- phy_8960->hdmi = hdmi;
-
-#ifdef CONFIG_COMMON_CLK
- phy_8960->pll_hw.init = &pll_init;
- phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw);
- if (IS_ERR(phy_8960->pll)) {
- ret = PTR_ERR(phy_8960->pll);
- phy_8960->pll = NULL;
- goto fail;
- }
-#endif
-
- return phy;
+static const char * const hdmi_phy_8960_clk_names[] = {
+ "slave_iface_clk",
+};
-fail:
- if (phy)
- hdmi_phy_8960_destroy(phy);
- return ERR_PTR(ret);
-}
+const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = {
+ .type = MSM_HDMI_PHY_8960,
+ .powerup = hdmi_phy_8960_powerup,
+ .powerdown = hdmi_phy_8960_powerdown,
+ .reg_names = hdmi_phy_8960_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8960_reg_names),
+ .clk_names = hdmi_phy_8960_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8960_clk_names),
+};
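The deleted 8960 PLL code kept freqtbl[] sorted from highest to lowest rate, and find_rate() returned the smallest entry at or above the request. The same lookup reduced to bare rates (assuming the same high-to-low ordering) looks like:

/* Return the smallest table rate >= rate; tbl is sorted high -> low. */
static unsigned long pick_rate(const unsigned long *tbl, int n,
			       unsigned long rate)
{
	int i;

	for (i = 1; i < n; i++)
		if (rate > tbl[i])
			return tbl[i - 1];
	return tbl[n - 1];
}

So with the original table, pick_rate(tbl, n, 74250000) lands on the 74250000 entry, anything above 154000000 clamps to the top entry, and anything below the smallest rate falls through to the last one.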
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
new file mode 100644
index 000000000000..aa94a553794f
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -0,0 +1,766 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "hdmi.h"
+
+#define HDMI_VCO_MAX_FREQ 12000000000UL
+#define HDMI_VCO_MIN_FREQ 8000000000UL
+
+#define HDMI_PCLK_MAX_FREQ 600000000
+#define HDMI_PCLK_MIN_FREQ 25000000
+
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL
+#define HDMI_CORECLK_DIV 5
+#define HDMI_DEFAULT_REF_CLOCK 19200000
+#define HDMI_PLL_CMP_CNT 1024
+
+#define HDMI_PLL_POLL_MAX_READS 100
+#define HDMI_PLL_POLL_TIMEOUT_US 150
+
+#define HDMI_NUM_TX_CHANNEL 4
+
+struct hdmi_pll_8996 {
+ struct platform_device *pdev;
+ struct clk_hw clk_hw;
+
+ /* pll mmio base */
+ void __iomem *mmio_qserdes_com;
+ /* tx channel base */
+ void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL];
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8996, clk_hw)
+
+struct hdmi_8996_phy_pll_reg_cfg {
+ u32 tx_lx_lane_mode[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL];
+ u32 com_svs_mode_clk_sel;
+ u32 com_hsclk_sel;
+ u32 com_pll_cctrl_mode0;
+ u32 com_pll_rctrl_mode0;
+ u32 com_cp_ctrl_mode0;
+ u32 com_dec_start_mode0;
+ u32 com_div_frac_start1_mode0;
+ u32 com_div_frac_start2_mode0;
+ u32 com_div_frac_start3_mode0;
+ u32 com_integloop_gain0_mode0;
+ u32 com_integloop_gain1_mode0;
+ u32 com_lock_cmp_en;
+ u32 com_lock_cmp1_mode0;
+ u32 com_lock_cmp2_mode0;
+ u32 com_lock_cmp3_mode0;
+ u32 com_core_clk_en;
+ u32 com_coreclk_div;
+ u32 com_vco_tune_ctrl;
+
+ u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_vmode_ctrl1[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_vmode_ctrl2[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_res_code_lane_tx[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_hp_pd_enables[HDMI_NUM_TX_CHANNEL];
+
+ u32 phy_mode;
+};
+
+struct hdmi_8996_post_divider {
+ u64 vco_freq;
+ int hsclk_divsel;
+ int vco_ratio;
+ int tx_band_sel;
+ int half_rate_mode;
+};
+
+static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8996 *pll)
+{
+ return platform_get_drvdata(pll->pdev);
+}
+
+static inline void hdmi_pll_write(struct hdmi_pll_8996 *pll, int offset,
+ u32 data)
+{
+ msm_writel(data, pll->mmio_qserdes_com + offset);
+}
+
+static inline u32 hdmi_pll_read(struct hdmi_pll_8996 *pll, int offset)
+{
+ return msm_readl(pll->mmio_qserdes_com + offset);
+}
+
+static inline void hdmi_tx_chan_write(struct hdmi_pll_8996 *pll, int channel,
+ int offset, int data)
+{
+ msm_writel(data, pll->mmio_qserdes_tx[channel] + offset);
+}
+
+static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk,
+ bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return (11000000 / (ref_clk / 20));
+
+ return 0x23;
+}
+
+static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x16;
+
+ return 0x10;
+}
+
+static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x28;
+
+ return 0x1;
+}
+
+static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
+ bool gen_ssc)
+{
+ int digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
+ u64 base;
+
+ if ((frac_start != 0) || gen_ssc)
+ base = (64 * ref_clk) / HDMI_DEFAULT_REF_CLOCK;
+ else
+ base = (1022 * ref_clk) / 100;
+
+ base <<= digclk_divsel;
+
+ return (base <= 2046 ? base : 2046);
+}
+
+static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
+{
+ u64 dividend = HDMI_PLL_CMP_CNT * fdata;
+ u32 divisor = ref_clk * 10;
+ u32 rem;
+
+ rem = do_div(dividend, divisor);
+ if (rem > (divisor >> 1))
+ dividend++;
+
+ return dividend - 1;
+}
+
+static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
+{
+ u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
+
+ do_div(fdata, HDMI_PLL_CMP_CNT);
+
+ return fdata;
+}
+
+static int pll_get_post_div(struct hdmi_8996_post_divider *pd, u64 bclk)
+{
+ int ratio[] = { 2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35 };
+ int hs_divsel[] = { 0, 4, 8, 12, 1, 5, 2, 9, 3, 13, 10, 7, 14, 11, 15 };
+ int tx_band_sel[] = { 0, 1, 2, 3 };
+ u64 vco_freq[60];
+ u64 vco, vco_optimal;
+ int half_rate_mode = 0;
+ int vco_optimal_index, vco_freq_index;
+ int i, j;
+
+retry:
+ vco_optimal = HDMI_VCO_MAX_FREQ;
+ vco_optimal_index = -1;
+ vco_freq_index = 0;
+ for (i = 0; i < 15; i++) {
+ for (j = 0; j < 4; j++) {
+ u32 ratio_mult = ratio[i] << tx_band_sel[j];
+
+ vco = bclk >> half_rate_mode;
+ vco *= ratio_mult;
+ vco_freq[vco_freq_index++] = vco;
+ }
+ }
+
+ for (i = 0; i < 60; i++) {
+ u64 vco_tmp = vco_freq[i];
+
+ if ((vco_tmp >= HDMI_VCO_MIN_FREQ) &&
+ (vco_tmp <= vco_optimal)) {
+ vco_optimal = vco_tmp;
+ vco_optimal_index = i;
+ }
+ }
+
+ if (vco_optimal_index == -1) {
+ if (!half_rate_mode) {
+ half_rate_mode = 1;
+ goto retry;
+ }
+ } else {
+ pd->vco_freq = vco_optimal;
+ pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4];
+ pd->vco_ratio = ratio[vco_optimal_index / 4];
+ pd->hsclk_divsel = hs_divsel[vco_optimal_index / 4];
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk,
+ struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+ struct hdmi_8996_post_divider pd;
+ u64 bclk;
+ u64 tmds_clk;
+ u64 dec_start;
+ u64 frac_start;
+ u64 fdata;
+ u32 pll_divisor;
+ u32 rem;
+ u32 cpctrl;
+ u32 rctrl;
+ u32 cctrl;
+ u32 integloop_gain;
+ u32 pll_cmp;
+ int i, ret;
+
+ /* bit clk = 10 * pix_clk */
+ bclk = ((u64)pix_clk) * 10;
+
+ if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+ tmds_clk = pix_clk >> 2;
+ else
+ tmds_clk = pix_clk;
+
+ ret = pll_get_post_div(&pd, bclk);
+ if (ret)
+ return ret;
+
+ dec_start = pd.vco_freq;
+ pll_divisor = 4 * ref_clk;
+ do_div(dec_start, pll_divisor);
+
+ frac_start = pd.vco_freq * (1 << 20);
+
+ rem = do_div(frac_start, pll_divisor);
+ frac_start -= dec_start * (1 << 20);
+ if (rem > (pll_divisor >> 1))
+ frac_start++;
+
+ cpctrl = pll_get_cpctrl(frac_start, ref_clk, false);
+ rctrl = pll_get_rctrl(frac_start, false);
+ cctrl = pll_get_cctrl(frac_start, false);
+ integloop_gain = pll_get_integloop_gain(frac_start, bclk,
+ ref_clk, false);
+
+ fdata = pd.vco_freq;
+ do_div(fdata, pd.vco_ratio);
+
+ pll_cmp = pll_get_pll_cmp(fdata, ref_clk);
+
+ DBG("VCO freq: %llu", pd.vco_freq);
+ DBG("fdata: %llu", fdata);
+ DBG("pix_clk: %lu", pix_clk);
+ DBG("tmds clk: %llu", tmds_clk);
+ DBG("HSCLK_SEL: %d", pd.hsclk_divsel);
+ DBG("DEC_START: %llu", dec_start);
+ DBG("DIV_FRAC_START: %llu", frac_start);
+ DBG("PLL_CPCTRL: %u", cpctrl);
+ DBG("PLL_RCTRL: %u", rctrl);
+ DBG("PLL_CCTRL: %u", cctrl);
+ DBG("INTEGLOOP_GAIN: %u", integloop_gain);
+ DBG("TX_BAND: %d", pd.tx_band_sel);
+ DBG("PLL_CMP: %u", pll_cmp);
+
+ /* Convert these values to register specific values */
+ if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+ cfg->com_svs_mode_clk_sel = 1;
+ else
+ cfg->com_svs_mode_clk_sel = 2;
+
+ cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel);
+ cfg->com_pll_cctrl_mode0 = cctrl;
+ cfg->com_pll_rctrl_mode0 = rctrl;
+ cfg->com_cp_ctrl_mode0 = cpctrl;
+ cfg->com_dec_start_mode0 = dec_start;
+ cfg->com_div_frac_start1_mode0 = (frac_start & 0xff);
+ cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8);
+ cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16);
+ cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff);
+ cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8);
+ cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff);
+ cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8);
+ cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+ cfg->com_lock_cmp_en = 0x0;
+ cfg->com_core_clk_en = 0x2c;
+ cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+ cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+ cfg->com_vco_tune_ctrl = 0x0;
+
+ cfg->tx_lx_lane_mode[0] =
+ cfg->tx_lx_lane_mode[2] = 0x43;
+
+ cfg->tx_lx_hp_pd_enables[0] =
+ cfg->tx_lx_hp_pd_enables[1] =
+ cfg->tx_lx_hp_pd_enables[2] = 0x0c;
+ cfg->tx_lx_hp_pd_enables[3] = 0x3;
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
+ cfg->tx_lx_tx_band[i] = pd.tx_band_sel + 4;
+
+ if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] =
+ cfg->tx_lx_tx_drv_lvl[1] =
+ cfg->tx_lx_tx_drv_lvl[2] = 0x25;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x22;
+
+ cfg->tx_lx_tx_emp_post1_lvl[0] =
+ cfg->tx_lx_tx_emp_post1_lvl[1] =
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x23;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x27;
+
+ cfg->tx_lx_vmode_ctrl1[0] =
+ cfg->tx_lx_vmode_ctrl1[1] =
+ cfg->tx_lx_vmode_ctrl1[2] =
+ cfg->tx_lx_vmode_ctrl1[3] = 0x00;
+
+ cfg->tx_lx_vmode_ctrl2[0] =
+ cfg->tx_lx_vmode_ctrl2[1] =
+ cfg->tx_lx_vmode_ctrl2[2] = 0x0D;
+
+ cfg->tx_lx_vmode_ctrl2[3] = 0x00;
+ } else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ cfg->tx_lx_tx_drv_lvl[i] = 0x25;
+ cfg->tx_lx_tx_emp_post1_lvl[i] = 0x23;
+ cfg->tx_lx_vmode_ctrl1[i] = 0x00;
+ }
+
+ cfg->tx_lx_vmode_ctrl2[0] =
+ cfg->tx_lx_vmode_ctrl2[1] =
+ cfg->tx_lx_vmode_ctrl2[2] = 0x0D;
+ cfg->tx_lx_vmode_ctrl2[3] = 0x00;
+ } else {
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ cfg->tx_lx_tx_drv_lvl[i] = 0x20;
+ cfg->tx_lx_tx_emp_post1_lvl[i] = 0x20;
+ cfg->tx_lx_vmode_ctrl1[i] = 0x00;
+ cfg->tx_lx_vmode_ctrl2[i] = 0x0E;
+ }
+ }
+
+ DBG("com_svs_mode_clk_sel = 0x%x", cfg->com_svs_mode_clk_sel);
+ DBG("com_hsclk_sel = 0x%x", cfg->com_hsclk_sel);
+ DBG("com_lock_cmp_en = 0x%x", cfg->com_lock_cmp_en);
+ DBG("com_pll_cctrl_mode0 = 0x%x", cfg->com_pll_cctrl_mode0);
+ DBG("com_pll_rctrl_mode0 = 0x%x", cfg->com_pll_rctrl_mode0);
+ DBG("com_cp_ctrl_mode0 = 0x%x", cfg->com_cp_ctrl_mode0);
+ DBG("com_dec_start_mode0 = 0x%x", cfg->com_dec_start_mode0);
+ DBG("com_div_frac_start1_mode0 = 0x%x", cfg->com_div_frac_start1_mode0);
+ DBG("com_div_frac_start2_mode0 = 0x%x", cfg->com_div_frac_start2_mode0);
+ DBG("com_div_frac_start3_mode0 = 0x%x", cfg->com_div_frac_start3_mode0);
+ DBG("com_integloop_gain0_mode0 = 0x%x", cfg->com_integloop_gain0_mode0);
+ DBG("com_integloop_gain1_mode0 = 0x%x", cfg->com_integloop_gain1_mode0);
+ DBG("com_lock_cmp1_mode0 = 0x%x", cfg->com_lock_cmp1_mode0);
+ DBG("com_lock_cmp2_mode0 = 0x%x", cfg->com_lock_cmp2_mode0);
+ DBG("com_lock_cmp3_mode0 = 0x%x", cfg->com_lock_cmp3_mode0);
+ DBG("com_core_clk_en = 0x%x", cfg->com_core_clk_en);
+ DBG("com_coreclk_div = 0x%x", cfg->com_coreclk_div);
+ DBG("phy_mode = 0x%x", cfg->phy_mode);
+
+ DBG("tx_l0_lane_mode = 0x%x", cfg->tx_lx_lane_mode[0]);
+ DBG("tx_l2_lane_mode = 0x%x", cfg->tx_lx_lane_mode[2]);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ DBG("tx_l%d_tx_band = 0x%x", i, cfg->tx_lx_tx_band[i]);
+ DBG("tx_l%d_tx_drv_lvl = 0x%x", i, cfg->tx_lx_tx_drv_lvl[i]);
+ DBG("tx_l%d_tx_emp_post1_lvl = 0x%x", i,
+ cfg->tx_lx_tx_emp_post1_lvl[i]);
+ DBG("tx_l%d_vmode_ctrl1 = 0x%x", i, cfg->tx_lx_vmode_ctrl1[i]);
+ DBG("tx_l%d_vmode_ctrl2 = 0x%x", i, cfg->tx_lx_vmode_ctrl2[i]);
+ }
+
+ return 0;
+}
+
+static int hdmi_8996_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ struct hdmi_8996_phy_pll_reg_cfg cfg;
+ int i, ret;
+
+ memset(&cfg, 0x00, sizeof(cfg));
+
+ ret = pll_calculate(rate, parent_rate, &cfg);
+ if (ret) {
+ DRM_ERROR("PLL calculation failed\n");
+ return ret;
+ }
+
+ /* Initially shut down PHY */
+ DBG("Disabling PHY");
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x0);
+ udelay(500);
+
+ /* Power up sequence */
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x04);
+
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20);
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL, 0x0F);
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL, 0x0F);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE,
+ 0x03);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND,
+ cfg.tx_lx_tx_band[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN,
+ 0x03);
+ }
+
+ hdmi_tx_chan_write(pll, 0, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE,
+ cfg.tx_lx_lane_mode[0]);
+ hdmi_tx_chan_write(pll, 2, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE,
+ cfg.tx_lx_lane_mode[2]);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E);
+
+ /* Bypass VCO calibration */
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL,
+ cfg.com_svs_mode_clk_sel);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_TRIM, 0x0F);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_IVCO, 0x0F);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL,
+ cfg.com_vco_tune_ctrl);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x06);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_SELECT, 0x30);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL,
+ cfg.com_hsclk_sel);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN,
+ cfg.com_lock_cmp_en);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0,
+ cfg.com_pll_cctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0,
+ cfg.com_pll_rctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0,
+ cfg.com_cp_ctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0,
+ cfg.com_dec_start_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0,
+ cfg.com_div_frac_start1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0,
+ cfg.com_div_frac_start2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0,
+ cfg.com_div_frac_start3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+ cfg.com_integloop_gain0_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+ cfg.com_integloop_gain1_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0,
+ cfg.com_lock_cmp1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0,
+ cfg.com_lock_cmp2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0,
+ cfg.com_lock_cmp3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN,
+ cfg.com_core_clk_en);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV,
+ cfg.com_coreclk_div);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG, 0x02);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM, 0x15);
+
+ /* TX lanes setup (TX 0/1/2/3) */
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL,
+ cfg.tx_lx_tx_drv_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL,
+ cfg.tx_lx_tx_emp_post1_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1,
+ cfg.tx_lx_vmode_ctrl1[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2,
+ cfg.tx_lx_vmode_ctrl2[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET,
+ 0x00);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET,
+ 0x00);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN,
+ 0x03);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN,
+ 0x40);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES,
+ cfg.tx_lx_hp_pd_enables[i]);
+ }
+
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_MODE, cfg.phy_mode);
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1F);
+
+ /*
+ * Ensure that vco configuration gets flushed to hardware before
+ * enabling the PLL
+ */
+ wmb();
+
+ return 0;
+}
+
+static int hdmi_8996_phy_ready_status(struct hdmi_phy *phy)
+{
+ u32 nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ u32 status;
+ int phy_ready = 0;
+
+ DBG("Waiting for PHY ready");
+
+ while (nb_tries--) {
+ status = hdmi_phy_read(phy, REG_HDMI_8996_PHY_STATUS);
+ phy_ready = status & BIT(0);
+
+ if (phy_ready)
+ break;
+
+ udelay(timeout);
+ }
+
+ DBG("PHY is %sready", phy_ready ? "" : "*not* ");
+
+ return phy_ready;
+}
+
+static int hdmi_8996_pll_lock_status(struct hdmi_pll_8996 *pll)
+{
+ u32 status;
+ int nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ int pll_locked = 0;
+
+ DBG("Waiting for PLL lock");
+
+ while (nb_tries--) {
+ status = hdmi_pll_read(pll,
+ REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout);
+ }
+
+ DBG("HDMI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+ return pll_locked;
+}
+
+static int hdmi_8996_pll_prepare(struct clk_hw *hw)
+{
+ struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ int i, ret = 0;
+
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x1);
+ udelay(100);
+
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19);
+ udelay(100);
+
+ ret = hdmi_8996_pll_lock_status(pll);
+ if (!ret)
+ return ret;
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+ 0x6F);
+
+ /* Disable SSC */
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER1, 0x0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER2, 0x0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1, 0x0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2, 0x0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER, 0x2);
+
+ ret = hdmi_8996_phy_ready_status(phy);
+ if (!ret)
+ return ret;
+
+ /* Restart the retiming buffer */
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x18);
+ udelay(1);
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19);
+
+ return 0;
+}
+
+static long hdmi_8996_pll_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ if (rate < HDMI_PCLK_MIN_FREQ)
+ return HDMI_PCLK_MIN_FREQ;
+ else if (rate > HDMI_PCLK_MAX_FREQ)
+ return HDMI_PCLK_MAX_FREQ;
+ else
+ return rate;
+}
+
+static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+ u64 fdata;
+ u32 cmp1, cmp2, cmp3, pll_cmp;
+
+ cmp1 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0);
+ cmp2 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0);
+ cmp3 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0);
+
+ pll_cmp = cmp1 | (cmp2 << 8) | (cmp3 << 16);
+
+ fdata = pll_cmp_to_fdata(pll_cmp + 1, parent_rate);
+
+ do_div(fdata, 10);
+
+ return fdata;
+}
+
+static void hdmi_8996_pll_unprepare(struct clk_hw *hw)
+{
+}
+
+static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
+{
+ struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+ u32 status;
+ int pll_locked;
+
+ status = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ return pll_locked;
+}
+
+static struct clk_ops hdmi_8996_pll_ops = {
+ .set_rate = hdmi_8996_pll_set_clk_rate,
+ .round_rate = hdmi_8996_pll_round_rate,
+ .recalc_rate = hdmi_8996_pll_recalc_rate,
+ .prepare = hdmi_8996_pll_prepare,
+ .unprepare = hdmi_8996_pll_unprepare,
+ .is_enabled = hdmi_8996_pll_is_enabled,
+};
+
+static const char * const hdmi_pll_parents[] = {
+ "xo",
+};
+
+static struct clk_init_data pll_init = {
+ .name = "hdmipll",
+ .ops = &hdmi_8996_pll_ops,
+ .parent_names = hdmi_pll_parents,
+ .num_parents = ARRAY_SIZE(hdmi_pll_parents),
+};
+
+int msm_hdmi_pll_8996_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hdmi_pll_8996 *pll;
+ struct clk *clk;
+ int i;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return -ENOMEM;
+
+ pll->pdev = pdev;
+
+ pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
+ if (IS_ERR(pll->mmio_qserdes_com)) {
+ dev_err(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ char name[32], label[32];
+
+ snprintf(name, sizeof(name), "hdmi_tx_l%d", i);
+ snprintf(label, sizeof(label), "HDMI_TX_L%d", i);
+
+ pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
+ if (IS_ERR(pll->mmio_qserdes_tx[i])) {
+ dev_err(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+ }
+ pll->clk_hw.init = &pll_init;
+
+ clk = devm_clk_register(dev, &pll->clk_hw);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register pll clock\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const char * const hdmi_phy_8996_reg_names[] = {
+ "vddio",
+ "vcca",
+};
+
+static const char * const hdmi_phy_8996_clk_names[] = {
+ "mmagic_iface_clk",
+ "iface_clk",
+ "ref_clk",
+};
+
+const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = {
+ .type = MSM_HDMI_PHY_8996,
+ .reg_names = hdmi_phy_8996_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8996_reg_names),
+ .clk_names = hdmi_phy_8996_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8996_clk_names),
+};
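
Both hdmi_8996_phy_ready_status() and hdmi_8996_pll_lock_status() above follow one bounded-poll shape: read a status register up to HDMI_PLL_POLL_MAX_READS times, delay HDMI_PLL_POLL_TIMEOUT_US between reads, and report bit 0 of the last read. A minimal sketch of that pattern, with poll_ready_bit(), read_status, max_reads and delay_us as illustrative placeholders rather than driver names:

	/*
	 * Bounded poll: return nonzero as soon as the ready bit is seen,
	 * 0 once max_reads attempts are exhausted.
	 */
	static int poll_ready_bit(u32 (*read_status)(void), u32 max_reads,
				  unsigned long delay_us)
	{
		while (max_reads--) {
			if (read_status() & BIT(0))
				return 1;
			udelay(delay_us);
		}
		return 0;
	}
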
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index cb01421ae1e4..a68eea4153fc 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -17,166 +17,122 @@
#include "hdmi.h"
-struct hdmi_phy_8x60 {
- struct hdmi_phy base;
- struct hdmi *hdmi;
-};
-#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
-
-static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
-{
- struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
- kfree(phy_8x60);
-}
-
static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
- struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
- struct hdmi *hdmi = phy_8x60->hdmi;
-
/* De-serializer delay D/C for non-lbk mode: */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
- HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG0,
+ HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
if (pixclock == 27000000) {
/* video_format == HDMI_VFRMT_720x480p60_16_9 */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
- HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
- HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
} else {
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
- HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
- HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
}
/* No matter what, start from the power down mode: */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
- HDMI_8x60_PHY_REG2_PD_PWRGEN |
- HDMI_8x60_PHY_REG2_PD_PLL |
- HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
- HDMI_8x60_PHY_REG2_PD_DESER);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
/* Turn PowerGen on: */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
- HDMI_8x60_PHY_REG2_PD_PLL |
- HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
- HDMI_8x60_PHY_REG2_PD_DESER);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
/* Turn PLL power on: */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
- HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
- HDMI_8x60_PHY_REG2_PD_DESER);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
/* Write to HIGH after PLL power down de-assert: */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
- HDMI_8x60_PHY_REG3_PLL_ENABLE);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3,
+ HDMI_8x60_PHY_REG3_PLL_ENABLE);
/* ASIC power on; PHY REG9 = 0 */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0);
/* Enable PLL lock detect, PLL lock det will go high after lock
* Enable the re-time logic
*/
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
- HDMI_8x60_PHY_REG12_RETIMING_EN |
- HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
/* Drivers are on: */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
- HDMI_8x60_PHY_REG2_PD_DESER);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DESER);
/* If the RX detector is needed: */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
- HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
- HDMI_8x60_PHY_REG2_PD_DESER);
-
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG4, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG5, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG6, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG7, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG8, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG10, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG11, 0);
/* If we want to use lock enable based on counting: */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
- HDMI_8x60_PHY_REG12_RETIMING_EN |
- HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
- HDMI_8x60_PHY_REG12_FORCE_LOCK);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
+ HDMI_8x60_PHY_REG12_FORCE_LOCK);
}
static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
{
- struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
- struct hdmi *hdmi = phy_8x60->hdmi;
-
/* Assert RESET PHY from controller */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
- HDMI_PHY_CTRL_SW_RESET);
+ hdmi_phy_write(phy, REG_HDMI_PHY_CTRL,
+ HDMI_PHY_CTRL_SW_RESET);
udelay(10);
/* De-assert RESET PHY from controller */
- hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
+ hdmi_phy_write(phy, REG_HDMI_PHY_CTRL, 0);
/* Turn off Driver */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
- HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
- HDMI_8x60_PHY_REG2_PD_DESER);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
udelay(10);
/* Disable PLL */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3, 0);
/* Power down PHY, but keep RX-sense: */
- hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
- HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
- HDMI_8x60_PHY_REG2_PD_PWRGEN |
- HDMI_8x60_PHY_REG2_PD_PLL |
- HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
- HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
- HDMI_8x60_PHY_REG2_PD_DESER);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
}
-static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
- .destroy = hdmi_phy_8x60_destroy,
- .powerup = hdmi_phy_8x60_powerup,
- .powerdown = hdmi_phy_8x60_powerdown,
+const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg = {
+ .type = MSM_HDMI_PHY_8x60,
+ .powerup = hdmi_phy_8x60_powerup,
+ .powerdown = hdmi_phy_8x60_powerdown,
};
-
-struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
-{
- struct hdmi_phy_8x60 *phy_8x60;
- struct hdmi_phy *phy = NULL;
- int ret;
-
- phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
- if (!phy_8x60) {
- ret = -ENOMEM;
- goto fail;
- }
-
- phy = &phy_8x60->base;
-
- phy->funcs = &hdmi_phy_8x60_funcs;
-
- phy_8x60->hdmi = hdmi;
-
- return phy;
-
-fail:
- if (phy)
- hdmi_phy_8x60_destroy(phy);
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index 56ab8917ee9a..c4a61e537851 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -17,84 +17,40 @@
#include "hdmi.h"
-struct hdmi_phy_8x74 {
- struct hdmi_phy base;
- void __iomem *mmio;
-};
-#define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
-
-
-static void phy_write(struct hdmi_phy_8x74 *phy, u32 reg, u32 data)
-{
- msm_writel(data, phy->mmio + reg);
-}
-
-//static u32 phy_read(struct hdmi_phy_8x74 *phy, u32 reg)
-//{
-// return msm_readl(phy->mmio + reg);
-//}
-
-static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
-{
- struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
- kfree(phy_8x74);
-}
-
static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
unsigned long int pixclock)
{
- struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
-
- phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG0, 0x1b);
- phy_write(phy_8x74, REG_HDMI_8x74_ANA_CFG1, 0xf2);
- phy_write(phy_8x74, REG_HDMI_8x74_BIST_CFG0, 0x0);
- phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN0, 0x0);
- phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN1, 0x0);
- phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN2, 0x0);
- phy_write(phy_8x74, REG_HDMI_8x74_BIST_PATN3, 0x0);
- phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL1, 0x20);
+ hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG0, 0x1b);
+ hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG1, 0xf2);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_CFG0, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN0, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN1, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN2, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN3, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL1, 0x20);
}
static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
{
- struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
- phy_write(phy_8x74, REG_HDMI_8x74_PD_CTRL0, 0x7f);
+ hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL0, 0x7f);
}
-static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
- .destroy = hdmi_phy_8x74_destroy,
- .powerup = hdmi_phy_8x74_powerup,
- .powerdown = hdmi_phy_8x74_powerdown,
+static const char * const hdmi_phy_8x74_reg_names[] = {
+ "core-vdda",
+ "vddio",
};
-struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
-{
- struct hdmi_phy_8x74 *phy_8x74;
- struct hdmi_phy *phy = NULL;
- int ret;
-
- phy_8x74 = kzalloc(sizeof(*phy_8x74), GFP_KERNEL);
- if (!phy_8x74) {
- ret = -ENOMEM;
- goto fail;
- }
-
- phy = &phy_8x74->base;
-
- phy->funcs = &hdmi_phy_8x74_funcs;
-
- /* for 8x74, the phy mmio is mapped separately: */
- phy_8x74->mmio = msm_ioremap(hdmi->pdev,
- "phy_physical", "HDMI_8x74");
- if (IS_ERR(phy_8x74->mmio)) {
- ret = PTR_ERR(phy_8x74->mmio);
- goto fail;
- }
-
- return phy;
+static const char * const hdmi_phy_8x74_clk_names[] = {
+ "iface_clk",
+ "alt_iface_clk"
+};
-fail:
- if (phy)
- hdmi_phy_8x74_destroy(phy);
- return ERR_PTR(ret);
-}
+const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = {
+ .type = MSM_HDMI_PHY_8x74,
+ .powerup = hdmi_phy_8x74_powerup,
+ .powerdown = hdmi_phy_8x74_powerdown,
+ .reg_names = hdmi_phy_8x74_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8x74_reg_names),
+ .clk_names = hdmi_phy_8x74_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8x74_clk_names),
+};
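
With the 8996, 8x60 and 8x74 conversions, per-PHY vtables and init functions are replaced by static hdmi_phy_cfg tables, so the common PHY code can resolve supplies and clocks generically from reg_names/clk_names. A sketch of how the supply side might be bulk-resolved, assuming the cfg layout shown above (the helper name is illustrative; devm_regulator_bulk_get() is the stock kernel API):

	static int hdmi_phy_get_supplies(struct device *dev,
					 const struct hdmi_phy_cfg *cfg,
					 struct regulator_bulk_data *regs)
	{
		int i;

		/* map cfg->reg_names[] onto the bulk-get descriptor array */
		for (i = 0; i < cfg->num_regs; i++)
			regs[i].supply = cfg->reg_names[i];

		return devm_regulator_bulk_get(dev, cfg->num_regs, regs);
	}
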
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
new file mode 100644
index 000000000000..92da69aa6187
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+#include "hdmi.h"
+
+struct hdmi_pll_8960 {
+ struct platform_device *pdev;
+ struct clk_hw clk_hw;
+ void __iomem *mmio;
+
+ unsigned long pixclk;
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8960, clk_hw)
+
+/*
+ * HDMI PLL:
+ *
+ * To get the parent clock set up properly, we need to plug the HDMI PLL
+ * configuration into the common clock framework.
+ */
+
+struct pll_rate {
+ unsigned long rate;
+ int num_reg;
+ struct {
+ u32 val;
+ u32 reg;
+ } conf[32];
+};
+
+/* NOTE: keep sorted highest freq to lowest: */
+static const struct pll_rate freqtbl[] = {
+ { 154000000, 14, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ }
+ },
+ /* 1080p60/1080p50 case */
+ { 148500000, 27, {
+ { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ }
+ },
+ { 108000000, 13, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ }
+ },
+ /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
+ { 74250000, 8, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ }
+ },
+ { 74176000, 14, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ }
+ },
+ { 65000000, 14, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ }
+ },
+ /* 480p60/480i60 */
+ { 27030000, 18, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ }
+ },
+ /* 576p50/576i50 */
+ { 27000000, 27, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ }
+ },
+ /* 640x480p60 */
+ { 25200000, 27, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ }
+ },
+};
+
+static inline void pll_write(struct hdmi_pll_8960 *pll, u32 reg, u32 data)
+{
+ msm_writel(data, pll->mmio + reg);
+}
+
+static inline u32 pll_read(struct hdmi_pll_8960 *pll, u32 reg)
+{
+ return msm_readl(pll->mmio + reg);
+}
+
+static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8960 *pll)
+{
+ return platform_get_drvdata(pll->pdev);
+}
+
+static int hdmi_pll_enable(struct clk_hw *hw)
+{
+ struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ int timeout_count, pll_lock_retry = 10;
+ unsigned int val;
+
+ DBG("");
+
+ /* Assert PLL S/W reset */
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
+
+ /* Wait for a short time before de-asserting
+ * to allow the hardware to complete its job.
+ * This much delay should be fine for the hardware
+ * to assert and de-assert.
+ */
+ udelay(10);
+
+ /* De-assert PLL S/W reset */
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_SW_RESET;
+ /* Assert PHY S/W reset */
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+ val &= ~HDMI_8960_PHY_REG12_SW_RESET;
+ /*
+ * Wait for a short time before de-asserting to allow the hardware to
+ * complete its job. This much delay should be fine for the hardware to
+ * assert and de-assert.
+ */
+ udelay(10);
+ /* De-assert PHY S/W reset */
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x3f);
+
+ val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+ /* Wait 10 us for enabling global power for PHY */
+ mb();
+ udelay(10);
+
+ val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
+ val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x80);
+
+ timeout_count = 1000;
+ while (--pll_lock_retry > 0) {
+ /* are we there yet? */
+ val = pll_read(pll, REG_HDMI_8960_PHY_PLL_STATUS0);
+ if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
+ break;
+
+ udelay(1);
+
+ if (--timeout_count > 0)
+ continue;
+
+ /*
+ * PLL has still not locked.
+ * Do a software reset and try again
+ * Assert PLL S/W reset first
+ */
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ udelay(10);
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ /*
+ * Wait for a short duration for the PLL calibration
+ * before checking if the PLL gets locked
+ */
+ udelay(350);
+
+ timeout_count = 1000;
+ }
+
+ return 0;
+}
+
+static void hdmi_pll_disable(struct clk_hw *hw)
+{
+ struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ unsigned int val;
+
+ DBG("");
+
+ val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
+ val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+
+ val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ val |= HDMI_8960_PHY_REG12_SW_RESET;
+ val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ /* Make sure HDMI PHY/PLL are powered down */
+ mb();
+}
+
+static const struct pll_rate *find_rate(unsigned long rate)
+{
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
+ if (rate > freqtbl[i].rate)
+ return &freqtbl[i - 1];
+
+ return &freqtbl[i - 1];
+}
+
+static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+
+ return pll->pixclk;
+}
+
+static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pll_rate *pll_rate = find_rate(rate);
+
+ return pll_rate->rate;
+}
+
+static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+ const struct pll_rate *pll_rate = find_rate(rate);
+ int i;
+
+ DBG("rate=%lu", rate);
+
+ for (i = 0; i < pll_rate->num_reg; i++)
+ pll_write(pll, pll_rate->conf[i].reg, pll_rate->conf[i].val);
+
+ pll->pixclk = rate;
+
+ return 0;
+}
+
+static const struct clk_ops hdmi_pll_ops = {
+ .enable = hdmi_pll_enable,
+ .disable = hdmi_pll_disable,
+ .recalc_rate = hdmi_pll_recalc_rate,
+ .round_rate = hdmi_pll_round_rate,
+ .set_rate = hdmi_pll_set_rate,
+};
+
+static const char * const hdmi_pll_parents[] = {
+ "pxo",
+};
+
+static struct clk_init_data pll_init = {
+ .name = "hdmi_pll",
+ .ops = &hdmi_pll_ops,
+ .parent_names = hdmi_pll_parents,
+ .num_parents = ARRAY_SIZE(hdmi_pll_parents),
+};
+
+int msm_hdmi_pll_8960_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hdmi_pll_8960 *pll;
+ struct clk *clk;
+ int i;
+
+ /* sanity check: */
+ for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
+ if (WARN_ON(freqtbl[i].rate < freqtbl[i + 1].rate))
+ return -EINVAL;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return -ENOMEM;
+
+ pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
+ if (IS_ERR(pll->mmio)) {
+ dev_err(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+
+ pll->pdev = pdev;
+ pll->clk_hw.init = &pll_init;
+
+ clk = devm_clk_register(dev, &pll->clk_hw);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register pll clock\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
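
Because freqtbl is kept sorted highest-to-lowest (enforced by the WARN_ON sanity check in msm_hdmi_pll_8960_init()), find_rate() returns the first entry whose rate is not below the request, i.e. it rounds up to the nearest supported pixel clock and clamps at the table ends. A standalone illustration of the same lookup, with the rates copied from freqtbl and everything else illustrative:

	#include <stdio.h>

	static const unsigned long rates[] = {
		154000000, 148500000, 108000000, 74250000,
		74176000, 65000000, 27030000, 27000000, 25200000,
	};

	static unsigned long pick_rate(unsigned long rate)
	{
		unsigned int i;

		for (i = 1; i < sizeof(rates) / sizeof(rates[0]); i++)
			if (rate > rates[i])
				return rates[i - 1];
		return rates[i - 1];
	}

	int main(void)
	{
		printf("%lu\n", pick_rate(74000000));	/* 74176000 */
		printf("%lu\n", pick_rate(200000000));	/* clamps to 154000000 */
		return 0;
	}
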
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index dbd9cc4daf2e..6eab7d0cf6b5 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index d5d94575fa1b..6688e79cc88e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 28df397c3b04..e233acf52334 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -147,13 +147,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
kfree(mdp4_crtc);
}
-static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
[VG1] = 1,
@@ -361,13 +354,6 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
request_pending(crtc, PENDING_FLIP);
}
-static int mdp4_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property, uint64_t val)
-{
- // XXX
- return -EINVAL;
-}
-
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
@@ -499,7 +485,7 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp4_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
- .set_property = mdp4_crtc_set_property,
+ .set_property = drm_atomic_helper_crtc_set_property,
.cursor_set = mdp4_crtc_cursor_set,
.cursor_move = mdp4_crtc_cursor_move,
.reset = drm_atomic_helper_crtc_reset,
@@ -508,7 +494,6 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
- .mode_fixup = mdp4_crtc_mode_fixup,
.mode_set_nofb = mdp4_crtc_mode_set_nofb,
.disable = mdp4_crtc_disable,
.enable = mdp4_crtc_enable,
@@ -575,13 +560,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
return mdp4_crtc->vblank.irqmask;
}
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- DBG("%s: cancel: %p", mdp4_crtc->name, file);
- complete_flip(crtc, file);
-}
-
/* set dma config, ie. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
index 2f57e9453b67..106f0e772595 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
@@ -47,13 +47,6 @@ static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
.destroy = mdp4_dsi_encoder_destroy,
};
-static bool mdp4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -163,7 +156,6 @@ static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
- .mode_fixup = mdp4_dsi_encoder_mode_fixup,
.mode_set = mdp4_dsi_encoder_mode_set,
.disable = mdp4_dsi_encoder_disable,
.enable = mdp4_dsi_encoder_enable,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index a21df54cb50f..35ad78a1dc1c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -94,13 +94,6 @@ static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
.destroy = mdp4_dtv_encoder_destroy,
};
-static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -234,7 +227,6 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
- .mode_fixup = mdp4_dtv_encoder_mode_fixup,
.mode_set = mdp4_dtv_encoder_mode_set,
.enable = mdp4_dtv_encoder_enable,
.disable = mdp4_dtv_encoder_disable,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 5a8e3d6bcbff..76e1dfb5d25e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -179,19 +179,20 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
}
}
-static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
-{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
- unsigned i;
-
- for (i = 0; i < priv->num_crtcs; i++)
- mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
-}
+static const char * const iommu_ports[] = {
+ "mdp_port0_cb0", "mdp_port1_cb0",
+};
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct msm_mmu *mmu = mdp4_kms->mmu;
+
+ if (mmu) {
+ mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+ mmu->funcs->destroy(mmu);
+ }
+
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
if (mdp4_kms->blank_cursor_bo)
@@ -213,7 +214,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done,
.get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk,
- .preclose = mdp4_preclose,
.destroy = mdp4_destroy,
},
.set_irqmask = mdp4_set_irqmask,
@@ -326,7 +326,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
if (priv->hdmi) {
/* Construct bridge/connector for HDMI: */
- ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
@@ -457,10 +457,6 @@ fail:
return ret;
}
-static const char *iommu_ports[] = {
- "mdp_port0_cb0", "mdp_port1_cb0",
-};
-
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
@@ -565,6 +561,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
+
+ mdp4_kms->mmu = mmu;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index d2c96ef431f4..b2828717be2a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -45,6 +45,7 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
+ struct msm_mmu *mmu;
struct mdp_irq error_handler;
@@ -199,7 +200,6 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane);
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index cd63fedb67cc..bc3d8e719c6c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -260,13 +260,6 @@ static void setup_phy(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
}
-static bool mdp4_lcdc_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -430,7 +423,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
- .mode_fixup = mdp4_lcdc_encoder_mode_fixup,
.mode_set = mdp4_lcdc_encoder_mode_set,
.disable = mdp4_lcdc_encoder_disable,
.enable = mdp4_lcdc_encoder_enable,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index c37da9c61e29..b275ce11b24b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 1aa21dba663d..69094cb28103 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -188,13 +188,6 @@ static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = {
.destroy = mdp5_cmd_encoder_destroy,
};
-static bool mdp5_cmd_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -256,7 +249,6 @@ static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = {
- .mode_fixup = mdp5_cmd_encoder_mode_fixup,
.mode_set = mdp5_cmd_encoder_mode_set,
.disable = mdp5_cmd_encoder_disable,
.enable = mdp5_cmd_encoder_enable,
@@ -340,4 +332,3 @@ fail:
return ERR_PTR(ret);
}
-
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 20cee5ce4071..9673b9520b6a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -185,13 +185,6 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc)
kfree(mdp5_crtc);
}
-static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
/*
* blend_setup() - blend all the planes of a CRTC
*
@@ -468,13 +461,6 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
request_pending(crtc, PENDING_FLIP);
}
-static int mdp5_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property, uint64_t val)
-{
- // XXX
- return -EINVAL;
-}
-
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -625,7 +611,7 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
- .set_property = mdp5_crtc_set_property,
+ .set_property = drm_atomic_helper_crtc_set_property,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -634,7 +620,6 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
- .mode_fixup = mdp5_crtc_mode_fixup,
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
.disable = mdp5_crtc_disable,
.enable = mdp5_crtc_enable,
@@ -721,12 +706,6 @@ uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
return mdp5_crtc->vblank.irqmask;
}
-void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
- DBG("cancel: %p", file);
- complete_flip(crtc, file);
-}
-
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 0d737cad03a6..1d95f9fd9dc7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -112,13 +112,6 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = {
.destroy = mdp5_encoder_destroy,
};
-static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -287,7 +280,6 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
- .mode_fixup = mdp5_encoder_mode_fixup,
.mode_set = mdp5_encoder_mode_set,
.disable = mdp5_encoder_disable,
.enable = mdp5_encoder_enable,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index e115318402bd..484b4d15e71d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -117,16 +117,6 @@ static int mdp5_set_split_display(struct msm_kms *kms,
return mdp5_encoder_set_split_display(encoder, slave_encoder);
}
-static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
-{
- struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
- unsigned i;
-
- for (i = 0; i < priv->num_crtcs; i++)
- mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
-}
-
static void mdp5_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -164,7 +154,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
- .preclose = mdp5_preclose,
.destroy = mdp5_destroy,
},
.set_irqmask = mdp5_set_irqmask,
@@ -295,7 +284,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break;
}
- ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+ ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
break;
case INTF_DSI:
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 00730ba08a60..9a25898239d3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -211,7 +211,6 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
struct mdp5_interface *intf, struct mdp5_ctl *ctl);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 0aec1ac1f6d0..452e3518f98b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9a30807b900b..c03b96709179 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -61,7 +61,7 @@ module_param(fbdev, bool, 0600);
#endif
static char *vram = "16m";
-MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
+MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
/*
@@ -196,6 +196,11 @@ static int msm_unload(struct drm_device *dev)
}
drm_kms_helper_poll_fini(dev);
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (fbdev && priv->fbdev)
+ msm_fbdev_free(dev);
+#endif
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
@@ -462,9 +467,6 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
struct msm_file_private *ctx = file->driver_priv;
struct msm_kms *kms = priv->kms;
- if (kms)
- kms->funcs->preclose(kms, file);
-
mutex_lock(&dev->struct_mutex);
if (ctx == priv->lastctx)
priv->lastctx = NULL;
@@ -1116,7 +1118,7 @@ static int __init msm_drm_register(void)
DBG("init");
msm_dsi_register();
msm_edp_register();
- hdmi_register();
+ msm_hdmi_register();
adreno_register();
return platform_driver_register(&msm_platform_driver);
}
@@ -1125,7 +1127,7 @@ static void __exit msm_drm_unregister(void)
{
DBG("fini");
platform_driver_unregister(&msm_platform_driver);
- hdmi_unregister();
+ msm_hdmi_unregister();
adreno_unregister();
msm_edp_unregister();
msm_dsi_unregister();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c1e7bba2fdb7..870dbe58c259 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -240,12 +240,13 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
+void msm_fbdev_free(struct drm_device *dev);
struct hdmi;
-int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
+int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
-void __init hdmi_register(void);
-void __exit hdmi_unregister(void);
+void __init msm_hdmi_register(void);
+void __exit msm_hdmi_unregister(void);
struct msm_edp;
void __init msm_edp_register(void);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index d95af6eba602..d9759bf3482e 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -62,12 +62,8 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
struct msm_fbdev *fbdev = to_msm_fbdev(helper);
struct drm_gem_object *drm_obj = fbdev->bo;
- struct drm_device *dev = helper->dev;
int ret = 0;
- if (drm_device_is_unplugged(dev))
- return -ENODEV;
-
ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
if (ret) {
pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6d7cd3fe21e7..43d2181231c0 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -323,28 +323,27 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
- struct msm_gpu *gpu;
+ struct msm_gpu *gpu = priv->gpu;
unsigned i;
int ret;
+ if (!gpu)
+ return -ENXIO;
+
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
if (args->pipe != MSM_PIPE_3D0)
return -EINVAL;
- gpu = priv->gpu;
-
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
- mutex_lock(&dev->struct_mutex);
-
submit = submit_create(dev, gpu, args->nr_bos);
- if (!submit) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!submit)
+ return -ENOMEM;
+
+ mutex_lock(&dev->struct_mutex);
ret = submit_lookup_objects(submit, args, file);
if (ret)
@@ -419,8 +418,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence = submit->fence;
out:
- if (submit)
- submit_cleanup(submit, !!ret);
+ submit_cleanup(submit, !!ret);
mutex_unlock(&dev->struct_mutex);
return ret;
}
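
The msm_gem_submit.c reshuffle does three things: it fails fast with -ENXIO when no GPU is bound, it moves the submit_create() allocation out from under dev->struct_mutex so the lock covers only the lookup/pin/submit window, and it drops the now-impossible NULL check before submit_cleanup(). The resulting shape, as an illustrative sketch (do_locked_work() stands in for the lookup and submission steps):

	submit = submit_create(dev, gpu, args->nr_bos);	/* no lock held */
	if (!submit)
		return -ENOMEM;

	mutex_lock(&dev->struct_mutex);
	ret = do_locked_work(submit);
	submit_cleanup(submit, !!ret);
	mutex_unlock(&dev->struct_mutex);

	return ret;
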
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7ac2f1997e4a..a7a0b6d9b057 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -31,13 +31,15 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
return 0;
}
-static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
return iommu_attach_device(iommu->domain, mmu->dev);
}
-static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_detach_device(iommu->domain, mmu->dev);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9bcabaada179..e32222c3d44f 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -55,7 +55,6 @@ struct msm_kms_funcs {
struct drm_encoder *slave_encoder,
bool is_cmd_mode);
/* cleanup: */
- void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*destroy)(struct msm_kms *kms);
};
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 7cd88d9dc155..b8ca9a0e9170 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -21,8 +21,8 @@
#include <linux/iommu.h>
struct msm_mmu_funcs {
- int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
- void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
+ int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
+ void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
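
The const char ** to const char * const * change in msm_mmu.h and msm_iommu.c is what lets callers hand over fully-const tables such as the static iommu_ports[] in mdp4_kms.c. A standalone C illustration of the difference (names illustrative):

	static const char * const ports[] = {
		"mdp_port0_cb0", "mdp_port1_cb0",
	};

	static void takes_const(const char * const *names, int cnt)
	{
		/* may read names[i]; may not reassign the slots */
		(void)names; (void)cnt;
	}

	int main(void)
	{
		takes_const(ports, 2);	/* OK */
		/* A 'const char **' parameter would instead warn that
		 * passing ports discards the const qualifier on the slots. */
		return 0;
	}
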
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6f04397d43a7..55ccbf006b5e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -227,13 +227,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
}
-static bool
-nv_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void
nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
@@ -1093,7 +1086,6 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.dpms = nv_crtc_dpms,
.prepare = nv_crtc_prepare,
.commit = nv_crtc_commit,
- .mode_fixup = nv_crtc_mode_fixup,
.mode_set = nv_crtc_mode_set,
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 85b7827eb782..46301ec018ce 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -3,19 +3,27 @@
struct kepler_channel_gpfifo_a_v0 {
__u8 version;
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR 0x01
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPDEC 0x02
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSPPP 0x04
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_MSVLD 0x08
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 0x10
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1 0x20
-#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40
- __u8 engine;
+ __u8 pad01[5];
__u16 chid;
+#define NVA06F_V0_ENGINE_SW 0x00000001
+#define NVA06F_V0_ENGINE_GR 0x00000002
+#define NVA06F_V0_ENGINE_SEC 0x00000004
+#define NVA06F_V0_ENGINE_MSVLD 0x00000010
+#define NVA06F_V0_ENGINE_MSPDEC 0x00000020
+#define NVA06F_V0_ENGINE_MSPPP 0x00000040
+#define NVA06F_V0_ENGINE_MSENC 0x00000080
+#define NVA06F_V0_ENGINE_VIC 0x00000100
+#define NVA06F_V0_ENGINE_NVDEC 0x00000200
+#define NVA06F_V0_ENGINE_NVENC0 0x00000400
+#define NVA06F_V0_ENGINE_NVENC1 0x00000800
+#define NVA06F_V0_ENGINE_CE0 0x00010000
+#define NVA06F_V0_ENGINE_CE1 0x00020000
+#define NVA06F_V0_ENGINE_CE2 0x00040000
+ __u32 engines;
__u32 ilength;
__u64 ioffset;
__u64 vm;
};
-#define KEPLER_CHANNEL_GPFIFO_A_V0_NTFY_UEVENT 0x00
+#define NVA06F_V0_NTFY_UEVENT 0x00
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 4179cd65ac0a..982aad8fa645 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -37,6 +37,7 @@
#define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f
#define FERMI_CHANNEL_GPFIFO /* cl906f.h */ 0x0000906f
#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
+#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f
#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
#define NV50_DISP /* cl5070.h */ 0x00005070
@@ -48,7 +49,7 @@
#define GK104_DISP /* cl5070.h */ 0x00009170
#define GK110_DISP /* cl5070.h */ 0x00009270
#define GM107_DISP /* cl5070.h */ 0x00009470
-#define GM204_DISP /* cl5070.h */ 0x00009570
+#define GM200_DISP /* cl5070.h */ 0x00009570
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -84,7 +85,7 @@
#define GK104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000917d
#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
-#define GM204_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
+#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index e0ed2f4b2f43..bcb981711617 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -62,6 +62,7 @@ u64 nvif_device_time(struct nvif_device *);
#define nvxx_gpio(a) nvxx_device(a)->gpio
#define nvxx_clk(a) nvxx_device(a)->clk
#define nvxx_i2c(a) nvxx_device(a)->i2c
+#define nvxx_iccsense(a) nvxx_device(a)->iccsense
#define nvxx_therm(a) nvxx_device(a)->therm
#define nvxx_volt(a) nvxx_device(a)->volt
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 913192c94876..4993a863adb9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -22,30 +22,41 @@ enum nvkm_devidx {
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_PMU,
NVKM_SUBDEV_VOLT,
+ NVKM_SUBDEV_ICCSENSE,
NVKM_SUBDEV_THERM,
NVKM_SUBDEV_CLK,
+ NVKM_SUBDEV_SECBOOT,
- NVKM_ENGINE_DMAOBJ,
- NVKM_ENGINE_IFB,
- NVKM_ENGINE_FIFO,
- NVKM_ENGINE_SW,
- NVKM_ENGINE_GR,
- NVKM_ENGINE_MPEG,
- NVKM_ENGINE_ME,
- NVKM_ENGINE_VP,
- NVKM_ENGINE_CIPHER,
NVKM_ENGINE_BSP,
- NVKM_ENGINE_MSPPP,
+
NVKM_ENGINE_CE0,
NVKM_ENGINE_CE1,
NVKM_ENGINE_CE2,
- NVKM_ENGINE_VIC,
- NVKM_ENGINE_MSENC,
+ NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2,
+
+ NVKM_ENGINE_CIPHER,
NVKM_ENGINE_DISP,
- NVKM_ENGINE_PM,
+ NVKM_ENGINE_DMAOBJ,
+ NVKM_ENGINE_FIFO,
+ NVKM_ENGINE_GR,
+ NVKM_ENGINE_IFB,
+ NVKM_ENGINE_ME,
+ NVKM_ENGINE_MPEG,
+ NVKM_ENGINE_MSENC,
+ NVKM_ENGINE_MSPDEC,
+ NVKM_ENGINE_MSPPP,
NVKM_ENGINE_MSVLD,
+
+ NVKM_ENGINE_NVENC0,
+ NVKM_ENGINE_NVENC1,
+ NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1,
+
+ NVKM_ENGINE_NVDEC,
+ NVKM_ENGINE_PM,
NVKM_ENGINE_SEC,
- NVKM_ENGINE_MSPDEC,
+ NVKM_ENGINE_SW,
+ NVKM_ENGINE_VIC,
+ NVKM_ENGINE_VP,
NVKM_SUBDEV_NR
};
@@ -109,6 +120,7 @@ struct nvkm_device {
struct nvkm_gpio *gpio;
struct nvkm_i2c *i2c;
struct nvkm_subdev *ibus;
+ struct nvkm_iccsense *iccsense;
struct nvkm_instmem *imem;
struct nvkm_ltc *ltc;
struct nvkm_mc *mc;
@@ -116,6 +128,7 @@ struct nvkm_device {
struct nvkm_subdev *mxm;
struct nvkm_pci *pci;
struct nvkm_pmu *pmu;
+ struct nvkm_secboot *secboot;
struct nvkm_therm *therm;
struct nvkm_timer *timer;
struct nvkm_volt *volt;
@@ -134,6 +147,8 @@ struct nvkm_device {
struct nvkm_engine *mspdec;
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
+ struct nvkm_engine *nvenc[2];
+ struct nvkm_engine *nvdec;
struct nvkm_pm *pm;
struct nvkm_engine *sec;
struct nvkm_sw *sw;
@@ -164,46 +179,50 @@ struct nvkm_device_quirk {
struct nvkm_device_chip {
const char *name;
- int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **);
- int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **);
- int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
- int (*clk )(struct nvkm_device *, int idx, struct nvkm_clk **);
- int (*devinit)(struct nvkm_device *, int idx, struct nvkm_devinit **);
- int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **);
- int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **);
- int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **);
- int (*i2c )(struct nvkm_device *, int idx, struct nvkm_i2c **);
- int (*ibus )(struct nvkm_device *, int idx, struct nvkm_subdev **);
- int (*imem )(struct nvkm_device *, int idx, struct nvkm_instmem **);
- int (*ltc )(struct nvkm_device *, int idx, struct nvkm_ltc **);
- int (*mc )(struct nvkm_device *, int idx, struct nvkm_mc **);
- int (*mmu )(struct nvkm_device *, int idx, struct nvkm_mmu **);
- int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
- int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
- int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
- int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
- int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
- int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
-
- int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
- int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
- int (*fifo )(struct nvkm_device *, int idx, struct nvkm_fifo **);
- int (*gr )(struct nvkm_device *, int idx, struct nvkm_gr **);
- int (*ifb )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*me )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*mpeg )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*msenc )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
- int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);
- int (*vic )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*bar )(struct nvkm_device *, int idx, struct nvkm_bar **);
+ int (*bios )(struct nvkm_device *, int idx, struct nvkm_bios **);
+ int (*bus )(struct nvkm_device *, int idx, struct nvkm_bus **);
+ int (*clk )(struct nvkm_device *, int idx, struct nvkm_clk **);
+ int (*devinit )(struct nvkm_device *, int idx, struct nvkm_devinit **);
+ int (*fb )(struct nvkm_device *, int idx, struct nvkm_fb **);
+ int (*fuse )(struct nvkm_device *, int idx, struct nvkm_fuse **);
+ int (*gpio )(struct nvkm_device *, int idx, struct nvkm_gpio **);
+ int (*i2c )(struct nvkm_device *, int idx, struct nvkm_i2c **);
+ int (*ibus )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+ int (*iccsense)(struct nvkm_device *, int idx, struct nvkm_iccsense **);
+ int (*imem )(struct nvkm_device *, int idx, struct nvkm_instmem **);
+ int (*ltc )(struct nvkm_device *, int idx, struct nvkm_ltc **);
+ int (*mc )(struct nvkm_device *, int idx, struct nvkm_mc **);
+ int (*mmu )(struct nvkm_device *, int idx, struct nvkm_mmu **);
+ int (*mxm )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+ int (*pci )(struct nvkm_device *, int idx, struct nvkm_pci **);
+ int (*pmu )(struct nvkm_device *, int idx, struct nvkm_pmu **);
+ int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **);
+ int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
+ int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
+ int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
+
+ int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
+ int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
+ int (*fifo )(struct nvkm_device *, int idx, struct nvkm_fifo **);
+ int (*gr )(struct nvkm_device *, int idx, struct nvkm_gr **);
+ int (*ifb )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*me )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*mpeg )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*msenc )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
+ int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);
+ int (*vic )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **);
};
struct nvkm_device *nvkm_device_find(u64 name);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
new file mode 100644
index 000000000000..a626ce378f04
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_FIRMWARE_H__
+#define __NVKM_FIRMWARE_H__
+
+#include <core/device.h>
+
+int nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
+ const struct firmware **fw);
+
+void nvkm_firmware_put(const struct firmware *fw);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index d4f56eafb073..c23da4f05929 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -37,4 +37,8 @@ int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
struct nvkm_vma *);
void nvkm_gpuobj_unmap(struct nvkm_vma *);
+void nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
+ u32 length);
+void nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
+ u32 length);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 16641cec18a2..b5370cb56e3c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -11,6 +11,7 @@ struct nvkm_device_tegra {
struct reset_control *rst;
struct clk *clk;
+ struct clk *clk_ref;
struct clk *clk_pwr;
struct regulator *vdd;
@@ -36,6 +37,10 @@ struct nvkm_device_tegra_func {
* bypassed). A value of 0 means an IOMMU is never used.
*/
u8 iommu_bit;
+ /*
+ * Whether the chip requires a reference clock (currently only GM20B)
+ */
+ bool require_ref_clk;
};
int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index e2e22cd5305b..594d719ba41e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -5,5 +5,6 @@
int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
-int gm204_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index efc74d03346b..d4fdce27b297 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -31,5 +31,5 @@ int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
-int gm204_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 9e6644955d19..15ddfcf5e8db 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -60,8 +60,10 @@ int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk110_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
-int gm204_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index f126e54d2e30..6515f5810a26 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -40,7 +40,6 @@ int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm204_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
-int gm206_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
new file mode 100644
index 000000000000..748ea9b7e559
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
@@ -0,0 +1,4 @@
+#ifndef __NVKM_MSENC_H__
+#define __NVKM_MSENC_H__
+#include <core/engine.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
new file mode 100644
index 000000000000..30b76d13fdcb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -0,0 +1,4 @@
+#ifndef __NVKM_NVDEC_H__
+#define __NVKM_NVDEC_H__
+#include <core/engine.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
new file mode 100644
index 000000000000..8a819328059b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -0,0 +1,4 @@
+#ifndef __NVKM_NVENC_H__
+#define __NVKM_NVENC_H__
+#include <core/engine.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
new file mode 100644
index 000000000000..2b0dc4c695c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
@@ -0,0 +1,4 @@
+#ifndef __NVKM_VIC_H__
+#define __NVKM_VIC_H__
+#include <core/engine.h>
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
index 6d3bedc633b3..bb49bd5f879e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
@@ -5,6 +5,9 @@ enum nvbios_extdev_type {
NVBIOS_EXTDEV_VT1103M = 0x40,
NVBIOS_EXTDEV_PX3540 = 0x41,
NVBIOS_EXTDEV_VT1105M = 0x42, /* or close enough... */
+ NVBIOS_EXTDEV_INA219 = 0x4c,
+ NVBIOS_EXTDEV_INA209 = 0x4d,
+ NVBIOS_EXTDEV_INA3221 = 0x4e,
NVBIOS_EXTDEV_ADT7473 = 0x70, /* can also be a LM64 */
NVBIOS_EXTDEV_HDCP_EEPROM = 0x90,
NVBIOS_EXTDEV_NONE = 0xff,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
new file mode 100644
index 000000000000..9cb97477248b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -0,0 +1,16 @@
+#ifndef __NVBIOS_ICCSENSE_H__
+#define __NVBIOS_ICCSENSE_H__
+struct pwr_rail_t {
+ u8 mode;
+ u8 extdev_id;
+ u8 resistor_mohm;
+ u8 rail;
+};
+
+struct nvbios_iccsense {
+ int nr_entry;
+ struct pwr_rail_t *rail;
+};
+
+int nvbios_iccsense_parse(struct nvkm_bios *, struct nvbios_iccsense *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index 6b33bc058924..fb54417bc458 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -121,4 +121,5 @@ int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gm20b_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 6c1407fd317b..193626c69517 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -27,5 +27,5 @@ int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
-int gm204_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 6b6224dbd5bb..a63c5ac69f66 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -89,7 +89,7 @@ int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
-int gm204_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
static inline int
nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
@@ -108,6 +108,22 @@ nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
}
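+/* 16-bit variants of nvkm_rdi2cr/nvkm_wri2cr; the register value is
+ * transferred high byte first, as expected by the INA209/INA219/INA3221
+ * power sensors polled by the new iccsense subdev. */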
static inline int
+nv_rd16i2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
+{
+ u8 val[2];
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = addr, .flags = I2C_M_RD, .len = 2, .buf = val },
+ };
+
+ int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
+ if (ret != 2)
+ return -EIO;
+
+ return val[0] << 8 | val[1];
+}
+
+static inline int
nvkm_wri2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u8 val)
{
u8 buf[2] = { reg, val };
@@ -122,6 +138,21 @@ nvkm_wri2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u8 val)
return 0;
}
+static inline int
+nv_wr16i2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u16 val)
+{
+ u8 buf[3] = { reg, val >> 8, val & 0xff};
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 3, .buf = buf },
+ };
+
+ int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
+ if (ret != 1)
+ return -EIO;
+
+ return 0;
+}
+
static inline bool
nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
{
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index ea23e24a246c..c4ecf255ff39 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -6,5 +6,5 @@ int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
-int gm204_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
new file mode 100644
index 000000000000..530c6215fe4f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_ICCSENSE_H__
+#define __NVKM_ICCSENSE_H__
+
+#include <core/subdev.h>
+
+struct nvkm_iccsense_rail;
+struct nvkm_iccsense {
+ struct nvkm_subdev subdev;
+ u8 rail_count;
+ bool data_valid;
+ struct nvkm_iccsense_rail *rails;
+};
+
+int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
+int nvkm_iccsense_read(struct nvkm_iccsense *iccsense, u8 idx);
+int nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 0ffa2ec106d6..c6b90b6543b3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -37,5 +37,5 @@ int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
-int gm204_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
new file mode 100644
index 000000000000..c6edd95a5b69
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECURE_BOOT_H__
+#define __NVKM_SECURE_BOOT_H__
+
+#include <core/subdev.h>
+
+enum nvkm_secboot_falcon {
+ NVKM_SECBOOT_FALCON_PMU = 0,
+ NVKM_SECBOOT_FALCON_RESERVED = 1,
+ NVKM_SECBOOT_FALCON_FECS = 2,
+ NVKM_SECBOOT_FALCON_GPCCS = 3,
+ NVKM_SECBOOT_FALCON_END = 4,
+ NVKM_SECBOOT_FALCON_INVALID = 0xffffffff,
+};
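+
+/*
+ * These IDs appear to mirror the light-secure falcon IDs used by NVIDIA's
+ * ACR firmware, so the numbering should be treated as a firmware ABI.
+ */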
+
+/**
+ * @base: base IO address of the falcon performing secure boot
+ * @irq_mask: IRQ mask of the falcon performing secure boot
+ * @enable_mask: enable mask of the falcon performing secure boot
+ */
+struct nvkm_secboot {
+ const struct nvkm_secboot_func *func;
+ struct nvkm_subdev subdev;
+
+ u32 base;
+ u32 irq_mask;
+ u32 enable_mask;
+};
+#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
+
+bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+int nvkm_secboot_reset(struct nvkm_secboot *, u32 falcon);
+int nvkm_secboot_start(struct nvkm_secboot *, u32 falcon);
+
+int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index b458d046dba7..feff55cff05b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -20,4 +20,5 @@ int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 50f52ffe5b0c..a59e524c028c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -263,13 +263,23 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
/* hack to allow channel engine type specification on kepler */
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
if (init->fb_ctxdma_handle != ~0)
- init->fb_ctxdma_handle = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
- else
- init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+ init->fb_ctxdma_handle = NVA06F_V0_ENGINE_GR;
+ else {
+ init->fb_ctxdma_handle = 0;
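+ /* translate the old per-bit engine selector from userspace into
+ * the new NVA06F_V0_ENGINE_* mask */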
+#define _(A,B) if (init->tt_ctxdma_handle & (A)) init->fb_ctxdma_handle |= (B)
+ _(0x01, NVA06F_V0_ENGINE_GR);
+ _(0x02, NVA06F_V0_ENGINE_MSPDEC);
+ _(0x04, NVA06F_V0_ENGINE_MSPPP);
+ _(0x08, NVA06F_V0_ENGINE_MSVLD);
+ _(0x10, NVA06F_V0_ENGINE_CE0);
+ _(0x20, NVA06F_V0_ENGINE_CE1);
+ _(0x40, NVA06F_V0_ENGINE_MSENC);
+#undef _
+ }
/* allow flips to be executed if this is a graphics channel */
init->tt_ctxdma_handle = 0;
- if (init->fb_ctxdma_handle == KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR)
+ if (init->fb_ctxdma_handle == NVA06F_V0_ENGINE_GR)
init->tt_ctxdma_handle = 1;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index d5e6938cc6bc..cdf522770cfa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -314,7 +314,7 @@ void nouveau_register_dsm_handler(void)
if (!r)
return;
- vga_switcheroo_register_handler(&nouveau_dsm_handler);
+ vga_switcheroo_register_handler(&nouveau_dsm_handler, 0);
}
/* Must be called for Optimus models before the card can be turned off */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e3acc35e3805..2cdaea58678d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1502,7 +1502,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
#endif
-#ifdef CONFIG_SWIOTLB
+#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
return ttm_dma_populate((void *)ttm, dev->dev);
}
@@ -1570,7 +1570,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
-#ifdef CONFIG_SWIOTLB
+#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate((void *)ttm, dev->dev);
return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 3f804a8c590c..879655c03ae9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -192,6 +192,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u32 engine, struct nouveau_channel **pchan)
{
static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
+ KEPLER_CHANNEL_GPFIFO_B,
KEPLER_CHANNEL_GPFIFO_A,
FERMI_CHANNEL_GPFIFO,
G82_CHANNEL_GPFIFO,
@@ -217,7 +218,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
do {
if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
args.kepler.version = 0;
- args.kepler.engine = engine;
+ args.kepler.engines = engine;
args.kepler.ilength = 0x02000;
args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
args.kepler.vm = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fcebfae5d426..e81aefe5ffa7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -27,6 +27,7 @@
#include <acpi/button.h>
#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
@@ -153,6 +154,17 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (ret == 0)
break;
} else
+ if ((vga_switcheroo_handler_flags() &
+ VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
+ nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
+ nv_encoder->i2c) {
+ int ret;
+ vga_switcheroo_lock_ddc(dev->pdev);
+ ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);
+ vga_switcheroo_unlock_ddc(dev->pdev);
+ if (ret)
+ break;
+ } else
if (nv_encoder->i2c) {
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
break;
@@ -265,7 +277,14 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
- nv_connector->edid = drm_get_edid(connector, i2c);
+ if ((vga_switcheroo_handler_flags() &
+ VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
+ nv_connector->type == DCB_CONNECTOR_LVDS)
+ nv_connector->edid = drm_get_edid_switcheroo(connector,
+ i2c);
+ else
+ nv_connector->edid = drm_get_edid(connector, i2c);
+
drm_mode_connector_update_edid_property(connector,
nv_connector->edid);
if (!nv_connector->edid) {
@@ -1257,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
default:
if (disp->dithering_mode) {
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
drm_object_attach_property(&connector->base,
disp->dithering_mode,
nv_connector->
dithering_mode);
- nv_connector->dithering_mode = DITHERING_MODE_AUTO;
}
if (disp->dithering_depth) {
+ nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
drm_object_attach_property(&connector->base,
disp->dithering_depth,
nv_connector->
dithering_depth);
- nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
}
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 20935eb2a09e..7ce7fa5cb5e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -495,7 +495,7 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
- GM204_DISP,
+ GM200_DISP,
GM107_DISP,
GK110_DISP,
GK104_DISP,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2f2f252e3fb6..d06877d9c1ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,11 +22,13 @@
* Authors: Ben Skeggs
*/
+#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "drmP.h"
@@ -196,6 +198,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
break;
case FERMI_CHANNEL_GPFIFO:
case KEPLER_CHANNEL_GPFIFO_A:
+ case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
ret = nvc0_fence_create(drm);
break;
@@ -213,13 +216,13 @@ nouveau_accel_init(struct nouveau_drm *drm)
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
ret = nouveau_channel_new(drm, &drm->device,
- KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0|
- KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
+ NVA06F_V0_ENGINE_CE0 |
+ NVA06F_V0_ENGINE_CE1,
0, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
- arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
+ arg0 = NVA06F_V0_ENGINE_GR;
arg1 = 1;
} else
if (device->info.chipset >= 0xa3 &&
@@ -312,6 +315,15 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
bool boot = false;
int ret;
+ /*
+ * apple-gmux is needed on dual GPU MacBook Pro
+ * to probe the panel if we're the inactive GPU.
+ */
+ if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
+ apple_gmux_present() && pdev != vga_default_device() &&
+ !vga_switcheroo_handler_flags())
+ return -EPROBE_DEFER;
+
/* remove conflicting drivers (vesafb, efifb etc) */
aper = alloc_apertures(3);
if (!aper)
@@ -364,7 +376,7 @@ nouveau_get_hdmi_dev(struct nouveau_drm *drm)
struct pci_dev *pdev = drm->dev->pdev;
if (!pdev) {
- DRM_INFO("not a PCI device; no HDMI\n");
+ NV_DEBUG(drm, "not a PCI device; no HDMI\n");
drm->hdmi_device = NULL;
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 8e13467d0ddb..67edd2f5b71a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -34,6 +34,7 @@
#include "nouveau_drm.h"
#include "nouveau_hwmon.h"
+#include <nvkm/subdev/iccsense.h>
#include <nvkm/subdev/volt.h>
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
@@ -543,6 +544,24 @@ nouveau_hwmon_get_in0_label(struct device *d,
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO,
nouveau_hwmon_get_in0_label, NULL, 0);
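+/* board power draw as summed by nvkm_iccsense_read_all(); note that the
+ * hwmon sysfs ABI defines power1_input in microwatts */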
+static ssize_t
+nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device);
+ int result = nvkm_iccsense_read_all(iccsense);
+
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", result);
+}
+
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO,
+ nouveau_hwmon_get_power1_input, NULL, 0);
+
static struct attribute *hwmon_default_attributes[] = {
&sensor_dev_attr_name.dev_attr.attr,
&sensor_dev_attr_update_rate.dev_attr.attr,
@@ -579,6 +598,11 @@ static struct attribute *hwmon_in0_attributes[] = {
NULL
};
+static struct attribute *hwmon_power_attributes[] = {
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ NULL
+};
+
static const struct attribute_group hwmon_default_attrgroup = {
.attrs = hwmon_default_attributes,
};
@@ -594,6 +618,9 @@ static const struct attribute_group hwmon_pwm_fan_attrgroup = {
static const struct attribute_group hwmon_in0_attrgroup = {
.attrs = hwmon_in0_attributes,
};
+static const struct attribute_group hwmon_power_attrgroup = {
+ .attrs = hwmon_power_attributes,
+};
#endif
int
@@ -603,6 +630,7 @@ nouveau_hwmon_init(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->device);
struct nvkm_volt *volt = nvxx_volt(&drm->device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device);
struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
int ret = 0;
@@ -612,10 +640,7 @@ nouveau_hwmon_init(struct drm_device *dev)
return -ENOMEM;
hwmon->dev = dev;
- if (!therm || !therm->attr_get || !therm->attr_set)
- return -ENODEV;
-
- hwmon_dev = hwmon_device_register(&dev->pdev->dev);
+ hwmon_dev = hwmon_device_register(dev->dev);
if (IS_ERR(hwmon_dev)) {
ret = PTR_ERR(hwmon_dev);
NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
@@ -628,26 +653,28 @@ nouveau_hwmon_init(struct drm_device *dev)
if (ret)
goto error;
- /* if the card has a working thermal sensor */
- if (nvkm_therm_temp_get(therm) >= 0) {
- ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
- if (ret)
- goto error;
- }
-
- /* if the card has a pwm fan */
- /*XXX: incorrect, need better detection for this, some boards have
- * the gpio entries for pwm fan control even when there's no
- * actual fan connected to it... therm table? */
- if (therm->fan_get && therm->fan_get(therm) >= 0) {
- ret = sysfs_create_group(&hwmon_dev->kobj,
- &hwmon_pwm_fan_attrgroup);
- if (ret)
- goto error;
+ if (therm && therm->attr_get && therm->attr_set) {
+ /* if the card has a working thermal sensor */
+ if (nvkm_therm_temp_get(therm) >= 0) {
+ ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
+ if (ret)
+ goto error;
+ }
+
+ /* if the card has a pwm fan */
+ /*XXX: incorrect, need better detection for this, some boards have
+ * the gpio entries for pwm fan control even when there's no
+ * actual fan connected to it... therm table? */
+ if (therm->fan_get && therm->fan_get(therm) >= 0) {
+ ret = sysfs_create_group(&hwmon_dev->kobj,
+ &hwmon_pwm_fan_attrgroup);
+ if (ret)
+ goto error;
+ }
}
/* if the card can read the fan rpm */
- if (nvkm_therm_fan_sense(therm) >= 0) {
+ if (therm && nvkm_therm_fan_sense(therm) >= 0) {
ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_fan_rpm_attrgroup);
if (ret)
@@ -662,6 +689,13 @@ nouveau_hwmon_init(struct drm_device *dev)
goto error;
}
+ if (iccsense && iccsense->data_valid && iccsense->rail_count) {
+ ret = sysfs_create_group(&hwmon_dev->kobj,
+ &hwmon_power_attrgroup);
+ if (ret)
+ goto error;
+ }
+
hwmon->hwmon = hwmon_dev;
return 0;
@@ -688,6 +722,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_in0_attrgroup);
+ sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_attrgroup);
hwmon_device_unregister(hwmon->hwmon);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 2dfe58af12e4..4c4cc2260257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -55,6 +55,11 @@ static const struct nvkm_device_tegra_func gk20a_platform_data = {
.iommu_bit = 34,
};
+static const struct nvkm_device_tegra_func gm20b_platform_data = {
+ .iommu_bit = 34,
+ .require_ref_clk = true,
+};
+
static const struct of_device_id nouveau_platform_match[] = {
{
.compatible = "nvidia,gk20a",
@@ -62,7 +67,7 @@ static const struct of_device_id nouveau_platform_match[] = {
},
{
.compatible = "nvidia,gm20b",
- .data = &gk20a_platform_data,
+ .data = &gm20b_platform_data,
},
{ }
};
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ea3921652449..a43445caae60 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -297,7 +297,7 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
.pushbuf = 0xb0007d00,
};
static const s32 oclass[] = {
- GM204_DISP_CORE_CHANNEL_DMA,
+ GM200_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
GK110_DISP_CORE_CHANNEL_DMA,
GK104_DISP_CORE_CHANNEL_DMA,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index 7f66963f305c..86a31a8e1e51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -2,6 +2,7 @@ nvkm-y := nvkm/core/client.o
nvkm-y += nvkm/core/engine.o
nvkm-y += nvkm/core/enum.o
nvkm-y += nvkm/core/event.o
+nvkm-y += nvkm/core/firmware.o
nvkm-y += nvkm/core/gpuobj.o
nvkm-y += nvkm/core/ioctl.o
nvkm-y += nvkm/core/memory.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
new file mode 100644
index 000000000000..34ecd4a7e0c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <core/device.h>
+
+/**
+ * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
+ * @device: device that will use that firmware
+ * @fwname: name of firmware file to load
+ * @fw: firmware structure to load to
+ *
+ * Use this function to load firmware files in the form nvidia/chip/fwname.bin.
+ * Firmware files released by NVIDIA will always follow this format.
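+ *
+ * For example, on a GM200 board a request for the (illustrative) name
+ * "gr/fecs_inst" would resolve to nvidia/gm200/gr/fecs_inst.bin.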
+ */
+int
+nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
+ const struct firmware **fw)
+{
+ char f[64];
+ char cname[16];
+ int i;
+
+ /* Convert device name to lowercase */
+ strncpy(cname, device->chip->name, sizeof(cname));
+ cname[sizeof(cname) - 1] = '\0';
+ i = strlen(cname);
+ while (i) {
+ --i;
+ cname[i] = tolower(cname[i]);
+ }
+
+ snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+ return request_firmware(fw, f, device->dev);
+}
+
+/**
+ * nvkm_firmware_put - release firmware loaded with nvkm_firmware_get
+ * @fw: firmware to release
+ */
+void
+nvkm_firmware_put(const struct firmware *fw)
+{
+ release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index c3a790eb8d6a..a7bd22706b2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -253,3 +253,23 @@ nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
(*pgpuobj)->size = nvkm_memory_size(memory);
return 0;
}
+
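+/* Copy helpers between CPU memory and a gpuobj. Both operate on 32-bit
+ * words via nvkm_wo32()/nvkm_ro32(), so offset and length are assumed to
+ * be multiples of four. */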
+void
+nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
+ u32 length)
+{
+ int i;
+
+ for (i = 0; i < length; i += 4)
+ nvkm_wo32(dst, dstoffset + i, *(u32 *)(src + i));
+}
+
+void
+nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
+ u32 length)
+{
+ int i;
+
+ for (i = 0; i < length; i += 4)
+ ((u32 *)dst)[i / 4] = nvkm_ro32(src, srcoffset + i);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index 3216e157a8a0..89da47234016 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
struct nvkm_ramht *ramht = *pramht;
if (ramht) {
nvkm_gpuobj_del(&ramht->gpuobj);
- kfree(*pramht);
+ vfree(*pramht);
*pramht = NULL;
}
}
@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
struct nvkm_ramht *ramht;
int ret, i;
- if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
- sizeof(*ramht->data), GFP_KERNEL)))
+ if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
+ (size >> 3) * sizeof(*ramht->data))))
return -ENOMEM;
ramht->device = device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 7de98470a2a0..3bf08cb1a289 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -29,47 +29,52 @@ static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
const char *
nvkm_subdev_name[NVKM_SUBDEV_NR] = {
- [NVKM_SUBDEV_BAR ] = "bar",
- [NVKM_SUBDEV_VBIOS ] = "bios",
- [NVKM_SUBDEV_BUS ] = "bus",
- [NVKM_SUBDEV_CLK ] = "clk",
- [NVKM_SUBDEV_DEVINIT] = "devinit",
- [NVKM_SUBDEV_FB ] = "fb",
- [NVKM_SUBDEV_FUSE ] = "fuse",
- [NVKM_SUBDEV_GPIO ] = "gpio",
- [NVKM_SUBDEV_I2C ] = "i2c",
- [NVKM_SUBDEV_IBUS ] = "priv",
- [NVKM_SUBDEV_INSTMEM] = "imem",
- [NVKM_SUBDEV_LTC ] = "ltc",
- [NVKM_SUBDEV_MC ] = "mc",
- [NVKM_SUBDEV_MMU ] = "mmu",
- [NVKM_SUBDEV_MXM ] = "mxm",
- [NVKM_SUBDEV_PCI ] = "pci",
- [NVKM_SUBDEV_PMU ] = "pmu",
- [NVKM_SUBDEV_THERM ] = "therm",
- [NVKM_SUBDEV_TIMER ] = "tmr",
- [NVKM_SUBDEV_VOLT ] = "volt",
- [NVKM_ENGINE_BSP ] = "bsp",
- [NVKM_ENGINE_CE0 ] = "ce0",
- [NVKM_ENGINE_CE1 ] = "ce1",
- [NVKM_ENGINE_CE2 ] = "ce2",
- [NVKM_ENGINE_CIPHER ] = "cipher",
- [NVKM_ENGINE_DISP ] = "disp",
- [NVKM_ENGINE_DMAOBJ ] = "dma",
- [NVKM_ENGINE_FIFO ] = "fifo",
- [NVKM_ENGINE_GR ] = "gr",
- [NVKM_ENGINE_IFB ] = "ifb",
- [NVKM_ENGINE_ME ] = "me",
- [NVKM_ENGINE_MPEG ] = "mpeg",
- [NVKM_ENGINE_MSENC ] = "msenc",
- [NVKM_ENGINE_MSPDEC ] = "mspdec",
- [NVKM_ENGINE_MSPPP ] = "msppp",
- [NVKM_ENGINE_MSVLD ] = "msvld",
- [NVKM_ENGINE_PM ] = "pm",
- [NVKM_ENGINE_SEC ] = "sec",
- [NVKM_ENGINE_SW ] = "sw",
- [NVKM_ENGINE_VIC ] = "vic",
- [NVKM_ENGINE_VP ] = "vp",
+ [NVKM_SUBDEV_BAR ] = "bar",
+ [NVKM_SUBDEV_VBIOS ] = "bios",
+ [NVKM_SUBDEV_BUS ] = "bus",
+ [NVKM_SUBDEV_CLK ] = "clk",
+ [NVKM_SUBDEV_DEVINIT ] = "devinit",
+ [NVKM_SUBDEV_FB ] = "fb",
+ [NVKM_SUBDEV_FUSE ] = "fuse",
+ [NVKM_SUBDEV_GPIO ] = "gpio",
+ [NVKM_SUBDEV_I2C ] = "i2c",
+ [NVKM_SUBDEV_IBUS ] = "priv",
+ [NVKM_SUBDEV_ICCSENSE] = "iccsense",
+ [NVKM_SUBDEV_INSTMEM ] = "imem",
+ [NVKM_SUBDEV_LTC ] = "ltc",
+ [NVKM_SUBDEV_MC ] = "mc",
+ [NVKM_SUBDEV_MMU ] = "mmu",
+ [NVKM_SUBDEV_MXM ] = "mxm",
+ [NVKM_SUBDEV_PCI ] = "pci",
+ [NVKM_SUBDEV_PMU ] = "pmu",
+ [NVKM_SUBDEV_SECBOOT ] = "secboot",
+ [NVKM_SUBDEV_THERM ] = "therm",
+ [NVKM_SUBDEV_TIMER ] = "tmr",
+ [NVKM_SUBDEV_VOLT ] = "volt",
+ [NVKM_ENGINE_BSP ] = "bsp",
+ [NVKM_ENGINE_CE0 ] = "ce0",
+ [NVKM_ENGINE_CE1 ] = "ce1",
+ [NVKM_ENGINE_CE2 ] = "ce2",
+ [NVKM_ENGINE_CIPHER ] = "cipher",
+ [NVKM_ENGINE_DISP ] = "disp",
+ [NVKM_ENGINE_DMAOBJ ] = "dma",
+ [NVKM_ENGINE_FIFO ] = "fifo",
+ [NVKM_ENGINE_GR ] = "gr",
+ [NVKM_ENGINE_IFB ] = "ifb",
+ [NVKM_ENGINE_ME ] = "me",
+ [NVKM_ENGINE_MPEG ] = "mpeg",
+ [NVKM_ENGINE_MSENC ] = "msenc",
+ [NVKM_ENGINE_MSPDEC ] = "mspdec",
+ [NVKM_ENGINE_MSPPP ] = "msppp",
+ [NVKM_ENGINE_MSVLD ] = "msvld",
+ [NVKM_ENGINE_NVENC0 ] = "nvenc0",
+ [NVKM_ENGINE_NVENC1 ] = "nvenc1",
+ [NVKM_ENGINE_NVDEC ] = "nvdec",
+ [NVKM_ENGINE_PM ] = "pm",
+ [NVKM_ENGINE_SEC ] = "sec",
+ [NVKM_ENGINE_SW ] = "sw",
+ [NVKM_ENGINE_VIC ] = "vic",
+ [NVKM_ENGINE_VP ] = "vp",
};
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 36f724763fde..c2c8d2ac01b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -10,10 +10,14 @@ include $(src)/nvkm/engine/dma/Kbuild
include $(src)/nvkm/engine/fifo/Kbuild
include $(src)/nvkm/engine/gr/Kbuild
include $(src)/nvkm/engine/mpeg/Kbuild
+include $(src)/nvkm/engine/msenc/Kbuild
include $(src)/nvkm/engine/mspdec/Kbuild
include $(src)/nvkm/engine/msppp/Kbuild
include $(src)/nvkm/engine/msvld/Kbuild
+include $(src)/nvkm/engine/nvenc/Kbuild
+include $(src)/nvkm/engine/nvdec/Kbuild
include $(src)/nvkm/engine/pm/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
include $(src)/nvkm/engine/sw/Kbuild
+include $(src)/nvkm/engine/vic/Kbuild
include $(src)/nvkm/engine/vp/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index fa8cda7058cd..9c19d59b47df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -1,4 +1,5 @@
nvkm-y += nvkm/engine/ce/gt215.o
nvkm-y += nvkm/engine/ce/gf100.o
nvkm-y += nvkm/engine/ce/gk104.o
-nvkm-y += nvkm/engine/ce/gm204.o
+nvkm-y += nvkm/engine/ce/gm107.o
+nvkm-y += nvkm/engine/ce/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
new file mode 100644
index 000000000000..4c2f42919c1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+gm107_ce = {
+ .intr = gk104_ce_intr,
+ .sclass = {
+ { -1, -1, KEPLER_DMA_COPY_A },
+ { -1, -1, MAXWELL_DMA_COPY_A },
+ {}
+ }
+};
+
+int
+gm107_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
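+ /* the u32 passed to nvkm_engine_new_() below is the PMC enable
+ * mask of the respective copy engine */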
+ if (index == NVKM_ENGINE_CE0) {
+ return nvkm_engine_new_(&gm107_ce, device, index,
+ 0x00000040, true, pengine);
+ } else
+ if (index == NVKM_ENGINE_CE1) {
+ return nvkm_engine_new_(&gm107_ce, device, index,
+ 0x00000080, true, pengine);
+ } else
+ if (index == NVKM_ENGINE_CE2) {
+ return nvkm_engine_new_(&gm107_ce, device, index,
+ 0x00200000, true, pengine);
+ }
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
index 8eaa72a59f40..13f07b32cd9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
static const struct nvkm_engine_func
-gm204_ce = {
+gm200_ce = {
.intr = gk104_ce_intr,
.sclass = {
{ -1, -1, MAXWELL_DMA_COPY_A },
@@ -35,19 +35,19 @@ gm204_ce = {
};
int
-gm204_ce_new(struct nvkm_device *device, int index,
+gm200_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
if (index == NVKM_ENGINE_CE0) {
- return nvkm_engine_new_(&gm204_ce, device, index,
+ return nvkm_engine_new_(&gm200_ce, device, index,
0x00000040, true, pengine);
} else
if (index == NVKM_ENGINE_CE1) {
- return nvkm_engine_new_(&gm204_ce, device, index,
+ return nvkm_engine_new_(&gm200_ce, device, index,
0x00000080, true, pengine);
} else
if (index == NVKM_ENGINE_CE2) {
- return nvkm_engine_new_(&gm204_ce, device, index,
+ return nvkm_engine_new_(&gm200_ce, device, index,
0x00200000, true, pengine);
}
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index b1ba1c782a2b..9f32c8739254 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1347,6 +1347,7 @@ nvc0_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.ibus = gf100_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gf100_ltc_new,
.mc = gf100_mc_new,
@@ -1383,6 +1384,7 @@ nvc1_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.ibus = gf100_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gf100_ltc_new,
.mc = gf100_mc_new,
@@ -1418,6 +1420,7 @@ nvc3_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.ibus = gf100_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gf100_ltc_new,
.mc = gf100_mc_new,
@@ -1453,6 +1456,7 @@ nvc4_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.ibus = gf100_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gf100_ltc_new,
.mc = gf100_mc_new,
@@ -1489,6 +1493,7 @@ nvc8_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.ibus = gf100_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gf100_ltc_new,
.mc = gf100_mc_new,
@@ -1525,6 +1530,7 @@ nvce_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.ibus = gf100_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gf100_ltc_new,
.mc = gf100_mc_new,
@@ -1561,6 +1567,7 @@ nvcf_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.ibus = gf100_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gf100_ltc_new,
.mc = gf100_mc_new,
@@ -1596,6 +1603,7 @@ nvd7_chipset = {
.gpio = gf119_gpio_new,
.i2c = gf117_i2c_new,
.ibus = gf117_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gf100_ltc_new,
.mc = gf100_mc_new,
@@ -1629,6 +1637,7 @@ nvd9_chipset = {
.gpio = gf119_gpio_new,
.i2c = gf119_i2c_new,
.ibus = gf117_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gf100_ltc_new,
.mc = gf100_mc_new,
@@ -1664,6 +1673,7 @@ nve4_chipset = {
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gf100_mc_new,
@@ -1701,6 +1711,7 @@ nve6_chipset = {
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gf100_mc_new,
@@ -1738,6 +1749,7 @@ nve7_chipset = {
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gf100_mc_new,
@@ -1799,6 +1811,7 @@ nvf0_chipset = {
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gf100_mc_new,
@@ -1814,7 +1827,7 @@ nvf0_chipset = {
.ce[2] = gk104_ce_new,
.disp = gk110_disp_new,
.dma = gf119_dma_new,
- .fifo = gk104_fifo_new,
+ .fifo = gk110_fifo_new,
.gr = gk110_gr_new,
.mspdec = gk104_mspdec_new,
.msppp = gf100_msppp_new,
@@ -1835,6 +1848,7 @@ nvf1_chipset = {
.gpio = gk104_gpio_new,
.i2c = gf119_i2c_new,
.ibus = gk104_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gf100_mc_new,
@@ -1850,7 +1864,7 @@ nvf1_chipset = {
.ce[2] = gk104_ce_new,
.disp = gk110_disp_new,
.dma = gf119_dma_new,
- .fifo = gk104_fifo_new,
+ .fifo = gk110_fifo_new,
.gr = gk110b_gr_new,
.mspdec = gk104_mspdec_new,
.msppp = gf100_msppp_new,
@@ -1871,6 +1885,7 @@ nv106_chipset = {
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk20a_mc_new,
@@ -1907,6 +1922,7 @@ nv108_chipset = {
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.ibus = gk104_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk20a_mc_new,
@@ -1943,6 +1959,7 @@ nv117_chipset = {
.gpio = gk104_gpio_new,
.i2c = gf119_i2c_new,
.ibus = gk104_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gm107_ltc_new,
.mc = gk20a_mc_new,
@@ -1953,43 +1970,78 @@ nv117_chipset = {
.therm = gm107_therm_new,
.timer = gk20a_timer_new,
.volt = gk104_volt_new,
- .ce[0] = gk104_ce_new,
- .ce[2] = gk104_ce_new,
+ .ce[0] = gm107_ce_new,
+ .ce[2] = gm107_ce_new,
.disp = gm107_disp_new,
.dma = gf119_dma_new,
- .fifo = gk208_fifo_new,
+ .fifo = gm107_fifo_new,
.gr = gm107_gr_new,
.sw = gf100_sw_new,
};
static const struct nvkm_device_chip
+nv120_chipset = {
+ .name = "GM200",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gm107_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .iccsense = gf100_iccsense_new,
+ .imem = nv50_instmem_new,
+ .ltc = gm200_ltc_new,
+ .mc = gk20a_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = gk104_pci_new,
+ .pmu = gm107_pmu_new,
+ .secboot = gm200_secboot_new,
+ .timer = gk20a_timer_new,
+ .volt = gk104_volt_new,
+ .ce[0] = gm200_ce_new,
+ .ce[1] = gm200_ce_new,
+ .ce[2] = gm200_ce_new,
+ .disp = gm200_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gm200_fifo_new,
+ .gr = gm200_gr_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
nv124_chipset = {
.name = "GM204",
.bar = gf100_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
- .devinit = gm204_devinit_new,
+ .devinit = gm200_devinit_new,
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gm204_i2c_new,
- .ibus = gm204_ibus_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
- .ltc = gm204_ltc_new,
+ .ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
.mmu = gf100_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
+ .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.volt = gk104_volt_new,
- .ce[0] = gm204_ce_new,
- .ce[1] = gm204_ce_new,
- .ce[2] = gm204_ce_new,
- .disp = gm204_disp_new,
+ .ce[0] = gm200_ce_new,
+ .ce[1] = gm200_ce_new,
+ .ce[2] = gm200_ce_new,
+ .disp = gm200_disp_new,
.dma = gf119_dma_new,
- .fifo = gm204_fifo_new,
- .gr = gm204_gr_new,
+ .fifo = gm200_fifo_new,
+ .gr = gm200_gr_new,
.sw = gf100_sw_new,
};
@@ -1999,28 +2051,30 @@ nv126_chipset = {
.bar = gf100_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
- .devinit = gm204_devinit_new,
+ .devinit = gm200_devinit_new,
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
- .i2c = gm204_i2c_new,
- .ibus = gm204_ibus_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
- .ltc = gm204_ltc_new,
+ .ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
.mmu = gf100_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
+ .secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.volt = gk104_volt_new,
- .ce[0] = gm204_ce_new,
- .ce[1] = gm204_ce_new,
- .ce[2] = gm204_ce_new,
- .disp = gm204_disp_new,
+ .ce[0] = gm200_ce_new,
+ .ce[1] = gm200_ce_new,
+ .ce[2] = gm200_ce_new,
+ .disp = gm200_disp_new,
.dma = gf119_dma_new,
- .fifo = gm204_fifo_new,
- .gr = gm206_gr_new,
+ .fifo = gm200_fifo_new,
+ .gr = gm200_gr_new,
.sw = gf100_sw_new,
};
@@ -2029,15 +2083,18 @@ nv12b_chipset = {
.name = "GM20B",
.bar = gk20a_bar_new,
.bus = gf100_bus_new,
+ .clk = gm20b_clk_new,
.fb = gk20a_fb_new,
.fuse = gm107_fuse_new,
.ibus = gk20a_ibus_new,
.imem = gk20a_instmem_new,
- .ltc = gm204_ltc_new,
+ .ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
.mmu = gf100_mmu_new,
+ .secboot = gm20b_secboot_new,
.timer = gk20a_timer_new,
- .ce[2] = gm204_ce_new,
+ .ce[2] = gm200_ce_new,
+ .volt = gm20b_volt_new,
.dma = gf119_dma_new,
.fifo = gm20b_fifo_new,
.gr = gm20b_gr_new,
@@ -2072,26 +2129,28 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
switch (index) {
#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
- _(BAR , device->bar , &device->bar->subdev);
- _(VBIOS , device->bios , &device->bios->subdev);
- _(BUS , device->bus , &device->bus->subdev);
- _(CLK , device->clk , &device->clk->subdev);
- _(DEVINIT, device->devinit, &device->devinit->subdev);
- _(FB , device->fb , &device->fb->subdev);
- _(FUSE , device->fuse , &device->fuse->subdev);
- _(GPIO , device->gpio , &device->gpio->subdev);
- _(I2C , device->i2c , &device->i2c->subdev);
- _(IBUS , device->ibus , device->ibus);
- _(INSTMEM, device->imem , &device->imem->subdev);
- _(LTC , device->ltc , &device->ltc->subdev);
- _(MC , device->mc , &device->mc->subdev);
- _(MMU , device->mmu , &device->mmu->subdev);
- _(MXM , device->mxm , device->mxm);
- _(PCI , device->pci , &device->pci->subdev);
- _(PMU , device->pmu , &device->pmu->subdev);
- _(THERM , device->therm , &device->therm->subdev);
- _(TIMER , device->timer , &device->timer->subdev);
- _(VOLT , device->volt , &device->volt->subdev);
+ _(BAR , device->bar , &device->bar->subdev);
+ _(VBIOS , device->bios , &device->bios->subdev);
+ _(BUS , device->bus , &device->bus->subdev);
+ _(CLK , device->clk , &device->clk->subdev);
+ _(DEVINIT , device->devinit , &device->devinit->subdev);
+ _(FB , device->fb , &device->fb->subdev);
+ _(FUSE , device->fuse , &device->fuse->subdev);
+ _(GPIO , device->gpio , &device->gpio->subdev);
+ _(I2C , device->i2c , &device->i2c->subdev);
+ _(IBUS , device->ibus , device->ibus);
+ _(ICCSENSE, device->iccsense, &device->iccsense->subdev);
+ _(INSTMEM , device->imem , &device->imem->subdev);
+ _(LTC , device->ltc , &device->ltc->subdev);
+ _(MC , device->mc , &device->mc->subdev);
+ _(MMU , device->mmu , &device->mmu->subdev);
+ _(MXM , device->mxm , device->mxm);
+ _(PCI , device->pci , &device->pci->subdev);
+ _(PMU , device->pmu , &device->pmu->subdev);
+ _(SECBOOT , device->secboot , &device->secboot->subdev);
+ _(THERM , device->therm , &device->therm->subdev);
+ _(TIMER , device->timer , &device->timer->subdev);
+ _(VOLT , device->volt , &device->volt->subdev);
#undef _
default:
engine = nvkm_device_engine(device, index);
@@ -2110,27 +2169,30 @@ nvkm_device_engine(struct nvkm_device *device, int index)
switch (index) {
#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
- _(BSP , device->bsp , device->bsp);
- _(CE0 , device->ce[0] , device->ce[0]);
- _(CE1 , device->ce[1] , device->ce[1]);
- _(CE2 , device->ce[2] , device->ce[2]);
- _(CIPHER , device->cipher , device->cipher);
- _(DISP , device->disp , &device->disp->engine);
- _(DMAOBJ , device->dma , &device->dma->engine);
- _(FIFO , device->fifo , &device->fifo->engine);
- _(GR , device->gr , &device->gr->engine);
- _(IFB , device->ifb , device->ifb);
- _(ME , device->me , device->me);
- _(MPEG , device->mpeg , device->mpeg);
- _(MSENC , device->msenc , device->msenc);
- _(MSPDEC , device->mspdec , device->mspdec);
- _(MSPPP , device->msppp , device->msppp);
- _(MSVLD , device->msvld , device->msvld);
- _(PM , device->pm , &device->pm->engine);
- _(SEC , device->sec , device->sec);
- _(SW , device->sw , &device->sw->engine);
- _(VIC , device->vic , device->vic);
- _(VP , device->vp , device->vp);
+ _(BSP , device->bsp , device->bsp);
+ _(CE0 , device->ce[0] , device->ce[0]);
+ _(CE1 , device->ce[1] , device->ce[1]);
+ _(CE2 , device->ce[2] , device->ce[2]);
+ _(CIPHER , device->cipher , device->cipher);
+ _(DISP , device->disp , &device->disp->engine);
+ _(DMAOBJ , device->dma , &device->dma->engine);
+ _(FIFO , device->fifo , &device->fifo->engine);
+ _(GR , device->gr , &device->gr->engine);
+ _(IFB , device->ifb , device->ifb);
+ _(ME , device->me , device->me);
+ _(MPEG , device->mpeg , device->mpeg);
+ _(MSENC , device->msenc , device->msenc);
+ _(MSPDEC , device->mspdec , device->mspdec);
+ _(MSPPP , device->msppp , device->msppp);
+ _(MSVLD , device->msvld , device->msvld);
+ _(NVENC0 , device->nvenc[0], device->nvenc[0]);
+ _(NVENC1 , device->nvenc[1], device->nvenc[1]);
+ _(NVDEC , device->nvdec , device->nvdec);
+ _(PM , device->pm , &device->pm->engine);
+ _(SEC , device->sec , device->sec);
+ _(SW , device->sw , &device->sw->engine);
+ _(VIC , device->vic , device->vic);
+ _(VP , device->vp , device->vp);
#undef _
default:
WARN_ON(1);
@@ -2261,6 +2323,8 @@ fail_subdev:
} while (--i >= 0);
fail:
+ nvkm_device_fini(device, false);
+
nvdev_error(device, "init failed with %d\n", ret);
return ret;
}
@@ -2459,6 +2523,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x106: device->chip = &nv106_chipset; break;
case 0x108: device->chip = &nv108_chipset; break;
case 0x117: device->chip = &nv117_chipset; break;
+ case 0x120: device->chip = &nv120_chipset; break;
case 0x124: device->chip = &nv124_chipset; break;
case 0x126: device->chip = &nv126_chipset; break;
case 0x12b: device->chip = &nv12b_chipset; break;
@@ -2518,47 +2583,52 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
} \
break
switch (i) {
- _(NVKM_SUBDEV_BAR , bar);
- _(NVKM_SUBDEV_VBIOS , bios);
- _(NVKM_SUBDEV_BUS , bus);
- _(NVKM_SUBDEV_CLK , clk);
- _(NVKM_SUBDEV_DEVINIT, devinit);
- _(NVKM_SUBDEV_FB , fb);
- _(NVKM_SUBDEV_FUSE , fuse);
- _(NVKM_SUBDEV_GPIO , gpio);
- _(NVKM_SUBDEV_I2C , i2c);
- _(NVKM_SUBDEV_IBUS , ibus);
- _(NVKM_SUBDEV_INSTMEM, imem);
- _(NVKM_SUBDEV_LTC , ltc);
- _(NVKM_SUBDEV_MC , mc);
- _(NVKM_SUBDEV_MMU , mmu);
- _(NVKM_SUBDEV_MXM , mxm);
- _(NVKM_SUBDEV_PCI , pci);
- _(NVKM_SUBDEV_PMU , pmu);
- _(NVKM_SUBDEV_THERM , therm);
- _(NVKM_SUBDEV_TIMER , timer);
- _(NVKM_SUBDEV_VOLT , volt);
- _(NVKM_ENGINE_BSP , bsp);
- _(NVKM_ENGINE_CE0 , ce[0]);
- _(NVKM_ENGINE_CE1 , ce[1]);
- _(NVKM_ENGINE_CE2 , ce[2]);
- _(NVKM_ENGINE_CIPHER , cipher);
- _(NVKM_ENGINE_DISP , disp);
- _(NVKM_ENGINE_DMAOBJ , dma);
- _(NVKM_ENGINE_FIFO , fifo);
- _(NVKM_ENGINE_GR , gr);
- _(NVKM_ENGINE_IFB , ifb);
- _(NVKM_ENGINE_ME , me);
- _(NVKM_ENGINE_MPEG , mpeg);
- _(NVKM_ENGINE_MSENC , msenc);
- _(NVKM_ENGINE_MSPDEC , mspdec);
- _(NVKM_ENGINE_MSPPP , msppp);
- _(NVKM_ENGINE_MSVLD , msvld);
- _(NVKM_ENGINE_PM , pm);
- _(NVKM_ENGINE_SEC , sec);
- _(NVKM_ENGINE_SW , sw);
- _(NVKM_ENGINE_VIC , vic);
- _(NVKM_ENGINE_VP , vp);
+ _(NVKM_SUBDEV_BAR , bar);
+ _(NVKM_SUBDEV_VBIOS , bios);
+ _(NVKM_SUBDEV_BUS , bus);
+ _(NVKM_SUBDEV_CLK , clk);
+ _(NVKM_SUBDEV_DEVINIT , devinit);
+ _(NVKM_SUBDEV_FB , fb);
+ _(NVKM_SUBDEV_FUSE , fuse);
+ _(NVKM_SUBDEV_GPIO , gpio);
+ _(NVKM_SUBDEV_I2C , i2c);
+ _(NVKM_SUBDEV_IBUS , ibus);
+ _(NVKM_SUBDEV_ICCSENSE, iccsense);
+ _(NVKM_SUBDEV_INSTMEM , imem);
+ _(NVKM_SUBDEV_LTC , ltc);
+ _(NVKM_SUBDEV_MC , mc);
+ _(NVKM_SUBDEV_MMU , mmu);
+ _(NVKM_SUBDEV_MXM , mxm);
+ _(NVKM_SUBDEV_PCI , pci);
+ _(NVKM_SUBDEV_PMU , pmu);
+ _(NVKM_SUBDEV_SECBOOT , secboot);
+ _(NVKM_SUBDEV_THERM , therm);
+ _(NVKM_SUBDEV_TIMER , timer);
+ _(NVKM_SUBDEV_VOLT , volt);
+ _(NVKM_ENGINE_BSP , bsp);
+ _(NVKM_ENGINE_CE0 , ce[0]);
+ _(NVKM_ENGINE_CE1 , ce[1]);
+ _(NVKM_ENGINE_CE2 , ce[2]);
+ _(NVKM_ENGINE_CIPHER , cipher);
+ _(NVKM_ENGINE_DISP , disp);
+ _(NVKM_ENGINE_DMAOBJ , dma);
+ _(NVKM_ENGINE_FIFO , fifo);
+ _(NVKM_ENGINE_GR , gr);
+ _(NVKM_ENGINE_IFB , ifb);
+ _(NVKM_ENGINE_ME , me);
+ _(NVKM_ENGINE_MPEG , mpeg);
+ _(NVKM_ENGINE_MSENC , msenc);
+ _(NVKM_ENGINE_MSPDEC , mspdec);
+ _(NVKM_ENGINE_MSPPP , msppp);
+ _(NVKM_ENGINE_MSVLD , msvld);
+ _(NVKM_ENGINE_NVENC0 , nvenc[0]);
+ _(NVKM_ENGINE_NVENC1 , nvenc[1]);
+ _(NVKM_ENGINE_NVDEC , nvdec);
+ _(NVKM_ENGINE_PM , pm);
+ _(NVKM_ENGINE_SEC , sec);
+ _(NVKM_ENGINE_SW , sw);
+ _(NVKM_ENGINE_VIC , vic);
+ _(NVKM_ENGINE_VP , vp);
default:
WARN_ON(1);
continue;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad0300cfa5..18fab3973ce5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,7 +1614,7 @@ nvkm_device_pci_func = {
.fini = nvkm_device_pci_fini,
.resource_addr = nvkm_device_pci_resource_addr,
.resource_size = nvkm_device_pci_resource_size,
- .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
+ .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index ed3ad2c30e17..e80f6ab1c415 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -12,6 +12,7 @@
#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <subdev/ibus.h>
+#include <subdev/iccsense.h>
#include <subdev/instmem.h>
#include <subdev/ltc.h>
#include <subdev/mc.h>
@@ -22,6 +23,7 @@
#include <subdev/therm.h>
#include <subdev/timer.h>
#include <subdev/volt.h>
+#include <subdev/secboot.h>
#include <engine/bsp.h>
#include <engine/ce.h>
@@ -34,9 +36,12 @@
#include <engine/mspdec.h>
#include <engine/msppp.h>
#include <engine/msvld.h>
+#include <engine/nvenc.h>
+#include <engine/nvdec.h>
#include <engine/pm.h>
#include <engine/sec.h>
#include <engine/sw.h>
+#include <engine/vic.h>
#include <engine/vp.h>
int nvkm_device_ctor(const struct nvkm_device_func *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index e7e581d6a8ff..ec12efb4689a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -35,6 +35,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
ret = clk_prepare_enable(tdev->clk);
if (ret)
goto err_clk;
+ if (tdev->clk_ref) {
+ ret = clk_prepare_enable(tdev->clk_ref);
+ if (ret)
+ goto err_clk_ref;
+ }
ret = clk_prepare_enable(tdev->clk_pwr);
if (ret)
goto err_clk_pwr;
@@ -57,6 +62,9 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
err_clamp:
clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
+ if (tdev->clk_ref)
+ clk_disable_unprepare(tdev->clk_ref);
+err_clk_ref:
clk_disable_unprepare(tdev->clk);
err_clk:
regulator_disable(tdev->vdd);
@@ -71,6 +79,8 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
udelay(10);
clk_disable_unprepare(tdev->clk_pwr);
+ if (tdev->clk_ref)
+ clk_disable_unprepare(tdev->clk_ref);
clk_disable_unprepare(tdev->clk);
udelay(10);
@@ -255,7 +265,6 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->func = func;
tdev->pdev = pdev;
- tdev->irq = -1;
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(tdev->vdd)) {
@@ -275,12 +284,28 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
goto free;
}
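+ /* Optional reference clock: when func->require_ref_clk is unset,
+ * clk_ref stays NULL and the IS_ERR() check below passes, since
+ * IS_ERR(NULL) is false. */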
+ if (func->require_ref_clk)
+ tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
+ if (IS_ERR(tdev->clk_ref)) {
+ ret = PTR_ERR(tdev->clk_ref);
+ goto free;
+ }
+
tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
if (IS_ERR(tdev->clk_pwr)) {
ret = PTR_ERR(tdev->clk_pwr);
goto free;
}
+ /*
+ * The IOMMU bit defines the upper limit of the GPU-addressable space.
+ * This will be refined in nouveau_ttm_init, but we need to do it early
+ * for instmem to behave properly.
+ */
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
+ if (ret)
+ goto free;
+
nvkm_device_tegra_probe_iommu(tdev);
ret = nvkm_device_tegra_power_up(tdev);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 04f60452011e..a74c5dd27dc0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -9,7 +9,7 @@ nvkm-y += nvkm/engine/disp/gf119.o
nvkm-y += nvkm/engine/disp/gk104.o
nvkm-y += nvkm/engine/disp/gk110.o
nvkm-y += nvkm/engine/disp/gm107.o
-nvkm-y += nvkm/engine/disp/gm204.o
+nvkm-y += nvkm/engine/disp/gm200.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/outpdp.o
@@ -18,7 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
nvkm-y += nvkm/engine/disp/sornv50.o
nvkm-y += nvkm/engine/disp/sorg94.o
nvkm-y += nvkm/engine/disp/sorgf119.o
-nvkm-y += nvkm/engine/disp/sorgm204.o
+nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/dport.o
nvkm-y += nvkm/engine/disp/conn.o
@@ -43,7 +43,7 @@ nvkm-y += nvkm/engine/disp/rootgf119.o
nvkm-y += nvkm/engine/disp/rootgk104.o
nvkm-y += nvkm/engine/disp/rootgk110.o
nvkm-y += nvkm/engine/disp/rootgm107.o
-nvkm-y += nvkm/engine/disp/rootgm204.o
+nvkm-y += nvkm/engine/disp/rootgm200.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
@@ -68,7 +68,7 @@ nvkm-y += nvkm/engine/disp/coregf119.o
nvkm-y += nvkm/engine/disp/coregk104.o
nvkm-y += nvkm/engine/disp/coregk110.o
nvkm-y += nvkm/engine/disp/coregm107.o
-nvkm-y += nvkm/engine/disp/coregm204.o
+nvkm-y += nvkm/engine/disp/coregm200.o
nvkm-y += nvkm/engine/disp/ovlynv50.o
nvkm-y += nvkm/engine/disp/ovlyg84.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
index 222f4a822f4d..bb23a8658ac0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm200.c
@@ -27,8 +27,8 @@
#include <nvif/class.h>
const struct nv50_disp_dmac_oclass
-gm204_disp_core_oclass = {
- .base.oclass = GM204_DISP_CORE_CHANNEL_DMA,
+gm200_disp_core_oclass = {
+ .base.oclass = GM200_DISP_CORE_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_core_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index c748ca23ab70..fc84eb8b5c45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -87,5 +87,5 @@ extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
-extern const struct nv50_disp_dmac_oclass gm204_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 30f1987b5b40..67eec8620719 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -25,18 +25,18 @@
#include "rootnv50.h"
static const struct nv50_disp_func
-gm204_disp = {
+gm200_disp = {
.intr = gf119_disp_intr,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
- .root = &gm204_disp_root_oclass,
+ .root = &gm200_disp_root_oclass,
.head.vblank_init = gf119_disp_vblank_init,
.head.vblank_fini = gf119_disp_vblank_fini,
.head.scanoutpos = gf119_disp_root_scanoutpos,
.outp.internal.crt = nv50_dac_output_new,
.outp.internal.tmds = nv50_sor_output_new,
.outp.internal.lvds = nv50_sor_output_new,
- .outp.internal.dp = gm204_sor_dp_new,
+ .outp.internal.dp = gm200_sor_dp_new,
.dac.nr = 3,
.dac.power = nv50_dac_power,
.dac.sense = nv50_dac_sense,
@@ -44,11 +44,11 @@ gm204_disp = {
.sor.power = nv50_sor_power,
.sor.hda_eld = gf119_hda_eld,
.sor.hdmi = gk104_hdmi_ctrl,
- .sor.magic = gm204_sor_magic,
+ .sor.magic = gm200_sor_magic,
};
int
-gm204_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+gm200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
- return gf119_disp_new_(&gm204_disp, device, index, pdisp);
+ return gf119_disp_new_(&gm200_disp, device, index, pdisp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 2590fec67ca9..07727198d7ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -42,7 +42,7 @@ int nv50_pior_output_new(struct nvkm_disp *, int, struct dcb_output *,
u32 g94_sor_dp_lane_map(struct nvkm_device *, u8 lane);
-void gm204_sor_magic(struct nvkm_output *outp);
+void gm200_sor_magic(struct nvkm_output *outp);
#define OUTP_MSG(o,l,f,a...) do { \
struct nvkm_output *_outp = (o); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 731136d660b7..e9067ba4e179 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -63,6 +63,6 @@ int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
-int gm204_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
struct nvkm_output **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
index 168bffe0643c..38f5ee1dfc58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm200.c
@@ -27,11 +27,11 @@
#include <nvif/class.h>
static const struct nv50_disp_root_func
-gm204_disp_root = {
+gm200_disp_root = {
.init = gf119_disp_root_init,
.fini = gf119_disp_root_fini,
.dmac = {
- &gm204_disp_core_oclass,
+ &gm200_disp_core_oclass,
&gk110_disp_base_oclass,
&gk104_disp_ovly_oclass,
},
@@ -42,17 +42,17 @@ gm204_disp_root = {
};
static int
-gm204_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+gm200_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
{
- return nv50_disp_root_new_(&gm204_disp_root, disp, oclass,
+ return nv50_disp_root_new_(&gm200_disp_root, disp, oclass,
data, size, pobject);
}
const struct nvkm_disp_oclass
-gm204_disp_root_oclass = {
- .base.oclass = GM204_DISP,
+gm200_disp_root_oclass = {
+ .base.oclass = GM200_DISP,
.base.minver = -1,
.base.maxver = -1,
- .ctor = gm204_disp_root_new,
+ .ctor = gm200_disp_root_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 5b2c903ce9ee..cb449ed8d92c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -39,5 +39,5 @@ extern const struct nvkm_disp_oclass gf119_disp_root_oclass;
extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
-extern const struct nvkm_disp_oclass gm204_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 029e5f16c2a8..2cfbef9c344f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -27,19 +27,19 @@
#include <subdev/timer.h>
static inline u32
-gm204_sor_soff(struct nvkm_output_dp *outp)
+gm200_sor_soff(struct nvkm_output_dp *outp)
{
return (ffs(outp->base.info.or) - 1) * 0x800;
}
static inline u32
-gm204_sor_loff(struct nvkm_output_dp *outp)
+gm200_sor_loff(struct nvkm_output_dp *outp)
{
- return gm204_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
+ return gm200_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
}
void
-gm204_sor_magic(struct nvkm_output *outp)
+gm200_sor_magic(struct nvkm_output *outp)
{
struct nvkm_device *device = outp->disp->engine.subdev.device;
const u32 soff = outp->or * 0x100;
@@ -51,16 +51,16 @@ gm204_sor_magic(struct nvkm_output *outp)
}
static inline u32
-gm204_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
+gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
{
return lane * 0x08;
}
static int
-gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 soff = gm204_sor_soff(outp);
+ const u32 soff = gm200_sor_soff(outp);
const u32 data = 0x01010101 * pattern;
if (outp->base.info.sorconf.link & 1)
nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
@@ -70,15 +70,15 @@ gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
}
static int
-gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- const u32 soff = gm204_sor_soff(outp);
- const u32 loff = gm204_sor_loff(outp);
+ const u32 soff = gm200_sor_soff(outp);
+ const u32 loff = gm200_sor_loff(outp);
u32 mask = 0, i;
for (i = 0; i < nr; i++)
- mask |= 1 << (gm204_sor_dp_lane_map(device, i) >> 3);
+ mask |= 1 << (gm200_sor_dp_lane_map(device, i) >> 3);
nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
@@ -90,13 +90,13 @@ gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
}
static int
-gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
int ln, int vs, int pe, int pc)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
struct nvkm_bios *bios = device->bios;
- const u32 shift = gm204_sor_dp_lane_map(device, ln);
- const u32 loff = gm204_sor_loff(outp);
+ const u32 shift = gm200_sor_dp_lane_map(device, ln);
+ const u32 loff = gm200_sor_loff(outp);
u32 addr, data[4];
u8 ver, hdr, cnt, len;
struct nvbios_dpout info;
@@ -128,16 +128,16 @@ gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
}
static const struct nvkm_output_dp_func
-gm204_sor_dp_func = {
- .pattern = gm204_sor_dp_pattern,
- .lnk_pwr = gm204_sor_dp_lnk_pwr,
+gm200_sor_dp_func = {
+ .pattern = gm200_sor_dp_pattern,
+ .lnk_pwr = gm200_sor_dp_lnk_pwr,
.lnk_ctl = gf119_sor_dp_lnk_ctl,
- .drv_ctl = gm204_sor_dp_drv_ctl,
+ .drv_ctl = gm200_sor_dp_drv_ctl,
};
int
-gm204_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+gm200_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
struct nvkm_output **poutp)
{
- return nvkm_output_dp_new_(&gm204_sor_dp_func, disp, index, dcbE, poutp);
+ return nvkm_output_dp_new_(&gm200_sor_dp_func, disp, index, dcbE, poutp);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 74993c144a84..65e5d291ecda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -7,9 +7,11 @@ nvkm-y += nvkm/engine/fifo/nv50.o
nvkm-y += nvkm/engine/fifo/g84.o
nvkm-y += nvkm/engine/fifo/gf100.o
nvkm-y += nvkm/engine/fifo/gk104.o
+nvkm-y += nvkm/engine/fifo/gk110.o
nvkm-y += nvkm/engine/fifo/gk208.o
nvkm-y += nvkm/engine/fifo/gk20a.o
-nvkm-y += nvkm/engine/fifo/gm204.o
+nvkm-y += nvkm/engine/fifo/gm107.o
+nvkm-y += nvkm/engine/fifo/gm200.o
nvkm-y += nvkm/engine/fifo/gm20b.o
nvkm-y += nvkm/engine/fifo/chan.o
@@ -27,4 +29,5 @@ nvkm-y += nvkm/engine/fifo/gpfifonv50.o
nvkm-y += nvkm/engine/fifo/gpfifog84.o
nvkm-y += nvkm/engine/fifo/gpfifogf100.o
nvkm-y += nvkm/engine/fifo/gpfifogk104.o
-nvkm-y += nvkm/engine/fifo/gpfifogm204.o
+nvkm-y += nvkm/engine/fifo/gpfifogk110.o
+nvkm-y += nvkm/engine/fifo/gpfifogm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 97bdddb7644a..e06f4d46f802 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -7,7 +7,7 @@
struct gk104_fifo_chan {
struct nvkm_fifo_chan base;
struct gk104_fifo *fifo;
- int engine;
+ int runl;
struct list_head head;
bool killed;
@@ -25,5 +25,6 @@ int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_object **);
extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
-extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 36a39c7fd8d2..352a0baec84d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -54,6 +54,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
struct nvkm_device *device = subdev->device;
struct nvkm_memory *cur;
int nr = 0;
+ int target;
mutex_lock(&subdev->mutex);
cur = fifo->runlist.mem[fifo->runlist.active];
@@ -67,7 +68,10 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
}
nvkm_done(cur);
- nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
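+ /* target is 0x0 for a runlist held in VRAM and 0x3 for one in
+ * host memory; it is shifted into bits 28+ of 0x002270 below. */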
+ target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;
+
+ nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
+ (target << 28));
nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
if (wait_event_timeout(fifo->runlist.wait,
@@ -130,9 +134,9 @@ gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
}
static void
-gf100_fifo_recover_work(struct work_struct *work)
+gf100_fifo_recover_work(struct work_struct *w)
{
- struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
+ struct gf100_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_engine *engine;
unsigned long flags;
@@ -140,15 +144,15 @@ gf100_fifo_recover_work(struct work_struct *work)
u64 mask, todo;
spin_lock_irqsave(&fifo->base.lock, flags);
- mask = fifo->mask;
- fifo->mask = 0ULL;
+ mask = fifo->recover.mask;
+ fifo->recover.mask = 0ULL;
spin_unlock_irqrestore(&fifo->base.lock, flags);
- for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
+ for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn))
engm |= 1 << gf100_fifo_engidx(fifo, engn);
nvkm_mask(device, 0x002630, engm, engm);
- for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
+ for (todo = mask; engn = __ffs64(todo), todo; todo &= ~BIT_ULL(engn)) {
if ((engine = nvkm_device_engine(device, engn))) {
nvkm_subdev_fini(&engine->subdev, false);
WARN_ON(nvkm_subdev_init(&engine->subdev));
@@ -176,8 +180,8 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
list_del_init(&chan->head);
chan->killed = true;
- fifo->mask |= 1ULL << engine->subdev.index;
- schedule_work(&fifo->fault);
+ fifo->recover.mask |= 1ULL << engine->subdev.index;
+ schedule_work(&fifo->recover.work);
}
static const struct nvkm_enum
@@ -330,7 +334,7 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
}
- if (eu) {
+ if (eu && eu->data2) {
switch (eu->data2) {
case NVKM_SUBDEV_BAR:
nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
@@ -544,9 +548,16 @@ static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
struct gf100_fifo *fifo = gf100_fifo(base);
- struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
int ret;
+ /* Determine number of PBDMAs by checking valid enable bits. */
+ nvkm_wr32(device, 0x002204, 0xffffffff);
+ fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x002204));
+ nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
+
+
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
false, &fifo->runlist.mem[0]);
if (ret)
@@ -576,25 +587,22 @@ static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
struct gf100_fifo *fifo = gf100_fifo(base);
- flush_work(&fifo->fault);
+ flush_work(&fifo->recover.work);
}
static void
gf100_fifo_init(struct nvkm_fifo *base)
{
struct gf100_fifo *fifo = gf100_fifo(base);
- struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
int i;
- nvkm_wr32(device, 0x000204, 0xffffffff);
- nvkm_wr32(device, 0x002204, 0xffffffff);
-
- fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
- nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
+ /* Enable PBDMAs. */
+ nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+ nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1);
- /* assign engines to PBDMAs */
- if (fifo->spoon_nr >= 3) {
+ /* Assign engines to PBDMAs. */
+ if (fifo->pbdma_nr >= 3) {
nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
@@ -604,7 +612,7 @@ gf100_fifo_init(struct nvkm_fifo *base)
}
/* PBDMA[n] */
- for (i = 0; i < fifo->spoon_nr; i++) {
+ for (i = 0; i < fifo->pbdma_nr; i++) {
nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
@@ -652,7 +660,7 @@ gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
return -ENOMEM;
INIT_LIST_HEAD(&fifo->chan);
- INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
+ INIT_WORK(&fifo->recover.work, gf100_fifo_recover_work);
*pfifo = &fifo->base;
return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
index 08c33c3ceaf7..70db58eab9c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -11,8 +11,12 @@ struct gf100_fifo {
struct list_head chan;
- struct work_struct fault;
- u64 mask;
+ struct {
+ struct work_struct work;
+ u64 mask;
+ } recover;
+
+ int pbdma_nr;
struct {
struct nvkm_memory *mem[2];
@@ -24,7 +28,6 @@ struct gf100_fifo {
struct nvkm_memory *mem;
struct nvkm_vma bar;
} user;
- int spoon_nr;
};
void gf100_fifo_intr_engine(struct gf100_fifo *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 4fcd147d43c8..68acb36b3e6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -47,34 +47,41 @@ gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
}
void
-gk104_fifo_runlist_commit(struct gk104_fifo *fifo, u32 engine)
+gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
{
- struct gk104_fifo_engn *engn = &fifo->engine[engine];
struct gk104_fifo_chan *chan;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_memory *cur;
+ struct nvkm_memory *mem;
int nr = 0;
+ int target;
mutex_lock(&subdev->mutex);
- cur = engn->runlist[engn->cur_runlist];
- engn->cur_runlist = !engn->cur_runlist;
+ mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
+ fifo->runlist[runl].next = !fifo->runlist[runl].next;
- nvkm_kmap(cur);
- list_for_each_entry(chan, &engn->chan, head) {
- nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
- nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
+ nvkm_kmap(mem);
+ list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+ nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
+ nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
nr++;
}
- nvkm_done(cur);
-
- nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
- nvkm_wr32(device, 0x002274, (engine << 20) | nr);
-
- if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
- (engine * 0x08)) & 0x00100000),
- msecs_to_jiffies(2000)) == 0)
- nvkm_error(subdev, "runlist %d update timeout\n", engine);
+ nvkm_done(mem);
+
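+ /* As on GF100: 0 for a runlist in VRAM, 3 for host memory. */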
+ if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
+ target = 0;
+ else
+ target = 3;
+
+ nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
+ (target << 28));
+ nvkm_wr32(device, 0x002274, (runl << 20) | nr);
+
+ if (wait_event_timeout(fifo->runlist[runl].wait,
+ !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
+ & 0x00100000),
+ msecs_to_jiffies(2000)) == 0)
+ nvkm_error(subdev, "runlist %d update timeout\n", runl);
mutex_unlock(&subdev->mutex);
}
@@ -90,58 +97,51 @@ void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
mutex_lock(&fifo->base.engine.subdev.mutex);
- list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
+ list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
mutex_unlock(&fifo->base.engine.subdev.mutex);
}
-static inline struct nvkm_engine *
-gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
-{
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- u64 subdevs = gk104_fifo_engine_subdev(engn);
- if (subdevs)
- return nvkm_device_engine(device, __ffs(subdevs));
- return NULL;
-}
-
static void
-gk104_fifo_recover_work(struct work_struct *work)
+gk104_fifo_recover_work(struct work_struct *w)
{
- struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
+ struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_engine *engine;
unsigned long flags;
- u32 engn, engm = 0;
- u64 mask, todo;
+ u32 engm, runm, todo;
+ int engn, runl;
spin_lock_irqsave(&fifo->base.lock, flags);
- mask = fifo->mask;
- fifo->mask = 0ULL;
+ runm = fifo->recover.runm;
+ engm = fifo->recover.engm;
+ fifo->recover.engm = 0;
+ fifo->recover.runm = 0;
spin_unlock_irqrestore(&fifo->base.lock, flags);
- for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
- engm |= 1 << gk104_fifo_subdev_engine(engn);
- nvkm_mask(device, 0x002630, engm, engm);
+ nvkm_mask(device, 0x002630, runm, runm);
- for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
- if ((engine = nvkm_device_engine(device, engn))) {
+ for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
+ if ((engine = fifo->engine[engn].engine)) {
nvkm_subdev_fini(&engine->subdev, false);
WARN_ON(nvkm_subdev_init(&engine->subdev));
}
- gk104_fifo_runlist_commit(fifo, gk104_fifo_subdev_engine(engn));
}
- nvkm_wr32(device, 0x00262c, engm);
- nvkm_mask(device, 0x002630, engm, 0x00000000);
+ for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
+ gk104_fifo_runlist_commit(fifo, runl);
+
+ nvkm_wr32(device, 0x00262c, runm);
+ nvkm_mask(device, 0x002630, runm, 0x00000000);
}
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
- struct gk104_fifo_chan *chan)
+ struct gk104_fifo_chan *chan)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
u32 chid = chan->base.chid;
+ int engn;
nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
nvkm_subdev_name[engine->subdev.index], chid);
@@ -151,8 +151,15 @@ gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
list_del_init(&chan->head);
chan->killed = true;
- fifo->mask |= 1ULL << engine->subdev.index;
- schedule_work(&fifo->fault);
+ for (engn = 0; engn < fifo->engine_nr; engn++) {
+ if (fifo->engine[engn].engine == engine) {
+ fifo->recover.engm |= BIT(engn);
+ break;
+ }
+ }
+
+ fifo->recover.runm |= BIT(chan->runl);
+ schedule_work(&fifo->recover.work);
}
static const struct nvkm_enum
@@ -189,32 +196,31 @@ static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
- struct nvkm_engine *engine;
struct gk104_fifo_chan *chan;
unsigned long flags;
u32 engn;
spin_lock_irqsave(&fifo->base.lock, flags);
- for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
+ for (engn = 0; engn < fifo->engine_nr; engn++) {
+ struct nvkm_engine *engine = fifo->engine[engn].engine;
+ int runl = fifo->engine[engn].runl;
u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
u32 busy = (stat & 0x80000000);
- u32 next = (stat & 0x07ff0000) >> 16;
+ u32 next = (stat & 0x0fff0000) >> 16;
u32 chsw = (stat & 0x00008000);
u32 save = (stat & 0x00004000);
u32 load = (stat & 0x00002000);
- u32 prev = (stat & 0x000007ff);
+ u32 prev = (stat & 0x00000fff);
u32 chid = load ? next : prev;
(void)save;
- if (busy && chsw) {
- list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
- if (chan->base.chid == chid) {
- engine = gk104_fifo_engine(fifo, engn);
- if (!engine)
- break;
- gk104_fifo_recover(fifo, engine, chan);
- break;
- }
+ if (!busy || !chsw)
+ continue;
+
+ list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+ if (chan->base.chid == chid && engine) {
+ gk104_fifo_recover(fifo, engine, chan);
+ break;
}
}
}
@@ -395,7 +401,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
}
- if (eu) {
+ if (eu && eu->data2) {
switch (eu->data2) {
case NVKM_SUBDEV_BAR:
nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
@@ -484,9 +490,10 @@ gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
show &= ~0x00800000;
}
- nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
}
+ nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
+
if (show) {
nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
@@ -537,10 +544,10 @@ gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
struct nvkm_device *device = fifo->base.engine.subdev.device;
u32 mask = nvkm_rd32(device, 0x002a00);
while (mask) {
- u32 engn = __ffs(mask);
- wake_up(&fifo->engine[engn].wait);
- nvkm_wr32(device, 0x002a00, 1 << engn);
- mask &= ~(1 << engn);
+ int runl = __ffs(mask);
+ wake_up(&fifo->runlist[runl].wait);
+ nvkm_wr32(device, 0x002a00, 1 << runl);
+ mask &= ~(1 << runl);
}
}
@@ -647,7 +654,7 @@ gk104_fifo_fini(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_device *device = fifo->base.engine.subdev.device;
- flush_work(&fifo->fault);
+ flush_work(&fifo->recover.work);
/* allow mmu fault interrupts, even when we're not using fifo */
nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}
@@ -656,24 +663,122 @@ int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
- struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
int ret, i;
+ u32 *map;
+
+ /* Determine number of PBDMAs by checking valid enable bits. */
+ nvkm_wr32(device, 0x000204, 0xffffffff);
+ fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
+ nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
+
+ /* Read PBDMA->runlist(s) mapping from HW. */
+ if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
+ return -ENOMEM;
+
+ for (i = 0; i < fifo->pbdma_nr; i++)
+ map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));
+
+ /* Read device topology from HW. */
+ for (i = 0; i < 64; i++) {
+ int type = -1, pbid = -1, engidx = -1;
+ int engn = -1, runl = -1, intr = -1, mcen = -1;
+ int fault = -1, j;
+ u32 data, addr = 0;
+
+ do {
+ data = nvkm_rd32(device, 0x022700 + (i * 0x04));
+ nvkm_trace(subdev, "%02x: %08x\n", i, data);
+ switch (data & 0x00000003) {
+ case 0x00000000: /* NOT_VALID */
+ continue;
+ case 0x00000001: /* DATA */
+ addr = (data & 0x00fff000);
+ fault = (data & 0x000000f8) >> 3;
+ break;
+ case 0x00000002: /* ENUM */
+ if (data & 0x00000020)
+ engn = (data & 0x3c000000) >> 26;
+ if (data & 0x00000010)
+ runl = (data & 0x01e00000) >> 21;
+ if (data & 0x00000008)
+ intr = (data & 0x000f8000) >> 15;
+ if (data & 0x00000004)
+ mcen = (data & 0x00003e00) >> 9;
+ break;
+ case 0x00000003: /* ENGINE_TYPE */
+ type = (data & 0x7ffffffc) >> 2;
+ break;
+ }
+ } while ((data & 0x80000000) && ++i < 64);
+
+ if (!data)
+ continue;
+
+ /* Determine which PBDMA handles requests for this engine. */
+ for (j = 0; runl >= 0 && j < fifo->pbdma_nr; j++) {
+ if (map[j] & (1 << runl)) {
+ pbid = j;
+ break;
+ }
+ }
+
+ /* Translate engine type to NVKM engine identifier. */
+ switch (type) {
+ case 0x00000000: engidx = NVKM_ENGINE_GR; break;
+ case 0x00000001: engidx = NVKM_ENGINE_CE0; break;
+ case 0x00000002: engidx = NVKM_ENGINE_CE1; break;
+ case 0x00000003: engidx = NVKM_ENGINE_CE2; break;
+ case 0x00000008: engidx = NVKM_ENGINE_MSPDEC; break;
+ case 0x00000009: engidx = NVKM_ENGINE_MSPPP; break;
+ case 0x0000000a: engidx = NVKM_ENGINE_MSVLD; break;
+ case 0x0000000b: engidx = NVKM_ENGINE_MSENC; break;
+ case 0x0000000c: engidx = NVKM_ENGINE_VIC; break;
+ case 0x0000000d: engidx = NVKM_ENGINE_SEC; break;
+ case 0x0000000e: engidx = NVKM_ENGINE_NVENC0; break;
+ case 0x0000000f: engidx = NVKM_ENGINE_NVENC1; break;
+ case 0x00000010: engidx = NVKM_ENGINE_NVDEC; break;
+ default:
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x (%8s): engine %2d runlist %2d "
+ "pbdma %2d intr %2d reset %2d "
+ "fault %2d addr %06x\n", type,
+ engidx < 0 ? NULL : nvkm_subdev_name[engidx],
+ engn, runl, pbid, intr, mcen, fault, addr);
+
+ /* Mark the engine as supported if everything checks out. */
+ if (engn >= 0 && runl >= 0) {
+ fifo->engine[engn].engine = engidx < 0 ? NULL :
+ nvkm_device_engine(device, engidx);
+ fifo->engine[engn].runl = runl;
+ fifo->engine[engn].pbid = pbid;
+ fifo->engine_nr = max(fifo->engine_nr, engn + 1);
+ fifo->runlist[runl].engm |= 1 << engn;
+ fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
+ }
+ }
- for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+ kfree(map);
+
+ for (i = 0; i < fifo->runlist_nr; i++) {
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
0x8000, 0x1000, false,
- &fifo->engine[i].runlist[0]);
+ &fifo->runlist[i].mem[0]);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
0x8000, 0x1000, false,
- &fifo->engine[i].runlist[1]);
+ &fifo->runlist[i].mem[1]);
if (ret)
return ret;
- init_waitqueue_head(&fifo->engine[i].wait);
- INIT_LIST_HEAD(&fifo->engine[i].chan);
+ init_waitqueue_head(&fifo->runlist[i].wait);
+ INIT_LIST_HEAD(&fifo->runlist[i].chan);
}
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
@@ -695,24 +800,21 @@ void
gk104_fifo_init(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
- struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
int i;
- /* enable all available PBDMA units */
- nvkm_wr32(device, 0x000204, 0xffffffff);
- fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
- nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
+ /* Enable PBDMAs. */
+ nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
/* PBDMA[n] */
- for (i = 0; i < fifo->spoon_nr; i++) {
+ for (i = 0; i < fifo->pbdma_nr; i++) {
nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
}
/* PBDMA[n].HCE */
- for (i = 0; i < fifo->spoon_nr; i++) {
+ for (i = 0; i < fifo->pbdma_nr; i++) {
nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
}
@@ -732,9 +834,9 @@ gk104_fifo_dtor(struct nvkm_fifo *base)
nvkm_vm_put(&fifo->user.bar);
nvkm_memory_del(&fifo->user.mem);
- for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
- nvkm_memory_del(&fifo->engine[i].runlist[1]);
- nvkm_memory_del(&fifo->engine[i].runlist[0]);
+ for (i = 0; i < fifo->runlist_nr; i++) {
+ nvkm_memory_del(&fifo->runlist[i].mem[1]);
+ nvkm_memory_del(&fifo->runlist[i].mem[0]);
}
return fifo;
@@ -748,7 +850,7 @@ gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
return -ENOMEM;
- INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
+ INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
*pfifo = &fifo->base;
return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index bec519d8f91e..9e5d00ba34a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -6,25 +6,37 @@
#include <subdev/mmu.h>
struct gk104_fifo_chan;
-struct gk104_fifo_engn {
- struct nvkm_memory *runlist[2];
- int cur_runlist;
- wait_queue_head_t wait;
- struct list_head chan;
-};
-
struct gk104_fifo {
struct nvkm_fifo base;
- struct work_struct fault;
- u64 mask;
+ struct {
+ struct work_struct work;
+ u32 engm;
+ u32 runm;
+ } recover;
+
+ int pbdma_nr;
+
+ struct {
+ struct nvkm_engine *engine;
+ int runl;
+ int pbid;
+ } engine[16];
+ int engine_nr;
+
+ struct {
+ struct nvkm_memory *mem[2];
+ int next;
+ wait_queue_head_t wait;
+ struct list_head chan;
+ u32 engm;
+ } runlist[16];
+ int runlist_nr;
- struct gk104_fifo_engn engine[7];
struct {
struct nvkm_memory *mem;
struct nvkm_vma bar;
} user;
- int spoon_nr;
};
int gk104_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
@@ -38,7 +50,7 @@ void gk104_fifo_uevent_init(struct nvkm_fifo *);
void gk104_fifo_uevent_fini(struct nvkm_fifo *);
void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
-void gk104_fifo_runlist_commit(struct gk104_fifo *, u32 engine);
+void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
static inline u64
gk104_fifo_engine_subdev(int engine)
@@ -58,23 +70,4 @@ gk104_fifo_engine_subdev(int engine)
return 0;
}
}
-
-static inline int
-gk104_fifo_subdev_engine(int subdev)
-{
- switch (subdev) {
- case NVKM_ENGINE_GR:
- case NVKM_ENGINE_SW:
- case NVKM_ENGINE_CE2 : return 0;
- case NVKM_ENGINE_MSPDEC: return 1;
- case NVKM_ENGINE_MSPPP : return 2;
- case NVKM_ENGINE_MSVLD : return 3;
- case NVKM_ENGINE_CE0 : return 4;
- case NVKM_ENGINE_CE1 : return 5;
- case NVKM_ENGINE_MSENC : return 6;
- default:
- WARN_ON(1);
- return 0;
- }
-}
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
new file mode 100644
index 000000000000..41307fcd4bb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
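+/* Identical to the GK104 implementation apart from the GPFIFO channel
+ * class exposed to userspace. */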
+static const struct nvkm_fifo_func
+gk110_fifo = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = gk104_fifo_intr,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .chan = {
+ &gk110_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+gk110_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gk110_fifo, device, index, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
new file mode 100644
index 000000000000..6d59d65794a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
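+/* Same layout as GK110, but with 2048 channels rather than 4096. */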
+static const struct nvkm_fifo_func
+gm107_fifo = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = gk104_fifo_intr,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .chan = {
+ &gk110_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+gm107_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gm107_fifo, device, index, 2048, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 2db629f1bf7e..4bdd43078df9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -25,7 +25,7 @@
#include "changk104.h"
static const struct nvkm_fifo_func
-gm204_fifo = {
+gm200_fifo = {
.dtor = gk104_fifo_dtor,
.oneinit = gk104_fifo_oneinit,
.init = gk104_fifo_init,
@@ -34,13 +34,13 @@ gm204_fifo = {
.uevent_init = gk104_fifo_uevent_init,
.uevent_fini = gk104_fifo_uevent_fini,
.chan = {
- &gm204_fifo_gpfifo_oclass,
+ &gm200_fifo_gpfifo_oclass,
NULL
},
};
int
-gm204_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+gm200_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
- return gk104_fifo_new_(&gm204_fifo, device, index, 4096, pfifo);
+ return gk104_fifo_new_(&gm200_fifo, device, index, 4096, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index ae6375d9760f..4c91d4aa1e9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -32,7 +32,7 @@ gm20b_fifo = {
.uevent_init = gk104_fifo_uevent_init,
.uevent_fini = gk104_fifo_uevent_fini,
.chan = {
- &gm204_fifo_gpfifo_oclass,
+ &gm200_fifo_gpfifo_oclass,
NULL
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 2e1df01bd928..ed4351032ed6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -63,9 +63,15 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
case NVKM_ENGINE_CE1 :
case NVKM_ENGINE_CE2 : return 0x0000;
case NVKM_ENGINE_GR : return 0x0210;
+ case NVKM_ENGINE_SEC : return 0x0220;
case NVKM_ENGINE_MSPDEC: return 0x0250;
case NVKM_ENGINE_MSPPP : return 0x0260;
case NVKM_ENGINE_MSVLD : return 0x0270;
+ case NVKM_ENGINE_VIC : return 0x0280;
+ case NVKM_ENGINE_MSENC : return 0x0290;
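+ /* Engines with two context pointers pack both 16-bit instmem
+ * offsets into one value; engine_init()/fini() unpack them. */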
+ case NVKM_ENGINE_NVDEC : return 0x02100270;
+ case NVKM_ENGINE_NVENC0: return 0x02100290;
+ case NVKM_ENGINE_NVENC1: return 0x0210;
default:
WARN_ON(1);
return 0;
@@ -76,9 +82,9 @@ static int
gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
{
- const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
struct nvkm_gpuobj *inst = chan->base.inst;
+ u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
int ret;
ret = gk104_fifo_gpfifo_kick(chan);
@@ -87,8 +93,12 @@ gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
if (offset) {
nvkm_kmap(inst);
- nvkm_wo32(inst, offset + 0x00, 0x00000000);
- nvkm_wo32(inst, offset + 0x04, 0x00000000);
+ nvkm_wo32(inst, (offset & 0xffff) + 0x00, 0x00000000);
+ nvkm_wo32(inst, (offset & 0xffff) + 0x04, 0x00000000);
+ if ((offset >>= 16)) {
+ nvkm_wo32(inst, offset + 0x00, 0x00000000);
+ nvkm_wo32(inst, offset + 0x04, 0x00000000);
+ }
nvkm_done(inst);
}
@@ -99,15 +109,21 @@ static int
gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
- const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
struct nvkm_gpuobj *inst = chan->base.inst;
+ u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
if (offset) {
- u64 addr = chan->engn[engine->subdev.index].vma.offset;
+ u64 addr = chan->engn[engine->subdev.index].vma.offset;
+ u32 datalo = lower_32_bits(addr) | 0x00000004;
+ u32 datahi = upper_32_bits(addr);
nvkm_kmap(inst);
- nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
- nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+ nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
+ nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
+ if ((offset >>= 16)) {
+ nvkm_wo32(inst, offset + 0x00, datalo);
+ nvkm_wo32(inst, offset + 0x04, datahi);
+ }
nvkm_done(inst);
}
@@ -154,7 +170,8 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
if (!list_empty(&chan->head)) {
gk104_fifo_runlist_remove(fifo, chan);
nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
- gk104_fifo_runlist_commit(fifo, chan->engine);
+ gk104_fifo_gpfifo_kick(chan);
+ gk104_fifo_runlist_commit(fifo, chan->runl);
}
nvkm_wr32(device, 0x800000 + coff, 0x00000000);
@@ -169,13 +186,13 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
u32 addr = chan->base.inst->addr >> 12;
u32 coff = chan->base.chid * 8;
- nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
+ nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->runl << 16);
nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
if (list_empty(&chan->head) && !chan->killed) {
gk104_fifo_runlist_insert(fifo, chan);
nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
- gk104_fifo_runlist_commit(fifo, chan->engine);
+ gk104_fifo_runlist_commit(fifo, chan->runl);
nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
}
}
@@ -201,73 +218,79 @@ gk104_fifo_gpfifo_func = {
.engine_fini = gk104_fifo_gpfifo_engine_fini,
};
-int
-gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
+struct gk104_fifo_chan_func {
+ u32 engine;
+ u64 subdev;
+};
+
+static int
+gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
+ struct gk104_fifo *fifo, u32 *engmask, u16 *chid,
+ u64 vm, u64 ioffset, u64 ilength,
+ const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
{
- union {
- struct kepler_channel_gpfifo_a_v0 v0;
- } *args = data;
- struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_device *device = fifo->base.engine.subdev.device;
- struct nvkm_object *parent = oclass->parent;
struct gk104_fifo_chan *chan;
- u64 usermem, ioffset, ilength;
- u32 engines;
- int ret = -ENOSYS, i;
-
- nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
- "ioffset %016llx ilength %08x engine %08x\n",
- args->v0.version, args->v0.vm, args->v0.ioffset,
- args->v0.ilength, args->v0.engine);
- } else
- return ret;
-
- /* determine which downstream engines are present */
- for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
- u64 subdevs = gk104_fifo_engine_subdev(i);
- if (!nvkm_device_engine(device, __ffs64(subdevs)))
- continue;
- engines |= (1 << i);
+ int runlist = -1, ret = -ENOSYS, i, j;
+ u32 engines = 0, present = 0;
+ u64 subdevs = 0;
+ u64 usermem;
+
+ /* Determine which downstream engines are present */
+ for (i = 0; i < fifo->engine_nr; i++) {
+ struct nvkm_engine *engine = fifo->engine[i].engine;
+ if (engine) {
+ u64 submask = BIT_ULL(engine->subdev.index);
+ for (j = 0; func[j].subdev; j++) {
+ if (func[j].subdev & submask) {
+ present |= func[j].engine;
+ break;
+ }
+ }
+
+ if (!func[j].subdev)
+ continue;
+
+ if (runlist < 0 && (*engmask & present))
+ runlist = fifo->engine[i].runl;
+ if (runlist == fifo->engine[i].runl) {
+ engines |= func[j].engine;
+ subdevs |= func[j].subdev;
+ }
+ }
}
- /* if this is an engine mask query, we're done */
- if (!args->v0.engine) {
- args->v0.engine = engines;
+ /* Just an engine mask query? All done here! */
+ if (!*engmask) {
+ *engmask = present;
return nvkm_object_new(oclass, NULL, 0, pobject);
}
- /* check that we support a requested engine - note that the user
- * argument is a mask in order to allow the user to request (for
- * example) *any* copy engine, but doesn't matter which.
- */
- args->v0.engine &= engines;
- if (!args->v0.engine) {
- nvif_ioctl(parent, "no supported engine\n");
+ /* No runlist? No supported engines. */
+ *engmask = present;
+ if (runlist < 0)
return -ENODEV;
- }
+ *engmask = engines;
- /* allocate the channel */
+ /* Allocate the channel. */
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
*pobject = &chan->base.object;
chan->fifo = fifo;
- chan->engine = __ffs(args->v0.engine);
+ chan->runl = runlist;
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
- 0x1000, 0x1000, true, args->v0.vm, 0,
- gk104_fifo_engine_subdev(chan->engine),
+ 0x1000, 0x1000, true, vm, 0, subdevs,
1, fifo->user.bar.offset, 0x200,
oclass, &chan->base);
if (ret)
return ret;
- args->v0.chid = chan->base.chid;
+ *chid = chan->base.chid;
- /* page directory */
+ /* Page directory. */
ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
if (ret)
return ret;
@@ -283,10 +306,9 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
if (ret)
return ret;
- /* clear channel control registers */
+ /* Clear channel control registers. */
usermem = chan->base.chid * 0x200;
- ioffset = args->v0.ioffset;
- ilength = order_base_2(args->v0.ilength / 8);
+ ilength = order_base_2(ilength / 8);
nvkm_kmap(fifo->user.mem);
for (i = 0; i < 0x200; i += 4)
@@ -315,6 +337,56 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return 0;
}
+static const struct gk104_fifo_chan_func
+gk104_fifo_gpfifo[] = {
+ { NVA06F_V0_ENGINE_SW | NVA06F_V0_ENGINE_GR,
+ BIT_ULL(NVKM_ENGINE_SW) | BIT_ULL(NVKM_ENGINE_GR)
+ },
+ { NVA06F_V0_ENGINE_SEC , BIT_ULL(NVKM_ENGINE_SEC ) },
+ { NVA06F_V0_ENGINE_MSVLD , BIT_ULL(NVKM_ENGINE_MSVLD ) },
+ { NVA06F_V0_ENGINE_MSPDEC, BIT_ULL(NVKM_ENGINE_MSPDEC) },
+ { NVA06F_V0_ENGINE_MSPPP , BIT_ULL(NVKM_ENGINE_MSPPP ) },
+ { NVA06F_V0_ENGINE_MSENC , BIT_ULL(NVKM_ENGINE_MSENC ) },
+ { NVA06F_V0_ENGINE_VIC , BIT_ULL(NVKM_ENGINE_VIC ) },
+ { NVA06F_V0_ENGINE_NVDEC , BIT_ULL(NVKM_ENGINE_NVDEC ) },
+ { NVA06F_V0_ENGINE_NVENC0, BIT_ULL(NVKM_ENGINE_NVENC0) },
+ { NVA06F_V0_ENGINE_NVENC1, BIT_ULL(NVKM_ENGINE_NVENC1) },
+ { NVA06F_V0_ENGINE_CE0 , BIT_ULL(NVKM_ENGINE_CE0 ) },
+ { NVA06F_V0_ENGINE_CE1 , BIT_ULL(NVKM_ENGINE_CE1 ) },
+ { NVA06F_V0_ENGINE_CE2 , BIT_ULL(NVKM_ENGINE_CE2 ) },
+ {}
+};
+
+int
+gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct kepler_channel_gpfifo_a_v0 v0;
+ } *args = data;
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ "ioffset %016llx ilength %08x engine %08x\n",
+ args->v0.version, args->v0.vm, args->v0.ioffset,
+ args->v0.ilength, args->v0.engines);
+ return gk104_fifo_gpfifo_new_(gk104_fifo_gpfifo, fifo,
+ &args->v0.engines,
+ &args->v0.chid,
+ args->v0.vm,
+ args->v0.ioffset,
+ args->v0.ilength,
+ oclass, pobject);
+ }
+
+ return ret;
+}
+
const struct nvkm_fifo_chan_oclass
gk104_fifo_gpfifo_oclass = {
.base.oclass = KEPLER_CHANNEL_GPFIFO_A,
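
The odd-looking constants in gk104_fifo_gpfifo_engine_addr() above (0x02100270, 0x02100290) are two instance-block offsets packed into one u32: the low 16 bits are the primary context-pointer offset, the high 16 bits an optional secondary one. That is why engine_init()/engine_fini() write through (offset & 0xffff) first and then, if (offset >>= 16) is non-zero, through the second offset as well. A standalone decode sketch, assuming only the C standard library:

	#include <stdint.h>
	#include <stdio.h>

	/* Decode the packed engine context offsets: low 16 bits = primary
	 * offset, high 16 bits = optional secondary offset (0 = none).
	 */
	static void decode_engine_addr(uint32_t addr)
	{
		uint32_t primary = addr & 0xffff;
		uint32_t secondary = addr >> 16;

		printf("primary 0x%04x", primary);
		if (secondary)
			printf(", secondary 0x%04x", secondary);
		putchar('\n');
	}

	int main(void)
	{
		decode_engine_addr(0x0210);	/* GR: single offset        */
		decode_engine_addr(0x02100270);	/* NVDEC: 0x0270 and 0x0210 */
		return 0;
	}

The same file's new gk104_fifo_gpfifo_new_() helper also replaces the old per-engine channel binding with runlist selection: roughly, the first runlist that services any requested engine is chosen, and the engine/subdev masks are then narrowed to what that runlist can actually schedule.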
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c
new file mode 100644
index 000000000000..a9aa69c82e8e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk110.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_fifo_chan_oclass
+gk110_fifo_gpfifo_oclass = {
+ .base.oclass = KEPLER_CHANNEL_GPFIFO_B,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = gk104_fifo_gpfifo_new,
+};
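
gpfifogk110.c exists only to expose the same gk104 constructor under the newer KEPLER_CHANNEL_GPFIFO_B class id. A rough sketch of how such an oclass table is typically walked; my_* names are hypothetical, and the class ids are illustrative stand-ins rather than the real nvif values:

	#include <stdio.h>

	struct my_oclass {
		unsigned id;		/* class id requested by userspace */
		int (*ctor)(void);	/* constructor shared across chips */
	};

	static int my_gpfifo_new(void)
	{
		return 0;
	}

	static const struct my_oclass my_chan_classes[] = {
		{ 0xa06f, my_gpfifo_new },	/* GPFIFO_A-style id */
		{ 0xa16f, my_gpfifo_new },	/* GPFIFO_B-style id */
		{ 0 }				/* terminator        */
	};

	static int my_chan_new(unsigned id)
	{
		const struct my_oclass *c;

		for (c = my_chan_classes; c->id; c++) {
			if (c->id == id)
				return c->ctor();
		}
		return -19; /* -ENODEV-like: class not supported */
	}

	int main(void)
	{
		printf("0xa16f -> %d, 0xdead -> %d\n",
		       my_chan_new(0xa16f), my_chan_new(0xdead));
		return 0;
	}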
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c
index 6511d6e21ecc..a13315147391 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm200.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
const struct nvkm_fifo_chan_oclass
-gm204_fifo_gpfifo_oclass = {
+gm200_fifo_gpfifo_oclass = {
.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
.base.minver = 0,
.base.maxver = 0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 9ad0d0e78a96..290ed0db8047 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -29,8 +29,7 @@ nvkm-y += nvkm/engine/gr/gk110b.o
nvkm-y += nvkm/engine/gr/gk208.o
nvkm-y += nvkm/engine/gr/gk20a.o
nvkm-y += nvkm/engine/gr/gm107.o
-nvkm-y += nvkm/engine/gr/gm204.o
-nvkm-y += nvkm/engine/gr/gm206.o
+nvkm-y += nvkm/engine/gr/gm200.o
nvkm-y += nvkm/engine/gr/gm20b.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
@@ -47,6 +46,5 @@ nvkm-y += nvkm/engine/gr/ctxgk110b.o
nvkm-y += nvkm/engine/gr/ctxgk208.o
nvkm-y += nvkm/engine/gr/ctxgk20a.o
nvkm-y += nvkm/engine/gr/ctxgm107.o
-nvkm-y += nvkm/engine/gr/ctxgm204.o
-nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/ctxgm200.o
nvkm-y += nvkm/engine/gr/ctxgm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 3c64040ec5a2..3c8673958f22 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -97,12 +97,11 @@ void gm107_grctx_generate_bundle(struct gf100_grctx *);
void gm107_grctx_generate_pagepool(struct gf100_grctx *);
void gm107_grctx_generate_attrib(struct gf100_grctx *);
-extern const struct gf100_grctx_func gm204_grctx;
-void gm204_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
-void gm204_grctx_generate_tpcid(struct gf100_gr *);
-void gm204_grctx_generate_405b60(struct gf100_gr *);
+extern const struct gf100_grctx_func gm200_grctx;
+void gm200_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
+void gm200_grctx_generate_tpcid(struct gf100_gr *);
+void gm200_grctx_generate_405b60(struct gf100_gr *);
-extern const struct gf100_grctx_func gm206_grctx;
extern const struct gf100_grctx_func gm20b_grctx;
/* context init value lists */
@@ -210,19 +209,4 @@ extern const struct gf100_gr_init gk208_grctx_init_crstr_0[];
extern const struct gf100_gr_init gm107_grctx_init_gpc_unk_0[];
extern const struct gf100_gr_init gm107_grctx_init_wwdx_0[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_icmd[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_mthd[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_hub[];
-
-extern const struct gf100_gr_init gm204_grctx_init_prop_0[];
-extern const struct gf100_gr_init gm204_grctx_init_setup_0[];
-extern const struct gf100_gr_init gm204_grctx_init_gpm_0[];
-extern const struct gf100_gr_init gm204_grctx_init_gpc_unk_2[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_tpc[];
-
-extern const struct gf100_gr_pack gm204_grctx_pack_ppc[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
new file mode 100644
index 000000000000..e586699fc43f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+void
+gm200_grctx_generate_tpcid(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int gpc, tpc, id;
+
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ if (tpc < gr->tpc_nr[gpc]) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
+ id++;
+ }
+ }
+ }
+}
+
+static void
+gm200_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 fbp_count = nvkm_rd32(device, 0x12006c);
+ nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+void
+gm200_grctx_generate_405b60(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+ u32 dist[TPC_MAX / 4] = {};
+ u32 gpcs[GPC_MAX] = {};
+ u8 tpcnr[GPC_MAX];
+ int tpc, gpc, i;
+
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+
+ /* This won't result in the same distribution as the binary driver,
+ * where some of the GPCs have more TPCs than others, but it shall do
+ * for the moment. The code for earlier GPUs has this issue too.
+ */
+ for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
+ gpcs[gpc] |= i << (tpc * 8);
+ }
+
+ for (i = 0; i < dist_nr; i++)
+ nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+ for (i = 0; i < gr->gpc_nr; i++)
+ nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
+}
+
+void
+gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ u32 tmp;
+ int i;
+
+ gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+ nvkm_wr32(device, 0x404154, 0x00000000);
+
+ grctx->bundle(info);
+ grctx->pagepool(info);
+ grctx->attrib(info);
+ grctx->unkn(gr);
+
+ gm200_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gk104_grctx_generate_r418bb8(gr);
+
+ for (i = 0; i < 8; i++)
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x406500, 0x00000000);
+
+ nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+ gm200_grctx_generate_rop_active_fbps(gr);
+
+ for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+ tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+ nvkm_wr32(device, 0x4041c4, tmp);
+
+ gm200_grctx_generate_405b60(gr);
+
+ gf100_gr_icmd(gr, gr->fuc_bundle);
+ nvkm_wr32(device, 0x404154, 0x00000800);
+ gf100_gr_mthd(gr, gr->fuc_method);
+
+ nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
+ nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
+}
+
+const struct gf100_grctx_func
+gm200_grctx = {
+ .main = gm200_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x780,
+ .pagepool = gm107_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gm107_grctx_generate_attrib,
+ .attrib_nr_max = 0x600,
+ .attrib_nr = 0x400,
+ .alpha_nr_max = 0x1800,
+ .alpha_nr = 0x1000,
+};
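
gm200_grctx_generate_405b60() above deals global TPC ids round-robin across GPCs and byte-packs them two ways: dist[] carries four (gpc << 4 | tpc) bytes per 32-bit word, while gpcs[gpc] carries the global id in byte position tpc. A compileable toy run of the same loop with made-up counts (2 GPCs with 3 and 2 TPCs) to make the packing visible:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint8_t tpc_nr[2] = { 3, 2 };	/* made up: GPC0 has 3 TPCs, GPC1 has 2 */
		int gpc_nr = 2, tpc_total = 5;
		uint32_t dist[2] = {0}, gpcs[2] = {0};
		uint8_t tpcnr[2];
		int tpc, gpc, i;

		memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));

		/* Deal global TPC ids round-robin across GPCs, as the driver does. */
		for (gpc = -1, i = 0; i < tpc_total; i++) {
			do {
				gpc = (gpc + 1) % gpc_nr;
			} while (!tpcnr[gpc]);
			tpc = tpc_nr[gpc] - tpcnr[gpc]--;

			/* one (gpc << 4 | tpc) byte per TPC, four per word */
			dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
			/* global id i lands in byte 'tpc' of its GPC's word */
			gpcs[gpc] |= i << (tpc * 8);
		}

		for (i = 0; i < 2; i++)
			printf("dist[%d] = 0x%08x\n", i, (unsigned)dist[i]);
		for (i = 0; i < gpc_nr; i++)
			printf("gpcs[%d] = 0x%08x\n", i, (unsigned)gpcs[i]);
		return 0;
	}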
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
deleted file mode 100644
index 170cbfdbe1ae..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
+++ /dev/null
@@ -1,1049 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "ctxgf100.h"
-
-/*******************************************************************************
- * PGRAPH context register lists
- ******************************************************************************/
-
-static const struct gf100_gr_init
-gm204_grctx_init_icmd_0[] = {
- { 0x001000, 1, 0x01, 0x00000002 },
- { 0x0006aa, 1, 0x01, 0x00000001 },
- { 0x0006ad, 2, 0x01, 0x00000100 },
- { 0x0006b1, 1, 0x01, 0x00000011 },
- { 0x00078c, 1, 0x01, 0x00000008 },
- { 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 3, 0x01, 0x00000001 },
- { 0x000797, 1, 0x01, 0x000000cf },
- { 0x00079a, 1, 0x01, 0x00000002 },
- { 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 3, 0x01, 0x00000001 },
- { 0x000831, 1, 0x01, 0x00000004 },
- { 0x01e100, 1, 0x01, 0x00000001 },
- { 0x001000, 1, 0x01, 0x00000008 },
- { 0x000039, 3, 0x01, 0x00000000 },
- { 0x000380, 1, 0x01, 0x00000001 },
- { 0x000366, 2, 0x01, 0x00000000 },
- { 0x000368, 1, 0x01, 0x00000fff },
- { 0x000370, 2, 0x01, 0x00000000 },
- { 0x000372, 1, 0x01, 0x000fffff },
- { 0x000374, 1, 0x01, 0x00000100 },
- { 0x000818, 8, 0x01, 0x00000000 },
- { 0x000848, 16, 0x01, 0x00000000 },
- { 0x000738, 1, 0x01, 0x00000000 },
- { 0x000b07, 1, 0x01, 0x00000002 },
- { 0x000b08, 2, 0x01, 0x00000100 },
- { 0x000b0a, 1, 0x01, 0x00000001 },
- { 0x000a04, 1, 0x01, 0x000000ff },
- { 0x000a0b, 1, 0x01, 0x00000040 },
- { 0x00097f, 1, 0x01, 0x00000100 },
- { 0x000a02, 1, 0x01, 0x00000001 },
- { 0x000809, 1, 0x01, 0x00000007 },
- { 0x00c221, 1, 0x01, 0x00000040 },
- { 0x00c401, 1, 0x01, 0x00000001 },
- { 0x00c402, 1, 0x01, 0x00010001 },
- { 0x00c403, 2, 0x01, 0x00000001 },
- { 0x00c40e, 1, 0x01, 0x00000020 },
- { 0x01e100, 1, 0x01, 0x00000001 },
- { 0x001000, 1, 0x01, 0x00000001 },
- { 0x000b07, 1, 0x01, 0x00000002 },
- { 0x000b08, 2, 0x01, 0x00000100 },
- { 0x000b0a, 1, 0x01, 0x00000001 },
- { 0x01e100, 1, 0x01, 0x00000001 },
- { 0x001000, 1, 0x01, 0x00000004 },
- { 0x000039, 3, 0x01, 0x00000000 },
- { 0x0000a9, 1, 0x01, 0x0000ffff },
- { 0x000038, 1, 0x01, 0x0fac6881 },
- { 0x00003d, 1, 0x01, 0x00000001 },
- { 0x0000e8, 8, 0x01, 0x00000400 },
- { 0x000078, 8, 0x01, 0x00000300 },
- { 0x000050, 1, 0x01, 0x00000011 },
- { 0x000058, 8, 0x01, 0x00000008 },
- { 0x000208, 8, 0x01, 0x00000001 },
- { 0x000081, 1, 0x01, 0x00000001 },
- { 0x000085, 1, 0x01, 0x00000004 },
- { 0x000088, 1, 0x01, 0x00000400 },
- { 0x000090, 1, 0x01, 0x00000300 },
- { 0x000098, 1, 0x01, 0x00001001 },
- { 0x0000e3, 1, 0x01, 0x00000001 },
- { 0x0000da, 1, 0x01, 0x00000001 },
- { 0x0000b4, 4, 0x01, 0x88888888 },
- { 0x0000f8, 1, 0x01, 0x00000003 },
- { 0x0000fa, 1, 0x01, 0x00000001 },
- { 0x0000b1, 2, 0x01, 0x00000001 },
- { 0x00009f, 4, 0x01, 0x0000ffff },
- { 0x0000a8, 1, 0x01, 0x0000ffff },
- { 0x0000ad, 1, 0x01, 0x0000013e },
- { 0x0000e1, 1, 0x01, 0x00000010 },
- { 0x000290, 16, 0x01, 0x00000000 },
- { 0x0003b0, 16, 0x01, 0x00000000 },
- { 0x0002a0, 16, 0x01, 0x00000000 },
- { 0x000420, 16, 0x01, 0x00000000 },
- { 0x0002b0, 16, 0x01, 0x00000000 },
- { 0x000430, 16, 0x01, 0x00000000 },
- { 0x0002c0, 16, 0x01, 0x00000000 },
- { 0x0004d0, 16, 0x01, 0x00000000 },
- { 0x000720, 16, 0x01, 0x00000000 },
- { 0x0008c0, 16, 0x01, 0x00000000 },
- { 0x000890, 16, 0x01, 0x00000000 },
- { 0x0008e0, 16, 0x01, 0x00000000 },
- { 0x0008a0, 16, 0x01, 0x00000000 },
- { 0x0008f0, 16, 0x01, 0x00000000 },
- { 0x00094c, 1, 0x01, 0x000000ff },
- { 0x00094d, 1, 0x01, 0xffffffff },
- { 0x00094e, 1, 0x01, 0x00000002 },
- { 0x0002f2, 2, 0x01, 0x00000001 },
- { 0x0002f5, 1, 0x01, 0x00000001 },
- { 0x0002f7, 1, 0x01, 0x00000001 },
- { 0x000303, 1, 0x01, 0x00000001 },
- { 0x0002e6, 1, 0x01, 0x00000001 },
- { 0x000466, 1, 0x01, 0x00000052 },
- { 0x000301, 1, 0x01, 0x3f800000 },
- { 0x000304, 1, 0x01, 0x30201000 },
- { 0x000305, 1, 0x01, 0x70605040 },
- { 0x000306, 1, 0x01, 0xb8a89888 },
- { 0x000307, 1, 0x01, 0xf8e8d8c8 },
- { 0x00030a, 1, 0x01, 0x00ffff00 },
- { 0x00030b, 1, 0x01, 0x0000001a },
- { 0x00030c, 1, 0x01, 0x00000001 },
- { 0x000318, 1, 0x01, 0x00000001 },
- { 0x000340, 1, 0x01, 0x00000000 },
- { 0x00037d, 1, 0x01, 0x00000006 },
- { 0x0003a0, 1, 0x01, 0x00000002 },
- { 0x0003aa, 1, 0x01, 0x00000001 },
- { 0x0003a9, 1, 0x01, 0x00000001 },
- { 0x000380, 1, 0x01, 0x00000001 },
- { 0x000383, 1, 0x01, 0x00000011 },
- { 0x000360, 1, 0x01, 0x00000040 },
- { 0x000366, 2, 0x01, 0x00000000 },
- { 0x000368, 1, 0x01, 0x00000fff },
- { 0x000370, 2, 0x01, 0x00000000 },
- { 0x000372, 1, 0x01, 0x000fffff },
- { 0x000374, 1, 0x01, 0x00000100 },
- { 0x00037a, 1, 0x01, 0x00000012 },
- { 0x000619, 1, 0x01, 0x00000003 },
- { 0x000811, 1, 0x01, 0x00000003 },
- { 0x000812, 1, 0x01, 0x00000004 },
- { 0x000813, 1, 0x01, 0x00000006 },
- { 0x000814, 1, 0x01, 0x00000008 },
- { 0x000815, 1, 0x01, 0x0000000b },
- { 0x000800, 6, 0x01, 0x00000001 },
- { 0x000632, 1, 0x01, 0x00000001 },
- { 0x000633, 1, 0x01, 0x00000002 },
- { 0x000634, 1, 0x01, 0x00000003 },
- { 0x000635, 1, 0x01, 0x00000004 },
- { 0x000654, 1, 0x01, 0x3f800000 },
- { 0x000657, 1, 0x01, 0x3f800000 },
- { 0x000655, 2, 0x01, 0x3f800000 },
- { 0x0006cd, 1, 0x01, 0x3f800000 },
- { 0x0007f5, 1, 0x01, 0x3f800000 },
- { 0x0007dc, 1, 0x01, 0x39291909 },
- { 0x0007dd, 1, 0x01, 0x79695949 },
- { 0x0007de, 1, 0x01, 0xb9a99989 },
- { 0x0007df, 1, 0x01, 0xf9e9d9c9 },
- { 0x0007e8, 1, 0x01, 0x00003210 },
- { 0x0007e9, 1, 0x01, 0x00007654 },
- { 0x0007ea, 1, 0x01, 0x00000098 },
- { 0x0007ec, 1, 0x01, 0x39291909 },
- { 0x0007ed, 1, 0x01, 0x79695949 },
- { 0x0007ee, 1, 0x01, 0xb9a99989 },
- { 0x0007ef, 1, 0x01, 0xf9e9d9c9 },
- { 0x0007f0, 1, 0x01, 0x00003210 },
- { 0x0007f1, 1, 0x01, 0x00007654 },
- { 0x0007f2, 1, 0x01, 0x00000098 },
- { 0x0005a5, 1, 0x01, 0x00000001 },
- { 0x0005aa, 1, 0x01, 0x00000002 },
- { 0x0005cb, 1, 0x01, 0x00000004 },
- { 0x0005d0, 1, 0x01, 0x20181008 },
- { 0x0005d1, 1, 0x01, 0x40383028 },
- { 0x0005d2, 1, 0x01, 0x60585048 },
- { 0x0005d3, 1, 0x01, 0x80787068 },
- { 0x000980, 128, 0x01, 0x00000000 },
- { 0x000468, 1, 0x01, 0x00000004 },
- { 0x00046c, 1, 0x01, 0x00000001 },
- { 0x000470, 96, 0x01, 0x00000000 },
- { 0x0005e0, 16, 0x01, 0x00000d10 },
- { 0x000510, 16, 0x01, 0x3f800000 },
- { 0x000520, 1, 0x01, 0x000002b6 },
- { 0x000529, 1, 0x01, 0x00000001 },
- { 0x000530, 16, 0x01, 0xffff0000 },
- { 0x000550, 32, 0x01, 0xffff0000 },
- { 0x000585, 1, 0x01, 0x0000003f },
- { 0x000576, 1, 0x01, 0x00000003 },
- { 0x00057b, 1, 0x01, 0x00000059 },
- { 0x000586, 1, 0x01, 0x00000040 },
- { 0x000582, 2, 0x01, 0x00000080 },
- { 0x000595, 1, 0x01, 0x00400040 },
- { 0x000596, 1, 0x01, 0x00000492 },
- { 0x000597, 1, 0x01, 0x08080203 },
- { 0x0005ad, 1, 0x01, 0x00000008 },
- { 0x000598, 1, 0x01, 0x00020001 },
- { 0x0005d4, 1, 0x01, 0x00000001 },
- { 0x0005c2, 1, 0x01, 0x00000001 },
- { 0x000638, 2, 0x01, 0x00000001 },
- { 0x00063a, 1, 0x01, 0x00000002 },
- { 0x00063b, 2, 0x01, 0x00000001 },
- { 0x00063d, 1, 0x01, 0x00000002 },
- { 0x00063e, 1, 0x01, 0x00000001 },
- { 0x0008b8, 8, 0x01, 0x00000001 },
- { 0x000900, 8, 0x01, 0x00000001 },
- { 0x000908, 8, 0x01, 0x00000002 },
- { 0x000910, 16, 0x01, 0x00000001 },
- { 0x000920, 8, 0x01, 0x00000002 },
- { 0x000928, 8, 0x01, 0x00000001 },
- { 0x000662, 1, 0x01, 0x00000001 },
- { 0x000648, 9, 0x01, 0x00000001 },
- { 0x000674, 1, 0x01, 0x00000001 },
- { 0x000658, 1, 0x01, 0x0000000f },
- { 0x0007ff, 1, 0x01, 0x0000000a },
- { 0x00066a, 1, 0x01, 0x40000000 },
- { 0x00066b, 1, 0x01, 0x10000000 },
- { 0x00066c, 2, 0x01, 0xffff0000 },
- { 0x0007af, 2, 0x01, 0x00000008 },
- { 0x0007f6, 1, 0x01, 0x00000001 },
- { 0x0006b2, 1, 0x01, 0x00000055 },
- { 0x0007ad, 1, 0x01, 0x00000003 },
- { 0x000971, 1, 0x01, 0x00000008 },
- { 0x000972, 1, 0x01, 0x00000040 },
- { 0x000973, 1, 0x01, 0x0000012c },
- { 0x00097c, 1, 0x01, 0x00000040 },
- { 0x000975, 1, 0x01, 0x00000020 },
- { 0x000976, 1, 0x01, 0x00000001 },
- { 0x000977, 1, 0x01, 0x00000020 },
- { 0x000978, 1, 0x01, 0x00000001 },
- { 0x000957, 1, 0x01, 0x00000003 },
- { 0x00095e, 1, 0x01, 0x20164010 },
- { 0x00095f, 1, 0x01, 0x00000020 },
- { 0x000a0d, 1, 0x01, 0x00000006 },
- { 0x00097d, 1, 0x01, 0x0000000c },
- { 0x000683, 1, 0x01, 0x00000006 },
- { 0x000687, 1, 0x01, 0x003fffff },
- { 0x0006a0, 1, 0x01, 0x00000005 },
- { 0x000840, 1, 0x01, 0x00400008 },
- { 0x000841, 1, 0x01, 0x08000080 },
- { 0x000842, 1, 0x01, 0x00400008 },
- { 0x000843, 1, 0x01, 0x08000080 },
- { 0x000818, 8, 0x01, 0x00000000 },
- { 0x000848, 16, 0x01, 0x00000000 },
- { 0x000738, 1, 0x01, 0x00000000 },
- { 0x0006aa, 1, 0x01, 0x00000001 },
- { 0x0006ab, 1, 0x01, 0x00000002 },
- { 0x0006ac, 1, 0x01, 0x00000080 },
- { 0x0006ad, 2, 0x01, 0x00000100 },
- { 0x0006b1, 1, 0x01, 0x00000011 },
- { 0x0006bb, 1, 0x01, 0x000000cf },
- { 0x0006ce, 1, 0x01, 0x2a712488 },
- { 0x000739, 1, 0x01, 0x4085c000 },
- { 0x00073a, 1, 0x01, 0x00000080 },
- { 0x000786, 1, 0x01, 0x80000100 },
- { 0x00073c, 1, 0x01, 0x00010100 },
- { 0x00073d, 1, 0x01, 0x02800000 },
- { 0x000787, 1, 0x01, 0x000000cf },
- { 0x00078c, 1, 0x01, 0x00000008 },
- { 0x000792, 1, 0x01, 0x00000001 },
- { 0x000794, 3, 0x01, 0x00000001 },
- { 0x000797, 1, 0x01, 0x000000cf },
- { 0x000836, 1, 0x01, 0x00000001 },
- { 0x00079a, 1, 0x01, 0x00000002 },
- { 0x000833, 1, 0x01, 0x04444480 },
- { 0x0007a1, 1, 0x01, 0x00000001 },
- { 0x0007a3, 3, 0x01, 0x00000001 },
- { 0x000831, 1, 0x01, 0x00000004 },
- { 0x000b07, 1, 0x01, 0x00000002 },
- { 0x000b08, 2, 0x01, 0x00000100 },
- { 0x000b0a, 1, 0x01, 0x00000001 },
- { 0x000a04, 1, 0x01, 0x000000ff },
- { 0x000a0b, 1, 0x01, 0x00000040 },
- { 0x00097f, 1, 0x01, 0x00000100 },
- { 0x000a02, 1, 0x01, 0x00000001 },
- { 0x000809, 1, 0x01, 0x00000007 },
- { 0x00c221, 1, 0x01, 0x00000040 },
- { 0x00c1b0, 8, 0x01, 0x0000000f },
- { 0x00c1b8, 1, 0x01, 0x0fac6881 },
- { 0x00c1b9, 1, 0x01, 0x00fac688 },
- { 0x00c401, 1, 0x01, 0x00000001 },
- { 0x00c402, 1, 0x01, 0x00010001 },
- { 0x00c403, 2, 0x01, 0x00000001 },
- { 0x00c40e, 1, 0x01, 0x00000020 },
- { 0x00c413, 4, 0x01, 0x88888888 },
- { 0x00c423, 1, 0x01, 0x0000ff00 },
- { 0x00c420, 1, 0x01, 0x00880101 },
- { 0x01e100, 1, 0x01, 0x00000001 },
- {}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_icmd[] = {
- { gm204_grctx_init_icmd_0 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_b197_0[] = {
- { 0x000800, 8, 0x40, 0x00000000 },
- { 0x000804, 8, 0x40, 0x00000000 },
- { 0x000808, 8, 0x40, 0x00000400 },
- { 0x00080c, 8, 0x40, 0x00000300 },
- { 0x000810, 1, 0x04, 0x000000cf },
- { 0x000850, 7, 0x40, 0x00000000 },
- { 0x000814, 8, 0x40, 0x00000040 },
- { 0x000818, 8, 0x40, 0x00000001 },
- { 0x00081c, 8, 0x40, 0x00000000 },
- { 0x000820, 8, 0x40, 0x00000000 },
- { 0x001c00, 16, 0x10, 0x00000000 },
- { 0x001c04, 16, 0x10, 0x00000000 },
- { 0x001c08, 16, 0x10, 0x00000000 },
- { 0x001c0c, 16, 0x10, 0x00000000 },
- { 0x001d00, 16, 0x10, 0x00000000 },
- { 0x001d04, 16, 0x10, 0x00000000 },
- { 0x001d08, 16, 0x10, 0x00000000 },
- { 0x001d0c, 16, 0x10, 0x00000000 },
- { 0x001f00, 16, 0x08, 0x00000000 },
- { 0x001f04, 16, 0x08, 0x00000000 },
- { 0x001f80, 16, 0x08, 0x00000000 },
- { 0x001f84, 16, 0x08, 0x00000000 },
- { 0x002000, 1, 0x04, 0x00000000 },
- { 0x002040, 1, 0x04, 0x00000011 },
- { 0x002080, 1, 0x04, 0x00000020 },
- { 0x0020c0, 1, 0x04, 0x00000030 },
- { 0x002100, 1, 0x04, 0x00000040 },
- { 0x002140, 1, 0x04, 0x00000051 },
- { 0x00200c, 6, 0x40, 0x00000001 },
- { 0x002010, 1, 0x04, 0x00000000 },
- { 0x002050, 1, 0x04, 0x00000000 },
- { 0x002090, 1, 0x04, 0x00000001 },
- { 0x0020d0, 1, 0x04, 0x00000002 },
- { 0x002110, 1, 0x04, 0x00000003 },
- { 0x002150, 1, 0x04, 0x00000004 },
- { 0x000380, 4, 0x20, 0x00000000 },
- { 0x000384, 4, 0x20, 0x00000000 },
- { 0x000388, 4, 0x20, 0x00000000 },
- { 0x00038c, 4, 0x20, 0x00000000 },
- { 0x000700, 4, 0x10, 0x00000000 },
- { 0x000704, 4, 0x10, 0x00000000 },
- { 0x000708, 4, 0x10, 0x00000000 },
- { 0x002800, 128, 0x04, 0x00000000 },
- { 0x000a00, 16, 0x20, 0x00000000 },
- { 0x000a04, 16, 0x20, 0x00000000 },
- { 0x000a08, 16, 0x20, 0x00000000 },
- { 0x000a0c, 16, 0x20, 0x00000000 },
- { 0x000a10, 16, 0x20, 0x00000000 },
- { 0x000a14, 16, 0x20, 0x00000000 },
- { 0x000a18, 16, 0x20, 0x00006420 },
- { 0x000a1c, 16, 0x20, 0x00000000 },
- { 0x000c00, 16, 0x10, 0x00000000 },
- { 0x000c04, 16, 0x10, 0x00000000 },
- { 0x000c08, 16, 0x10, 0x00000000 },
- { 0x000c0c, 16, 0x10, 0x3f800000 },
- { 0x000d00, 8, 0x08, 0xffff0000 },
- { 0x000d04, 8, 0x08, 0xffff0000 },
- { 0x000e00, 16, 0x10, 0x00000000 },
- { 0x000e04, 16, 0x10, 0xffff0000 },
- { 0x000e08, 16, 0x10, 0xffff0000 },
- { 0x000d40, 4, 0x08, 0x00000000 },
- { 0x000d44, 4, 0x08, 0x00000000 },
- { 0x001e00, 8, 0x20, 0x00000001 },
- { 0x001e04, 8, 0x20, 0x00000001 },
- { 0x001e08, 8, 0x20, 0x00000002 },
- { 0x001e0c, 8, 0x20, 0x00000001 },
- { 0x001e10, 8, 0x20, 0x00000001 },
- { 0x001e14, 8, 0x20, 0x00000002 },
- { 0x001e18, 8, 0x20, 0x00000001 },
- { 0x001480, 8, 0x10, 0x00000000 },
- { 0x001484, 8, 0x10, 0x00000000 },
- { 0x001488, 8, 0x10, 0x00000000 },
- { 0x003400, 128, 0x04, 0x00000000 },
- { 0x00030c, 1, 0x04, 0x00000001 },
- { 0x001944, 1, 0x04, 0x00000000 },
- { 0x001514, 1, 0x04, 0x00000000 },
- { 0x000d68, 1, 0x04, 0x0000ffff },
- { 0x00121c, 1, 0x04, 0x0fac6881 },
- { 0x000fac, 1, 0x04, 0x00000001 },
- { 0x001538, 1, 0x04, 0x00000001 },
- { 0x000fe0, 2, 0x04, 0x00000000 },
- { 0x000fe8, 1, 0x04, 0x00000014 },
- { 0x000fec, 1, 0x04, 0x00000040 },
- { 0x000ff0, 1, 0x04, 0x00000000 },
- { 0x00179c, 1, 0x04, 0x00000000 },
- { 0x001228, 1, 0x04, 0x00000400 },
- { 0x00122c, 1, 0x04, 0x00000300 },
- { 0x001230, 1, 0x04, 0x00010001 },
- { 0x0007f8, 1, 0x04, 0x00000000 },
- { 0x001208, 1, 0x04, 0x00000000 },
- { 0x0015b4, 1, 0x04, 0x00000001 },
- { 0x0015cc, 1, 0x04, 0x00000000 },
- { 0x001534, 1, 0x04, 0x00000000 },
- { 0x000754, 1, 0x04, 0x00000001 },
- { 0x000fb0, 1, 0x04, 0x00000000 },
- { 0x0015d0, 1, 0x04, 0x00000000 },
- { 0x0011e0, 4, 0x04, 0x88888888 },
- { 0x00153c, 1, 0x04, 0x00000000 },
- { 0x0016b4, 1, 0x04, 0x00000003 },
- { 0x000fa4, 1, 0x04, 0x00000001 },
- { 0x000fbc, 4, 0x04, 0x0000ffff },
- { 0x000fa8, 1, 0x04, 0x0000ffff },
- { 0x000df8, 2, 0x04, 0x00000000 },
- { 0x001948, 1, 0x04, 0x00000000 },
- { 0x001970, 1, 0x04, 0x00000001 },
- { 0x00161c, 1, 0x04, 0x000009f0 },
- { 0x000dcc, 1, 0x04, 0x00000010 },
- { 0x0015e4, 1, 0x04, 0x00000000 },
- { 0x001160, 32, 0x04, 0x25e00040 },
- { 0x001880, 32, 0x04, 0x00000000 },
- { 0x000f84, 2, 0x04, 0x00000000 },
- { 0x0017c8, 2, 0x04, 0x00000000 },
- { 0x0017d0, 1, 0x04, 0x000000ff },
- { 0x0017d4, 1, 0x04, 0xffffffff },
- { 0x0017d8, 1, 0x04, 0x00000002 },
- { 0x0017dc, 1, 0x04, 0x00000000 },
- { 0x0015f4, 2, 0x04, 0x00000000 },
- { 0x001434, 2, 0x04, 0x00000000 },
- { 0x000d74, 1, 0x04, 0x00000000 },
- { 0x0013a4, 1, 0x04, 0x00000000 },
- { 0x001318, 1, 0x04, 0x00000001 },
- { 0x001080, 2, 0x04, 0x00000000 },
- { 0x001088, 2, 0x04, 0x00000001 },
- { 0x001090, 1, 0x04, 0x00000000 },
- { 0x001094, 1, 0x04, 0x00000001 },
- { 0x001098, 1, 0x04, 0x00000000 },
- { 0x00109c, 1, 0x04, 0x00000001 },
- { 0x0010a0, 2, 0x04, 0x00000000 },
- { 0x001644, 1, 0x04, 0x00000000 },
- { 0x000748, 1, 0x04, 0x00000000 },
- { 0x000de8, 1, 0x04, 0x00000000 },
- { 0x001648, 1, 0x04, 0x00000000 },
- { 0x0012a4, 1, 0x04, 0x00000000 },
- { 0x001120, 4, 0x04, 0x00000000 },
- { 0x001118, 1, 0x04, 0x00000000 },
- { 0x00164c, 1, 0x04, 0x00000000 },
- { 0x001658, 1, 0x04, 0x00000000 },
- { 0x001910, 1, 0x04, 0x00000290 },
- { 0x001518, 1, 0x04, 0x00000000 },
- { 0x00165c, 1, 0x04, 0x00000001 },
- { 0x001520, 1, 0x04, 0x00000000 },
- { 0x001604, 1, 0x04, 0x00000000 },
- { 0x001570, 1, 0x04, 0x00000000 },
- { 0x0013b0, 2, 0x04, 0x3f800000 },
- { 0x00020c, 1, 0x04, 0x00000000 },
- { 0x001670, 1, 0x04, 0x30201000 },
- { 0x001674, 1, 0x04, 0x70605040 },
- { 0x001678, 1, 0x04, 0xb8a89888 },
- { 0x00167c, 1, 0x04, 0xf8e8d8c8 },
- { 0x00166c, 1, 0x04, 0x00000000 },
- { 0x001680, 1, 0x04, 0x00ffff00 },
- { 0x0012d0, 1, 0x04, 0x00000003 },
- { 0x00113c, 1, 0x04, 0x00000000 },
- { 0x0012d4, 1, 0x04, 0x00000002 },
- { 0x001684, 2, 0x04, 0x00000000 },
- { 0x000dac, 2, 0x04, 0x00001b02 },
- { 0x000db4, 1, 0x04, 0x00000000 },
- { 0x00168c, 1, 0x04, 0x00000000 },
- { 0x0015bc, 1, 0x04, 0x00000000 },
- { 0x00156c, 1, 0x04, 0x00000000 },
- { 0x00187c, 1, 0x04, 0x00000000 },
- { 0x001110, 1, 0x04, 0x00000001 },
- { 0x000dc0, 3, 0x04, 0x00000000 },
- { 0x000f40, 5, 0x04, 0x00000000 },
- { 0x001234, 1, 0x04, 0x00000000 },
- { 0x001690, 1, 0x04, 0x00000000 },
- { 0x000790, 5, 0x04, 0x00000000 },
- { 0x00077c, 1, 0x04, 0x00000000 },
- { 0x001000, 1, 0x04, 0x00000010 },
- { 0x0010fc, 1, 0x04, 0x00000000 },
- { 0x001290, 1, 0x04, 0x00000000 },
- { 0x000218, 1, 0x04, 0x00000010 },
- { 0x0012d8, 1, 0x04, 0x00000000 },
- { 0x0012dc, 1, 0x04, 0x00000010 },
- { 0x000d94, 1, 0x04, 0x00000001 },
- { 0x00155c, 2, 0x04, 0x00000000 },
- { 0x001564, 1, 0x04, 0x00000fff },
- { 0x001574, 2, 0x04, 0x00000000 },
- { 0x00157c, 1, 0x04, 0x000fffff },
- { 0x001354, 1, 0x04, 0x00000000 },
- { 0x001610, 1, 0x04, 0x00000012 },
- { 0x001608, 2, 0x04, 0x00000000 },
- { 0x00260c, 1, 0x04, 0x00000000 },
- { 0x0007ac, 1, 0x04, 0x00000000 },
- { 0x00162c, 1, 0x04, 0x00000003 },
- { 0x000210, 1, 0x04, 0x00000000 },
- { 0x000320, 1, 0x04, 0x00000000 },
- { 0x000324, 6, 0x04, 0x3f800000 },
- { 0x000750, 1, 0x04, 0x00000000 },
- { 0x000760, 1, 0x04, 0x39291909 },
- { 0x000764, 1, 0x04, 0x79695949 },
- { 0x000768, 1, 0x04, 0xb9a99989 },
- { 0x00076c, 1, 0x04, 0xf9e9d9c9 },
- { 0x000770, 1, 0x04, 0x30201000 },
- { 0x000774, 1, 0x04, 0x70605040 },
- { 0x000778, 1, 0x04, 0x00009080 },
- { 0x000780, 1, 0x04, 0x39291909 },
- { 0x000784, 1, 0x04, 0x79695949 },
- { 0x000788, 1, 0x04, 0xb9a99989 },
- { 0x00078c, 1, 0x04, 0xf9e9d9c9 },
- { 0x0007d0, 1, 0x04, 0x30201000 },
- { 0x0007d4, 1, 0x04, 0x70605040 },
- { 0x0007d8, 1, 0x04, 0x00009080 },
- { 0x001004, 1, 0x04, 0x00000000 },
- { 0x001240, 8, 0x04, 0x00000000 },
- { 0x00037c, 1, 0x04, 0x00000001 },
- { 0x000740, 1, 0x04, 0x00000000 },
- { 0x001148, 1, 0x04, 0x00000000 },
- { 0x000fb4, 1, 0x04, 0x00000000 },
- { 0x000fb8, 1, 0x04, 0x00000002 },
- { 0x001130, 1, 0x04, 0x00000002 },
- { 0x000fd4, 2, 0x04, 0x00000000 },
- { 0x001030, 1, 0x04, 0x20181008 },
- { 0x001034, 1, 0x04, 0x40383028 },
- { 0x001038, 1, 0x04, 0x60585048 },
- { 0x00103c, 1, 0x04, 0x80787068 },
- { 0x000744, 1, 0x04, 0x00000000 },
- { 0x002600, 1, 0x04, 0x00000000 },
- { 0x001918, 1, 0x04, 0x00000000 },
- { 0x00191c, 1, 0x04, 0x00000900 },
- { 0x001920, 1, 0x04, 0x00000405 },
- { 0x001308, 1, 0x04, 0x00000001 },
- { 0x001924, 1, 0x04, 0x00000000 },
- { 0x0013ac, 1, 0x04, 0x00000000 },
- { 0x00192c, 1, 0x04, 0x00000001 },
- { 0x00193c, 1, 0x04, 0x00002c1c },
- { 0x000d7c, 1, 0x04, 0x00000000 },
- { 0x000f8c, 1, 0x04, 0x00000000 },
- { 0x0002c0, 1, 0x04, 0x00000001 },
- { 0x001510, 1, 0x04, 0x00000000 },
- { 0x001940, 1, 0x04, 0x00000000 },
- { 0x000ff4, 2, 0x04, 0x00000000 },
- { 0x00194c, 2, 0x04, 0x00000000 },
- { 0x001968, 1, 0x04, 0x00000000 },
- { 0x001590, 1, 0x04, 0x0000003f },
- { 0x0007e8, 4, 0x04, 0x00000000 },
- { 0x00196c, 1, 0x04, 0x00000011 },
- { 0x0002e4, 1, 0x04, 0x0000b001 },
- { 0x00036c, 2, 0x04, 0x00000000 },
- { 0x00197c, 1, 0x04, 0x00000000 },
- { 0x000fcc, 2, 0x04, 0x00000000 },
- { 0x0002d8, 1, 0x04, 0x00000040 },
- { 0x001980, 1, 0x04, 0x00000080 },
- { 0x001504, 1, 0x04, 0x00000080 },
- { 0x001984, 1, 0x04, 0x00000000 },
- { 0x000f60, 1, 0x04, 0x00000000 },
- { 0x000f64, 1, 0x04, 0x00400040 },
- { 0x000f68, 1, 0x04, 0x00002212 },
- { 0x000f6c, 1, 0x04, 0x08080203 },
- { 0x001108, 1, 0x04, 0x00000008 },
- { 0x000f70, 1, 0x04, 0x00080001 },
- { 0x000ffc, 1, 0x04, 0x00000000 },
- { 0x001134, 1, 0x04, 0x00000000 },
- { 0x000f1c, 1, 0x04, 0x00000000 },
- { 0x0011f8, 1, 0x04, 0x00000000 },
- { 0x001138, 1, 0x04, 0x00000001 },
- { 0x000300, 1, 0x04, 0x00000001 },
- { 0x0013a8, 1, 0x04, 0x00000000 },
- { 0x001224, 1, 0x04, 0x00000000 },
- { 0x0012ec, 1, 0x04, 0x00000000 },
- { 0x001310, 1, 0x04, 0x00000000 },
- { 0x001314, 1, 0x04, 0x00000001 },
- { 0x001380, 1, 0x04, 0x00000000 },
- { 0x001384, 4, 0x04, 0x00000001 },
- { 0x001394, 1, 0x04, 0x00000000 },
- { 0x00139c, 1, 0x04, 0x00000000 },
- { 0x001398, 1, 0x04, 0x00000000 },
- { 0x001594, 1, 0x04, 0x00000000 },
- { 0x001598, 4, 0x04, 0x00000001 },
- { 0x000f54, 3, 0x04, 0x00000000 },
- { 0x0019bc, 1, 0x04, 0x00000000 },
- { 0x000f9c, 2, 0x04, 0x00000000 },
- { 0x0012cc, 1, 0x04, 0x00000000 },
- { 0x0012e8, 1, 0x04, 0x00000000 },
- { 0x00130c, 1, 0x04, 0x00000001 },
- { 0x001360, 8, 0x04, 0x00000000 },
- { 0x00133c, 2, 0x04, 0x00000001 },
- { 0x001344, 1, 0x04, 0x00000002 },
- { 0x001348, 2, 0x04, 0x00000001 },
- { 0x001350, 1, 0x04, 0x00000002 },
- { 0x001358, 1, 0x04, 0x00000001 },
- { 0x0012e4, 1, 0x04, 0x00000000 },
- { 0x00131c, 4, 0x04, 0x00000000 },
- { 0x0019c0, 1, 0x04, 0x00000000 },
- { 0x001140, 1, 0x04, 0x00000000 },
- { 0x000dd0, 1, 0x04, 0x00000000 },
- { 0x000dd4, 1, 0x04, 0x00000001 },
- { 0x0002f4, 1, 0x04, 0x00000000 },
- { 0x0019c4, 1, 0x04, 0x00000000 },
- { 0x0019c8, 1, 0x04, 0x00001500 },
- { 0x00135c, 1, 0x04, 0x00000000 },
- { 0x000f90, 1, 0x04, 0x00000000 },
- { 0x0019e0, 8, 0x04, 0x00000001 },
- { 0x0019cc, 1, 0x04, 0x00000001 },
- { 0x00111c, 1, 0x04, 0x00000001 },
- { 0x0015b8, 1, 0x04, 0x00000000 },
- { 0x001a00, 1, 0x04, 0x00001111 },
- { 0x001a04, 7, 0x04, 0x00000000 },
- { 0x000d6c, 2, 0x04, 0xffff0000 },
- { 0x0010f8, 1, 0x04, 0x00001010 },
- { 0x000d80, 5, 0x04, 0x00000000 },
- { 0x000da0, 1, 0x04, 0x00000000 },
- { 0x0007a4, 2, 0x04, 0x00000000 },
- { 0x001508, 1, 0x04, 0x80000000 },
- { 0x00150c, 1, 0x04, 0x40000000 },
- { 0x001668, 1, 0x04, 0x00000000 },
- { 0x000318, 2, 0x04, 0x00000008 },
- { 0x000d9c, 1, 0x04, 0x00000001 },
- { 0x000f14, 1, 0x04, 0x00000000 },
- { 0x000374, 1, 0x04, 0x00000000 },
- { 0x000378, 1, 0x04, 0x0000000c },
- { 0x0007dc, 1, 0x04, 0x00000000 },
- { 0x00074c, 1, 0x04, 0x00000055 },
- { 0x001420, 1, 0x04, 0x00000003 },
- { 0x001008, 1, 0x04, 0x00000008 },
- { 0x00100c, 1, 0x04, 0x00000040 },
- { 0x001010, 1, 0x04, 0x0000012c },
- { 0x000d60, 1, 0x04, 0x00000040 },
- { 0x001018, 1, 0x04, 0x00000020 },
- { 0x00101c, 1, 0x04, 0x00000001 },
- { 0x001020, 1, 0x04, 0x00000020 },
- { 0x001024, 1, 0x04, 0x00000001 },
- { 0x001444, 3, 0x04, 0x00000000 },
- { 0x000360, 1, 0x04, 0x20164010 },
- { 0x000364, 1, 0x04, 0x00000020 },
- { 0x000368, 1, 0x04, 0x00000000 },
- { 0x000da8, 1, 0x04, 0x00000030 },
- { 0x000de4, 1, 0x04, 0x00000000 },
- { 0x000204, 1, 0x04, 0x00000006 },
- { 0x0002d0, 1, 0x04, 0x003fffff },
- { 0x001220, 1, 0x04, 0x00000005 },
- { 0x000fdc, 1, 0x04, 0x00000000 },
- { 0x000f98, 1, 0x04, 0x00400008 },
- { 0x001284, 1, 0x04, 0x08000080 },
- { 0x001450, 1, 0x04, 0x00400008 },
- { 0x001454, 1, 0x04, 0x08000080 },
- { 0x000214, 1, 0x04, 0x00000000 },
- {}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_mthd[] = {
- { gm204_grctx_init_b197_0, 0xb197 },
- { gf100_grctx_init_902d_0, 0x902d },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_fe_0[] = {
- { 0x404004, 8, 0x04, 0x00000000 },
- { 0x404024, 1, 0x04, 0x0000e000 },
- { 0x404028, 8, 0x04, 0x00000000 },
- { 0x4040a8, 8, 0x04, 0x00000000 },
- { 0x4040c8, 1, 0x04, 0xf801008f },
- { 0x4040d0, 6, 0x04, 0x00000000 },
- { 0x4040f8, 1, 0x04, 0x00000000 },
- { 0x404100, 10, 0x04, 0x00000000 },
- { 0x404130, 2, 0x04, 0x00000000 },
- { 0x404150, 1, 0x04, 0x0000002e },
- { 0x404154, 2, 0x04, 0x00000800 },
- { 0x404164, 1, 0x04, 0x00000045 },
- { 0x40417c, 2, 0x04, 0x00000000 },
- { 0x404194, 1, 0x04, 0x33000700 },
- { 0x4041a0, 4, 0x04, 0x00000000 },
- { 0x4041c4, 2, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_ds_0[] = {
- { 0x405800, 1, 0x04, 0x8f8001bf },
- { 0x405830, 1, 0x04, 0x04001000 },
- { 0x405834, 1, 0x04, 0x08000000 },
- { 0x405838, 1, 0x04, 0x00010000 },
- { 0x405854, 1, 0x04, 0x00000000 },
- { 0x405870, 4, 0x04, 0x00000001 },
- { 0x405a00, 2, 0x04, 0x00000000 },
- { 0x405a18, 1, 0x04, 0x00000000 },
- { 0x405a1c, 1, 0x04, 0x000000ff },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_cwd_0[] = {
- { 0x405b00, 1, 0x04, 0x00000000 },
- { 0x405b10, 1, 0x04, 0x00001000 },
- { 0x405b20, 1, 0x04, 0x04000000 },
- { 0x405b60, 6, 0x04, 0x00000000 },
- { 0x405ba0, 6, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_pd_0[] = {
- { 0x406020, 1, 0x04, 0x17410001 },
- { 0x406028, 4, 0x04, 0x00000001 },
- { 0x4064a8, 1, 0x04, 0x00000000 },
- { 0x4064ac, 1, 0x04, 0x00003fff },
- { 0x4064b0, 3, 0x04, 0x00000000 },
- { 0x4064c0, 1, 0x04, 0x80400280 },
- { 0x4064c4, 1, 0x04, 0x0400ffff },
- { 0x4064c8, 1, 0x04, 0x01800780 },
- { 0x4064cc, 9, 0x04, 0x00000000 },
- { 0x4064fc, 1, 0x04, 0x0000022a },
- { 0x406500, 1, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_be_0[] = {
- { 0x408800, 1, 0x04, 0x32882a3c },
- { 0x408804, 1, 0x04, 0x00000040 },
- { 0x408808, 1, 0x04, 0x1003e005 },
- { 0x408840, 1, 0x04, 0x00000e0b },
- { 0x408900, 1, 0x04, 0xb080b801 },
- { 0x408904, 1, 0x04, 0x63038001 },
- { 0x408908, 1, 0x04, 0x12c8502f },
- { 0x408980, 1, 0x04, 0x0000011d },
- {}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_hub[] = {
- { gf100_grctx_init_main_0 },
- { gm204_grctx_init_fe_0 },
- { gk110_grctx_init_pri_0 },
- { gk104_grctx_init_memfmt_0 },
- { gm204_grctx_init_ds_0 },
- { gm204_grctx_init_cwd_0 },
- { gm204_grctx_init_pd_0 },
- { gk208_grctx_init_rstr2d_0 },
- { gk104_grctx_init_scc_0 },
- { gm204_grctx_init_be_0 },
- {}
-};
-
-const struct gf100_gr_init
-gm204_grctx_init_prop_0[] = {
- { 0x418400, 1, 0x04, 0x38e01e00 },
- { 0x418404, 1, 0x04, 0x70001fff },
- { 0x41840c, 1, 0x04, 0x20001008 },
- { 0x418410, 2, 0x04, 0x0fff0fff },
- { 0x418418, 1, 0x04, 0x07ff07ff },
- { 0x41841c, 1, 0x04, 0x3feffbff },
- { 0x418450, 6, 0x04, 0x00000000 },
- { 0x418468, 1, 0x04, 0x00000001 },
- { 0x41846c, 2, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_gpc_unk_1[] = {
- { 0x418600, 1, 0x04, 0x0000007f },
- { 0x418684, 1, 0x04, 0x0000001f },
- { 0x418700, 1, 0x04, 0x00000002 },
- { 0x418704, 1, 0x04, 0x00000080 },
- { 0x418708, 1, 0x04, 0x40000000 },
- { 0x41870c, 2, 0x04, 0x00000000 },
- { 0x418728, 1, 0x04, 0x00010000 },
- {}
-};
-
-const struct gf100_gr_init
-gm204_grctx_init_setup_0[] = {
- { 0x418800, 1, 0x04, 0x7006863a },
- { 0x418808, 1, 0x04, 0x00000000 },
- { 0x418810, 1, 0x04, 0x00000000 },
- { 0x418828, 1, 0x04, 0x00000044 },
- { 0x418830, 1, 0x04, 0x10000001 },
- { 0x4188d8, 1, 0x04, 0x00000008 },
- { 0x4188e0, 1, 0x04, 0x01000000 },
- { 0x4188e8, 5, 0x04, 0x00000000 },
- { 0x4188fc, 1, 0x04, 0x20100058 },
- {}
-};
-
-const struct gf100_gr_init
-gm204_grctx_init_gpm_0[] = {
- { 0x418c10, 8, 0x04, 0x00000000 },
- { 0x418c40, 1, 0x04, 0xffffffff },
- { 0x418c6c, 1, 0x04, 0x00000001 },
- { 0x418c80, 1, 0x04, 0x20200000 },
- {}
-};
-
-const struct gf100_gr_init
-gm204_grctx_init_gpc_unk_2[] = {
- { 0x418e00, 1, 0x04, 0x90040000 },
- { 0x418e24, 1, 0x04, 0x00000000 },
- { 0x418e28, 1, 0x04, 0x00000030 },
- { 0x418e2c, 1, 0x04, 0x00000100 },
- { 0x418e30, 3, 0x04, 0x00000000 },
- { 0x418e40, 22, 0x04, 0x00000000 },
- { 0x418ea0, 12, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_pack
-gm204_grctx_pack_gpc[] = {
- { gm107_grctx_init_gpc_unk_0 },
- { gm204_grctx_init_prop_0 },
- { gm204_grctx_init_gpc_unk_1 },
- { gm204_grctx_init_setup_0 },
- { gf100_grctx_init_zcull_0 },
- { gk208_grctx_init_crstr_0 },
- { gm204_grctx_init_gpm_0 },
- { gm204_grctx_init_gpc_unk_2 },
- { gf100_grctx_init_gcc_0 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_pe_0[] = {
- { 0x419848, 1, 0x04, 0x00000000 },
- { 0x419864, 1, 0x04, 0x00000029 },
- { 0x419888, 1, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_tex_0[] = {
- { 0x419a00, 1, 0x04, 0x000100f0 },
- { 0x419a04, 1, 0x04, 0x00000005 },
- { 0x419a08, 1, 0x04, 0x00000621 },
- { 0x419a0c, 1, 0x04, 0x00320000 },
- { 0x419a10, 1, 0x04, 0x00000000 },
- { 0x419a14, 1, 0x04, 0x00000200 },
- { 0x419a1c, 1, 0x04, 0x0010c000 },
- { 0x419a20, 1, 0x04, 0x20008a00 },
- { 0x419a30, 1, 0x04, 0x00000001 },
- { 0x419a3c, 1, 0x04, 0x0000181e },
- { 0x419ac4, 1, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_mpc_0[] = {
- { 0x419c00, 1, 0x04, 0x0000009a },
- { 0x419c04, 1, 0x04, 0x80000bd6 },
- { 0x419c08, 1, 0x04, 0x00000002 },
- { 0x419c20, 1, 0x04, 0x00000000 },
- { 0x419c24, 1, 0x04, 0x00084210 },
- { 0x419c28, 1, 0x04, 0x3efbefbe },
- { 0x419c2c, 1, 0x04, 0x00000000 },
- { 0x419c34, 1, 0x04, 0x71ff1ff3 },
- { 0x419c3c, 1, 0x04, 0x00001919 },
- { 0x419c50, 1, 0x04, 0x00000005 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_l1c_0[] = {
- { 0x419c84, 1, 0x04, 0x0000003e },
- { 0x419c90, 1, 0x04, 0x0000000a },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_sm_0[] = {
- { 0x419e04, 3, 0x04, 0x00000000 },
- { 0x419e10, 1, 0x04, 0x00001c02 },
- { 0x419e44, 1, 0x04, 0x00d3eff2 },
- { 0x419e48, 1, 0x04, 0x00000000 },
- { 0x419e4c, 1, 0x04, 0x0000007f },
- { 0x419e50, 1, 0x04, 0x00000000 },
- { 0x419e58, 6, 0x04, 0x00000000 },
- { 0x419e74, 10, 0x04, 0x00000000 },
- { 0x419eac, 1, 0x04, 0x0001cf8b },
- { 0x419eb0, 1, 0x04, 0x00030300 },
- { 0x419eb8, 1, 0x04, 0x40000000 },
- { 0x419ef0, 24, 0x04, 0x00000000 },
- { 0x419f68, 2, 0x04, 0x00000000 },
- { 0x419f70, 1, 0x04, 0x00000020 },
- { 0x419f78, 1, 0x04, 0x00010beb },
- { 0x419f7c, 1, 0x04, 0x00000000 },
- {}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_tpc[] = {
- { gm204_grctx_init_pe_0 },
- { gm204_grctx_init_tex_0 },
- { gm204_grctx_init_mpc_0 },
- { gm204_grctx_init_l1c_0 },
- { gm204_grctx_init_sm_0 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_pes_0[] = {
- { 0x41be24, 1, 0x04, 0x0000000e },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_grctx_init_cbm_0[] = {
- { 0x41bec0, 1, 0x04, 0x00000000 },
- { 0x41bec4, 1, 0x04, 0x01030000 },
- { 0x41bee4, 1, 0x04, 0x00000000 },
- { 0x41bef0, 1, 0x04, 0x000003ff },
- { 0x41bef4, 2, 0x04, 0x00000000 },
- {}
-};
-
-const struct gf100_gr_pack
-gm204_grctx_pack_ppc[] = {
- { gm204_grctx_init_pes_0 },
- { gm204_grctx_init_cbm_0 },
- { gm107_grctx_init_wwdx_0 },
- {}
-};
-
-/*******************************************************************************
- * PGRAPH context implementation
- ******************************************************************************/
-
-void
-gm204_grctx_generate_tpcid(struct gf100_gr *gr)
-{
- struct nvkm_device *device = gr->base.engine.subdev.device;
- int gpc, tpc, id;
-
- for (tpc = 0, id = 0; tpc < 4; tpc++) {
- for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
- if (tpc < gr->tpc_nr[gpc]) {
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
- nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
- id++;
- }
- }
- }
-}
-
-static void
-gm204_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
-{
- struct nvkm_device *device = gr->base.engine.subdev.device;
- const u32 fbp_count = nvkm_rd32(device, 0x12006c);
- nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
- nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
-}
-
-void
-gm204_grctx_generate_405b60(struct gf100_gr *gr)
-{
- struct nvkm_device *device = gr->base.engine.subdev.device;
- const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
- u32 dist[TPC_MAX / 4] = {};
- u32 gpcs[GPC_MAX] = {};
- u8 tpcnr[GPC_MAX];
- int tpc, gpc, i;
-
- memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
-
- /* won't result in the same distribution as the binary driver where
- * some of the gpcs have more tpcs than others, but this shall do
- * for the moment. the code for earlier gpus has this issue too.
- */
- for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
- do {
- gpc = (gpc + 1) % gr->gpc_nr;
- } while(!tpcnr[gpc]);
- tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
- dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
- gpcs[gpc] |= i << (tpc * 8);
- }
-
- for (i = 0; i < dist_nr; i++)
- nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
- for (i = 0; i < gr->gpc_nr; i++)
- nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
-}
-
-void
-gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
-{
- struct nvkm_device *device = gr->base.engine.subdev.device;
- const struct gf100_grctx_func *grctx = gr->func->grctx;
- u32 tmp;
- int i;
-
- gf100_gr_mmio(gr, grctx->hub);
- gf100_gr_mmio(gr, grctx->gpc);
- gf100_gr_mmio(gr, grctx->zcull);
- gf100_gr_mmio(gr, grctx->tpc);
- gf100_gr_mmio(gr, grctx->ppc);
-
- nvkm_wr32(device, 0x404154, 0x00000000);
-
- grctx->bundle(info);
- grctx->pagepool(info);
- grctx->attrib(info);
- grctx->unkn(gr);
-
- gm204_grctx_generate_tpcid(gr);
- gf100_grctx_generate_r406028(gr);
- gk104_grctx_generate_r418bb8(gr);
-
- for (i = 0; i < 8; i++)
- nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
- nvkm_wr32(device, 0x406500, 0x00000000);
-
- nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
-
- gm204_grctx_generate_rop_active_fbps(gr);
-
- for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
- tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
- nvkm_wr32(device, 0x4041c4, tmp);
-
- gm204_grctx_generate_405b60(gr);
-
- gf100_gr_icmd(gr, grctx->icmd);
- nvkm_wr32(device, 0x404154, 0x00000800);
- gf100_gr_mthd(gr, grctx->mthd);
-
- nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
- nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
-}
-
-const struct gf100_grctx_func
-gm204_grctx = {
- .main = gm204_grctx_generate_main,
- .unkn = gk104_grctx_generate_unkn,
- .hub = gm204_grctx_pack_hub,
- .gpc = gm204_grctx_pack_gpc,
- .zcull = gf100_grctx_pack_zcull,
- .tpc = gm204_grctx_pack_tpc,
- .ppc = gm204_grctx_pack_ppc,
- .icmd = gm204_grctx_pack_icmd,
- .mthd = gm204_grctx_pack_mthd,
- .bundle = gm107_grctx_generate_bundle,
- .bundle_size = 0x3000,
- .bundle_min_gpm_fifo_depth = 0x180,
- .bundle_token_limit = 0x780,
- .pagepool = gm107_grctx_generate_pagepool,
- .pagepool_size = 0x20000,
- .attrib = gm107_grctx_generate_attrib,
- .attrib_nr_max = 0x600,
- .attrib_nr = 0x400,
- .alpha_nr_max = 0x1800,
- .alpha_nr = 0x1000,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
deleted file mode 100644
index d6be6034c2c2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "ctxgf100.h"
-
-static const struct gf100_gr_init
-gm206_grctx_init_gpc_unk_1[] = {
- { 0x418600, 1, 0x04, 0x0000007f },
- { 0x418684, 1, 0x04, 0x0000001f },
- { 0x418700, 1, 0x04, 0x00000002 },
- { 0x418704, 1, 0x04, 0x00000080 },
- { 0x418708, 1, 0x04, 0x40000000 },
- { 0x41870c, 2, 0x04, 0x00000000 },
- { 0x418728, 1, 0x04, 0x00300020 },
- {}
-};
-
-static const struct gf100_gr_pack
-gm206_grctx_pack_gpc[] = {
- { gm107_grctx_init_gpc_unk_0 },
- { gm204_grctx_init_prop_0 },
- { gm206_grctx_init_gpc_unk_1 },
- { gm204_grctx_init_setup_0 },
- { gf100_grctx_init_zcull_0 },
- { gk208_grctx_init_crstr_0 },
- { gm204_grctx_init_gpm_0 },
- { gm204_grctx_init_gpc_unk_2 },
- { gf100_grctx_init_gcc_0 },
- {}
-};
-
-const struct gf100_grctx_func
-gm206_grctx = {
- .main = gm204_grctx_generate_main,
- .unkn = gk104_grctx_generate_unkn,
- .hub = gm204_grctx_pack_hub,
- .gpc = gm206_grctx_pack_gpc,
- .zcull = gf100_grctx_pack_zcull,
- .tpc = gm204_grctx_pack_tpc,
- .ppc = gm204_grctx_pack_ppc,
- .icmd = gm204_grctx_pack_icmd,
- .mthd = gm204_grctx_pack_mthd,
- .bundle = gm107_grctx_generate_bundle,
- .bundle_size = 0x3000,
- .bundle_min_gpm_fifo_depth = 0x180,
- .bundle_token_limit = 0x780,
- .pagepool = gm107_grctx_generate_pagepool,
- .pagepool_size = 0x20000,
- .attrib = gm107_grctx_generate_attrib,
- .attrib_nr_max = 0x600,
- .attrib_nr = 0x400,
- .alpha_nr_max = 0x1800,
- .alpha_nr = 0x1000,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index 670260402538..a8827efa90ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -54,7 +54,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
grctx->unkn(gr);
- gm204_grctx_generate_tpcid(gr);
+ gm200_grctx_generate_tpcid(gr);
gm20b_grctx_generate_r406028(gr);
gk104_grctx_generate_r418bb8(gr);
@@ -70,7 +70,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
nvkm_wr32(device, 0x4041c4, tmp);
- gm204_grctx_generate_405b60(gr);
+ gm200_grctx_generate_405b60(gr);
gf100_gr_wait_idle(gr);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index e168b83a10c9..dc60509f76f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -322,6 +322,7 @@ main:
// interrupt handler
ih:
+ push $r0
push $r8
mov $r8 $flags
push $r8
@@ -358,6 +359,7 @@ ih:
pop $r8
mov $flags $r8
pop $r8
+ pop $r0
bclr $flags $p0
iret
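
Note on the hunk above: wrapping the handler body in push $r0 / pop $r0 costs 4 bytes of code, and the symbol shifts in the regenerated blobs below confirm the two instructions are 2 bytes each (ih_no_fifo, which sits after the push but before the pop, moves by +2; every label after the pop moves by +4, e.g. ctx_xfer 0x05b0 -> 0x05b4). That relocation is the whole content of the gpcg*.fuc3.h diffs that follow: each label after ih shifts, every call target into those labels is re-encoded, and one word of trailing zero padding is dropped so the image size stays constant. The identical push/pop fix to hub.fuc, with the matching hubg*.fuc3.h regeneration, appears further down.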
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 231f696d1e0a..5f4ddfee48a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -382,56 +382,57 @@ uint32_t gf100_grgpc_code[] = {
0xb60412fd,
0x1efd01e4,
0x0018fe05,
- 0x05b021f5,
+ 0x05b421f5,
/* 0x04eb: main_not_ctx_xfer */
0x94d30ef4,
0xf5f010ef,
0x7e21f501,
0xc60ef403,
/* 0x04f8: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x1cd7f02c,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xf00421f4,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x0546: ih_no_fifo */
- 0x07f104bd,
- 0x03f00100,
- 0x000ad000,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x056a: hub_barrier_done */
+ 0x80f900f9,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf02c0bf4,
+ 0xe7f11cd7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x0548: ih_no_fifo */
+ 0x010007f1,
+ 0xd00003f0,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xfc80fc00,
+ 0x0032f400,
+/* 0x056e: hub_barrier_done */
0xf7f001f8,
0x040e9801,
0xb904febb,
0xe7f102ff,
0xe3f09418,
0x9d21f440,
-/* 0x0582: ctx_redswitch */
+/* 0x0586: ctx_redswitch */
0xf7f000f8,
0x0007f120,
0x0103f085,
0xbd000fd0,
0x08e7f004,
-/* 0x0594: ctx_redswitch_delay */
+/* 0x0598: ctx_redswitch_delay */
0xf401e2b6,
0xf5f1fd1b,
0xf5f10800,
@@ -439,13 +440,13 @@ uint32_t gf100_grgpc_code[] = {
0x03f08500,
0x000fd001,
0x00f804bd,
-/* 0x05b0: ctx_xfer */
+/* 0x05b4: ctx_xfer */
0x810007f1,
0xd00203f0,
0x04bd000f,
0xf50711f4,
-/* 0x05c3: ctx_xfer_not_load */
- 0xf5058221,
+/* 0x05c7: ctx_xfer_not_load */
+ 0xf5058621,
0xbd026a21,
0xfc07f124,
0x0203f047,
@@ -475,12 +476,11 @@ uint32_t gf100_grgpc_code[] = {
0x6f21f508,
0x5e21f501,
0x0601f402,
-/* 0x063b: ctx_xfer_post */
+/* 0x063f: ctx_xfer_post */
0xf50712f4,
-/* 0x063f: ctx_xfer_done */
+/* 0x0643: ctx_xfer_done */
0xf5027f21,
- 0xf8056a21,
- 0x00000000,
+ 0xf8056e21,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index bb820ff28621..03381b163cfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -408,56 +408,57 @@ uint32_t gf117_grgpc_code[] = {
0x0412fd20,
0xfd01e4b6,
0x18fe051e,
- 0xfd21f500,
- 0xd30ef405,
+ 0x0121f500,
+ 0xd30ef406,
/* 0x0538: main_not_ctx_xfer */
0xf010ef94,
0x21f501f5,
0x0ef4037e,
/* 0x0545: ih */
- 0xfe80f9c6,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0xa7f104bd,
- 0xa3f00200,
- 0x00aacf00,
- 0xf404abc4,
- 0xd7f02c0b,
- 0x00e7f124,
- 0x00e3f01a,
- 0xf100eecf,
- 0xf01900f7,
- 0xffcf00f3,
- 0x0421f400,
- 0xf101e7f0,
- 0xf01d0007,
- 0x0ed00003,
-/* 0x0593: ih_no_fifo */
- 0xf104bd00,
- 0xf0010007,
- 0x0ad00003,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x05b7: hub_barrier_done */
+ 0xf900f9c6,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0x0200a7f1,
+ 0xcf00a3f0,
+ 0xabc400aa,
+ 0x2c0bf404,
+ 0xf124d7f0,
+ 0xf01a00e7,
+ 0xeecf00e3,
+ 0x00f7f100,
+ 0x00f3f019,
+ 0xf400ffcf,
+ 0xe7f00421,
+ 0x0007f101,
+ 0x0003f01d,
+ 0xbd000ed0,
+/* 0x0595: ih_no_fifo */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0x32f400fc,
+/* 0x05bb: hub_barrier_done */
0xf001f800,
0x0e9801f7,
0x04febb04,
0xf102ffb9,
0xf09418e7,
0x21f440e3,
-/* 0x05cf: ctx_redswitch */
+/* 0x05d3: ctx_redswitch */
0xf000f89d,
0x07f120f7,
0x03f08500,
0x000fd001,
0xe7f004bd,
-/* 0x05e1: ctx_redswitch_delay */
+/* 0x05e5: ctx_redswitch_delay */
0x01e2b608,
0xf1fd1bf4,
0xf10800f5,
@@ -465,13 +466,13 @@ uint32_t gf117_grgpc_code[] = {
0xf0850007,
0x0fd00103,
0xf804bd00,
-/* 0x05fd: ctx_xfer */
+/* 0x0601: ctx_xfer */
0x0007f100,
0x0203f081,
0xbd000fd0,
0x0711f404,
- 0x05cf21f5,
-/* 0x0610: ctx_xfer_not_load */
+ 0x05d321f5,
+/* 0x0614: ctx_xfer_not_load */
0x026a21f5,
0x07f124bd,
0x03f047fc,
@@ -511,10 +512,10 @@ uint32_t gf117_grgpc_code[] = {
0x21f5016f,
0x01f4025e,
0x0712f406,
-/* 0x06ac: ctx_xfer_post */
+/* 0x06b0: ctx_xfer_post */
0x027f21f5,
-/* 0x06b0: ctx_xfer_done */
- 0x05b721f5,
+/* 0x06b4: ctx_xfer_done */
+ 0x05bb21f5,
0x000000f8,
0x00000000,
0x00000000,
@@ -533,5 +534,4 @@ uint32_t gf117_grgpc_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 911976d20940..99d9b48a3b50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -408,56 +408,57 @@ uint32_t gk104_grgpc_code[] = {
0x0412fd20,
0xfd01e4b6,
0x18fe051e,
- 0xfd21f500,
- 0xd30ef405,
+ 0x0121f500,
+ 0xd30ef406,
/* 0x0538: main_not_ctx_xfer */
0xf010ef94,
0x21f501f5,
0x0ef4037e,
/* 0x0545: ih */
- 0xfe80f9c6,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0xa7f104bd,
- 0xa3f00200,
- 0x00aacf00,
- 0xf404abc4,
- 0xd7f02c0b,
- 0x00e7f124,
- 0x00e3f01a,
- 0xf100eecf,
- 0xf01900f7,
- 0xffcf00f3,
- 0x0421f400,
- 0xf101e7f0,
- 0xf01d0007,
- 0x0ed00003,
-/* 0x0593: ih_no_fifo */
- 0xf104bd00,
- 0xf0010007,
- 0x0ad00003,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x05b7: hub_barrier_done */
+ 0xf900f9c6,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0x0200a7f1,
+ 0xcf00a3f0,
+ 0xabc400aa,
+ 0x2c0bf404,
+ 0xf124d7f0,
+ 0xf01a00e7,
+ 0xeecf00e3,
+ 0x00f7f100,
+ 0x00f3f019,
+ 0xf400ffcf,
+ 0xe7f00421,
+ 0x0007f101,
+ 0x0003f01d,
+ 0xbd000ed0,
+/* 0x0595: ih_no_fifo */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0x32f400fc,
+/* 0x05bb: hub_barrier_done */
0xf001f800,
0x0e9801f7,
0x04febb04,
0xf102ffb9,
0xf09418e7,
0x21f440e3,
-/* 0x05cf: ctx_redswitch */
+/* 0x05d3: ctx_redswitch */
0xf000f89d,
0x07f120f7,
0x03f08500,
0x000fd001,
0xe7f004bd,
-/* 0x05e1: ctx_redswitch_delay */
+/* 0x05e5: ctx_redswitch_delay */
0x01e2b608,
0xf1fd1bf4,
0xf10800f5,
@@ -465,13 +466,13 @@ uint32_t gk104_grgpc_code[] = {
0xf0850007,
0x0fd00103,
0xf804bd00,
-/* 0x05fd: ctx_xfer */
+/* 0x0601: ctx_xfer */
0x0007f100,
0x0203f081,
0xbd000fd0,
0x0711f404,
- 0x05cf21f5,
-/* 0x0610: ctx_xfer_not_load */
+ 0x05d321f5,
+/* 0x0614: ctx_xfer_not_load */
0x026a21f5,
0x07f124bd,
0x03f047fc,
@@ -511,10 +512,10 @@ uint32_t gk104_grgpc_code[] = {
0x21f5016f,
0x01f4025e,
0x0712f406,
-/* 0x06ac: ctx_xfer_post */
+/* 0x06b0: ctx_xfer_post */
0x027f21f5,
-/* 0x06b0: ctx_xfer_done */
- 0x05b721f5,
+/* 0x06b4: ctx_xfer_done */
+ 0x05bb21f5,
0x000000f8,
0x00000000,
0x00000000,
@@ -533,5 +534,4 @@ uint32_t gk104_grgpc_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index 1c6e11b05df2..f7267696cbfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -408,56 +408,57 @@ uint32_t gk110_grgpc_code[] = {
0x0412fd20,
0xfd01e4b6,
0x18fe051e,
- 0xfd21f500,
- 0xd30ef405,
+ 0x0121f500,
+ 0xd30ef406,
/* 0x0538: main_not_ctx_xfer */
0xf010ef94,
0x21f501f5,
0x0ef4037e,
/* 0x0545: ih */
- 0xfe80f9c6,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0xa7f104bd,
- 0xa3f00200,
- 0x00aacf00,
- 0xf404abc4,
- 0xd7f02c0b,
- 0x00e7f124,
- 0x00e3f01a,
- 0xf100eecf,
- 0xf01900f7,
- 0xffcf00f3,
- 0x0421f400,
- 0xf101e7f0,
- 0xf01d0007,
- 0x0ed00003,
-/* 0x0593: ih_no_fifo */
- 0xf104bd00,
- 0xf0010007,
- 0x0ad00003,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x05b7: hub_barrier_done */
+ 0xf900f9c6,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0x0200a7f1,
+ 0xcf00a3f0,
+ 0xabc400aa,
+ 0x2c0bf404,
+ 0xf124d7f0,
+ 0xf01a00e7,
+ 0xeecf00e3,
+ 0x00f7f100,
+ 0x00f3f019,
+ 0xf400ffcf,
+ 0xe7f00421,
+ 0x0007f101,
+ 0x0003f01d,
+ 0xbd000ed0,
+/* 0x0595: ih_no_fifo */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0x32f400fc,
+/* 0x05bb: hub_barrier_done */
0xf001f800,
0x0e9801f7,
0x04febb04,
0xf102ffb9,
0xf09418e7,
0x21f440e3,
-/* 0x05cf: ctx_redswitch */
+/* 0x05d3: ctx_redswitch */
0xf000f89d,
0x07f120f7,
0x03f08500,
0x000fd001,
0xe7f004bd,
-/* 0x05e1: ctx_redswitch_delay */
+/* 0x05e5: ctx_redswitch_delay */
0x01e2b608,
0xf1fd1bf4,
0xf10800f5,
@@ -465,13 +466,13 @@ uint32_t gk110_grgpc_code[] = {
0xf0850007,
0x0fd00103,
0xf804bd00,
-/* 0x05fd: ctx_xfer */
+/* 0x0601: ctx_xfer */
0x0007f100,
0x0203f081,
0xbd000fd0,
0x0711f404,
- 0x05cf21f5,
-/* 0x0610: ctx_xfer_not_load */
+ 0x05d321f5,
+/* 0x0614: ctx_xfer_not_load */
0x026a21f5,
0x07f124bd,
0x03f047fc,
@@ -511,10 +512,10 @@ uint32_t gk110_grgpc_code[] = {
0x21f5016f,
0x01f4025e,
0x0712f406,
-/* 0x06ac: ctx_xfer_post */
+/* 0x06b0: ctx_xfer_post */
0x027f21f5,
-/* 0x06b0: ctx_xfer_done */
- 0x05b721f5,
+/* 0x06b4: ctx_xfer_done */
+ 0x05bb21f5,
0x000000f8,
0x00000000,
0x00000000,
@@ -533,5 +534,4 @@ uint32_t gk110_grgpc_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 84af7ec6a78e..387d1fa3e231 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -360,61 +360,62 @@ uint32_t gk208_grgpc_code[] = {
0xb60412fd,
0x1efd01e4,
0x0018fe05,
- 0x00051b7e,
+ 0x00051f7e,
/* 0x0477: main_not_ctx_xfer */
0x94d40ef4,
0xf5f010ef,
0x02f87e01,
0xc70ef400,
/* 0x0484: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x02004a04,
- 0xc400aacf,
- 0x0bf404ab,
- 0x4e240d1f,
- 0xeecf1a00,
- 0x19004f00,
- 0x7e00ffcf,
- 0x0e000004,
- 0x1d004001,
- 0xbd000ef6,
-/* 0x04c1: ih_no_fifo */
- 0x01004004,
- 0xbd000af6,
- 0xfcf0fc04,
- 0xfcd0fce0,
- 0xfca0fcb0,
- 0xfe80fc90,
- 0x80fc0088,
+ 0x80f900f9,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0x4a04bdf0,
+ 0xaacf0200,
+ 0x04abc400,
+ 0x0d1f0bf4,
+ 0x1a004e24,
+ 0x4f00eecf,
+ 0xffcf1900,
+ 0x00047e00,
+ 0x40010e00,
+ 0x0ef61d00,
+/* 0x04c3: ih_no_fifo */
+ 0x4004bd00,
+ 0x0af60100,
+ 0xfc04bd00,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x00fc80fc,
0xf80032f4,
-/* 0x04e1: hub_barrier_done */
+/* 0x04e5: hub_barrier_done */
0x98010f01,
0xfebb040e,
0x8effb204,
0x7e409418,
0xf800008f,
-/* 0x04f5: ctx_redswitch */
+/* 0x04f9: ctx_redswitch */
0x80200f00,
0xf6018500,
0x04bd000f,
-/* 0x0502: ctx_redswitch_delay */
+/* 0x0506: ctx_redswitch_delay */
0xe2b6080e,
0xfd1bf401,
0x0800f5f1,
0x0200f5f1,
0x01850080,
0xbd000ff6,
-/* 0x051b: ctx_xfer */
+/* 0x051f: ctx_xfer */
0x8000f804,
0xf6028100,
0x04bd000f,
0x7e0711f4,
-/* 0x052b: ctx_xfer_not_load */
- 0x7e0004f5,
+/* 0x052f: ctx_xfer_not_load */
+ 0x7e0004f9,
0xbd000216,
0x47fc8024,
0x0002f602,
@@ -449,10 +450,10 @@ uint32_t gk208_grgpc_code[] = {
0x7e00013d,
0xf400020a,
0x12f40601,
-/* 0x05b5: ctx_xfer_post */
+/* 0x05b9: ctx_xfer_post */
0x02277e07,
-/* 0x05b9: ctx_xfer_done */
- 0x04e17e00,
+/* 0x05bd: ctx_xfer_done */
+ 0x04e57e00,
0x0000f800,
0x00000000,
0x00000000,
@@ -469,5 +470,4 @@ uint32_t gk208_grgpc_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 5136f9161706..fa9f3c0c5994 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -438,48 +438,49 @@ uint32_t gm107_grgpc_code[] = {
0x0412fd20,
0xfd01e4b6,
0x18fe051e,
- 0x06447e00,
+ 0x06487e00,
0xd40ef400,
/* 0x05a0: main_not_ctx_xfer */
0xf010ef94,
0xf87e01f5,
0x0ef40002,
/* 0x05ad: ih */
- 0xfe80f9c7,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x004a04bd,
- 0x00aacf02,
- 0xf404abc4,
- 0x240d1f0b,
- 0xcf1a004e,
- 0x004f00ee,
- 0x00ffcf19,
- 0x0000047e,
- 0x0040010e,
- 0x000ef61d,
-/* 0x05ea: ih_no_fifo */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x060a: hub_barrier_done */
+ 0xf900f9c7,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0xcf02004a,
+ 0xabc400aa,
+ 0x1f0bf404,
+ 0x004e240d,
+ 0x00eecf1a,
+ 0xcf19004f,
+ 0x047e00ff,
+ 0x010e0000,
+ 0xf61d0040,
+ 0x04bd000e,
+/* 0x05ec: ih_no_fifo */
+ 0xf6010040,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xfc80fc00,
+ 0x0032f400,
+/* 0x060e: hub_barrier_done */
0x010f01f8,
0xbb040e98,
0xffb204fe,
0x4094188e,
0x00008f7e,
-/* 0x061e: ctx_redswitch */
+/* 0x0622: ctx_redswitch */
0x200f00f8,
0x01850080,
0xbd000ff6,
-/* 0x062b: ctx_redswitch_delay */
+/* 0x062f: ctx_redswitch_delay */
0xb6080e04,
0x1bf401e2,
0x00f5f1fd,
@@ -487,15 +488,15 @@ uint32_t gm107_grgpc_code[] = {
0x85008002,
0x000ff601,
0x00f804bd,
-/* 0x0644: ctx_xfer */
+/* 0x0648: ctx_xfer */
0x02810080,
0xbd000ff6,
0x1dc48e04,
0x01e5f050,
0x8f7effb2,
0x11f40000,
- 0x061e7e07,
-/* 0x0661: ctx_xfer_not_load */
+ 0x06227e07,
+/* 0x0665: ctx_xfer_not_load */
0x02167e00,
0x8024bd00,
0xf60247fc,
@@ -550,15 +551,15 @@ uint32_t gm107_grgpc_code[] = {
0x7e00020a,
0xf4000314,
0x12f40601,
-/* 0x0739: ctx_xfer_post */
+/* 0x073d: ctx_xfer_post */
0x02277e1a,
0x8e0d0f00,
0xf0501da8,
0xffb201e5,
0x00008f7e,
0x0003147e,
-/* 0x0750: ctx_xfer_done */
- 0x00060a7e,
+/* 0x0754: ctx_xfer_done */
+ 0x00060e7e,
0x000000f8,
0x00000000,
0x00000000,
@@ -601,5 +602,4 @@ uint32_t gm107_grgpc_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
index 87f99e38acbf..e3a2fb308271 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
@@ -306,6 +306,7 @@ main:
// interrupt handler
ih:
+ push $r0
push $r8
mov $r8 $flags
push $r8
@@ -380,6 +381,7 @@ ih:
pop $r8
mov $flags $r8
pop $r8
+ pop $r0
bclr $flags $p0
iret
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index f6acda505677..397921a9a46c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -528,10 +528,10 @@ uint32_t gf100_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0x0d21f502,
- 0x1f21f508,
+ 0x1121f502,
+ 0x2321f508,
0x10f7f008,
- 0x086c21f5,
+ 0x087021f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t gf100_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x086c21f5,
+ 0x087021f5,
0xf500f7f0,
- 0xf1080d21,
+ 0xf1081121,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,7 +610,7 @@ uint32_t gf100_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0x4021f502,
+ 0x4421f502,
0xf094bd0a,
0x07f10799,
0x03f01700,
@@ -621,7 +621,7 @@ uint32_t gf100_grhub_code[] = {
0x0203f00f,
0xbd0009d0,
0x0131f404,
- 0x0a4021f5,
+ 0x0a4421f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t gf100_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc0a4021,
+ 0xfc0a4421,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t gf100_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x0a4021f5,
+ 0x0a4421f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t gf100_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0xd021f502,
+ 0xd421f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,7 +664,7 @@ uint32_t gf100_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0x4021f502,
+ 0x4421f502,
0xf094bd0a,
0x07f10799,
0x03f01700,
@@ -682,107 +682,108 @@ uint32_t gf100_grhub_code[] = {
0x04bd0002,
0xfea00ef5,
/* 0x06c8: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x10d7f030,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xb70421f4,
- 0xf00400b0,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x071a: ih_no_fifo */
- 0xabe404bd,
- 0x0bf40100,
- 0x10d7f00d,
- 0x4001e7f1,
-/* 0x072b: ih_no_ctxsw */
- 0xe40421f4,
- 0xf40400ab,
- 0xe7f16c0b,
- 0xe3f00708,
- 0x6821f440,
- 0xf102ffb9,
- 0xf0040007,
- 0x0fd00203,
- 0xf104bd00,
- 0xf00704e7,
+ 0x80f900f9,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf0300bf4,
+ 0xe7f110d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x00b0b704,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x071c: ih_no_fifo */
+ 0x0100abe4,
+ 0xf00d0bf4,
+ 0xe7f110d7,
+ 0x21f44001,
+/* 0x072d: ih_no_ctxsw */
+ 0x00abe404,
+ 0x6c0bf404,
+ 0x0708e7f1,
+ 0xf440e3f0,
+ 0xffb96821,
+ 0x0007f102,
+ 0x0203f004,
+ 0xbd000fd0,
+ 0x04e7f104,
+ 0x40e3f007,
+ 0xb96821f4,
+ 0x07f102ff,
+ 0x03f00300,
+ 0x000fd002,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x0700f7f1,
+ 0xbb40f3f0,
+ 0x21f400ef,
+ 0x0007f168,
+ 0x0203f002,
+ 0xbd000fd0,
+ 0x03f7f004,
+ 0x037e21f5,
+ 0x0100b7f1,
+ 0xf102bfb9,
+ 0xf00144e7,
0x21f440e3,
- 0x02ffb968,
- 0x030007f1,
- 0xd00203f0,
- 0x04bd000f,
- 0x9450fec7,
- 0xf7f102ee,
- 0xf3f00700,
- 0x00efbb40,
- 0xf16821f4,
- 0xf0020007,
- 0x0fd00203,
- 0xf004bd00,
- 0x21f503f7,
- 0xb7f1037e,
- 0xbfb90100,
- 0x44e7f102,
- 0x40e3f001,
-/* 0x079b: ih_no_fwmthd */
- 0xf19d21f4,
- 0xbd0504b7,
- 0xb4abffb0,
- 0xf10f0bf4,
- 0xf0070007,
- 0x0bd00303,
-/* 0x07b3: ih_no_other */
- 0xf104bd00,
- 0xf0010007,
- 0x0ad00003,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x07d7: ctx_4160s */
+/* 0x079d: ih_no_fwmthd */
+ 0x04b7f19d,
+ 0xffb0bd05,
+ 0x0bf4b4ab,
+ 0x0007f10f,
+ 0x0303f007,
+ 0xbd000bd0,
+/* 0x07b5: ih_no_other */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0x32f400fc,
+/* 0x07db: ctx_4160s */
0xf001f800,
0xffb901f7,
0x60e7f102,
0x40e3f041,
-/* 0x07e7: ctx_4160s_wait */
+/* 0x07eb: ctx_4160s_wait */
0xf19d21f4,
0xf04160e7,
0x21f440e3,
0x02ffb968,
0xf404ffc8,
0x00f8f00b,
-/* 0x07fc: ctx_4160c */
+/* 0x0800: ctx_4160c */
0xffb9f4bd,
0x60e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x080d: ctx_4170s */
+/* 0x0811: ctx_4170s */
0x10f5f000,
0xf102ffb9,
0xf04170e7,
0x21f440e3,
-/* 0x081f: ctx_4170w */
+/* 0x0823: ctx_4170w */
0xf100f89d,
0xf04170e7,
0x21f440e3,
0x02ffb968,
0xf410f4f0,
0x00f8f01b,
-/* 0x0834: ctx_redswitch */
+/* 0x0838: ctx_redswitch */
0x0200e7f1,
0xf040e5f0,
0xe5f020e5,
@@ -790,7 +791,7 @@ uint32_t gf100_grhub_code[] = {
0x0103f085,
0xbd000ed0,
0x08f7f004,
-/* 0x0850: ctx_redswitch_delay */
+/* 0x0854: ctx_redswitch_delay */
0xf401f2b6,
0xe5f1fd1b,
0xe5f10400,
@@ -798,7 +799,7 @@ uint32_t gf100_grhub_code[] = {
0x03f08500,
0x000ed001,
0x00f804bd,
-/* 0x086c: ctx_86c */
+/* 0x0870: ctx_86c */
0x1b0007f1,
0xd00203f0,
0x04bd000f,
@@ -809,16 +810,16 @@ uint32_t gf100_grhub_code[] = {
0xa86ce7f1,
0xf441e3f0,
0x00f89d21,
-/* 0x0894: ctx_mem */
+/* 0x0898: ctx_mem */
0x840007f1,
0xd00203f0,
0x04bd000f,
-/* 0x08a0: ctx_mem_wait */
+/* 0x08a4: ctx_mem_wait */
0x8400f7f1,
0xcf02f3f0,
0xfffd00ff,
0xf31bf405,
-/* 0x08b2: ctx_load */
+/* 0x08b6: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
@@ -836,7 +837,7 @@ uint32_t gf100_grhub_code[] = {
0x02d00203,
0xf004bd00,
0x21f507f7,
- 0x07f10894,
+ 0x07f10898,
0x03f0c000,
0x0002d002,
0x0bfe04bd,
@@ -891,31 +892,31 @@ uint32_t gf100_grhub_code[] = {
0x03f01700,
0x0009d002,
0x00f804bd,
-/* 0x09d0: ctx_chan */
- 0x07d721f5,
- 0x08b221f5,
+/* 0x09d4: ctx_chan */
+ 0x07db21f5,
+ 0x08b621f5,
0xf40ca7f0,
0xf7f0d021,
- 0x9421f505,
- 0xfc21f508,
-/* 0x09eb: ctx_mmio_exec */
- 0x9800f807,
+ 0x9821f505,
+ 0x0021f508,
+/* 0x09ef: ctx_mmio_exec */
+ 0x9800f808,
0x07f14103,
0x03f08100,
0x0003d002,
0x34bd04bd,
-/* 0x09fc: ctx_mmio_loop */
+/* 0x0a00: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x0a0e: ctx_mmio_pull */
+/* 0x0a12: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x0a20: ctx_mmio_done */
+/* 0x0a24: ctx_mmio_done */
0xf1160398,
0xf0810007,
0x03d00203,
@@ -924,30 +925,30 @@ uint32_t gf100_grhub_code[] = {
0x13f00100,
0x0601fa06,
0x00f803f8,
-/* 0x0a40: ctx_xfer */
+/* 0x0a44: ctx_xfer */
0xf104e7f0,
0xf0020007,
0x0ed00303,
-/* 0x0a4f: ctx_xfer_idle */
+/* 0x0a53: ctx_xfer_idle */
0xf104bd00,
0xf00000e7,
0xeecf03e3,
0x00e4f100,
0xf21bf420,
0xf40611f4,
-/* 0x0a66: ctx_xfer_pre */
+/* 0x0a6a: ctx_xfer_pre */
0xf7f01102,
- 0x6c21f510,
- 0xd721f508,
+ 0x7021f510,
+ 0xdb21f508,
0x1c11f407,
-/* 0x0a74: ctx_xfer_pre_load */
+/* 0x0a78: ctx_xfer_pre_load */
0xf502f7f0,
- 0xf5080d21,
- 0xf5081f21,
- 0xbd083421,
- 0x0d21f5f4,
- 0xb221f508,
-/* 0x0a8d: ctx_xfer_exec */
+ 0xf5081121,
+ 0xf5082321,
+ 0xbd083821,
+ 0x1121f5f4,
+ 0xb621f508,
+/* 0x0a91: ctx_xfer_exec */
0x16019808,
0x07f124bd,
0x03f00500,
@@ -982,24 +983,23 @@ uint32_t gf100_grhub_code[] = {
0x1301f402,
0xf40ca7f0,
0xf7f0d021,
- 0x9421f505,
+ 0x9821f505,
0x3202f408,
-/* 0x0b1c: ctx_xfer_post */
+/* 0x0b20: ctx_xfer_post */
0xf502f7f0,
- 0xbd080d21,
- 0x6c21f5f4,
+ 0xbd081121,
+ 0x7021f5f4,
0x7f21f508,
- 0x1f21f502,
+ 0x2321f502,
0xf5f4bd08,
- 0xf4080d21,
+ 0xf4081121,
0x01981011,
0x0511fd40,
0xf5070bf4,
-/* 0x0b47: ctx_xfer_no_post_mmio */
- 0xf509eb21,
-/* 0x0b4b: ctx_xfer_done */
- 0xf807fc21,
- 0x00000000,
+/* 0x0b4b: ctx_xfer_no_post_mmio */
+ 0xf509ef21,
+/* 0x0b4f: ctx_xfer_done */
+ 0xf8080021,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 7cb14e59dea1..50c97163dcdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -528,10 +528,10 @@ uint32_t gf117_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0x0d21f502,
- 0x1f21f508,
+ 0x1121f502,
+ 0x2321f508,
0x10f7f008,
- 0x086c21f5,
+ 0x087021f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t gf117_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x086c21f5,
+ 0x087021f5,
0xf500f7f0,
- 0xf1080d21,
+ 0xf1081121,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,7 +610,7 @@ uint32_t gf117_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0x4021f502,
+ 0x4421f502,
0xf094bd0a,
0x07f10799,
0x03f01700,
@@ -621,7 +621,7 @@ uint32_t gf117_grhub_code[] = {
0x0203f00f,
0xbd0009d0,
0x0131f404,
- 0x0a4021f5,
+ 0x0a4421f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t gf117_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc0a4021,
+ 0xfc0a4421,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t gf117_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x0a4021f5,
+ 0x0a4421f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t gf117_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0xd021f502,
+ 0xd421f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,7 +664,7 @@ uint32_t gf117_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0x4021f502,
+ 0x4421f502,
0xf094bd0a,
0x07f10799,
0x03f01700,
@@ -682,107 +682,108 @@ uint32_t gf117_grhub_code[] = {
0x04bd0002,
0xfea00ef5,
/* 0x06c8: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x10d7f030,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xb70421f4,
- 0xf00400b0,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x071a: ih_no_fifo */
- 0xabe404bd,
- 0x0bf40100,
- 0x10d7f00d,
- 0x4001e7f1,
-/* 0x072b: ih_no_ctxsw */
- 0xe40421f4,
- 0xf40400ab,
- 0xe7f16c0b,
- 0xe3f00708,
- 0x6821f440,
- 0xf102ffb9,
- 0xf0040007,
- 0x0fd00203,
- 0xf104bd00,
- 0xf00704e7,
+ 0x80f900f9,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf0300bf4,
+ 0xe7f110d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x00b0b704,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x071c: ih_no_fifo */
+ 0x0100abe4,
+ 0xf00d0bf4,
+ 0xe7f110d7,
+ 0x21f44001,
+/* 0x072d: ih_no_ctxsw */
+ 0x00abe404,
+ 0x6c0bf404,
+ 0x0708e7f1,
+ 0xf440e3f0,
+ 0xffb96821,
+ 0x0007f102,
+ 0x0203f004,
+ 0xbd000fd0,
+ 0x04e7f104,
+ 0x40e3f007,
+ 0xb96821f4,
+ 0x07f102ff,
+ 0x03f00300,
+ 0x000fd002,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x0700f7f1,
+ 0xbb40f3f0,
+ 0x21f400ef,
+ 0x0007f168,
+ 0x0203f002,
+ 0xbd000fd0,
+ 0x03f7f004,
+ 0x037e21f5,
+ 0x0100b7f1,
+ 0xf102bfb9,
+ 0xf00144e7,
0x21f440e3,
- 0x02ffb968,
- 0x030007f1,
- 0xd00203f0,
- 0x04bd000f,
- 0x9450fec7,
- 0xf7f102ee,
- 0xf3f00700,
- 0x00efbb40,
- 0xf16821f4,
- 0xf0020007,
- 0x0fd00203,
- 0xf004bd00,
- 0x21f503f7,
- 0xb7f1037e,
- 0xbfb90100,
- 0x44e7f102,
- 0x40e3f001,
-/* 0x079b: ih_no_fwmthd */
- 0xf19d21f4,
- 0xbd0504b7,
- 0xb4abffb0,
- 0xf10f0bf4,
- 0xf0070007,
- 0x0bd00303,
-/* 0x07b3: ih_no_other */
- 0xf104bd00,
- 0xf0010007,
- 0x0ad00003,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x07d7: ctx_4160s */
+/* 0x079d: ih_no_fwmthd */
+ 0x04b7f19d,
+ 0xffb0bd05,
+ 0x0bf4b4ab,
+ 0x0007f10f,
+ 0x0303f007,
+ 0xbd000bd0,
+/* 0x07b5: ih_no_other */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0x32f400fc,
+/* 0x07db: ctx_4160s */
0xf001f800,
0xffb901f7,
0x60e7f102,
0x40e3f041,
-/* 0x07e7: ctx_4160s_wait */
+/* 0x07eb: ctx_4160s_wait */
0xf19d21f4,
0xf04160e7,
0x21f440e3,
0x02ffb968,
0xf404ffc8,
0x00f8f00b,
-/* 0x07fc: ctx_4160c */
+/* 0x0800: ctx_4160c */
0xffb9f4bd,
0x60e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x080d: ctx_4170s */
+/* 0x0811: ctx_4170s */
0x10f5f000,
0xf102ffb9,
0xf04170e7,
0x21f440e3,
-/* 0x081f: ctx_4170w */
+/* 0x0823: ctx_4170w */
0xf100f89d,
0xf04170e7,
0x21f440e3,
0x02ffb968,
0xf410f4f0,
0x00f8f01b,
-/* 0x0834: ctx_redswitch */
+/* 0x0838: ctx_redswitch */
0x0200e7f1,
0xf040e5f0,
0xe5f020e5,
@@ -790,7 +791,7 @@ uint32_t gf117_grhub_code[] = {
0x0103f085,
0xbd000ed0,
0x08f7f004,
-/* 0x0850: ctx_redswitch_delay */
+/* 0x0854: ctx_redswitch_delay */
0xf401f2b6,
0xe5f1fd1b,
0xe5f10400,
@@ -798,7 +799,7 @@ uint32_t gf117_grhub_code[] = {
0x03f08500,
0x000ed001,
0x00f804bd,
-/* 0x086c: ctx_86c */
+/* 0x0870: ctx_86c */
0x1b0007f1,
0xd00203f0,
0x04bd000f,
@@ -809,16 +810,16 @@ uint32_t gf117_grhub_code[] = {
0xa86ce7f1,
0xf441e3f0,
0x00f89d21,
-/* 0x0894: ctx_mem */
+/* 0x0898: ctx_mem */
0x840007f1,
0xd00203f0,
0x04bd000f,
-/* 0x08a0: ctx_mem_wait */
+/* 0x08a4: ctx_mem_wait */
0x8400f7f1,
0xcf02f3f0,
0xfffd00ff,
0xf31bf405,
-/* 0x08b2: ctx_load */
+/* 0x08b6: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
@@ -836,7 +837,7 @@ uint32_t gf117_grhub_code[] = {
0x02d00203,
0xf004bd00,
0x21f507f7,
- 0x07f10894,
+ 0x07f10898,
0x03f0c000,
0x0002d002,
0x0bfe04bd,
@@ -891,31 +892,31 @@ uint32_t gf117_grhub_code[] = {
0x03f01700,
0x0009d002,
0x00f804bd,
-/* 0x09d0: ctx_chan */
- 0x07d721f5,
- 0x08b221f5,
+/* 0x09d4: ctx_chan */
+ 0x07db21f5,
+ 0x08b621f5,
0xf40ca7f0,
0xf7f0d021,
- 0x9421f505,
- 0xfc21f508,
-/* 0x09eb: ctx_mmio_exec */
- 0x9800f807,
+ 0x9821f505,
+ 0x0021f508,
+/* 0x09ef: ctx_mmio_exec */
+ 0x9800f808,
0x07f14103,
0x03f08100,
0x0003d002,
0x34bd04bd,
-/* 0x09fc: ctx_mmio_loop */
+/* 0x0a00: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x0a0e: ctx_mmio_pull */
+/* 0x0a12: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x0a20: ctx_mmio_done */
+/* 0x0a24: ctx_mmio_done */
0xf1160398,
0xf0810007,
0x03d00203,
@@ -924,30 +925,30 @@ uint32_t gf117_grhub_code[] = {
0x13f00100,
0x0601fa06,
0x00f803f8,
-/* 0x0a40: ctx_xfer */
+/* 0x0a44: ctx_xfer */
0xf104e7f0,
0xf0020007,
0x0ed00303,
-/* 0x0a4f: ctx_xfer_idle */
+/* 0x0a53: ctx_xfer_idle */
0xf104bd00,
0xf00000e7,
0xeecf03e3,
0x00e4f100,
0xf21bf420,
0xf40611f4,
-/* 0x0a66: ctx_xfer_pre */
+/* 0x0a6a: ctx_xfer_pre */
0xf7f01102,
- 0x6c21f510,
- 0xd721f508,
+ 0x7021f510,
+ 0xdb21f508,
0x1c11f407,
-/* 0x0a74: ctx_xfer_pre_load */
+/* 0x0a78: ctx_xfer_pre_load */
0xf502f7f0,
- 0xf5080d21,
- 0xf5081f21,
- 0xbd083421,
- 0x0d21f5f4,
- 0xb221f508,
-/* 0x0a8d: ctx_xfer_exec */
+ 0xf5081121,
+ 0xf5082321,
+ 0xbd083821,
+ 0x1121f5f4,
+ 0xb621f508,
+/* 0x0a91: ctx_xfer_exec */
0x16019808,
0x07f124bd,
0x03f00500,
@@ -982,24 +983,23 @@ uint32_t gf117_grhub_code[] = {
0x1301f402,
0xf40ca7f0,
0xf7f0d021,
- 0x9421f505,
+ 0x9821f505,
0x3202f408,
-/* 0x0b1c: ctx_xfer_post */
+/* 0x0b20: ctx_xfer_post */
0xf502f7f0,
- 0xbd080d21,
- 0x6c21f5f4,
+ 0xbd081121,
+ 0x7021f5f4,
0x7f21f508,
- 0x1f21f502,
+ 0x2321f502,
0xf5f4bd08,
- 0xf4080d21,
+ 0xf4081121,
0x01981011,
0x0511fd40,
0xf5070bf4,
-/* 0x0b47: ctx_xfer_no_post_mmio */
- 0xf509eb21,
-/* 0x0b4b: ctx_xfer_done */
- 0xf807fc21,
- 0x00000000,
+/* 0x0b4b: ctx_xfer_no_post_mmio */
+ 0xf509ef21,
+/* 0x0b4f: ctx_xfer_done */
+ 0xf8080021,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index 95ac15110049..125824b394bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -528,10 +528,10 @@ uint32_t gk104_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0xd721f502,
- 0xe921f507,
+ 0xdb21f502,
+ 0xed21f507,
0x10f7f007,
- 0x083621f5,
+ 0x083a21f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t gk104_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x083621f5,
+ 0x083a21f5,
0xf500f7f0,
- 0xf107d721,
+ 0xf107db21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,7 +610,7 @@ uint32_t gk104_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0x0221f502,
+ 0x0621f502,
0xf094bd0a,
0x07f10799,
0x03f01700,
@@ -621,7 +621,7 @@ uint32_t gk104_grhub_code[] = {
0x0203f00f,
0xbd0009d0,
0x0131f404,
- 0x0a0221f5,
+ 0x0a0621f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t gk104_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc0a0221,
+ 0xfc0a0621,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t gk104_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x0a0221f5,
+ 0x0a0621f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t gk104_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x9a21f502,
+ 0x9e21f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,7 +664,7 @@ uint32_t gk104_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0x0221f502,
+ 0x0621f502,
0xf094bd0a,
0x07f10799,
0x03f01700,
@@ -682,90 +682,91 @@ uint32_t gk104_grhub_code[] = {
0x04bd0002,
0xfea00ef5,
/* 0x06c8: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x10d7f030,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xb70421f4,
- 0xf00400b0,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x071a: ih_no_fifo */
- 0xabe404bd,
- 0x0bf40100,
- 0x10d7f00d,
- 0x4001e7f1,
-/* 0x072b: ih_no_ctxsw */
- 0xe40421f4,
- 0xf40400ab,
- 0xe7f16c0b,
- 0xe3f00708,
- 0x6821f440,
- 0xf102ffb9,
- 0xf0040007,
- 0x0fd00203,
- 0xf104bd00,
- 0xf00704e7,
+ 0x80f900f9,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf0300bf4,
+ 0xe7f110d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x00b0b704,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x071c: ih_no_fifo */
+ 0x0100abe4,
+ 0xf00d0bf4,
+ 0xe7f110d7,
+ 0x21f44001,
+/* 0x072d: ih_no_ctxsw */
+ 0x00abe404,
+ 0x6c0bf404,
+ 0x0708e7f1,
+ 0xf440e3f0,
+ 0xffb96821,
+ 0x0007f102,
+ 0x0203f004,
+ 0xbd000fd0,
+ 0x04e7f104,
+ 0x40e3f007,
+ 0xb96821f4,
+ 0x07f102ff,
+ 0x03f00300,
+ 0x000fd002,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x0700f7f1,
+ 0xbb40f3f0,
+ 0x21f400ef,
+ 0x0007f168,
+ 0x0203f002,
+ 0xbd000fd0,
+ 0x03f7f004,
+ 0x037e21f5,
+ 0x0100b7f1,
+ 0xf102bfb9,
+ 0xf00144e7,
0x21f440e3,
- 0x02ffb968,
- 0x030007f1,
- 0xd00203f0,
- 0x04bd000f,
- 0x9450fec7,
- 0xf7f102ee,
- 0xf3f00700,
- 0x00efbb40,
- 0xf16821f4,
- 0xf0020007,
- 0x0fd00203,
- 0xf004bd00,
- 0x21f503f7,
- 0xb7f1037e,
- 0xbfb90100,
- 0x44e7f102,
- 0x40e3f001,
-/* 0x079b: ih_no_fwmthd */
- 0xf19d21f4,
- 0xbd0504b7,
- 0xb4abffb0,
- 0xf10f0bf4,
- 0xf0070007,
- 0x0bd00303,
-/* 0x07b3: ih_no_other */
- 0xf104bd00,
- 0xf0010007,
- 0x0ad00003,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x07d7: ctx_4170s */
+/* 0x079d: ih_no_fwmthd */
+ 0x04b7f19d,
+ 0xffb0bd05,
+ 0x0bf4b4ab,
+ 0x0007f10f,
+ 0x0303f007,
+ 0xbd000bd0,
+/* 0x07b5: ih_no_other */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0x32f400fc,
+/* 0x07db: ctx_4170s */
0xf001f800,
0xffb910f5,
0x70e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x07e9: ctx_4170w */
+/* 0x07ed: ctx_4170w */
0x70e7f100,
0x40e3f041,
0xb96821f4,
0xf4f002ff,
0xf01bf410,
-/* 0x07fe: ctx_redswitch */
+/* 0x0802: ctx_redswitch */
0xe7f100f8,
0xe5f00200,
0x20e5f040,
@@ -773,7 +774,7 @@ uint32_t gk104_grhub_code[] = {
0xf0850007,
0x0ed00103,
0xf004bd00,
-/* 0x081a: ctx_redswitch_delay */
+/* 0x081e: ctx_redswitch_delay */
0xf2b608f7,
0xfd1bf401,
0x0400e5f1,
@@ -781,7 +782,7 @@ uint32_t gk104_grhub_code[] = {
0x850007f1,
0xd00103f0,
0x04bd000e,
-/* 0x0836: ctx_86c */
+/* 0x083a: ctx_86c */
0x07f100f8,
0x03f01b00,
0x000fd002,
@@ -792,17 +793,17 @@ uint32_t gk104_grhub_code[] = {
0xe7f102ff,
0xe3f0a86c,
0x9d21f441,
-/* 0x085e: ctx_mem */
+/* 0x0862: ctx_mem */
0x07f100f8,
0x03f08400,
0x000fd002,
-/* 0x086a: ctx_mem_wait */
+/* 0x086e: ctx_mem_wait */
0xf7f104bd,
0xf3f08400,
0x00ffcf02,
0xf405fffd,
0x00f8f31b,
-/* 0x087c: ctx_load */
+/* 0x0880: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f00f,
@@ -819,7 +820,7 @@ uint32_t gk104_grhub_code[] = {
0x0203f083,
0xbd0002d0,
0x07f7f004,
- 0x085e21f5,
+ 0x086221f5,
0xc00007f1,
0xd00203f0,
0x04bd0002,
@@ -874,29 +875,29 @@ uint32_t gk104_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x099a: ctx_chan */
+/* 0x099e: ctx_chan */
0x21f500f8,
- 0xa7f0087c,
+ 0xa7f00880,
0xd021f40c,
0xf505f7f0,
- 0xf8085e21,
-/* 0x09ad: ctx_mmio_exec */
+ 0xf8086221,
+/* 0x09b1: ctx_mmio_exec */
0x41039800,
0x810007f1,
0xd00203f0,
0x04bd0003,
-/* 0x09be: ctx_mmio_loop */
+/* 0x09c2: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x09d0: ctx_mmio_pull */
+/* 0x09d4: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
0x0830b69d,
0xf40112b6,
-/* 0x09e2: ctx_mmio_done */
+/* 0x09e6: ctx_mmio_done */
0x0398df1b,
0x0007f116,
0x0203f081,
@@ -905,30 +906,30 @@ uint32_t gk104_grhub_code[] = {
0x010017f1,
0xfa0613f0,
0x03f80601,
-/* 0x0a02: ctx_xfer */
+/* 0x0a06: ctx_xfer */
0xe7f000f8,
0x0007f104,
0x0303f002,
0xbd000ed0,
-/* 0x0a11: ctx_xfer_idle */
+/* 0x0a15: ctx_xfer_idle */
0x00e7f104,
0x03e3f000,
0xf100eecf,
0xf42000e4,
0x11f4f21b,
0x0d02f406,
-/* 0x0a28: ctx_xfer_pre */
+/* 0x0a2c: ctx_xfer_pre */
0xf510f7f0,
- 0xf4083621,
-/* 0x0a32: ctx_xfer_pre_load */
+ 0xf4083a21,
+/* 0x0a36: ctx_xfer_pre_load */
0xf7f01c11,
- 0xd721f502,
- 0xe921f507,
- 0xfe21f507,
- 0xf5f4bd07,
- 0xf507d721,
-/* 0x0a4b: ctx_xfer_exec */
- 0x98087c21,
+ 0xdb21f502,
+ 0xed21f507,
+ 0x0221f507,
+ 0xf5f4bd08,
+ 0xf507db21,
+/* 0x0a4f: ctx_xfer_exec */
+ 0x98088021,
0x24bd1601,
0x050007f1,
0xd00103f0,
@@ -963,21 +964,21 @@ uint32_t gk104_grhub_code[] = {
0xa7f01301,
0xd021f40c,
0xf505f7f0,
- 0xf4085e21,
-/* 0x0ada: ctx_xfer_post */
+ 0xf4086221,
+/* 0x0ade: ctx_xfer_post */
0xf7f02e02,
- 0xd721f502,
+ 0xdb21f502,
0xf5f4bd07,
- 0xf5083621,
+ 0xf5083a21,
0xf5027f21,
- 0xbd07e921,
- 0xd721f5f4,
+ 0xbd07ed21,
+ 0xdb21f5f4,
0x1011f407,
0xfd400198,
0x0bf40511,
- 0xad21f507,
-/* 0x0b05: ctx_xfer_no_post_mmio */
-/* 0x0b05: ctx_xfer_done */
+ 0xb121f507,
+/* 0x0b09: ctx_xfer_no_post_mmio */
+/* 0x0b09: ctx_xfer_done */
0x0000f809,
0x00000000,
0x00000000,
@@ -1040,5 +1041,4 @@ uint32_t gk104_grhub_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index 89986878480f..0a1b8c0b8b82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -528,10 +528,10 @@ uint32_t gk110_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0xd721f502,
- 0xe921f507,
+ 0xdb21f502,
+ 0xed21f507,
0x10f7f007,
- 0x083621f5,
+ 0x083a21f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t gk110_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x083621f5,
+ 0x083a21f5,
0xf500f7f0,
- 0xf107d721,
+ 0xf107db21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,7 +610,7 @@ uint32_t gk110_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0x0221f502,
+ 0x0621f502,
0xf094bd0a,
0x07f10799,
0x03f01700,
@@ -621,7 +621,7 @@ uint32_t gk110_grhub_code[] = {
0x0203f037,
0xbd0009d0,
0x0131f404,
- 0x0a0221f5,
+ 0x0a0621f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t gk110_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc0a0221,
+ 0xfc0a0621,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t gk110_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x0a0221f5,
+ 0x0a0621f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t gk110_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x9a21f502,
+ 0x9e21f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,7 +664,7 @@ uint32_t gk110_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0x0221f502,
+ 0x0621f502,
0xf094bd0a,
0x07f10799,
0x03f01700,
@@ -682,90 +682,91 @@ uint32_t gk110_grhub_code[] = {
0x04bd0002,
0xfea00ef5,
/* 0x06c8: ih */
- 0x88fe80f9,
- 0xf980f901,
- 0xf9a0f990,
- 0xf9d0f9b0,
- 0xbdf0f9e0,
- 0x00a7f104,
- 0x00a3f002,
- 0xc400aacf,
- 0x0bf404ab,
- 0x10d7f030,
- 0x1a00e7f1,
- 0xcf00e3f0,
- 0xf7f100ee,
- 0xf3f01900,
- 0x00ffcf00,
- 0xb70421f4,
- 0xf00400b0,
- 0x07f101e7,
- 0x03f01d00,
- 0x000ed000,
-/* 0x071a: ih_no_fifo */
- 0xabe404bd,
- 0x0bf40100,
- 0x10d7f00d,
- 0x4001e7f1,
-/* 0x072b: ih_no_ctxsw */
- 0xe40421f4,
- 0xf40400ab,
- 0xe7f16c0b,
- 0xe3f00708,
- 0x6821f440,
- 0xf102ffb9,
- 0xf0040007,
- 0x0fd00203,
- 0xf104bd00,
- 0xf00704e7,
+ 0x80f900f9,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0xf104bdf0,
+ 0xf00200a7,
+ 0xaacf00a3,
+ 0x04abc400,
+ 0xf0300bf4,
+ 0xe7f110d7,
+ 0xe3f01a00,
+ 0x00eecf00,
+ 0x1900f7f1,
+ 0xcf00f3f0,
+ 0x21f400ff,
+ 0x00b0b704,
+ 0x01e7f004,
+ 0x1d0007f1,
+ 0xd00003f0,
+ 0x04bd000e,
+/* 0x071c: ih_no_fifo */
+ 0x0100abe4,
+ 0xf00d0bf4,
+ 0xe7f110d7,
+ 0x21f44001,
+/* 0x072d: ih_no_ctxsw */
+ 0x00abe404,
+ 0x6c0bf404,
+ 0x0708e7f1,
+ 0xf440e3f0,
+ 0xffb96821,
+ 0x0007f102,
+ 0x0203f004,
+ 0xbd000fd0,
+ 0x04e7f104,
+ 0x40e3f007,
+ 0xb96821f4,
+ 0x07f102ff,
+ 0x03f00300,
+ 0x000fd002,
+ 0xfec704bd,
+ 0x02ee9450,
+ 0x0700f7f1,
+ 0xbb40f3f0,
+ 0x21f400ef,
+ 0x0007f168,
+ 0x0203f002,
+ 0xbd000fd0,
+ 0x03f7f004,
+ 0x037e21f5,
+ 0x0100b7f1,
+ 0xf102bfb9,
+ 0xf00144e7,
0x21f440e3,
- 0x02ffb968,
- 0x030007f1,
- 0xd00203f0,
- 0x04bd000f,
- 0x9450fec7,
- 0xf7f102ee,
- 0xf3f00700,
- 0x00efbb40,
- 0xf16821f4,
- 0xf0020007,
- 0x0fd00203,
- 0xf004bd00,
- 0x21f503f7,
- 0xb7f1037e,
- 0xbfb90100,
- 0x44e7f102,
- 0x40e3f001,
-/* 0x079b: ih_no_fwmthd */
- 0xf19d21f4,
- 0xbd0504b7,
- 0xb4abffb0,
- 0xf10f0bf4,
- 0xf0070007,
- 0x0bd00303,
-/* 0x07b3: ih_no_other */
- 0xf104bd00,
- 0xf0010007,
- 0x0ad00003,
- 0xfc04bd00,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
-/* 0x07d7: ctx_4170s */
+/* 0x079d: ih_no_fwmthd */
+ 0x04b7f19d,
+ 0xffb0bd05,
+ 0x0bf4b4ab,
+ 0x0007f10f,
+ 0x0303f007,
+ 0xbd000bd0,
+/* 0x07b5: ih_no_other */
+ 0x0007f104,
+ 0x0003f001,
+ 0xbd000ad0,
+ 0xfcf0fc04,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0x32f400fc,
+/* 0x07db: ctx_4170s */
0xf001f800,
0xffb910f5,
0x70e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x07e9: ctx_4170w */
+/* 0x07ed: ctx_4170w */
0x70e7f100,
0x40e3f041,
0xb96821f4,
0xf4f002ff,
0xf01bf410,
-/* 0x07fe: ctx_redswitch */
+/* 0x0802: ctx_redswitch */
0xe7f100f8,
0xe5f00200,
0x20e5f040,
@@ -773,7 +774,7 @@ uint32_t gk110_grhub_code[] = {
0xf0850007,
0x0ed00103,
0xf004bd00,
-/* 0x081a: ctx_redswitch_delay */
+/* 0x081e: ctx_redswitch_delay */
0xf2b608f7,
0xfd1bf401,
0x0400e5f1,
@@ -781,7 +782,7 @@ uint32_t gk110_grhub_code[] = {
0x850007f1,
0xd00103f0,
0x04bd000e,
-/* 0x0836: ctx_86c */
+/* 0x083a: ctx_86c */
0x07f100f8,
0x03f02300,
0x000fd002,
@@ -792,17 +793,17 @@ uint32_t gk110_grhub_code[] = {
0xe7f102ff,
0xe3f0a88c,
0x9d21f441,
-/* 0x085e: ctx_mem */
+/* 0x0862: ctx_mem */
0x07f100f8,
0x03f08400,
0x000fd002,
-/* 0x086a: ctx_mem_wait */
+/* 0x086e: ctx_mem_wait */
0xf7f104bd,
0xf3f08400,
0x00ffcf02,
0xf405fffd,
0x00f8f31b,
-/* 0x087c: ctx_load */
+/* 0x0880: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f037,
@@ -819,7 +820,7 @@ uint32_t gk110_grhub_code[] = {
0x0203f083,
0xbd0002d0,
0x07f7f004,
- 0x085e21f5,
+ 0x086221f5,
0xc00007f1,
0xd00203f0,
0x04bd0002,
@@ -874,29 +875,29 @@ uint32_t gk110_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x099a: ctx_chan */
+/* 0x099e: ctx_chan */
0x21f500f8,
- 0xa7f0087c,
+ 0xa7f00880,
0xd021f40c,
0xf505f7f0,
- 0xf8085e21,
-/* 0x09ad: ctx_mmio_exec */
+ 0xf8086221,
+/* 0x09b1: ctx_mmio_exec */
0x41039800,
0x810007f1,
0xd00203f0,
0x04bd0003,
-/* 0x09be: ctx_mmio_loop */
+/* 0x09c2: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x09d0: ctx_mmio_pull */
+/* 0x09d4: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
0x0830b69d,
0xf40112b6,
-/* 0x09e2: ctx_mmio_done */
+/* 0x09e6: ctx_mmio_done */
0x0398df1b,
0x0007f116,
0x0203f081,
@@ -905,30 +906,30 @@ uint32_t gk110_grhub_code[] = {
0x010017f1,
0xfa0613f0,
0x03f80601,
-/* 0x0a02: ctx_xfer */
+/* 0x0a06: ctx_xfer */
0xe7f000f8,
0x0007f104,
0x0303f002,
0xbd000ed0,
-/* 0x0a11: ctx_xfer_idle */
+/* 0x0a15: ctx_xfer_idle */
0x00e7f104,
0x03e3f000,
0xf100eecf,
0xf42000e4,
0x11f4f21b,
0x0d02f406,
-/* 0x0a28: ctx_xfer_pre */
+/* 0x0a2c: ctx_xfer_pre */
0xf510f7f0,
- 0xf4083621,
-/* 0x0a32: ctx_xfer_pre_load */
+ 0xf4083a21,
+/* 0x0a36: ctx_xfer_pre_load */
0xf7f01c11,
- 0xd721f502,
- 0xe921f507,
- 0xfe21f507,
- 0xf5f4bd07,
- 0xf507d721,
-/* 0x0a4b: ctx_xfer_exec */
- 0x98087c21,
+ 0xdb21f502,
+ 0xed21f507,
+ 0x0221f507,
+ 0xf5f4bd08,
+ 0xf507db21,
+/* 0x0a4f: ctx_xfer_exec */
+ 0x98088021,
0x24bd1601,
0x050007f1,
0xd00103f0,
@@ -963,21 +964,21 @@ uint32_t gk110_grhub_code[] = {
0xa7f01301,
0xd021f40c,
0xf505f7f0,
- 0xf4085e21,
-/* 0x0ada: ctx_xfer_post */
+ 0xf4086221,
+/* 0x0ade: ctx_xfer_post */
0xf7f02e02,
- 0xd721f502,
+ 0xdb21f502,
0xf5f4bd07,
- 0xf5083621,
+ 0xf5083a21,
0xf5027f21,
- 0xbd07e921,
- 0xd721f5f4,
+ 0xbd07ed21,
+ 0xdb21f5f4,
0x1011f407,
0xfd400198,
0x0bf40511,
- 0xad21f507,
-/* 0x0b05: ctx_xfer_no_post_mmio */
-/* 0x0b05: ctx_xfer_done */
+ 0xb121f507,
+/* 0x0b09: ctx_xfer_no_post_mmio */
+/* 0x0b09: ctx_xfer_done */
0x0000f809,
0x00000000,
0x00000000,
@@ -1040,5 +1041,4 @@ uint32_t gk110_grhub_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index 0e98fa4a386e..16869d0b109b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -478,10 +478,10 @@ uint32_t gk208_grhub_code[] = {
0x01040080,
0xbd0001f6,
0x01004104,
- 0xa87e020f,
- 0xb77e0006,
+ 0xac7e020f,
+ 0xbb7e0006,
0x100f0006,
- 0x0006f97e,
+ 0x0006fd7e,
0x98000e98,
0x207e010f,
0x14950001,
@@ -523,8 +523,8 @@ uint32_t gk208_grhub_code[] = {
0x800040b7,
0xf40132b6,
0x000fb41b,
- 0x0006f97e,
- 0xa87e000f,
+ 0x0006fd7e,
+ 0xac7e000f,
0x00800006,
0x01f60201,
0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t gk208_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0231f401,
- 0x00087c7e,
+ 0x0008807e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -563,7 +563,7 @@ uint32_t gk208_grhub_code[] = {
0x37008006,
0x0009f602,
0x31f404bd,
- 0x087c7e01,
+ 0x08807e01,
0xf094bd00,
0x00800699,
0x09f60217,
@@ -572,7 +572,7 @@ uint32_t gk208_grhub_code[] = {
0x20f92f0e,
0x32f412b2,
0x0232f401,
- 0x00087c7e,
+ 0x0008807e,
0x008020fc,
0x02f602c0,
0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t gk208_grhub_code[] = {
0x23c8130e,
0x0d0bf41f,
0xf40131f4,
- 0x7c7e0232,
+ 0x807e0232,
/* 0x054e: chsw_done */
0x01020008,
0x02c30080,
@@ -593,7 +593,7 @@ uint32_t gk208_grhub_code[] = {
0xb0ff2a0e,
0x1bf401e4,
0x7ef2b20c,
- 0xf400081c,
+ 0xf4000820,
/* 0x057a: main_not_ctx_chan */
0xe4b0400e,
0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t gk208_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0232f401,
- 0x00087c7e,
+ 0x0008807e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -618,91 +618,92 @@ uint32_t gk208_grhub_code[] = {
0xbd0002f6,
0xcc0ef504,
/* 0x05c9: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x004a04bd,
- 0x00aacf02,
- 0xf404abc4,
- 0x100d230b,
- 0xcf1a004e,
- 0x004f00ee,
- 0x00ffcf19,
+ 0xf900f9fe,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0xcf02004a,
+ 0xabc400aa,
+ 0x230bf404,
+ 0x004e100d,
+ 0x00eecf1a,
+ 0xcf19004f,
+ 0x047e00ff,
+ 0xb0b70000,
+ 0x010e0400,
+ 0xf61d0040,
+ 0x04bd000e,
+/* 0x060c: ih_no_fifo */
+ 0x0100abe4,
+ 0x0d0c0bf4,
+ 0x40014e10,
0x0000047e,
- 0x0400b0b7,
- 0x0040010e,
- 0x000ef61d,
-/* 0x060a: ih_no_fifo */
- 0xabe404bd,
- 0x0bf40100,
- 0x4e100d0c,
- 0x047e4001,
-/* 0x061a: ih_no_ctxsw */
- 0xabe40000,
- 0x0bf40400,
- 0x07088e56,
- 0x00657e40,
- 0x80ffb200,
- 0xf6020400,
- 0x04bd000f,
- 0x4007048e,
- 0x0000657e,
- 0x0080ffb2,
- 0x0ff60203,
- 0xc704bd00,
- 0xee9450fe,
- 0x07008f02,
- 0x00efbb40,
- 0x0000657e,
- 0x02020080,
+/* 0x061c: ih_no_ctxsw */
+ 0x0400abe4,
+ 0x8e560bf4,
+ 0x7e400708,
+ 0xb2000065,
+ 0x040080ff,
+ 0x000ff602,
+ 0x048e04bd,
+ 0x657e4007,
+ 0xffb20000,
+ 0x02030080,
0xbd000ff6,
- 0x7e030f04,
- 0x4b0002f8,
- 0xbfb20100,
- 0x4001448e,
- 0x00008f7e,
-/* 0x0674: ih_no_fwmthd */
- 0xbd05044b,
- 0xb4abffb0,
- 0x800c0bf4,
- 0xf6030700,
- 0x04bd000b,
-/* 0x0688: ih_no_other */
- 0xf6010040,
- 0x04bd000a,
- 0xe0fcf0fc,
- 0xb0fcd0fc,
- 0x90fca0fc,
- 0x88fe80fc,
- 0xf480fc00,
+ 0x50fec704,
+ 0x8f02ee94,
+ 0xbb400700,
+ 0x657e00ef,
+ 0x00800000,
+ 0x0ff60202,
+ 0x0f04bd00,
+ 0x02f87e03,
+ 0x01004b00,
+ 0x448ebfb2,
+ 0x8f7e4001,
+/* 0x0676: ih_no_fwmthd */
+ 0x044b0000,
+ 0xffb0bd05,
+ 0x0bf4b4ab,
+ 0x0700800c,
+ 0x000bf603,
+/* 0x068a: ih_no_other */
+ 0x004004bd,
+ 0x000af601,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0xf400fc80,
0x01f80032,
-/* 0x06a8: ctx_4170s */
+/* 0x06ac: ctx_4170s */
0xb210f5f0,
0x41708eff,
0x008f7e40,
-/* 0x06b7: ctx_4170w */
+/* 0x06bb: ctx_4170w */
0x8e00f800,
0x7e404170,
0xb2000065,
0x10f4f0ff,
0xf8f31bf4,
-/* 0x06c9: ctx_redswitch */
+/* 0x06cd: ctx_redswitch */
0x02004e00,
0xf040e5f0,
0xe5f020e5,
0x85008010,
0x000ef601,
0x080f04bd,
-/* 0x06e0: ctx_redswitch_delay */
+/* 0x06e4: ctx_redswitch_delay */
0xf401f2b6,
0xe5f1fd1b,
0xe5f10400,
0x00800100,
0x0ef60185,
0xf804bd00,
-/* 0x06f9: ctx_86c */
+/* 0x06fd: ctx_86c */
0x23008000,
0x000ff602,
0xffb204bd,
@@ -711,15 +712,15 @@ uint32_t gk208_grhub_code[] = {
0x8c8effb2,
0x8f7e41a8,
0x00f80000,
-/* 0x0718: ctx_mem */
+/* 0x071c: ctx_mem */
0x02840080,
0xbd000ff6,
-/* 0x0721: ctx_mem_wait */
+/* 0x0725: ctx_mem_wait */
0x84008f04,
0x00ffcf02,
0xf405fffd,
0x00f8f61b,
-/* 0x0730: ctx_load */
+/* 0x0734: ctx_load */
0x99f094bd,
0x37008005,
0x0009f602,
@@ -733,7 +734,7 @@ uint32_t gk208_grhub_code[] = {
0x02830080,
0xbd0002f6,
0x7e070f04,
- 0x80000718,
+ 0x8000071c,
0xf602c000,
0x04bd0002,
0xf0000bfe,
@@ -779,28 +780,28 @@ uint32_t gk208_grhub_code[] = {
0x17008005,
0x0009f602,
0x00f804bd,
-/* 0x081c: ctx_chan */
- 0x0007307e,
+/* 0x0820: ctx_chan */
+ 0x0007347e,
0xb87e0c0a,
0x050f0000,
- 0x0007187e,
-/* 0x082e: ctx_mmio_exec */
+ 0x00071c7e,
+/* 0x0832: ctx_mmio_exec */
0x039800f8,
0x81008041,
0x0003f602,
0x34bd04bd,
-/* 0x083c: ctx_mmio_loop */
+/* 0x0840: ctx_mmio_loop */
0xf4ff34c4,
0x00450e1b,
0x0653f002,
0xf80535fa,
-/* 0x084d: ctx_mmio_pull */
+/* 0x0851: ctx_mmio_pull */
0x804e9803,
0x7e814f98,
0xb600008f,
0x12b60830,
0xdf1bf401,
-/* 0x0860: ctx_mmio_done */
+/* 0x0864: ctx_mmio_done */
0x80160398,
0xf6028100,
0x04bd0003,
@@ -808,27 +809,27 @@ uint32_t gk208_grhub_code[] = {
0x13f00100,
0x0601fa06,
0x00f803f8,
-/* 0x087c: ctx_xfer */
+/* 0x0880: ctx_xfer */
0x0080040e,
0x0ef60302,
-/* 0x0887: ctx_xfer_idle */
+/* 0x088b: ctx_xfer_idle */
0x8e04bd00,
0xcf030000,
0xe4f100ee,
0x1bf42000,
0x0611f4f5,
-/* 0x089b: ctx_xfer_pre */
+/* 0x089f: ctx_xfer_pre */
0x0f0c02f4,
- 0x06f97e10,
+ 0x06fd7e10,
0x1b11f400,
-/* 0x08a4: ctx_xfer_pre_load */
- 0xa87e020f,
- 0xb77e0006,
- 0xc97e0006,
+/* 0x08a8: ctx_xfer_pre_load */
+ 0xac7e020f,
+ 0xbb7e0006,
+ 0xcd7e0006,
0xf4bd0006,
- 0x0006a87e,
- 0x0007307e,
-/* 0x08bc: ctx_xfer_exec */
+ 0x0006ac7e,
+ 0x0007347e,
+/* 0x08c0: ctx_xfer_exec */
0xbd160198,
0x05008024,
0x0002f601,
@@ -858,21 +859,21 @@ uint32_t gk208_grhub_code[] = {
0x01f40002,
0x7e0c0a12,
0x0f0000b8,
- 0x07187e05,
+ 0x071c7e05,
0x2d02f400,
-/* 0x0938: ctx_xfer_post */
- 0xa87e020f,
+/* 0x093c: ctx_xfer_post */
+ 0xac7e020f,
0xf4bd0006,
- 0x0006f97e,
+ 0x0006fd7e,
0x0002277e,
- 0x0006b77e,
- 0xa87ef4bd,
+ 0x0006bb7e,
+ 0xac7ef4bd,
0x11f40006,
0x40019810,
0xf40511fd,
- 0x2e7e070b,
-/* 0x0962: ctx_xfer_no_post_mmio */
-/* 0x0962: ctx_xfer_done */
+ 0x327e070b,
+/* 0x0966: ctx_xfer_no_post_mmio */
+/* 0x0966: ctx_xfer_done */
0x00f80008,
0x00000000,
0x00000000,
@@ -912,5 +913,4 @@ uint32_t gk208_grhub_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 5f953c5c20b7..d6343d2a614c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -478,10 +478,10 @@ uint32_t gm107_grhub_code[] = {
0x01040080,
0xbd0001f6,
0x01004104,
- 0xa87e020f,
- 0xb77e0006,
+ 0xac7e020f,
+ 0xbb7e0006,
0x100f0006,
- 0x0006f97e,
+ 0x0006fd7e,
0x98000e98,
0x207e010f,
0x14950001,
@@ -523,8 +523,8 @@ uint32_t gm107_grhub_code[] = {
0x800040b7,
0xf40132b6,
0x000fb41b,
- 0x0006f97e,
- 0xa87e000f,
+ 0x0006fd7e,
+ 0xac7e000f,
0x00800006,
0x01f60201,
0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t gm107_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0231f401,
- 0x00087c7e,
+ 0x0008807e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -563,7 +563,7 @@ uint32_t gm107_grhub_code[] = {
0x37008006,
0x0009f602,
0x31f404bd,
- 0x087c7e01,
+ 0x08807e01,
0xf094bd00,
0x00800699,
0x09f60217,
@@ -572,7 +572,7 @@ uint32_t gm107_grhub_code[] = {
0x20f92f0e,
0x32f412b2,
0x0232f401,
- 0x00087c7e,
+ 0x0008807e,
0x008020fc,
0x02f602c0,
0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t gm107_grhub_code[] = {
0x23c8130e,
0x0d0bf41f,
0xf40131f4,
- 0x7c7e0232,
+ 0x807e0232,
/* 0x054e: chsw_done */
0x01020008,
0x02c30080,
@@ -593,7 +593,7 @@ uint32_t gm107_grhub_code[] = {
0xb0ff2a0e,
0x1bf401e4,
0x7ef2b20c,
- 0xf400081c,
+ 0xf4000820,
/* 0x057a: main_not_ctx_chan */
0xe4b0400e,
0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t gm107_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0232f401,
- 0x00087c7e,
+ 0x0008807e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -618,91 +618,92 @@ uint32_t gm107_grhub_code[] = {
0xbd0002f6,
0xcc0ef504,
/* 0x05c9: ih */
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0x004a04bd,
- 0x00aacf02,
- 0xf404abc4,
- 0x100d230b,
- 0xcf1a004e,
- 0x004f00ee,
- 0x00ffcf19,
+ 0xf900f9fe,
+ 0x0188fe80,
+ 0x90f980f9,
+ 0xb0f9a0f9,
+ 0xe0f9d0f9,
+ 0x04bdf0f9,
+ 0xcf02004a,
+ 0xabc400aa,
+ 0x230bf404,
+ 0x004e100d,
+ 0x00eecf1a,
+ 0xcf19004f,
+ 0x047e00ff,
+ 0xb0b70000,
+ 0x010e0400,
+ 0xf61d0040,
+ 0x04bd000e,
+/* 0x060c: ih_no_fifo */
+ 0x0100abe4,
+ 0x0d0c0bf4,
+ 0x40014e10,
0x0000047e,
- 0x0400b0b7,
- 0x0040010e,
- 0x000ef61d,
-/* 0x060a: ih_no_fifo */
- 0xabe404bd,
- 0x0bf40100,
- 0x4e100d0c,
- 0x047e4001,
-/* 0x061a: ih_no_ctxsw */
- 0xabe40000,
- 0x0bf40400,
- 0x07088e56,
- 0x00657e40,
- 0x80ffb200,
- 0xf6020400,
- 0x04bd000f,
- 0x4007048e,
- 0x0000657e,
- 0x0080ffb2,
- 0x0ff60203,
- 0xc704bd00,
- 0xee9450fe,
- 0x07008f02,
- 0x00efbb40,
- 0x0000657e,
- 0x02020080,
+/* 0x061c: ih_no_ctxsw */
+ 0x0400abe4,
+ 0x8e560bf4,
+ 0x7e400708,
+ 0xb2000065,
+ 0x040080ff,
+ 0x000ff602,
+ 0x048e04bd,
+ 0x657e4007,
+ 0xffb20000,
+ 0x02030080,
0xbd000ff6,
- 0x7e030f04,
- 0x4b0002f8,
- 0xbfb20100,
- 0x4001448e,
- 0x00008f7e,
-/* 0x0674: ih_no_fwmthd */
- 0xbd05044b,
- 0xb4abffb0,
- 0x800c0bf4,
- 0xf6030700,
- 0x04bd000b,
-/* 0x0688: ih_no_other */
- 0xf6010040,
- 0x04bd000a,
- 0xe0fcf0fc,
- 0xb0fcd0fc,
- 0x90fca0fc,
- 0x88fe80fc,
- 0xf480fc00,
+ 0x50fec704,
+ 0x8f02ee94,
+ 0xbb400700,
+ 0x657e00ef,
+ 0x00800000,
+ 0x0ff60202,
+ 0x0f04bd00,
+ 0x02f87e03,
+ 0x01004b00,
+ 0x448ebfb2,
+ 0x8f7e4001,
+/* 0x0676: ih_no_fwmthd */
+ 0x044b0000,
+ 0xffb0bd05,
+ 0x0bf4b4ab,
+ 0x0700800c,
+ 0x000bf603,
+/* 0x068a: ih_no_other */
+ 0x004004bd,
+ 0x000af601,
+ 0xf0fc04bd,
+ 0xd0fce0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0xfc0088fe,
+ 0xf400fc80,
0x01f80032,
-/* 0x06a8: ctx_4170s */
+/* 0x06ac: ctx_4170s */
0xb210f5f0,
0x41708eff,
0x008f7e40,
-/* 0x06b7: ctx_4170w */
+/* 0x06bb: ctx_4170w */
0x8e00f800,
0x7e404170,
0xb2000065,
0x10f4f0ff,
0xf8f31bf4,
-/* 0x06c9: ctx_redswitch */
+/* 0x06cd: ctx_redswitch */
0x02004e00,
0xf040e5f0,
0xe5f020e5,
0x85008010,
0x000ef601,
0x080f04bd,
-/* 0x06e0: ctx_redswitch_delay */
+/* 0x06e4: ctx_redswitch_delay */
0xf401f2b6,
0xe5f1fd1b,
0xe5f10400,
0x00800100,
0x0ef60185,
0xf804bd00,
-/* 0x06f9: ctx_86c */
+/* 0x06fd: ctx_86c */
0x23008000,
0x000ff602,
0xffb204bd,
@@ -711,15 +712,15 @@ uint32_t gm107_grhub_code[] = {
0x8c8effb2,
0x8f7e41a8,
0x00f80000,
-/* 0x0718: ctx_mem */
+/* 0x071c: ctx_mem */
0x02840080,
0xbd000ff6,
-/* 0x0721: ctx_mem_wait */
+/* 0x0725: ctx_mem_wait */
0x84008f04,
0x00ffcf02,
0xf405fffd,
0x00f8f61b,
-/* 0x0730: ctx_load */
+/* 0x0734: ctx_load */
0x99f094bd,
0x37008005,
0x0009f602,
@@ -733,7 +734,7 @@ uint32_t gm107_grhub_code[] = {
0x02830080,
0xbd0002f6,
0x7e070f04,
- 0x80000718,
+ 0x8000071c,
0xf602c000,
0x04bd0002,
0xf0000bfe,
@@ -779,28 +780,28 @@ uint32_t gm107_grhub_code[] = {
0x17008005,
0x0009f602,
0x00f804bd,
-/* 0x081c: ctx_chan */
- 0x0007307e,
+/* 0x0820: ctx_chan */
+ 0x0007347e,
0xb87e0c0a,
0x050f0000,
- 0x0007187e,
-/* 0x082e: ctx_mmio_exec */
+ 0x00071c7e,
+/* 0x0832: ctx_mmio_exec */
0x039800f8,
0x81008041,
0x0003f602,
0x34bd04bd,
-/* 0x083c: ctx_mmio_loop */
+/* 0x0840: ctx_mmio_loop */
0xf4ff34c4,
0x00450e1b,
0x0653f002,
0xf80535fa,
-/* 0x084d: ctx_mmio_pull */
+/* 0x0851: ctx_mmio_pull */
0x804e9803,
0x7e814f98,
0xb600008f,
0x12b60830,
0xdf1bf401,
-/* 0x0860: ctx_mmio_done */
+/* 0x0864: ctx_mmio_done */
0x80160398,
0xf6028100,
0x04bd0003,
@@ -808,27 +809,27 @@ uint32_t gm107_grhub_code[] = {
0x13f00100,
0x0601fa06,
0x00f803f8,
-/* 0x087c: ctx_xfer */
+/* 0x0880: ctx_xfer */
0x0080040e,
0x0ef60302,
-/* 0x0887: ctx_xfer_idle */
+/* 0x088b: ctx_xfer_idle */
0x8e04bd00,
0xcf030000,
0xe4f100ee,
0x1bf42000,
0x0611f4f5,
-/* 0x089b: ctx_xfer_pre */
+/* 0x089f: ctx_xfer_pre */
0x0f0c02f4,
- 0x06f97e10,
+ 0x06fd7e10,
0x1b11f400,
-/* 0x08a4: ctx_xfer_pre_load */
- 0xa87e020f,
- 0xb77e0006,
- 0xc97e0006,
+/* 0x08a8: ctx_xfer_pre_load */
+ 0xac7e020f,
+ 0xbb7e0006,
+ 0xcd7e0006,
0xf4bd0006,
- 0x0006a87e,
- 0x0007307e,
-/* 0x08bc: ctx_xfer_exec */
+ 0x0006ac7e,
+ 0x0007347e,
+/* 0x08c0: ctx_xfer_exec */
0xbd160198,
0x05008024,
0x0002f601,
@@ -858,21 +859,21 @@ uint32_t gm107_grhub_code[] = {
0x01f40002,
0x7e0c0a12,
0x0f0000b8,
- 0x07187e05,
+ 0x071c7e05,
0x2d02f400,
-/* 0x0938: ctx_xfer_post */
- 0xa87e020f,
+/* 0x093c: ctx_xfer_post */
+ 0xac7e020f,
0xf4bd0006,
- 0x0006f97e,
+ 0x0006fd7e,
0x0002277e,
- 0x0006b77e,
- 0xa87ef4bd,
+ 0x0006bb7e,
+ 0xac7ef4bd,
0x11f40006,
0x40019810,
0xf40511fd,
- 0x2e7e070b,
-/* 0x0962: ctx_xfer_no_post_mmio */
-/* 0x0962: ctx_xfer_done */
+ 0x327e070b,
+/* 0x0966: ctx_xfer_no_post_mmio */
+/* 0x0966: ctx_xfer_done */
0x00f80008,
0x00000000,
0x00000000,
@@ -912,5 +913,4 @@ uint32_t gm107_grhub_code[] = {
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 1f81069edc58..b2de290da16f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -27,6 +27,8 @@
#include <core/client.h>
#include <core/option.h>
+#include <core/firmware.h>
+#include <subdev/secboot.h>
#include <subdev/fb.h>
#include <subdev/mc.h>
#include <subdev/pmu.h>
@@ -1427,21 +1429,40 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
const struct gf100_grctx_func *grctx = gr->func->grctx;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_secboot *sb = device->secboot;
int i;
if (gr->firmware) {
/* load fuc microcode */
nvkm_mc_unk260(device->mc, 0);
- gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d);
- gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad);
+
+ /* securely-managed falcons must be reset using secure boot */
+ if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
+ nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+ else
+ gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
+ &gr->fuc409d);
+ if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
+ nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+ else
+ gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
+ &gr->fuc41ad);
+
nvkm_mc_unk260(device->mc, 1);
/* start both of them running */
nvkm_wr32(device, 0x409840, 0xffffffff);
nvkm_wr32(device, 0x41a10c, 0x00000000);
nvkm_wr32(device, 0x40910c, 0x00000000);
- nvkm_wr32(device, 0x41a100, 0x00000002);
- nvkm_wr32(device, 0x409100, 0x00000002);
+
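+		/* secboot-managed falcons are started through the secboot unit
+		 * rather than by poking the PGRAPH start registers directly */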
+ if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
+ nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_GPCCS);
+ else
+ nvkm_wr32(device, 0x41a100, 0x00000002);
+ if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
+ nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_FECS);
+ else
+ nvkm_wr32(device, 0x409100, 0x00000002);
if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x00000001)
break;
@@ -1683,6 +1704,12 @@ gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
fuc->data = NULL;
}
+static void
+gf100_gr_dtor_init(struct gf100_gr_pack *pack)
+{
+ vfree(pack);
+}
+
void *
gf100_gr_dtor(struct nvkm_gr *base)
{
@@ -1697,6 +1724,11 @@ gf100_gr_dtor(struct nvkm_gr *base)
gf100_gr_dtor_fw(&gr->fuc41ac);
gf100_gr_dtor_fw(&gr->fuc41ad);
+ gf100_gr_dtor_init(gr->fuc_bundle);
+ gf100_gr_dtor_init(gr->fuc_method);
+ gf100_gr_dtor_init(gr->fuc_sw_ctx);
+ gf100_gr_dtor_init(gr->fuc_sw_nonctx);
+
nvkm_memory_del(&gr->unk4188b8);
nvkm_memory_del(&gr->unk4188b4);
return gr;
@@ -1720,22 +1752,9 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
const struct firmware *fw;
- char f[64];
- char cname[16];
int ret;
- int i;
- /* Convert device name to lowercase */
- strncpy(cname, device->chip->name, sizeof(cname));
- cname[sizeof(cname) - 1] = '\0';
- i = strlen(cname);
- while (i) {
- --i;
- cname[i] = tolower(cname[i]);
- }
-
- snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
- ret = request_firmware(&fw, f, device->dev);
+ ret = nvkm_firmware_get(device, fwname, &fw);
if (ret) {
nvkm_error(subdev, "failed to load %s\n", fwname);
return ret;
@@ -1743,7 +1762,7 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
fuc->size = fw->size;
fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- release_firmware(fw);
+ nvkm_firmware_put(fw);
return (fuc->data != NULL) ? 0 : -ENOMEM;
}
@@ -1763,15 +1782,6 @@ gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
if (ret)
return ret;
- if (gr->firmware) {
- nvkm_info(&gr->base.engine.subdev, "using external firmware\n");
- if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
- gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
- gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
- gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
- return -ENODEV;
- }
-
return 0;
}
@@ -1780,10 +1790,25 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
int index, struct nvkm_gr **pgr)
{
struct gf100_gr *gr;
+ int ret;
+
if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
return -ENOMEM;
*pgr = &gr->base;
- return gf100_gr_ctor(func, device, index, gr);
+
+ ret = gf100_gr_ctor(func, device, index, gr);
+ if (ret)
+ return ret;
+
+ if (gr->firmware) {
+ if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
+ gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
+ gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
+ gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
+ return -ENODEV;
+ }
+
+ return 0;
}
int
@@ -1807,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
gf100_gr_mmio(gr, gr->func->mmio);
+ nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
do {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 02e78b8d93f6..f0c6acb0f8fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -82,7 +82,7 @@ struct gf100_gr {
/*
* Used if the register packs are loaded from NVIDIA fw instead of
- * using hardcoded arrays.
+ * using hardcoded arrays. To be allocated with vzalloc().
*/
struct gf100_gr_pack *fuc_sw_nonctx;
struct gf100_gr_pack *fuc_sw_ctx;
@@ -138,12 +138,9 @@ int gf100_gr_init(struct gf100_gr *);
int gk104_gr_init(struct gf100_gr *);
-int gk20a_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
- int, struct nvkm_gr **);
-void gk20a_gr_dtor(struct gf100_gr *);
int gk20a_gr_init(struct gf100_gr *);
-int gm204_gr_init(struct gf100_gr *);
+int gm200_gr_init(struct gf100_gr *);
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
@@ -204,6 +201,17 @@ void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
int gf100_gr_init_ctxctl(struct gf100_gr *);
+/* external bundle loading functions */
+int gk20a_gr_av_to_init(struct gf100_gr *, const char *,
+ struct gf100_gr_pack **);
+int gk20a_gr_aiv_to_init(struct gf100_gr *, const char *,
+ struct gf100_gr_pack **);
+int gk20a_gr_av_to_method(struct gf100_gr *, const char *,
+ struct gf100_gr_pack **);
+
+int gm200_gr_new_(const struct gf100_gr_func *, struct nvkm_device *, int,
+ struct nvkm_gr **);
+
/* register init value lists */
extern const struct gf100_gr_init gf100_gr_init_main_0[];
@@ -279,6 +287,4 @@ extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
-
-extern const struct gf100_gr_pack gm204_gr_pack_mmio[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index b8758d3b8b51..7ffb8a626196 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -26,37 +26,40 @@
#include <nvif/class.h>
-static void
-gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
-{
- vfree(pack);
-}
-
struct gk20a_fw_av
{
u32 addr;
u32 data;
};
-static struct gf100_gr_pack *
-gk20a_gr_av_to_init(struct gf100_gr_fuc *fuc)
+int
+gk20a_gr_av_to_init(struct gf100_gr *gr, const char *fw_name,
+ struct gf100_gr_pack **ppack)
{
+ struct gf100_gr_fuc fuc;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
- const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+ int nent;
+ int ret;
int i;
+ ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ if (ret)
+ return ret;
+
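+	/* each entry in the firmware blob is an (addr, data) pair */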
+ nent = (fuc.size / sizeof(struct gk20a_fw_av));
pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
- if (!pack)
- return ERR_PTR(-ENOMEM);
+ if (!pack) {
+ ret = -ENOMEM;
+ goto end;
+ }
init = (void *)(pack + 2);
-
pack[0].init = init;
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
ent->addr = av->addr;
ent->data = av->data;
@@ -64,7 +67,11 @@ gk20a_gr_av_to_init(struct gf100_gr_fuc *fuc)
ent->pitch = 1;
}
- return pack;
+ *ppack = pack;
+
+end:
+ gf100_gr_dtor_fw(&fuc);
+ return ret;
}
struct gk20a_fw_aiv
@@ -74,25 +81,34 @@ struct gk20a_fw_aiv
u32 data;
};
-static struct gf100_gr_pack *
-gk20a_gr_aiv_to_init(struct gf100_gr_fuc *fuc)
+int
+gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *fw_name,
+ struct gf100_gr_pack **ppack)
{
+ struct gf100_gr_fuc fuc;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
- const int nent = (fuc->size / sizeof(struct gk20a_fw_aiv));
+ int nent;
+ int ret;
int i;
+ ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ if (ret)
+ return ret;
+
+ nent = (fuc.size / sizeof(struct gk20a_fw_aiv));
pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
- if (!pack)
- return ERR_PTR(-ENOMEM);
+ if (!pack) {
+ ret = -ENOMEM;
+ goto end;
+ }
init = (void *)(pack + 2);
-
pack[0].init = init;
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc->data)[i];
+ struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc.data)[i];
ent->addr = av->addr;
ent->data = av->data;
@@ -100,30 +116,45 @@ gk20a_gr_aiv_to_init(struct gf100_gr_fuc *fuc)
ent->pitch = 1;
}
- return pack;
+ *ppack = pack;
+
+end:
+ gf100_gr_dtor_fw(&fuc);
+ return ret;
}
-static struct gf100_gr_pack *
-gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
+int
+gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
+ struct gf100_gr_pack **ppack)
{
+ struct gf100_gr_fuc fuc;
struct gf100_gr_init *init;
struct gf100_gr_pack *pack;
	/* We don't expect to initialize more than 16 classes here... */
static const unsigned int max_classes = 16;
- const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
- int i, classidx = 0;
- u32 prevclass = 0;
+ u32 classidx = 0, prevclass = 0;
+ int nent;
+ int ret;
+ int i;
+
+ ret = gf100_gr_ctor_fw(gr, fw_name, &fuc);
+ if (ret)
+ return ret;
+
+ nent = (fuc.size / sizeof(struct gk20a_fw_av));
pack = vzalloc((sizeof(*pack) * max_classes) +
(sizeof(*init) * (nent + 1)));
- if (!pack)
- return ERR_PTR(-ENOMEM);
+ if (!pack) {
+ ret = -ENOMEM;
+ goto end;
+ }
init = (void *)(pack + max_classes);
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
- struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+ struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
u32 class = av->addr & 0xffff;
u32 addr = (av->addr & 0xffff0000) >> 14;
@@ -133,7 +164,8 @@ gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
prevclass = class;
if (++classidx >= max_classes) {
vfree(pack);
- return ERR_PTR(-ENOSPC);
+ ret = -ENOSPC;
+ goto end;
}
}
@@ -143,7 +175,11 @@ gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
ent->pitch = 1;
}
- return pack;
+ *ppack = pack;
+
+end:
+ gf100_gr_dtor_fw(&fuc);
+ return ret;
}
static int
@@ -273,20 +309,24 @@ gk20a_gr_init(struct gf100_gr *gr)
return gf100_gr_init_ctxctl(gr);
}
-void
-gk20a_gr_dtor(struct gf100_gr *gr)
-{
- gk20a_gr_init_dtor(gr->fuc_method);
- gk20a_gr_init_dtor(gr->fuc_bundle);
- gk20a_gr_init_dtor(gr->fuc_sw_ctx);
- gk20a_gr_init_dtor(gr->fuc_sw_nonctx);
-}
+static const struct gf100_gr_func
+gk20a_gr = {
+ .init = gk20a_gr_init,
+ .set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
+ .ppc_nr = 1,
+ .grctx = &gk20a_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
+ { -1, -1, KEPLER_C, &gf100_fermi },
+ { -1, -1, KEPLER_COMPUTE_A },
+ {}
+ }
+};
int
-gk20a_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
- int index, struct nvkm_gr **pgr)
+gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- struct gf100_gr_fuc fuc;
struct gf100_gr *gr;
int ret;
@@ -294,63 +334,32 @@ gk20a_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
return -ENOMEM;
*pgr = &gr->base;
- ret = gf100_gr_ctor(func, device, index, gr);
+ ret = gf100_gr_ctor(&gk20a_gr, device, index, gr);
if (ret)
return ret;
- ret = gf100_gr_ctor_fw(gr, "sw_nonctx", &fuc);
+ if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
+ gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
+ gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
+ gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
+ return -ENODEV;
+
+ ret = gk20a_gr_av_to_init(gr, "sw_nonctx", &gr->fuc_sw_nonctx);
if (ret)
return ret;
- gr->fuc_sw_nonctx = gk20a_gr_av_to_init(&fuc);
- gf100_gr_dtor_fw(&fuc);
- if (IS_ERR(gr->fuc_sw_nonctx))
- return PTR_ERR(gr->fuc_sw_nonctx);
- ret = gf100_gr_ctor_fw(gr, "sw_ctx", &fuc);
+ ret = gk20a_gr_aiv_to_init(gr, "sw_ctx", &gr->fuc_sw_ctx);
if (ret)
return ret;
- gr->fuc_sw_ctx = gk20a_gr_aiv_to_init(&fuc);
- gf100_gr_dtor_fw(&fuc);
- if (IS_ERR(gr->fuc_sw_ctx))
- return PTR_ERR(gr->fuc_sw_ctx);
- ret = gf100_gr_ctor_fw(gr, "sw_bundle_init", &fuc);
+ ret = gk20a_gr_av_to_init(gr, "sw_bundle_init", &gr->fuc_bundle);
if (ret)
return ret;
- gr->fuc_bundle = gk20a_gr_av_to_init(&fuc);
- gf100_gr_dtor_fw(&fuc);
- if (IS_ERR(gr->fuc_bundle))
- return PTR_ERR(gr->fuc_bundle);
- ret = gf100_gr_ctor_fw(gr, "sw_method_init", &fuc);
+ ret = gk20a_gr_av_to_method(gr, "sw_method_init", &gr->fuc_method);
if (ret)
return ret;
- gr->fuc_method = gk20a_gr_av_to_method(&fuc);
- gf100_gr_dtor_fw(&fuc);
- if (IS_ERR(gr->fuc_method))
- return PTR_ERR(gr->fuc_method);
- return 0;
-}
-static const struct gf100_gr_func
-gk20a_gr = {
- .dtor = gk20a_gr_dtor,
- .init = gk20a_gr_init,
- .set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
- .ppc_nr = 1,
- .grctx = &gk20a_grctx,
- .sclass = {
- { -1, -1, FERMI_TWOD_A },
- { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
- { -1, -1, KEPLER_C, &gf100_fermi },
- { -1, -1, KEPLER_COMPUTE_A },
- {}
- }
-};
-
-int
-gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
-{
- return gk20a_gr_new_(&gk20a_gr, device, index, pgr);
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
new file mode 100644
index 000000000000..058fc1d22c09
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <subdev/secboot.h>
+
+#include <nvif/class.h>
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+int
+gm200_gr_init(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ u32 data[TPC_MAX / 8] = {}, tmp;
+ u8 tpcnr[GPC_MAX];
+ int gpc, tpc, ppc, rop;
+ int i;
+
+ tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
+ nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
+ nvkm_wr32(device, 0x418890, 0x00000000);
+ nvkm_wr32(device, 0x418894, 0x00000000);
+ nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
+ nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
+
+ /*XXX: belongs in fb */
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+ nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
+
+ gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+ gm107_gr_init_bios(gr);
+
+ nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+
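+	/* distribute TPCs over the GPCs round-robin, packing 4 bits per TPC */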
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+ nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+ nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+ nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+ nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+
+ nvkm_wr32(device, 0x400500, 0x00010001);
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+ nvkm_wr32(device, 0x400124, 0x00000002);
+ nvkm_wr32(device, 0x409c24, 0x000e0000);
+ nvkm_wr32(device, 0x405848, 0xc0000000);
+ nvkm_wr32(device, 0x40584c, 0x00000001);
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
+ nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x407020, 0x40000000);
+ nvkm_wr32(device, 0x405840, 0xc0000000);
+ nvkm_wr32(device, 0x405844, 0x00ffffff);
+ nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
+ nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+ }
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+
+ for (rop = 0; rop < gr->rop_nr; rop++) {
+ nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400118, 0xffffffff);
+ nvkm_wr32(device, 0x400130, 0xffffffff);
+ nvkm_wr32(device, 0x40011c, 0xffffffff);
+ nvkm_wr32(device, 0x400134, 0xffffffff);
+
+ nvkm_wr32(device, 0x400054, 0x2c350f63);
+
+ gf100_gr_zbc_init(gr);
+
+ return gf100_gr_init_ctxctl(gr);
+}
+
+int
+gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+ int index, struct nvkm_gr **pgr)
+{
+ struct gf100_gr *gr;
+ int ret;
+
+ if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+ return -ENOMEM;
+ *pgr = &gr->base;
+
+ ret = gf100_gr_ctor(func, device, index, gr);
+ if (ret)
+ return ret;
+
+	/* Load firmware for falcons not managed by secure boot */
+ if (!nvkm_secboot_is_managed(device->secboot,
+ NVKM_SECBOOT_FALCON_FECS)) {
+ if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
+ (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
+ return ret;
+ }
+ if (!nvkm_secboot_is_managed(device->secboot,
+ NVKM_SECBOOT_FALCON_GPCCS)) {
+ if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
+ (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
+ return ret;
+ }
+
+ if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
+ (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
+ (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
+ (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
+ return ret;
+
+ return 0;
+}
+
+static const struct gf100_gr_func
+gm200_gr = {
+ .init = gm200_gr_init,
+ .ppc_nr = 2,
+ .grctx = &gm200_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, MAXWELL_B, &gf100_fermi },
+ { -1, -1, MAXWELL_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gm200_gr_new_(&gm200_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
deleted file mode 100644
index 90381dde451a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * Copyright 2015 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <bskeggs@redhat.com>
- */
-#include "gf100.h"
-#include "ctxgf100.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * PGRAPH register lists
- ******************************************************************************/
-
-static const struct gf100_gr_init
-gm204_gr_init_main_0[] = {
- { 0x400080, 1, 0x04, 0x003003e2 },
- { 0x400088, 1, 0x04, 0xe007bfe7 },
- { 0x40008c, 1, 0x04, 0x00060000 },
- { 0x400090, 1, 0x04, 0x00000030 },
- { 0x40013c, 1, 0x04, 0x003901f3 },
- { 0x400140, 1, 0x04, 0x00000100 },
- { 0x400144, 1, 0x04, 0x00000000 },
- { 0x400148, 1, 0x04, 0x00000110 },
- { 0x400138, 1, 0x04, 0x00000000 },
- { 0x400130, 2, 0x04, 0x00000000 },
- { 0x400124, 1, 0x04, 0x00000002 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_fe_0[] = {
- { 0x40415c, 1, 0x04, 0x00000000 },
- { 0x404170, 1, 0x04, 0x00000000 },
- { 0x4041b4, 1, 0x04, 0x00000000 },
- { 0x4041b8, 1, 0x04, 0x00000010 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_ds_0[] = {
- { 0x40583c, 1, 0x04, 0x00000000 },
- { 0x405844, 1, 0x04, 0x00ffffff },
- { 0x40584c, 1, 0x04, 0x00000001 },
- { 0x405850, 1, 0x04, 0x00000000 },
- { 0x405900, 1, 0x04, 0x00000000 },
- { 0x405908, 1, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_sked_0[] = {
- { 0x407010, 1, 0x04, 0x00000000 },
- { 0x407040, 1, 0x04, 0x80440434 },
- { 0x407048, 1, 0x04, 0x00000008 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_tpccs_0[] = {
- { 0x419d60, 1, 0x04, 0x0000003f },
- { 0x419d88, 3, 0x04, 0x00000000 },
- { 0x419dc4, 1, 0x04, 0x00000000 },
- { 0x419dc8, 1, 0x04, 0x00000501 },
- { 0x419dd0, 1, 0x04, 0x00000000 },
- { 0x419dd4, 1, 0x04, 0x00000100 },
- { 0x419dd8, 1, 0x04, 0x00000001 },
- { 0x419ddc, 1, 0x04, 0x00000002 },
- { 0x419de0, 1, 0x04, 0x00000001 },
- { 0x419de8, 1, 0x04, 0x000000cc },
- { 0x419dec, 1, 0x04, 0x00000000 },
- { 0x419df0, 1, 0x04, 0x000000cc },
- { 0x419df4, 1, 0x04, 0x00000000 },
- { 0x419d0c, 1, 0x04, 0x00000000 },
- { 0x419d10, 1, 0x04, 0x00000014 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_pe_0[] = {
- { 0x419900, 1, 0x04, 0x000000ff },
- { 0x419810, 1, 0x04, 0x00000000 },
- { 0x41980c, 1, 0x04, 0x00000010 },
- { 0x419844, 1, 0x04, 0x00000000 },
- { 0x419838, 1, 0x04, 0x000000ff },
- { 0x419850, 1, 0x04, 0x00000004 },
- { 0x419854, 2, 0x04, 0x00000000 },
- { 0x419894, 3, 0x04, 0x00100401 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_sm_0[] = {
- { 0x419e30, 1, 0x04, 0x000000ff },
- { 0x419e00, 1, 0x04, 0x00000000 },
- { 0x419ea0, 1, 0x04, 0x00000000 },
- { 0x419ee4, 1, 0x04, 0x00000000 },
- { 0x419ea4, 1, 0x04, 0x00000100 },
- { 0x419ea8, 1, 0x04, 0x00000000 },
- { 0x419ee8, 1, 0x04, 0x00000091 },
- { 0x419eb4, 1, 0x04, 0x00000000 },
- { 0x419ebc, 2, 0x04, 0x00000000 },
- { 0x419edc, 1, 0x04, 0x000c1810 },
- { 0x419ed8, 1, 0x04, 0x00000000 },
- { 0x419ee0, 1, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_l1c_1[] = {
- { 0x419cf8, 2, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_sm_1[] = {
- { 0x419f74, 1, 0x04, 0x00055155 },
- { 0x419f80, 4, 0x04, 0x00000000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_l1c_2[] = {
- { 0x419ccc, 2, 0x04, 0x00000000 },
- { 0x419c80, 1, 0x04, 0x3f006022 },
- { 0x419c88, 1, 0x04, 0x00210000 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_pes_0[] = {
- { 0x41be50, 1, 0x04, 0x000000ff },
- { 0x41be04, 1, 0x04, 0x00000000 },
- { 0x41be08, 1, 0x04, 0x00000004 },
- { 0x41be0c, 1, 0x04, 0x00000008 },
- { 0x41be10, 1, 0x04, 0x2e3b8bc7 },
- { 0x41be14, 2, 0x04, 0x00000000 },
- { 0x41be3c, 5, 0x04, 0x00100401 },
- {}
-};
-
-static const struct gf100_gr_init
-gm204_gr_init_be_0[] = {
- { 0x408890, 1, 0x04, 0x000000ff },
- { 0x40880c, 1, 0x04, 0x00000000 },
- { 0x408850, 1, 0x04, 0x00000004 },
- { 0x408878, 1, 0x04, 0x01b4201c },
- { 0x40887c, 1, 0x04, 0x80004c55 },
- { 0x408880, 1, 0x04, 0x0018c258 },
- { 0x408884, 1, 0x04, 0x0000160f },
- { 0x408974, 1, 0x04, 0x000000ff },
- { 0x408910, 9, 0x04, 0x00000000 },
- { 0x408950, 1, 0x04, 0x00000000 },
- { 0x408954, 1, 0x04, 0x0000ffff },
- { 0x408958, 1, 0x04, 0x00000034 },
- { 0x40895c, 1, 0x04, 0x84b17403 },
- { 0x408960, 1, 0x04, 0x04c1884f },
- { 0x408964, 1, 0x04, 0x04714445 },
- { 0x408968, 1, 0x04, 0x0280802f },
- { 0x40896c, 1, 0x04, 0x04304856 },
- { 0x408970, 1, 0x04, 0x00012800 },
- { 0x408984, 1, 0x04, 0x00000000 },
- { 0x408988, 1, 0x04, 0x08040201 },
- { 0x40898c, 1, 0x04, 0x80402010 },
- {}
-};
-
-const struct gf100_gr_pack
-gm204_gr_pack_mmio[] = {
- { gm204_gr_init_main_0 },
- { gm204_gr_init_fe_0 },
- { gf100_gr_init_pri_0 },
- { gf100_gr_init_rstr2d_0 },
- { gf100_gr_init_pd_0 },
- { gm204_gr_init_ds_0 },
- { gm107_gr_init_scc_0 },
- { gm204_gr_init_sked_0 },
- { gk110_gr_init_cwd_0 },
- { gm107_gr_init_prop_0 },
- { gk208_gr_init_gpc_unk_0 },
- { gf100_gr_init_setup_0 },
- { gf100_gr_init_crstr_0 },
- { gm107_gr_init_setup_1 },
- { gm107_gr_init_zcull_0 },
- { gf100_gr_init_gpm_0 },
- { gm107_gr_init_gpc_unk_1 },
- { gf100_gr_init_gcc_0 },
- { gm204_gr_init_tpccs_0 },
- { gm107_gr_init_tex_0 },
- { gm204_gr_init_pe_0 },
- { gm107_gr_init_l1c_0 },
- { gf100_gr_init_mpc_0 },
- { gm204_gr_init_sm_0 },
- { gm204_gr_init_l1c_1 },
- { gm204_gr_init_sm_1 },
- { gm204_gr_init_l1c_2 },
- { gm204_gr_init_pes_0 },
- { gm107_gr_init_wwdx_0 },
- { gm107_gr_init_cbm_0 },
- { gm204_gr_init_be_0 },
- {}
-};
-
-const struct gf100_gr_pack *
-gm204_gr_data[] = {
- gm204_gr_pack_mmio,
- NULL
-};
-
-/*******************************************************************************
- * PGRAPH engine/subdev functions
- ******************************************************************************/
-
-static int
-gm204_gr_init_ctxctl(struct gf100_gr *gr)
-{
- return 0;
-}
-
-int
-gm204_gr_init(struct gf100_gr *gr)
-{
- struct nvkm_device *device = gr->base.engine.subdev.device;
- const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
- u32 data[TPC_MAX / 8] = {}, tmp;
- u8 tpcnr[GPC_MAX];
- int gpc, tpc, ppc, rop;
- int i;
-
- tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
- nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
- nvkm_wr32(device, 0x418890, 0x00000000);
- nvkm_wr32(device, 0x418894, 0x00000000);
- nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
- nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
- nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
-
- /*XXX: belongs in fb */
- nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
- nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
- nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
-
- gf100_gr_mmio(gr, gr->func->mmio);
-
- gm107_gr_init_bios(gr);
-
- nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
-
- memset(data, 0x00, sizeof(data));
- memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
- for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
- do {
- gpc = (gpc + 1) % gr->gpc_nr;
- } while (!tpcnr[gpc]);
- tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
-
- data[i / 8] |= tpc << ((i % 8) * 4);
- }
-
- nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
- nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
- nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
- nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
-
- for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
- nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
- gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
- nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
- gr->tpc_total);
- nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
- }
-
- nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
- nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
- nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
-
- nvkm_wr32(device, 0x400500, 0x00010001);
- nvkm_wr32(device, 0x400100, 0xffffffff);
- nvkm_wr32(device, 0x40013c, 0xffffffff);
- nvkm_wr32(device, 0x400124, 0x00000002);
- nvkm_wr32(device, 0x409c24, 0x000e0000);
- nvkm_wr32(device, 0x405848, 0xc0000000);
- nvkm_wr32(device, 0x40584c, 0x00000001);
- nvkm_wr32(device, 0x404000, 0xc0000000);
- nvkm_wr32(device, 0x404600, 0xc0000000);
- nvkm_wr32(device, 0x408030, 0xc0000000);
- nvkm_wr32(device, 0x404490, 0xc0000000);
- nvkm_wr32(device, 0x406018, 0xc0000000);
- nvkm_wr32(device, 0x407020, 0x40000000);
- nvkm_wr32(device, 0x405840, 0xc0000000);
- nvkm_wr32(device, 0x405844, 0x00ffffff);
- nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
-
- for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
- for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
- nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
- nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
- nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
- nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
- nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
- for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
- nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
- }
- nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
- nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
- }
-
- for (rop = 0; rop < gr->rop_nr; rop++) {
- nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
- nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
- nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
- nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
- }
-
- nvkm_wr32(device, 0x400108, 0xffffffff);
- nvkm_wr32(device, 0x400138, 0xffffffff);
- nvkm_wr32(device, 0x400118, 0xffffffff);
- nvkm_wr32(device, 0x400130, 0xffffffff);
- nvkm_wr32(device, 0x40011c, 0xffffffff);
- nvkm_wr32(device, 0x400134, 0xffffffff);
-
- nvkm_wr32(device, 0x400054, 0x2c350f63);
-
- gf100_gr_zbc_init(gr);
-
- return gm204_gr_init_ctxctl(gr);
-}
-
-static const struct gf100_gr_func
-gm204_gr = {
- .init = gm204_gr_init,
- .mmio = gm204_gr_pack_mmio,
- .ppc_nr = 2,
- .grctx = &gm204_grctx,
- .sclass = {
- { -1, -1, FERMI_TWOD_A },
- { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
- { -1, -1, MAXWELL_B, &gf100_fermi },
- { -1, -1, MAXWELL_COMPUTE_B },
- {}
- }
-};
-
-int
-gm204_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
-{
- return gf100_gr_new_(&gm204_gr, device, index, pgr);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 65b6e3d1e90d..29732bc14415 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -32,12 +32,15 @@ gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
struct nvkm_device *device = gr->base.engine.subdev.device;
u32 val;
- /* TODO this needs to be removed once secure boot works */
- if (1) {
+ /* Bypass MMU check for non-secure boot */
+ if (!device->secboot) {
nvkm_wr32(device, 0x100ce4, 0xffffffff);
+
+ if (nvkm_rd32(device, 0x100ce4) != 0xffffffff)
+ nvdev_warn(device,
+ "cannot bypass secure boot - expect failure soon!\n");
}
- /* TODO update once secure boot works */
val = nvkm_rd32(device, 0x100c80);
val &= 0xf000087f;
nvkm_wr32(device, 0x418880, val);
@@ -61,7 +64,6 @@ gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
static const struct gf100_gr_func
gm20b_gr = {
- .dtor = gk20a_gr_dtor,
.init = gk20a_gr_init,
.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
@@ -79,5 +81,5 @@ gm20b_gr = {
int
gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
{
- return gk20a_gr_new_(&gm20b_gr, device, index, pgr);
+ return gm200_gr_new_(&gm20b_gr, device, index, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild
new file mode 100644
index 000000000000..b5119564f608
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msenc/Kbuild
@@ -0,0 +1 @@
+#nvkm-y += nvkm/engine/msenc/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
new file mode 100644
index 000000000000..13b7c71ff900
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -0,0 +1 @@
+#nvkm-y += nvkm/engine/nvdec/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
new file mode 100644
index 000000000000..ad8f1820fa53
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -0,0 +1 @@
+#nvkm-y += nvkm/engine/nvenc/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index 4bef72a9d106..3fda594700e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -59,9 +59,11 @@ static void
nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
{
struct nvkm_device *device = pm->engine.subdev.device;
- if (pm->sequence != pm->sequence) {
+ struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
+
+ if (nv40pm->sequence != pm->sequence) {
nvkm_wr32(device, 0x400084, 0x00000020);
- pm->sequence = pm->sequence;
+ nv40pm->sequence = pm->sequence;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild
new file mode 100644
index 000000000000..ed4fb6488013
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vic/Kbuild
@@ -0,0 +1 @@
+#nvkm-y += nvkm/engine/vic/base.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index ee2c38f50ef5..642d27dc99a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -8,6 +8,7 @@ include $(src)/nvkm/subdev/fuse/Kbuild
include $(src)/nvkm/subdev/gpio/Kbuild
include $(src)/nvkm/subdev/i2c/Kbuild
include $(src)/nvkm/subdev/ibus/Kbuild
+include $(src)/nvkm/subdev/iccsense/Kbuild
include $(src)/nvkm/subdev/instmem/Kbuild
include $(src)/nvkm/subdev/ltc/Kbuild
include $(src)/nvkm/subdev/mc/Kbuild
@@ -15,6 +16,7 @@ include $(src)/nvkm/subdev/mmu/Kbuild
include $(src)/nvkm/subdev/mxm/Kbuild
include $(src)/nvkm/subdev/pci/Kbuild
include $(src)/nvkm/subdev/pmu/Kbuild
+include $(src)/nvkm/subdev/secboot/Kbuild
include $(src)/nvkm/subdev/therm/Kbuild
include $(src)/nvkm/subdev/timer/Kbuild
include $(src)/nvkm/subdev/volt/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
index 64730d5e9351..dbcb0ef21587 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/Kbuild
@@ -10,6 +10,7 @@ nvkm-y += nvkm/subdev/bios/extdev.o
nvkm-y += nvkm/subdev/bios/fan.o
nvkm-y += nvkm/subdev/bios/gpio.o
nvkm-y += nvkm/subdev/bios/i2c.o
+nvkm-y += nvkm/subdev/bios/iccsense.o
nvkm-y += nvkm/subdev/bios/image.o
nvkm-y += nvkm/subdev/bios/init.o
nvkm-y += nvkm/subdev/bios/mxm.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index c9e6f6ff7c50..b8578359e61b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -32,7 +32,7 @@ extdev_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
u16 dcb, extdev = 0;
dcb = dcb_table(bios, &dcb_ver, &dcb_hdr, &dcb_cnt, &dcb_len);
- if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
+ if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40 && dcb_ver != 0x41))
return 0x0000;
extdev = nvbios_rd16(bios, dcb + 18);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
new file mode 100644
index 000000000000..084328028af1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/iccsense.h>
+
+static u16
+nvbios_iccsense_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
+ u8 *len)
+{
+ struct bit_entry bit_P;
+ u16 iccsense;
+
+ if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
+ bit_P.length < 0x2c)
+ return 0;
+
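+	/* the ICCSENSE table pointer sits at offset 0x28 of BIT table 'P' v2 */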
+ iccsense = nvbios_rd16(bios, bit_P.offset + 0x28);
+ if (!iccsense)
+ return 0;
+
+ *ver = nvbios_rd08(bios, iccsense + 0);
+ switch (*ver) {
+ case 0x10:
+ case 0x20:
+ *hdr = nvbios_rd08(bios, iccsense + 1);
+ *len = nvbios_rd08(bios, iccsense + 2);
+ *cnt = nvbios_rd08(bios, iccsense + 3);
+ return iccsense;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int
+nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
+{
+ struct nvkm_subdev *subdev = &bios->subdev;
+ u8 ver, hdr, cnt, len, i;
+ u16 table, entry;
+
+ table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len);
+ if (!table || !cnt)
+ return -EINVAL;
+
+ if (ver != 0x10 && ver != 0x20) {
+ nvkm_error(subdev, "ICCSENSE version 0x%02x unknown\n", ver);
+ return -EINVAL;
+ }
+
+ iccsense->nr_entry = cnt;
+ iccsense->rail = kmalloc(sizeof(struct pwr_rail_t) * cnt, GFP_KERNEL);
+ if (!iccsense->rail)
+ return -ENOMEM;
+
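+	/* both table versions carry the same fields, at version-specific offsets */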
+ for (i = 0; i < cnt; ++i) {
+ struct pwr_rail_t *rail = &iccsense->rail[i];
+ entry = table + hdr + i * len;
+
+		switch (ver) {
+ case 0x10:
+ rail->mode = nvbios_rd08(bios, entry + 0x1);
+ rail->extdev_id = nvbios_rd08(bios, entry + 0x2);
+ rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3);
+ rail->rail = nvbios_rd08(bios, entry + 0x4);
+ break;
+ case 0x20:
+ rail->mode = nvbios_rd08(bios, entry);
+ rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
+ rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5);
+ rail->rail = nvbios_rd08(bios, entry + 0x6);
+ break;
+		}
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index a7d69ce7abc1..38ed09fd3d2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -786,20 +786,20 @@ init_io_flag_condition(struct nvbios_init *init)
}
/**
- * INIT_DP_CONDITION - opcode 0x3a
+ * INIT_GENERIC_CONDITION - opcode 0x3a
*
*/
static void
-init_dp_condition(struct nvbios_init *init)
+init_generic_condition(struct nvbios_init *init)
{
struct nvkm_bios *bios = init->bios;
struct nvbios_dpout info;
u8 cond = nvbios_rd08(bios, init->offset + 1);
- u8 unkn = nvbios_rd08(bios, init->offset + 2);
+ u8 size = nvbios_rd08(bios, init->offset + 2);
u8 ver, hdr, cnt, len;
u16 data;
- trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
+ trace("GENERIC_CONDITION\t0x%02x 0x%02x\n", cond, size);
init->offset += 3;
switch (cond) {
@@ -828,7 +828,8 @@ init_dp_condition(struct nvbios_init *init)
init_exec_set(init, false);
break;
default:
- warn("unknown dp condition 0x%02x\n", cond);
+		warn("INIT_GENERIC_CONDITION: unknown 0x%02x\n", cond);
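+		/* skip the payload so parsing stays aligned past unknown conditions */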
+ init->offset += size;
break;
}
}
@@ -2205,7 +2206,7 @@ static struct nvbios_init_opcode {
[0x37] = { init_copy },
[0x38] = { init_not },
[0x39] = { init_io_flag_condition },
- [0x3a] = { init_dp_condition },
+ [0x3a] = { init_generic_condition },
[0x3b] = { init_io_mask_or },
[0x3c] = { init_io_or },
[0x47] = { init_andn_reg },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index ed7717bcc3a1..87d94883f790 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -8,6 +8,7 @@ nvkm-y += nvkm/subdev/clk/mcp77.o
nvkm-y += nvkm/subdev/clk/gf100.o
nvkm-y += nvkm/subdev/clk/gk104.o
nvkm-y += nvkm/subdev/clk/gk20a.o
+nvkm-y += nvkm/subdev/clk/gm20b.o
nvkm-y += nvkm/subdev/clk/pllnv04.o
nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 5da2aa8cc333..5f0ee24e31b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,19 +22,17 @@
* Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
*
*/
-#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
#include "priv.h"
+#include "gk20a.h"
#include <core/tegra.h>
#include <subdev/timer.h>
-#define MHZ (1000 * 1000)
+#define KHZ (1000)
+#define MHZ (KHZ * 1000)
#define MASK(w) ((1 << w) - 1)
-#define SYS_GPCPLL_CFG_BASE 0x00137000
-#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
-
#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
#define GPCPLL_CFG_ENABLE BIT(0)
#define GPCPLL_CFG_IDDQ BIT(1)
@@ -56,6 +54,7 @@
#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
+#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
@@ -75,7 +74,7 @@
#define GPC2CLK_OUT_VCODIV1 0
#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
GPC2CLK_OUT_VCODIV_SHIFT)
-#define GPC2CLK_OUT_BYPDIV_WIDTH 6
+#define GPC2CLK_OUT_BYPDIV_WIDTH 6
#define GPC2CLK_OUT_BYPDIV_SHIFT 0
#define GPC2CLK_OUT_BYPDIV31 0x3c
#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
@@ -92,45 +91,49 @@
#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
(0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
-static const u8 pl_to_div[] = {
+static const u8 _pl_to_div[] = {
/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};
-/* All frequencies in Mhz */
-struct gk20a_clk_pllg_params {
- u32 min_vco, max_vco;
- u32 min_u, max_u;
- u32 min_m, max_m;
- u32 min_n, max_n;
- u32 min_pl, max_pl;
-};
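+/* map a PL field value to its divider; out-of-range values fall back to 1 */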
+static u32 pl_to_div(u32 pl)
+{
+ if (pl >= ARRAY_SIZE(_pl_to_div))
+ return 1;
+
+ return _pl_to_div[pl];
+}
+
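+/* inverse mapping: the smallest PL whose divider is at least div */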
+static u32 div_to_pl(u32 div)
+{
+ u32 pl;
+
+ for (pl = 0; pl < ARRAY_SIZE(_pl_to_div) - 1; pl++) {
+ if (_pl_to_div[pl] >= div)
+ return pl;
+ }
+
+ return ARRAY_SIZE(_pl_to_div) - 1;
+}
static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
- .min_vco = 1000, .max_vco = 2064,
- .min_u = 12, .max_u = 38,
+ .min_vco = 1000000, .max_vco = 2064000,
+ .min_u = 12000, .max_u = 38000,
.min_m = 1, .max_m = 255,
.min_n = 8, .max_n = 255,
.min_pl = 1, .max_pl = 32,
};
-struct gk20a_clk {
- struct nvkm_clk base;
- const struct gk20a_clk_pllg_params *params;
- u32 m, n, pl;
- u32 parent_rate;
-};
-
static void
-gk20a_pllg_read_mnp(struct gk20a_clk *clk)
+gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
struct nvkm_device *device = clk->base.subdev.device;
u32 val;
val = nvkm_rd32(device, GPCPLL_COEFF);
- clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
- clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
- clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+ pll->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+ pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
+ pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}
static u32
@@ -139,8 +142,8 @@ gk20a_pllg_calc_rate(struct gk20a_clk *clk)
u32 rate;
u32 divider;
- rate = clk->parent_rate * clk->n;
- divider = clk->m * pl_to_div[clk->pl];
+ rate = clk->parent_rate * clk->pll.n;
+ divider = clk->pll.m * clk->pl_to_div(clk->pll.pl);
return rate / divider / 2;
}
@@ -152,15 +155,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
u32 target_clk_f, ref_clk_f, target_freq;
u32 min_vco_f, max_vco_f;
u32 low_pl, high_pl, best_pl;
- u32 target_vco_f, vco_f;
+ u32 target_vco_f;
u32 best_m, best_n;
- u32 u_f;
- u32 m, n, n2;
- u32 delta, lwv, best_delta = ~0;
+ u32 best_delta = ~0;
u32 pl;
- target_clk_f = rate * 2 / MHZ;
- ref_clk_f = clk->parent_rate / MHZ;
+ target_clk_f = rate * 2 / KHZ;
+ ref_clk_f = clk->parent_rate / KHZ;
max_vco_f = clk->params->max_vco;
min_vco_f = clk->params->min_vco;
@@ -176,33 +177,26 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
high_pl = min(high_pl, clk->params->max_pl);
high_pl = max(high_pl, clk->params->min_pl);
+ high_pl = clk->div_to_pl(high_pl);
/* min_pl <= low_pl <= max_pl */
low_pl = min_vco_f / target_vco_f;
low_pl = min(low_pl, clk->params->max_pl);
low_pl = max(low_pl, clk->params->min_pl);
-
- /* Find Indices of high_pl and low_pl */
- for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
- if (pl_to_div[pl] >= low_pl) {
- low_pl = pl;
- break;
- }
- }
- for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
- if (pl_to_div[pl] >= high_pl) {
- high_pl = pl;
- break;
- }
- }
+ low_pl = clk->div_to_pl(low_pl);
nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
- pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
+ clk->pl_to_div(low_pl), high_pl, clk->pl_to_div(high_pl));
/* Select lowest possible VCO */
for (pl = low_pl; pl <= high_pl; pl++) {
- target_vco_f = target_clk_f * pl_to_div[pl];
+ u32 m, n, n2;
+
+ target_vco_f = target_clk_f * clk->pl_to_div(pl);
+
for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
+ u32 u_f, vco_f;
+
u_f = ref_clk_f / m;
if (u_f < clk->params->min_u)
@@ -225,8 +219,10 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
vco_f = ref_clk_f * n / m;
if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
- lwv = (vco_f + (pl_to_div[pl] / 2))
- / pl_to_div[pl];
+ u32 delta, lwv;
+
+ lwv = (vco_f + (clk->pl_to_div(pl) / 2))
+ / clk->pl_to_div(pl);
delta = abs(lwv - target_clk_f);
if (delta < best_delta) {
@@ -249,17 +245,18 @@ found_match:
if (best_delta != 0)
nvkm_debug(subdev,
"no best match for target @ %dMHz on gpc_pll",
- target_clk_f);
+ target_clk_f / KHZ);
- clk->m = best_m;
- clk->n = best_n;
- clk->pl = best_pl;
+ clk->pll.m = best_m;
+ clk->pll.n = best_n;
+ clk->pll.pl = best_pl;
- target_freq = gk20a_pllg_calc_rate(clk) / MHZ;
+ target_freq = gk20a_pllg_calc_rate(clk);
nvkm_debug(subdev,
"actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
- target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
+ target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl,
+ clk->pl_to_div(clk->pll.pl));
return 0;
}
@@ -323,17 +320,19 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
}
static void
-_gk20a_pllg_enable(struct gk20a_clk *clk)
+gk20a_pllg_enable(struct gk20a_clk *clk)
{
struct nvkm_device *device = clk->base.subdev.device;
+
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
nvkm_rd32(device, GPCPLL_CFG);
}
static void
-_gk20a_pllg_disable(struct gk20a_clk *clk)
+gk20a_pllg_disable(struct gk20a_clk *clk)
{
struct nvkm_device *device = clk->base.subdev.device;
+
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
nvkm_rd32(device, GPCPLL_CFG);
}
@@ -344,25 +343,26 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
u32 val, cfg;
- u32 m_old, pl_old, n_lo;
+ struct gk20a_pll old_pll;
+ u32 n_lo;
/* get old coefficients */
- val = nvkm_rd32(device, GPCPLL_COEFF);
- m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
- pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+ gk20a_pllg_read_mnp(clk, &old_pll);
/* do NDIV slide if there is no change in M and PL */
cfg = nvkm_rd32(device, GPCPLL_CFG);
- if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
- (cfg & GPCPLL_CFG_ENABLE)) {
- return gk20a_pllg_slide(clk, clk->n);
+ if (allow_slide && clk->pll.m == old_pll.m &&
+ clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
+ return gk20a_pllg_slide(clk, clk->pll.n);
}
/* slide down to NDIV_LO */
- n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
- clk->parent_rate / MHZ);
if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
- int ret = gk20a_pllg_slide(clk, n_lo);
+ int ret;
+
+ n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
+ clk->parent_rate / KHZ);
+ ret = gk20a_pllg_slide(clk, n_lo);
if (ret)
return ret;
@@ -387,19 +387,19 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
udelay(2);
}
- _gk20a_pllg_disable(clk);
+ gk20a_pllg_disable(clk);
nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
- clk->m, clk->n, clk->pl);
+ clk->pll.m, clk->pll.n, clk->pll.pl);
- n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
- clk->parent_rate / MHZ);
- val = clk->m << GPCPLL_COEFF_M_SHIFT;
- val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
- val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
+ n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
+ clk->parent_rate / KHZ);
+ val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
+ val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
+ val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
nvkm_wr32(device, GPCPLL_COEFF, val);
- _gk20a_pllg_enable(clk);
+ gk20a_pllg_enable(clk);
val = nvkm_rd32(device, GPCPLL_CFG);
if (val & GPCPLL_CFG_LOCK_DET_OFF) {
@@ -414,16 +414,24 @@ _gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
return -ETIMEDOUT;
/* switch to VCO mode */
- nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+ BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
/* restore out divider 1:1 */
val = nvkm_rd32(device, GPC2CLK_OUT);
- val &= ~GPC2CLK_OUT_VCODIV_MASK;
- udelay(2);
- nvkm_wr32(device, GPC2CLK_OUT, val);
+ if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
+ (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
+ val &= ~GPC2CLK_OUT_VCODIV_MASK;
+ val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
+ udelay(2);
+ nvkm_wr32(device, GPC2CLK_OUT, val);
+		/* Intentional 2nd write to ensure linear divider operation */
+ nvkm_wr32(device, GPC2CLK_OUT, val);
+ nvkm_rd32(device, GPC2CLK_OUT);
+ }
/* slide up to new NDIV */
- return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
+ return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
}
static int
@@ -438,32 +446,6 @@ gk20a_pllg_program_mnp(struct gk20a_clk *clk)
return err;
}
-static void
-gk20a_pllg_disable(struct gk20a_clk *clk)
-{
- struct nvkm_device *device = clk->base.subdev.device;
- u32 val;
-
- /* slide to VCO min */
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_ENABLE) {
- u32 coeff, m, n_lo;
-
- coeff = nvkm_rd32(device, GPCPLL_COEFF);
- m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
- n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
- clk->parent_rate / MHZ);
- gk20a_pllg_slide(clk, n_lo);
- }
-
- /* put PLL in bypass before disabling it */
- nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
-
- _gk20a_pllg_disable(clk);
-}
-
-#define GK20A_CLK_GPC_MDIV 1000
-
static struct nvkm_pstate
gk20a_pstates[] = {
{
@@ -558,7 +540,7 @@ gk20a_pstates[] = {
},
};
-static int
+int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
struct gk20a_clk *clk = gk20a_clk(base);
@@ -569,7 +551,7 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
case nv_clk_src_crystal:
return device->crystal;
case nv_clk_src_gpc:
- gk20a_pllg_read_mnp(clk);
+ gk20a_pllg_read_mnp(clk, &clk->pll);
return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
default:
nvkm_error(subdev, "invalid clock source %d\n", src);
@@ -577,7 +559,7 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
}
}
-static int
+int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
struct gk20a_clk *clk = gk20a_clk(base);
@@ -586,7 +568,7 @@ gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
GK20A_CLK_GPC_MDIV);
}
-static int
+int
gk20a_clk_prog(struct nvkm_clk *base)
{
struct gk20a_clk *clk = gk20a_clk(base);
@@ -594,15 +576,33 @@ gk20a_clk_prog(struct nvkm_clk *base)
return gk20a_pllg_program_mnp(clk);
}
-static void
+void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}
-static void
+void
gk20a_clk_fini(struct nvkm_clk *base)
{
+ struct nvkm_device *device = base->subdev.device;
struct gk20a_clk *clk = gk20a_clk(base);
+ u32 val;
+
+ /* slide to VCO min */
+ val = nvkm_rd32(device, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_ENABLE) {
+ struct gk20a_pll pll;
+ u32 n_lo;
+
+ gk20a_pllg_read_mnp(clk, &pll);
+ n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco,
+ clk->parent_rate / KHZ);
+ gk20a_pllg_slide(clk, n_lo);
+ }
+
+ /* put PLL in bypass before disabling it */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
gk20a_pllg_disable(clk);
}
@@ -614,9 +614,12 @@ gk20a_clk_init(struct nvkm_clk *base)
struct nvkm_device *device = subdev->device;
int ret;
- nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
+ GPC2CLK_OUT_INIT_VAL);
- ret = gk20a_clk_prog(&clk->base);
+ /* Start with lowest frequency */
+ base->func->calc(base, &base->func->pstates[0].base);
+ ret = base->func->prog(&clk->base);
if (ret) {
nvkm_error(subdev, "cannot initialize clock\n");
return ret;
@@ -643,27 +646,50 @@ gk20a_clk = {
};
int
-gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+_gk20a_clk_ctor(struct nvkm_device *device, int index,
+ const struct nvkm_clk_func *func,
+ const struct gk20a_clk_pllg_params *params,
+ struct gk20a_clk *clk)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ int ret;
+ int i;
+
+ /* Finish initializing the pstates */
+ for (i = 0; i < func->nr_pstates; i++) {
+ INIT_LIST_HEAD(&func->pstates[i].list);
+ func->pstates[i].pstate = i + 1;
+ }
+
+ clk->params = params;
+ clk->parent_rate = clk_get_rate(tdev->clk);
+
+ ret = nvkm_clk_ctor(func, device, index, true, &clk->base);
+ if (ret)
+ return ret;
+
+ nvkm_debug(&clk->base.subdev, "parent clock rate: %d kHz\n",
+ clk->parent_rate / KHZ);
+
+ return 0;
+}
+
+int
+gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
struct gk20a_clk *clk;
- int ret, i;
+ int ret;
- if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
return -ENOMEM;
*pclk = &clk->base;
- /* Finish initializing the pstates */
- for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
- INIT_LIST_HEAD(&gk20a_pstates[i].list);
- gk20a_pstates[i].pstate = i + 1;
- }
+ ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
+ clk);
- clk->params = &gk20a_pllg_params;
- clk->parent_rate = clk_get_rate(tdev->clk);
+ clk->pl_to_div = pl_to_div;
+ clk->div_to_pl = div_to_pl;
- ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
- nvkm_info(&clk->base.subdev, "parent clock rate: %d Mhz\n",
- clk->parent_rate / MHZ);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
new file mode 100644
index 000000000000..13c46740197d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NVKM_CLK_GK20A_H__
+#define __NVKM_CLK_GK20A_H__
+
+#define GK20A_CLK_GPC_MDIV 1000
+
+#define SYS_GPCPLL_CFG_BASE 0x00137000
+
+/* All frequencies in kHz */
+struct gk20a_clk_pllg_params {
+ u32 min_vco, max_vco;
+ u32 min_u, max_u;
+ u32 min_m, max_m;
+ u32 min_n, max_n;
+ u32 min_pl, max_pl;
+};
+
+struct gk20a_pll {
+ u32 m;
+ u32 n;
+ u32 pl;
+};
+
+struct gk20a_clk {
+ struct nvkm_clk base;
+ const struct gk20a_clk_pllg_params *params;
+ struct gk20a_pll pll;
+ u32 parent_rate;
+
+ u32 (*div_to_pl)(u32);
+ u32 (*pl_to_div)(u32);
+};
+#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
+
+int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
+ const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
+void gk20a_clk_fini(struct nvkm_clk *);
+int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
+int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
+int gk20a_clk_prog(struct nvkm_clk *);
+void gk20a_clk_tidy(struct nvkm_clk *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
new file mode 100644
index 000000000000..71b2bbb61973
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <subdev/clk.h>
+#include <core/device.h>
+
+#include "priv.h"
+#include "gk20a.h"
+
+#define KHZ (1000)
+#define MHZ (KHZ * 1000)
+
+#define MASK(w) ((1 << (w)) - 1)
+
+#define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340)
+#define BYPASSCTRL_SYS_GPCPLL_SHIFT 0
+#define BYPASSCTRL_SYS_GPCPLL_WIDTH 1
+
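+/* PL and divider values map 1:1 on this PLL, hence these identity helpers */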
+static u32 pl_to_div(u32 pl)
+{
+ return pl;
+}
+
+static u32 div_to_pl(u32 div)
+{
+ return div;
+}
+
+static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
+ .min_vco = 1300000, .max_vco = 2600000,
+ .min_u = 12000, .max_u = 38400,
+ .min_m = 1, .max_m = 255,
+ .min_n = 8, .max_n = 255,
+ .min_pl = 1, .max_pl = 31,
+};
+
+static struct nvkm_pstate
+gm20b_pstates[] = {
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 76800,
+ .voltage = 0,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 153600,
+ .voltage = 1,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 230400,
+ .voltage = 2,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 307200,
+ .voltage = 3,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 384000,
+ .voltage = 4,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 460800,
+ .voltage = 5,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 537600,
+ .voltage = 6,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 614400,
+ .voltage = 7,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 691200,
+ .voltage = 8,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 768000,
+ .voltage = 9,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 844800,
+ .voltage = 10,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 921600,
+ .voltage = 11,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 998400,
+ .voltage = 12,
+ },
+ },
+};
+
+static int
+gm20b_clk_init(struct nvkm_clk *base)
+{
+ struct gk20a_clk *clk = gk20a_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ int ret;
+
+ /* Set the global bypass control to VCO */
+ nvkm_mask(device, BYPASSCTRL_SYS,
+ MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
+ 0);
+
+ /* Start with lowest frequency */
+ base->func->calc(base, &base->func->pstates[0].base);
+ ret = base->func->prog(&clk->base);
+ if (ret) {
+ nvkm_error(subdev, "cannot initialize clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct nvkm_clk_func
+gm20b_clk_speedo0 = {
+ .init = gm20b_clk_init,
+ .fini = gk20a_clk_fini,
+ .read = gk20a_clk_read,
+ .calc = gk20a_clk_calc,
+ .prog = gk20a_clk_prog,
+ .tidy = gk20a_clk_tidy,
+ .pstates = gm20b_pstates,
+ .nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max },
+ },
+};
+
+int
+gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+ struct gk20a_clk *clk;
+ int ret;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+ *pclk = &clk->base;
+
+ ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
+ &gm20b_pllg_params, clk);
+
+ clk->pl_to_div = pl_to_div;
+ clk->div_to_pl = div_to_pl;
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index 793e73d16dac..eac88e3dc6e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -11,4 +11,4 @@ nvkm-y += nvkm/subdev/devinit/gt215.o
nvkm-y += nvkm/subdev/devinit/mcp89.o
nvkm-y += nvkm/subdev/devinit/gf100.o
nvkm-y += nvkm/subdev/devinit/gm107.o
-nvkm-y += nvkm/subdev/devinit/gm204.o
+nvkm-y += nvkm/subdev/devinit/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index 22b0140e28c6..2923598b5fe9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -90,9 +90,21 @@ gf100_devinit_disable(struct nvkm_devinit *init)
return disable;
}
+void
+gf100_devinit_preinit(struct nvkm_devinit *base)
+{
+ struct nv50_devinit *init = nv50_devinit(base);
+ struct nvkm_subdev *subdev = &init->base.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* This bit is set by devinit, and flips back to 0 on suspend */
+ if (!base->post)
+ base->post = ((nvkm_rd32(device, 0x2240c) & BIT(1)) == 0);
+}
+
static const struct nvkm_devinit_func
gf100_devinit = {
- .preinit = nv50_devinit_preinit,
+ .preinit = gf100_devinit_preinit,
.init = nv50_devinit_init,
.post = nv04_devinit_post,
.pll_set = gf100_devinit_pll_set,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 2be98bd78214..28ca01be3d38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -46,7 +46,7 @@ gm107_devinit_disable(struct nvkm_devinit *init)
static const struct nvkm_devinit_func
gm107_devinit = {
- .preinit = nv50_devinit_preinit,
+ .preinit = gf100_devinit_preinit,
.init = nv50_devinit_init,
.post = nv04_devinit_post,
.pll_set = gf100_devinit_pll_set,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 2b9c3f11b7a8..a410c0db8a08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -107,7 +107,7 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
}
static int
-gm204_devinit_post(struct nvkm_devinit *base, bool post)
+gm200_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
struct nvkm_subdev *subdev = &init->base.subdev;
@@ -165,17 +165,17 @@ gm204_devinit_post(struct nvkm_devinit *base, bool post)
}
static const struct nvkm_devinit_func
-gm204_devinit = {
- .preinit = nv50_devinit_preinit,
+gm200_devinit = {
+ .preinit = gf100_devinit_preinit,
.init = nv50_devinit_init,
- .post = gm204_devinit_post,
+ .post = gm200_devinit_post,
.pll_set = gf100_devinit_pll_set,
.disable = gm107_devinit_disable,
};
int
-gm204_devinit_new(struct nvkm_device *device, int index,
+gm200_devinit_new(struct nvkm_device *device, int index,
struct nvkm_devinit **pinit)
{
- return nv50_devinit_new_(&gm204_devinit, device, index, pinit);
+ return nv50_devinit_new_(&gm200_devinit, device, index, pinit);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index 337c2c692dc7..c714b097719c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -93,28 +93,27 @@ nv50_devinit_disable(struct nvkm_devinit *init)
void
nv50_devinit_preinit(struct nvkm_devinit *base)
{
- struct nv50_devinit *init = nv50_devinit(base);
- struct nvkm_subdev *subdev = &init->base.subdev;
+ struct nvkm_subdev *subdev = &base->subdev;
struct nvkm_device *device = subdev->device;
/* our heuristics can't detect whether the board has had its
* devinit scripts executed or not if the display engine is
* missing, assume it's a secondary gpu which requires post
*/
- if (!init->base.post) {
- u64 disable = nvkm_devinit_disable(&init->base);
+ if (!base->post) {
+ u64 disable = nvkm_devinit_disable(base);
if (disable & (1ULL << NVKM_ENGINE_DISP))
- init->base.post = true;
+ base->post = true;
}
/* magic to detect whether or not x86 vbios code has executed
* the devinit scripts to initialise the board
*/
- if (!init->base.post) {
+ if (!base->post) {
if (!nvkm_rdvgac(device, 0, 0x00) &&
!nvkm_rdvgac(device, 0, 0x1a)) {
nvkm_debug(subdev, "adaptor not initialised\n");
- init->base.post = true;
+ base->post = true;
}
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 5de70a8486b4..25d2ae3af1c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -20,6 +20,7 @@ int gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
struct nvkm_oclass *, void *, u32,
struct nvkm_object **);
int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
+void gf100_devinit_preinit(struct nvkm_devinit *);
u64 gm107_devinit_disable(struct nvkm_devinit *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 1f730613c237..48f01e40b8fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -6,7 +6,7 @@ nvkm-y += nvkm/subdev/i2c/g94.o
nvkm-y += nvkm/subdev/i2c/gf117.o
nvkm-y += nvkm/subdev/i2c/gf119.o
nvkm-y += nvkm/subdev/i2c/gk104.o
-nvkm-y += nvkm/subdev/i2c/gm204.o
+nvkm-y += nvkm/subdev/i2c/gm200.o
nvkm-y += nvkm/subdev/i2c/pad.o
nvkm-y += nvkm/subdev/i2c/padnv04.o
@@ -14,7 +14,7 @@ nvkm-y += nvkm/subdev/i2c/padnv4e.o
nvkm-y += nvkm/subdev/i2c/padnv50.o
nvkm-y += nvkm/subdev/i2c/padg94.o
nvkm-y += nvkm/subdev/i2c/padgf119.o
-nvkm-y += nvkm/subdev/i2c/padgm204.o
+nvkm-y += nvkm/subdev/i2c/padgm200.o
nvkm-y += nvkm/subdev/i2c/bus.o
nvkm-y += nvkm/subdev/i2c/busnv04.o
@@ -25,6 +25,6 @@ nvkm-y += nvkm/subdev/i2c/bit.o
nvkm-y += nvkm/subdev/i2c/aux.o
nvkm-y += nvkm/subdev/i2c/auxg94.o
-nvkm-y += nvkm/subdev/i2c/auxgm204.o
+nvkm-y += nvkm/subdev/i2c/auxgm200.o
nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 35a892e4a4c3..fc6b162fa0b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -18,7 +18,7 @@ int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 size);
int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
-int gm204_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
#define AUX_MSG(b,l,f,a...) do { \
struct nvkm_i2c_aux *_aux = (b); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index bed231b56dbd..61d729b82c69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -21,23 +21,23 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#define gm204_i2c_aux(p) container_of((p), struct gm204_i2c_aux, base)
+#define gm200_i2c_aux(p) container_of((p), struct gm200_i2c_aux, base)
#include "aux.h"
-struct gm204_i2c_aux {
+struct gm200_i2c_aux {
struct nvkm_i2c_aux base;
int ch;
};
static void
-gm204_i2c_aux_fini(struct gm204_i2c_aux *aux)
+gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
}
static int
-gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
+gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
{
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 unksel = 1; /* nfi which to use, or if it matters.. */
@@ -64,7 +64,7 @@ gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
udelay(1);
if (!timeout--) {
AUX_ERR(&aux->base, "magic wait %08x", ctrl);
- gm204_i2c_aux_fini(aux);
+ gm200_i2c_aux_fini(aux);
return -EBUSY;
}
} while ((ctrl & 0x03000000) != urep);
@@ -73,10 +73,10 @@ gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
}
static int
-gm204_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 size)
{
- struct gm204_i2c_aux *aux = gm204_i2c_aux(obj);
+ struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
const u32 base = aux->ch * 0x50;
u32 ctrl, stat, timeout, retries;
@@ -85,7 +85,7 @@ gm204_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
- ret = gm204_i2c_aux_init(aux);
+ ret = gm200_i2c_aux_init(aux);
if (ret < 0)
goto out;
@@ -155,26 +155,26 @@ gm204_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
out:
- gm204_i2c_aux_fini(aux);
+ gm200_i2c_aux_fini(aux);
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
static const struct nvkm_i2c_aux_func
-gm204_i2c_aux_func = {
- .xfer = gm204_i2c_aux_xfer,
+gm200_i2c_aux_func = {
+ .xfer = gm200_i2c_aux_xfer,
};
int
-gm204_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+gm200_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
struct nvkm_i2c_aux **paux)
{
- struct gm204_i2c_aux *aux;
+ struct gm200_i2c_aux *aux;
if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
return -ENOMEM;
*paux = &aux->base;
- nvkm_i2c_aux_ctor(&gm204_i2c_aux_func, pad, index, &aux->base);
+ nvkm_i2c_aux_ctor(&gm200_i2c_aux_func, pad, index, &aux->base);
aux->ch = drive;
aux->base.intr = 1 << aux->ch;
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
index ff9f7d62f6be..a23c5f315221 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
@@ -25,16 +25,16 @@
#include "pad.h"
static const struct nvkm_i2c_func
-gm204_i2c = {
+gm200_i2c = {
.pad_x_new = gf119_i2c_pad_x_new,
- .pad_s_new = gm204_i2c_pad_s_new,
+ .pad_s_new = gm200_i2c_pad_s_new,
.aux = 8,
.aux_stat = gk104_aux_stat,
.aux_mask = gk104_aux_mask,
};
int
-gm204_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+gm200_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
{
- return nvkm_i2c_new_(&gm204_i2c, device, index, pi2c);
+ return nvkm_i2c_new_(&gm200_i2c, device, index, pi2c);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 9eeb992944c6..316c4536f29a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -49,11 +49,11 @@ int nv4e_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
int nv50_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
int g94_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
int gf119_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
-int gm204_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm200_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
int g94_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
int gf119_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
-int gm204_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm200_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
int anx9805_pad_new(struct nvkm_i2c_bus *, int, u8, struct nvkm_i2c_pad **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c
index 24a4d760c67b..7d417f6a816e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c
@@ -26,7 +26,7 @@
#include "bus.h"
static void
-gm204_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
+gm200_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
{
struct nvkm_subdev *subdev = &pad->i2c->subdev;
struct nvkm_device *device = subdev->device;
@@ -51,26 +51,26 @@ gm204_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
}
static const struct nvkm_i2c_pad_func
-gm204_i2c_pad_s_func = {
+gm200_i2c_pad_s_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = gm204_i2c_aux_new,
- .mode = gm204_i2c_pad_mode,
+ .aux_new_6 = gm200_i2c_aux_new,
+ .mode = gm200_i2c_pad_mode,
};
int
-gm204_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+gm200_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
- return nvkm_i2c_pad_new_(&gm204_i2c_pad_s_func, i2c, id, ppad);
+ return nvkm_i2c_pad_new_(&gm200_i2c_pad_s_func, i2c, id, ppad);
}
static const struct nvkm_i2c_pad_func
-gm204_i2c_pad_x_func = {
+gm200_i2c_pad_x_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = gm204_i2c_aux_new,
+ .aux_new_6 = gm200_i2c_aux_new,
};
int
-gm204_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+gm200_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
{
- return nvkm_i2c_pad_new_(&gm204_i2c_pad_x_func, i2c, id, ppad);
+ return nvkm_i2c_pad_new_(&gm200_i2c_pad_x_func, i2c, id, ppad);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
index 7e77a7466992..ad572d3b5466 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
@@ -2,4 +2,4 @@ nvkm-y += nvkm/subdev/ibus/gf100.o
nvkm-y += nvkm/subdev/ibus/gf117.o
nvkm-y += nvkm/subdev/ibus/gk104.o
nvkm-y += nvkm/subdev/ibus/gk20a.o
-nvkm-y += nvkm/subdev/ibus/gm204.o
+nvkm-y += nvkm/subdev/ibus/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
index b3839dc254ee..ef0b7f3b1128 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
@@ -24,17 +24,17 @@
#include "priv.h"
static const struct nvkm_subdev_func
-gm204_ibus = {
+gm200_ibus = {
.intr = gk104_ibus_intr,
};
int
-gm204_ibus_new(struct nvkm_device *device, int index,
+gm200_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&gm204_ibus, device, index, 0, ibus);
+ nvkm_subdev_ctor(&gm200_ibus, device, index, 0, ibus);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild
new file mode 100644
index 000000000000..98a4bd3e98ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/Kbuild
@@ -0,0 +1,2 @@
+nvkm-y += nvkm/subdev/iccsense/base.o
+nvkm-y += nvkm/subdev/iccsense/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
new file mode 100644
index 000000000000..c44a85228074
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2015 Martin Peres
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+#include "priv.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/extdev.h>
+#include <subdev/bios/iccsense.h>
+#include <subdev/i2c.h>
+
+static bool
+nvkm_iccsense_validate_device(struct i2c_adapter *i2c, u8 addr,
+ enum nvbios_extdev_type type, u8 rail)
+{
+ switch (type) {
+ case NVBIOS_EXTDEV_INA209:
+ case NVBIOS_EXTDEV_INA219:
+ return rail == 0 && nv_rd16i2cr(i2c, addr, 0x0) >= 0;
+ case NVBIOS_EXTDEV_INA3221:
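+ /* 0xff is the INA3221 die id (0x3220), 0xfe the manufacturer id (0x5449, "TI") */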
+ return rail <= 3 &&
+ nv_rd16i2cr(i2c, addr, 0xff) == 0x3220 &&
+ nv_rd16i2cr(i2c, addr, 0xfe) == 0x5449;
+ default:
+ return false;
+ }
+}
+
+static int
+nvkm_iccsense_poll_lane(struct i2c_adapter *i2c, u8 addr, u8 shunt_reg,
+ u8 shunt_shift, u8 bus_reg, u8 bus_shift, u8 shunt,
+ u16 lsb)
+{
+ int vshunt = nv_rd16i2cr(i2c, addr, shunt_reg);
+ int vbus = nv_rd16i2cr(i2c, addr, bus_reg);
+
+ if (vshunt < 0 || vbus < 0)
+ return -EINVAL;
+
+ vshunt >>= shunt_shift;
+ vbus >>= bus_shift;
+
+ return vbus * vshunt * lsb / shunt;
+}
+
+static int
+nvkm_iccsense_ina2x9_read(struct nvkm_iccsense *iccsense,
+ struct nvkm_iccsense_rail *rail,
+ u8 shunt_reg, u8 bus_reg)
+{
+ return nvkm_iccsense_poll_lane(rail->i2c, rail->addr, shunt_reg, 0,
+ bus_reg, 3, rail->mohm, 10 * 4);
+}
+
+static int
+nvkm_iccsense_ina209_read(struct nvkm_iccsense *iccsense,
+ struct nvkm_iccsense_rail *rail)
+{
+ return nvkm_iccsense_ina2x9_read(iccsense, rail, 3, 4);
+}
+
+static int
+nvkm_iccsense_ina219_read(struct nvkm_iccsense *iccsense,
+ struct nvkm_iccsense_rail *rail)
+{
+ return nvkm_iccsense_ina2x9_read(iccsense, rail, 1, 2);
+}
+
+static int
+nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
+ struct nvkm_iccsense_rail *rail)
+{
+ return nvkm_iccsense_poll_lane(rail->i2c, rail->addr,
+ 1 + (rail->rail * 2), 3,
+ 2 + (rail->rail * 2), 3, rail->mohm,
+ 40 * 8);
+}
+
+int
+nvkm_iccsense_read(struct nvkm_iccsense *iccsense, u8 idx)
+{
+ struct nvkm_iccsense_rail *rail;
+
+ if (!iccsense || idx >= iccsense->rail_count)
+ return -EINVAL;
+
+ rail = &iccsense->rails[idx];
+ if (!rail->read)
+ return -ENODEV;
+
+ return rail->read(iccsense, rail);
+}
+
+int
+nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense)
+{
+ int result = 0, i;
+ for (i = 0; i < iccsense->rail_count; ++i) {
+ int res = nvkm_iccsense_read(iccsense, i);
+ if (res >= 0)
+ result += res;
+ else
+ return res;
+ }
+ return result;
+}
+
+static void *
+nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
+
+ kfree(iccsense->rails);
+
+ return iccsense;
+}
+
+static int
+nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
+ struct nvkm_bios *bios = subdev->device->bios;
+ struct nvkm_i2c *i2c = subdev->device->i2c;
+ struct nvbios_iccsense stbl;
+ int i;
+
+ if (!i2c || !bios || nvbios_iccsense_parse(bios, &stbl) ||
+     !stbl.nr_entry)
+ return 0;
+
+ iccsense->rails = kmalloc(sizeof(*iccsense->rails) * stbl.nr_entry,
+ GFP_KERNEL);
+ if (!iccsense->rails)
+ return -ENOMEM;
+
+ iccsense->data_valid = true;
+ for (i = 0; i < stbl.nr_entry; ++i) {
+ struct pwr_rail_t *r = &stbl.rail[i];
+ struct nvbios_extdev_func extdev;
+ struct nvkm_iccsense_rail *rail;
+ struct nvkm_i2c_bus *i2c_bus;
+ u8 addr;
+
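+ /* skip rails that are disabled or lack a shunt resistor to measure across */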
+ if (!r->mode || r->resistor_mohm == 0)
+ continue;
+
+ if (nvbios_extdev_parse(bios, r->extdev_id, &extdev))
+ continue;
+
+ if (extdev.type == 0xff)
+ continue;
+
+ if (extdev.bus)
+ i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_SEC);
+ else
+ i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+ if (!i2c_bus)
+ continue;
+
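+ /* extdev addresses are stored in 8-bit form; convert to the 7-bit address used by i2c */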
+ addr = extdev.addr >> 1;
+ if (!nvkm_iccsense_validate_device(&i2c_bus->i2c, addr,
+ extdev.type, r->rail)) {
+ iccsense->data_valid = false;
+ nvkm_warn(subdev, "found unknown or invalid rail entry"
+ " type 0x%x rail %i, power reading might be"
+ " invalid\n", extdev.type, r->rail);
+ continue;
+ }
+
+ rail = &iccsense->rails[iccsense->rail_count];
+ switch (extdev.type) {
+ case NVBIOS_EXTDEV_INA209:
+ rail->read = nvkm_iccsense_ina209_read;
+ break;
+ case NVBIOS_EXTDEV_INA219:
+ rail->read = nvkm_iccsense_ina219_read;
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ rail->read = nvkm_iccsense_ina3221_read;
+ break;
+ }
+
+ rail->addr = addr;
+ rail->rail = r->rail;
+ rail->mohm = r->resistor_mohm;
+ rail->i2c = &i2c_bus->i2c;
+ ++iccsense->rail_count;
+ }
+ return 0;
+}
+
+static const struct nvkm_subdev_func iccsense_func = {
+ .oneinit = nvkm_iccsense_oneinit,
+ .dtor = nvkm_iccsense_dtor,
+};
+
+void
+nvkm_iccsense_ctor(struct nvkm_device *device, int index,
+ struct nvkm_iccsense *iccsense)
+{
+ nvkm_subdev_ctor(&iccsense_func, device, index, 0, &iccsense->subdev);
+}
+
+int
+nvkm_iccsense_new_(struct nvkm_device *device, int index,
+ struct nvkm_iccsense **iccsense)
+{
+ if (!(*iccsense = kzalloc(sizeof(**iccsense), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_iccsense_ctor(device, index, *iccsense);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
new file mode 100644
index 000000000000..cccff1c8a409
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/gf100.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include "priv.h"
+
+int
+gf100_iccsense_new(struct nvkm_device *device, int index,
+ struct nvkm_iccsense **piccsense)
+{
+ return nvkm_iccsense_new_(device, index, piccsense);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
new file mode 100644
index 000000000000..ed398b81e86e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -0,0 +1,16 @@
+#ifndef __NVKM_ICCSENSE_PRIV_H__
+#define __NVKM_ICCSENSE_PRIV_H__
+#define nvkm_iccsense(p) container_of((p), struct nvkm_iccsense, subdev)
+#include <subdev/iccsense.h>
+
+struct nvkm_iccsense_rail {
+ int (*read)(struct nvkm_iccsense *, struct nvkm_iccsense_rail *);
+ struct i2c_adapter *i2c;
+ u8 addr;
+ u8 rail;
+ u8 mohm;
+};
+
+void nvkm_iccsense_ctor(struct nvkm_device *, int, struct nvkm_iccsense *);
+int nvkm_iccsense_new_(struct nvkm_device *, int, struct nvkm_iccsense **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 4c20fec64d96..6b8f2a19b2d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -228,6 +228,8 @@ gk20a_instobj_release_dma(struct nvkm_memory *memory)
struct gk20a_instmem *imem = node->imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
+ /* in case we got a write-combined mapping */
+ wmb();
nvkm_ltc_invalidate(ltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index f8108df3cb38..932b366598aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -2,4 +2,4 @@ nvkm-y += nvkm/subdev/ltc/base.o
nvkm-y += nvkm/subdev/ltc/gf100.o
nvkm-y += nvkm/subdev/ltc/gk104.o
nvkm-y += nvkm/subdev/ltc/gm107.o
-nvkm-y += nvkm/subdev/ltc/gm204.o
+nvkm-y += nvkm/subdev/ltc/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index fb0de83da13c..c9eb677967a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -129,9 +129,7 @@ gf100_ltc_invalidate(struct nvkm_ltc *ltc)
s64 taken;
nvkm_wr32(device, 0x70004, 0x00000001);
- taken = nvkm_wait_msec(device, 2, 0x70004, 0x00000003, 0x00000000);
- if (taken < 0)
- nvkm_warn(&ltc->subdev, "LTC invalidate timeout\n");
+ taken = nvkm_wait_msec(device, 2000, 0x70004, 0x00000003, 0x00000000);
if (taken > 0)
nvkm_debug(&ltc->subdev, "LTC invalidate took %lld ns\n", taken);
@@ -144,9 +142,7 @@ gf100_ltc_flush(struct nvkm_ltc *ltc)
s64 taken;
nvkm_wr32(device, 0x70010, 0x00000001);
- taken = nvkm_wait_msec(device, 2, 0x70010, 0x00000003, 0x00000000);
- if (taken < 0)
- nvkm_warn(&ltc->subdev, "LTC flush timeout\n");
+ taken = nvkm_wait_msec(device, 2000, 0x70010, 0x00000003, 0x00000000);
if (taken > 0)
nvkm_debug(&ltc->subdev, "LTC flush took %lld ns\n", taken);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 2af1f9e100fc..e292f5679418 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -43,10 +43,8 @@ gm107_ltc_cbc_wait(struct nvkm_ltc *ltc)
for (c = 0; c < ltc->ltc_nr; c++) {
for (s = 0; s < ltc->lts_nr; s++) {
const u32 addr = 0x14046c + (c * 0x2000) + (s * 0x200);
- nvkm_msec(device, 2000,
- if (!nvkm_rd32(device, addr))
- break;
- );
+ nvkm_wait_msec(device, 2000, addr,
+ 0x00000004, 0x00000000);
}
}
}
@@ -75,7 +73,7 @@ gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = &ltc->subdev;
struct nvkm_device *device = subdev->device;
- u32 base = 0x140000 + (c * 0x2000) + (s * 0x400);
+ u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
u32 stat = nvkm_rd32(device, base + 0x00c);
if (stat) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
index 5ad6fb9d022d..2a29bfd5125a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
@@ -27,7 +27,7 @@
#include <subdev/timer.h>
static int
-gm204_ltc_oneinit(struct nvkm_ltc *ltc)
+gm200_ltc_oneinit(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
@@ -37,15 +37,15 @@ gm204_ltc_oneinit(struct nvkm_ltc *ltc)
return gf100_ltc_oneinit_tag_ram(ltc);
}
static void
-gm204_ltc_init(struct nvkm_ltc *ltc)
+gm200_ltc_init(struct nvkm_ltc *ltc)
{
nvkm_wr32(ltc->subdev.device, 0x17e278, ltc->tag_base);
}
static const struct nvkm_ltc_func
-gm204_ltc = {
- .oneinit = gm204_ltc_oneinit,
- .init = gm204_ltc_init,
+gm200_ltc = {
+ .oneinit = gm200_ltc_oneinit,
+ .init = gm200_ltc_init,
.intr = gm107_ltc_intr, /*XXX: not validated */
.cbc_clear = gm107_ltc_cbc_clear,
.cbc_wait = gm107_ltc_cbc_wait,
@@ -57,7 +57,7 @@ gm204_ltc = {
};
int
-gm204_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+gm200_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
{
- return nvkm_ltc_new_(&gm204_ltc, device, index, pltc);
+ return nvkm_ltc_new_(&gm200_ltc, device, index, pltc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 770294457274..e2faccffee6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -24,8 +24,8 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
/* 0x0058: proc_list_head */
0x54534f48,
- 0x00000507,
- 0x000004a4,
+ 0x0000050a,
+ 0x000004a7,
0x00000000,
0x00000000,
0x00000000,
@@ -46,8 +46,8 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x00000753,
- 0x00000745,
+ 0x00000756,
+ 0x00000748,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000757,
- 0x00000755,
+ 0x0000075a,
+ 0x00000758,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000b87,
- 0x00000a2a,
+ 0x00000b8a,
+ 0x00000a2d,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000bb0,
- 0x00000b89,
+ 0x00000bb3,
+ 0x00000b8c,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000bbc,
- 0x00000bba,
+ 0x00000bbf,
+ 0x00000bbd,
0x00000000,
0x00000000,
0x00000000,
@@ -229,26 +229,26 @@ uint32_t gf100_pmu_data[] = {
/* 0x0370: memx_func_head */
0x00000001,
0x00000000,
- 0x00000546,
+ 0x00000549,
/* 0x037c: memx_func_next */
0x00000002,
0x00000000,
- 0x000005d0,
+ 0x000005d3,
0x00000003,
0x00000002,
- 0x0000069a,
+ 0x0000069d,
0x00040004,
0x00000000,
- 0x000006b6,
+ 0x000006b9,
0x00010005,
0x00000000,
- 0x000006d3,
+ 0x000006d6,
0x00010006,
0x00000000,
- 0x00000658,
+ 0x0000065b,
0x00000007,
0x00000000,
- 0x000006de,
+ 0x000006e1,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -917,887 +917,887 @@ uint32_t gf100_pmu_data[] = {
};
uint32_t gf100_pmu_code[] = {
- 0x03930ef5,
+ 0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
0xd00604b6,
0x04bd000e,
- 0xf001d7f0,
- 0x07f101d3,
- 0x04b607ac,
- 0x000dd006,
-/* 0x0022: rd32_wait */
- 0xd7f104bd,
- 0xd4b607ac,
- 0x00ddcf06,
- 0x7000d4f1,
- 0xf1f21bf4,
- 0xb607a4d7,
- 0xddcf06d4,
-/* 0x003f: wr32 */
- 0xf100f800,
- 0xb607a007,
- 0x0ed00604,
- 0xf104bd00,
- 0xb607a407,
+ 0x0001d7f1,
+ 0xf101d3f0,
+ 0xb607ac07,
0x0dd00604,
- 0xf004bd00,
- 0xd5f002d7,
- 0x01d3f0f0,
- 0x07ac07f1,
+/* 0x0023: rd32_wait */
+ 0xf104bd00,
+ 0xb607acd7,
+ 0xddcf06d4,
+ 0x00d4f100,
+ 0xf21bf470,
+ 0x07a4d7f1,
+ 0xcf06d4b6,
+ 0x00f800dd,
+/* 0x0040: wr32 */
+ 0x07a007f1,
+ 0xd00604b6,
+ 0x04bd000e,
+ 0x07a407f1,
0xd00604b6,
0x04bd000d,
-/* 0x006c: wr32_wait */
- 0x07acd7f1,
- 0xcf06d4b6,
- 0xd4f100dd,
- 0x1bf47000,
-/* 0x007f: nsec */
- 0xf900f8f2,
- 0xf080f990,
- 0x84b62c87,
- 0x0088cf06,
-/* 0x008c: nsec_loop */
- 0xb62c97f0,
- 0x99cf0694,
- 0x0298bb00,
- 0xf4069eb8,
- 0x80fcf11e,
- 0x00f890fc,
-/* 0x00a4: wait */
- 0x80f990f9,
- 0xb62c87f0,
- 0x88cf0684,
-/* 0x00b1: wait_loop */
- 0x02eeb900,
- 0xb90421f4,
- 0xadfd02da,
- 0x06acb804,
- 0xf0150bf4,
+ 0x00f2d7f1,
+ 0xf101d3f0,
+ 0xb607ac07,
+ 0x0dd00604,
+/* 0x006b: wr32_wait */
+ 0xf104bd00,
+ 0xb607acd7,
+ 0xddcf06d4,
+ 0x00d4f100,
+ 0xf21bf470,
+/* 0x007e: nsec */
+ 0x90f900f8,
+ 0x87f080f9,
+ 0x0684b62c,
+/* 0x008b: nsec_loop */
+ 0xf00088cf,
0x94b62c97,
0x0099cf06,
0xb80298bb,
- 0x1ef4069b,
-/* 0x00d5: wait_done */
- 0xfc80fcdf,
-/* 0x00db: intr_watchdog */
- 0x9800f890,
- 0x96b003e9,
- 0x2a0bf400,
- 0xbb9a0a98,
- 0x1cf4029a,
- 0x01d7f00f,
- 0x02d221f5,
- 0x0ef494bd,
-/* 0x00f9: intr_watchdog_next_time */
- 0x9b0a9815,
- 0xf400a6b0,
- 0x9ab8090b,
- 0x061cf406,
-/* 0x0108: intr_watchdog_next_time_set */
-/* 0x010b: intr_watchdog_next_proc */
- 0x809b0980,
- 0xe0b603e9,
- 0x68e6b158,
- 0xc61bf402,
-/* 0x011a: intr */
- 0x00f900f8,
- 0x80f904bd,
- 0xa0f990f9,
- 0xc0f9b0f9,
- 0xe0f9d0f9,
- 0xf7f0f0f9,
- 0x0188fe00,
- 0x87f180f9,
- 0x84b605d0,
+ 0x1ef4069e,
+ 0xfc80fcf1,
+/* 0x00a3: wait */
+ 0xf900f890,
+ 0xf080f990,
+ 0x84b62c87,
0x0088cf06,
- 0xf10180b6,
- 0xb605d007,
+/* 0x00b0: wait_loop */
+ 0xf402eeb9,
+ 0xdab90421,
+ 0x04adfd02,
+ 0xf406acb8,
+ 0x97f0150b,
+ 0x0694b62c,
+ 0xbb0099cf,
+ 0x9bb80298,
+ 0xdf1ef406,
+/* 0x00d4: wait_done */
+ 0x90fc80fc,
+/* 0x00da: intr_watchdog */
+ 0xe99800f8,
+ 0x0096b003,
+ 0x982a0bf4,
+ 0x9abb9a0a,
+ 0x0f1cf402,
+ 0xf501d7f0,
+ 0xbd02d121,
+ 0x150ef494,
+/* 0x00f8: intr_watchdog_next_time */
+ 0xb09b0a98,
+ 0x0bf400a6,
+ 0x069ab809,
+/* 0x0107: intr_watchdog_next_time_set */
+ 0x80061cf4,
+/* 0x010a: intr_watchdog_next_proc */
+ 0xe9809b09,
+ 0x58e0b603,
+ 0x0268e6b1,
+ 0xf8c61bf4,
+/* 0x0119: intr */
+ 0xbd00f900,
+ 0xf980f904,
+ 0xf9a0f990,
+ 0xf9c0f9b0,
+ 0xf9e0f9d0,
+ 0x00f7f0f0,
+ 0xf90188fe,
+ 0xd087f180,
+ 0x0684b605,
+ 0xb60088cf,
+ 0x07f10180,
+ 0x04b605d0,
+ 0x0008d006,
+ 0x87f004bd,
+ 0x0684b608,
+ 0xc40088cf,
+ 0x0bf40289,
+ 0x9b008023,
+ 0xf458e7f0,
+ 0x0998da21,
+ 0x0096b09b,
+ 0xf0110bf4,
+ 0x04b63407,
+ 0x0009d006,
+ 0x098004bd,
+/* 0x017d: intr_skip_watchdog */
+ 0x0089e49a,
+ 0x480bf408,
+ 0x068897f1,
+ 0xcf0694b6,
+ 0x9ac40099,
+ 0x2c0bf402,
+ 0x04c0c7f1,
+ 0xcf06c4b6,
+ 0xc0f900cc,
+ 0x4f48e7f1,
+ 0x5453e3f1,
+ 0xf500d7f0,
+ 0xfc033621,
+ 0xc007f1c0,
+ 0x0604b604,
+ 0xbd000cd0,
+/* 0x01bd: intr_subintr_skip_fifo */
+ 0x8807f104,
+ 0x0604b606,
+ 0xbd0009d0,
+/* 0x01c9: intr_skip_subintr */
+ 0xe097f104,
+ 0xfd90bd00,
+ 0x07f00489,
+ 0x0604b604,
+ 0xbd0008d0,
+ 0xfe80fc04,
+ 0xf0fc0088,
+ 0xd0fce0fc,
+ 0xb0fcc0fc,
+ 0x90fca0fc,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x01f9: ticks_from_ns */
+ 0xf9c0f901,
+ 0xcbd7f1b0,
+ 0x00d3f000,
+ 0x040b21f5,
+ 0x03e8ccec,
+ 0xf400b4b0,
+ 0xeeec120b,
+ 0xd7f103e8,
+ 0xd3f000cb,
+ 0x0b21f500,
+/* 0x0221: ticks_from_ns_quit */
+ 0x02ceb904,
+ 0xc0fcb0fc,
+/* 0x022a: ticks_from_us */
+ 0xc0f900f8,
+ 0xd7f1b0f9,
+ 0xd3f000cb,
+ 0x0b21f500,
+ 0x02ceb904,
+ 0xf400b4b0,
+ 0xe4bd050b,
+/* 0x0244: ticks_from_us_quit */
+ 0xc0fcb0fc,
+/* 0x024a: ticks_to_us */
+ 0xd7f100f8,
+ 0xd3f000cb,
+ 0xecedff00,
+/* 0x0256: timer */
+ 0x90f900f8,
+ 0x32f480f9,
+ 0x03f89810,
+ 0xf40086b0,
+ 0x84bd651c,
+ 0xb63807f0,
0x08d00604,
0xf004bd00,
- 0x84b60887,
+ 0x84b63487,
0x0088cf06,
- 0xf40289c4,
- 0x0080230b,
- 0x58e7f09b,
- 0x98db21f4,
- 0x96b09b09,
- 0x110bf400,
+ 0xbb9a0998,
+ 0xe9bb0298,
+ 0x03fe8000,
+ 0xb60887f0,
+ 0x88cf0684,
+ 0x0284f000,
+ 0xf0261bf4,
+ 0x84b63487,
+ 0x0088cf06,
+ 0xf406e0b8,
+ 0xe8b8090b,
+ 0x111cf406,
+/* 0x02ac: timer_reset */
0xb63407f0,
- 0x09d00604,
+ 0x0ed00604,
0x8004bd00,
-/* 0x017e: intr_skip_watchdog */
- 0x89e49a09,
- 0x0bf40800,
- 0x8897f148,
- 0x0694b606,
- 0xc40099cf,
- 0x0bf4029a,
- 0xc0c7f12c,
- 0x06c4b604,
- 0xf900cccf,
- 0x48e7f1c0,
- 0x53e3f14f,
- 0x00d7f054,
- 0x033721f5,
- 0x07f1c0fc,
- 0x04b604c0,
- 0x000cd006,
-/* 0x01be: intr_subintr_skip_fifo */
- 0x07f104bd,
- 0x04b60688,
- 0x0009d006,
-/* 0x01ca: intr_skip_subintr */
- 0x97f104bd,
- 0x90bd00e0,
- 0xf00489fd,
- 0x04b60407,
- 0x0008d006,
- 0x80fc04bd,
- 0xfc0088fe,
- 0xfce0fcf0,
- 0xfcc0fcd0,
- 0xfca0fcb0,
- 0xfc80fc90,
- 0x0032f400,
-/* 0x01fa: ticks_from_ns */
- 0xc0f901f8,
- 0xd7f1b0f9,
- 0xd3f000cb,
- 0x0821f500,
- 0xe8ccec04,
- 0x00b4b003,
- 0xec120bf4,
- 0xf103e8ee,
- 0xf000cbd7,
- 0x21f500d3,
-/* 0x0222: ticks_from_ns_quit */
- 0xceb90408,
- 0xfcb0fc02,
-/* 0x022b: ticks_from_us */
- 0xf900f8c0,
- 0xf1b0f9c0,
- 0xf000cbd7,
- 0x21f500d3,
- 0xceb90408,
- 0x00b4b002,
- 0xbd050bf4,
-/* 0x0245: ticks_from_us_quit */
- 0xfcb0fce4,
-/* 0x024b: ticks_to_us */
- 0xf100f8c0,
- 0xf000cbd7,
- 0xedff00d3,
-/* 0x0257: timer */
- 0xf900f8ec,
- 0xf480f990,
- 0xf8981032,
- 0x0086b003,
- 0xbd651cf4,
- 0x3807f084,
+/* 0x02ba: timer_enable */
+ 0x87f09a0e,
+ 0x3807f001,
0xd00604b6,
0x04bd0008,
- 0xb63487f0,
- 0x88cf0684,
- 0x9a099800,
- 0xbb0298bb,
- 0xfe8000e9,
- 0x0887f003,
- 0xcf0684b6,
- 0x84f00088,
- 0x261bf402,
- 0xb63487f0,
- 0x88cf0684,
- 0x06e0b800,
- 0xb8090bf4,
- 0x1cf406e8,
-/* 0x02ad: timer_reset */
- 0x3407f011,
- 0xd00604b6,
- 0x04bd000e,
-/* 0x02bb: timer_enable */
- 0xf09a0e80,
- 0x07f00187,
- 0x0604b638,
- 0xbd0008d0,
-/* 0x02c9: timer_done */
- 0x1031f404,
+/* 0x02c8: timer_done */
+ 0xfc1031f4,
+ 0xf890fc80,
+/* 0x02d1: send_proc */
+ 0xf980f900,
+ 0x05e89890,
+ 0xf004e998,
+ 0x89b80486,
+ 0x2a0bf406,
+ 0x940398c4,
+ 0x80b60488,
+ 0x008ebb18,
+ 0x8000fa98,
+ 0x8d80008a,
+ 0x028c8001,
+ 0xb6038b80,
+ 0x94f00190,
+ 0x04e98007,
+/* 0x030b: send_done */
+ 0xfc0231f4,
+ 0xf880fc90,
+/* 0x0311: find */
+ 0xf080f900,
+ 0x31f45887,
+/* 0x0319: find_loop */
+ 0x008a9801,
+ 0xf406aeb8,
+ 0x80b6100b,
+ 0x6886b158,
+ 0xf01bf402,
+/* 0x032f: find_done */
+ 0xb90132f4,
+ 0x80fc028e,
+/* 0x0336: send */
+ 0x21f500f8,
+ 0x01f40311,
+/* 0x033f: recv */
+ 0xf900f897,
+ 0x9880f990,
+ 0xe99805e8,
+ 0x0132f404,
+ 0xf40689b8,
+ 0x89c43d0b,
+ 0x0180b603,
+ 0x800784f0,
+ 0xea9805e8,
+ 0xfef0f902,
+ 0xf0f9018f,
+ 0x9402efb9,
+ 0xe9bb0499,
+ 0x18e0b600,
+ 0x9803eb98,
+ 0xed9802ec,
+ 0x00ee9801,
+ 0xf0fca5f9,
+ 0xf400f8fe,
+ 0xf0fc0131,
+/* 0x038c: recv_done */
0x90fc80fc,
-/* 0x02d2: send_proc */
- 0x80f900f8,
- 0xe89890f9,
- 0x04e99805,
- 0xb80486f0,
- 0x0bf40689,
- 0x0398c42a,
- 0xb6048894,
- 0x8ebb1880,
- 0x00fa9800,
- 0x80008a80,
- 0x8c80018d,
- 0x038b8002,
- 0xf00190b6,
- 0xe9800794,
- 0x0231f404,
-/* 0x030c: send_done */
- 0x80fc90fc,
-/* 0x0312: find */
- 0x80f900f8,
- 0xf45887f0,
-/* 0x031a: find_loop */
- 0x8a980131,
- 0x06aeb800,
- 0xb6100bf4,
- 0x86b15880,
- 0x1bf40268,
- 0x0132f4f0,
-/* 0x0330: find_done */
- 0xfc028eb9,
-/* 0x0337: send */
- 0xf500f880,
- 0xf4031221,
- 0x00f89701,
-/* 0x0340: recv */
- 0x80f990f9,
- 0x9805e898,
- 0x32f404e9,
- 0x0689b801,
- 0xc43d0bf4,
- 0x80b60389,
- 0x0784f001,
- 0x9805e880,
- 0xf0f902ea,
- 0xf9018ffe,
- 0x02efb9f0,
- 0xbb049994,
- 0xe0b600e9,
- 0x03eb9818,
- 0x9802ec98,
- 0xee9801ed,
- 0xfca5f900,
- 0x00f8fef0,
- 0xfc0131f4,
-/* 0x038d: recv_done */
- 0xfc80fcf0,
-/* 0x0393: init */
- 0xf100f890,
- 0xb6010817,
- 0x11cf0614,
- 0x0911e700,
- 0x0814b601,
- 0xf10014fe,
- 0xf000e017,
- 0x07f00013,
- 0x0604b61c,
- 0xbd0001d0,
- 0xff17f004,
- 0xb61407f0,
- 0x01d00604,
- 0xf004bd00,
- 0x15f10217,
- 0x07f00800,
- 0x0604b610,
- 0xbd0001d0,
- 0x1a17f104,
- 0x0013f001,
- 0xf40010fe,
- 0x17f01031,
- 0x3807f001,
+/* 0x0392: init */
+ 0x17f100f8,
+ 0x14b60108,
+ 0x0011cf06,
+ 0x010911e7,
+ 0xfe0814b6,
+ 0x17f10014,
+ 0x13f000e0,
+ 0x1c07f000,
+ 0xd00604b6,
+ 0x04bd0001,
+ 0xf0ff17f0,
+ 0x04b61407,
+ 0x0001d006,
+ 0x17f004bd,
+ 0x0015f102,
+ 0x1007f008,
0xd00604b6,
0x04bd0001,
-/* 0x03f7: init_proc */
- 0x9858f7f0,
- 0x16b001f1,
- 0xfa0bf400,
- 0xf0b615f9,
- 0xf20ef458,
-/* 0x0408: mulu32_32_64 */
- 0x20f910f9,
- 0x40f930f9,
- 0x9510e195,
- 0xc4bd10d2,
- 0xedffb4bd,
- 0x301dffc0,
- 0xf10234b9,
- 0xb6ffff34,
- 0x45b61034,
- 0x00c3bb10,
- 0xff01b4bb,
- 0x34b930e2,
- 0xff34f102,
- 0x1034b6ff,
- 0xbb1045b6,
- 0xb4bb00c3,
- 0x3012ff01,
- 0xfc00b3bb,
- 0xfc30fc40,
- 0xf810fc20,
-/* 0x0459: host_send */
- 0xb017f100,
+ 0x011917f1,
+ 0xf10013f0,
+ 0xfeffff14,
+ 0x31f40010,
+ 0x0117f010,
+ 0xb63807f0,
+ 0x01d00604,
+ 0xf004bd00,
+/* 0x03fa: init_proc */
+ 0xf19858f7,
+ 0x0016b001,
+ 0xf9fa0bf4,
+ 0x58f0b615,
+/* 0x040b: mulu32_32_64 */
+ 0xf9f20ef4,
+ 0xf920f910,
+ 0x9540f930,
+ 0xd29510e1,
+ 0xbdc4bd10,
+ 0xc0edffb4,
+ 0xb9301dff,
+ 0x34f10234,
+ 0x34b6ffff,
+ 0x1045b610,
+ 0xbb00c3bb,
+ 0xe2ff01b4,
+ 0x0234b930,
+ 0xffff34f1,
+ 0xb61034b6,
+ 0xc3bb1045,
+ 0x01b4bb00,
+ 0xbb3012ff,
+ 0x40fc00b3,
+ 0x20fc30fc,
+ 0x00f810fc,
+/* 0x045c: host_send */
+ 0x04b017f1,
+ 0xcf0614b6,
+ 0x27f10011,
+ 0x24b604a0,
+ 0x0022cf06,
+ 0xf40612b8,
+ 0x1ec4320b,
+ 0x04ee9407,
+ 0x0270e0b7,
+ 0x9803eb98,
+ 0xed9802ec,
+ 0x00ee9801,
+ 0x033621f5,
+ 0xc40110b6,
+ 0x07f10f1e,
+ 0x04b604b0,
+ 0x000ed006,
+ 0x0ef404bd,
+/* 0x04a5: host_send_done */
+/* 0x04a7: host_recv */
+ 0xf100f8ba,
+ 0xf14e4917,
+ 0xb8525413,
+ 0x0bf406e1,
+/* 0x04b5: host_recv_wait */
+ 0xcc17f1aa,
0x0614b604,
0xf10011cf,
- 0xb604a027,
+ 0xb604c827,
0x22cf0624,
- 0x0612b800,
- 0xc4320bf4,
- 0xee94071e,
- 0x70e0b704,
- 0x03eb9802,
- 0x9802ec98,
- 0xee9801ed,
- 0x3721f500,
- 0x0110b603,
- 0xf10f1ec4,
- 0xb604b007,
- 0x0ed00604,
- 0xf404bd00,
-/* 0x04a2: host_send_done */
- 0x00f8ba0e,
-/* 0x04a4: host_recv */
- 0x4e4917f1,
- 0x525413f1,
- 0xf406e1b8,
-/* 0x04b2: host_recv_wait */
- 0x17f1aa0b,
- 0x14b604cc,
- 0x0011cf06,
- 0x04c827f1,
- 0xcf0624b6,
- 0x16f00022,
- 0x0612b808,
- 0xc4e60bf4,
- 0x34b60723,
- 0xf030b704,
- 0x033b8002,
- 0x80023c80,
- 0x3e80013d,
- 0x0120b600,
- 0xf10f24f0,
- 0xb604c807,
- 0x02d00604,
- 0xf004bd00,
- 0x07f04027,
- 0x0604b600,
- 0xbd0002d0,
-/* 0x0507: host_init */
- 0xf100f804,
- 0xb6008017,
- 0x15f11014,
- 0x07f10270,
- 0x04b604d0,
- 0x0001d006,
- 0x17f104bd,
+ 0x0816f000,
+ 0xf40612b8,
+ 0x23c4e60b,
+ 0x0434b607,
+ 0x02f030b7,
+ 0x80033b80,
+ 0x3d80023c,
+ 0x003e8001,
+ 0xf00120b6,
+ 0x07f10f24,
+ 0x04b604c8,
+ 0x0002d006,
+ 0x27f004bd,
+ 0x0007f040,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x050a: host_init */
+ 0x17f100f8,
0x14b60080,
- 0xf015f110,
- 0xdc07f102,
+ 0x7015f110,
+ 0xd007f102,
0x0604b604,
0xbd0001d0,
- 0x0117f004,
- 0x04c407f1,
+ 0x8017f104,
+ 0x1014b600,
+ 0x02f015f1,
+ 0x04dc07f1,
0xd00604b6,
0x04bd0001,
-/* 0x0546: memx_func_enter */
- 0x67f100f8,
- 0x77f11620,
- 0x73f1f55d,
- 0x6eb9ffff,
- 0x0421f402,
- 0xfd02d8b9,
- 0x60f90487,
- 0xd0fc80f9,
- 0x21f4e0fc,
- 0xfe77f13f,
- 0xff73f1ff,
+ 0xf10117f0,
+ 0xb604c407,
+ 0x01d00604,
+ 0xf804bd00,
+/* 0x0549: memx_func_enter */
+ 0x2067f100,
+ 0x5d77f116,
+ 0xff73f1f5,
0x026eb9ff,
0xb90421f4,
0x87fd02d8,
0xf960f904,
0xfcd0fc80,
- 0x3f21f4e0,
- 0x26f067f1,
+ 0x4021f4e0,
+ 0xfffe77f1,
+ 0xffff73f1,
0xf4026eb9,
0xd8b90421,
0x0487fd02,
0x80f960f9,
0xe0fcd0fc,
- 0xf03f21f4,
+ 0xf14021f4,
+ 0xb926f067,
+ 0x21f4026e,
+ 0x02d8b904,
+ 0xf90487fd,
+ 0xfc80f960,
+ 0xf4e0fcd0,
+ 0x67f04021,
+ 0xe007f104,
+ 0x0604b607,
+ 0xbd0006d0,
+/* 0x05b5: memx_func_enter_wait */
+ 0xc067f104,
+ 0x0664b607,
+ 0xf00066cf,
+ 0x0bf40464,
+ 0x2c67f0f3,
+ 0xcf0664b6,
+ 0x06800066,
+/* 0x05d3: memx_func_leave */
+ 0xf000f8f1,
+ 0x64b62c67,
+ 0x0066cf06,
+ 0xf0f20680,
0x07f10467,
- 0x04b607e0,
+ 0x04b607e4,
0x0006d006,
-/* 0x05b2: memx_func_enter_wait */
+/* 0x05ee: memx_func_leave_wait */
0x67f104bd,
0x64b607c0,
0x0066cf06,
0xf40464f0,
- 0x67f0f30b,
- 0x0664b62c,
- 0x800066cf,
- 0x00f8f106,
-/* 0x05d0: memx_func_leave */
- 0xb62c67f0,
- 0x66cf0664,
- 0xf2068000,
- 0xf10467f0,
- 0xb607e407,
- 0x06d00604,
-/* 0x05eb: memx_func_leave_wait */
- 0xf104bd00,
- 0xb607c067,
- 0x66cf0664,
- 0x0464f000,
- 0xf1f31bf4,
- 0xf126f067,
- 0xf0000177,
+ 0x67f1f31b,
+ 0x77f126f0,
+ 0x73f00001,
+ 0x026eb900,
+ 0xb90421f4,
+ 0x87fd02d8,
+ 0xf960f905,
+ 0xfcd0fc80,
+ 0x4021f4e0,
+ 0x162067f1,
+ 0xf4026eb9,
+ 0xd8b90421,
+ 0x0587fd02,
+ 0x80f960f9,
+ 0xe0fcd0fc,
+ 0xf14021f4,
+ 0xf00aa277,
0x6eb90073,
0x0421f402,
0xfd02d8b9,
0x60f90587,
0xd0fc80f9,
0x21f4e0fc,
- 0x2067f13f,
- 0x026eb916,
- 0xb90421f4,
- 0x87fd02d8,
- 0xf960f905,
- 0xfcd0fc80,
- 0x3f21f4e0,
- 0x0aa277f1,
- 0xb90073f0,
- 0x21f4026e,
- 0x02d8b904,
- 0xf90587fd,
- 0xfc80f960,
- 0xf4e0fcd0,
- 0x00f83f21,
-/* 0x0658: memx_func_wait_vblank */
- 0xb0001698,
- 0x0bf40066,
- 0x0166b013,
- 0xf4060bf4,
-/* 0x066a: memx_func_wait_vblank_head1 */
- 0x77f12e0e,
- 0x0ef40020,
-/* 0x0671: memx_func_wait_vblank_head0 */
- 0x0877f107,
-/* 0x0675: memx_func_wait_vblank_0 */
- 0xc467f100,
- 0x0664b607,
- 0xfd0066cf,
- 0x1bf40467,
-/* 0x0685: memx_func_wait_vblank_1 */
- 0xc467f1f3,
- 0x0664b607,
- 0xfd0066cf,
- 0x0bf40467,
-/* 0x0695: memx_func_wait_vblank_fini */
- 0x0410b6f3,
-/* 0x069a: memx_func_wr32 */
- 0x169800f8,
- 0x01159800,
- 0xf90810b6,
- 0xfc50f960,
- 0xf4e0fcd0,
- 0x42b63f21,
- 0xe91bf402,
-/* 0x06b6: memx_func_wait */
- 0x87f000f8,
- 0x0684b62c,
- 0x980088cf,
- 0x1d98001e,
- 0x021c9801,
- 0xb6031b98,
- 0x21f41010,
-/* 0x06d3: memx_func_delay */
- 0x9800f8a4,
- 0x10b6001e,
- 0x7f21f404,
-/* 0x06de: memx_func_train */
- 0x00f800f8,
-/* 0x06e0: memx_exec */
- 0xd0f9e0f9,
- 0xb902c1b9,
-/* 0x06ea: memx_exec_next */
- 0x139802b2,
+/* 0x065b: memx_func_wait_vblank */
+ 0x9800f840,
+ 0x66b00016,
+ 0x130bf400,
+ 0xf40166b0,
+ 0x0ef4060b,
+/* 0x066d: memx_func_wait_vblank_head1 */
+ 0x2077f12e,
+ 0x070ef400,
+/* 0x0674: memx_func_wait_vblank_head0 */
+ 0x000877f1,
+/* 0x0678: memx_func_wait_vblank_0 */
+ 0x07c467f1,
+ 0xcf0664b6,
+ 0x67fd0066,
+ 0xf31bf404,
+/* 0x0688: memx_func_wait_vblank_1 */
+ 0x07c467f1,
+ 0xcf0664b6,
+ 0x67fd0066,
+ 0xf30bf404,
+/* 0x0698: memx_func_wait_vblank_fini */
+ 0xf80410b6,
+/* 0x069d: memx_func_wr32 */
+ 0x00169800,
+ 0xb6011598,
+ 0x60f90810,
+ 0xd0fc50f9,
+ 0x21f4e0fc,
+ 0x0242b640,
+ 0xf8e91bf4,
+/* 0x06b9: memx_func_wait */
+ 0x2c87f000,
+ 0xcf0684b6,
+ 0x1e980088,
+ 0x011d9800,
+ 0x98021c98,
+ 0x10b6031b,
+ 0xa321f410,
+/* 0x06d6: memx_func_delay */
+ 0x1e9800f8,
0x0410b600,
- 0x01f034e7,
- 0x01e033e7,
- 0xf00132b6,
- 0x35980c30,
- 0xb855f9de,
- 0x1ef40612,
- 0xf10b98e4,
- 0xbbf20c98,
- 0xb7f102cb,
- 0xb4b607c4,
- 0x00bbcf06,
- 0xe0fcd0fc,
- 0x033721f5,
-/* 0x0726: memx_info */
- 0xc67000f8,
- 0x0e0bf401,
-/* 0x072c: memx_info_data */
- 0x03ccc7f1,
- 0x0800b7f1,
-/* 0x0737: memx_info_train */
- 0xf10b0ef4,
- 0xf10bccc7,
-/* 0x073f: memx_info_send */
- 0xf50100b7,
- 0xf8033721,
-/* 0x0745: memx_recv */
- 0x01d6b000,
- 0xb0980bf4,
- 0x0bf400d6,
-/* 0x0753: memx_init */
- 0xf800f8d8,
-/* 0x0755: perf_recv */
-/* 0x0757: perf_init */
- 0xf800f800,
-/* 0x0759: i2c_drive_scl */
- 0x0036b000,
- 0xf1110bf4,
- 0xb607e007,
- 0x01d00604,
- 0xf804bd00,
-/* 0x076d: i2c_drive_scl_lo */
- 0xe407f100,
- 0x0604b607,
- 0xbd0001d0,
-/* 0x077b: i2c_drive_sda */
- 0xb000f804,
- 0x0bf40036,
- 0xe007f111,
- 0x0604b607,
- 0xbd0002d0,
-/* 0x078f: i2c_drive_sda_lo */
- 0xf100f804,
- 0xb607e407,
- 0x02d00604,
- 0xf804bd00,
-/* 0x079d: i2c_sense_scl */
- 0x0132f400,
- 0x07c437f1,
- 0xcf0634b6,
- 0x31fd0033,
- 0x060bf404,
-/* 0x07b3: i2c_sense_scl_done */
- 0xf80131f4,
-/* 0x07b5: i2c_sense_sda */
- 0x0132f400,
- 0x07c437f1,
- 0xcf0634b6,
- 0x32fd0033,
- 0x060bf404,
-/* 0x07cb: i2c_sense_sda_done */
- 0xf80131f4,
-/* 0x07cd: i2c_raise_scl */
- 0xf140f900,
- 0xf0089847,
- 0x21f50137,
-/* 0x07da: i2c_raise_scl_wait */
- 0xe7f10759,
- 0x21f403e8,
- 0x9d21f57f,
- 0x0901f407,
- 0xf40142b6,
-/* 0x07ee: i2c_raise_scl_done */
- 0x40fcef1b,
-/* 0x07f2: i2c_start */
- 0x21f500f8,
- 0x11f4079d,
- 0xb521f50d,
- 0x0611f407,
-/* 0x0803: i2c_start_rep */
- 0xf0300ef4,
- 0x21f50037,
- 0x37f00759,
- 0x7b21f501,
- 0x0076bb07,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b607cd,
- 0x1f11f404,
-/* 0x0830: i2c_start_send */
- 0xf50037f0,
- 0xf1077b21,
- 0xf41388e7,
- 0x37f07f21,
- 0x5921f500,
- 0x88e7f107,
- 0x7f21f413,
-/* 0x084c: i2c_start_out */
-/* 0x084e: i2c_stop */
- 0x37f000f8,
- 0x5921f500,
- 0x0037f007,
- 0x077b21f5,
- 0x03e8e7f1,
- 0xf07f21f4,
- 0x21f50137,
- 0xe7f10759,
- 0x21f41388,
- 0x0137f07f,
- 0x077b21f5,
- 0x1388e7f1,
- 0xf87f21f4,
-/* 0x0881: i2c_bitw */
- 0x7b21f500,
- 0xe8e7f107,
- 0x7f21f403,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xcd21f550,
- 0x0464b607,
- 0xf11811f4,
- 0xf41388e7,
- 0x37f07f21,
- 0x5921f500,
- 0x88e7f107,
- 0x7f21f413,
-/* 0x08c0: i2c_bitw_out */
-/* 0x08c2: i2c_bitr */
- 0x37f000f8,
- 0x7b21f501,
+ 0xf87e21f4,
+/* 0x06e1: memx_func_train */
+/* 0x06e3: memx_exec */
+ 0xf900f800,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x06ed: memx_exec_next */
+ 0x00139802,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12b855f9,
+ 0xe41ef406,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0xc4b7f102,
+ 0x06b4b607,
+ 0xfc00bbcf,
+ 0xf5e0fcd0,
+ 0xf8033621,
+/* 0x0729: memx_info */
+ 0x01c67000,
+/* 0x072f: memx_info_data */
+ 0xf10e0bf4,
+ 0xf103ccc7,
+ 0xf40800b7,
+/* 0x073a: memx_info_train */
+ 0xc7f10b0e,
+ 0xb7f10bcc,
+/* 0x0742: memx_info_send */
+ 0x21f50100,
+ 0x00f80336,
+/* 0x0748: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0980b,
+ 0xd80bf400,
+/* 0x0756: memx_init */
+ 0x00f800f8,
+/* 0x0758: perf_recv */
+/* 0x075a: perf_init */
+ 0x00f800f8,
+/* 0x075c: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x0770: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x077e: i2c_drive_sda */
+ 0x36b000f8,
+ 0x110bf400,
+ 0x07e007f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x0792: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x04b607e4,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x07a0: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x07b6: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x07b8: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x07ce: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x07d0: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x5c21f501,
+/* 0x07dd: i2c_raise_scl_wait */
0xe8e7f107,
- 0x7f21f403,
+ 0x7e21f403,
+ 0x07a021f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x07f1: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x07f5: i2c_start */
+ 0xa021f500,
+ 0x0d11f407,
+ 0x07b821f5,
+ 0xf40611f4,
+/* 0x0806: i2c_start_rep */
+ 0x37f0300e,
+ 0x5c21f500,
+ 0x0137f007,
+ 0x077e21f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xcd21f550,
+ 0xd021f550,
0x0464b607,
- 0xf51b11f4,
- 0xf007b521,
+/* 0x0833: i2c_start_send */
+ 0xf01f11f4,
0x21f50037,
- 0xe7f10759,
+ 0xe7f1077e,
0x21f41388,
- 0x013cf07f,
-/* 0x0907: i2c_bitr_done */
- 0xf80131f4,
-/* 0x0909: i2c_get_byte */
- 0x0057f000,
-/* 0x090f: i2c_get_byte_next */
- 0xb60847f0,
- 0x76bb0154,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb608c221,
- 0x11f40464,
- 0x0553fd2b,
- 0xf40142b6,
- 0x37f0d81b,
+ 0x0037f07e,
+ 0x075c21f5,
+ 0x1388e7f1,
+/* 0x084f: i2c_start_out */
+ 0xf87e21f4,
+/* 0x0851: i2c_stop */
+ 0x0037f000,
+ 0x075c21f5,
+ 0xf50037f0,
+ 0xf1077e21,
+ 0xf403e8e7,
+ 0x37f07e21,
+ 0x5c21f501,
+ 0x88e7f107,
+ 0x7e21f413,
+ 0xf50137f0,
+ 0xf1077e21,
+ 0xf41388e7,
+ 0x00f87e21,
+/* 0x0884: i2c_bitw */
+ 0x077e21f5,
+ 0x03e8e7f1,
+ 0xbb7e21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07d021f5,
+ 0xf40464b6,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f07e,
+ 0x075c21f5,
+ 0x1388e7f1,
+/* 0x08c3: i2c_bitw_out */
+ 0xf87e21f4,
+/* 0x08c5: i2c_bitr */
+ 0x0137f000,
+ 0x077e21f5,
+ 0x03e8e7f1,
+ 0xbb7e21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x07d021f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f007b8,
+ 0x5c21f500,
+ 0x88e7f107,
+ 0x7e21f413,
+ 0xf4013cf0,
+/* 0x090a: i2c_bitr_done */
+ 0x00f80131,
+/* 0x090c: i2c_get_byte */
+ 0xf00057f0,
+/* 0x0912: i2c_get_byte_next */
+ 0x54b60847,
0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60881,
-/* 0x0959: i2c_get_byte_done */
-/* 0x095b: i2c_put_byte */
- 0xf000f804,
-/* 0x095e: i2c_put_byte_next */
- 0x42b60847,
- 0x3854ff01,
+ 0x64b608c5,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x8421f550,
+ 0x0464b608,
+/* 0x095c: i2c_get_byte_done */
+/* 0x095e: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0961: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x088421f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x8121f550,
+ 0xc521f550,
0x0464b608,
- 0xb03411f4,
- 0x1bf40046,
- 0x0076bbd8,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x09b7: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x09b9: i2c_addr */
+ 0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b608c2,
- 0x0f11f404,
- 0xb00076bb,
- 0x1bf40136,
- 0x0132f406,
-/* 0x09b4: i2c_put_byte_done */
-/* 0x09b6: i2c_addr */
- 0x76bb00f8,
+ 0x64b607f5,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb607f221,
- 0x11f40464,
- 0x2ec3e729,
- 0x0134b601,
- 0xbb0553fd,
+ 0xb6095e21,
+/* 0x09fe: i2c_addr_done */
+ 0x00f80464,
+/* 0x0a00: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980d1c,
+/* 0x0a0f: i2c_acquire */
+ 0xf500f800,
+ 0xf40a0021,
+ 0xd9f00421,
+ 0x4021f403,
+/* 0x0a1e: i2c_release */
+ 0x21f500f8,
+ 0x21f40a00,
+ 0x03daf004,
+ 0xf84021f4,
+/* 0x0a2d: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xf413a001,
+ 0x0032980c,
+ 0x0ccc13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x095b21f5,
-/* 0x09fb: i2c_addr_done */
- 0xf80464b6,
-/* 0x09fd: i2c_acquire_addr */
- 0xf8cec700,
- 0xb702e4b6,
- 0x980d1ce0,
- 0x00f800ee,
-/* 0x0a0c: i2c_acquire */
- 0x09fd21f5,
- 0xf00421f4,
- 0x21f403d9,
-/* 0x0a1b: i2c_release */
- 0xf500f83f,
- 0xf409fd21,
- 0xdaf00421,
- 0x3f21f403,
-/* 0x0a2a: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13a0013a,
- 0x32980cf4,
- 0xcc13a000,
- 0x0031980c,
- 0xf90231f4,
- 0xf9e0f9d0,
- 0x0067f1d0,
- 0x0063f100,
- 0x01679210,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x0c21f550,
- 0x0464b60a,
- 0xd6b0d0fc,
- 0xb31bf500,
- 0x0057f000,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xb621f550,
- 0x0464b609,
- 0x00d011f5,
- 0xbbe0c5c7,
+ 0x0a0f21f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x095b21f5,
+ 0x09b921f5,
0xf50464b6,
- 0xf000ad11,
- 0x76bb0157,
+ 0xc700d011,
+ 0x76bbe0c5,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb609b621,
+ 0xb6095e21,
0x11f50464,
- 0x76bb008a,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6090921,
- 0x11f40464,
- 0xe05bcb6a,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x4e21f550,
- 0x0464b608,
- 0xbd025bb9,
- 0x430ef474,
-/* 0x0b30: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x57f03d1b,
- 0xb621f500,
- 0x3311f409,
- 0xf5e0c5c7,
- 0xf4095b21,
- 0x57f02911,
- 0xb621f500,
- 0x1f11f409,
- 0xf5e0b5c7,
- 0xf4095b21,
- 0x21f51511,
- 0x74bd084e,
- 0xf408c5c7,
- 0x32f4091b,
- 0x030ef402,
-/* 0x0b70: i2c_recv_not_wr08 */
-/* 0x0b70: i2c_recv_done */
- 0xf5f8cec7,
- 0xfc0a1b21,
- 0xf4d0fce0,
- 0x7cb90a12,
- 0x3721f502,
-/* 0x0b85: i2c_recv_exit */
-/* 0x0b87: i2c_init */
- 0xf800f803,
-/* 0x0b89: test_recv */
- 0xd817f100,
- 0x0614b605,
- 0xb60011cf,
- 0x07f10110,
- 0x04b605d8,
- 0x0001d006,
- 0xe7f104bd,
- 0xe3f1d900,
- 0x21f5134f,
- 0x00f80257,
-/* 0x0bb0: test_init */
- 0x0800e7f1,
- 0x025721f5,
-/* 0x0bba: idle_recv */
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b609b9,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b6090c,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x085121f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x0b33: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x09b921f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f4095e,
+ 0x0057f029,
+ 0x09b921f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f4095e,
+ 0x5121f515,
+ 0xc774bd08,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0b73: i2c_recv_not_wr08 */
+/* 0x0b73: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0a1e,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x033621f5,
+/* 0x0b88: i2c_recv_exit */
+/* 0x0b8a: i2c_init */
0x00f800f8,
-/* 0x0bbc: idle */
- 0xf10031f4,
- 0xb605d417,
- 0x11cf0614,
- 0x0110b600,
- 0x05d407f1,
- 0xd00604b6,
- 0x04bd0001,
-/* 0x0bd8: idle_loop */
- 0xf45817f0,
-/* 0x0bde: idle_proc */
-/* 0x0bde: idle_proc_exec */
- 0x10f90232,
- 0xf5021eb9,
- 0xfc034021,
- 0x0911f410,
- 0xf40231f4,
-/* 0x0bf2: idle_proc_next */
- 0x10b6ef0e,
- 0x061fb858,
- 0xf4e61bf4,
- 0x28f4dd02,
- 0xbb0ef400,
- 0x00000000,
+/* 0x0b8c: test_recv */
+ 0x05d817f1,
+ 0xcf0614b6,
+ 0x10b60011,
+ 0xd807f101,
+ 0x0604b605,
+ 0xbd0001d0,
+ 0x00e7f104,
+ 0x4fe3f1d9,
+ 0x5621f513,
+/* 0x0bb3: test_init */
+ 0xf100f802,
+ 0xf50800e7,
+ 0xf8025621,
+/* 0x0bbd: idle_recv */
+/* 0x0bbf: idle */
+ 0xf400f800,
+ 0x17f10031,
+ 0x14b605d4,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d407,
+ 0x01d00604,
+/* 0x0bdb: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x0be1: idle_proc */
+/* 0x0be1: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc033f,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0bf5: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00bb0ef4,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index 7bf6b39ed205..2d5bdc539697 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -24,8 +24,8 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
/* 0x0058: proc_list_head */
0x54534f48,
- 0x00000492,
- 0x0000043b,
+ 0x00000495,
+ 0x0000043e,
0x00000000,
0x00000000,
0x00000000,
@@ -46,8 +46,8 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x00000680,
- 0x00000672,
+ 0x00000683,
+ 0x00000675,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000684,
- 0x00000682,
+ 0x00000687,
+ 0x00000685,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000a9f,
- 0x00000942,
+ 0x00000aa2,
+ 0x00000945,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000ac2,
- 0x00000aa1,
+ 0x00000ac5,
+ 0x00000aa4,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t gf119_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000ace,
- 0x00000acc,
+ 0x00000ad1,
+ 0x00000acf,
0x00000000,
0x00000000,
0x00000000,
@@ -229,26 +229,26 @@ uint32_t gf119_pmu_data[] = {
/* 0x0370: memx_func_head */
0x00000001,
0x00000000,
- 0x000004c8,
+ 0x000004cb,
/* 0x037c: memx_func_next */
0x00000002,
0x00000000,
- 0x00000549,
+ 0x0000054c,
0x00000003,
0x00000002,
- 0x000005cd,
+ 0x000005d0,
0x00040004,
0x00000000,
- 0x000005e9,
+ 0x000005ec,
0x00010005,
0x00000000,
- 0x00000603,
+ 0x00000606,
0x00010006,
0x00000000,
- 0x000005c8,
+ 0x000005cb,
0x00000007,
0x00000000,
- 0x0000060e,
+ 0x00000611,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -916,821 +916,821 @@ uint32_t gf119_pmu_data[] = {
};
uint32_t gf119_pmu_code[] = {
- 0x03420ef5,
+ 0x03410ef5,
/* 0x0004: rd32 */
0x07a007f1,
0xbd000ed0,
- 0x01d7f004,
- 0xf101d3f0,
- 0xd007ac07,
- 0x04bd000d,
-/* 0x001c: rd32_wait */
- 0x07acd7f1,
- 0xf100ddcf,
- 0xf47000d4,
- 0xd7f1f51b,
- 0xddcf07a4,
-/* 0x0033: wr32 */
- 0xf100f800,
- 0xd007a007,
- 0x04bd000e,
- 0x07a407f1,
+ 0x01d7f104,
+ 0x01d3f000,
+ 0x07ac07f1,
0xbd000dd0,
- 0x02d7f004,
- 0xf0f0d5f0,
- 0x07f101d3,
- 0x0dd007ac,
-/* 0x0057: wr32_wait */
- 0xf104bd00,
- 0xcf07acd7,
- 0xd4f100dd,
- 0x1bf47000,
-/* 0x0067: nsec */
- 0xf900f8f5,
- 0xf080f990,
- 0x88cf2c87,
-/* 0x0071: nsec_loop */
- 0x2c97f000,
- 0xbb0099cf,
- 0x9eb80298,
- 0xf41ef406,
- 0x90fc80fc,
-/* 0x0086: wait */
+/* 0x001d: rd32_wait */
+ 0xacd7f104,
+ 0x00ddcf07,
+ 0x7000d4f1,
+ 0xf1f51bf4,
+ 0xcf07a4d7,
+ 0x00f800dd,
+/* 0x0034: wr32 */
+ 0x07a007f1,
+ 0xbd000ed0,
+ 0xa407f104,
+ 0x000dd007,
+ 0xd7f104bd,
+ 0xd3f000f2,
+ 0xac07f101,
+ 0x000dd007,
+/* 0x0056: wr32_wait */
+ 0xd7f104bd,
+ 0xddcf07ac,
+ 0x00d4f100,
+ 0xf51bf470,
+/* 0x0066: nsec */
0x90f900f8,
0x87f080f9,
0x0088cf2c,
-/* 0x0090: wait_loop */
- 0xf402eeb9,
- 0xdab90421,
- 0x04adfd02,
- 0xf406acb8,
- 0x97f0120b,
- 0x0099cf2c,
- 0xb80298bb,
- 0x1ef4069b,
-/* 0x00b1: wait_done */
- 0xfc80fce2,
-/* 0x00b7: intr_watchdog */
- 0x9800f890,
- 0x96b003e9,
- 0x2a0bf400,
- 0xbb9a0a98,
- 0x1cf4029a,
- 0x01d7f00f,
- 0x028121f5,
- 0x0ef494bd,
-/* 0x00d5: intr_watchdog_next_time */
- 0x9b0a9815,
- 0xf400a6b0,
- 0x9ab8090b,
- 0x061cf406,
-/* 0x00e4: intr_watchdog_next_time_set */
-/* 0x00e7: intr_watchdog_next_proc */
- 0x809b0980,
- 0xe0b603e9,
- 0x68e6b158,
- 0xc61bf402,
-/* 0x00f6: intr */
- 0x00f900f8,
- 0x80f904bd,
- 0xa0f990f9,
- 0xc0f9b0f9,
- 0xe0f9d0f9,
- 0xf7f0f0f9,
- 0x0188fe00,
- 0x87f180f9,
- 0x88cf05d0,
- 0x0180b600,
- 0x05d007f1,
- 0xbd0008d0,
- 0x0887f004,
- 0xc40088cf,
- 0x0bf40289,
- 0x9b008020,
- 0xf458e7f0,
- 0x0998b721,
- 0x0096b09b,
- 0xf00e0bf4,
- 0x09d03407,
- 0x8004bd00,
-/* 0x014e: intr_skip_watchdog */
- 0x89e49a09,
- 0x0bf40800,
- 0x8897f13c,
- 0x0099cf06,
- 0xf4029ac4,
- 0xc7f1260b,
- 0xcccf04c0,
- 0xf1c0f900,
- 0xf14f48e7,
- 0xf05453e3,
- 0x21f500d7,
- 0xc0fc02e6,
- 0x04c007f1,
- 0xbd000cd0,
-/* 0x0185: intr_subintr_skip_fifo */
- 0x8807f104,
- 0x0009d006,
-/* 0x018e: intr_skip_subintr */
- 0x97f104bd,
- 0x90bd00e0,
- 0xf00489fd,
- 0x08d00407,
- 0xfc04bd00,
- 0x0088fe80,
- 0xe0fcf0fc,
- 0xc0fcd0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0x32f400fc,
-/* 0x01bb: ticks_from_ns */
- 0xf901f800,
+/* 0x0070: nsec_loop */
+ 0xcf2c97f0,
+ 0x98bb0099,
+ 0x069eb802,
+ 0xfcf41ef4,
+ 0xf890fc80,
+/* 0x0085: wait */
+ 0xf990f900,
+ 0x2c87f080,
+/* 0x008f: wait_loop */
+ 0xb90088cf,
+ 0x21f402ee,
+ 0x02dab904,
+ 0xb804adfd,
+ 0x0bf406ac,
+ 0x2c97f012,
+ 0xbb0099cf,
+ 0x9bb80298,
+ 0xe21ef406,
+/* 0x00b0: wait_done */
+ 0x90fc80fc,
+/* 0x00b6: intr_watchdog */
+ 0xe99800f8,
+ 0x0096b003,
+ 0x982a0bf4,
+ 0x9abb9a0a,
+ 0x0f1cf402,
+ 0xf501d7f0,
+ 0xbd028021,
+ 0x150ef494,
+/* 0x00d4: intr_watchdog_next_time */
+ 0xb09b0a98,
+ 0x0bf400a6,
+ 0x069ab809,
+/* 0x00e3: intr_watchdog_next_time_set */
+ 0x80061cf4,
+/* 0x00e6: intr_watchdog_next_proc */
+ 0xe9809b09,
+ 0x58e0b603,
+ 0x0268e6b1,
+ 0xf8c61bf4,
+/* 0x00f5: intr */
+ 0xbd00f900,
+ 0xf980f904,
+ 0xf9a0f990,
+ 0xf9c0f9b0,
+ 0xf9e0f9d0,
+ 0x00f7f0f0,
+ 0xf90188fe,
+ 0xd087f180,
+ 0x0088cf05,
+ 0xf10180b6,
+ 0xd005d007,
+ 0x04bd0008,
+ 0xcf0887f0,
+ 0x89c40088,
+ 0x200bf402,
+ 0xf09b0080,
+ 0x21f458e7,
+ 0x9b0998b6,
+ 0xf40096b0,
+ 0x07f00e0b,
+ 0x0009d034,
+ 0x098004bd,
+/* 0x014d: intr_skip_watchdog */
+ 0x0089e49a,
+ 0x3c0bf408,
+ 0x068897f1,
+ 0xc40099cf,
+ 0x0bf4029a,
+ 0xc0c7f126,
+ 0x00cccf04,
+ 0xe7f1c0f9,
+ 0xe3f14f48,
+ 0xd7f05453,
+ 0xe521f500,
+ 0xf1c0fc02,
+ 0xd004c007,
+ 0x04bd000c,
+/* 0x0184: intr_subintr_skip_fifo */
+ 0x068807f1,
+ 0xbd0009d0,
+/* 0x018d: intr_skip_subintr */
+ 0xe097f104,
+ 0xfd90bd00,
+ 0x07f00489,
+ 0x0008d004,
+ 0x80fc04bd,
+ 0xfc0088fe,
+ 0xfce0fcf0,
+ 0xfcc0fcd0,
+ 0xfca0fcb0,
+ 0xfc80fc90,
+ 0x0032f400,
+/* 0x01ba: ticks_from_ns */
+ 0xc0f901f8,
+ 0xd7f1b0f9,
+ 0xd3f00144,
+ 0xab21f500,
+ 0xe8ccec03,
+ 0x00b4b003,
+ 0xec120bf4,
+ 0xf103e8ee,
+ 0xf00144d7,
+ 0x21f500d3,
+/* 0x01e2: ticks_from_ns_quit */
+ 0xceb903ab,
+ 0xfcb0fc02,
+/* 0x01eb: ticks_from_us */
+ 0xf900f8c0,
0xf1b0f9c0,
0xf00144d7,
0x21f500d3,
- 0xccec03a8,
- 0xb4b003e8,
- 0x120bf400,
- 0x03e8eeec,
- 0x0144d7f1,
- 0xf500d3f0,
-/* 0x01e3: ticks_from_ns_quit */
- 0xb903a821,
- 0xb0fc02ce,
- 0x00f8c0fc,
-/* 0x01ec: ticks_from_us */
- 0xb0f9c0f9,
- 0x0144d7f1,
- 0xf500d3f0,
- 0xb903a821,
- 0xb4b002ce,
- 0x050bf400,
-/* 0x0206: ticks_from_us_quit */
- 0xb0fce4bd,
- 0x00f8c0fc,
-/* 0x020c: ticks_to_us */
- 0x0144d7f1,
- 0xff00d3f0,
- 0x00f8eced,
-/* 0x0218: timer */
- 0x80f990f9,
- 0x981032f4,
- 0x86b003f8,
- 0x531cf400,
- 0x07f084bd,
- 0x0008d038,
- 0x87f004bd,
- 0x0088cf34,
- 0xbb9a0998,
- 0xe9bb0298,
- 0x03fe8000,
- 0xcf0887f0,
- 0x84f00088,
- 0x201bf402,
- 0xcf3487f0,
- 0xe0b80088,
- 0x090bf406,
- 0xf406e8b8,
-/* 0x0262: timer_reset */
- 0x07f00e1c,
- 0x000ed034,
- 0x0e8004bd,
-/* 0x026d: timer_enable */
- 0x0187f09a,
- 0xd03807f0,
- 0x04bd0008,
-/* 0x0278: timer_done */
- 0xfc1031f4,
+ 0xceb903ab,
+ 0x00b4b002,
+ 0xbd050bf4,
+/* 0x0205: ticks_from_us_quit */
+ 0xfcb0fce4,
+/* 0x020b: ticks_to_us */
+ 0xf100f8c0,
+ 0xf00144d7,
+ 0xedff00d3,
+/* 0x0217: timer */
+ 0xf900f8ec,
+ 0xf480f990,
+ 0xf8981032,
+ 0x0086b003,
+ 0xbd531cf4,
+ 0x3807f084,
+ 0xbd0008d0,
+ 0x3487f004,
+ 0x980088cf,
+ 0x98bb9a09,
+ 0x00e9bb02,
+ 0xf003fe80,
+ 0x88cf0887,
+ 0x0284f000,
+ 0xf0201bf4,
+ 0x88cf3487,
+ 0x06e0b800,
+ 0xb8090bf4,
+ 0x1cf406e8,
+/* 0x0261: timer_reset */
+ 0x3407f00e,
+ 0xbd000ed0,
+ 0x9a0e8004,
+/* 0x026c: timer_enable */
+ 0xf00187f0,
+ 0x08d03807,
+/* 0x0277: timer_done */
+ 0xf404bd00,
+ 0x80fc1031,
+ 0x00f890fc,
+/* 0x0280: send_proc */
+ 0x90f980f9,
+ 0x9805e898,
+ 0x86f004e9,
+ 0x0689b804,
+ 0xc42a0bf4,
+ 0x88940398,
+ 0x1880b604,
+ 0x98008ebb,
+ 0x8a8000fa,
+ 0x018d8000,
+ 0x80028c80,
+ 0x90b6038b,
+ 0x0794f001,
+ 0xf404e980,
+/* 0x02ba: send_done */
+ 0x90fc0231,
+ 0x00f880fc,
+/* 0x02c0: find */
+ 0x87f080f9,
+ 0x0131f458,
+/* 0x02c8: find_loop */
+ 0xb8008a98,
+ 0x0bf406ae,
+ 0x5880b610,
+ 0x026886b1,
+ 0xf4f01bf4,
+/* 0x02de: find_done */
+ 0x8eb90132,
+ 0xf880fc02,
+/* 0x02e5: send */
+ 0xc021f500,
+ 0x9701f402,
+/* 0x02ee: recv */
+ 0x90f900f8,
+ 0xe89880f9,
+ 0x04e99805,
+ 0xb80132f4,
+ 0x0bf40689,
+ 0x0389c43d,
+ 0xf00180b6,
+ 0xe8800784,
+ 0x02ea9805,
+ 0x8ffef0f9,
+ 0xb9f0f901,
+ 0x999402ef,
+ 0x00e9bb04,
+ 0x9818e0b6,
+ 0xec9803eb,
+ 0x01ed9802,
+ 0xf900ee98,
+ 0xfef0fca5,
+ 0x31f400f8,
+/* 0x033b: recv_done */
+ 0xfcf0fc01,
0xf890fc80,
-/* 0x0281: send_proc */
- 0xf980f900,
- 0x05e89890,
- 0xf004e998,
- 0x89b80486,
- 0x2a0bf406,
- 0x940398c4,
- 0x80b60488,
- 0x008ebb18,
- 0x8000fa98,
- 0x8d80008a,
- 0x028c8001,
- 0xb6038b80,
- 0x94f00190,
- 0x04e98007,
-/* 0x02bb: send_done */
- 0xfc0231f4,
- 0xf880fc90,
-/* 0x02c1: find */
- 0xf080f900,
- 0x31f45887,
-/* 0x02c9: find_loop */
- 0x008a9801,
- 0xf406aeb8,
- 0x80b6100b,
- 0x6886b158,
- 0xf01bf402,
-/* 0x02df: find_done */
- 0xb90132f4,
- 0x80fc028e,
-/* 0x02e6: send */
- 0x21f500f8,
- 0x01f402c1,
-/* 0x02ef: recv */
- 0xf900f897,
- 0x9880f990,
- 0xe99805e8,
- 0x0132f404,
- 0xf40689b8,
- 0x89c43d0b,
- 0x0180b603,
- 0x800784f0,
- 0xea9805e8,
- 0xfef0f902,
- 0xf0f9018f,
- 0x9402efb9,
- 0xe9bb0499,
- 0x18e0b600,
- 0x9803eb98,
- 0xed9802ec,
- 0x00ee9801,
- 0xf0fca5f9,
- 0xf400f8fe,
- 0xf0fc0131,
-/* 0x033c: recv_done */
- 0x90fc80fc,
-/* 0x0342: init */
- 0x17f100f8,
- 0x11cf0108,
- 0x0911e700,
- 0x0814b601,
- 0xf10014fe,
- 0xf000e017,
- 0x07f00013,
- 0x0001d01c,
- 0x17f004bd,
- 0x1407f0ff,
+/* 0x0341: init */
+ 0x0817f100,
+ 0x0011cf01,
+ 0x010911e7,
+ 0xfe0814b6,
+ 0x17f10014,
+ 0x13f000e0,
+ 0x1c07f000,
0xbd0001d0,
- 0x0217f004,
- 0x080015f1,
- 0xd01007f0,
- 0x04bd0001,
- 0x00f617f1,
- 0xfe0013f0,
- 0x31f40010,
- 0x0117f010,
- 0xd03807f0,
+ 0xff17f004,
+ 0xd01407f0,
0x04bd0001,
-/* 0x0397: init_proc */
- 0x9858f7f0,
- 0x16b001f1,
- 0xfa0bf400,
- 0xf0b615f9,
- 0xf20ef458,
-/* 0x03a8: mulu32_32_64 */
- 0x20f910f9,
- 0x40f930f9,
- 0x9510e195,
- 0xc4bd10d2,
- 0xedffb4bd,
- 0x301dffc0,
- 0xf10234b9,
- 0xb6ffff34,
- 0x45b61034,
- 0x00c3bb10,
- 0xff01b4bb,
- 0x34b930e2,
- 0xff34f102,
- 0x1034b6ff,
- 0xbb1045b6,
- 0xb4bb00c3,
- 0x3012ff01,
- 0xfc00b3bb,
- 0xfc30fc40,
- 0xf810fc20,
-/* 0x03f9: host_send */
- 0xb017f100,
- 0x0011cf04,
- 0x04a027f1,
- 0xb80022cf,
- 0x0bf40612,
- 0x071ec42f,
- 0xb704ee94,
- 0x980270e0,
- 0xec9803eb,
- 0x01ed9802,
- 0xf500ee98,
- 0xb602e621,
- 0x1ec40110,
- 0xb007f10f,
- 0x000ed004,
- 0x0ef404bd,
-/* 0x0439: host_send_done */
-/* 0x043b: host_recv */
- 0xf100f8c3,
- 0xf14e4917,
- 0xb8525413,
- 0x0bf406e1,
-/* 0x0449: host_recv_wait */
- 0xcc17f1b3,
- 0x0011cf04,
- 0x04c827f1,
- 0xf00022cf,
- 0x12b80816,
- 0xec0bf406,
- 0xb60723c4,
- 0x30b70434,
- 0x3b8002f0,
- 0x023c8003,
- 0x80013d80,
- 0x20b6003e,
- 0x0f24f001,
- 0x04c807f1,
- 0xbd0002d0,
- 0x4027f004,
- 0xd00007f0,
- 0x04bd0002,
-/* 0x0492: host_init */
+ 0xf10217f0,
+ 0xf0080015,
+ 0x01d01007,
+ 0xf104bd00,
+ 0xf000f517,
+ 0x14f10013,
+ 0x10feffff,
+ 0x1031f400,
+ 0xf00117f0,
+ 0x01d03807,
+ 0xf004bd00,
+/* 0x039a: init_proc */
+ 0xf19858f7,
+ 0x0016b001,
+ 0xf9fa0bf4,
+ 0x58f0b615,
+/* 0x03ab: mulu32_32_64 */
+ 0xf9f20ef4,
+ 0xf920f910,
+ 0x9540f930,
+ 0xd29510e1,
+ 0xbdc4bd10,
+ 0xc0edffb4,
+ 0xb9301dff,
+ 0x34f10234,
+ 0x34b6ffff,
+ 0x1045b610,
+ 0xbb00c3bb,
+ 0xe2ff01b4,
+ 0x0234b930,
+ 0xffff34f1,
+ 0xb61034b6,
+ 0xc3bb1045,
+ 0x01b4bb00,
+ 0xbb3012ff,
+ 0x40fc00b3,
+ 0x20fc30fc,
+ 0x00f810fc,
+/* 0x03fc: host_send */
+ 0x04b017f1,
+ 0xf10011cf,
+ 0xcf04a027,
+ 0x12b80022,
+ 0x2f0bf406,
+ 0x94071ec4,
+ 0xe0b704ee,
+ 0xeb980270,
+ 0x02ec9803,
+ 0x9801ed98,
+ 0x21f500ee,
+ 0x10b602e5,
+ 0x0f1ec401,
+ 0x04b007f1,
+ 0xbd000ed0,
+ 0xc30ef404,
+/* 0x043c: host_send_done */
+/* 0x043e: host_recv */
0x17f100f8,
- 0x14b60080,
- 0x7015f110,
- 0xd007f102,
- 0x0001d004,
- 0x17f104bd,
- 0x14b60080,
- 0xf015f110,
- 0xdc07f102,
- 0x0001d004,
- 0x17f004bd,
- 0xc407f101,
- 0x0001d004,
- 0x00f804bd,
-/* 0x04c8: memx_func_enter */
- 0x162067f1,
- 0xf55d77f1,
- 0xffff73f1,
- 0xf4026eb9,
- 0xd8b90421,
- 0x0487fd02,
- 0x80f960f9,
- 0xe0fcd0fc,
- 0xf13321f4,
- 0xf1fffe77,
+ 0x13f14e49,
+ 0xe1b85254,
+ 0xb30bf406,
+/* 0x044c: host_recv_wait */
+ 0x04cc17f1,
+ 0xf10011cf,
+ 0xcf04c827,
+ 0x16f00022,
+ 0x0612b808,
+ 0xc4ec0bf4,
+ 0x34b60723,
+ 0xf030b704,
+ 0x033b8002,
+ 0x80023c80,
+ 0x3e80013d,
+ 0x0120b600,
+ 0xf10f24f0,
+ 0xd004c807,
+ 0x04bd0002,
+ 0xf04027f0,
+ 0x02d00007,
+ 0xf804bd00,
+/* 0x0495: host_init */
+ 0x8017f100,
+ 0x1014b600,
+ 0x027015f1,
+ 0x04d007f1,
+ 0xbd0001d0,
+ 0x8017f104,
+ 0x1014b600,
+ 0x02f015f1,
+ 0x04dc07f1,
+ 0xbd0001d0,
+ 0x0117f004,
+ 0x04c407f1,
+ 0xbd0001d0,
+/* 0x04cb: memx_func_enter */
+ 0xf100f804,
+ 0xf1162067,
+ 0xf1f55d77,
0xb9ffff73,
0x21f4026e,
0x02d8b904,
0xf90487fd,
0xfc80f960,
0xf4e0fcd0,
- 0x67f13321,
- 0x6eb926f0,
+ 0x77f13421,
+ 0x73f1fffe,
+ 0x6eb9ffff,
0x0421f402,
0xfd02d8b9,
0x60f90487,
0xd0fc80f9,
0x21f4e0fc,
- 0x0467f033,
- 0x07e007f1,
+ 0xf067f134,
+ 0x026eb926,
+ 0xb90421f4,
+ 0x87fd02d8,
+ 0xf960f904,
+ 0xfcd0fc80,
+ 0x3421f4e0,
+ 0xf10467f0,
+ 0xd007e007,
+ 0x04bd0006,
+/* 0x0534: memx_func_enter_wait */
+ 0x07c067f1,
+ 0xf00066cf,
+ 0x0bf40464,
+ 0x2c67f0f6,
+ 0x800066cf,
+ 0x00f8f106,
+/* 0x054c: memx_func_leave */
+ 0xcf2c67f0,
+ 0x06800066,
+ 0x0467f0f2,
+ 0x07e407f1,
0xbd0006d0,
-/* 0x0531: memx_func_enter_wait */
+/* 0x0561: memx_func_leave_wait */
0xc067f104,
0x0066cf07,
0xf40464f0,
- 0x67f0f60b,
- 0x0066cf2c,
- 0xf8f10680,
-/* 0x0549: memx_func_leave */
- 0x2c67f000,
- 0x800066cf,
- 0x67f0f206,
- 0xe407f104,
- 0x0006d007,
-/* 0x055e: memx_func_leave_wait */
- 0x67f104bd,
- 0x66cf07c0,
- 0x0464f000,
- 0xf1f61bf4,
- 0xf126f067,
- 0xf0000177,
+ 0x67f1f61b,
+ 0x77f126f0,
+ 0x73f00001,
+ 0x026eb900,
+ 0xb90421f4,
+ 0x87fd02d8,
+ 0xf960f905,
+ 0xfcd0fc80,
+ 0x3421f4e0,
+ 0x162067f1,
+ 0xf4026eb9,
+ 0xd8b90421,
+ 0x0587fd02,
+ 0x80f960f9,
+ 0xe0fcd0fc,
+ 0xf13421f4,
+ 0xf00aa277,
0x6eb90073,
0x0421f402,
0xfd02d8b9,
0x60f90587,
0xd0fc80f9,
0x21f4e0fc,
- 0x2067f133,
- 0x026eb916,
- 0xb90421f4,
- 0x87fd02d8,
- 0xf960f905,
- 0xfcd0fc80,
- 0x3321f4e0,
- 0x0aa277f1,
- 0xb90073f0,
- 0x21f4026e,
- 0x02d8b904,
- 0xf90587fd,
- 0xfc80f960,
- 0xf4e0fcd0,
- 0x00f83321,
-/* 0x05c8: memx_func_wait_vblank */
- 0xf80410b6,
-/* 0x05cd: memx_func_wr32 */
- 0x00169800,
- 0xb6011598,
- 0x60f90810,
- 0xd0fc50f9,
- 0x21f4e0fc,
- 0x0242b633,
- 0xf8e91bf4,
-/* 0x05e9: memx_func_wait */
- 0x2c87f000,
- 0x980088cf,
- 0x1d98001e,
- 0x021c9801,
- 0xb6031b98,
- 0x21f41010,
-/* 0x0603: memx_func_delay */
- 0x9800f886,
- 0x10b6001e,
- 0x6721f404,
-/* 0x060e: memx_func_train */
- 0x00f800f8,
-/* 0x0610: memx_exec */
- 0xd0f9e0f9,
- 0xb902c1b9,
-/* 0x061a: memx_exec_next */
- 0x139802b2,
+/* 0x05cb: memx_func_wait_vblank */
+ 0xb600f834,
+ 0x00f80410,
+/* 0x05d0: memx_func_wr32 */
+ 0x98001698,
+ 0x10b60115,
+ 0xf960f908,
+ 0xfcd0fc50,
+ 0x3421f4e0,
+ 0xf40242b6,
+ 0x00f8e91b,
+/* 0x05ec: memx_func_wait */
+ 0xcf2c87f0,
+ 0x1e980088,
+ 0x011d9800,
+ 0x98021c98,
+ 0x10b6031b,
+ 0x8521f410,
+/* 0x0606: memx_func_delay */
+ 0x1e9800f8,
0x0410b600,
- 0x01f034e7,
- 0x01e033e7,
- 0xf00132b6,
- 0x35980c30,
- 0xb855f9de,
- 0x1ef40612,
- 0xf10b98e4,
- 0xbbf20c98,
- 0xb7f102cb,
- 0xbbcf07c4,
- 0xfcd0fc00,
- 0xe621f5e0,
-/* 0x0653: memx_info */
- 0x7000f802,
- 0x0bf401c6,
-/* 0x0659: memx_info_data */
- 0xccc7f10e,
- 0x00b7f103,
- 0x0b0ef408,
-/* 0x0664: memx_info_train */
- 0x0bccc7f1,
- 0x0100b7f1,
-/* 0x066c: memx_info_send */
- 0x02e621f5,
-/* 0x0672: memx_recv */
- 0xd6b000f8,
- 0x9b0bf401,
- 0xf400d6b0,
- 0x00f8d80b,
-/* 0x0680: memx_init */
-/* 0x0682: perf_recv */
- 0x00f800f8,
-/* 0x0684: perf_init */
-/* 0x0686: i2c_drive_scl */
- 0x36b000f8,
- 0x0e0bf400,
- 0x07e007f1,
- 0xbd0001d0,
-/* 0x0697: i2c_drive_scl_lo */
- 0xf100f804,
- 0xd007e407,
+ 0xf86621f4,
+/* 0x0611: memx_func_train */
+/* 0x0613: memx_exec */
+ 0xf900f800,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x061d: memx_exec_next */
+ 0x00139802,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12b855f9,
+ 0xe41ef406,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0xc4b7f102,
+ 0x00bbcf07,
+ 0xe0fcd0fc,
+ 0x02e521f5,
+/* 0x0656: memx_info */
+ 0xc67000f8,
+ 0x0e0bf401,
+/* 0x065c: memx_info_data */
+ 0x03ccc7f1,
+ 0x0800b7f1,
+/* 0x0667: memx_info_train */
+ 0xf10b0ef4,
+ 0xf10bccc7,
+/* 0x066f: memx_info_send */
+ 0xf50100b7,
+ 0xf802e521,
+/* 0x0675: memx_recv */
+ 0x01d6b000,
+ 0xb09b0bf4,
+ 0x0bf400d6,
+/* 0x0683: memx_init */
+ 0xf800f8d8,
+/* 0x0685: perf_recv */
+/* 0x0687: perf_init */
+ 0xf800f800,
+/* 0x0689: i2c_drive_scl */
+ 0x0036b000,
+ 0xf10e0bf4,
+ 0xd007e007,
0x04bd0001,
-/* 0x06a2: i2c_drive_sda */
- 0x36b000f8,
- 0x0e0bf400,
- 0x07e007f1,
- 0xbd0002d0,
-/* 0x06b3: i2c_drive_sda_lo */
- 0xf100f804,
- 0xd007e407,
+/* 0x069a: i2c_drive_scl_lo */
+ 0x07f100f8,
+ 0x01d007e4,
+ 0xf804bd00,
+/* 0x06a5: i2c_drive_sda */
+ 0x0036b000,
+ 0xf10e0bf4,
+ 0xd007e007,
0x04bd0002,
-/* 0x06be: i2c_sense_scl */
+/* 0x06b6: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x02d007e4,
+ 0xf804bd00,
+/* 0x06c1: i2c_sense_scl */
+ 0x0132f400,
+ 0x07c437f1,
+ 0xfd0033cf,
+ 0x0bf40431,
+ 0x0131f406,
+/* 0x06d4: i2c_sense_scl_done */
+/* 0x06d6: i2c_sense_sda */
0x32f400f8,
0xc437f101,
0x0033cf07,
- 0xf40431fd,
+ 0xf40432fd,
0x31f4060b,
-/* 0x06d1: i2c_sense_scl_done */
-/* 0x06d3: i2c_sense_sda */
- 0xf400f801,
- 0x37f10132,
- 0x33cf07c4,
- 0x0432fd00,
- 0xf4060bf4,
-/* 0x06e6: i2c_sense_sda_done */
- 0x00f80131,
-/* 0x06e8: i2c_raise_scl */
- 0x47f140f9,
- 0x37f00898,
- 0x8621f501,
-/* 0x06f5: i2c_raise_scl_wait */
- 0xe8e7f106,
- 0x6721f403,
- 0x06be21f5,
- 0xb60901f4,
- 0x1bf40142,
-/* 0x0709: i2c_raise_scl_done */
- 0xf840fcef,
-/* 0x070d: i2c_start */
- 0xbe21f500,
- 0x0d11f406,
- 0x06d321f5,
- 0xf40611f4,
-/* 0x071e: i2c_start_rep */
- 0x37f0300e,
- 0x8621f500,
- 0x0137f006,
- 0x06a221f5,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xe821f550,
- 0x0464b606,
-/* 0x074b: i2c_start_send */
- 0xf01f11f4,
- 0x21f50037,
- 0xe7f106a2,
- 0x21f41388,
- 0x0037f067,
- 0x068621f5,
- 0x1388e7f1,
-/* 0x0767: i2c_start_out */
- 0xf86721f4,
-/* 0x0769: i2c_stop */
- 0x0037f000,
- 0x068621f5,
- 0xf50037f0,
- 0xf106a221,
- 0xf403e8e7,
- 0x37f06721,
- 0x8621f501,
- 0x88e7f106,
- 0x6721f413,
- 0xf50137f0,
- 0xf106a221,
- 0xf41388e7,
- 0x00f86721,
-/* 0x079c: i2c_bitw */
- 0x06a221f5,
+/* 0x06e9: i2c_sense_sda_done */
+/* 0x06eb: i2c_raise_scl */
+ 0xf900f801,
+ 0x9847f140,
+ 0x0137f008,
+ 0x068921f5,
+/* 0x06f8: i2c_raise_scl_wait */
0x03e8e7f1,
- 0xbb6721f4,
+ 0xf56621f4,
+ 0xf406c121,
+ 0x42b60901,
+ 0xef1bf401,
+/* 0x070c: i2c_raise_scl_done */
+ 0x00f840fc,
+/* 0x0710: i2c_start */
+ 0x06c121f5,
+ 0xf50d11f4,
+ 0xf406d621,
+ 0x0ef40611,
+/* 0x0721: i2c_start_rep */
+ 0x0037f030,
+ 0x068921f5,
+ 0xf50137f0,
+ 0xbb06a521,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x06e821f5,
+ 0x06eb21f5,
0xf40464b6,
- 0xe7f11811,
+/* 0x074e: i2c_start_send */
+ 0x37f01f11,
+ 0xa521f500,
+ 0x88e7f106,
+ 0x6621f413,
+ 0xf50037f0,
+ 0xf1068921,
+ 0xf41388e7,
+/* 0x076a: i2c_start_out */
+ 0x00f86621,
+/* 0x076c: i2c_stop */
+ 0xf50037f0,
+ 0xf0068921,
+ 0x21f50037,
+ 0xe7f106a5,
+ 0x21f403e8,
+ 0x0137f066,
+ 0x068921f5,
+ 0x1388e7f1,
+ 0xf06621f4,
+ 0x21f50137,
+ 0xe7f106a5,
0x21f41388,
- 0x0037f067,
- 0x068621f5,
+/* 0x079f: i2c_bitw */
+ 0xf500f866,
+ 0xf106a521,
+ 0xf403e8e7,
+ 0x76bb6621,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb606eb21,
+ 0x11f40464,
+ 0x88e7f118,
+ 0x6621f413,
+ 0xf50037f0,
+ 0xf1068921,
+ 0xf41388e7,
+/* 0x07de: i2c_bitw_out */
+ 0x00f86621,
+/* 0x07e0: i2c_bitr */
+ 0xf50137f0,
+ 0xf106a521,
+ 0xf403e8e7,
+ 0x76bb6621,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb606eb21,
+ 0x11f40464,
+ 0xd621f51b,
+ 0x0037f006,
+ 0x068921f5,
0x1388e7f1,
-/* 0x07db: i2c_bitw_out */
- 0xf86721f4,
-/* 0x07dd: i2c_bitr */
- 0x0137f000,
- 0x06a221f5,
- 0x03e8e7f1,
- 0xbb6721f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x06e821f5,
- 0xf40464b6,
- 0x21f51b11,
- 0x37f006d3,
- 0x8621f500,
- 0x88e7f106,
- 0x6721f413,
- 0xf4013cf0,
-/* 0x0822: i2c_bitr_done */
- 0x00f80131,
-/* 0x0824: i2c_get_byte */
- 0xf00057f0,
-/* 0x082a: i2c_get_byte_next */
- 0x54b60847,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b607dd,
- 0x2b11f404,
- 0xb60553fd,
- 0x1bf40142,
- 0x0137f0d8,
+ 0xf06621f4,
+ 0x31f4013c,
+/* 0x0825: i2c_bitr_done */
+/* 0x0827: i2c_get_byte */
+ 0xf000f801,
+ 0x47f00057,
+/* 0x082d: i2c_get_byte_next */
+ 0x0154b608,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x9c21f550,
+ 0xe021f550,
0x0464b607,
-/* 0x0874: i2c_get_byte_done */
-/* 0x0876: i2c_put_byte */
- 0x47f000f8,
-/* 0x0879: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
+ 0xfd2b11f4,
+ 0x42b60553,
+ 0xd81bf401,
+ 0xbb0137f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x079f21f5,
+/* 0x0877: i2c_get_byte_done */
+ 0xf80464b6,
+/* 0x0879: i2c_put_byte */
+ 0x0847f000,
+/* 0x087c: i2c_put_byte_next */
+ 0xff0142b6,
+ 0x76bb3854,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6079f21,
+ 0x11f40464,
+ 0x0046b034,
+ 0xbbd81bf4,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x079c21f5,
+ 0x07e021f5,
0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
+ 0x76bb0f11,
+ 0x0136b000,
+ 0xf4061bf4,
+/* 0x08d2: i2c_put_byte_done */
+ 0x00f80132,
+/* 0x08d4: i2c_addr */
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xdd21f550,
+ 0x1021f550,
0x0464b607,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x08cf: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x08d1: i2c_addr */
- 0x0076bb00,
+ 0xe72911f4,
+ 0xb6012ec3,
+ 0x53fd0134,
+ 0x0076bb05,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b6070d,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6087621,
-/* 0x0916: i2c_addr_done */
- 0x00f80464,
-/* 0x0918: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b705e4,
- 0x00f8d014,
-/* 0x0924: i2c_acquire */
- 0x091821f5,
- 0xf00421f4,
- 0x21f403d9,
-/* 0x0933: i2c_release */
- 0xf500f833,
- 0xf4091821,
- 0xdaf00421,
- 0x3321f403,
-/* 0x0942: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13a0013a,
- 0x32980cf4,
- 0xcc13a000,
- 0x0031980c,
- 0xf90231f4,
- 0xf9e0f9d0,
- 0x0067f1d0,
- 0x0063f100,
- 0x01679210,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x2421f550,
- 0x0464b609,
- 0xd6b0d0fc,
- 0xb31bf500,
- 0x0057f000,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xd121f550,
- 0x0464b608,
- 0x00d011f5,
- 0xbbe0c5c7,
+ 0x64b60879,
+/* 0x0919: i2c_addr_done */
+/* 0x091b: i2c_acquire_addr */
+ 0xc700f804,
+ 0xe4b6f8ce,
+ 0x14e0b705,
+/* 0x0927: i2c_acquire */
+ 0xf500f8d0,
+ 0xf4091b21,
+ 0xd9f00421,
+ 0x3421f403,
+/* 0x0936: i2c_release */
+ 0x21f500f8,
+ 0x21f4091b,
+ 0x03daf004,
+ 0xf83421f4,
+/* 0x0945: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xf413a001,
+ 0x0032980c,
+ 0x0ccc13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x087621f5,
+ 0x092721f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x08d421f5,
0xf50464b6,
- 0xf000ad11,
- 0x76bb0157,
+ 0xc700d011,
+ 0x76bbe0c5,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb608d121,
+ 0xb6087921,
0x11f50464,
- 0x76bb008a,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6082421,
- 0x11f40464,
- 0xe05bcb6a,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x6921f550,
- 0x0464b607,
- 0xbd025bb9,
- 0x430ef474,
-/* 0x0a48: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x57f03d1b,
- 0xd121f500,
- 0x3311f408,
- 0xf5e0c5c7,
- 0xf4087621,
- 0x57f02911,
- 0xd121f500,
- 0x1f11f408,
- 0xf5e0b5c7,
- 0xf4087621,
- 0x21f51511,
- 0x74bd0769,
- 0xf408c5c7,
- 0x32f4091b,
- 0x030ef402,
-/* 0x0a88: i2c_recv_not_wr08 */
-/* 0x0a88: i2c_recv_done */
- 0xf5f8cec7,
- 0xfc093321,
- 0xf4d0fce0,
- 0x7cb90a12,
- 0xe621f502,
-/* 0x0a9d: i2c_recv_exit */
-/* 0x0a9f: i2c_init */
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b608d4,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60827,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x076c21f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x0a4b: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x08d421f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40879,
+ 0x0057f029,
+ 0x08d421f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40879,
+ 0x6c21f515,
+ 0xc774bd07,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0a8b: i2c_recv_not_wr08 */
+/* 0x0a8b: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0936,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x02e521f5,
+/* 0x0aa0: i2c_recv_exit */
+/* 0x0aa2: i2c_init */
+ 0x00f800f8,
+/* 0x0aa4: test_recv */
+ 0x05d817f1,
+ 0xb60011cf,
+ 0x07f10110,
+ 0x01d005d8,
+ 0xf104bd00,
+ 0xf1d900e7,
+ 0xf5134fe3,
+ 0xf8021721,
+/* 0x0ac5: test_init */
+ 0x00e7f100,
+ 0x1721f508,
+/* 0x0acf: idle_recv */
0xf800f802,
-/* 0x0aa1: test_recv */
- 0xd817f100,
- 0x0011cf05,
- 0xf10110b6,
- 0xd005d807,
- 0x04bd0001,
- 0xd900e7f1,
- 0x134fe3f1,
- 0x021821f5,
-/* 0x0ac2: test_init */
- 0xe7f100f8,
- 0x21f50800,
- 0x00f80218,
-/* 0x0acc: idle_recv */
-/* 0x0ace: idle */
- 0x31f400f8,
- 0xd417f100,
- 0x0011cf05,
- 0xf10110b6,
- 0xd005d407,
- 0x04bd0001,
-/* 0x0ae4: idle_loop */
- 0xf45817f0,
-/* 0x0aea: idle_proc */
-/* 0x0aea: idle_proc_exec */
- 0x10f90232,
- 0xf5021eb9,
- 0xfc02ef21,
- 0x0911f410,
- 0xf40231f4,
-/* 0x0afe: idle_proc_next */
- 0x10b6ef0e,
- 0x061fb858,
- 0xf4e61bf4,
- 0x28f4dd02,
- 0xc10ef400,
- 0x00000000,
+/* 0x0ad1: idle */
+ 0x0031f400,
+ 0x05d417f1,
+ 0xb60011cf,
+ 0x07f10110,
+ 0x01d005d4,
+/* 0x0ae7: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x0aed: idle_proc */
+/* 0x0aed: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc02ee,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0b01: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00c10ef4,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 8a2b628642ac..3c731ff12871 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -24,8 +24,8 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
/* 0x0058: proc_list_head */
0x54534f48,
- 0x00000447,
- 0x000003f8,
+ 0x0000042c,
+ 0x000003df,
0x00000000,
0x00000000,
0x00000000,
@@ -46,8 +46,8 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x00000621,
- 0x00000613,
+ 0x000005f3,
+ 0x000005e5,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000625,
- 0x00000623,
+ 0x000005f7,
+ 0x000005f5,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000a29,
- 0x000008d0,
+ 0x000009f8,
+ 0x000008a2,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000a4a,
- 0x00000a2b,
+ 0x00000a16,
+ 0x000009fa,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000a55,
- 0x00000a53,
+ 0x00000a21,
+ 0x00000a1f,
0x00000000,
0x00000000,
0x00000000,
@@ -229,26 +229,26 @@ uint32_t gk208_pmu_data[] = {
/* 0x0370: memx_func_head */
0x00000001,
0x00000000,
- 0x00000477,
+ 0x0000045c,
/* 0x037c: memx_func_next */
0x00000002,
0x00000000,
- 0x000004f4,
+ 0x000004cf,
0x00000003,
0x00000002,
- 0x00000574,
+ 0x00000546,
0x00040004,
0x00000000,
- 0x00000591,
+ 0x00000563,
0x00010005,
0x00000000,
- 0x000005ab,
+ 0x0000057d,
0x00010006,
0x00000000,
- 0x0000056f,
+ 0x00000541,
0x00000007,
0x00000000,
- 0x000005b7,
+ 0x00000589,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -916,784 +916,771 @@ uint32_t gk208_pmu_data[] = {
};
uint32_t gk208_pmu_code[] = {
- 0x03100ef5,
+ 0x02f90ef5,
/* 0x0004: rd32 */
0xf607a040,
0x04bd000e,
- 0xd3f0010d,
+ 0x0100018d,
+ 0xf607ac40,
+ 0x04bd000d,
+/* 0x0018: rd32_wait */
+ 0xcf07ac4d,
+ 0xd4f100dd,
+ 0x1bf47000,
+ 0x07a44df6,
+ 0xf800ddcf,
+/* 0x002d: wr32 */
+ 0x07a04000,
+ 0xbd000ef6,
+ 0x07a44004,
+ 0xbd000df6,
+ 0x00f28d04,
0x07ac4001,
0xbd000df6,
-/* 0x0019: rd32_wait */
+/* 0x0049: wr32_wait */
0x07ac4d04,
0xf100ddcf,
0xf47000d4,
- 0xa44df61b,
- 0x00ddcf07,
-/* 0x002e: wr32 */
- 0xa04000f8,
- 0x000ef607,
- 0xa44004bd,
- 0x000df607,
- 0x020d04bd,
- 0xf0f0d5f0,
- 0xac4001d3,
- 0x000df607,
-/* 0x004e: wr32_wait */
- 0xac4d04bd,
- 0x00ddcf07,
- 0x7000d4f1,
- 0xf8f61bf4,
-/* 0x005d: nsec */
- 0xf990f900,
- 0xcf2c0880,
-/* 0x0066: nsec_loop */
- 0x2c090088,
- 0xbb0099cf,
- 0x9ea60298,
- 0xfcf61ef4,
- 0xf890fc80,
-/* 0x0079: wait */
- 0xf990f900,
- 0xcf2c0880,
-/* 0x0082: wait_loop */
- 0xeeb20088,
- 0x0000047e,
- 0xadfddab2,
- 0xf4aca604,
- 0x2c09100b,
- 0xbb0099cf,
- 0x9ba60298,
-/* 0x009f: wait_done */
- 0xfce61ef4,
- 0xf890fc80,
-/* 0x00a5: intr_watchdog */
- 0x03e99800,
+ 0x00f8f61b,
+/* 0x0058: nsec */
+ 0x80f990f9,
+ 0x88cf2c08,
+/* 0x0061: nsec_loop */
+ 0xcf2c0900,
+ 0x98bb0099,
+ 0xf49ea602,
+ 0x80fcf61e,
+ 0x00f890fc,
+/* 0x0074: wait */
+ 0x80f990f9,
+ 0x88cf2c08,
+/* 0x007d: wait_loop */
+ 0x7eeeb200,
+ 0xb2000004,
+ 0x04adfdda,
+ 0x0bf4aca6,
+ 0xcf2c0910,
+ 0x98bb0099,
+ 0xf49ba602,
+/* 0x009a: wait_done */
+ 0x80fce61e,
+ 0x00f890fc,
+/* 0x00a0: intr_watchdog */
+ 0xb003e998,
+ 0x0bf40096,
+ 0x9a0a9828,
+ 0xf4029abb,
+ 0x010d0e1c,
+ 0x00023e7e,
+ 0x0ef494bd,
+/* 0x00bd: intr_watchdog_next_time */
+ 0x9b0a9814,
+ 0xf400a6b0,
+ 0x9aa6080b,
+/* 0x00cb: intr_watchdog_next_time_set */
+ 0xb5061cf4,
+/* 0x00ce: intr_watchdog_next_proc */
+ 0xe9b59b09,
+ 0x58e0b603,
+ 0x0268e6b1,
+ 0xf8c81bf4,
+/* 0x00dd: intr */
+ 0xbd00f900,
+ 0xf980f904,
+ 0xf9a0f990,
+ 0xf9c0f9b0,
+ 0xf9e0f9d0,
+ 0xfe000ff0,
+ 0x80f90188,
+ 0xcf045048,
+ 0x80b60088,
+ 0x04504001,
+ 0xbd0008f6,
+ 0xcf080804,
+ 0x89c40088,
+ 0x1f0bf402,
+ 0x0e9b00b5,
+ 0x00a07e58,
+ 0x9b099800,
0xf40096b0,
- 0x0a98280b,
- 0x029abb9a,
- 0x0d0e1cf4,
- 0x02557e01,
- 0xf494bd00,
-/* 0x00c2: intr_watchdog_next_time */
- 0x0a98140e,
- 0x00a6b09b,
- 0xa6080bf4,
- 0x061cf49a,
-/* 0x00d0: intr_watchdog_next_time_set */
-/* 0x00d3: intr_watchdog_next_proc */
- 0xb59b09b5,
- 0xe0b603e9,
- 0x68e6b158,
- 0xc81bf402,
-/* 0x00e2: intr */
- 0x00f900f8,
- 0x80f904bd,
- 0xa0f990f9,
- 0xc0f9b0f9,
- 0xe0f9d0f9,
- 0x000ff0f9,
- 0xf90188fe,
- 0x04504880,
- 0xb60088cf,
- 0x50400180,
- 0x0008f604,
- 0x080804bd,
- 0xc40088cf,
- 0x0bf40289,
- 0x9b00b51f,
- 0xa57e580e,
- 0x09980000,
- 0x0096b09b,
- 0x000d0bf4,
- 0x0009f634,
- 0x09b504bd,
-/* 0x0135: intr_skip_watchdog */
- 0x0089e49a,
- 0x360bf408,
- 0xcf068849,
- 0x9ac40099,
- 0x220bf402,
- 0xcf04c04c,
- 0xc0f900cc,
- 0xf14f484e,
- 0x0d5453e3,
- 0x02b67e00,
- 0x40c0fc00,
- 0x0cf604c0,
-/* 0x0167: intr_subintr_skip_fifo */
- 0x4004bd00,
- 0x09f60688,
-/* 0x016f: intr_skip_subintr */
- 0x4904bd00,
- 0x90bd00e0,
- 0x000489fd,
- 0x0008f604,
- 0x80fc04bd,
- 0xfc0088fe,
- 0xfce0fcf0,
- 0xfcc0fcd0,
- 0xfca0fcb0,
- 0xfc80fc90,
- 0x0032f400,
-/* 0x019a: ticks_from_ns */
- 0xc0f901f8,
- 0xd7f1b0f9,
- 0xd3f00144,
- 0x6b21f500,
- 0xe8ccec03,
- 0x00b4b003,
- 0xec120bf4,
- 0xf103e8ee,
- 0xf00144d7,
- 0x21f500d3,
-/* 0x01c2: ticks_from_ns_quit */
- 0xceb2036b,
+ 0x34000d0b,
+ 0xbd0009f6,
+ 0x9a09b504,
+/* 0x0130: intr_skip_watchdog */
+ 0x080089e4,
+ 0x49340bf4,
+ 0x99cf0688,
+ 0x029ac400,
+ 0x4c200bf4,
+ 0xcccf04c0,
+ 0xdec0f900,
+ 0x54534f48,
+ 0x9f7e000d,
+ 0xc0fc0002,
+ 0xf604c040,
+ 0x04bd000c,
+/* 0x0160: intr_subintr_skip_fifo */
+ 0xf6068840,
+ 0x04bd0009,
+/* 0x0168: intr_skip_subintr */
+ 0xbd00e049,
+ 0x0489fd90,
+ 0x08f60400,
+ 0xfc04bd00,
+ 0x0088fe80,
+ 0xe0fcf0fc,
+ 0xc0fcd0fc,
+ 0xa0fcb0fc,
+ 0x80fc90fc,
+ 0x32f400fc,
+/* 0x0193: ticks_from_ns */
+ 0xf901f800,
+ 0x4db0f9c0,
+ 0x527e0144,
+ 0xccec0003,
+ 0xb4b003e8,
+ 0x0e0bf400,
+ 0x03e8eeec,
+ 0x7e01444d,
+/* 0x01b3: ticks_from_ns_quit */
+ 0xb2000352,
+ 0xfcb0fcce,
+/* 0x01bb: ticks_from_us */
+ 0xf900f8c0,
+ 0x4db0f9c0,
+ 0x527e0144,
+ 0xceb20003,
+ 0xf400b4b0,
+ 0xe4bd050b,
+/* 0x01d0: ticks_from_us_quit */
0xc0fcb0fc,
-/* 0x01ca: ticks_from_us */
- 0xc0f900f8,
- 0xd7f1b0f9,
- 0xd3f00144,
- 0x6b21f500,
- 0xb0ceb203,
- 0x0bf400b4,
-/* 0x01e3: ticks_from_us_quit */
- 0xfce4bd05,
- 0xf8c0fcb0,
-/* 0x01e9: ticks_to_us */
- 0x44d7f100,
- 0x00d3f001,
- 0xf8ecedff,
-/* 0x01f5: timer */
- 0xf990f900,
- 0x1032f480,
- 0xb003f898,
- 0x1cf40086,
- 0x0084bd4a,
- 0x0008f638,
- 0x340804bd,
- 0x980088cf,
- 0x98bb9a09,
- 0x00e9bb02,
- 0x0803feb5,
- 0x0088cf08,
- 0xf40284f0,
- 0x34081c1b,
- 0xa60088cf,
- 0x080bf4e0,
- 0x1cf4e8a6,
-/* 0x0239: timer_reset */
- 0xf634000d,
- 0x04bd000e,
-/* 0x0243: timer_enable */
- 0x089a0eb5,
- 0xf6380001,
- 0x04bd0008,
-/* 0x024c: timer_done */
- 0xfc1031f4,
+/* 0x01d6: ticks_to_us */
+ 0x444d00f8,
+ 0xecedff01,
+/* 0x01de: timer */
+ 0x90f900f8,
+ 0x32f480f9,
+ 0x03f89810,
+ 0xf40086b0,
+ 0x84bd4a1c,
+ 0x08f63800,
+ 0x0804bd00,
+ 0x0088cf34,
+ 0xbb9a0998,
+ 0xe9bb0298,
+ 0x03feb500,
+ 0x88cf0808,
+ 0x0284f000,
+ 0x081c1bf4,
+ 0x0088cf34,
+ 0x0bf4e0a6,
+ 0xf4e8a608,
+/* 0x0222: timer_reset */
+ 0x34000d1c,
+ 0xbd000ef6,
+ 0x9a0eb504,
+/* 0x022c: timer_enable */
+ 0x38000108,
+ 0xbd0008f6,
+/* 0x0235: timer_done */
+ 0x1031f404,
+ 0x90fc80fc,
+/* 0x023e: send_proc */
+ 0x80f900f8,
+ 0xe89890f9,
+ 0x04e99805,
+ 0xa60486f0,
+ 0x2a0bf489,
+ 0x940398c4,
+ 0x80b60488,
+ 0x008ebb18,
+ 0xb500fa98,
+ 0x8db5008a,
+ 0x028cb501,
+ 0xb6038bb5,
+ 0x94f00190,
+ 0x04e9b507,
+/* 0x0277: send_done */
+ 0xfc0231f4,
+ 0xf880fc90,
+/* 0x027d: find */
+ 0x0880f900,
+ 0x0131f458,
+/* 0x0284: find_loop */
+ 0xa6008a98,
+ 0x100bf4ae,
+ 0xb15880b6,
+ 0xf4026886,
+ 0x32f4f11b,
+/* 0x0299: find_done */
+ 0xfc8eb201,
+/* 0x029f: send */
+ 0x7e00f880,
+ 0xf400027d,
+ 0x00f89b01,
+/* 0x02a8: recv */
+ 0x80f990f9,
+ 0x9805e898,
+ 0x32f404e9,
+ 0xf489a601,
+ 0x89c43c0b,
+ 0x0180b603,
+ 0xb50784f0,
+ 0xea9805e8,
+ 0xfef0f902,
+ 0xf0f9018f,
+ 0x9994efb2,
+ 0x00e9bb04,
+ 0x9818e0b6,
+ 0xec9803eb,
+ 0x01ed9802,
+ 0xf900ee98,
+ 0xfef0fca5,
+ 0x31f400f8,
+/* 0x02f3: recv_done */
+ 0xfcf0fc01,
0xf890fc80,
-/* 0x0255: send_proc */
- 0xf980f900,
- 0x05e89890,
- 0xf004e998,
- 0x89a60486,
- 0xc42a0bf4,
- 0x88940398,
- 0x1880b604,
- 0x98008ebb,
- 0x8ab500fa,
- 0x018db500,
- 0xb5028cb5,
- 0x90b6038b,
- 0x0794f001,
- 0xf404e9b5,
-/* 0x028e: send_done */
- 0x90fc0231,
- 0x00f880fc,
-/* 0x0294: find */
- 0x580880f9,
-/* 0x029b: find_loop */
- 0x980131f4,
- 0xaea6008a,
- 0xb6100bf4,
- 0x86b15880,
- 0x1bf40268,
- 0x0132f4f1,
-/* 0x02b0: find_done */
- 0x80fc8eb2,
-/* 0x02b6: send */
- 0x947e00f8,
- 0x01f40002,
-/* 0x02bf: recv */
- 0xf900f89b,
- 0x9880f990,
- 0xe99805e8,
- 0x0132f404,
- 0x0bf489a6,
- 0x0389c43c,
- 0xf00180b6,
- 0xe8b50784,
- 0x02ea9805,
- 0x8ffef0f9,
- 0xb2f0f901,
- 0x049994ef,
- 0xb600e9bb,
- 0xeb9818e0,
- 0x02ec9803,
- 0x9801ed98,
- 0xa5f900ee,
- 0xf8fef0fc,
- 0x0131f400,
-/* 0x030a: recv_done */
- 0x80fcf0fc,
- 0x00f890fc,
-/* 0x0310: init */
- 0xcf010841,
- 0x11e70011,
- 0x14b60109,
- 0x0014fe08,
- 0xf000e041,
- 0x1c000013,
- 0xbd0001f6,
- 0x00ff0104,
- 0x0001f614,
- 0x020104bd,
- 0x080015f1,
- 0x01f61000,
- 0x4104bd00,
- 0x13f000e2,
- 0x0010fe00,
- 0x011031f4,
- 0xf6380001,
+/* 0x02f9: init */
+ 0x01084100,
+ 0xe70011cf,
+ 0xb6010911,
+ 0x14fe0814,
+ 0x00e04100,
+ 0x01f61c00,
+ 0x0104bd00,
+ 0xf61400ff,
0x04bd0001,
-/* 0x035a: init_proc */
- 0xf198580f,
- 0x0016b001,
- 0xf9fa0bf4,
- 0x58f0b615,
-/* 0x036b: mulu32_32_64 */
- 0xf9f20ef4,
- 0xf920f910,
- 0x9540f930,
- 0xd29510e1,
- 0xbdc4bd10,
- 0xc0edffb4,
- 0xb2301dff,
+ 0x15f10201,
+ 0x10000800,
+ 0xbd0001f6,
+ 0x00dd4104,
+ 0xffff14f1,
+ 0xf40010fe,
+ 0x01011031,
+ 0x01f63800,
+ 0x0f04bd00,
+/* 0x0341: init_proc */
+ 0x01f19858,
+ 0xf40016b0,
+ 0x15f9fa0b,
+ 0xf458f0b6,
+/* 0x0352: mulu32_32_64 */
+ 0x10f9f20e,
+ 0x30f920f9,
+ 0xe19540f9,
+ 0x10d29510,
+ 0xb4bdc4bd,
+ 0xffc0edff,
+ 0x34b2301d,
+ 0xffff34f1,
+ 0xb61034b6,
+ 0xc3bb1045,
+ 0x01b4bb00,
+ 0xb230e2ff,
0xff34f134,
0x1034b6ff,
0xbb1045b6,
0xb4bb00c3,
- 0x30e2ff01,
- 0x34f134b2,
- 0x34b6ffff,
- 0x1045b610,
- 0xbb00c3bb,
- 0x12ff01b4,
- 0x00b3bb30,
- 0x30fc40fc,
- 0x10fc20fc,
-/* 0x03ba: host_send */
- 0xb04100f8,
- 0x0011cf04,
- 0xcf04a042,
- 0x12a60022,
- 0xc42e0bf4,
- 0xee94071e,
- 0x70e0b704,
- 0x03eb9802,
- 0x9802ec98,
- 0xee9801ed,
- 0x02b67e00,
- 0x0110b600,
- 0x400f1ec4,
- 0x0ef604b0,
- 0xf404bd00,
-/* 0x03f6: host_send_done */
- 0x00f8c70e,
-/* 0x03f8: host_recv */
- 0xf14e4941,
- 0xa6525413,
- 0xb90bf4e1,
-/* 0x0404: host_recv_wait */
- 0xcf04cc41,
- 0xc8420011,
- 0x0022cf04,
- 0xa60816f0,
- 0xef0bf412,
- 0xb60723c4,
- 0x30b70434,
- 0x3bb502f0,
- 0x023cb503,
- 0xb5013db5,
- 0x20b6003e,
- 0x0f24f001,
- 0xf604c840,
- 0x04bd0002,
- 0x00004002,
+ 0x3012ff01,
+ 0xfc00b3bb,
+ 0xfc30fc40,
+ 0xf810fc20,
+/* 0x03a1: host_send */
+ 0x04b04100,
+ 0x420011cf,
+ 0x22cf04a0,
+ 0xf412a600,
+ 0x1ec42e0b,
+ 0x04ee9407,
+ 0x0270e0b7,
+ 0x9803eb98,
+ 0xed9802ec,
+ 0x00ee9801,
+ 0x00029f7e,
+ 0xc40110b6,
+ 0xb0400f1e,
+ 0x000ef604,
+ 0x0ef404bd,
+/* 0x03dd: host_send_done */
+/* 0x03df: host_recv */
+ 0xd100f8c7,
+ 0x52544e49,
+ 0x0bf4e1a6,
+/* 0x03e9: host_recv_wait */
+ 0x04cc41bb,
+ 0x420011cf,
+ 0x22cf04c8,
+ 0x0816f000,
+ 0x0bf412a6,
+ 0x0723c4ef,
+ 0xb70434b6,
+ 0xb502f030,
+ 0x3cb5033b,
+ 0x013db502,
+ 0xb6003eb5,
+ 0x24f00120,
+ 0x04c8400f,
0xbd0002f6,
-/* 0x0447: host_init */
- 0x4100f804,
- 0x14b60080,
- 0x7015f110,
- 0x04d04002,
- 0xbd0001f6,
- 0x00804104,
- 0xf11014b6,
- 0x4002f015,
- 0x01f604dc,
- 0x0104bd00,
- 0x04c44001,
- 0xbd0001f6,
-/* 0x0477: memx_func_enter */
- 0xf100f804,
- 0xf1162067,
- 0xf1f55d77,
- 0xb2ffff73,
- 0x00047e6e,
- 0xfdd8b200,
- 0x60f90487,
- 0xd0fc80f9,
- 0x2e7ee0fc,
- 0x77f10000,
- 0x73f1fffe,
- 0x6eb2ffff,
- 0x0000047e,
- 0x87fdd8b2,
- 0xf960f904,
- 0xfcd0fc80,
- 0x002e7ee0,
- 0xf067f100,
- 0x7e6eb226,
+ 0x00400204,
+ 0x0002f600,
+ 0x00f804bd,
+/* 0x042c: host_init */
+ 0xb6008041,
+ 0x15f11014,
+ 0xd0400270,
+ 0x0001f604,
+ 0x804104bd,
+ 0x1014b600,
+ 0x02f015f1,
+ 0xf604dc40,
+ 0x04bd0001,
+ 0xc4400101,
+ 0x0001f604,
+ 0x00f804bd,
+/* 0x045c: memx_func_enter */
+ 0x162067f1,
+ 0xf55d77f1,
+ 0x047e6eb2,
+ 0xd8b20000,
+ 0xf90487fd,
+ 0xfc80f960,
+ 0x7ee0fcd0,
+ 0x0700002d,
+ 0x7e6eb2fe,
0xb2000004,
0x0487fdd8,
0x80f960f9,
0xe0fcd0fc,
- 0x00002e7e,
- 0xe0400406,
- 0x0006f607,
-/* 0x04de: memx_func_enter_wait */
- 0xc04604bd,
- 0x0066cf07,
- 0xf40464f0,
- 0x2c06f70b,
- 0xb50066cf,
- 0x00f8f106,
-/* 0x04f4: memx_func_leave */
- 0x66cf2c06,
- 0xf206b500,
- 0xe4400406,
- 0x0006f607,
-/* 0x0506: memx_func_leave_wait */
- 0xc04604bd,
- 0x0066cf07,
- 0xf40464f0,
- 0x67f1f71b,
- 0x77f126f0,
- 0x73f00001,
- 0x7e6eb200,
- 0xb2000004,
- 0x0587fdd8,
- 0x80f960f9,
- 0xe0fcd0fc,
- 0x00002e7e,
- 0x162067f1,
+ 0x00002d7e,
+ 0x26f067f1,
0x047e6eb2,
0xd8b20000,
- 0xf90587fd,
+ 0xf90487fd,
0xfc80f960,
0x7ee0fcd0,
- 0xf100002e,
- 0xf00aa277,
- 0x6eb20073,
+ 0x0600002d,
+ 0x07e04004,
+ 0xbd0006f6,
+/* 0x04b9: memx_func_enter_wait */
+ 0x07c04604,
+ 0xf00066cf,
+ 0x0bf40464,
+ 0xcf2c06f7,
+ 0x06b50066,
+/* 0x04cf: memx_func_leave */
+ 0x0600f8f1,
+ 0x0066cf2c,
+ 0x06f206b5,
+ 0x07e44004,
+ 0xbd0006f6,
+/* 0x04e1: memx_func_leave_wait */
+ 0x07c04604,
+ 0xf00066cf,
+ 0x1bf40464,
+ 0xf067f1f7,
+ 0xb2010726,
+ 0x00047e6e,
+ 0xfdd8b200,
+ 0x60f90587,
+ 0xd0fc80f9,
+ 0x2d7ee0fc,
+ 0x67f10000,
+ 0x6eb21620,
0x0000047e,
0x87fdd8b2,
0xf960f905,
0xfcd0fc80,
- 0x002e7ee0,
-/* 0x056f: memx_func_wait_vblank */
- 0xb600f800,
- 0x00f80410,
-/* 0x0574: memx_func_wr32 */
- 0x98001698,
- 0x10b60115,
- 0xf960f908,
- 0xfcd0fc50,
- 0x002e7ee0,
- 0x0242b600,
- 0xf8e81bf4,
-/* 0x0591: memx_func_wait */
- 0xcf2c0800,
- 0x1e980088,
- 0x011d9800,
- 0x98021c98,
- 0x10b6031b,
- 0x00797e10,
-/* 0x05ab: memx_func_delay */
- 0x9800f800,
- 0x10b6001e,
- 0x005d7e04,
-/* 0x05b7: memx_func_train */
- 0xf800f800,
-/* 0x05b9: memx_exec */
- 0xf9e0f900,
- 0xb2c1b2d0,
-/* 0x05c1: memx_exec_next */
- 0x001398b2,
- 0xe70410b6,
- 0xe701f034,
- 0xb601e033,
- 0x30f00132,
- 0xde35980c,
- 0x12a655f9,
- 0x98e51ef4,
- 0x0c98f10b,
- 0x02cbbbf2,
- 0xcf07c44b,
- 0xd0fc00bb,
- 0xb67ee0fc,
- 0x00f80002,
-/* 0x05f8: memx_info */
- 0xf401c670,
-/* 0x05fe: memx_info_data */
- 0xcc4c0c0b,
- 0x08004b03,
-/* 0x0607: memx_info_train */
- 0x4c090ef4,
- 0x004b0bcc,
-/* 0x060d: memx_info_send */
- 0x02b67e01,
-/* 0x0613: memx_recv */
- 0xb000f800,
- 0x0bf401d6,
- 0x00d6b0a3,
- 0xf8dc0bf4,
-/* 0x0621: memx_init */
-/* 0x0623: perf_recv */
+ 0x002d7ee0,
+ 0x0aa24700,
+ 0x047e6eb2,
+ 0xd8b20000,
+ 0xf90587fd,
+ 0xfc80f960,
+ 0x7ee0fcd0,
+ 0xf800002d,
+/* 0x0541: memx_func_wait_vblank */
+ 0x0410b600,
+/* 0x0546: memx_func_wr32 */
+ 0x169800f8,
+ 0x01159800,
+ 0xf90810b6,
+ 0xfc50f960,
+ 0x7ee0fcd0,
+ 0xb600002d,
+ 0x1bf40242,
+/* 0x0563: memx_func_wait */
+ 0x0800f8e8,
+ 0x0088cf2c,
+ 0x98001e98,
+ 0x1c98011d,
+ 0x031b9802,
+ 0x7e1010b6,
+ 0xf8000074,
+/* 0x057d: memx_func_delay */
+ 0x001e9800,
+ 0x7e0410b6,
+ 0xf8000058,
+/* 0x0589: memx_func_train */
+/* 0x058b: memx_exec */
+ 0xf900f800,
+ 0xb2d0f9e0,
+/* 0x0593: memx_exec_next */
+ 0x98b2b2c1,
+ 0x10b60013,
+ 0xf034e704,
+ 0xe033e701,
+ 0x0132b601,
+ 0x980c30f0,
+ 0x55f9de35,
+ 0x1ef412a6,
+ 0xf10b98e5,
+ 0xbbf20c98,
+ 0xc44b02cb,
+ 0x00bbcf07,
+ 0xe0fcd0fc,
+ 0x00029f7e,
+/* 0x05ca: memx_info */
+ 0xc67000f8,
+ 0x0c0bf401,
+/* 0x05d0: memx_info_data */
+ 0x4b03cc4c,
+ 0x0ef40800,
+/* 0x05d9: memx_info_train */
+ 0x0bcc4c09,
+/* 0x05df: memx_info_send */
+ 0x7e01004b,
+ 0xf800029f,
+/* 0x05e5: memx_recv */
+ 0x01d6b000,
+ 0xb0a30bf4,
+ 0x0bf400d6,
+/* 0x05f3: memx_init */
+ 0xf800f8dc,
+/* 0x05f5: perf_recv */
+/* 0x05f7: perf_init */
0xf800f800,
-/* 0x0625: perf_init */
-/* 0x0627: i2c_drive_scl */
- 0xb000f800,
- 0x0bf40036,
- 0x07e0400d,
- 0xbd0001f6,
-/* 0x0637: i2c_drive_scl_lo */
- 0x4000f804,
- 0x01f607e4,
- 0xf804bd00,
-/* 0x0641: i2c_drive_sda */
+/* 0x05f9: i2c_drive_scl */
0x0036b000,
0x400d0bf4,
- 0x02f607e0,
+ 0x01f607e0,
0xf804bd00,
-/* 0x0651: i2c_drive_sda_lo */
+/* 0x0609: i2c_drive_scl_lo */
0x07e44000,
+ 0xbd0001f6,
+/* 0x0613: i2c_drive_sda */
+ 0xb000f804,
+ 0x0bf40036,
+ 0x07e0400d,
0xbd0002f6,
-/* 0x065b: i2c_sense_scl */
- 0xf400f804,
- 0xc4430132,
- 0x0033cf07,
- 0xf40431fd,
- 0x31f4060b,
-/* 0x066d: i2c_sense_scl_done */
-/* 0x066f: i2c_sense_sda */
- 0xf400f801,
- 0xc4430132,
- 0x0033cf07,
- 0xf40432fd,
- 0x31f4060b,
-/* 0x0681: i2c_sense_sda_done */
-/* 0x0683: i2c_raise_scl */
- 0xf900f801,
- 0x08984440,
- 0x277e0103,
-/* 0x068e: i2c_raise_scl_wait */
- 0xe84e0006,
- 0x005d7e03,
- 0x065b7e00,
- 0x0901f400,
- 0xf40142b6,
-/* 0x06a2: i2c_raise_scl_done */
- 0x40fcef1b,
-/* 0x06a6: i2c_start */
- 0x5b7e00f8,
- 0x11f40006,
- 0x066f7e0d,
- 0x0611f400,
-/* 0x06b7: i2c_start_rep */
- 0x032e0ef4,
- 0x06277e00,
- 0x7e010300,
- 0xbb000641,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x0006837e,
- 0xf40464b6,
-/* 0x06e2: i2c_start_send */
- 0x00031d11,
- 0x0006417e,
- 0x7e13884e,
- 0x0300005d,
- 0x06277e00,
+/* 0x0623: i2c_drive_sda_lo */
+ 0x4000f804,
+ 0x02f607e4,
+ 0xf804bd00,
+/* 0x062d: i2c_sense_scl */
+ 0x0132f400,
+ 0xcf07c443,
+ 0x31fd0033,
+ 0x060bf404,
+/* 0x063f: i2c_sense_scl_done */
+ 0xf80131f4,
+/* 0x0641: i2c_sense_sda */
+ 0x0132f400,
+ 0xcf07c443,
+ 0x32fd0033,
+ 0x060bf404,
+/* 0x0653: i2c_sense_sda_done */
+ 0xf80131f4,
+/* 0x0655: i2c_raise_scl */
+ 0x4440f900,
+ 0x01030898,
+ 0x0005f97e,
+/* 0x0660: i2c_raise_scl_wait */
+ 0x7e03e84e,
+ 0x7e000058,
+ 0xf400062d,
+ 0x42b60901,
+ 0xef1bf401,
+/* 0x0674: i2c_raise_scl_done */
+ 0x00f840fc,
+/* 0x0678: i2c_start */
+ 0x00062d7e,
+ 0x7e0d11f4,
+ 0xf4000641,
+ 0x0ef40611,
+/* 0x0689: i2c_start_rep */
+ 0x7e00032e,
+ 0x030005f9,
+ 0x06137e01,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x557e50fc,
+ 0x64b60006,
+ 0x1d11f404,
+/* 0x06b4: i2c_start_send */
+ 0x137e0003,
+ 0x884e0006,
+ 0x00587e13,
+ 0x7e000300,
+ 0x4e0005f9,
+ 0x587e1388,
+/* 0x06ce: i2c_start_out */
+ 0x00f80000,
+/* 0x06d0: i2c_stop */
+ 0xf97e0003,
+ 0x00030005,
+ 0x0006137e,
+ 0x7e03e84e,
+ 0x03000058,
+ 0x05f97e01,
0x13884e00,
- 0x00005d7e,
-/* 0x06fc: i2c_start_out */
-/* 0x06fe: i2c_stop */
- 0x000300f8,
- 0x0006277e,
- 0x417e0003,
- 0xe84e0006,
- 0x005d7e03,
- 0x7e010300,
- 0x4e000627,
- 0x5d7e1388,
- 0x01030000,
- 0x0006417e,
- 0x7e13884e,
- 0xf800005d,
-/* 0x072d: i2c_bitw */
- 0x06417e00,
- 0x03e84e00,
- 0x00005d7e,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x06837e50,
- 0x0464b600,
- 0x4e1711f4,
- 0x5d7e1388,
- 0x00030000,
- 0x0006277e,
- 0x7e13884e,
-/* 0x076b: i2c_bitw_out */
- 0xf800005d,
-/* 0x076d: i2c_bitr */
- 0x7e010300,
- 0x4e000641,
- 0x5d7e03e8,
+ 0x0000587e,
+ 0x137e0103,
+ 0x884e0006,
+ 0x00587e13,
+/* 0x06ff: i2c_bitw */
+ 0x7e00f800,
+ 0x4e000613,
+ 0x587e03e8,
0x76bb0000,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000683,
+ 0xb6000655,
0x11f40464,
- 0x066f7e1a,
- 0x7e000300,
- 0x4e000627,
- 0x5d7e1388,
- 0x3cf00000,
- 0x0131f401,
-/* 0x07b0: i2c_bitr_done */
-/* 0x07b2: i2c_get_byte */
- 0x000500f8,
-/* 0x07b6: i2c_get_byte_next */
- 0x54b60804,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x6d7e50fc,
- 0x64b60007,
- 0x2a11f404,
- 0xb60553fd,
- 0x1bf40142,
- 0xbb0103d8,
+ 0x13884e17,
+ 0x0000587e,
+ 0xf97e0003,
+ 0x884e0005,
+ 0x00587e13,
+/* 0x073d: i2c_bitw_out */
+/* 0x073f: i2c_bitr */
+ 0x0300f800,
+ 0x06137e01,
+ 0x03e84e00,
+ 0x0000587e,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x06557e50,
+ 0x0464b600,
+ 0x7e1a11f4,
+ 0x03000641,
+ 0x05f97e00,
+ 0x13884e00,
+ 0x0000587e,
+ 0xf4013cf0,
+/* 0x0782: i2c_bitr_done */
+ 0x00f80131,
+/* 0x0784: i2c_get_byte */
+ 0x08040005,
+/* 0x0788: i2c_get_byte_next */
+ 0xbb0154b6,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x00072d7e,
-/* 0x07ff: i2c_get_byte_done */
- 0xf80464b6,
-/* 0x0801: i2c_put_byte */
-/* 0x0803: i2c_put_byte_next */
- 0xb6080400,
- 0x54ff0142,
- 0x0076bb38,
+ 0x00073f7e,
+ 0xf40464b6,
+ 0x53fd2a11,
+ 0x0142b605,
+ 0x03d81bf4,
+ 0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0x2d7e50fc,
- 0x64b60007,
- 0x3411f404,
- 0xf40046b0,
- 0x76bbd81b,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0x7e50fc04,
- 0xb600076d,
- 0x11f40464,
- 0x0076bb0f,
- 0xf40136b0,
- 0x32f4061b,
-/* 0x0859: i2c_put_byte_done */
-/* 0x085b: i2c_addr */
- 0xbb00f801,
+ 0xff7e50fc,
+ 0x64b60006,
+/* 0x07d1: i2c_get_byte_done */
+/* 0x07d3: i2c_put_byte */
+ 0x0400f804,
+/* 0x07d5: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0006a67e,
+ 0x0006ff7e,
0xf40464b6,
- 0xc3e72911,
- 0x34b6012e,
- 0x0553fd01,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x08017e50,
- 0x0464b600,
-/* 0x08a0: i2c_addr_done */
-/* 0x08a2: i2c_acquire_addr */
- 0xcec700f8,
- 0x05e4b6f8,
- 0xd014e0b7,
-/* 0x08ae: i2c_acquire */
- 0xa27e00f8,
- 0x047e0008,
- 0xd9f00000,
- 0x002e7e03,
-/* 0x08bf: i2c_release */
- 0x7e00f800,
- 0x7e0008a2,
- 0xf0000004,
- 0x2e7e03da,
- 0x00f80000,
-/* 0x08d0: i2c_recv */
- 0xc70132f4,
- 0x14b6f8c1,
- 0x2816b002,
- 0x01371ff5,
- 0x0cf413b8,
- 0x00329800,
- 0x0ccc13b8,
- 0x00319800,
- 0xf90231f4,
- 0xf9e0f9d0,
- 0x0067f1d0,
- 0x0063f100,
- 0x01679210,
+ 0x46b03411,
+ 0xd81bf400,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x08ae7e50,
+ 0x073f7e50,
0x0464b600,
- 0xd6b0d0fc,
- 0xb01bf500,
- 0xbb000500,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x082b: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x082d: i2c_addr */
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x787e50fc,
+ 0x64b60006,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb60007d3,
+/* 0x0872: i2c_addr_done */
+ 0x00f80464,
+/* 0x0874: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b705e4,
+ 0x00f8d014,
+/* 0x0880: i2c_acquire */
+ 0x0008747e,
+ 0x0000047e,
+ 0x7e03d9f0,
+ 0xf800002d,
+/* 0x0891: i2c_release */
+ 0x08747e00,
+ 0x00047e00,
+ 0x03daf000,
+ 0x00002d7e,
+/* 0x08a2: i2c_recv */
+ 0x32f400f8,
+ 0xf8c1c701,
+ 0xb00214b6,
+ 0x1ff52816,
+ 0x13b80134,
+ 0x98000cf4,
+ 0x13b80032,
+ 0x98000ccc,
+ 0x31f40031,
+ 0xf9d0f902,
+ 0xd6d0f9e0,
+ 0x10000000,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x00085b7e,
- 0xf50464b6,
- 0xc700cc11,
- 0x76bbe0c5,
+ 0x0008807e,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b01bf5,
+ 0x76bb0005,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000801,
+ 0xb600082d,
0x11f50464,
- 0x010500a9,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x085b7e50,
- 0x0464b600,
- 0x008711f5,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x07b27e50,
- 0x0464b600,
- 0xcb6711f4,
- 0x76bbe05b,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0x7e50fc04,
- 0xb60006fe,
- 0x5bb20464,
- 0x0ef474bd,
-/* 0x09d5: i2c_recv_not_rd08 */
- 0x01d6b041,
- 0x053b1bf4,
- 0x085b7e00,
- 0x3211f400,
- 0x7ee0c5c7,
- 0xf4000801,
- 0x00052811,
- 0x00085b7e,
- 0xc71f11f4,
- 0x017ee0b5,
- 0x11f40008,
- 0x06fe7e15,
- 0xc774bd00,
- 0x1bf408c5,
- 0x0232f409,
-/* 0x0a13: i2c_recv_not_wr08 */
-/* 0x0a13: i2c_recv_done */
- 0xc7030ef4,
- 0xbf7ef8ce,
- 0xe0fc0008,
- 0x12f4d0fc,
- 0x7e7cb209,
-/* 0x0a27: i2c_recv_exit */
- 0xf80002b6,
-/* 0x0a29: i2c_init */
-/* 0x0a2b: test_recv */
- 0x4100f800,
- 0x11cf0458,
- 0x0110b600,
- 0xf6045840,
- 0x04bd0001,
- 0xd900e7f1,
- 0x134fe3f1,
- 0x0001f57e,
-/* 0x0a4a: test_init */
+ 0xc5c700cc,
+ 0x0076bbe0,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xd37e50fc,
+ 0x64b60007,
+ 0xa911f504,
+ 0xbb010500,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x00082d7e,
+ 0xf50464b6,
+ 0xbb008711,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0007847e,
+ 0xf40464b6,
+ 0x5bcb6711,
+ 0x0076bbe0,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xd07e50fc,
+ 0x64b60006,
+ 0xbd5bb204,
+ 0x410ef474,
+/* 0x09a4: i2c_recv_not_rd08 */
+ 0xf401d6b0,
+ 0x00053b1b,
+ 0x00082d7e,
+ 0xc73211f4,
+ 0xd37ee0c5,
+ 0x11f40007,
+ 0x7e000528,
+ 0xf400082d,
+ 0xb5c71f11,
+ 0x07d37ee0,
+ 0x1511f400,
+ 0x0006d07e,
+ 0xc5c774bd,
+ 0x091bf408,
+ 0xf40232f4,
+/* 0x09e2: i2c_recv_not_wr08 */
+/* 0x09e2: i2c_recv_done */
+ 0xcec7030e,
+ 0x08917ef8,
+ 0xfce0fc00,
+ 0x0912f4d0,
+ 0x9f7e7cb2,
+/* 0x09f6: i2c_recv_exit */
+ 0x00f80002,
+/* 0x09f8: i2c_init */
+/* 0x09fa: test_recv */
+ 0x584100f8,
+ 0x0011cf04,
+ 0x400110b6,
+ 0x01f60458,
+ 0xde04bd00,
+ 0x134fd900,
+ 0x0001de7e,
+/* 0x0a16: test_init */
0x004e00f8,
- 0x01f57e08,
-/* 0x0a53: idle_recv */
+ 0x01de7e08,
+/* 0x0a1f: idle_recv */
0xf800f800,
-/* 0x0a55: idle */
+/* 0x0a21: idle */
0x0031f400,
0xcf045441,
0x10b60011,
0x04544001,
0xbd0001f6,
-/* 0x0a69: idle_loop */
+/* 0x0a35: idle_loop */
0xf4580104,
-/* 0x0a6e: idle_proc */
-/* 0x0a6e: idle_proc_exec */
+/* 0x0a3a: idle_proc */
+/* 0x0a3a: idle_proc_exec */
0x10f90232,
- 0xbf7e1eb2,
+ 0xa87e1eb2,
0x10fc0002,
0xf40911f4,
0x0ef40231,
-/* 0x0a81: idle_proc_next */
+/* 0x0a4d: idle_proc_next */
0x5810b6f0,
0x1bf41fa6,
0xe002f4e8,
@@ -1726,4 +1713,17 @@ uint32_t gk208_pmu_code[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index 516569270bac..e83341815ec6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -24,8 +24,8 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
/* 0x0058: proc_list_head */
0x54534f48,
- 0x00000507,
- 0x000004a4,
+ 0x0000050a,
+ 0x000004a7,
0x00000000,
0x00000000,
0x00000000,
@@ -46,8 +46,8 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x00000837,
- 0x00000829,
+ 0x0000083a,
+ 0x0000082c,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x0000083b,
- 0x00000839,
+ 0x0000083e,
+ 0x0000083c,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000c6b,
- 0x00000b0e,
+ 0x00000c6e,
+ 0x00000b11,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000c94,
- 0x00000c6d,
+ 0x00000c97,
+ 0x00000c70,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000ca0,
- 0x00000c9e,
+ 0x00000ca3,
+ 0x00000ca1,
0x00000000,
0x00000000,
0x00000000,
@@ -229,26 +229,26 @@ uint32_t gt215_pmu_data[] = {
/* 0x0370: memx_func_head */
0x00000001,
0x00000000,
- 0x00000546,
+ 0x00000549,
/* 0x037c: memx_func_next */
0x00000002,
0x00000000,
- 0x0000059d,
+ 0x000005a0,
0x00000003,
0x00000002,
- 0x0000062f,
+ 0x00000632,
0x00040004,
0x00000000,
- 0x0000064b,
+ 0x0000064e,
0x00010005,
0x00000000,
- 0x00000668,
+ 0x0000066b,
0x00010006,
0x00000000,
- 0x000005ed,
+ 0x000005f0,
0x00000007,
0x00000000,
- 0x00000673,
+ 0x00000676,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -917,947 +917,947 @@ uint32_t gt215_pmu_data[] = {
};
uint32_t gt215_pmu_code[] = {
- 0x03930ef5,
+ 0x03920ef5,
/* 0x0004: rd32 */
0x07a007f1,
0xd00604b6,
0x04bd000e,
- 0xf001d7f0,
- 0x07f101d3,
- 0x04b607ac,
- 0x000dd006,
-/* 0x0022: rd32_wait */
- 0xd7f104bd,
- 0xd4b607ac,
- 0x00ddcf06,
- 0x7000d4f1,
- 0xf1f21bf4,
- 0xb607a4d7,
- 0xddcf06d4,
-/* 0x003f: wr32 */
- 0xf100f800,
- 0xb607a007,
- 0x0ed00604,
- 0xf104bd00,
- 0xb607a407,
+ 0x0001d7f1,
+ 0xf101d3f0,
+ 0xb607ac07,
0x0dd00604,
- 0xf004bd00,
- 0xd5f002d7,
- 0x01d3f0f0,
- 0x07ac07f1,
+/* 0x0023: rd32_wait */
+ 0xf104bd00,
+ 0xb607acd7,
+ 0xddcf06d4,
+ 0x00d4f100,
+ 0xf21bf470,
+ 0x07a4d7f1,
+ 0xcf06d4b6,
+ 0x00f800dd,
+/* 0x0040: wr32 */
+ 0x07a007f1,
+ 0xd00604b6,
+ 0x04bd000e,
+ 0x07a407f1,
0xd00604b6,
0x04bd000d,
-/* 0x006c: wr32_wait */
- 0x07acd7f1,
- 0xcf06d4b6,
- 0xd4f100dd,
- 0x1bf47000,
-/* 0x007f: nsec */
- 0xf900f8f2,
- 0xf080f990,
- 0x84b62c87,
- 0x0088cf06,
-/* 0x008c: nsec_loop */
- 0xb62c97f0,
- 0x99cf0694,
- 0x0298bb00,
- 0xf4069eb8,
- 0x80fcf11e,
- 0x00f890fc,
-/* 0x00a4: wait */
- 0x80f990f9,
- 0xb62c87f0,
- 0x88cf0684,
-/* 0x00b1: wait_loop */
- 0x02eeb900,
- 0xb90421f4,
- 0xadfd02da,
- 0x06acb804,
- 0xf0150bf4,
+ 0x00f2d7f1,
+ 0xf101d3f0,
+ 0xb607ac07,
+ 0x0dd00604,
+/* 0x006b: wr32_wait */
+ 0xf104bd00,
+ 0xb607acd7,
+ 0xddcf06d4,
+ 0x00d4f100,
+ 0xf21bf470,
+/* 0x007e: nsec */
+ 0x90f900f8,
+ 0x87f080f9,
+ 0x0684b62c,
+/* 0x008b: nsec_loop */
+ 0xf00088cf,
0x94b62c97,
0x0099cf06,
0xb80298bb,
- 0x1ef4069b,
-/* 0x00d5: wait_done */
- 0xfc80fcdf,
-/* 0x00db: intr_watchdog */
- 0x9800f890,
- 0x96b003e9,
- 0x2a0bf400,
- 0xbb9a0a98,
- 0x1cf4029a,
- 0x01d7f00f,
- 0x02d221f5,
- 0x0ef494bd,
-/* 0x00f9: intr_watchdog_next_time */
- 0x9b0a9815,
- 0xf400a6b0,
- 0x9ab8090b,
- 0x061cf406,
-/* 0x0108: intr_watchdog_next_time_set */
-/* 0x010b: intr_watchdog_next_proc */
- 0x809b0980,
- 0xe0b603e9,
- 0x68e6b158,
- 0xc61bf402,
-/* 0x011a: intr */
- 0x00f900f8,
- 0x80f904bd,
- 0xa0f990f9,
- 0xc0f9b0f9,
- 0xe0f9d0f9,
- 0xf7f0f0f9,
- 0x0188fe00,
- 0x87f180f9,
- 0x84b605d0,
+ 0x1ef4069e,
+ 0xfc80fcf1,
+/* 0x00a3: wait */
+ 0xf900f890,
+ 0xf080f990,
+ 0x84b62c87,
0x0088cf06,
- 0xf10180b6,
- 0xb605d007,
+/* 0x00b0: wait_loop */
+ 0xf402eeb9,
+ 0xdab90421,
+ 0x04adfd02,
+ 0xf406acb8,
+ 0x97f0150b,
+ 0x0694b62c,
+ 0xbb0099cf,
+ 0x9bb80298,
+ 0xdf1ef406,
+/* 0x00d4: wait_done */
+ 0x90fc80fc,
+/* 0x00da: intr_watchdog */
+ 0xe99800f8,
+ 0x0096b003,
+ 0x982a0bf4,
+ 0x9abb9a0a,
+ 0x0f1cf402,
+ 0xf501d7f0,
+ 0xbd02d121,
+ 0x150ef494,
+/* 0x00f8: intr_watchdog_next_time */
+ 0xb09b0a98,
+ 0x0bf400a6,
+ 0x069ab809,
+/* 0x0107: intr_watchdog_next_time_set */
+ 0x80061cf4,
+/* 0x010a: intr_watchdog_next_proc */
+ 0xe9809b09,
+ 0x58e0b603,
+ 0x0268e6b1,
+ 0xf8c61bf4,
+/* 0x0119: intr */
+ 0xbd00f900,
+ 0xf980f904,
+ 0xf9a0f990,
+ 0xf9c0f9b0,
+ 0xf9e0f9d0,
+ 0x00f7f0f0,
+ 0xf90188fe,
+ 0xd087f180,
+ 0x0684b605,
+ 0xb60088cf,
+ 0x07f10180,
+ 0x04b605d0,
+ 0x0008d006,
+ 0x87f004bd,
+ 0x0684b608,
+ 0xc40088cf,
+ 0x0bf40289,
+ 0x9b008023,
+ 0xf458e7f0,
+ 0x0998da21,
+ 0x0096b09b,
+ 0xf0110bf4,
+ 0x04b63407,
+ 0x0009d006,
+ 0x098004bd,
+/* 0x017d: intr_skip_watchdog */
+ 0x0089e49a,
+ 0x480bf408,
+ 0x068897f1,
+ 0xcf0694b6,
+ 0x9ac40099,
+ 0x2c0bf402,
+ 0x04c0c7f1,
+ 0xcf06c4b6,
+ 0xc0f900cc,
+ 0x4f48e7f1,
+ 0x5453e3f1,
+ 0xf500d7f0,
+ 0xfc033621,
+ 0xc007f1c0,
+ 0x0604b604,
+ 0xbd000cd0,
+/* 0x01bd: intr_subintr_skip_fifo */
+ 0x8807f104,
+ 0x0604b606,
+ 0xbd0009d0,
+/* 0x01c9: intr_skip_subintr */
+ 0xe097f104,
+ 0xfd90bd00,
+ 0x07f00489,
+ 0x0604b604,
+ 0xbd0008d0,
+ 0xfe80fc04,
+ 0xf0fc0088,
+ 0xd0fce0fc,
+ 0xb0fcc0fc,
+ 0x90fca0fc,
+ 0x00fc80fc,
+ 0xf80032f4,
+/* 0x01f9: ticks_from_ns */
+ 0xf9c0f901,
+ 0xcbd7f1b0,
+ 0x00d3f000,
+ 0x040b21f5,
+ 0x03e8ccec,
+ 0xf400b4b0,
+ 0xeeec120b,
+ 0xd7f103e8,
+ 0xd3f000cb,
+ 0x0b21f500,
+/* 0x0221: ticks_from_ns_quit */
+ 0x02ceb904,
+ 0xc0fcb0fc,
+/* 0x022a: ticks_from_us */
+ 0xc0f900f8,
+ 0xd7f1b0f9,
+ 0xd3f000cb,
+ 0x0b21f500,
+ 0x02ceb904,
+ 0xf400b4b0,
+ 0xe4bd050b,
+/* 0x0244: ticks_from_us_quit */
+ 0xc0fcb0fc,
+/* 0x024a: ticks_to_us */
+ 0xd7f100f8,
+ 0xd3f000cb,
+ 0xecedff00,
+/* 0x0256: timer */
+ 0x90f900f8,
+ 0x32f480f9,
+ 0x03f89810,
+ 0xf40086b0,
+ 0x84bd651c,
+ 0xb63807f0,
0x08d00604,
0xf004bd00,
- 0x84b60887,
+ 0x84b63487,
0x0088cf06,
- 0xf40289c4,
- 0x0080230b,
- 0x58e7f09b,
- 0x98db21f4,
- 0x96b09b09,
- 0x110bf400,
+ 0xbb9a0998,
+ 0xe9bb0298,
+ 0x03fe8000,
+ 0xb60887f0,
+ 0x88cf0684,
+ 0x0284f000,
+ 0xf0261bf4,
+ 0x84b63487,
+ 0x0088cf06,
+ 0xf406e0b8,
+ 0xe8b8090b,
+ 0x111cf406,
+/* 0x02ac: timer_reset */
0xb63407f0,
- 0x09d00604,
+ 0x0ed00604,
0x8004bd00,
-/* 0x017e: intr_skip_watchdog */
- 0x89e49a09,
- 0x0bf40800,
- 0x8897f148,
- 0x0694b606,
- 0xc40099cf,
- 0x0bf4029a,
- 0xc0c7f12c,
- 0x06c4b604,
- 0xf900cccf,
- 0x48e7f1c0,
- 0x53e3f14f,
- 0x00d7f054,
- 0x033721f5,
- 0x07f1c0fc,
- 0x04b604c0,
- 0x000cd006,
-/* 0x01be: intr_subintr_skip_fifo */
- 0x07f104bd,
- 0x04b60688,
- 0x0009d006,
-/* 0x01ca: intr_skip_subintr */
- 0x97f104bd,
- 0x90bd00e0,
- 0xf00489fd,
- 0x04b60407,
- 0x0008d006,
- 0x80fc04bd,
- 0xfc0088fe,
- 0xfce0fcf0,
- 0xfcc0fcd0,
- 0xfca0fcb0,
- 0xfc80fc90,
- 0x0032f400,
-/* 0x01fa: ticks_from_ns */
- 0xc0f901f8,
- 0xd7f1b0f9,
- 0xd3f000cb,
- 0x0821f500,
- 0xe8ccec04,
- 0x00b4b003,
- 0xec120bf4,
- 0xf103e8ee,
- 0xf000cbd7,
- 0x21f500d3,
-/* 0x0222: ticks_from_ns_quit */
- 0xceb90408,
- 0xfcb0fc02,
-/* 0x022b: ticks_from_us */
- 0xf900f8c0,
- 0xf1b0f9c0,
- 0xf000cbd7,
- 0x21f500d3,
- 0xceb90408,
- 0x00b4b002,
- 0xbd050bf4,
-/* 0x0245: ticks_from_us_quit */
- 0xfcb0fce4,
-/* 0x024b: ticks_to_us */
- 0xf100f8c0,
- 0xf000cbd7,
- 0xedff00d3,
-/* 0x0257: timer */
- 0xf900f8ec,
- 0xf480f990,
- 0xf8981032,
- 0x0086b003,
- 0xbd651cf4,
- 0x3807f084,
+/* 0x02ba: timer_enable */
+ 0x87f09a0e,
+ 0x3807f001,
0xd00604b6,
0x04bd0008,
- 0xb63487f0,
- 0x88cf0684,
- 0x9a099800,
- 0xbb0298bb,
- 0xfe8000e9,
- 0x0887f003,
- 0xcf0684b6,
- 0x84f00088,
- 0x261bf402,
- 0xb63487f0,
- 0x88cf0684,
- 0x06e0b800,
- 0xb8090bf4,
- 0x1cf406e8,
-/* 0x02ad: timer_reset */
- 0x3407f011,
- 0xd00604b6,
- 0x04bd000e,
-/* 0x02bb: timer_enable */
- 0xf09a0e80,
- 0x07f00187,
- 0x0604b638,
- 0xbd0008d0,
-/* 0x02c9: timer_done */
- 0x1031f404,
+/* 0x02c8: timer_done */
+ 0xfc1031f4,
+ 0xf890fc80,
+/* 0x02d1: send_proc */
+ 0xf980f900,
+ 0x05e89890,
+ 0xf004e998,
+ 0x89b80486,
+ 0x2a0bf406,
+ 0x940398c4,
+ 0x80b60488,
+ 0x008ebb18,
+ 0x8000fa98,
+ 0x8d80008a,
+ 0x028c8001,
+ 0xb6038b80,
+ 0x94f00190,
+ 0x04e98007,
+/* 0x030b: send_done */
+ 0xfc0231f4,
+ 0xf880fc90,
+/* 0x0311: find */
+ 0xf080f900,
+ 0x31f45887,
+/* 0x0319: find_loop */
+ 0x008a9801,
+ 0xf406aeb8,
+ 0x80b6100b,
+ 0x6886b158,
+ 0xf01bf402,
+/* 0x032f: find_done */
+ 0xb90132f4,
+ 0x80fc028e,
+/* 0x0336: send */
+ 0x21f500f8,
+ 0x01f40311,
+/* 0x033f: recv */
+ 0xf900f897,
+ 0x9880f990,
+ 0xe99805e8,
+ 0x0132f404,
+ 0xf40689b8,
+ 0x89c43d0b,
+ 0x0180b603,
+ 0x800784f0,
+ 0xea9805e8,
+ 0xfef0f902,
+ 0xf0f9018f,
+ 0x9402efb9,
+ 0xe9bb0499,
+ 0x18e0b600,
+ 0x9803eb98,
+ 0xed9802ec,
+ 0x00ee9801,
+ 0xf0fca5f9,
+ 0xf400f8fe,
+ 0xf0fc0131,
+/* 0x038c: recv_done */
0x90fc80fc,
-/* 0x02d2: send_proc */
- 0x80f900f8,
- 0xe89890f9,
- 0x04e99805,
- 0xb80486f0,
- 0x0bf40689,
- 0x0398c42a,
- 0xb6048894,
- 0x8ebb1880,
- 0x00fa9800,
- 0x80008a80,
- 0x8c80018d,
- 0x038b8002,
- 0xf00190b6,
- 0xe9800794,
- 0x0231f404,
-/* 0x030c: send_done */
- 0x80fc90fc,
-/* 0x0312: find */
- 0x80f900f8,
- 0xf45887f0,
-/* 0x031a: find_loop */
- 0x8a980131,
- 0x06aeb800,
- 0xb6100bf4,
- 0x86b15880,
- 0x1bf40268,
- 0x0132f4f0,
-/* 0x0330: find_done */
- 0xfc028eb9,
-/* 0x0337: send */
- 0xf500f880,
- 0xf4031221,
- 0x00f89701,
-/* 0x0340: recv */
- 0x80f990f9,
- 0x9805e898,
- 0x32f404e9,
- 0x0689b801,
- 0xc43d0bf4,
- 0x80b60389,
- 0x0784f001,
- 0x9805e880,
- 0xf0f902ea,
- 0xf9018ffe,
- 0x02efb9f0,
- 0xbb049994,
- 0xe0b600e9,
- 0x03eb9818,
- 0x9802ec98,
- 0xee9801ed,
- 0xfca5f900,
- 0x00f8fef0,
- 0xfc0131f4,
-/* 0x038d: recv_done */
- 0xfc80fcf0,
-/* 0x0393: init */
- 0xf100f890,
- 0xb6010817,
- 0x11cf0614,
- 0x0911e700,
- 0x0814b601,
- 0xf10014fe,
- 0xf000e017,
- 0x07f00013,
- 0x0604b61c,
- 0xbd0001d0,
- 0xff17f004,
- 0xb61407f0,
- 0x01d00604,
- 0xf004bd00,
- 0x15f10217,
- 0x07f00800,
- 0x0604b610,
- 0xbd0001d0,
- 0x1a17f104,
- 0x0013f001,
- 0xf40010fe,
- 0x17f01031,
- 0x3807f001,
+/* 0x0392: init */
+ 0x17f100f8,
+ 0x14b60108,
+ 0x0011cf06,
+ 0x010911e7,
+ 0xfe0814b6,
+ 0x17f10014,
+ 0x13f000e0,
+ 0x1c07f000,
+ 0xd00604b6,
+ 0x04bd0001,
+ 0xf0ff17f0,
+ 0x04b61407,
+ 0x0001d006,
+ 0x17f004bd,
+ 0x0015f102,
+ 0x1007f008,
0xd00604b6,
0x04bd0001,
-/* 0x03f7: init_proc */
- 0x9858f7f0,
- 0x16b001f1,
- 0xfa0bf400,
- 0xf0b615f9,
- 0xf20ef458,
-/* 0x0408: mulu32_32_64 */
- 0x20f910f9,
- 0x40f930f9,
- 0x9510e195,
- 0xc4bd10d2,
- 0xedffb4bd,
- 0x301dffc0,
- 0xf10234b9,
- 0xb6ffff34,
- 0x45b61034,
- 0x00c3bb10,
- 0xff01b4bb,
- 0x34b930e2,
- 0xff34f102,
- 0x1034b6ff,
- 0xbb1045b6,
- 0xb4bb00c3,
- 0x3012ff01,
- 0xfc00b3bb,
- 0xfc30fc40,
- 0xf810fc20,
-/* 0x0459: host_send */
- 0xb017f100,
+ 0x011917f1,
+ 0xf10013f0,
+ 0xfeffff14,
+ 0x31f40010,
+ 0x0117f010,
+ 0xb63807f0,
+ 0x01d00604,
+ 0xf004bd00,
+/* 0x03fa: init_proc */
+ 0xf19858f7,
+ 0x0016b001,
+ 0xf9fa0bf4,
+ 0x58f0b615,
+/* 0x040b: mulu32_32_64 */
+ 0xf9f20ef4,
+ 0xf920f910,
+ 0x9540f930,
+ 0xd29510e1,
+ 0xbdc4bd10,
+ 0xc0edffb4,
+ 0xb9301dff,
+ 0x34f10234,
+ 0x34b6ffff,
+ 0x1045b610,
+ 0xbb00c3bb,
+ 0xe2ff01b4,
+ 0x0234b930,
+ 0xffff34f1,
+ 0xb61034b6,
+ 0xc3bb1045,
+ 0x01b4bb00,
+ 0xbb3012ff,
+ 0x40fc00b3,
+ 0x20fc30fc,
+ 0x00f810fc,
+/* 0x045c: host_send */
+ 0x04b017f1,
+ 0xcf0614b6,
+ 0x27f10011,
+ 0x24b604a0,
+ 0x0022cf06,
+ 0xf40612b8,
+ 0x1ec4320b,
+ 0x04ee9407,
+ 0x0270e0b7,
+ 0x9803eb98,
+ 0xed9802ec,
+ 0x00ee9801,
+ 0x033621f5,
+ 0xc40110b6,
+ 0x07f10f1e,
+ 0x04b604b0,
+ 0x000ed006,
+ 0x0ef404bd,
+/* 0x04a5: host_send_done */
+/* 0x04a7: host_recv */
+ 0xf100f8ba,
+ 0xf14e4917,
+ 0xb8525413,
+ 0x0bf406e1,
+/* 0x04b5: host_recv_wait */
+ 0xcc17f1aa,
0x0614b604,
0xf10011cf,
- 0xb604a027,
+ 0xb604c827,
0x22cf0624,
- 0x0612b800,
- 0xc4320bf4,
- 0xee94071e,
- 0x70e0b704,
- 0x03eb9802,
- 0x9802ec98,
- 0xee9801ed,
- 0x3721f500,
- 0x0110b603,
- 0xf10f1ec4,
- 0xb604b007,
- 0x0ed00604,
- 0xf404bd00,
-/* 0x04a2: host_send_done */
- 0x00f8ba0e,
-/* 0x04a4: host_recv */
- 0x4e4917f1,
- 0x525413f1,
- 0xf406e1b8,
-/* 0x04b2: host_recv_wait */
- 0x17f1aa0b,
- 0x14b604cc,
- 0x0011cf06,
- 0x04c827f1,
- 0xcf0624b6,
- 0x16f00022,
- 0x0612b808,
- 0xc4e60bf4,
- 0x34b60723,
- 0xf030b704,
- 0x033b8002,
- 0x80023c80,
- 0x3e80013d,
- 0x0120b600,
- 0xf10f24f0,
- 0xb604c807,
- 0x02d00604,
- 0xf004bd00,
- 0x07f04027,
- 0x0604b600,
- 0xbd0002d0,
-/* 0x0507: host_init */
- 0xf100f804,
- 0xb6008017,
- 0x15f11014,
- 0x07f10270,
- 0x04b604d0,
- 0x0001d006,
- 0x17f104bd,
+ 0x0816f000,
+ 0xf40612b8,
+ 0x23c4e60b,
+ 0x0434b607,
+ 0x02f030b7,
+ 0x80033b80,
+ 0x3d80023c,
+ 0x003e8001,
+ 0xf00120b6,
+ 0x07f10f24,
+ 0x04b604c8,
+ 0x0002d006,
+ 0x27f004bd,
+ 0x0007f040,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x050a: host_init */
+ 0x17f100f8,
0x14b60080,
- 0xf015f110,
- 0xdc07f102,
+ 0x7015f110,
+ 0xd007f102,
0x0604b604,
0xbd0001d0,
- 0x0117f004,
- 0x04c407f1,
+ 0x8017f104,
+ 0x1014b600,
+ 0x02f015f1,
+ 0x04dc07f1,
0xd00604b6,
0x04bd0001,
-/* 0x0546: memx_func_enter */
- 0x87f100f8,
- 0x8eb91610,
- 0x0421f402,
- 0xf102d7b9,
- 0xf1fffc67,
- 0xfdffff63,
- 0x67f10476,
- 0x76fd0002,
- 0xf980f905,
- 0xfcd0fc70,
- 0x3f21f4e0,
+ 0xf10117f0,
+ 0xb604c407,
+ 0x01d00604,
+ 0xf804bd00,
+/* 0x0549: memx_func_enter */
+ 0x1087f100,
+ 0x028eb916,
+ 0xb90421f4,
+ 0x67f102d7,
+ 0x63f1fffc,
+ 0x76fdffff,
+ 0x0267f104,
+ 0x0576fd00,
+ 0x70f980f9,
+ 0xe0fcd0fc,
+ 0xf04021f4,
+ 0x07f10467,
+ 0x04b607e0,
+ 0x0006d006,
+/* 0x0582: memx_func_enter_wait */
+ 0x67f104bd,
+ 0x64b607c0,
+ 0x0066cf06,
+ 0xf40464f0,
+ 0x67f0f30b,
+ 0x0664b62c,
+ 0x800066cf,
+ 0x00f8f106,
+/* 0x05a0: memx_func_leave */
+ 0xb62c67f0,
+ 0x66cf0664,
+ 0xf2068000,
0xf10467f0,
- 0xb607e007,
+ 0xb607e407,
0x06d00604,
-/* 0x057f: memx_func_enter_wait */
+/* 0x05bb: memx_func_leave_wait */
0xf104bd00,
0xb607c067,
0x66cf0664,
0x0464f000,
- 0xf0f30bf4,
- 0x64b62c67,
- 0x0066cf06,
- 0xf8f10680,
-/* 0x059d: memx_func_leave */
- 0x2c67f000,
- 0xcf0664b6,
- 0x06800066,
- 0x0467f0f2,
- 0x07e407f1,
- 0xd00604b6,
- 0x04bd0006,
-/* 0x05b8: memx_func_leave_wait */
- 0x07c067f1,
- 0xcf0664b6,
- 0x64f00066,
- 0xf31bf404,
- 0x161087f1,
- 0xf4028eb9,
- 0xd7b90421,
- 0xcc67f102,
- 0xff63f1ff,
- 0x0476fdff,
- 0x70f980f9,
- 0xe0fcd0fc,
- 0xf83f21f4,
-/* 0x05ed: memx_func_wait_vblank */
- 0x00169800,
- 0xf40066b0,
- 0x66b0130b,
- 0x060bf401,
-/* 0x05ff: memx_func_wait_vblank_head1 */
- 0xf12e0ef4,
- 0xf4002077,
-/* 0x0606: memx_func_wait_vblank_head0 */
- 0x77f1070e,
-/* 0x060a: memx_func_wait_vblank_0 */
- 0x67f10008,
- 0x64b607c4,
- 0x0066cf06,
- 0xf40467fd,
-/* 0x061a: memx_func_wait_vblank_1 */
- 0x67f1f31b,
- 0x64b607c4,
- 0x0066cf06,
- 0xf40467fd,
-/* 0x062a: memx_func_wait_vblank_fini */
- 0x10b6f30b,
-/* 0x062f: memx_func_wr32 */
- 0x9800f804,
- 0x15980016,
- 0x0810b601,
- 0x50f960f9,
- 0xe0fcd0fc,
- 0xb63f21f4,
- 0x1bf40242,
-/* 0x064b: memx_func_wait */
- 0xf000f8e9,
- 0x84b62c87,
- 0x0088cf06,
- 0x98001e98,
- 0x1c98011d,
- 0x031b9802,
- 0xf41010b6,
- 0x00f8a421,
-/* 0x0668: memx_func_delay */
- 0xb6001e98,
- 0x21f40410,
-/* 0x0673: memx_func_train */
- 0xf100f87f,
- 0xf1000357,
- 0xf1000077,
- 0xf0000097,
- 0x9eb97093,
- 0x0421f402,
- 0xf102d8b9,
- 0xf42710e7,
-/* 0x0692: memx_func_train_loop_outer */
- 0x58e07f21,
- 0x83f10101,
- 0x97f10200,
- 0x93f011e0,
- 0xf990f911,
- 0xfcd0fc80,
- 0x3f21f4e0,
- 0x67f150f9,
-/* 0x06b2: memx_func_train_loop_inner */
- 0x87f10000,
- 0x68ff1111,
- 0x10989490,
- 0xf10589fd,
- 0xf0072097,
- 0x90f91093,
- 0xd0fc80f9,
- 0x21f4e0fc,
- 0x8097f13f,
- 0x1093f000,
- 0xf4029eb9,
- 0xd8b90421,
- 0x2088c502,
+ 0xf1f31bf4,
+ 0xb9161087,
+ 0x21f4028e,
+ 0x02d7b904,
+ 0xffcc67f1,
+ 0xffff63f1,
+ 0xf90476fd,
+ 0xfc70f980,
+ 0xf4e0fcd0,
+ 0x00f84021,
+/* 0x05f0: memx_func_wait_vblank */
+ 0xb0001698,
+ 0x0bf40066,
+ 0x0166b013,
+ 0xf4060bf4,
+/* 0x0602: memx_func_wait_vblank_head1 */
+ 0x77f12e0e,
+ 0x0ef40020,
+/* 0x0609: memx_func_wait_vblank_head0 */
+ 0x0877f107,
+/* 0x060d: memx_func_wait_vblank_0 */
+ 0xc467f100,
+ 0x0664b607,
+ 0xfd0066cf,
+ 0x1bf40467,
+/* 0x061d: memx_func_wait_vblank_1 */
+ 0xc467f1f3,
+ 0x0664b607,
+ 0xfd0066cf,
+ 0x0bf40467,
+/* 0x062d: memx_func_wait_vblank_fini */
+ 0x0410b6f3,
+/* 0x0632: memx_func_wr32 */
+ 0x169800f8,
+ 0x01159800,
+ 0xf90810b6,
+ 0xfc50f960,
+ 0xf4e0fcd0,
+ 0x42b64021,
+ 0xe91bf402,
+/* 0x064e: memx_func_wait */
+ 0x87f000f8,
+ 0x0684b62c,
+ 0x980088cf,
+ 0x1d98001e,
+ 0x021c9801,
+ 0xb6031b98,
+ 0x21f41010,
+/* 0x066b: memx_func_delay */
+ 0x9800f8a3,
+ 0x10b6001e,
+ 0x7e21f404,
+/* 0x0676: memx_func_train */
+ 0x57f100f8,
+ 0x77f10003,
+ 0x97f10000,
+ 0x93f00000,
+ 0x029eb970,
+ 0xb90421f4,
+ 0xe7f102d8,
+ 0x21f42710,
+/* 0x0695: memx_func_train_loop_outer */
+ 0x0158e07e,
+ 0x0083f101,
+ 0xe097f102,
+ 0x1193f011,
0x80f990f9,
0xe0fcd0fc,
- 0xf13f21f4,
- 0xf0053c97,
- 0x87f11093,
- 0x83f13002,
- 0x90f98000,
- 0xd0fc80f9,
- 0x21f4e0fc,
- 0x60e7f13f,
- 0x10e3f005,
- 0x0000d7f1,
- 0x8000d3f1,
- 0xf100dc90,
- 0xf08480b7,
- 0x21f41eb3,
- 0x0057f1a4,
- 0xff97f100,
- 0x0093f1ff,
-/* 0x0731: memx_func_train_loop_4x */
- 0x80a7f183,
- 0x10a3f000,
- 0xf402aeb9,
- 0xd8b90421,
- 0xdfb7f102,
- 0xffb3f1ff,
- 0x048bfdff,
- 0x80f9a0f9,
- 0xe0fcd0fc,
- 0xf13f21f4,
- 0xf0053ca7,
- 0x87f110a3,
- 0x83f13002,
- 0xa0f98000,
- 0xd0fc80f9,
- 0x21f4e0fc,
- 0x60e7f13f,
- 0x10e3f005,
- 0x0000d7f1,
- 0x8000d3f1,
- 0xf102dcb9,
- 0xf02710b7,
- 0x21f400b3,
- 0x02eeb9a4,
- 0xb90421f4,
- 0x9dff02dd,
- 0x0150b694,
- 0xf4045670,
- 0x7aa0921e,
- 0xa9800bcc,
- 0x0160b600,
- 0x700470b6,
- 0x1ef51066,
- 0x50fcff00,
+ 0xf94021f4,
+ 0x0067f150,
+/* 0x06b5: memx_func_train_loop_inner */
+ 0x1187f100,
+ 0x9068ff11,
+ 0xfd109894,
+ 0x97f10589,
+ 0x93f00720,
+ 0xf990f910,
+ 0xfcd0fc80,
+ 0x4021f4e0,
+ 0x008097f1,
+ 0xb91093f0,
+ 0x21f4029e,
+ 0x02d8b904,
+ 0xf92088c5,
+ 0xfc80f990,
+ 0xf4e0fcd0,
+ 0x97f14021,
+ 0x93f0053c,
+ 0x0287f110,
+ 0x0083f130,
+ 0xf990f980,
+ 0xfcd0fc80,
+ 0x4021f4e0,
+ 0x0560e7f1,
+ 0xf110e3f0,
+ 0xf10000d7,
+ 0x908000d3,
+ 0xb7f100dc,
+ 0xb3f08480,
+ 0xa321f41e,
+ 0x000057f1,
+ 0xffff97f1,
+ 0x830093f1,
+/* 0x0734: memx_func_train_loop_4x */
+ 0x0080a7f1,
+ 0xb910a3f0,
+ 0x21f402ae,
+ 0x02d8b904,
+ 0xffdfb7f1,
+ 0xffffb3f1,
+ 0xf9048bfd,
+ 0xfc80f9a0,
+ 0xf4e0fcd0,
+ 0xa7f14021,
+ 0xa3f0053c,
+ 0x0287f110,
+ 0x0083f130,
+ 0xf9a0f980,
+ 0xfcd0fc80,
+ 0x4021f4e0,
+ 0x0560e7f1,
+ 0xf110e3f0,
+ 0xf10000d7,
+ 0xb98000d3,
+ 0xb7f102dc,
+ 0xb3f02710,
+ 0xa321f400,
+ 0xf402eeb9,
+ 0xddb90421,
+ 0x949dff02,
0x700150b6,
- 0x1ef50756,
- 0x00f8fed4,
-/* 0x07c4: memx_exec */
- 0xd0f9e0f9,
- 0xb902c1b9,
-/* 0x07ce: memx_exec_next */
- 0x139802b2,
- 0x0410b600,
- 0x01f034e7,
- 0x01e033e7,
- 0xf00132b6,
- 0x35980c30,
- 0xb855f9de,
- 0x1ef40612,
- 0xf10b98e4,
- 0xbbf20c98,
- 0xb7f102cb,
- 0xb4b607c4,
- 0x00bbcf06,
- 0xe0fcd0fc,
- 0x033721f5,
-/* 0x080a: memx_info */
- 0xc67000f8,
- 0x0e0bf401,
-/* 0x0810: memx_info_data */
- 0x03ccc7f1,
- 0x0800b7f1,
-/* 0x081b: memx_info_train */
- 0xf10b0ef4,
- 0xf10bccc7,
-/* 0x0823: memx_info_send */
- 0xf50100b7,
- 0xf8033721,
-/* 0x0829: memx_recv */
- 0x01d6b000,
- 0xb0980bf4,
- 0x0bf400d6,
-/* 0x0837: memx_init */
- 0xf800f8d8,
-/* 0x0839: perf_recv */
-/* 0x083b: perf_init */
- 0xf800f800,
-/* 0x083d: i2c_drive_scl */
- 0x0036b000,
- 0xf1110bf4,
- 0xb607e007,
- 0x01d00604,
- 0xf804bd00,
-/* 0x0851: i2c_drive_scl_lo */
- 0xe407f100,
- 0x0604b607,
- 0xbd0001d0,
-/* 0x085f: i2c_drive_sda */
- 0xb000f804,
- 0x0bf40036,
- 0xe007f111,
- 0x0604b607,
- 0xbd0002d0,
-/* 0x0873: i2c_drive_sda_lo */
- 0xf100f804,
- 0xb607e407,
- 0x02d00604,
- 0xf804bd00,
-/* 0x0881: i2c_sense_scl */
- 0x0132f400,
- 0x07c437f1,
- 0xcf0634b6,
- 0x31fd0033,
- 0x060bf404,
-/* 0x0897: i2c_sense_scl_done */
- 0xf80131f4,
-/* 0x0899: i2c_sense_sda */
- 0x0132f400,
- 0x07c437f1,
- 0xcf0634b6,
- 0x32fd0033,
- 0x060bf404,
-/* 0x08af: i2c_sense_sda_done */
- 0xf80131f4,
-/* 0x08b1: i2c_raise_scl */
- 0xf140f900,
- 0xf0089847,
- 0x21f50137,
-/* 0x08be: i2c_raise_scl_wait */
- 0xe7f1083d,
- 0x21f403e8,
- 0x8121f57f,
- 0x0901f408,
- 0xf40142b6,
-/* 0x08d2: i2c_raise_scl_done */
- 0x40fcef1b,
-/* 0x08d6: i2c_start */
- 0x21f500f8,
- 0x11f40881,
- 0x9921f50d,
- 0x0611f408,
-/* 0x08e7: i2c_start_rep */
- 0xf0300ef4,
- 0x21f50037,
- 0x37f0083d,
- 0x5f21f501,
- 0x0076bb08,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b608b1,
- 0x1f11f404,
-/* 0x0914: i2c_start_send */
- 0xf50037f0,
- 0xf1085f21,
- 0xf41388e7,
- 0x37f07f21,
- 0x3d21f500,
- 0x88e7f108,
- 0x7f21f413,
-/* 0x0930: i2c_start_out */
-/* 0x0932: i2c_stop */
- 0x37f000f8,
- 0x3d21f500,
- 0x0037f008,
- 0x085f21f5,
- 0x03e8e7f1,
- 0xf07f21f4,
- 0x21f50137,
- 0xe7f1083d,
- 0x21f41388,
- 0x0137f07f,
- 0x085f21f5,
- 0x1388e7f1,
- 0xf87f21f4,
-/* 0x0965: i2c_bitw */
- 0x5f21f500,
- 0xe8e7f108,
- 0x7f21f403,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xb121f550,
- 0x0464b608,
- 0xf11811f4,
- 0xf41388e7,
- 0x37f07f21,
- 0x3d21f500,
- 0x88e7f108,
- 0x7f21f413,
-/* 0x09a4: i2c_bitw_out */
-/* 0x09a6: i2c_bitr */
- 0x37f000f8,
- 0x5f21f501,
+ 0x1ef40456,
+ 0xcc7aa092,
+ 0x00a9800b,
+ 0xb60160b6,
+ 0x66700470,
+ 0x001ef510,
+ 0xb650fcff,
+ 0x56700150,
+ 0xd41ef507,
+/* 0x07c7: memx_exec */
+ 0xf900f8fe,
+ 0xb9d0f9e0,
+ 0xb2b902c1,
+/* 0x07d1: memx_exec_next */
+ 0x00139802,
+ 0xe70410b6,
+ 0xe701f034,
+ 0xb601e033,
+ 0x30f00132,
+ 0xde35980c,
+ 0x12b855f9,
+ 0xe41ef406,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0xc4b7f102,
+ 0x06b4b607,
+ 0xfc00bbcf,
+ 0xf5e0fcd0,
+ 0xf8033621,
+/* 0x080d: memx_info */
+ 0x01c67000,
+/* 0x0813: memx_info_data */
+ 0xf10e0bf4,
+ 0xf103ccc7,
+ 0xf40800b7,
+/* 0x081e: memx_info_train */
+ 0xc7f10b0e,
+ 0xb7f10bcc,
+/* 0x0826: memx_info_send */
+ 0x21f50100,
+ 0x00f80336,
+/* 0x082c: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0980b,
+ 0xd80bf400,
+/* 0x083a: memx_init */
+ 0x00f800f8,
+/* 0x083c: perf_recv */
+/* 0x083e: perf_init */
+ 0x00f800f8,
+/* 0x0840: i2c_drive_scl */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x0854: i2c_drive_scl_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x0862: i2c_drive_sda */
+ 0x36b000f8,
+ 0x110bf400,
+ 0x07e007f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x0876: i2c_drive_sda_lo */
+ 0x07f100f8,
+ 0x04b607e4,
+ 0x0002d006,
+ 0x00f804bd,
+/* 0x0884: i2c_sense_scl */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x089a: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x089c: i2c_sense_sda */
+ 0xf10132f4,
+ 0xb607c437,
+ 0x33cf0634,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x08b2: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x08b4: i2c_raise_scl */
+ 0x47f140f9,
+ 0x37f00898,
+ 0x4021f501,
+/* 0x08c1: i2c_raise_scl_wait */
0xe8e7f108,
- 0x7f21f403,
+ 0x7e21f403,
+ 0x088421f5,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x08d5: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x08d9: i2c_start */
+ 0x8421f500,
+ 0x0d11f408,
+ 0x089c21f5,
+ 0xf40611f4,
+/* 0x08ea: i2c_start_rep */
+ 0x37f0300e,
+ 0x4021f500,
+ 0x0137f008,
+ 0x086221f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xb121f550,
+ 0xb421f550,
0x0464b608,
- 0xf51b11f4,
- 0xf0089921,
+/* 0x0917: i2c_start_send */
+ 0xf01f11f4,
0x21f50037,
- 0xe7f1083d,
+ 0xe7f10862,
0x21f41388,
- 0x013cf07f,
-/* 0x09eb: i2c_bitr_done */
- 0xf80131f4,
-/* 0x09ed: i2c_get_byte */
- 0x0057f000,
-/* 0x09f3: i2c_get_byte_next */
- 0xb60847f0,
- 0x76bb0154,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb609a621,
- 0x11f40464,
- 0x0553fd2b,
- 0xf40142b6,
- 0x37f0d81b,
+ 0x0037f07e,
+ 0x084021f5,
+ 0x1388e7f1,
+/* 0x0933: i2c_start_out */
+ 0xf87e21f4,
+/* 0x0935: i2c_stop */
+ 0x0037f000,
+ 0x084021f5,
+ 0xf50037f0,
+ 0xf1086221,
+ 0xf403e8e7,
+ 0x37f07e21,
+ 0x4021f501,
+ 0x88e7f108,
+ 0x7e21f413,
+ 0xf50137f0,
+ 0xf1086221,
+ 0xf41388e7,
+ 0x00f87e21,
+/* 0x0968: i2c_bitw */
+ 0x086221f5,
+ 0x03e8e7f1,
+ 0xbb7e21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x08b421f5,
+ 0xf40464b6,
+ 0xe7f11811,
+ 0x21f41388,
+ 0x0037f07e,
+ 0x084021f5,
+ 0x1388e7f1,
+/* 0x09a7: i2c_bitw_out */
+ 0xf87e21f4,
+/* 0x09a9: i2c_bitr */
+ 0x0137f000,
+ 0x086221f5,
+ 0x03e8e7f1,
+ 0xbb7e21f4,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x08b421f5,
+ 0xf40464b6,
+ 0x21f51b11,
+ 0x37f0089c,
+ 0x4021f500,
+ 0x88e7f108,
+ 0x7e21f413,
+ 0xf4013cf0,
+/* 0x09ee: i2c_bitr_done */
+ 0x00f80131,
+/* 0x09f0: i2c_get_byte */
+ 0xf00057f0,
+/* 0x09f6: i2c_get_byte_next */
+ 0x54b60847,
0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60965,
-/* 0x0a3d: i2c_get_byte_done */
-/* 0x0a3f: i2c_put_byte */
- 0xf000f804,
-/* 0x0a42: i2c_put_byte_next */
- 0x42b60847,
- 0x3854ff01,
+ 0x64b609a9,
+ 0x2b11f404,
+ 0xb60553fd,
+ 0x1bf40142,
+ 0x0137f0d8,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x6521f550,
+ 0x6821f550,
0x0464b609,
- 0xb03411f4,
- 0x1bf40046,
- 0x0076bbd8,
+/* 0x0a40: i2c_get_byte_done */
+/* 0x0a42: i2c_put_byte */
+ 0x47f000f8,
+/* 0x0a45: i2c_put_byte_next */
+ 0x0142b608,
+ 0xbb3854ff,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x096821f5,
+ 0xf40464b6,
+ 0x46b03411,
+ 0xd81bf400,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xa921f550,
+ 0x0464b609,
+ 0xbb0f11f4,
+ 0x36b00076,
+ 0x061bf401,
+/* 0x0a9b: i2c_put_byte_done */
+ 0xf80132f4,
+/* 0x0a9d: i2c_addr */
+ 0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b609a6,
- 0x0f11f404,
- 0xb00076bb,
- 0x1bf40136,
- 0x0132f406,
-/* 0x0a98: i2c_put_byte_done */
-/* 0x0a9a: i2c_addr */
- 0x76bb00f8,
+ 0x64b608d9,
+ 0x2911f404,
+ 0x012ec3e7,
+ 0xfd0134b6,
+ 0x76bb0553,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb608d621,
- 0x11f40464,
- 0x2ec3e729,
- 0x0134b601,
- 0xbb0553fd,
+ 0xb60a4221,
+/* 0x0ae2: i2c_addr_done */
+ 0x00f80464,
+/* 0x0ae4: i2c_acquire_addr */
+ 0xb6f8cec7,
+ 0xe0b702e4,
+ 0xee980d1c,
+/* 0x0af3: i2c_acquire */
+ 0xf500f800,
+ 0xf40ae421,
+ 0xd9f00421,
+ 0x4021f403,
+/* 0x0b02: i2c_release */
+ 0x21f500f8,
+ 0x21f40ae4,
+ 0x03daf004,
+ 0xf84021f4,
+/* 0x0b11: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x3a1ff528,
+ 0xf413a001,
+ 0x0032980c,
+ 0x0ccc13a0,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0a3f21f5,
-/* 0x0adf: i2c_addr_done */
- 0xf80464b6,
-/* 0x0ae1: i2c_acquire_addr */
- 0xf8cec700,
- 0xb702e4b6,
- 0x980d1ce0,
- 0x00f800ee,
-/* 0x0af0: i2c_acquire */
- 0x0ae121f5,
- 0xf00421f4,
- 0x21f403d9,
-/* 0x0aff: i2c_release */
- 0xf500f83f,
- 0xf40ae121,
- 0xdaf00421,
- 0x3f21f403,
-/* 0x0b0e: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13a0013a,
- 0x32980cf4,
- 0xcc13a000,
- 0x0031980c,
- 0xf90231f4,
- 0xf9e0f9d0,
- 0x0067f1d0,
- 0x0063f100,
- 0x01679210,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xf021f550,
- 0x0464b60a,
- 0xd6b0d0fc,
- 0xb31bf500,
- 0x0057f000,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x9a21f550,
- 0x0464b60a,
- 0x00d011f5,
- 0xbbe0c5c7,
+ 0x0af321f5,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b31bf5,
+ 0xbb0057f0,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0a3f21f5,
+ 0x0a9d21f5,
0xf50464b6,
- 0xf000ad11,
- 0x76bb0157,
+ 0xc700d011,
+ 0x76bbe0c5,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb60a9a21,
+ 0xb60a4221,
0x11f50464,
- 0x76bb008a,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb609ed21,
- 0x11f40464,
- 0xe05bcb6a,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x3221f550,
- 0x0464b609,
- 0xbd025bb9,
- 0x430ef474,
-/* 0x0c14: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x57f03d1b,
- 0x9a21f500,
- 0x3311f40a,
- 0xf5e0c5c7,
- 0xf40a3f21,
- 0x57f02911,
- 0x9a21f500,
- 0x1f11f40a,
- 0xf5e0b5c7,
- 0xf40a3f21,
- 0x21f51511,
- 0x74bd0932,
- 0xf408c5c7,
- 0x32f4091b,
- 0x030ef402,
-/* 0x0c54: i2c_recv_not_wr08 */
-/* 0x0c54: i2c_recv_done */
- 0xf5f8cec7,
- 0xfc0aff21,
- 0xf4d0fce0,
- 0x7cb90a12,
- 0x3721f502,
-/* 0x0c69: i2c_recv_exit */
-/* 0x0c6b: i2c_init */
- 0xf800f803,
-/* 0x0c6d: test_recv */
- 0xd817f100,
- 0x0614b605,
- 0xb60011cf,
- 0x07f10110,
- 0x04b605d8,
- 0x0001d006,
- 0xe7f104bd,
- 0xe3f1d900,
- 0x21f5134f,
- 0x00f80257,
-/* 0x0c94: test_init */
- 0x0800e7f1,
- 0x025721f5,
-/* 0x0c9e: idle_recv */
+ 0x57f000ad,
+ 0x0076bb01,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b60a9d,
+ 0x8a11f504,
+ 0x0076bb00,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b609f0,
+ 0x6a11f404,
+ 0xbbe05bcb,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x093521f5,
+ 0xb90464b6,
+ 0x74bd025b,
+/* 0x0c17: i2c_recv_not_rd08 */
+ 0xb0430ef4,
+ 0x1bf401d6,
+ 0x0057f03d,
+ 0x0a9d21f5,
+ 0xc73311f4,
+ 0x21f5e0c5,
+ 0x11f40a42,
+ 0x0057f029,
+ 0x0a9d21f5,
+ 0xc71f11f4,
+ 0x21f5e0b5,
+ 0x11f40a42,
+ 0x3521f515,
+ 0xc774bd09,
+ 0x1bf408c5,
+ 0x0232f409,
+/* 0x0c57: i2c_recv_not_wr08 */
+/* 0x0c57: i2c_recv_done */
+ 0xc7030ef4,
+ 0x21f5f8ce,
+ 0xe0fc0b02,
+ 0x12f4d0fc,
+ 0x027cb90a,
+ 0x033621f5,
+/* 0x0c6c: i2c_recv_exit */
+/* 0x0c6e: i2c_init */
0x00f800f8,
-/* 0x0ca0: idle */
- 0xf10031f4,
- 0xb605d417,
- 0x11cf0614,
- 0x0110b600,
- 0x05d407f1,
- 0xd00604b6,
- 0x04bd0001,
-/* 0x0cbc: idle_loop */
- 0xf45817f0,
-/* 0x0cc2: idle_proc */
-/* 0x0cc2: idle_proc_exec */
- 0x10f90232,
- 0xf5021eb9,
- 0xfc034021,
- 0x0911f410,
- 0xf40231f4,
-/* 0x0cd6: idle_proc_next */
- 0x10b6ef0e,
- 0x061fb858,
- 0xf4e61bf4,
- 0x28f4dd02,
- 0xbb0ef400,
- 0x00000000,
+/* 0x0c70: test_recv */
+ 0x05d817f1,
+ 0xcf0614b6,
+ 0x10b60011,
+ 0xd807f101,
+ 0x0604b605,
+ 0xbd0001d0,
+ 0x00e7f104,
+ 0x4fe3f1d9,
+ 0x5621f513,
+/* 0x0c97: test_init */
+ 0xf100f802,
+ 0xf50800e7,
+ 0xf8025621,
+/* 0x0ca1: idle_recv */
+/* 0x0ca3: idle */
+ 0xf400f800,
+ 0x17f10031,
+ 0x14b605d4,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d407,
+ 0x01d00604,
+/* 0x0cbf: idle_loop */
+ 0xf004bd00,
+ 0x32f45817,
+/* 0x0cc5: idle_proc */
+/* 0x0cc5: idle_proc_exec */
+ 0xb910f902,
+ 0x21f5021e,
+ 0x10fc033f,
+ 0xf40911f4,
+ 0x0ef40231,
+/* 0x0cd9: idle_proc_next */
+ 0x5810b6ef,
+ 0xf4061fb8,
+ 0x02f4e61b,
+ 0x0028f4dd,
+ 0x00bb0ef4,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc
index c2bb616a8da5..f2420a37f45b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/host.fuc
@@ -98,8 +98,7 @@ host_send:
// $r0 - zero
host_recv:
// message from intr handler == HOST->PWR comms pending
- mov $r1 (PROC_KERN & 0x0000ffff)
- sethi $r1 (PROC_KERN & 0xffff0000)
+ imm32($r1, PROC_KERN)
cmp b32 $r14 $r1
bra e #host_send
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
index ad35fa57be94..c20a3bd33775 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/kernel.fuc
@@ -51,8 +51,7 @@ time_next: .b32 0
// $r0 - zero
rd32:
nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
- mov $r13 NV_PPWR_MMIO_CTRL_OP_RD
- sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
+ imm32($r13, NV_PPWR_MMIO_CTRL_OP_RD | NV_PPWR_MMIO_CTRL_TRIGGER)
nv_iowr(NV_PPWR_MMIO_CTRL, $r13)
rd32_wait:
nv_iord($r13, NV_PPWR_MMIO_CTRL)
@@ -70,9 +69,7 @@ rd32:
wr32:
nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
nv_iowr(NV_PPWR_MMIO_DATA, $r13)
- mov $r13 NV_PPWR_MMIO_CTRL_OP_WR
- or $r13 NV_PPWR_MMIO_CTRL_MASK_B32_0
- sethi $r13 NV_PPWR_MMIO_CTRL_TRIGGER
+ imm32($r13, NV_PPWR_MMIO_CTRL_OP_WR | NV_PPWR_MMIO_CTRL_MASK_B32_0 | NV_PPWR_MMIO_CTRL_TRIGGER)
#ifdef NVKM_FALCON_MMIO_TRAP
push $r13
@@ -215,8 +212,7 @@ intr:
bra z #intr_subintr_skip_fifo
nv_iord($r12, NV_PPWR_FIFO_INTR)
push $r12
- mov $r14 (PROC_HOST & 0x0000ffff)
- sethi $r14 (PROC_HOST & 0xffff0000)
+ imm32($r14, PROC_HOST)
mov $r13 KMSG_FIFO
call(send)
pop $r12
@@ -256,7 +252,7 @@ ticks_from_ns:
/* try not losing precision (multiply then divide) */
imm32($r13, HW_TICKS_PER_US)
- call #mulu32_32_64
+ call(mulu32_32_64)
 /* use an immediate, it's ok because HW_TICKS_PER_US < 16 bits */
div $r12 $r12 1000
@@ -268,7 +264,7 @@ ticks_from_ns:
/* let's divide then multiply, too bad for the precision! */
div $r14 $r14 1000
imm32($r13, HW_TICKS_PER_US)
- call #mulu32_32_64
+ call(mulu32_32_64)
/* this cannot overflow as long as HW_TICKS_PER_US < 1000 */
@@ -290,7 +286,7 @@ ticks_from_us:
/* simply multiply $us by HW_TICKS_PER_US */
imm32($r13, HW_TICKS_PER_US)
- call #mulu32_32_64
+ call(mulu32_32_64)
mov b32 $r14 $r12
/* check if there wasn't any overflow */
@@ -511,14 +507,12 @@ init:
#ifdef NVKM_FALCON_MMIO_UAS
// somehow allows the magic "access mmio via D[]" stuff that's
// used by the nv_rd32/nv_wr32 macros to work
- mov $r1 0x0010
- sethi $r1 NV_PPWR_UAS_CONFIG_ENABLE
+ imm32($r1, 0x10 | NV_PPWR_UAS_CONFIG_ENABLE)
nv_iowrs(NV_PPWR_UAS_CONFIG, $r1)
#endif
// route all interrupts except user0/1 and pause to fuc
- mov $r1 0x00e0
- sethi $r1 0x00000000
+ imm32($r1, 0xe0)
nv_iowr(NV_PPWR_INTR_ROUTE, $r1)
// enable watchdog and subintr intrs
@@ -529,8 +523,8 @@ init:
nv_iowr(NV_PPWR_INTR_EN_SET, $r1)
// enable interrupts globally
- mov $r1 #intr
- sethi $r1 0x00000000
+ imm32($r1, #intr)
+ and $r1 0xffff
mov $iv0 $r1
bset $flags ie0
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc
index 96fc984dafdc..3737bd27f74e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/macros.fuc
@@ -169,7 +169,7 @@
*/ .b32 0 /*
*/ .skip 64
-#if NV_PPWR_CHIPSET < GK208
+#if NVKM_PPWR_CHIPSET < GK208
#define imm32(reg,val) /*
*/ movw reg ((val) & 0x0000ffff) /*
*/ sethi reg ((val) & 0xffff0000)
@@ -252,12 +252,12 @@
#endif
#define st(size, addr, reg) /*
-*/ movw $r0 addr /*
+*/ imm32($r0, addr) /*
*/ st size D[$r0] reg /*
*/ clear b32 $r0
#define ld(size, reg, addr) /*
-*/ movw $r0 addr /*
+*/ imm32($r0, addr) /*
*/ ld size reg D[$r0] /*
*/ clear b32 $r0
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc
index 0c3a71bf5459..9e3f4e690dd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/test.fuc
@@ -48,8 +48,7 @@ test_recv:
nv_iord($r1, NV_PPWR_DSCRATCH(2))
add b32 $r1 1
nv_iowr(NV_PPWR_DSCRATCH(2), $r1)
- mov $r14 -0x2700 /* 0xd900, envyas grrr! */
- sethi $r14 0x134f0000
+ imm32($r14, 0x134fd900)
call(timer)
ret
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
new file mode 100644
index 000000000000..b02b868a6589
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -0,0 +1,3 @@
+nvkm-y += nvkm/subdev/secboot/base.o
+nvkm-y += nvkm/subdev/secboot/gm200.o
+nvkm-y += nvkm/subdev/secboot/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
new file mode 100644
index 000000000000..520facf9bc07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+#include <subdev/timer.h>
+
+static const char *
+managed_falcons_names[] = {
+ [NVKM_SECBOOT_FALCON_PMU] = "PMU",
+ [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
+ [NVKM_SECBOOT_FALCON_FECS] = "FECS",
+ [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
+ [NVKM_SECBOOT_FALCON_END] = "<invalid>",
+};
+
+/*
+ * Helper falcon functions
+ */
+
+static int
+falcon_clear_halt_interrupt(struct nvkm_device *device, u32 base)
+{
+ int ret;
+
+ /* clear halt interrupt */
+ nvkm_mask(device, base + 0x004, 0x10, 0x10);
+ /* wait until halt interrupt is cleared */
+ ret = nvkm_wait_msec(device, 10, base + 0x008, 0x10, 0x0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+falcon_wait_idle(struct nvkm_device *device, u32 base)
+{
+ int ret;
+
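+	/* wait up to 10ms for the falcon idle-state bits (base + 0x04c) to clear */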
+ ret = nvkm_wait_msec(device, 10, base + 0x04c, 0xffff, 0x0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
+{
+ struct nvkm_device *device = sb->subdev.device;
+ int ret;
+
+ /* enable engine */
+ nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask);
+ nvkm_rd32(device, 0x200);
+ ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
+ if (ret < 0) {
+ nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
+ nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
+ return ret;
+ }
+
+ ret = falcon_wait_idle(device, sb->base);
+ if (ret)
+ return ret;
+
+ /* enable IRQs */
+ nvkm_wr32(device, sb->base + 0x010, 0xff);
+ nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask);
+ nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask);
+
+ return 0;
+}
+
+static int
+nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
+{
+ struct nvkm_device *device = sb->subdev.device;
+
+ /* disable IRQs and wait for any previous code to complete */
+ nvkm_mask(device, 0x644, sb->irq_mask, 0x0);
+ nvkm_mask(device, 0x640, sb->irq_mask, 0x0);
+ nvkm_wr32(device, sb->base + 0x014, 0xff);
+
+ falcon_wait_idle(device, sb->base);
+
+ /* disable engine */
+ nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
+
+ return 0;
+}
+
+int
+nvkm_secboot_falcon_reset(struct nvkm_secboot *sb)
+{
+ int ret;
+
+ ret = nvkm_secboot_falcon_disable(sb);
+ if (ret)
+ return ret;
+
+ ret = nvkm_secboot_falcon_enable(sb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * nvkm_secboot_falcon_run - run the falcon that will perform secure boot
+ *
+ * This function is to be called after all chip-specific preparations have
+ * been completed. It will start the falcon to perform secure boot, wait for
+ * it to halt, and report if an error occurred.
+ */
+int
+nvkm_secboot_falcon_run(struct nvkm_secboot *sb)
+{
+ struct nvkm_device *device = sb->subdev.device;
+ int ret;
+
+ /* Start falcon */
+ nvkm_wr32(device, sb->base + 0x100, 0x2);
+
+ /* Wait for falcon halt */
+ ret = nvkm_wait_msec(device, 100, sb->base + 0x100, 0x10, 0x10);
+ if (ret < 0)
+ return ret;
+
+ /* If mailbox register contains an error code, then ACR has failed */
+ ret = nvkm_rd32(device, sb->base + 0x040);
+ if (ret) {
+ nvkm_error(&sb->subdev, "ACR boot failed, ret 0x%08x\n", ret);
+ falcon_clear_halt_interrupt(device, sb->base);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/**
+ * nvkm_secboot_reset() - reset specified falcon
+ */
+int
+nvkm_secboot_reset(struct nvkm_secboot *sb, u32 falcon)
+{
+ /* Unmanaged falcon? */
+ if (!(BIT(falcon) & sb->func->managed_falcons)) {
+ nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
+ return -EINVAL;
+ }
+
+ return sb->func->reset(sb, falcon);
+}
+
+/**
+ * nvkm_secboot_start() - start specified falcon
+ */
+int
+nvkm_secboot_start(struct nvkm_secboot *sb, u32 falcon)
+{
+ /* Unmanaged falcon? */
+ if (!(BIT(falcon) & sb->func->managed_falcons)) {
+ nvkm_error(&sb->subdev, "cannot start unmanaged falcon!\n");
+ return -EINVAL;
+ }
+
+ return sb->func->start(sb, falcon);
+}
+
+/**
+ * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
+ */
+bool
+nvkm_secboot_is_managed(struct nvkm_secboot *secboot,
+ enum nvkm_secboot_falcon fid)
+{
+ if (!secboot)
+ return false;
+
+ return secboot->func->managed_falcons & BIT(fid);
+}
+
+static int
+nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_secboot *sb = nvkm_secboot(subdev);
+ int ret = 0;
+
+ /* Call chip-specific init function */
+ if (sb->func->init)
+ ret = sb->func->init(sb);
+ if (ret) {
+ nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Build all blobs - the same blobs can be used to perform secure boot
+ * multiple times
+ */
+ if (sb->func->prepare_blobs)
+ ret = sb->func->prepare_blobs(sb);
+
+ return ret;
+}
+
+static int
+nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+ struct nvkm_secboot *sb = nvkm_secboot(subdev);
+ int ret = 0;
+
+ if (sb->func->fini)
+ ret = sb->func->fini(sb, suspend);
+
+ return ret;
+}
+
+static void *
+nvkm_secboot_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_secboot *sb = nvkm_secboot(subdev);
+ void *ret = NULL;
+
+ if (sb->func->dtor)
+ ret = sb->func->dtor(sb);
+
+ return ret;
+}
+
+static const struct nvkm_subdev_func
+nvkm_secboot = {
+ .oneinit = nvkm_secboot_oneinit,
+ .fini = nvkm_secboot_fini,
+ .dtor = nvkm_secboot_dtor,
+};
+
+int
+nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
+ struct nvkm_device *device, int index,
+ struct nvkm_secboot *sb)
+{
+ unsigned long fid;
+
+ nvkm_subdev_ctor(&nvkm_secboot, device, index, 0, &sb->subdev);
+ sb->func = func;
+
+ /* set up the base address and masks of the falcon performing secure boot */
+ switch (func->boot_falcon) {
+ case NVKM_SECBOOT_FALCON_PMU:
+ sb->base = 0x10a000;
+ sb->irq_mask = 0x1000000;
+ sb->enable_mask = 0x2000;
+ break;
+ default:
+ nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
+ return -EINVAL;
+ }
+
+ nvkm_debug(&sb->subdev, "securely managed falcons:\n");
+ for_each_set_bit(fid, &sb->func->managed_falcons,
+ NVKM_SECBOOT_FALCON_END)
+ nvkm_debug(&sb->subdev, "- %s\n", managed_falcons_names[fid]);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
new file mode 100644
index 000000000000..cc100dc940ea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -0,0 +1,1489 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Secure boot is the process by which NVIDIA-signed firmware is loaded into
+ * some of the falcons of a GPU. For production devices this is the only way
+ * for the firmware to access useful (but sensitive) registers.
+ *
+ * A Falcon microprocessor supporting advanced security modes can run in one of
+ * three modes:
+ *
+ * - Non-secure (NS). In this mode, functionality is similar to Falcon
+ * architectures before security modes were introduced (pre-Maxwell), but
+ * capability is restricted. In particular, certain registers may be
+ * inaccessible for reads and/or writes, and physical memory access may be
+ * disabled (on certain Falcon instances). This is the only possible mode that
+ * can be used if you don't have microcode cryptographically signed by NVIDIA.
+ *
+ * - Heavy Secure (HS). In this mode, the microprocessor is a black box - it's
+ * not possible to read or write any Falcon internal state or Falcon registers
+ * from outside the Falcon (for example, from the host system). The only way
+ * to enable this mode is by loading microcode that has been signed by NVIDIA.
+ * (The loading process involves tagging the IMEM block as secure, writing the
+ * signature into a Falcon register, and starting execution. The hardware will
+ * validate the signature, and if valid, grant HS privileges.)
+ *
+ * - Light Secure (LS). In this mode, the microprocessor has more privileges
+ * than NS but fewer than HS. Some of the microprocessor state is visible to
+ * host software to ease debugging. The only way to enable this mode is by HS
+ * microcode enabling LS mode. Some privileges available to HS mode are not
+ * available here. LS mode is introduced in GM20x.
+ *
+ * Secure boot consists of temporarily switching an HS-capable falcon (typically
+ * PMU) into HS mode in order to validate the LS firmwares of managed falcons,
+ * load them, and switch managed falcons into LS mode. Once secure boot
+ * completes, no falcon remains in HS mode.
+ *
+ * Secure boot requires a write-protected memory region (WPR) which can only be
+ * written by the secure falcon. On dGPU, the driver sets up the WPR region in
+ * video memory. On Tegra, it is set up by the bootloader and its location and
+ * size written into memory controller registers.
+ *
+ * The secure boot process takes place as follows:
+ *
+ * 1) A LS blob is constructed that contains all the LS firmwares we want to
+ * load, along with their signatures and bootloaders.
+ *
+ * 2) An HS blob (also called ACR) is created that contains the signed HS
+ * firmware in charge of loading the LS firmwares into their respective
+ * falcons.
+ *
+ * 3) The HS blob is loaded (via its own bootloader) and executed on the
+ * HS-capable falcon. It authenticates itself, switches the secure falcon to
+ * HS mode and sets up the WPR region around the LS blob (dGPU) or copies the
+ * LS blob into the WPR region (Tegra).
+ *
+ * 4) The LS blob is now secure from all external tampering. The HS falcon
+ * checks the signatures of the LS firmwares and, if valid, switches the
+ * managed falcons to LS mode and makes them ready to run the LS firmware.
+ *
+ * 5) The managed falcons remain in LS mode and can be started.
+ *
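+ * In the code below, the "LS blob structures" (lsf_*, ls_ucode_*)
+ * correspond to step 1 and the "HS blob structures" (hsf_*) to step 2.
+ * Steps 3-5 are, roughly speaking, driven through the
+ * nvkm_secboot_reset() and nvkm_secboot_start() entry points in base.c.
+ *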
+ */
+
+#include "priv.h"
+
+#include <core/gpuobj.h>
+#include <core/firmware.h>
+#include <subdev/fb.h>
+
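+/*
+ * DMA context indexes the falcon uses for its transfers: ucode load,
+ * virtual memory, and the three physical targets (vidmem, coherent and
+ * non-coherent sysmem).
+ */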
+enum {
+ FALCON_DMAIDX_UCODE = 0,
+ FALCON_DMAIDX_VIRT = 1,
+ FALCON_DMAIDX_PHYS_VID = 2,
+ FALCON_DMAIDX_PHYS_SYS_COH = 3,
+ FALCON_DMAIDX_PHYS_SYS_NCOH = 4,
+};
+
+/**
+ * struct fw_bin_header - header of firmware files
+ * @bin_magic: always 0x3b1d14f0
+ * @bin_ver: version of the bin format
+ * @bin_size: entire image size including this header
+ * @header_offset: offset of the firmware/bootloader header in the file
+ * @data_offset: offset of the firmware/bootloader payload in the file
+ * @data_size: size of the payload
+ *
+ * This header is located at the beginning of the HS firmware and HS bootloader
+ * files, to describe where the headers and data can be found.
+ */
+struct fw_bin_header {
+ u32 bin_magic;
+ u32 bin_ver;
+ u32 bin_size;
+ u32 header_offset;
+ u32 data_offset;
+ u32 data_size;
+};
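+
+/*
+ * Minimal usage sketch (illustrative only): given the raw bytes of such
+ * a firmware file in 'data', the header and payload are located as
+ *
+ *   const struct fw_bin_header *hdr = (const void *)data;
+ *   const void *payload = data + hdr->data_offset;
+ *
+ * with hdr->data_size giving the payload length.
+ */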
+
+/**
+ * struct fw_bl_desc - firmware bootloader descriptor
+ * @start_tag: starting tag of bootloader
+ * @dmem_load_off: DMEM offset of flcn_bl_dmem_desc
+ * @code_off: offset of code section
+ * @code_size: size of code section
+ * @data_off: offset of data section
+ * @data_size: size of data section
+ *
+ * This structure is embedded in bootloader firmware files to describe the
+ * IMEM and DMEM layout expected by the bootloader.
+ */
+struct fw_bl_desc {
+ u32 start_tag;
+ u32 dmem_load_off;
+ u32 code_off;
+ u32 code_size;
+ u32 data_off;
+ u32 data_size;
+};
+
+
+/*
+ *
+ * LS blob structures
+ *
+ */
+
+/**
+ * struct lsf_ucode_desc - LS falcon signatures
+ * @prd_keys: signature to use when the GPU is in production mode
+ * @dbg_keys: signature to use when the GPU is in debug mode
+ * @b_prd_present: whether the production key is present
+ * @b_dbg_present: whether the debug key is present
+ * @falcon_id: ID of the falcon the ucode applies to
+ *
+ * Directly loaded from a signature file.
+ */
+struct lsf_ucode_desc {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+};
+
+/**
+ * struct lsf_lsb_header - LS firmware header
+ * @signature: signature to verify the firmware against
+ * @ucode_off: offset of the ucode blob in the WPR region. The ucode
+ * blob contains the bootloader, code and data of the
+ * LS falcon
+ * @ucode_size: size of the ucode blob, including bootloader
+ * @data_size: size of the ucode blob data
+ * @bl_code_size: size of the bootloader code
+ * @bl_imem_off: offset in imem of the bootloader
+ * @bl_data_off: offset of the bootloader data in WPR region
+ * @bl_data_size: size of the bootloader data
+ * @app_code_off: offset of the app code relative to ucode_off
+ * @app_code_size: size of the app code
+ * @app_data_off: offset of the app data relative to ucode_off
+ * @app_data_size: size of the app data
+ * @flags: flags for the secure bootloader
+ *
+ * This structure is written into the WPR region for each managed falcon. Each
+ * instance is referenced by the lsb_offset member of the corresponding
+ * lsf_wpr_header.
+ */
+struct lsf_lsb_header {
+ struct lsf_ucode_desc signature;
+ u32 ucode_off;
+ u32 ucode_size;
+ u32 data_size;
+ u32 bl_code_size;
+ u32 bl_imem_off;
+ u32 bl_data_off;
+ u32 bl_data_size;
+ u32 app_code_off;
+ u32 app_code_size;
+ u32 app_data_off;
+ u32 app_data_size;
+ u32 flags;
+#define LSF_FLAG_LOAD_CODE_AT_0 1
+#define LSF_FLAG_DMACTL_REQ_CTX 4
+#define LSF_FLAG_FORCE_PRIV_LOAD 8
+};
+
+/**
+ * struct lsf_wpr_header - LS blob WPR Header
+ * @falcon_id: LS falcon ID
+ * @lsb_offset: offset of the lsb_lsf_header in the WPR region
+ * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
+ * @lazy_bootstrap: skip bootstrapping by ACR
+ * @status: bootstrapping status
+ *
+ * An array of these is written at the beginning of the WPR region, one for
+ * each managed falcon. The array is terminated by an instance whose falcon_id
+ * is LSF_FALCON_ID_INVALID.
+ */
+struct lsf_wpr_header {
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+ u32 status;
+#define LSF_IMAGE_STATUS_NONE 0
+#define LSF_IMAGE_STATUS_COPY 1
+#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
+#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
+#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
+#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
+#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
+};
+
+
+/**
+ * struct ls_ucode_img_desc - descriptor of firmware image
+ * @descriptor_size: size of this descriptor
+ * @image_size: size of the whole image
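+ * @tools_version: version of the tools used to generate the image
+ * @app_version: version of the application firmware
+ * @date: build date of the image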
+ * @bootloader_start_offset: start offset of the bootloader in ucode image
+ * @bootloader_size: size of the bootloader
+ * @bootloader_imem_offset: start offset of the bootloader in IMEM
+ * @bootloader_entry_point: entry point of the bootloader in IMEM
+ * @app_start_offset: start offset of the LS firmware
+ * @app_size: size of the LS firmware's code and data
+ * @app_imem_offset: offset of the app in IMEM
+ * @app_imem_entry: entry point of the app in IMEM
+ * @app_dmem_offset: offset of the data in DMEM
+ * @app_resident_code_offset: offset of app code from app_start_offset
+ * @app_resident_code_size: size of the code
+ * @app_resident_data_offset: offset of data from app_start_offset
+ * @app_resident_data_size: size of data
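+ * @nb_overlays: number of overlays to load
+ * @load_ovl: start offset and size of each overlay
+ * @compressed: whether the image data is compressed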
+ *
+ * A firmware image contains the code, data, and bootloader of a given LS
+ * falcon in a single blob. This structure describes where everything is.
+ *
+ * This can be generated from a (bootloader, code, data) set if they have
+ * been loaded separately, or come directly from a file.
+ */
+struct ls_ucode_img_desc {
+ u32 descriptor_size;
+ u32 image_size;
+ u32 tools_version;
+ u32 app_version;
+ char date[64];
+ u32 bootloader_start_offset;
+ u32 bootloader_size;
+ u32 bootloader_imem_offset;
+ u32 bootloader_entry_point;
+ u32 app_start_offset;
+ u32 app_size;
+ u32 app_imem_offset;
+ u32 app_imem_entry;
+ u32 app_dmem_offset;
+ u32 app_resident_code_offset;
+ u32 app_resident_code_size;
+ u32 app_resident_data_offset;
+ u32 app_resident_data_size;
+ u32 nb_overlays;
+	struct { u32 start; u32 size; } load_ovl[64];
+ u32 compressed;
+};
+
+/**
+ * struct ls_ucode_img - temporary storage for loaded LS firmwares
+ * @node: list node to link within an ls_ucode_mgr
+ * @falcon_id: ID of the falcon this LS firmware is for
+ * @ucode_desc: loaded or generated map of ucode_data
+ * @ucode_header: header of the firmware
+ * @ucode_data: firmware payload (code and data)
+ * @ucode_size: size in bytes of data in ucode_data
+ * @wpr_header: WPR header to be written to the LS blob
+ * @lsb_header: LSB header to be written to the LS blob
+ *
+ * Preparing the WPR LS blob requires information about all the LS firmwares
+ * (size, etc) to be known. This structure contains all the data of one LS
+ * firmware.
+ */
+struct ls_ucode_img {
+ struct list_head node;
+ enum nvkm_secboot_falcon falcon_id;
+
+ struct ls_ucode_img_desc ucode_desc;
+ u32 *ucode_header;
+ u8 *ucode_data;
+ u32 ucode_size;
+
+ struct lsf_wpr_header wpr_header;
+ struct lsf_lsb_header lsb_header;
+};
+
+/**
+ * struct ls_ucode_mgr - manager for all LS falcon firmwares
+ * @count: number of managed LS falcons
+ * @wpr_size: size of the required WPR region in bytes
+ * @img_list: linked list of ls_ucode_img
+ */
+struct ls_ucode_mgr {
+ u16 count;
+ u32 wpr_size;
+ struct list_head img_list;
+};
+
+
+/*
+ *
+ * HS blob structures
+ *
+ */
+
+/**
+ * struct hsf_fw_header - HS firmware descriptor
+ * @sig_dbg_offset: offset of the debug signature
+ * @sig_dbg_size: size of the debug signature
+ * @sig_prod_offset: offset of the production signature
+ * @sig_prod_size: size of the production signature
+ * @patch_loc: offset of the offset (sic) of where the signature is
+ * @patch_sig: offset of the offset (sic) to add to sig_*_offset
+ * @hdr_offset: offset of the load header (see struct hs_load_header)
+ * @hdr_size: size of above header
+ *
+ * This structure is embedded in the HS firmware image at
+ * hs_bin_hdr.header_offset.
+ */
+struct hsf_fw_header {
+ u32 sig_dbg_offset;
+ u32 sig_dbg_size;
+ u32 sig_prod_offset;
+ u32 sig_prod_size;
+ u32 patch_loc;
+ u32 patch_sig;
+ u32 hdr_offset;
+ u32 hdr_size;
+};
+
+/**
+ * struct hsf_load_header - HS firmware load header
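+ * @non_sec_code_off: offset of the non-secure code section
+ * @non_sec_code_size: size of the non-secure code section
+ * @data_dma_base: offset of the data section within the firmware blob
+ * @data_size: size of the data section
+ * @num_apps: number of app entries following this header
+ * @app: offset and size of each secure code section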
+ */
+struct hsf_load_header {
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 num_apps;
+ struct {
+ u32 sec_code_off;
+ u32 sec_code_size;
+ } app[0];
+};
+
+/**
+ * gm200_secboot_load_firmware() - load a firmware file into a new buffer
+ *
+ * Convenience function to duplicate a firmware file in memory and check that
+ * it has the required minimum size.
+ */
+static void *
+gm200_secboot_load_firmware(struct nvkm_subdev *subdev, const char *name,
+ size_t min_size)
+{
+ const struct firmware *fw;
+ void *blob;
+ int ret;
+
+ ret = nvkm_firmware_get(subdev->device, name, &fw);
+ if (ret)
+ return ERR_PTR(ret);
+ if (fw->size < min_size) {
+ nvkm_error(subdev, "%s is smaller than expected size %zu\n",
+ name, min_size);
+ nvkm_firmware_put(fw);
+ return ERR_PTR(-EINVAL);
+ }
+ blob = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ nvkm_firmware_put(fw);
+ if (!blob)
+ return ERR_PTR(-ENOMEM);
+
+ return blob;
+}
+
+
+/*
+ * Low-secure blob creation
+ */
+
+#define BL_DESC_BLK_SIZE 256
+/**
+ * ls_ucode_img_build() - build a ucode image and descriptor
+ * @bl: bootloader image, including 16-byte descriptor
+ * @code: LS firmware code segment
+ * @data: LS firmware data segment
+ * @desc: ucode descriptor to be written
+ *
+ * Build a single ucode image from the provided bootloader, code and data
+ * segments.
+ *
+ * Return: allocated ucode image with corresponding descriptor information.
+ * desc is also updated to contain the right offsets within the returned
+ * image.
+ */
+static void *
+ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
+ const struct firmware *data, struct ls_ucode_img_desc *desc)
+{
+ struct fw_bin_header *bin_hdr = (void *)bl->data;
+ struct fw_bl_desc *bl_desc = (void *)bl->data + bin_hdr->header_offset;
+ void *bl_data = (void *)bl->data + bin_hdr->data_offset;
+ u32 pos = 0;
+ void *image;
+
+ desc->bootloader_start_offset = pos;
+ desc->bootloader_size = ALIGN(bl_desc->code_size, sizeof(u32));
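+	/* the bootloader start tag counts 256-byte IMEM blocks */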
+ desc->bootloader_imem_offset = bl_desc->start_tag * 256;
+ desc->bootloader_entry_point = bl_desc->start_tag * 256;
+
+ pos = ALIGN(pos + desc->bootloader_size, BL_DESC_BLK_SIZE);
+ desc->app_start_offset = pos;
+ desc->app_size = ALIGN(code->size, BL_DESC_BLK_SIZE) +
+ ALIGN(data->size, BL_DESC_BLK_SIZE);
+ desc->app_imem_offset = 0;
+ desc->app_imem_entry = 0;
+ desc->app_dmem_offset = 0;
+ desc->app_resident_code_offset = 0;
+ desc->app_resident_code_size = ALIGN(code->size, BL_DESC_BLK_SIZE);
+
+ pos = ALIGN(pos + desc->app_resident_code_size, BL_DESC_BLK_SIZE);
+ desc->app_resident_data_offset = pos - desc->app_start_offset;
+ desc->app_resident_data_size = ALIGN(data->size, BL_DESC_BLK_SIZE);
+
+ desc->image_size = ALIGN(bl_desc->code_size, BL_DESC_BLK_SIZE) +
+ desc->app_size;
+
+ image = kzalloc(desc->image_size, GFP_KERNEL);
+ if (!image)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(image + desc->bootloader_start_offset, bl_data,
+ bl_desc->code_size);
+ memcpy(image + desc->app_start_offset, code->data, code->size);
+ memcpy(image + desc->app_start_offset + desc->app_resident_data_offset,
+ data->data, data->size);
+
+ return image;
+}
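+
+/*
+ * The resulting image is laid out as follows:
+ *
+ *   0                                           : bootloader code
+ *   ALIGN(bootloader_size, BL_DESC_BLK_SIZE)    : LS firmware code
+ *   app_start_offset + app_resident_data_offset : LS firmware data
+ */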
+
+/**
+ * ls_ucode_img_load_generic() - load and prepare a LS ucode image
+ *
+ * Load the LS microcode, bootloader and signature and pack them into a single
+ * blob. Also generate the corresponding ucode descriptor.
+ */
+static int
+ls_ucode_img_load_generic(struct nvkm_subdev *subdev,
+ struct ls_ucode_img *img, const char *falcon_name,
+ const u32 falcon_id)
+{
+ const struct firmware *bl, *code, *data;
+ struct lsf_ucode_desc *lsf_desc;
+ char f[64];
+ int ret;
+
+ img->ucode_header = NULL;
+
+ snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
+ ret = nvkm_firmware_get(subdev->device, f, &bl);
+ if (ret)
+ goto error;
+
+ snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
+ ret = nvkm_firmware_get(subdev->device, f, &code);
+ if (ret)
+ goto free_bl;
+
+ snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
+ ret = nvkm_firmware_get(subdev->device, f, &data);
+ if (ret)
+ goto free_inst;
+
+ img->ucode_data = ls_ucode_img_build(bl, code, data,
+ &img->ucode_desc);
+ if (IS_ERR(img->ucode_data)) {
+ ret = PTR_ERR(img->ucode_data);
+ goto free_data;
+ }
+ img->ucode_size = img->ucode_desc.image_size;
+
+ snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
+ lsf_desc = gm200_secboot_load_firmware(subdev, f, sizeof(*lsf_desc));
+ if (IS_ERR(lsf_desc)) {
+ ret = PTR_ERR(lsf_desc);
+ goto free_image;
+ }
+ /* not needed? the signature should already have the right value */
+ lsf_desc->falcon_id = falcon_id;
+ memcpy(&img->lsb_header.signature, lsf_desc, sizeof(*lsf_desc));
+ img->falcon_id = lsf_desc->falcon_id;
+ kfree(lsf_desc);
+
+ /* success path - only free requested firmware files */
+ goto free_data;
+
+free_image:
+ kfree(img->ucode_data);
+free_data:
+ nvkm_firmware_put(data);
+free_inst:
+ nvkm_firmware_put(code);
+free_bl:
+ nvkm_firmware_put(bl);
+error:
+ return ret;
+}
+
+typedef int (*lsf_load_func)(struct nvkm_subdev *, struct ls_ucode_img *);
+
+static int
+ls_ucode_img_load_fecs(struct nvkm_subdev *subdev, struct ls_ucode_img *img)
+{
+ return ls_ucode_img_load_generic(subdev, img, "fecs",
+ NVKM_SECBOOT_FALCON_FECS);
+}
+
+static int
+ls_ucode_img_load_gpccs(struct nvkm_subdev *subdev, struct ls_ucode_img *img)
+{
+ return ls_ucode_img_load_generic(subdev, img, "gpccs",
+ NVKM_SECBOOT_FALCON_GPCCS);
+}
+
+/**
+ * ls_ucode_img_load() - create an ls_ucode_img and load it
+ */
+static struct ls_ucode_img *
+ls_ucode_img_load(struct nvkm_subdev *subdev, lsf_load_func load_func)
+{
+ struct ls_ucode_img *img;
+ int ret;
+
+ img = kzalloc(sizeof(*img), GFP_KERNEL);
+ if (!img)
+ return ERR_PTR(-ENOMEM);
+
+ ret = load_func(subdev, img);
+ if (ret) {
+ kfree(img);
+ return ERR_PTR(ret);
+ }
+
+ return img;
+}
+
+static const lsf_load_func lsf_load_funcs[] = {
+ [NVKM_SECBOOT_FALCON_END] = NULL, /* reserve enough space */
+ [NVKM_SECBOOT_FALCON_FECS] = ls_ucode_img_load_fecs,
+ [NVKM_SECBOOT_FALCON_GPCCS] = ls_ucode_img_load_gpccs,
+};
+
+/**
+ * ls_ucode_img_populate_bl_desc() - populate a DMEM BL descriptor for LS image
+ * @img: ucode image to generate against
+ * @wpr_addr: physical base address of the WPR region
+ * @desc: descriptor to populate
+ *
+ * Populate the DMEM BL descriptor with the information contained in a
+ * ls_ucode_img_desc.
+ */
+static void
+ls_ucode_img_populate_bl_desc(struct ls_ucode_img *img, u64 wpr_addr,
+ struct gm200_flcn_bl_desc *desc)
+{
+ struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ u64 addr_base;
+
+ addr_base = wpr_addr + img->lsb_header.ucode_off +
+ pdesc->app_start_offset;
+
+ memset(desc, 0, sizeof(*desc));
+ desc->ctx_dma = FALCON_DMAIDX_UCODE;
+ desc->code_dma_base.lo = lower_32_bits(
+ (addr_base + pdesc->app_resident_code_offset));
+ desc->code_dma_base.hi = upper_32_bits(
+ (addr_base + pdesc->app_resident_code_offset));
+ desc->non_sec_code_size = pdesc->app_resident_code_size;
+ desc->data_dma_base.lo = lower_32_bits(
+ (addr_base + pdesc->app_resident_data_offset));
+ desc->data_dma_base.hi = upper_32_bits(
+ (addr_base + pdesc->app_resident_data_offset));
+ desc->data_size = pdesc->app_resident_data_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+}
+
+#define LSF_LSB_HEADER_ALIGN 256
+#define LSF_BL_DATA_ALIGN 256
+#define LSF_BL_DATA_SIZE_ALIGN 256
+#define LSF_BL_CODE_SIZE_ALIGN 256
+#define LSF_UCODE_DATA_ALIGN 4096
+
+/**
+ * ls_ucode_img_fill_headers - fill the WPR and LSB headers of an image
+ * @gsb: secure boot device used
+ * @img: image to generate for
+ * @offset: offset in the WPR region where this image starts
+ *
+ * Allocate space in the WPR area from offset and write the WPR and LSB headers
+ * accordingly.
+ *
+ * Return: offset at the end of this image.
+ */
+static u32
+ls_ucode_img_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_img *img,
+ u32 offset)
+{
+ struct lsf_wpr_header *whdr = &img->wpr_header;
+ struct lsf_lsb_header *lhdr = &img->lsb_header;
+ struct ls_ucode_img_desc *desc = &img->ucode_desc;
+
+ if (img->ucode_header) {
+ nvkm_fatal(&gsb->base.subdev,
+			   "images without loader are not supported yet!\n");
+ return offset;
+ }
+
+ /* Fill WPR header */
+ whdr->falcon_id = img->falcon_id;
+ whdr->bootstrap_owner = gsb->base.func->boot_falcon;
+ whdr->status = LSF_IMAGE_STATUS_COPY;
+
+ /* Align, save off, and include an LSB header size */
+ offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
+ whdr->lsb_offset = offset;
+ offset += sizeof(struct lsf_lsb_header);
+
+ /*
+ * Align, save off, and include the original (static) ucode
+ * image size
+ */
+ offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
+ lhdr->ucode_off = offset;
+ offset += img->ucode_size;
+
+ /*
+ * For falcons that use a boot loader (BL), we append a loader
+ * desc structure on the end of the ucode image and consider
+ * this the boot loader data. The host will then copy the loader
+ * desc args to this space within the WPR region (before locking
+ * down) and the HS bin will then copy them to DMEM 0 for the
+ * loader.
+ */
+ lhdr->bl_code_size = ALIGN(desc->bootloader_size,
+ LSF_BL_CODE_SIZE_ALIGN);
+ lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
+ LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
+ lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
+ lhdr->bl_code_size - lhdr->ucode_size;
+ /*
+	 * Though the BL is located at offset 0 of the image, its VA
+	 * is different to make sure that it doesn't collide with the
+	 * actual OS VA range
+ */
+ lhdr->bl_imem_off = desc->bootloader_imem_offset;
+ lhdr->app_code_off = desc->app_start_offset +
+ desc->app_resident_code_offset;
+ lhdr->app_code_size = desc->app_resident_code_size;
+ lhdr->app_data_off = desc->app_start_offset +
+ desc->app_resident_data_offset;
+ lhdr->app_data_size = desc->app_resident_data_size;
+
+ lhdr->flags = 0;
+ if (img->falcon_id == gsb->base.func->boot_falcon)
+ lhdr->flags = LSF_FLAG_DMACTL_REQ_CTX;
+
+ /* GPCCS will be loaded using PRI */
+ if (img->falcon_id == NVKM_SECBOOT_FALCON_GPCCS)
+ lhdr->flags |= LSF_FLAG_FORCE_PRIV_LOAD;
+
+ /* Align (size bloat) and save off BL descriptor size */
+ lhdr->bl_data_size = ALIGN(sizeof(struct gm200_flcn_bl_desc),
+ LSF_BL_DATA_SIZE_ALIGN);
+ /*
+ * Align, save off, and include the additional BL data
+ */
+ offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
+ lhdr->bl_data_off = offset;
+ offset += lhdr->bl_data_size;
+
+ return offset;
+}
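+
+/*
+ * Each image thus occupies three areas of the WPR region, in order: its LSB
+ * header (256-byte aligned), its ucode blob (4096-byte aligned) and its BL
+ * data (256-byte aligned). The array of WPR headers itself lives at the very
+ * beginning of the region.
+ */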
+
+static void
+ls_ucode_mgr_init(struct ls_ucode_mgr *mgr)
+{
+ memset(mgr, 0, sizeof(*mgr));
+ INIT_LIST_HEAD(&mgr->img_list);
+}
+
+static void
+ls_ucode_mgr_cleanup(struct ls_ucode_mgr *mgr)
+{
+ struct ls_ucode_img *img, *t;
+
+ list_for_each_entry_safe(img, t, &mgr->img_list, node) {
+ kfree(img->ucode_data);
+ kfree(img->ucode_header);
+ kfree(img);
+ }
+}
+
+static void
+ls_ucode_mgr_add_img(struct ls_ucode_mgr *mgr, struct ls_ucode_img *img)
+{
+ mgr->count++;
+ list_add_tail(&img->node, &mgr->img_list);
+}
+
+/**
+ * ls_ucode_mgr_fill_headers - fill WPR and LSB headers of all managed images
+ */
+static void
+ls_ucode_mgr_fill_headers(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr)
+{
+ struct ls_ucode_img *img;
+ u32 offset;
+
+ /*
+ * Start with an array of WPR headers at the base of the WPR.
+ * The expectation here is that the secure falcon will do a single DMA
+ * read of this array and cache it internally so it's ok to pack these.
+ * Also, we add 1 to the falcon count to indicate the end of the array.
+ */
+ offset = sizeof(struct lsf_wpr_header) * (mgr->count + 1);
+
+ /*
+ * Walk the managed falcons, accounting for the LSB structs
+ * as well as the ucode images.
+ */
+ list_for_each_entry(img, &mgr->img_list, node) {
+ offset = ls_ucode_img_fill_headers(gsb, img, offset);
+ }
+
+ mgr->wpr_size = offset;
+}
+
+/**
+ * ls_ucode_mgr_write_wpr - write the WPR blob contents
+ */
+static int
+ls_ucode_mgr_write_wpr(struct gm200_secboot *gsb, struct ls_ucode_mgr *mgr,
+ struct nvkm_gpuobj *wpr_blob)
+{
+ struct ls_ucode_img *img;
+ u32 pos = 0;
+
+ nvkm_kmap(wpr_blob);
+
+ list_for_each_entry(img, &mgr->img_list, node) {
+ nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
+ sizeof(img->wpr_header));
+
+ nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
+ &img->lsb_header, sizeof(img->lsb_header));
+
+ /* Generate and write BL descriptor */
+ if (!img->ucode_header) {
+ u8 desc[gsb->func->bl_desc_size];
+ struct gm200_flcn_bl_desc gdesc;
+
+ ls_ucode_img_populate_bl_desc(img, gsb->wpr_addr,
+ &gdesc);
+ gsb->func->fixup_bl_desc(&gdesc, &desc);
+ nvkm_gpuobj_memcpy_to(wpr_blob,
+ img->lsb_header.bl_data_off,
+ &desc, gsb->func->bl_desc_size);
+ }
+
+ /* Copy ucode */
+ nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
+ img->ucode_data, img->ucode_size);
+
+ pos += sizeof(img->wpr_header);
+ }
+
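+	/* terminate the WPR header array with an invalid falcon ID */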
+ nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
+
+ nvkm_done(wpr_blob);
+
+ return 0;
+}
+
+/* Both size and address of WPR need to be 128K-aligned */
+#define WPR_ALIGNMENT 0x20000
+/**
+ * gm200_secboot_prepare_ls_blob() - prepare the LS blob
+ *
+ * For each securely managed falcon, load the FW, signatures and bootloaders and
+ * prepare a ucode blob. Then, compute the offsets in the WPR region for each
+ * blob, and finally write the headers and ucode blobs into a GPU object that
+ * will be copied into the WPR region by the HS firmware.
+ */
+static int
+gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb)
+{
+ struct nvkm_secboot *sb = &gsb->base;
+ struct nvkm_device *device = sb->subdev.device;
+ struct ls_ucode_mgr mgr;
+ int falcon_id;
+ int ret;
+
+ ls_ucode_mgr_init(&mgr);
+
+ /* Load all LS blobs */
+ for_each_set_bit(falcon_id, &gsb->base.func->managed_falcons,
+ NVKM_SECBOOT_FALCON_END) {
+ struct ls_ucode_img *img;
+
+ img = ls_ucode_img_load(&sb->subdev, lsf_load_funcs[falcon_id]);
+
+ if (IS_ERR(img)) {
+ ret = PTR_ERR(img);
+ goto cleanup;
+ }
+ ls_ucode_mgr_add_img(&mgr, img);
+ }
+
+ /*
+ * Fill the WPR and LSF headers with the right offsets and compute
+ * required WPR size
+ */
+ ls_ucode_mgr_fill_headers(gsb, &mgr);
+ mgr.wpr_size = ALIGN(mgr.wpr_size, WPR_ALIGNMENT);
+
+ /* Allocate GPU object that will contain the WPR region */
+ ret = nvkm_gpuobj_new(device, mgr.wpr_size, WPR_ALIGNMENT, false, NULL,
+ &gsb->ls_blob);
+ if (ret)
+ goto cleanup;
+
+ nvkm_debug(&sb->subdev, "%d managed LS falcons, WPR size is %d bytes\n",
+ mgr.count, mgr.wpr_size);
+
+ /* If WPR address and size are not fixed, set them to fit the LS blob */
+ if (!gsb->wpr_size) {
+ gsb->wpr_addr = gsb->ls_blob->addr;
+ gsb->wpr_size = gsb->ls_blob->size;
+ }
+
+ /* Write LS blob */
+ ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
+
+cleanup:
+ ls_ucode_mgr_cleanup(&mgr);
+
+ return ret;
+}
+
+/*
+ * High-secure blob creation
+ */
+
+/**
+ * gm200_secboot_hsf_patch_signature() - patch HS blob with correct signature
+ */
+static void
+gm200_secboot_hsf_patch_signature(struct gm200_secboot *gsb, void *acr_image)
+{
+ struct nvkm_secboot *sb = &gsb->base;
+ struct fw_bin_header *hsbin_hdr = acr_image;
+ struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
+ void *hs_data = acr_image + hsbin_hdr->data_offset;
+ void *sig;
+ u32 sig_size;
+
+ /* Falcon in debug or production mode? */
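+	/* pick the signature matching the falcon's current operating mode */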
+ if ((nvkm_rd32(sb->subdev.device, sb->base + 0xc08) >> 20) & 0x1) {
+ sig = acr_image + fw_hdr->sig_dbg_offset;
+ sig_size = fw_hdr->sig_dbg_size;
+ } else {
+ sig = acr_image + fw_hdr->sig_prod_offset;
+ sig_size = fw_hdr->sig_prod_size;
+ }
+
+ /* Patch signature */
+ memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
+}
+
+/**
+ * gm200_secboot_populate_hsf_bl_desc() - populate BL descriptor for HS image
+ */
+static void
+gm200_secboot_populate_hsf_bl_desc(void *acr_image,
+ struct gm200_flcn_bl_desc *bl_desc)
+{
+ struct fw_bin_header *hsbin_hdr = acr_image;
+ struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
+ struct hsf_load_header *load_hdr = acr_image + fw_hdr->hdr_offset;
+
+ /*
+ * Descriptor for the bootloader that will load the ACR image into
+ * IMEM/DMEM memory.
+ */
+ memset(bl_desc, 0, sizeof(*bl_desc));
+ bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
+ bl_desc->non_sec_code_off = load_hdr->non_sec_code_off;
+ bl_desc->non_sec_code_size = load_hdr->non_sec_code_size;
+ bl_desc->sec_code_off = load_hdr->app[0].sec_code_off;
+ bl_desc->sec_code_size = load_hdr->app[0].sec_code_size;
+ bl_desc->code_entry_point = 0;
+ /*
+ * We need to set code_dma_base to the virtual address of the acr_blob,
+ * and add this address to data_dma_base before writing it into DMEM
+ */
+ bl_desc->code_dma_base.lo = 0;
+ bl_desc->data_dma_base.lo = load_hdr->data_dma_base;
+ bl_desc->data_size = load_hdr->data_size;
+}
+
+/**
+ * gm200_secboot_prepare_hs_blob - load and prepare a HS blob and BL descriptor
+ *
+ * @gsb: secure boot instance to prepare for
+ * @fw: name of the HS firmware to load
+ * @blob: pointer to gpuobj that will be allocated to receive the HS FW payload
+ * @bl_desc: pointer to the BL descriptor to write for this firmware
+ * @patch: whether we should patch the HS descriptor (only for HS loaders)
+ */
+static int
+gm200_secboot_prepare_hs_blob(struct gm200_secboot *gsb, const char *fw,
+ struct nvkm_gpuobj **blob,
+ struct gm200_flcn_bl_desc *bl_desc, bool patch)
+{
+ struct nvkm_subdev *subdev = &gsb->base.subdev;
+ void *acr_image;
+ struct fw_bin_header *hsbin_hdr;
+ struct hsf_fw_header *fw_hdr;
+ void *acr_data;
+ struct hsf_load_header *load_hdr;
+ struct hsflcn_acr_desc *desc;
+ int ret;
+
+ acr_image = gm200_secboot_load_firmware(subdev, fw, 0);
+ if (IS_ERR(acr_image))
+ return PTR_ERR(acr_image);
+ hsbin_hdr = acr_image;
+
+ /* Patch signature */
+ gm200_secboot_hsf_patch_signature(gsb, acr_image);
+
+ acr_data = acr_image + hsbin_hdr->data_offset;
+
+ /* Patch descriptor? */
+ if (patch) {
+ fw_hdr = acr_image + hsbin_hdr->header_offset;
+ load_hdr = acr_image + fw_hdr->hdr_offset;
+ desc = acr_data + load_hdr->data_dma_base;
+ gsb->func->fixup_hs_desc(gsb, desc);
+ }
+
+ /* Generate HS BL descriptor */
+ gm200_secboot_populate_hsf_bl_desc(acr_image, bl_desc);
+
+ /* Create ACR blob and copy HS data to it */
+ ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
+ 0x1000, false, NULL, blob);
+ if (ret)
+ goto cleanup;
+
+ nvkm_kmap(*blob);
+ nvkm_gpuobj_memcpy_to(*blob, 0, acr_data, hsbin_hdr->data_size);
+ nvkm_done(*blob);
+
+cleanup:
+ kfree(acr_image);
+
+ return ret;
+}
+
+/*
+ * High-secure bootloader blob creation
+ */
+
+static int
+gm200_secboot_prepare_hsbl_blob(struct gm200_secboot *gsb)
+{
+ struct nvkm_subdev *subdev = &gsb->base.subdev;
+
+ gsb->hsbl_blob = gm200_secboot_load_firmware(subdev, "acr/bl", 0);
+ if (IS_ERR(gsb->hsbl_blob)) {
+ int ret = PTR_ERR(gsb->hsbl_blob);
+
+ gsb->hsbl_blob = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * gm20x_secboot_prepare_blobs - load blobs common to all GM20X GPUs.
+ *
+ * This includes the LS blob, HS ucode loading blob, and HS bootloader.
+ *
+ * The HS ucode unload blob is only used on dGPU.
+ */
+int
+gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb)
+{
+ int ret;
+
+ /* Load and prepare the managed falcon's firmwares */
+ ret = gm200_secboot_prepare_ls_blob(gsb);
+ if (ret)
+ return ret;
+
+ /* Load the HS firmware that will load the LS firmwares */
+ ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
+ &gsb->acr_load_blob,
+ &gsb->acr_load_bl_desc, true);
+ if (ret)
+ return ret;
+
+ /* Load the HS firmware bootloader */
+ ret = gm200_secboot_prepare_hsbl_blob(gsb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+ int ret;
+
+ ret = gm20x_secboot_prepare_blobs(gsb);
+ if (ret)
+ return ret;
+
+ /* dGPU only: load the HS firmware that unprotects the WPR region */
+ ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
+ &gsb->acr_unload_blob,
+ &gsb->acr_unload_bl_desc, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+
+
+/*
+ * Secure Boot Execution
+ */
+
+/**
+ * gm200_secboot_load_hs_bl() - load HS bootloader into DMEM and IMEM
+ */
+static void
+gm200_secboot_load_hs_bl(struct gm200_secboot *gsb, void *data, u32 data_size)
+{
+ struct nvkm_device *device = gsb->base.subdev.device;
+ struct fw_bin_header *hdr = gsb->hsbl_blob;
+ struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset;
+ void *blob_data = gsb->hsbl_blob + hdr->data_offset;
+ void *hsbl_code = blob_data + hsbl_desc->code_off;
+ void *hsbl_data = blob_data + hsbl_desc->data_off;
+ u32 code_size = ALIGN(hsbl_desc->code_size, 256);
+ const u32 base = gsb->base.base;
+ u32 blk;
+ u32 tag;
+ int i;
+
+ /*
+ * Copy HS bootloader data
+ */
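+	/* 0x1c0/0x1c4: DMEM control/data port, bit 24 enables auto-increment */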
+ nvkm_wr32(device, base + 0x1c0, (0x00000000 | (0x1 << 24)));
+ for (i = 0; i < hsbl_desc->data_size / 4; i++)
+ nvkm_wr32(device, base + 0x1c4, ((u32 *)hsbl_data)[i]);
+
+ /*
+ * Copy HS bootloader interface structure where the HS descriptor
+ * expects it to be
+ */
+ nvkm_wr32(device, base + 0x1c0,
+ (hsbl_desc->dmem_load_off | (0x1 << 24)));
+ for (i = 0; i < data_size / 4; i++)
+ nvkm_wr32(device, base + 0x1c4, ((u32 *)data)[i]);
+
+ /* Copy HS bootloader code to end of IMEM */
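+	/* 0x108 = HWCFG, bits 8:0 give the IMEM size in 256-byte blocks */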
+ blk = (nvkm_rd32(device, base + 0x108) & 0x1ff) - (code_size >> 8);
+ tag = hsbl_desc->start_tag;
+ nvkm_wr32(device, base + 0x180, ((blk & 0xff) << 8) | (0x1 << 24));
+ for (i = 0; i < code_size / 4; i++) {
+ /* write new tag every 256B */
+ if ((i & 0x3f) == 0) {
+ nvkm_wr32(device, base + 0x188, tag & 0xffff);
+ tag++;
+ }
+ nvkm_wr32(device, base + 0x184, ((u32 *)hsbl_code)[i]);
+ }
+ nvkm_wr32(device, base + 0x188, 0);
+}
+
+/**
+ * gm200_secboot_setup_falcon() - set up the secure falcon for secure boot
+ */
+static int
+gm200_secboot_setup_falcon(struct gm200_secboot *gsb)
+{
+ struct nvkm_device *device = gsb->base.subdev.device;
+ struct fw_bin_header *hdr = gsb->hsbl_blob;
+ struct fw_bl_desc *hsbl_desc = gsb->hsbl_blob + hdr->header_offset;
+ /* virtual start address for boot vector */
+ u32 virt_addr = hsbl_desc->start_tag << 8;
+ const u32 base = gsb->base.base;
+ const u32 reg_base = base + 0xe00;
+ u32 inst_loc;
+ int ret;
+
+ ret = nvkm_secboot_falcon_reset(&gsb->base);
+ if (ret)
+ return ret;
+
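+	/* aperture registers: bit 2 = physical addressing, bits 1:0 = target */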
+ /* setup apertures - virtual */
+ nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_UCODE), 0x4);
+ nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_VIRT), 0x0);
+ /* setup apertures - physical */
+ nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_VID), 0x4);
+ nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_COH),
+ 0x4 | 0x1);
+ nvkm_wr32(device, reg_base + 4 * (FALCON_DMAIDX_PHYS_SYS_NCOH),
+ 0x4 | 0x2);
+
+ /* Set context */
+ if (nvkm_memory_target(gsb->inst->memory) == NVKM_MEM_TARGET_VRAM)
+ inst_loc = 0x0; /* FB */
+ else
+ inst_loc = 0x3; /* Non-coherent sysmem */
+
+ nvkm_mask(device, base + 0x048, 0x1, 0x1);
+ nvkm_wr32(device, base + 0x480,
+ ((gsb->inst->addr >> 12) & 0xfffffff) |
+ (inst_loc << 28) | (1 << 30));
+
+ /* Set boot vector to code's starting virtual address */
+ nvkm_wr32(device, base + 0x104, virt_addr);
+
+ return 0;
+}
+
+/**
+ * gm200_secboot_run_hs_blob() - run the given high-secure blob
+ */
+static int
+gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob,
+ struct gm200_flcn_bl_desc *desc)
+{
+ struct nvkm_vma vma;
+ u64 vma_addr;
+ const u32 bl_desc_size = gsb->func->bl_desc_size;
+ u8 bl_desc[bl_desc_size];
+ int ret;
+
+ /* Map the HS firmware so the HS bootloader can see it */
+ ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
+ if (ret)
+ return ret;
+
+ /* Add the mapping address to the DMA bases */
+ vma_addr = flcn64_to_u64(desc->code_dma_base) + vma.offset;
+ desc->code_dma_base.lo = lower_32_bits(vma_addr);
+ desc->code_dma_base.hi = upper_32_bits(vma_addr);
+ vma_addr = flcn64_to_u64(desc->data_dma_base) + vma.offset;
+ desc->data_dma_base.lo = lower_32_bits(vma_addr);
+ desc->data_dma_base.hi = upper_32_bits(vma_addr);
+
+ /* Fixup the BL header */
+ gsb->func->fixup_bl_desc(desc, &bl_desc);
+
+ /* Reset the falcon and make it ready to run the HS bootloader */
+ ret = gm200_secboot_setup_falcon(gsb);
+ if (ret)
+ goto done;
+
+ /* Load the HS bootloader into the falcon's IMEM/DMEM */
+ gm200_secboot_load_hs_bl(gsb, &bl_desc, bl_desc_size);
+
+ /* Start the HS bootloader */
+	ret = nvkm_secboot_falcon_run(&gsb->base);
+
+done:
+ /* Restore the original DMA addresses */
+ vma_addr = flcn64_to_u64(desc->code_dma_base) - vma.offset;
+ desc->code_dma_base.lo = lower_32_bits(vma_addr);
+ desc->code_dma_base.hi = upper_32_bits(vma_addr);
+ vma_addr = flcn64_to_u64(desc->data_dma_base) - vma.offset;
+ desc->data_dma_base.lo = lower_32_bits(vma_addr);
+ desc->data_dma_base.hi = upper_32_bits(vma_addr);
+
+ /* We don't need the ACR firmware anymore */
+ nvkm_gpuobj_unmap(&vma);
+
+ return ret;
+}
+
+/**
+ * gm200_secboot_reset() - execute secure boot from the prepared state
+ *
+ * Load the HS bootloader and ask the falcon to run it. This will in turn
+ * load the HS firmware and run it, so once the falcon stops all the managed
+ * falcons should have their LS firmware loaded and be ready to run.
+ */
+int
+gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+ int ret;
+
+ /*
+ * Dummy GM200 implementation: perform secure boot each time we are
+ * called on FECS. Since only FECS and GPCCS are managed and started
+ * together, this ought to be safe.
+ *
+ * Once we have proper PMU firmware and support, this will be changed
+ * to a proper call to the PMU method.
+ */
+ if (falcon != NVKM_SECBOOT_FALCON_FECS)
+ goto end;
+
+ /* If WPR is set and we have an unload blob, run it to unlock WPR */
+ if (gsb->acr_unload_blob &&
+ gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) {
+ ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob,
+ &gsb->acr_unload_bl_desc);
+ if (ret)
+ return ret;
+ }
+
+ /* Reload all managed falcons */
+ ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_load_blob,
+ &gsb->acr_load_bl_desc);
+ if (ret)
+ return ret;
+
+end:
+ gsb->falcon_state[falcon] = RESET;
+ return 0;
+}
+
+int
+gm200_secboot_start(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+ int base;
+
+ switch (falcon) {
+ case NVKM_SECBOOT_FALCON_FECS:
+ base = 0x409000;
+ break;
+ case NVKM_SECBOOT_FALCON_GPCCS:
+ base = 0x41a000;
+ break;
+ default:
+ nvkm_error(&sb->subdev, "cannot start unhandled falcon!\n");
+ return -EINVAL;
+ }
+
+ nvkm_wr32(sb->subdev.device, base + 0x130, 0x00000002);
+ gsb->falcon_state[falcon] = RUNNING;
+
+ return 0;
+}
+
+
+
+int
+gm200_secboot_init(struct nvkm_secboot *sb)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+ struct nvkm_device *device = sb->subdev.device;
+ struct nvkm_vm *vm;
+ const u64 vm_area_len = 600 * 1024;
+ int ret;
+
+ /* Allocate instance block and VM */
+ ret = nvkm_gpuobj_new(device, 0x1000, 0, true, NULL, &gsb->inst);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x8000, 0, true, NULL, &gsb->pgd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vm_new(device, 0, vm_area_len, 0, NULL, &vm);
+ if (ret)
+ return ret;
+
+ atomic_inc(&vm->engref[NVKM_SUBDEV_PMU]);
+
+ ret = nvkm_vm_ref(vm, &gsb->vm, gsb->pgd);
+ nvkm_vm_ref(NULL, &vm, NULL);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(gsb->inst);
+ nvkm_wo32(gsb->inst, 0x200, lower_32_bits(gsb->pgd->addr));
+ nvkm_wo32(gsb->inst, 0x204, upper_32_bits(gsb->pgd->addr));
+ nvkm_wo32(gsb->inst, 0x208, lower_32_bits(vm_area_len - 1));
+ nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1));
+ nvkm_done(gsb->inst);
+
+ return 0;
+}
+
+int
+gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+ int ret = 0;
+ int i;
+
+ /* Run the unload blob to unprotect the WPR region */
+ if (gsb->acr_unload_blob &&
+ gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE)
+ ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob,
+ &gsb->acr_unload_bl_desc);
+
+ for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
+ gsb->falcon_state[i] = NON_SECURE;
+
+ return ret;
+}
+
+void *
+gm200_secboot_dtor(struct nvkm_secboot *sb)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+
+ nvkm_gpuobj_del(&gsb->acr_unload_blob);
+
+ kfree(gsb->hsbl_blob);
+ nvkm_gpuobj_del(&gsb->acr_load_blob);
+ nvkm_gpuobj_del(&gsb->ls_blob);
+
+ nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd);
+ nvkm_gpuobj_del(&gsb->pgd);
+ nvkm_gpuobj_del(&gsb->inst);
+
+ return gsb;
+}
+
+
+static const struct nvkm_secboot_func
+gm200_secboot = {
+ .dtor = gm200_secboot_dtor,
+ .init = gm200_secboot_init,
+ .fini = gm200_secboot_fini,
+ .prepare_blobs = gm200_secboot_prepare_blobs,
+ .reset = gm200_secboot_reset,
+ .start = gm200_secboot_start,
+ .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
+ BIT(NVKM_SECBOOT_FALCON_GPCCS),
+ .boot_falcon = NVKM_SECBOOT_FALCON_PMU,
+};
+
+/**
+ * gm200_secboot_fixup_bl_desc - just copy the BL descriptor
+ *
+ * Use the GM200 descriptor format by default.
+ */
+static void
+gm200_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret)
+{
+ memcpy(ret, desc, sizeof(*desc));
+}
+
+static void
+gm200_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
+ struct hsflcn_acr_desc *desc)
+{
+ desc->ucode_blob_base = gsb->ls_blob->addr;
+ desc->ucode_blob_size = gsb->ls_blob->size;
+
+ desc->wpr_offset = 0;
+
+ /* WPR region information for the HS binary to set up */
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 1;
+ desc->regions.region_props[0].region_id = 1;
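+	/* start and end addresses are stored in 256-byte units */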
+ desc->regions.region_props[0].start_addr = gsb->wpr_addr >> 8;
+ desc->regions.region_props[0].end_addr =
+ (gsb->wpr_addr + gsb->wpr_size) >> 8;
+}
+
+static const struct gm200_secboot_func
+gm200_secboot_func = {
+ .bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
+ .fixup_bl_desc = gm200_secboot_fixup_bl_desc,
+ .fixup_hs_desc = gm200_secboot_fixup_hs_desc,
+};
+
+int
+gm200_secboot_new(struct nvkm_device *device, int index,
+ struct nvkm_secboot **psb)
+{
+ int ret;
+ struct gm200_secboot *gsb;
+
+ gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
+ if (!gsb) {
+		*psb = NULL;
+ return -ENOMEM;
+ }
+ *psb = &gsb->base;
+
+ ret = nvkm_secboot_ctor(&gm200_secboot, device, index, &gsb->base);
+ if (ret)
+ return ret;
+
+ gsb->func = &gm200_secboot_func;
+
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gm200/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gm200/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm200/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm204/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gm204/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm204/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gm206/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gm206/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
new file mode 100644
index 000000000000..684320484b70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+#include <core/gpuobj.h>
+
+/*
+ * The BL header format used by GM20B's firmware is slightly different
+ * from GM200's. Fix the differences here.
+ */
+struct gm20b_flcn_bl_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ u32 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ u32 data_dma_base;
+ u32 data_size;
+};
+
+/**
+ * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
+ *
+ * There is only a slight format difference to address: DMA addresses are
+ * 32 bits wide and 256B-aligned.
+ */
+static void
+gm20b_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret)
+{
+ struct gm20b_flcn_bl_desc *gdesc = ret;
+
+	memcpy(gdesc->reserved, desc->reserved, sizeof(gdesc->reserved));
+	memcpy(gdesc->signature, desc->signature, sizeof(gdesc->signature));
+	gdesc->ctx_dma = desc->ctx_dma;
+	gdesc->code_dma_base =
+		lower_32_bits(flcn64_to_u64(desc->code_dma_base) >> 8);
+	gdesc->non_sec_code_off = desc->non_sec_code_off;
+	gdesc->non_sec_code_size = desc->non_sec_code_size;
+	gdesc->sec_code_off = desc->sec_code_off;
+	gdesc->sec_code_size = desc->sec_code_size;
+	gdesc->code_entry_point = desc->code_entry_point;
+	gdesc->data_dma_base =
+		lower_32_bits(flcn64_to_u64(desc->data_dma_base) >> 8);
+ gdesc->data_size = desc->data_size;
+}
+
+static void
+gm20b_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
+ struct hsflcn_acr_desc *desc)
+{
+ desc->ucode_blob_base = gsb->ls_blob->addr;
+ desc->ucode_blob_size = gsb->ls_blob->size;
+
+ desc->wpr_offset = 0;
+}
+
+static const struct gm200_secboot_func
+gm20b_secboot_func = {
+ .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
+ .fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
+ .fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
+};
+
+
+#ifdef CONFIG_ARCH_TEGRA
+#define TEGRA_MC_BASE 0x70019000
+#define MC_SECURITY_CARVEOUT2_CFG0 0xc58
+#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c
+#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60
+#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64
+#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1)
+/**
+ * gm20b_tegra_read_wpr() - read the WPR registers on Tegra
+ *
+ * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
+ * is reserved from system memory by the bootloader and irreversibly locked.
+ * This function reads the address and size of the pre-configured WPR region.
+ */
+static int
+gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
+{
+ struct nvkm_secboot *sb = &gsb->base;
+ void __iomem *mc;
+ u32 cfg;
+
+ mc = ioremap(TEGRA_MC_BASE, 0xd00);
+ if (!mc) {
+ nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
+		return -ENOMEM;
+ }
+ gsb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
+ ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
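+	/* the carveout size register counts 128K (1 << 17) units */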
+ gsb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K)
+ << 17;
+ cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
+ iounmap(mc);
+
+ /* Check that WPR settings are valid */
+ if (gsb->wpr_size == 0) {
+ nvkm_error(&sb->subdev, "WPR region is empty\n");
+ return -EINVAL;
+ }
+
+ if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) {
+ nvkm_error(&sb->subdev, "WPR region not locked\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#else
+static int
+gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
+{
+ nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
+ return -EINVAL;
+}
+#endif
+
+static int
+gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+ int acr_size;
+ int ret;
+
+ ret = gm20x_secboot_prepare_blobs(gsb);
+ if (ret)
+ return ret;
+
+ acr_size = gsb->acr_load_blob->size;
+ /*
+ * On Tegra the WPR region is set by the bootloader. It is illegal for
+ * the HS blob to be larger than this region.
+ */
+ if (acr_size > gsb->wpr_size) {
+ nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n");
+ nvkm_error(&sb->subdev, "required: %dB\n", acr_size);
+ nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int
+gm20b_secboot_init(struct nvkm_secboot *sb)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+ int ret;
+
+ ret = gm20b_tegra_read_wpr(gsb);
+ if (ret)
+ return ret;
+
+ return gm200_secboot_init(sb);
+}
+
+static const struct nvkm_secboot_func
+gm20b_secboot = {
+ .dtor = gm200_secboot_dtor,
+ .init = gm20b_secboot_init,
+ .prepare_blobs = gm20b_secboot_prepare_blobs,
+ .reset = gm200_secboot_reset,
+ .start = gm200_secboot_start,
+ .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
+ .boot_falcon = NVKM_SECBOOT_FALCON_PMU,
+};
+
+int
+gm20b_secboot_new(struct nvkm_device *device, int index,
+ struct nvkm_secboot **psb)
+{
+ int ret;
+ struct gm200_secboot *gsb;
+
+ gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
+ if (!gsb) {
+		*psb = NULL;
+ return -ENOMEM;
+ }
+ *psb = &gsb->base;
+
+ ret = nvkm_secboot_ctor(&gm20b_secboot, device, index, &gsb->base);
+ if (ret)
+ return ret;
+
+ gsb->func = &gm20b_secboot_func;
+
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
new file mode 100644
index 000000000000..f2b09dee7c5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECBOOT_PRIV_H__
+#define __NVKM_SECBOOT_PRIV_H__
+
+#include <subdev/secboot.h>
+#include <subdev/mmu.h>
+
+struct nvkm_secboot_func {
+ int (*init)(struct nvkm_secboot *);
+ int (*fini)(struct nvkm_secboot *, bool suspend);
+ void *(*dtor)(struct nvkm_secboot *);
+ int (*prepare_blobs)(struct nvkm_secboot *);
+ int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+ int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+
+ /* ID of the falcon that will perform secure boot */
+ enum nvkm_secboot_falcon boot_falcon;
+ /* Bit-mask of IDs of managed falcons */
+ unsigned long managed_falcons;
+};
+
+int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_device *,
+ int index, struct nvkm_secboot *);
+int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
+int nvkm_secboot_falcon_run(struct nvkm_secboot *);
+
+struct flcn_u64 {
+ u32 lo;
+ u32 hi;
+};
+static inline u64 flcn64_to_u64(const struct flcn_u64 f)
+{
+ return ((u64)f.hi) << 32 | f.lo;
+}
+
+/**
+ * struct gm200_flcn_bl_desc - DMEM bootloader descriptor
+ * @signature: 16B signature for secure code. 0s if no secure code
+ * @ctx_dma: DMA context to be used by BL while loading code/data
+ * @code_dma_base: 256B-aligned Physical FB Address where code is located
+ * (falcon's $xcbase register)
+ * @non_sec_code_off: offset from code_dma_base where the non-secure code is
+ *                  located. The offset must be a multiple of 256 to help perf
+ * @non_sec_code_size: the size of the non-secure code part.
+ * @sec_code_off: offset from code_dma_base where the secure code is
+ *                located. The offset must be a multiple of 256 to help perf
+ * @sec_code_size: the size of the secure code part.
+ * @code_entry_point: code entry point which will be invoked by BL after
+ * code is loaded.
+ * @data_dma_base: 256B aligned Physical FB Address where data is located.
+ * (falcon's $xdbase register)
+ * @data_size: size of data block. Should be multiple of 256B
+ *
+ * Structure used by the bootloader to load the rest of the code. This has
+ * to be filled by host and copied into DMEM at offset provided in the
+ * fw_bl_desc.dmem_load_off.
+ */
+struct gm200_flcn_bl_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ struct flcn_u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ struct flcn_u64 data_dma_base;
+ u32 data_size;
+};
+
+/**
+ * struct hsflcn_acr_desc - data section of the HS firmware
+ *
+ * This header is to be copied at the beginning of DMEM by the HS bootloader.
+ *
+ * @signature: signature of ACR ucode
+ * @wpr_region_id: region ID holding the WPR header and its details
+ * @wpr_offset: offset from the WPR region holding the wpr header
+ * @regions: region descriptors
+ * @ucode_blob_size: size of the LS blob
+ * @ucode_blob_base: FB location of the LS blob
+ */
+struct hsflcn_acr_desc {
+ union {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ } ucode_reserved_space;
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_mem_range;
+#define FLCN_ACR_MAX_REGIONS 2
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ } region_props[FLCN_ACR_MAX_REGIONS];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+/**
+ * struct gm200_secboot - the whole secure boot state
+ * @base: base secure boot instance
+ * @func: chip-specific secure boot functions
+ * @wpr_addr: physical address of the WPR region
+ * @wpr_size: size in bytes of the WPR region
+ * @acr_load_blob: HS blob that locks the WPR region and loads the LS firmwares
+ * @acr_load_bl_desc: bootloader descriptor of the load blob
+ * @acr_unload_blob: HS blob that unlocks the WPR region (dGPU only)
+ * @acr_unload_bl_desc: bootloader descriptor of the unload blob
+ * @hsbl_blob: HS bootloader used to start the HS blobs
+ * @ls_blob: LS blob of all the LS firmwares, signatures, bootloaders
+ * @inst: instance block for the HS falcon
+ * @pgd: page directory for the HS falcon
+ * @vm: address space used by the HS falcon
+ * @falcon_state: current state of each managed falcon
+ */
+struct gm200_secboot {
+ struct nvkm_secboot base;
+ const struct gm200_secboot_func *func;
+
+ /*
+ * Address and size of the WPR region. On dGPU this will be the
+ * address of the LS blob. On Tegra this is a fixed region set by the
+ * bootloader
+ */
+ u64 wpr_addr;
+ u32 wpr_size;
+
+ /*
+	 * HS FW - lock WPR region (dGPU only) and load LS FWs.
+	 * On Tegra the HS FW copies the LS blob into the fixed WPR instead.
+ */
+ struct nvkm_gpuobj *acr_load_blob;
+ struct gm200_flcn_bl_desc acr_load_bl_desc;
+
+ /* HS FW - unlock WPR region (dGPU only) */
+ struct nvkm_gpuobj *acr_unload_blob;
+ struct gm200_flcn_bl_desc acr_unload_bl_desc;
+
+ /* HS bootloader */
+ void *hsbl_blob;
+
+ /* LS FWs, to be loaded by the HS ACR */
+ struct nvkm_gpuobj *ls_blob;
+
+ /* Instance block & address space used for HS FW execution */
+ struct nvkm_gpuobj *inst;
+ struct nvkm_gpuobj *pgd;
+ struct nvkm_vm *vm;
+
+ /* To keep track of the state of all managed falcons */
+ enum {
+		/* In non-secure state, no firmware loaded, no privileges */
+ NON_SECURE = 0,
+ /* In low-secure mode and ready to be started */
+ RESET,
+ /* In low-secure mode and running */
+ RUNNING,
+ } falcon_state[NVKM_SECBOOT_FALCON_END];
+
+};
+#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
+
+struct gm200_secboot_func {
+ /*
+ * Size of the bootloader descriptor for this chip. A block of this
+ * size is allocated before booting a falcon and the fixup_bl_desc
+ * callback is called on it
+ */
+ u32 bl_desc_size;
+ void (*fixup_bl_desc)(const struct gm200_flcn_bl_desc *, void *);
+
+ /*
+ * Chip-specific modifications of the HS descriptor can be done here.
+ * On dGPU this is used to fill the information about the WPR region
+ * we want the HS FW to set up.
+ */
+ void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
+};
+
+int gm200_secboot_init(struct nvkm_secboot *);
+void *gm200_secboot_dtor(struct nvkm_secboot *);
+int gm200_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+int gm200_secboot_start(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+
+int gm20x_secboot_prepare_blobs(struct gm200_secboot *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
index b035c6e28be8..c34076223b7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
@@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/volt/gpio.o
nvkm-y += nvkm/subdev/volt/nv40.o
nvkm-y += nvkm/subdev/volt/gk104.o
nvkm-y += nvkm/subdev/volt/gk20a.o
+nvkm-y += nvkm/subdev/volt/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index fd56c6476064..d554455326da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,21 +24,9 @@
#include <core/tegra.h>
-struct cvb_coef {
- int c0;
- int c1;
- int c2;
- int c3;
- int c4;
- int c5;
-};
-
-struct gk20a_volt {
- struct nvkm_volt base;
- struct regulator *vdd;
-};
+#include "gk20a.h"
-const struct cvb_coef gk20a_cvb_coef[] = {
+static const struct cvb_coef gk20a_cvb_coef[] = {
/* MHz, c0, c1, c2, c3, c4, c5 */
/* 72 */ { 1209886, -36468, 515, 417, -13123, 203},
/* 108 */ { 1130804, -27659, 296, 298, -10834, 221},
@@ -89,7 +77,7 @@ gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
return mv;
}
-static int
+int
gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
{
int mv;
@@ -100,7 +88,7 @@ gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
return mv * 1000;
}
-static int
+int
gk20a_volt_vid_get(struct nvkm_volt *base)
{
struct gk20a_volt *volt = gk20a_volt(base);
@@ -115,7 +103,7 @@ gk20a_volt_vid_get(struct nvkm_volt *base)
return -EINVAL;
}
-static int
+int
gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
{
struct gk20a_volt *volt = gk20a_volt(base);
@@ -125,7 +113,7 @@ gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
}
-static int
+int
gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
{
struct gk20a_volt *volt = gk20a_volt(base);
@@ -155,30 +143,25 @@ gk20a_volt = {
};
int
-gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+_gk20a_volt_ctor(struct nvkm_device *device, int index,
+ const struct cvb_coef *coefs, int nb_coefs,
+ struct gk20a_volt *volt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
- struct gk20a_volt *volt;
int i, uv;
- if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
- return -ENOMEM;
-
nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
- *pvolt = &volt->base;
uv = regulator_get_voltage(tdev->vdd);
- nvkm_info(&volt->base.subdev, "The default voltage is %duV\n", uv);
+ nvkm_debug(&volt->base.subdev, "the default voltage is %duV\n", uv);
volt->vdd = tdev->vdd;
- volt->base.vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
- nvkm_debug(&volt->base.subdev, "%s - vid_nr = %d\n", __func__,
- volt->base.vid_nr);
+ volt->base.vid_nr = nb_coefs;
for (i = 0; i < volt->base.vid_nr; i++) {
volt->base.vid[i].vid = i;
volt->base.vid[i].uv =
- gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
+ gk20a_volt_calc_voltage(&coefs[i],
tdev->gpu_speedo);
nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
volt->base.vid[i].vid, volt->base.vid[i].uv);
@@ -186,3 +169,17 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
return 0;
}
+
+int
+gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+ struct gk20a_volt *volt;
+
+ volt = kzalloc(sizeof(*volt), GFP_KERNEL);
+ if (!volt)
+ return -ENOMEM;
+ *pvolt = &volt->base;
+
+ return _gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+ ARRAY_SIZE(gk20a_cvb_coef), volt);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
new file mode 100644
index 000000000000..0fa3b502bcf8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __GK20A_VOLT_H__
+#define __GK20A_VOLT_H__
+
+struct cvb_coef {
+ int c0;
+ int c1;
+ int c2;
+ int c3;
+ int c4;
+ int c5;
+};
+
+struct gk20a_volt {
+ struct nvkm_volt base;
+ struct regulator *vdd;
+};
+
+int _gk20a_volt_ctor(struct nvkm_device *device, int index,
+ const struct cvb_coef *coefs, int nb_coefs,
+ struct gk20a_volt *volt);
+
+int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo);
+int gk20a_volt_vid_get(struct nvkm_volt *volt);
+int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid);
+int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
new file mode 100644
index 000000000000..49b5ecb701e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+#include "gk20a.h"
+
+#include <core/tegra.h>
+
+const struct cvb_coef gm20b_cvb_coef[] = {
+ /* kHz, c0, c1, c2 */
+ /* 76800 */ { 1786666, -85625, 1632 },
+ /* 153600 */ { 1846729, -87525, 1632 },
+ /* 230400 */ { 1910480, -89425, 1632 },
+ /* 307200 */ { 1977920, -91325, 1632 },
+ /* 384000 */ { 2049049, -93215, 1632 },
+ /* 460800 */ { 2122872, -95095, 1632 },
+ /* 537600 */ { 2201331, -96985, 1632 },
+ /* 614400 */ { 2283479, -98885, 1632 },
+ /* 691200 */ { 2369315, -100785, 1632 },
+ /* 768000 */ { 2458841, -102685, 1632 },
+ /* 844800 */ { 2550821, -104555, 1632 },
+ /* 921600 */ { 2647676, -106455, 1632 },
+};
+
+int
+gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
+{
+ struct gk20a_volt *volt;
+
+ volt = kzalloc(sizeof(*volt), GFP_KERNEL);
+ if (!volt)
+ return -ENOMEM;
+ *pvolt = &volt->base;
+
+ return _gk20a_volt_ctor(device, index, gm20b_cvb_coef,
+ ARRAY_SIZE(gm20b_cvb_coef), volt);
+}
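
Note: the gm20b table above populates only c0..c2 of struct cvb_coef; the c3..c5 thermal terms stay zero-initialized, so the temperature-dependent part of the fit drops out. For orientation, a sketch of the speedo-only CVB evaluation such a table feeds, assuming the gk20a-style fixed-point quadratic fit (the helper name and the s_scale parameter are illustrative, not the driver's):

	/* mv(speedo) ~= c0 + c1*(speedo/s_scale) + c2*(speedo/s_scale)^2,
	 * evaluated with rounded integer division; returns millivolts,
	 * which gk20a_volt_calc_voltage() then scales to microvolts. */
	static int cvb_mv(const struct cvb_coef *coef, int speedo, int s_scale)
	{
		int mv;

		mv = DIV_ROUND_CLOSEST(coef->c2 * speedo, s_scale);
		mv = DIV_ROUND_CLOSEST((mv + coef->c1) * speedo, s_scale);
		return mv + coef->c0;
	}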
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 336ad4de9981..73241c4eb7aa 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -2,7 +2,6 @@ config DRM_OMAP
tristate "OMAP DRM"
depends on DRM
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
- select OMAP2_DSS
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index fe4c2228bc18..48b7b750c05c 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -6,7 +6,7 @@
obj-y += dss/
obj-y += displays/
-ccflags-y := -Iinclude/drm -Werror
+ccflags-y := -Iinclude/drm
omapdrm-y := omap_drv.o \
omap_irq.o \
omap_debugfs.o \
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index d811e6dcaef7..747f26a55e43 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -236,46 +236,6 @@ static struct omap_dss_driver dvic_driver = {
.detect = dvic_detect,
};
-static int dvic_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct connector_dvi_platform_data *pdata;
- struct omap_dss_device *in, *dssdev;
- int i2c_bus_num;
-
- pdata = dev_get_platdata(&pdev->dev);
- i2c_bus_num = pdata->i2c_bus_num;
-
- if (i2c_bus_num != -1) {
- struct i2c_adapter *adapter;
-
- adapter = i2c_get_adapter(i2c_bus_num);
- if (!adapter) {
- dev_err(&pdev->dev,
- "Failed to get I2C adapter, bus %d\n",
- i2c_bus_num);
- return -EPROBE_DEFER;
- }
-
- ddata->i2c_adapter = adapter;
- }
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- i2c_put_adapter(ddata->i2c_adapter);
-
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int dvic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -319,17 +279,12 @@ static int dvic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = dvic_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = dvic_probe_of(pdev);
- if (r)
- return r;
- } else {
+ if (!pdev->dev.of_node)
return -ENODEV;
- }
+
+ r = dvic_probe_of(pdev);
+ if (r)
+ return r;
ddata->timings = dvic_default_timings;
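
The dvic change above sets the pattern for the rest of this series: each omapdrm panel/encoder driver drops its legacy platform-data probe path and becomes DT-only. The same collapse repeats below for connector-hdmi, encoder-tfp410, encoder-tpd12s015, panel-dsi-cm, panel-lgphilips-lb035q02, panel-nec-nl8048hl11, panel-sharp-ls037v7dw01, panel-tpo-td028ttec1 and panel-tpo-td043mtea1. A minimal sketch of the probe shape they all converge on (the example_* names are illustrative, not from any one driver):

	static int example_probe_of(struct platform_device *pdev)
	{
		/* each driver keeps its own OF parsing here */
		return 0;
	}

	static int example_probe(struct platform_device *pdev)
	{
		if (!pdev->dev.of_node)
			return -ENODEV;	/* platform data no longer accepted */

		return example_probe_of(pdev);
	}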
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 6ee4129bc0c0..225fd8d6ab31 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -206,30 +206,6 @@ static struct omap_dss_driver hdmic_driver = {
.set_hdmi_infoframe = hdmic_set_infoframe,
};
-static int hdmic_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct connector_hdmi_platform_data *pdata;
- struct omap_dss_device *in, *dssdev;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- ddata->hpd_gpio = -ENODEV;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int hdmic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -268,17 +244,12 @@ static int hdmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- if (dev_get_platdata(&pdev->dev)) {
- r = hdmic_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = hdmic_probe_of(pdev);
- if (r)
- return r;
- } else {
+ if (!pdev->dev.of_node)
return -ENODEV;
- }
+
+ r = hdmic_probe_of(pdev);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->hpd_gpio)) {
r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index d9048b3df495..2fd5602880a7 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -166,32 +166,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = {
.get_timings = tfp410_get_timings,
};
-static int tfp410_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct encoder_tfp410_platform_data *pdata;
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- ddata->pd_gpio = pdata->power_down_gpio;
-
- ddata->data_lines = pdata->data_lines;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -ENODEV;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int tfp410_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -231,17 +205,12 @@ static int tfp410_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = tfp410_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = tfp410_probe_of(pdev);
- if (r)
- return r;
- } else {
+ if (!pdev->dev.of_node)
return -ENODEV;
- }
+
+ r = tfp410_probe_of(pdev);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->pd_gpio)) {
r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 990af6baeb0f..916a89978387 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -13,9 +13,8 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
@@ -24,9 +23,9 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- int ct_cp_hpd_gpio;
- int ls_oe_gpio;
- int hpd_gpio;
+ struct gpio_desc *ct_cp_hpd_gpio;
+ struct gpio_desc *ls_oe_gpio;
+ struct gpio_desc *hpd_gpio;
struct omap_video_timings timings;
};
@@ -47,7 +46,7 @@ static int tpd_connect(struct omap_dss_device *dssdev,
dst->src = dssdev;
dssdev->dst = dst;
- gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
+ gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
/* DC-DC converter needs at max 300us to get to 90% of 5V */
udelay(300);
@@ -65,7 +64,7 @@ static void tpd_disconnect(struct omap_dss_device *dssdev,
if (dst != dssdev->dst)
return;
- gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
+ gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
dst->src = NULL;
dssdev->dst = NULL;
@@ -145,16 +144,14 @@ static int tpd_read_edid(struct omap_dss_device *dssdev,
struct omap_dss_device *in = ddata->in;
int r;
- if (!gpio_get_value_cansleep(ddata->hpd_gpio))
+ if (!gpiod_get_value_cansleep(ddata->hpd_gpio))
return -ENODEV;
- if (gpio_is_valid(ddata->ls_oe_gpio))
- gpio_set_value_cansleep(ddata->ls_oe_gpio, 1);
+ gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
r = in->ops.hdmi->read_edid(in, edid, len);
- if (gpio_is_valid(ddata->ls_oe_gpio))
- gpio_set_value_cansleep(ddata->ls_oe_gpio, 0);
+ gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
return r;
}
@@ -163,7 +160,7 @@ static bool tpd_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- return gpio_get_value_cansleep(ddata->hpd_gpio);
+ return gpiod_get_value_cansleep(ddata->hpd_gpio);
}
static int tpd_set_infoframe(struct omap_dss_device *dssdev,
@@ -201,63 +198,11 @@ static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
.set_hdmi_mode = tpd_set_hdmi_mode,
};
-static int tpd_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct encoder_tpd12s015_platform_data *pdata;
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- ddata->ct_cp_hpd_gpio = pdata->ct_cp_hpd_gpio;
- ddata->ls_oe_gpio = pdata->ls_oe_gpio;
- ddata->hpd_gpio = pdata->hpd_gpio;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -ENODEV;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int tpd_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
struct omap_dss_device *in;
- int gpio;
-
- /* CT CP HPD GPIO */
- gpio = of_get_gpio(node, 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(&pdev->dev, "failed to parse CT CP HPD gpio\n");
- return gpio;
- }
- ddata->ct_cp_hpd_gpio = gpio;
-
- /* LS OE GPIO */
- gpio = of_get_gpio(node, 1);
- if (gpio_is_valid(gpio) || gpio == -ENOENT) {
- ddata->ls_oe_gpio = gpio;
- } else {
- dev_err(&pdev->dev, "failed to parse LS OE gpio\n");
- return gpio;
- }
-
- /* HPD GPIO */
- gpio = of_get_gpio(node, 2);
- if (!gpio_is_valid(gpio)) {
- dev_err(&pdev->dev, "failed to parse HPD gpio\n");
- return gpio;
- }
- ddata->hpd_gpio = gpio;
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
@@ -275,6 +220,7 @@ static int tpd_probe(struct platform_device *pdev)
struct omap_dss_device *in, *dssdev;
struct panel_drv_data *ddata;
int r;
+ struct gpio_desc *gpio;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
@@ -282,35 +228,35 @@ static int tpd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = tpd_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = tpd_probe_of(pdev);
- if (r)
- return r;
- } else {
+ if (!pdev->dev.of_node)
return -ENODEV;
- }
- r = devm_gpio_request_one(&pdev->dev, ddata->ct_cp_hpd_gpio,
- GPIOF_OUT_INIT_LOW, "hdmi_ct_cp_hpd");
+ r = tpd_probe_of(pdev);
if (r)
+ return r;
+
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
goto err_gpio;
- if (gpio_is_valid(ddata->ls_oe_gpio)) {
- r = devm_gpio_request_one(&pdev->dev, ddata->ls_oe_gpio,
- GPIOF_OUT_INIT_LOW, "hdmi_ls_oe");
- if (r)
- goto err_gpio;
- }
+ ddata->ct_cp_hpd_gpio = gpio;
- r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
- GPIOF_DIR_IN, "hdmi_hpd");
- if (r)
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ goto err_gpio;
+
+ ddata->ls_oe_gpio = gpio;
+
+ gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
+ GPIOD_IN);
+ if (IS_ERR(gpio))
goto err_gpio;
+ ddata->hpd_gpio = gpio;
+
dssdev = &ddata->dssdev;
dssdev->ops.hdmi = &tpd_hdmi_ops;
dssdev->dev = &pdev->dev;
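
A note on why the gpio_is_valid() checks could simply be deleted above: devm_gpiod_get_index_optional() returns NULL, not an error, when the line is absent, and the gpiod_* accessors are documented to accept a NULL descriptor as a no-op. A minimal consumer sketch under the same assumptions as the tpd12s015 change (the example_* names are illustrative):

	static int example_get_ls_oe(struct device *dev, struct gpio_desc **out)
	{
		struct gpio_desc *gpio;

		gpio = devm_gpiod_get_index_optional(dev, NULL, 1,
						     GPIOD_OUT_LOW);
		if (IS_ERR(gpio))
			return PTR_ERR(gpio);

		*out = gpio;				/* may be NULL */
		gpiod_set_value_cansleep(gpio, 1);	/* no-op if NULL */
		return 0;
	}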
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 3414c2609320..36485c2137ce 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -1127,40 +1127,6 @@ static struct omap_dss_driver dsicm_ops = {
.memory_read = dsicm_memory_read,
};
-static int dsicm_probe_pdata(struct platform_device *pdev)
-{
- const struct panel_dsicm_platform_data *pdata;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->reset_gpio = pdata->reset_gpio;
-
- if (pdata->use_ext_te)
- ddata->ext_te_gpio = pdata->ext_te_gpio;
- else
- ddata->ext_te_gpio = -1;
-
- ddata->ulps_timeout = pdata->ulps_timeout;
-
- ddata->use_dsi_backlight = pdata->use_dsi_backlight;
-
- ddata->pin_config = pdata->pin_config;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int dsicm_probe_of(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -1214,17 +1180,12 @@ static int dsicm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->pdev = pdev;
- if (dev_get_platdata(dev)) {
- r = dsicm_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = dsicm_probe_of(pdev);
- if (r)
- return r;
- } else {
+ if (!pdev->dev.of_node)
return -ENODEV;
- }
+
+ r = dsicm_probe_of(pdev);
+ if (r)
+ return r;
ddata->timings.x_res = 864;
ddata->timings.y_res = 480;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 18eb60e9c9ec..458f77bc473d 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -240,44 +240,6 @@ static struct omap_dss_driver lb035q02_ops = {
.get_resolution = omapdss_default_get_resolution,
};
-static int lb035q02_probe_pdata(struct spi_device *spi)
-{
- const struct panel_lb035q02_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
- int r;
-
- pdata = dev_get_platdata(&spi->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio,
- GPIOF_OUT_INIT_LOW, "panel enable");
- if (r)
- goto err_gpio;
-
- ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio);
-
- ddata->backlight_gpio = pdata->backlight_gpio;
-
- return 0;
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
-}
-
static int lb035q02_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -320,17 +282,12 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
ddata->spi = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = lb035q02_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = lb035q02_probe_of(spi);
- if (r)
- return r;
- } else {
+ if (!spi->dev.of_node)
return -ENODEV;
- }
+
+ r = lb035q02_probe_of(spi);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->backlight_gpio)) {
r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 8a928c9a2fc9..780cb263a318 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -19,7 +19,6 @@
#include <linux/of_gpio.h>
#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -232,34 +231,6 @@ static struct omap_dss_driver nec_8048_ops = {
.get_resolution = omapdss_default_get_resolution,
};
-
-static int nec_8048_probe_pdata(struct spi_device *spi)
-{
- const struct panel_nec_nl8048hl11_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- ddata->qvga_gpio = pdata->qvga_gpio;
- ddata->res_gpio = pdata->res_gpio;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int nec_8048_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -315,17 +286,12 @@ static int nec_8048_probe(struct spi_device *spi)
ddata->spi = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = nec_8048_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = nec_8048_probe_of(spi);
- if (r)
- return r;
- } else {
+ if (!spi->dev.of_node)
return -ENODEV;
- }
+
+ r = nec_8048_probe_of(spi);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->qvga_gpio)) {
r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index abfd1f6e3327..529a017602e4 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -197,73 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = {
.get_resolution = omapdss_default_get_resolution,
};
-static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
- char *desc, struct gpio_desc **gpiod)
-{
- struct gpio_desc *gd;
- int r;
-
- *gpiod = NULL;
-
- r = devm_gpio_request_one(dev, gpio, flags, desc);
- if (r)
- return r == -ENOENT ? 0 : r;
-
- gd = gpio_to_desc(gpio);
- if (IS_ERR(gd))
- return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
-
- *gpiod = gd;
- return 0;
-}
-
-static int sharp_ls_probe_pdata(struct platform_device *pdev)
-{
- const struct panel_sharp_ls037v7dw01_platform_data *pdata;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev, *in;
- int r;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW,
- "lcd MO", &ddata->mo_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH,
- "lcd LR", &ddata->lr_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH,
- "lcd UD", &ddata->ud_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW,
- "lcd RESB", &ddata->resb_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW,
- "lcd INI", &ddata->ini_gpio);
- if (r)
- return r;
-
- return 0;
-}
-
static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
const char *desc, struct gpio_desc **gpiod)
{
@@ -340,17 +272,12 @@ static int sharp_ls_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = sharp_ls_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = sharp_ls_probe_of(pdev);
- if (r)
- return r;
- } else {
+ if (!pdev->dev.of_node)
return -ENODEV;
- }
+
+ r = sharp_ls_probe_of(pdev);
+ if (r)
+ return r;
ddata->videomode = sharp_ls_timings;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index 4d657f3ab679..bd8d85041926 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -29,7 +29,6 @@
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = {
.check_timings = td028ttec1_panel_check_timings,
};
-static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
-{
- const struct panel_tpo_td028ttec1_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int td028ttec1_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -432,17 +406,12 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
ddata->spi_dev = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = td028ttec1_panel_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = td028ttec1_probe_of(spi);
- if (r)
- return r;
- } else {
+ if (!spi->dev.of_node)
return -ENODEV;
- }
+
+ r = td028ttec1_probe_of(spi);
+ if (r)
+ return r;
ddata->videomode = td028ttec1_panel_timings;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 68e3b68a2920..03e2beb7b4f0 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -20,7 +20,6 @@
#include <linux/of_gpio.h>
#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
#define TPO_R02_MODE(x) ((x) & 7)
#define TPO_R02_MODE_800x480 7
@@ -464,33 +463,6 @@ static struct omap_dss_driver tpo_td043_ops = {
.get_resolution = omapdss_default_get_resolution,
};
-
-static int tpo_td043_probe_pdata(struct spi_device *spi)
-{
- const struct panel_tpo_td043mtea1_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- ddata->nreset_gpio = pdata->nreset_gpio;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int tpo_td043_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -541,17 +513,12 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->spi = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = tpo_td043_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = tpo_td043_probe_of(spi);
- if (r)
- return r;
- } else {
+ if (!spi->dev.of_node)
return -ENODEV;
- }
+
+ r = tpo_td043_probe_of(spi);
+ if (r)
+ return r;
ddata->mode = TPO_R02_MODE_800x480;
memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index b5136d3d4b77..b651ec9751e6 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -3,9 +3,6 @@ obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
output.o dss-of.o pll.o video-pll.o
-# DSS compat layer files
-omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
- dispc-compat.o display-sysfs.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/gpu/drm/omapdrm/dss/apply.c b/drivers/gpu/drm/omapdrm/dss/apply.c
deleted file mode 100644
index 663ccc3bf4e5..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/apply.c
+++ /dev/null
@@ -1,1702 +0,0 @@
-/*
- * Copyright (C) 2011 Texas Instruments
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "APPLY"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/jiffies.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-#include "dispc-compat.h"
-
-/*
- * We have 4 levels of cache for the dispc settings. First two are in SW and
- * the latter two in HW.
- *
- * set_info()
- * v
- * +--------------------+
- * | user_info |
- * +--------------------+
- * v
- * apply()
- * v
- * +--------------------+
- * | info |
- * +--------------------+
- * v
- * write_regs()
- * v
- * +--------------------+
- * | shadow registers |
- * +--------------------+
- * v
- * VFP or lcd/digit_enable
- * v
- * +--------------------+
- * | registers |
- * +--------------------+
- */
-
-struct ovl_priv_data {
-
- bool user_info_dirty;
- struct omap_overlay_info user_info;
-
- bool info_dirty;
- struct omap_overlay_info info;
-
- bool shadow_info_dirty;
-
- bool extra_info_dirty;
- bool shadow_extra_info_dirty;
-
- bool enabled;
- u32 fifo_low, fifo_high;
-
- /*
- * True if overlay is to be enabled. Used to check and calculate configs
- * for the overlay before it is enabled in the HW.
- */
- bool enabling;
-};
-
-struct mgr_priv_data {
-
- bool user_info_dirty;
- struct omap_overlay_manager_info user_info;
-
- bool info_dirty;
- struct omap_overlay_manager_info info;
-
- bool shadow_info_dirty;
-
- /* If true, GO bit is up and shadow registers cannot be written.
- * Never true for manual update displays */
- bool busy;
-
- /* If true, dispc output is enabled */
- bool updating;
-
- /* If true, a display is enabled using this manager */
- bool enabled;
-
- bool extra_info_dirty;
- bool shadow_extra_info_dirty;
-
- struct omap_video_timings timings;
- struct dss_lcd_mgr_config lcd_config;
-
- void (*framedone_handler)(void *);
- void *framedone_handler_data;
-};
-
-static struct {
- struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
- struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];
-
- bool irq_enabled;
-} dss_data;
-
-/* protects dss_data */
-static spinlock_t data_lock;
-/* lock for blocking functions */
-static DEFINE_MUTEX(apply_lock);
-static DECLARE_COMPLETION(extra_updated_completion);
-
-static void dss_register_vsync_isr(void);
-
-static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
-{
- return &dss_data.ovl_priv_data_array[ovl->id];
-}
-
-static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
-{
- return &dss_data.mgr_priv_data_array[mgr->id];
-}
-
-static void apply_init_priv(void)
-{
- const int num_ovls = dss_feat_get_num_ovls();
- struct mgr_priv_data *mp;
- int i;
-
- spin_lock_init(&data_lock);
-
- for (i = 0; i < num_ovls; ++i) {
- struct ovl_priv_data *op;
-
- op = &dss_data.ovl_priv_data_array[i];
-
- op->info.color_mode = OMAP_DSS_COLOR_RGB16;
- op->info.rotation_type = OMAP_DSS_ROT_DMA;
-
- op->info.global_alpha = 255;
-
- switch (i) {
- case 0:
- op->info.zorder = 0;
- break;
- case 1:
- op->info.zorder =
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
- break;
- case 2:
- op->info.zorder =
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
- break;
- case 3:
- op->info.zorder =
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
- break;
- }
-
- op->user_info = op->info;
- }
-
- /*
- * Initialize some of the lcd_config fields for TV manager, this lets
- * us prevent checking if the manager is LCD or TV at some places
- */
- mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT];
-
- mp->lcd_config.video_port_width = 24;
- mp->lcd_config.clock_info.lck_div = 1;
- mp->lcd_config.clock_info.pck_div = 1;
-}
-
-/*
- * A LCD manager's stallmode decides whether it is in manual or auto update. TV
- * manager is always auto update, stallmode field for TV manager is false by
- * default
- */
-static bool ovl_manual_update(struct omap_overlay *ovl)
-{
- struct mgr_priv_data *mp = get_mgr_priv(ovl->manager);
-
- return mp->lcd_config.stallmode;
-}
-
-static bool mgr_manual_update(struct omap_overlay_manager *mgr)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
- return mp->lcd_config.stallmode;
-}
-
-static int dss_check_settings_low(struct omap_overlay_manager *mgr,
- bool applying)
-{
- struct omap_overlay_info *oi;
- struct omap_overlay_manager_info *mi;
- struct omap_overlay *ovl;
- struct omap_overlay_info *ois[MAX_DSS_OVERLAYS];
- struct ovl_priv_data *op;
- struct mgr_priv_data *mp;
-
- mp = get_mgr_priv(mgr);
-
- if (!mp->enabled)
- return 0;
-
- if (applying && mp->user_info_dirty)
- mi = &mp->user_info;
- else
- mi = &mp->info;
-
- /* collect the infos to be tested into the array */
- list_for_each_entry(ovl, &mgr->overlays, list) {
- op = get_ovl_priv(ovl);
-
- if (!op->enabled && !op->enabling)
- oi = NULL;
- else if (applying && op->user_info_dirty)
- oi = &op->user_info;
- else
- oi = &op->info;
-
- ois[ovl->id] = oi;
- }
-
- return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois);
-}
-
-/*
- * check manager and overlay settings using overlay_info from data->info
- */
-static int dss_check_settings(struct omap_overlay_manager *mgr)
-{
- return dss_check_settings_low(mgr, false);
-}
-
-/*
- * check manager and overlay settings using overlay_info from ovl->info if
- * dirty and from data->info otherwise
- */
-static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
-{
- return dss_check_settings_low(mgr, true);
-}
-
-static bool need_isr(void)
-{
- const int num_mgrs = dss_feat_get_num_mgrs();
- int i;
-
- for (i = 0; i < num_mgrs; ++i) {
- struct omap_overlay_manager *mgr;
- struct mgr_priv_data *mp;
- struct omap_overlay *ovl;
-
- mgr = omap_dss_get_overlay_manager(i);
- mp = get_mgr_priv(mgr);
-
- if (!mp->enabled)
- continue;
-
- if (mgr_manual_update(mgr)) {
- /* to catch FRAMEDONE */
- if (mp->updating)
- return true;
- } else {
- /* to catch GO bit going down */
- if (mp->busy)
- return true;
-
- /* to write new values to registers */
- if (mp->info_dirty)
- return true;
-
- /* to set GO bit */
- if (mp->shadow_info_dirty)
- return true;
-
- /*
- * NOTE: we don't check extra_info flags for disabled
- * managers, once the manager is enabled, the extra_info
- * related manager changes will be taken in by HW.
- */
-
- /* to write new values to registers */
- if (mp->extra_info_dirty)
- return true;
-
- /* to set GO bit */
- if (mp->shadow_extra_info_dirty)
- return true;
-
- list_for_each_entry(ovl, &mgr->overlays, list) {
- struct ovl_priv_data *op;
-
- op = get_ovl_priv(ovl);
-
- /*
- * NOTE: we check extra_info flags even for
- * disabled overlays, as extra_infos need to be
- * always written.
- */
-
- /* to write new values to registers */
- if (op->extra_info_dirty)
- return true;
-
- /* to set GO bit */
- if (op->shadow_extra_info_dirty)
- return true;
-
- if (!op->enabled)
- continue;
-
- /* to write new values to registers */
- if (op->info_dirty)
- return true;
-
- /* to set GO bit */
- if (op->shadow_info_dirty)
- return true;
- }
- }
- }
-
- return false;
-}
-
-static bool need_go(struct omap_overlay_manager *mgr)
-{
- struct omap_overlay *ovl;
- struct mgr_priv_data *mp;
- struct ovl_priv_data *op;
-
- mp = get_mgr_priv(mgr);
-
- if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
- return true;
-
- list_for_each_entry(ovl, &mgr->overlays, list) {
- op = get_ovl_priv(ovl);
- if (op->shadow_info_dirty || op->shadow_extra_info_dirty)
- return true;
- }
-
- return false;
-}
-
-/* returns true if an extra_info field is currently being updated */
-static bool extra_info_update_ongoing(void)
-{
- const int num_mgrs = dss_feat_get_num_mgrs();
- int i;
-
- for (i = 0; i < num_mgrs; ++i) {
- struct omap_overlay_manager *mgr;
- struct omap_overlay *ovl;
- struct mgr_priv_data *mp;
-
- mgr = omap_dss_get_overlay_manager(i);
- mp = get_mgr_priv(mgr);
-
- if (!mp->enabled)
- continue;
-
- if (!mp->updating)
- continue;
-
- if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
- return true;
-
- list_for_each_entry(ovl, &mgr->overlays, list) {
- struct ovl_priv_data *op = get_ovl_priv(ovl);
-
- if (op->extra_info_dirty || op->shadow_extra_info_dirty)
- return true;
- }
- }
-
- return false;
-}
-
-/* wait until no extra_info updates are pending */
-static void wait_pending_extra_info_updates(void)
-{
- bool updating;
- unsigned long flags;
- unsigned long t;
- int r;
-
- spin_lock_irqsave(&data_lock, flags);
-
- updating = extra_info_update_ongoing();
-
- if (!updating) {
- spin_unlock_irqrestore(&data_lock, flags);
- return;
- }
-
- init_completion(&extra_updated_completion);
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- t = msecs_to_jiffies(500);
- r = wait_for_completion_timeout(&extra_updated_completion, t);
- if (r == 0)
- DSSWARN("timeout in wait_pending_extra_info_updates\n");
-}
-
-static struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *mgr)
-{
- struct omap_dss_device *dssdev;
-
- dssdev = mgr->output;
- if (dssdev == NULL)
- return NULL;
-
- while (dssdev->dst)
- dssdev = dssdev->dst;
-
- if (dssdev->driver)
- return dssdev;
- else
- return NULL;
-}
-
-static struct omap_dss_device *dss_ovl_get_device(struct omap_overlay *ovl)
-{
- return ovl->manager ? dss_mgr_get_device(ovl->manager) : NULL;
-}
-
-static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
-{
- unsigned long timeout = msecs_to_jiffies(500);
- u32 irq;
- int r;
-
- if (mgr->output == NULL)
- return -ENODEV;
-
- r = dispc_runtime_get();
- if (r)
- return r;
-
- switch (mgr->output->id) {
- case OMAP_DSS_OUTPUT_VENC:
- irq = DISPC_IRQ_EVSYNC_ODD;
- break;
- case OMAP_DSS_OUTPUT_HDMI:
- irq = DISPC_IRQ_EVSYNC_EVEN;
- break;
- default:
- irq = dispc_mgr_get_vsync_irq(mgr->id);
- break;
- }
-
- r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
-
- dispc_runtime_put();
-
- return r;
-}
-
-static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
-{
- unsigned long timeout = msecs_to_jiffies(500);
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
- u32 irq;
- unsigned long flags;
- int r;
- int i;
-
- spin_lock_irqsave(&data_lock, flags);
-
- if (mgr_manual_update(mgr)) {
- spin_unlock_irqrestore(&data_lock, flags);
- return 0;
- }
-
- if (!mp->enabled) {
- spin_unlock_irqrestore(&data_lock, flags);
- return 0;
- }
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- r = dispc_runtime_get();
- if (r)
- return r;
-
- irq = dispc_mgr_get_vsync_irq(mgr->id);
-
- i = 0;
- while (1) {
- bool shadow_dirty, dirty;
-
- spin_lock_irqsave(&data_lock, flags);
- dirty = mp->info_dirty;
- shadow_dirty = mp->shadow_info_dirty;
- spin_unlock_irqrestore(&data_lock, flags);
-
- if (!dirty && !shadow_dirty) {
- r = 0;
- break;
- }
-
- /* 4 iterations is the worst case:
- * 1 - initial iteration, dirty = true (between VFP and VSYNC)
- * 2 - first VSYNC, dirty = true
- * 3 - dirty = false, shadow_dirty = true
- * 4 - shadow_dirty = false */
- if (i++ == 3) {
- DSSERR("mgr(%d)->wait_for_go() not finishing\n",
- mgr->id);
- r = 0;
- break;
- }
-
- r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
- if (r == -ERESTARTSYS)
- break;
-
- if (r) {
- DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
- break;
- }
- }
-
- dispc_runtime_put();
-
- return r;
-}
-
-static int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
-{
- unsigned long timeout = msecs_to_jiffies(500);
- struct ovl_priv_data *op;
- struct mgr_priv_data *mp;
- u32 irq;
- unsigned long flags;
- int r;
- int i;
-
- if (!ovl->manager)
- return 0;
-
- mp = get_mgr_priv(ovl->manager);
-
- spin_lock_irqsave(&data_lock, flags);
-
- if (ovl_manual_update(ovl)) {
- spin_unlock_irqrestore(&data_lock, flags);
- return 0;
- }
-
- if (!mp->enabled) {
- spin_unlock_irqrestore(&data_lock, flags);
- return 0;
- }
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- r = dispc_runtime_get();
- if (r)
- return r;
-
- irq = dispc_mgr_get_vsync_irq(ovl->manager->id);
-
- op = get_ovl_priv(ovl);
- i = 0;
- while (1) {
- bool shadow_dirty, dirty;
-
- spin_lock_irqsave(&data_lock, flags);
- dirty = op->info_dirty;
- shadow_dirty = op->shadow_info_dirty;
- spin_unlock_irqrestore(&data_lock, flags);
-
- if (!dirty && !shadow_dirty) {
- r = 0;
- break;
- }
-
- /* 4 iterations is the worst case:
- * 1 - initial iteration, dirty = true (between VFP and VSYNC)
- * 2 - first VSYNC, dirty = true
- * 3 - dirty = false, shadow_dirty = true
- * 4 - shadow_dirty = false */
- if (i++ == 3) {
- DSSERR("ovl(%d)->wait_for_go() not finishing\n",
- ovl->id);
- r = 0;
- break;
- }
-
- r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
- if (r == -ERESTARTSYS)
- break;
-
- if (r) {
- DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
- break;
- }
- }
-
- dispc_runtime_put();
-
- return r;
-}
-
-static void dss_ovl_write_regs(struct omap_overlay *ovl)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- struct omap_overlay_info *oi;
- bool replication;
- struct mgr_priv_data *mp;
- int r;
-
- DSSDBG("writing ovl %d regs\n", ovl->id);
-
- if (!op->enabled || !op->info_dirty)
- return;
-
- oi = &op->info;
-
- mp = get_mgr_priv(ovl->manager);
-
- replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode);
-
- r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings, false);
- if (r) {
- /*
- * We can't do much here, as this function can be called from
- * vsync interrupt.
- */
- DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);
-
- /* This will leave fifo configurations in a nonoptimal state */
- op->enabled = false;
- dispc_ovl_enable(ovl->id, false);
- return;
- }
-
- op->info_dirty = false;
- if (mp->updating)
- op->shadow_info_dirty = true;
-}
-
-static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- struct mgr_priv_data *mp;
-
- DSSDBG("writing ovl %d regs extra\n", ovl->id);
-
- if (!op->extra_info_dirty)
- return;
-
- /* note: write also when op->enabled == false, so that the ovl gets
- * disabled */
-
- dispc_ovl_enable(ovl->id, op->enabled);
- dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);
-
- mp = get_mgr_priv(ovl->manager);
-
- op->extra_info_dirty = false;
- if (mp->updating)
- op->shadow_extra_info_dirty = true;
-}
-
-static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
- struct omap_overlay *ovl;
-
- DSSDBG("writing mgr %d regs\n", mgr->id);
-
- if (!mp->enabled)
- return;
-
- WARN_ON(mp->busy);
-
- /* Commit overlay settings */
- list_for_each_entry(ovl, &mgr->overlays, list) {
- dss_ovl_write_regs(ovl);
- dss_ovl_write_regs_extra(ovl);
- }
-
- if (mp->info_dirty) {
- dispc_mgr_setup(mgr->id, &mp->info);
-
- mp->info_dirty = false;
- if (mp->updating)
- mp->shadow_info_dirty = true;
- }
-}
-
-static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
- DSSDBG("writing mgr %d regs extra\n", mgr->id);
-
- if (!mp->extra_info_dirty)
- return;
-
- dispc_mgr_set_timings(mgr->id, &mp->timings);
-
- /* lcd_config parameters */
- if (dss_mgr_is_lcd(mgr->id))
- dispc_mgr_set_lcd_config(mgr->id, &mp->lcd_config);
-
- mp->extra_info_dirty = false;
- if (mp->updating)
- mp->shadow_extra_info_dirty = true;
-}
-
-static void dss_write_regs(void)
-{
- const int num_mgrs = omap_dss_get_num_overlay_managers();
- int i;
-
- for (i = 0; i < num_mgrs; ++i) {
- struct omap_overlay_manager *mgr;
- struct mgr_priv_data *mp;
- int r;
-
- mgr = omap_dss_get_overlay_manager(i);
- mp = get_mgr_priv(mgr);
-
- if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
- continue;
-
- r = dss_check_settings(mgr);
- if (r) {
- DSSERR("cannot write registers for manager %s: "
- "illegal configuration\n", mgr->name);
- continue;
- }
-
- dss_mgr_write_regs(mgr);
- dss_mgr_write_regs_extra(mgr);
- }
-}
-
-static void dss_set_go_bits(void)
-{
- const int num_mgrs = omap_dss_get_num_overlay_managers();
- int i;
-
- for (i = 0; i < num_mgrs; ++i) {
- struct omap_overlay_manager *mgr;
- struct mgr_priv_data *mp;
-
- mgr = omap_dss_get_overlay_manager(i);
- mp = get_mgr_priv(mgr);
-
- if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
- continue;
-
- if (!need_go(mgr))
- continue;
-
- mp->busy = true;
-
- if (!dss_data.irq_enabled && need_isr())
- dss_register_vsync_isr();
-
- dispc_mgr_go(mgr->id);
- }
-
-}
-
-static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
-{
- struct omap_overlay *ovl;
- struct mgr_priv_data *mp;
- struct ovl_priv_data *op;
-
- mp = get_mgr_priv(mgr);
- mp->shadow_info_dirty = false;
- mp->shadow_extra_info_dirty = false;
-
- list_for_each_entry(ovl, &mgr->overlays, list) {
- op = get_ovl_priv(ovl);
- op->shadow_info_dirty = false;
- op->shadow_extra_info_dirty = false;
- }
-}
-
-static int dss_mgr_connect_compat(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dst)
-{
- return mgr->set_output(mgr, dst);
-}
-
-static void dss_mgr_disconnect_compat(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dst)
-{
- mgr->unset_output(mgr);
-}
-
-static void dss_mgr_start_update_compat(struct omap_overlay_manager *mgr)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&data_lock, flags);
-
- WARN_ON(mp->updating);
-
- r = dss_check_settings(mgr);
- if (r) {
- DSSERR("cannot start manual update: illegal configuration\n");
- spin_unlock_irqrestore(&data_lock, flags);
- return;
- }
-
- dss_mgr_write_regs(mgr);
- dss_mgr_write_regs_extra(mgr);
-
- mp->updating = true;
-
- if (!dss_data.irq_enabled && need_isr())
- dss_register_vsync_isr();
-
- dispc_mgr_enable_sync(mgr->id);
-
- spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static void dss_apply_irq_handler(void *data, u32 mask);
-
-static void dss_register_vsync_isr(void)
-{
- const int num_mgrs = dss_feat_get_num_mgrs();
- u32 mask;
- int r, i;
-
- mask = 0;
- for (i = 0; i < num_mgrs; ++i)
- mask |= dispc_mgr_get_vsync_irq(i);
-
- for (i = 0; i < num_mgrs; ++i)
- mask |= dispc_mgr_get_framedone_irq(i);
-
- r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
- WARN_ON(r);
-
- dss_data.irq_enabled = true;
-}
-
-static void dss_unregister_vsync_isr(void)
-{
- const int num_mgrs = dss_feat_get_num_mgrs();
- u32 mask;
- int r, i;
-
- mask = 0;
- for (i = 0; i < num_mgrs; ++i)
- mask |= dispc_mgr_get_vsync_irq(i);
-
- for (i = 0; i < num_mgrs; ++i)
- mask |= dispc_mgr_get_framedone_irq(i);
-
- r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
- WARN_ON(r);
-
- dss_data.irq_enabled = false;
-}
-
-static void dss_apply_irq_handler(void *data, u32 mask)
-{
- const int num_mgrs = dss_feat_get_num_mgrs();
- int i;
- bool extra_updating;
-
- spin_lock(&data_lock);
-
- /* clear busy, updating flags, shadow_dirty flags */
- for (i = 0; i < num_mgrs; i++) {
- struct omap_overlay_manager *mgr;
- struct mgr_priv_data *mp;
-
- mgr = omap_dss_get_overlay_manager(i);
- mp = get_mgr_priv(mgr);
-
- if (!mp->enabled)
- continue;
-
- mp->updating = dispc_mgr_is_enabled(i);
-
- if (!mgr_manual_update(mgr)) {
- bool was_busy = mp->busy;
- mp->busy = dispc_mgr_go_busy(i);
-
- if (was_busy && !mp->busy)
- mgr_clear_shadow_dirty(mgr);
- }
- }
-
- dss_write_regs();
- dss_set_go_bits();
-
- extra_updating = extra_info_update_ongoing();
- if (!extra_updating)
- complete_all(&extra_updated_completion);
-
- /* call framedone handlers for manual update displays */
- for (i = 0; i < num_mgrs; i++) {
- struct omap_overlay_manager *mgr;
- struct mgr_priv_data *mp;
-
- mgr = omap_dss_get_overlay_manager(i);
- mp = get_mgr_priv(mgr);
-
- if (!mgr_manual_update(mgr) || !mp->framedone_handler)
- continue;
-
- if (mask & dispc_mgr_get_framedone_irq(i))
- mp->framedone_handler(mp->framedone_handler_data);
- }
-
- if (!need_isr())
- dss_unregister_vsync_isr();
-
- spin_unlock(&data_lock);
-}
-
-static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
-{
- struct ovl_priv_data *op;
-
- op = get_ovl_priv(ovl);
-
- if (!op->user_info_dirty)
- return;
-
- op->user_info_dirty = false;
- op->info_dirty = true;
- op->info = op->user_info;
-}
-
-static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
-{
- struct mgr_priv_data *mp;
-
- mp = get_mgr_priv(mgr);
-
- if (!mp->user_info_dirty)
- return;
-
- mp->user_info_dirty = false;
- mp->info_dirty = true;
- mp->info = mp->user_info;
-}
-
-static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
-{
- unsigned long flags;
- struct omap_overlay *ovl;
- int r;
-
- DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
-
- spin_lock_irqsave(&data_lock, flags);
-
- r = dss_check_settings_apply(mgr);
- if (r) {
- spin_unlock_irqrestore(&data_lock, flags);
- DSSERR("failed to apply settings: illegal configuration.\n");
- return r;
- }
-
- /* Configure overlays */
- list_for_each_entry(ovl, &mgr->overlays, list)
- omap_dss_mgr_apply_ovl(ovl);
-
- /* Configure manager */
- omap_dss_mgr_apply_mgr(mgr);
-
- dss_write_regs();
- dss_set_go_bits();
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- return 0;
-}
-
-static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable)
-{
- struct ovl_priv_data *op;
-
- op = get_ovl_priv(ovl);
-
- if (op->enabled == enable)
- return;
-
- op->enabled = enable;
- op->extra_info_dirty = true;
-}
-
-static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl,
- u32 fifo_low, u32 fifo_high)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
-
- if (op->fifo_low == fifo_low && op->fifo_high == fifo_high)
- return;
-
- op->fifo_low = fifo_low;
- op->fifo_high = fifo_high;
- op->extra_info_dirty = true;
-}
-
-static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- u32 fifo_low, fifo_high;
- bool use_fifo_merge = false;
-
- if (!op->enabled && !op->enabling)
- return;
-
- dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
- use_fifo_merge, ovl_manual_update(ovl));
-
- dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
-}
-
-static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
-{
- struct omap_overlay *ovl;
- struct mgr_priv_data *mp;
-
- mp = get_mgr_priv(mgr);
-
- if (!mp->enabled)
- return;
-
- list_for_each_entry(ovl, &mgr->overlays, list)
- dss_ovl_setup_fifo(ovl);
-}
-
-static void dss_setup_fifos(void)
-{
- const int num_mgrs = omap_dss_get_num_overlay_managers();
- struct omap_overlay_manager *mgr;
- int i;
-
- for (i = 0; i < num_mgrs; ++i) {
- mgr = omap_dss_get_overlay_manager(i);
- dss_mgr_setup_fifos(mgr);
- }
-}
-
-static int dss_mgr_enable_compat(struct omap_overlay_manager *mgr)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
- unsigned long flags;
- int r;
-
- mutex_lock(&apply_lock);
-
- if (mp->enabled)
- goto out;
-
- spin_lock_irqsave(&data_lock, flags);
-
- mp->enabled = true;
-
- r = dss_check_settings(mgr);
- if (r) {
- DSSERR("failed to enable manager %d: check_settings failed\n",
- mgr->id);
- goto err;
- }
-
- dss_setup_fifos();
-
- dss_write_regs();
- dss_set_go_bits();
-
- if (!mgr_manual_update(mgr))
- mp->updating = true;
-
- if (!dss_data.irq_enabled && need_isr())
- dss_register_vsync_isr();
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- if (!mgr_manual_update(mgr))
- dispc_mgr_enable_sync(mgr->id);
-
-out:
- mutex_unlock(&apply_lock);
-
- return 0;
-
-err:
- mp->enabled = false;
- spin_unlock_irqrestore(&data_lock, flags);
- mutex_unlock(&apply_lock);
- return r;
-}
-
-static void dss_mgr_disable_compat(struct omap_overlay_manager *mgr)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
- unsigned long flags;
-
- mutex_lock(&apply_lock);
-
- if (!mp->enabled)
- goto out;
-
- wait_pending_extra_info_updates();
-
- if (!mgr_manual_update(mgr))
- dispc_mgr_disable_sync(mgr->id);
-
- spin_lock_irqsave(&data_lock, flags);
-
- mp->updating = false;
- mp->enabled = false;
-
- spin_unlock_irqrestore(&data_lock, flags);
-
-out:
- mutex_unlock(&apply_lock);
-}
-
-static int dss_mgr_set_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
- unsigned long flags;
- int r;
-
- r = dss_mgr_simple_check(mgr, info);
- if (r)
- return r;
-
- spin_lock_irqsave(&data_lock, flags);
-
- mp->user_info = *info;
- mp->user_info_dirty = true;
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- return 0;
-}
-
-static void dss_mgr_get_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
- unsigned long flags;
-
- spin_lock_irqsave(&data_lock, flags);
-
- *info = mp->user_info;
-
- spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static int dss_mgr_set_output(struct omap_overlay_manager *mgr,
- struct omap_dss_device *output)
-{
- int r;
-
- mutex_lock(&apply_lock);
-
- if (mgr->output) {
- DSSERR("manager %s is already connected to an output\n",
- mgr->name);
- r = -EINVAL;
- goto err;
- }
-
- if ((mgr->supported_outputs & output->id) == 0) {
- DSSERR("output does not support manager %s\n",
- mgr->name);
- r = -EINVAL;
- goto err;
- }
-
- output->manager = mgr;
- mgr->output = output;
-
- mutex_unlock(&apply_lock);
-
- return 0;
-err:
- mutex_unlock(&apply_lock);
- return r;
-}
-
-static int dss_mgr_unset_output(struct omap_overlay_manager *mgr)
-{
- int r;
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
- unsigned long flags;
-
- mutex_lock(&apply_lock);
-
- if (!mgr->output) {
- DSSERR("failed to unset output, output not set\n");
- r = -EINVAL;
- goto err;
- }
-
- spin_lock_irqsave(&data_lock, flags);
-
- if (mp->enabled) {
- DSSERR("output can't be unset when manager is enabled\n");
- r = -EINVAL;
- goto err1;
- }
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- mgr->output->manager = NULL;
- mgr->output = NULL;
-
- mutex_unlock(&apply_lock);
-
- return 0;
-err1:
- spin_unlock_irqrestore(&data_lock, flags);
-err:
- mutex_unlock(&apply_lock);
-
- return r;
-}
-
-static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
- const struct omap_video_timings *timings)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
- mp->timings = *timings;
- mp->extra_info_dirty = true;
-}
-
-static void dss_mgr_set_timings_compat(struct omap_overlay_manager *mgr,
- const struct omap_video_timings *timings)
-{
- unsigned long flags;
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
- spin_lock_irqsave(&data_lock, flags);
-
- if (mp->updating) {
- DSSERR("cannot set timings for %s: manager needs to be disabled\n",
- mgr->name);
- goto out;
- }
-
- dss_apply_mgr_timings(mgr, timings);
-out:
- spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr,
- const struct dss_lcd_mgr_config *config)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
- mp->lcd_config = *config;
- mp->extra_info_dirty = true;
-}
-
-static void dss_mgr_set_lcd_config_compat(struct omap_overlay_manager *mgr,
- const struct dss_lcd_mgr_config *config)
-{
- unsigned long flags;
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
- spin_lock_irqsave(&data_lock, flags);
-
- if (mp->enabled) {
- DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n",
- mgr->name);
- goto out;
- }
-
- dss_apply_mgr_lcd_config(mgr, config);
-out:
- spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static int dss_ovl_set_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- unsigned long flags;
- int r;
-
- r = dss_ovl_simple_check(ovl, info);
- if (r)
- return r;
-
- spin_lock_irqsave(&data_lock, flags);
-
- op->user_info = *info;
- op->user_info_dirty = true;
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- return 0;
-}
-
-static void dss_ovl_get_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- unsigned long flags;
-
- spin_lock_irqsave(&data_lock, flags);
-
- *info = op->user_info;
-
- spin_unlock_irqrestore(&data_lock, flags);
-}
-
-static int dss_ovl_set_manager(struct omap_overlay *ovl,
- struct omap_overlay_manager *mgr)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- unsigned long flags;
- int r;
-
- if (!mgr)
- return -EINVAL;
-
- mutex_lock(&apply_lock);
-
- if (ovl->manager) {
- DSSERR("overlay '%s' already has a manager '%s'\n",
- ovl->name, ovl->manager->name);
- r = -EINVAL;
- goto err;
- }
-
- r = dispc_runtime_get();
- if (r)
- goto err;
-
- spin_lock_irqsave(&data_lock, flags);
-
- if (op->enabled) {
- spin_unlock_irqrestore(&data_lock, flags);
- DSSERR("overlay has to be disabled to change the manager\n");
- r = -EINVAL;
- goto err1;
- }
-
- dispc_ovl_set_channel_out(ovl->id, mgr->id);
-
- ovl->manager = mgr;
- list_add_tail(&ovl->list, &mgr->overlays);
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- dispc_runtime_put();
-
- mutex_unlock(&apply_lock);
-
- return 0;
-
-err1:
- dispc_runtime_put();
-err:
- mutex_unlock(&apply_lock);
- return r;
-}
-
-static int dss_ovl_unset_manager(struct omap_overlay *ovl)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- unsigned long flags;
- int r;
-
- mutex_lock(&apply_lock);
-
- if (!ovl->manager) {
- DSSERR("failed to detach overlay: manager not set\n");
- r = -EINVAL;
- goto err;
- }
-
- spin_lock_irqsave(&data_lock, flags);
-
- if (op->enabled) {
- spin_unlock_irqrestore(&data_lock, flags);
- DSSERR("overlay has to be disabled to unset the manager\n");
- r = -EINVAL;
- goto err;
- }
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- /* wait for pending extra_info updates to ensure the ovl is disabled */
- wait_pending_extra_info_updates();
-
- /*
- * For a manual update display, there is no guarantee that the overlay
- * is really disabled in HW; we may need an extra update from this
- * manager before the configurations can go in. Return an error if the
- * overlay still needs an update from the manager.
- *
- * TODO: Instead of returning an error, try to do a dummy manager update
- * here to disable the overlay in hardware. Use the *GATED fields in
- * the DISPC_CONFIG registers to do a dummy update.
- */
- spin_lock_irqsave(&data_lock, flags);
-
- if (ovl_manual_update(ovl) && op->extra_info_dirty) {
- spin_unlock_irqrestore(&data_lock, flags);
- DSSERR("need an update to change the manager\n");
- r = -EINVAL;
- goto err;
- }
-
- ovl->manager = NULL;
- list_del(&ovl->list);
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- mutex_unlock(&apply_lock);
-
- return 0;
-err:
- mutex_unlock(&apply_lock);
- return r;
-}
-
-static bool dss_ovl_is_enabled(struct omap_overlay *ovl)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- unsigned long flags;
- bool e;
-
- spin_lock_irqsave(&data_lock, flags);
-
- e = op->enabled;
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- return e;
-}
-
-static int dss_ovl_enable(struct omap_overlay *ovl)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- unsigned long flags;
- int r;
-
- mutex_lock(&apply_lock);
-
- if (op->enabled) {
- r = 0;
- goto err1;
- }
-
- if (ovl->manager == NULL || ovl->manager->output == NULL) {
- r = -EINVAL;
- goto err1;
- }
-
- spin_lock_irqsave(&data_lock, flags);
-
- op->enabling = true;
-
- r = dss_check_settings(ovl->manager);
- if (r) {
- DSSERR("failed to enable overlay %d: check_settings failed\n",
- ovl->id);
- goto err2;
- }
-
- dss_setup_fifos();
-
- op->enabling = false;
- dss_apply_ovl_enable(ovl, true);
-
- dss_write_regs();
- dss_set_go_bits();
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- mutex_unlock(&apply_lock);
-
- return 0;
-err2:
- op->enabling = false;
- spin_unlock_irqrestore(&data_lock, flags);
-err1:
- mutex_unlock(&apply_lock);
- return r;
-}
-
-static int dss_ovl_disable(struct omap_overlay *ovl)
-{
- struct ovl_priv_data *op = get_ovl_priv(ovl);
- unsigned long flags;
- int r;
-
- mutex_lock(&apply_lock);
-
- if (!op->enabled) {
- r = 0;
- goto err;
- }
-
- if (ovl->manager == NULL || ovl->manager->output == NULL) {
- r = -EINVAL;
- goto err;
- }
-
- spin_lock_irqsave(&data_lock, flags);
-
- dss_apply_ovl_enable(ovl, false);
- dss_write_regs();
- dss_set_go_bits();
-
- spin_unlock_irqrestore(&data_lock, flags);
-
- mutex_unlock(&apply_lock);
-
- return 0;
-
-err:
- mutex_unlock(&apply_lock);
- return r;
-}
-
-static int dss_mgr_register_framedone_handler_compat(struct omap_overlay_manager *mgr,
- void (*handler)(void *), void *data)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
- if (mp->framedone_handler)
- return -EBUSY;
-
- mp->framedone_handler = handler;
- mp->framedone_handler_data = data;
-
- return 0;
-}
-
-static void dss_mgr_unregister_framedone_handler_compat(struct omap_overlay_manager *mgr,
- void (*handler)(void *), void *data)
-{
- struct mgr_priv_data *mp = get_mgr_priv(mgr);
-
- WARN_ON(mp->framedone_handler != handler ||
- mp->framedone_handler_data != data);
-
- mp->framedone_handler = NULL;
- mp->framedone_handler_data = NULL;
-}
-
-static const struct dss_mgr_ops apply_mgr_ops = {
- .connect = dss_mgr_connect_compat,
- .disconnect = dss_mgr_disconnect_compat,
- .start_update = dss_mgr_start_update_compat,
- .enable = dss_mgr_enable_compat,
- .disable = dss_mgr_disable_compat,
- .set_timings = dss_mgr_set_timings_compat,
- .set_lcd_config = dss_mgr_set_lcd_config_compat,
- .register_framedone_handler = dss_mgr_register_framedone_handler_compat,
- .unregister_framedone_handler = dss_mgr_unregister_framedone_handler_compat,
-};
-
-static int compat_refcnt;
-static DEFINE_MUTEX(compat_init_lock);
-
-int omapdss_compat_init(void)
-{
- struct platform_device *pdev = dss_get_core_pdev();
- int i, r;
-
- mutex_lock(&compat_init_lock);
-
- if (compat_refcnt++ > 0)
- goto out;
-
- apply_init_priv();
-
- dss_init_overlay_managers_sysfs(pdev);
- dss_init_overlays(pdev);
-
- for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
- struct omap_overlay_manager *mgr;
-
- mgr = omap_dss_get_overlay_manager(i);
-
- mgr->set_output = &dss_mgr_set_output;
- mgr->unset_output = &dss_mgr_unset_output;
- mgr->apply = &omap_dss_mgr_apply;
- mgr->set_manager_info = &dss_mgr_set_info;
- mgr->get_manager_info = &dss_mgr_get_info;
- mgr->wait_for_go = &dss_mgr_wait_for_go;
- mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
- mgr->get_device = &dss_mgr_get_device;
- }
-
- for (i = 0; i < omap_dss_get_num_overlays(); i++) {
- struct omap_overlay *ovl = omap_dss_get_overlay(i);
-
- ovl->is_enabled = &dss_ovl_is_enabled;
- ovl->enable = &dss_ovl_enable;
- ovl->disable = &dss_ovl_disable;
- ovl->set_manager = &dss_ovl_set_manager;
- ovl->unset_manager = &dss_ovl_unset_manager;
- ovl->set_overlay_info = &dss_ovl_set_info;
- ovl->get_overlay_info = &dss_ovl_get_info;
- ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
- ovl->get_device = &dss_ovl_get_device;
- }
-
- r = dss_install_mgr_ops(&apply_mgr_ops);
- if (r)
- goto err_mgr_ops;
-
- r = display_init_sysfs(pdev);
- if (r)
- goto err_disp_sysfs;
-
- dispc_runtime_get();
-
- r = dss_dispc_initialize_irq();
- if (r)
- goto err_init_irq;
-
- dispc_runtime_put();
-
-out:
- mutex_unlock(&compat_init_lock);
-
- return 0;
-
-err_init_irq:
- dispc_runtime_put();
- display_uninit_sysfs(pdev);
-
-err_disp_sysfs:
- dss_uninstall_mgr_ops();
-
-err_mgr_ops:
- dss_uninit_overlay_managers_sysfs(pdev);
- dss_uninit_overlays(pdev);
-
- compat_refcnt--;
-
- mutex_unlock(&compat_init_lock);
-
- return r;
-}
-EXPORT_SYMBOL(omapdss_compat_init);
-
-void omapdss_compat_uninit(void)
-{
- struct platform_device *pdev = dss_get_core_pdev();
-
- mutex_lock(&compat_init_lock);
-
- if (--compat_refcnt > 0)
- goto out;
-
- dss_dispc_uninitialize_irq();
-
- display_uninit_sysfs(pdev);
-
- dss_uninstall_mgr_ops();
-
- dss_uninit_overlay_managers_sysfs(pdev);
- dss_uninit_overlays(pdev);
-out:
- mutex_unlock(&compat_init_lock);
-}
-EXPORT_SYMBOL(omapdss_compat_uninit);
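omapdss_compat_init() and omapdss_compat_uninit() above are a refcounted-singleton pair: under a shared mutex, only the first caller performs the real setup and only the last caller tears it down, with the error path rolling the reference back. A minimal sketch of just that pattern, with the actual setup and teardown passed in as hypothetical hooks:

#include <linux/mutex.h>

static int example_refcnt;
static DEFINE_MUTEX(example_init_lock);

/* first caller does the real setup; later callers only take a reference */
int example_init(int (*do_setup)(void))
{
	int r = 0;

	mutex_lock(&example_init_lock);

	if (example_refcnt++ > 0)
		goto out;

	r = do_setup();
	if (r)
		example_refcnt--;	/* roll the reference back on failure */
out:
	mutex_unlock(&example_init_lock);
	return r;
}

/* the last caller out performs the teardown */
void example_uninit(void (*do_teardown)(void))
{
	mutex_lock(&example_init_lock);

	if (--example_refcnt == 0)
		do_teardown();

	mutex_unlock(&example_init_lock);
}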
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 54eeb507f9b3..7e4e5bebabbe 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -165,32 +165,20 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
/* PLATFORM DEVICE */
-static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+
+static void dss_disable_all_devices(void)
{
- DSSDBG("pm notif %lu\n", v);
-
- switch (v) {
- case PM_SUSPEND_PREPARE:
- case PM_HIBERNATION_PREPARE:
- case PM_RESTORE_PREPARE:
- DSSDBG("suspending displays\n");
- return dss_suspend_all_devices();
-
- case PM_POST_SUSPEND:
- case PM_POST_HIBERNATION:
- case PM_POST_RESTORE:
- DSSDBG("resuming displays\n");
- return dss_resume_all_devices();
-
- default:
- return 0;
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev) {
+ if (!dssdev->driver)
+ continue;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ dssdev->driver->disable(dssdev);
}
}
-static struct notifier_block omap_dss_pm_notif_block = {
- .notifier_call = omap_dss_pm_notif,
-};
-
static int __init omap_dss_probe(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
@@ -211,8 +199,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
else if (pdata->default_device)
core.default_display_name = pdata->default_device->name;
- register_pm_notifier(&omap_dss_pm_notif_block);
-
return 0;
err_debugfs:
@@ -222,8 +208,6 @@ err_debugfs:
static int omap_dss_remove(struct platform_device *pdev)
{
- unregister_pm_notifier(&omap_dss_pm_notif_block);
-
dss_uninitialize_debugfs();
return 0;
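The core.c hunk above drops the driver's system-wide PM notifier (display suspend/resume no longer hooks into the suspend path here) and moves dss_disable_all_devices() in from display.c. For reference, the notifier idiom being removed is the stock register_pm_notifier() pattern; a sketch with placeholder actions:

#include <linux/notifier.h>
#include <linux/suspend.h>

static int my_pm_notif(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		/* quiesce the hardware before the system goes down */
		return NOTIFY_DONE;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		/* bring the hardware back up on resume */
		return NOTIFY_DONE;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block my_pm_notif_block = {
	.notifier_call = my_pm_notif,
};

/* at probe:  register_pm_notifier(&my_pm_notif_block);
 * at remove: unregister_pm_notifier(&my_pm_notif_block); */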
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc-compat.c b/drivers/gpu/drm/omapdrm/dss/dispc-compat.c
deleted file mode 100644
index 0918b3bfe82a..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/dispc-compat.c
+++ /dev/null
@@ -1,667 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "APPLY"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/seq_file.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-#include "dispc-compat.h"
-
-#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
- DISPC_IRQ_OCP_ERR | \
- DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
- DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
- DISPC_IRQ_SYNC_LOST | \
- DISPC_IRQ_SYNC_LOST_DIGIT)
-
-#define DISPC_MAX_NR_ISRS 8
-
-struct omap_dispc_isr_data {
- omap_dispc_isr_t isr;
- void *arg;
- u32 mask;
-};
-
-struct dispc_irq_stats {
- unsigned long last_reset;
- unsigned irq_count;
- unsigned irqs[32];
-};
-
-static struct {
- spinlock_t irq_lock;
- u32 irq_error_mask;
- struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
- u32 error_irqs;
- struct work_struct error_work;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spinlock_t irq_stats_lock;
- struct dispc_irq_stats irq_stats;
-#endif
-} dispc_compat;
-
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static void dispc_dump_irqs(struct seq_file *s)
-{
- unsigned long flags;
- struct dispc_irq_stats stats;
-
- spin_lock_irqsave(&dispc_compat.irq_stats_lock, flags);
-
- stats = dispc_compat.irq_stats;
- memset(&dispc_compat.irq_stats, 0, sizeof(dispc_compat.irq_stats));
- dispc_compat.irq_stats.last_reset = jiffies;
-
- spin_unlock_irqrestore(&dispc_compat.irq_stats_lock, flags);
-
- seq_printf(s, "period %u ms\n",
- jiffies_to_msecs(jiffies - stats.last_reset));
-
- seq_printf(s, "irqs %d\n", stats.irq_count);
-#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
-
- PIS(FRAMEDONE);
- PIS(VSYNC);
- PIS(EVSYNC_EVEN);
- PIS(EVSYNC_ODD);
- PIS(ACBIAS_COUNT_STAT);
- PIS(PROG_LINE_NUM);
- PIS(GFX_FIFO_UNDERFLOW);
- PIS(GFX_END_WIN);
- PIS(PAL_GAMMA_MASK);
- PIS(OCP_ERR);
- PIS(VID1_FIFO_UNDERFLOW);
- PIS(VID1_END_WIN);
- PIS(VID2_FIFO_UNDERFLOW);
- PIS(VID2_END_WIN);
- if (dss_feat_get_num_ovls() > 3) {
- PIS(VID3_FIFO_UNDERFLOW);
- PIS(VID3_END_WIN);
- }
- PIS(SYNC_LOST);
- PIS(SYNC_LOST_DIGIT);
- PIS(WAKEUP);
- if (dss_has_feature(FEAT_MGR_LCD2)) {
- PIS(FRAMEDONE2);
- PIS(VSYNC2);
- PIS(ACBIAS_COUNT_STAT2);
- PIS(SYNC_LOST2);
- }
- if (dss_has_feature(FEAT_MGR_LCD3)) {
- PIS(FRAMEDONE3);
- PIS(VSYNC3);
- PIS(ACBIAS_COUNT_STAT3);
- PIS(SYNC_LOST3);
- }
-#undef PIS
-}
-#endif
-
-/* dispc_compat.irq_lock has to be locked by the caller */
-static void _omap_dispc_set_irqs(void)
-{
- u32 mask;
- int i;
- struct omap_dispc_isr_data *isr_data;
-
- mask = dispc_compat.irq_error_mask;
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc_compat.registered_isr[i];
-
- if (isr_data->isr == NULL)
- continue;
-
- mask |= isr_data->mask;
- }
-
- dispc_write_irqenable(mask);
-}
-
-int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
-{
- int i;
- int ret;
- unsigned long flags;
- struct omap_dispc_isr_data *isr_data;
-
- if (isr == NULL)
- return -EINVAL;
-
- spin_lock_irqsave(&dispc_compat.irq_lock, flags);
-
- /* check for duplicate entry */
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc_compat.registered_isr[i];
- if (isr_data->isr == isr && isr_data->arg == arg &&
- isr_data->mask == mask) {
- ret = -EINVAL;
- goto err;
- }
- }
-
- isr_data = NULL;
- ret = -EBUSY;
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc_compat.registered_isr[i];
-
- if (isr_data->isr != NULL)
- continue;
-
- isr_data->isr = isr;
- isr_data->arg = arg;
- isr_data->mask = mask;
- ret = 0;
-
- break;
- }
-
- if (ret)
- goto err;
-
- _omap_dispc_set_irqs();
-
- spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
- return 0;
-err:
- spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(omap_dispc_register_isr);
-
-int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
-{
- int i;
- unsigned long flags;
- int ret = -EINVAL;
- struct omap_dispc_isr_data *isr_data;
-
- spin_lock_irqsave(&dispc_compat.irq_lock, flags);
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &dispc_compat.registered_isr[i];
- if (isr_data->isr != isr || isr_data->arg != arg ||
- isr_data->mask != mask)
- continue;
-
- /* found the correct isr */
-
- isr_data->isr = NULL;
- isr_data->arg = NULL;
- isr_data->mask = 0;
-
- ret = 0;
- break;
- }
-
- if (ret == 0)
- _omap_dispc_set_irqs();
-
- spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL(omap_dispc_unregister_isr);
-
-static void print_irq_status(u32 status)
-{
- if ((status & dispc_compat.irq_error_mask) == 0)
- return;
-
-#define PIS(x) (status & DISPC_IRQ_##x) ? (#x " ") : ""
-
- pr_debug("DISPC IRQ: 0x%x: %s%s%s%s%s%s%s%s%s\n",
- status,
- PIS(OCP_ERR),
- PIS(GFX_FIFO_UNDERFLOW),
- PIS(VID1_FIFO_UNDERFLOW),
- PIS(VID2_FIFO_UNDERFLOW),
- dss_feat_get_num_ovls() > 3 ? PIS(VID3_FIFO_UNDERFLOW) : "",
- PIS(SYNC_LOST),
- PIS(SYNC_LOST_DIGIT),
- dss_has_feature(FEAT_MGR_LCD2) ? PIS(SYNC_LOST2) : "",
- dss_has_feature(FEAT_MGR_LCD3) ? PIS(SYNC_LOST3) : "");
-#undef PIS
-}
-
-/* Called from dss.c. Note that we don't touch clocks here,
- * but we presume they are on because we got an IRQ. However,
- * an irq handler may turn the clocks off, so the clocks may no
- * longer be on later in the function. */
-static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
-{
- int i;
- u32 irqstatus, irqenable;
- u32 handledirqs = 0;
- u32 unhandled_errors;
- struct omap_dispc_isr_data *isr_data;
- struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
-
- spin_lock(&dispc_compat.irq_lock);
-
- irqstatus = dispc_read_irqstatus();
- irqenable = dispc_read_irqenable();
-
- /* IRQ is not for us */
- if (!(irqstatus & irqenable)) {
- spin_unlock(&dispc_compat.irq_lock);
- return IRQ_NONE;
- }
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_lock(&dispc_compat.irq_stats_lock);
- dispc_compat.irq_stats.irq_count++;
- dss_collect_irq_stats(irqstatus, dispc_compat.irq_stats.irqs);
- spin_unlock(&dispc_compat.irq_stats_lock);
-#endif
-
- print_irq_status(irqstatus);
-
- /* Ack the interrupt. Do it here before clocks are possibly turned
- * off */
- dispc_clear_irqstatus(irqstatus);
- /* flush posted write */
- dispc_read_irqstatus();
-
- /* make a copy and unlock, so that isrs can unregister
- * themselves */
- memcpy(registered_isr, dispc_compat.registered_isr,
- sizeof(registered_isr));
-
- spin_unlock(&dispc_compat.irq_lock);
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- isr_data = &registered_isr[i];
-
- if (!isr_data->isr)
- continue;
-
- if (isr_data->mask & irqstatus) {
- isr_data->isr(isr_data->arg, irqstatus);
- handledirqs |= isr_data->mask;
- }
- }
-
- spin_lock(&dispc_compat.irq_lock);
-
- unhandled_errors = irqstatus & ~handledirqs & dispc_compat.irq_error_mask;
-
- if (unhandled_errors) {
- dispc_compat.error_irqs |= unhandled_errors;
-
- dispc_compat.irq_error_mask &= ~unhandled_errors;
- _omap_dispc_set_irqs();
-
- schedule_work(&dispc_compat.error_work);
- }
-
- spin_unlock(&dispc_compat.irq_lock);
-
- return IRQ_HANDLED;
-}
-
-static void dispc_error_worker(struct work_struct *work)
-{
- int i;
- u32 errors;
- unsigned long flags;
- static const unsigned fifo_underflow_bits[] = {
- DISPC_IRQ_GFX_FIFO_UNDERFLOW,
- DISPC_IRQ_VID1_FIFO_UNDERFLOW,
- DISPC_IRQ_VID2_FIFO_UNDERFLOW,
- DISPC_IRQ_VID3_FIFO_UNDERFLOW,
- };
-
- spin_lock_irqsave(&dispc_compat.irq_lock, flags);
- errors = dispc_compat.error_irqs;
- dispc_compat.error_irqs = 0;
- spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
- dispc_runtime_get();
-
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- unsigned bit;
-
- ovl = omap_dss_get_overlay(i);
- bit = fifo_underflow_bits[i];
-
- if (bit & errors) {
- DSSERR("FIFO UNDERFLOW on %s, disabling the overlay\n",
- ovl->name);
- ovl->disable(ovl);
- msleep(50);
- }
- }
-
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- struct omap_overlay_manager *mgr;
- unsigned bit;
-
- mgr = omap_dss_get_overlay_manager(i);
- bit = dispc_mgr_get_sync_lost_irq(i);
-
- if (bit & errors) {
- int j;
-
- DSSERR("SYNC_LOST on channel %s, restarting the output "
- "with video overlays disabled\n",
- mgr->name);
-
- dss_mgr_disable(mgr);
-
- for (j = 0; j < omap_dss_get_num_overlays(); ++j) {
- struct omap_overlay *ovl;
- ovl = omap_dss_get_overlay(j);
-
- if (ovl->id != OMAP_DSS_GFX &&
- ovl->manager == mgr)
- ovl->disable(ovl);
- }
-
- dss_mgr_enable(mgr);
- }
- }
-
- if (errors & DISPC_IRQ_OCP_ERR) {
- DSSERR("OCP_ERR\n");
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- struct omap_overlay_manager *mgr;
-
- mgr = omap_dss_get_overlay_manager(i);
- dss_mgr_disable(mgr);
- }
- }
-
- spin_lock_irqsave(&dispc_compat.irq_lock, flags);
- dispc_compat.irq_error_mask |= errors;
- _omap_dispc_set_irqs();
- spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);
-
- dispc_runtime_put();
-}
-
-int dss_dispc_initialize_irq(void)
-{
- int r;
-
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- spin_lock_init(&dispc_compat.irq_stats_lock);
- dispc_compat.irq_stats.last_reset = jiffies;
- dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
-#endif
-
- spin_lock_init(&dispc_compat.irq_lock);
-
- memset(dispc_compat.registered_isr, 0,
- sizeof(dispc_compat.registered_isr));
-
- dispc_compat.irq_error_mask = DISPC_IRQ_MASK_ERROR;
- if (dss_has_feature(FEAT_MGR_LCD2))
- dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
- if (dss_has_feature(FEAT_MGR_LCD3))
- dispc_compat.irq_error_mask |= DISPC_IRQ_SYNC_LOST3;
- if (dss_feat_get_num_ovls() > 3)
- dispc_compat.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
-
- /*
- * there's SYNC_LOST_DIGIT waiting after enabling the DSS,
- * so clear it
- */
- dispc_clear_irqstatus(dispc_read_irqstatus());
-
- INIT_WORK(&dispc_compat.error_work, dispc_error_worker);
-
- _omap_dispc_set_irqs();
-
- r = dispc_request_irq(omap_dispc_irq_handler, &dispc_compat);
- if (r) {
- DSSERR("dispc_request_irq failed\n");
- return r;
- }
-
- return 0;
-}
-
-void dss_dispc_uninitialize_irq(void)
-{
- dispc_free_irq(&dispc_compat);
-}
-
-static void dispc_mgr_disable_isr(void *data, u32 mask)
-{
- struct completion *compl = data;
- complete(compl);
-}
-
-static void dispc_mgr_enable_lcd_out(enum omap_channel channel)
-{
- dispc_mgr_enable(channel, true);
-}
-
-static void dispc_mgr_disable_lcd_out(enum omap_channel channel)
-{
- DECLARE_COMPLETION_ONSTACK(framedone_compl);
- int r;
- u32 irq;
-
- if (!dispc_mgr_is_enabled(channel))
- return;
-
- /*
- * When we disable LCD output, we need to wait for FRAMEDONE to know
- * that DISPC has finished with the LCD output.
- */
-
- irq = dispc_mgr_get_framedone_irq(channel);
-
- r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
- irq);
- if (r)
- DSSERR("failed to register FRAMEDONE isr\n");
-
- dispc_mgr_enable(channel, false);
-
- /* if we couldn't register for framedone, just sleep and exit */
- if (r) {
- msleep(100);
- return;
- }
-
- if (!wait_for_completion_timeout(&framedone_compl,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for FRAME DONE\n");
-
- r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
- irq);
- if (r)
- DSSERR("failed to unregister FRAMEDONE isr\n");
-}
-
-static void dispc_digit_out_enable_isr(void *data, u32 mask)
-{
- struct completion *compl = data;
-
- /* ignore any sync lost interrupts */
- if (mask & (DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD))
- complete(compl);
-}
-
-static void dispc_mgr_enable_digit_out(void)
-{
- DECLARE_COMPLETION_ONSTACK(vsync_compl);
- int r;
- u32 irq_mask;
-
- if (dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT))
- return;
-
- /*
- * Digit output produces some sync lost interrupts during the first
- * frame when enabling. Those need to be ignored, so we register for the
- * sync lost irq to prevent the error handler from triggering.
- */
-
- irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT) |
- dispc_mgr_get_sync_lost_irq(OMAP_DSS_CHANNEL_DIGIT);
-
- r = omap_dispc_register_isr(dispc_digit_out_enable_isr, &vsync_compl,
- irq_mask);
- if (r) {
- DSSERR("failed to register %x isr\n", irq_mask);
- return;
- }
-
- dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, true);
-
- /* wait for the first evsync */
- if (!wait_for_completion_timeout(&vsync_compl, msecs_to_jiffies(100)))
- DSSERR("timeout waiting for digit out to start\n");
-
- r = omap_dispc_unregister_isr(dispc_digit_out_enable_isr, &vsync_compl,
- irq_mask);
- if (r)
- DSSERR("failed to unregister %x isr\n", irq_mask);
-}
-
-static void dispc_mgr_disable_digit_out(void)
-{
- DECLARE_COMPLETION_ONSTACK(framedone_compl);
- int r, i;
- u32 irq_mask;
- int num_irqs;
-
- if (!dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT))
- return;
-
- /*
- * When we disable the digit output, we need to wait for FRAMEDONE to
- * know that DISPC has finished with the output.
- */
-
- irq_mask = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_DIGIT);
- num_irqs = 1;
-
- if (!irq_mask) {
- /*
- * omap 2/3 don't have framedone irq for TV, so we need to use
- * vsyncs for this.
- */
-
- irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT);
- /*
- * We need to wait for both even and odd vsyncs. Note that this
- * is not totally reliable, as we could get a vsync interrupt
- * before we disable the output, which leads to a timeout in
- * wait_for_completion.
- */
- num_irqs = 2;
- }
-
- r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
- irq_mask);
- if (r)
- DSSERR("failed to register %x isr\n", irq_mask);
-
- dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, false);
-
- /* if we couldn't register the irq, just sleep and exit */
- if (r) {
- msleep(100);
- return;
- }
-
- for (i = 0; i < num_irqs; ++i) {
- if (!wait_for_completion_timeout(&framedone_compl,
- msecs_to_jiffies(100)))
- DSSERR("timeout waiting for digit out to stop\n");
- }
-
- r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
- irq_mask);
- if (r)
- DSSERR("failed to unregister %x isr\n", irq_mask);
-}
-
-void dispc_mgr_enable_sync(enum omap_channel channel)
-{
- if (dss_mgr_is_lcd(channel))
- dispc_mgr_enable_lcd_out(channel);
- else if (channel == OMAP_DSS_CHANNEL_DIGIT)
- dispc_mgr_enable_digit_out();
- else
- WARN_ON(1);
-}
-
-void dispc_mgr_disable_sync(enum omap_channel channel)
-{
- if (dss_mgr_is_lcd(channel))
- dispc_mgr_disable_lcd_out(channel);
- else if (channel == OMAP_DSS_CHANNEL_DIGIT)
- dispc_mgr_disable_digit_out();
- else
- WARN_ON(1);
-}
-
-static inline void dispc_irq_wait_handler(void *data, u32 mask)
-{
- complete((struct completion *)data);
-}
-
-int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
- unsigned long timeout)
-{
-
- int r;
- DECLARE_COMPLETION_ONSTACK(completion);
-
- r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
- irqmask);
-
- if (r)
- return r;
-
- timeout = wait_for_completion_interruptible_timeout(&completion,
- timeout);
-
- omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
-
- if (timeout == 0)
- return -ETIMEDOUT;
-
- if (timeout == -ERESTARTSYS)
- return -ERESTARTSYS;
-
- return 0;
-}
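omap_dispc_wait_for_irq_interruptible_timeout(), the last function removed above, is an instance of a standard kernel idiom: converting an asynchronous interrupt into a synchronous wait with an on-stack completion. A generic sketch, assuming register_isr()/unregister_isr() hooks analogous to the omap_dispc_* ones:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/types.h>

/* the ISR only signals whoever sleeps on the completion */
static void wait_isr(void *data, u32 mask)
{
	complete((struct completion *)data);
}

/* assumed hooks, analogous to omap_dispc_{register,unregister}_isr() */
int register_isr(void (*isr)(void *, u32), void *arg, u32 mask);
int unregister_isr(void (*isr)(void *, u32), void *arg, u32 mask);

static int wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	long t;
	int r;

	r = register_isr(wait_isr, &completion, irqmask);
	if (r)
		return r;

	t = wait_for_completion_interruptible_timeout(&completion, timeout);

	/* always unregister, whether we timed out, got the IRQ or a signal */
	unregister_isr(wait_isr, &completion, irqmask);

	if (t == 0)
		return -ETIMEDOUT;
	if (t == -ERESTARTSYS)
		return -ERESTARTSYS;

	return 0;
}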
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 6b50476ec669..f83608b69e68 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -104,6 +104,15 @@ struct dispc_features {
bool supports_sync_align:1;
bool has_writeback:1;
+
+ bool supports_double_pixel:1;
+
+ /*
+ * Field order for VENC is different from HDMI's. We should handle this
+ * in some intelligent manner, but as each SoC has either HDMI or VENC,
+ * never both, we can just use this flag for now.
+ */
+ bool reverse_ilace_field_order:1;
};
#define DISPC_MAX_NR_FIFOS 5
@@ -2552,47 +2561,6 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
return 0;
}
-int dispc_ovl_check(enum omap_plane plane, enum omap_channel channel,
- const struct omap_overlay_info *oi,
- const struct omap_video_timings *timings,
- int *x_predecim, int *y_predecim)
-{
- enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
- bool five_taps = true;
- bool fieldmode = false;
- u16 in_height = oi->height;
- u16 in_width = oi->width;
- bool ilace = timings->interlace;
- u16 out_width, out_height;
- int pos_x = oi->pos_x;
- unsigned long pclk = dispc_mgr_pclk_rate(channel);
- unsigned long lclk = dispc_mgr_lclk_rate(channel);
-
- out_width = oi->out_width == 0 ? oi->width : oi->out_width;
- out_height = oi->out_height == 0 ? oi->height : oi->out_height;
-
- if (ilace && oi->height == out_height)
- fieldmode = true;
-
- if (ilace) {
- if (fieldmode)
- in_height /= 2;
- out_height /= 2;
-
- DSSDBG("adjusting for ilace: height %d, out_height %d\n",
- in_height, out_height);
- }
-
- if (!dss_feat_color_mode_supported(plane, oi->color_mode))
- return -EINVAL;
-
- return dispc_ovl_calc_scaling(pclk, lclk, caps, timings, in_width,
- in_height, out_width, out_height, oi->color_mode,
- &five_taps, x_predecim, y_predecim, pos_x,
- oi->rotation_type, false);
-}
-EXPORT_SYMBOL(dispc_ovl_check);
-
static int dispc_ovl_setup_common(enum omap_plane plane,
enum omap_overlay_caps caps, u32 paddr, u32 p_uv_addr,
u16 screen_width, int pos_x, int pos_y, u16 width, u16 height,
@@ -2747,6 +2715,9 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
dispc_ovl_configure_burst_type(plane, rotation_type);
+ if (dispc.feat->reverse_ilace_field_order)
+ swap(offset0, offset1);
+
dispc_ovl_set_ba0(plane, paddr + offset0);
dispc_ovl_set_ba1(plane, paddr + offset1);
@@ -2898,6 +2869,12 @@ bool dispc_ovl_enabled(enum omap_plane plane)
}
EXPORT_SYMBOL(dispc_ovl_enabled);
+enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel)
+{
+ return dss_feat_get_supported_outputs(channel);
+}
+EXPORT_SYMBOL(dispc_mgr_get_supported_outputs);
+
void dispc_mgr_enable(enum omap_channel channel, bool enable)
{
mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
@@ -3287,6 +3264,10 @@ void dispc_mgr_set_timings(enum omap_channel channel,
} else {
if (t.interlace)
t.y_res /= 2;
+
+ if (dispc.feat->supports_double_pixel)
+ REG_FLD_MOD(DISPC_CONTROL, t.double_pixel ? 1 : 0,
+ 19, 17);
}
dispc_mgr_set_size(channel, t.x_res, t.y_res);
@@ -3951,6 +3932,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
.set_max_preload = true,
.supports_sync_align = true,
.has_writeback = true,
+ .supports_double_pixel = true,
+ .reverse_ilace_field_order = true,
};
static const struct dispc_features omap54xx_dispc_feats = {
@@ -3974,6 +3957,8 @@ static const struct dispc_features omap54xx_dispc_feats = {
.set_max_preload = true,
.supports_sync_align = true,
.has_writeback = true,
+ .supports_double_pixel = true,
+ .reverse_ilace_field_order = true,
};
static int dispc_init_features(struct platform_device *pdev)
@@ -4129,8 +4114,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
dispc_runtime_put();
- dss_init_overlay_managers();
-
dss_debugfs_create_file("dispc", dispc_dump_regs);
return 0;
@@ -4144,8 +4127,6 @@ static void dispc_unbind(struct device *dev, struct device *master,
void *data)
{
pm_runtime_disable(dev);
-
- dss_uninit_overlay_managers();
}
static const struct component_ops dispc_component_ops = {
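The dispc.c changes above grow the per-SoC dispc_features bitfield by two flags and gate behaviour on them at the point of use (swap(offset0, offset1) for the field order, the DISPC_CONTROL write for double-pixel mode). The pattern in isolation, with flag names taken from the diff and an illustrative caller:

#include <linux/kernel.h>	/* swap() */
#include <linux/types.h>

struct soc_features {
	/* 1-bit flags keep the per-SoC table compact */
	bool supports_double_pixel:1;
	bool reverse_ilace_field_order:1;
};

static const struct soc_features omap4_like_feats = {
	.supports_double_pixel = true,
	.reverse_ilace_field_order = true,
};

/* callers test a capability flag instead of matching SoC revisions */
static void setup_field_offsets(const struct soc_features *feat,
				u32 *offset0, u32 *offset1)
{
	if (feat->reverse_ilace_field_order)
		swap(*offset0, *offset1);
}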
diff --git a/drivers/gpu/drm/omapdrm/dss/display-sysfs.c b/drivers/gpu/drm/omapdrm/dss/display-sysfs.c
deleted file mode 100644
index 6ad0991f8259..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/display-sysfs.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "DISPLAY"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sysfs.h>
-
-#include <video/omapdss.h>
-#include "dss.h"
-
-static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n",
- dssdev->name ?
- dssdev->name : "");
-}
-
-static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- omapdss_device_is_enabled(dssdev));
-}
-
-static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
- const char *buf, size_t size)
-{
- int r;
- bool enable;
-
- r = strtobool(buf, &enable);
- if (r)
- return r;
-
- if (enable == omapdss_device_is_enabled(dssdev))
- return size;
-
- if (omapdss_device_is_connected(dssdev) == false)
- return -ENODEV;
-
- if (enable) {
- r = dssdev->driver->enable(dssdev);
- if (r)
- return r;
- } else {
- dssdev->driver->disable(dssdev);
- }
-
- return size;
-}
-
-static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- dssdev->driver->get_te ?
- dssdev->driver->get_te(dssdev) : 0);
-}
-
-static ssize_t display_tear_store(struct omap_dss_device *dssdev,
- const char *buf, size_t size)
-{
- int r;
- bool te;
-
- if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
- return -ENOENT;
-
- r = strtobool(buf, &te);
- if (r)
- return r;
-
- r = dssdev->driver->enable_te(dssdev, te);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf)
-{
- struct omap_video_timings t;
-
- if (!dssdev->driver->get_timings)
- return -ENOENT;
-
- dssdev->driver->get_timings(dssdev, &t);
-
- return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
- t.pixelclock,
- t.x_res, t.hfp, t.hbp, t.hsw,
- t.y_res, t.vfp, t.vbp, t.vsw);
-}
-
-static ssize_t display_timings_store(struct omap_dss_device *dssdev,
- const char *buf, size_t size)
-{
- struct omap_video_timings t = dssdev->panel.timings;
- int r, found;
-
- if (!dssdev->driver->set_timings || !dssdev->driver->check_timings)
- return -ENOENT;
-
- found = 0;
-#ifdef CONFIG_OMAP2_DSS_VENC
- if (strncmp("pal", buf, 3) == 0) {
- t = omap_dss_pal_timings;
- found = 1;
- } else if (strncmp("ntsc", buf, 4) == 0) {
- t = omap_dss_ntsc_timings;
- found = 1;
- }
-#endif
- if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
- &t.pixelclock,
- &t.x_res, &t.hfp, &t.hbp, &t.hsw,
- &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
- return -EINVAL;
-
- r = dssdev->driver->check_timings(dssdev, &t);
- if (r)
- return r;
-
- dssdev->driver->disable(dssdev);
- dssdev->driver->set_timings(dssdev, &t);
- r = dssdev->driver->enable(dssdev);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf)
-{
- int rotate;
- if (!dssdev->driver->get_rotate)
- return -ENOENT;
- rotate = dssdev->driver->get_rotate(dssdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
-}
-
-static ssize_t display_rotate_store(struct omap_dss_device *dssdev,
- const char *buf, size_t size)
-{
- int rot, r;
-
- if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
- return -ENOENT;
-
- r = kstrtoint(buf, 0, &rot);
- if (r)
- return r;
-
- r = dssdev->driver->set_rotate(dssdev, rot);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf)
-{
- int mirror;
- if (!dssdev->driver->get_mirror)
- return -ENOENT;
- mirror = dssdev->driver->get_mirror(dssdev);
- return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
-}
-
-static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
- const char *buf, size_t size)
-{
- int r;
- bool mirror;
-
- if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
- return -ENOENT;
-
- r = strtobool(buf, &mirror);
- if (r)
- return r;
-
- r = dssdev->driver->set_mirror(dssdev, mirror);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf)
-{
- unsigned int wss;
-
- if (!dssdev->driver->get_wss)
- return -ENOENT;
-
- wss = dssdev->driver->get_wss(dssdev);
-
- return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
-}
-
-static ssize_t display_wss_store(struct omap_dss_device *dssdev,
- const char *buf, size_t size)
-{
- u32 wss;
- int r;
-
- if (!dssdev->driver->get_wss || !dssdev->driver->set_wss)
- return -ENOENT;
-
- r = kstrtou32(buf, 0, &wss);
- if (r)
- return r;
-
- if (wss > 0xfffff)
- return -EINVAL;
-
- r = dssdev->driver->set_wss(dssdev, wss);
- if (r)
- return r;
-
- return size;
-}
-
-struct display_attribute {
- struct attribute attr;
- ssize_t (*show)(struct omap_dss_device *, char *);
- ssize_t (*store)(struct omap_dss_device *, const char *, size_t);
-};
-
-#define DISPLAY_ATTR(_name, _mode, _show, _store) \
- struct display_attribute display_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-
-static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL);
-static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL);
-static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
- display_enabled_show, display_enabled_store);
-static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR,
- display_tear_show, display_tear_store);
-static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR,
- display_timings_show, display_timings_store);
-static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR,
- display_rotate_show, display_rotate_store);
-static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR,
- display_mirror_show, display_mirror_store);
-static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR,
- display_wss_show, display_wss_store);
-
-static struct attribute *display_sysfs_attrs[] = {
- &display_attr_name.attr,
- &display_attr_display_name.attr,
- &display_attr_enabled.attr,
- &display_attr_tear_elim.attr,
- &display_attr_timings.attr,
- &display_attr_rotate.attr,
- &display_attr_mirror.attr,
- &display_attr_wss.attr,
- NULL
-};
-
-static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct omap_dss_device *dssdev;
- struct display_attribute *display_attr;
-
- dssdev = container_of(kobj, struct omap_dss_device, kobj);
- display_attr = container_of(attr, struct display_attribute, attr);
-
- if (!display_attr->show)
- return -ENOENT;
-
- return display_attr->show(dssdev, buf);
-}
-
-static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev;
- struct display_attribute *display_attr;
-
- dssdev = container_of(kobj, struct omap_dss_device, kobj);
- display_attr = container_of(attr, struct display_attribute, attr);
-
- if (!display_attr->store)
- return -ENOENT;
-
- return display_attr->store(dssdev, buf, size);
-}
-
-static const struct sysfs_ops display_sysfs_ops = {
- .show = display_attr_show,
- .store = display_attr_store,
-};
-
-static struct kobj_type display_ktype = {
- .sysfs_ops = &display_sysfs_ops,
- .default_attrs = display_sysfs_attrs,
-};
-
-int display_init_sysfs(struct platform_device *pdev)
-{
- struct omap_dss_device *dssdev = NULL;
- int r;
-
- for_each_dss_dev(dssdev) {
- r = kobject_init_and_add(&dssdev->kobj, &display_ktype,
- &pdev->dev.kobj, "%s", dssdev->alias);
- if (r) {
- DSSERR("failed to create sysfs files\n");
- omap_dss_put_device(dssdev);
- goto err;
- }
- }
-
- return 0;
-
-err:
- display_uninit_sysfs(pdev);
-
- return r;
-}
-
-void display_uninit_sysfs(struct platform_device *pdev)
-{
- struct omap_dss_device *dssdev = NULL;
-
- for_each_dss_dev(dssdev) {
- if (kobject_name(&dssdev->kobj) == NULL)
- continue;
-
- kobject_del(&dssdev->kobj);
- kobject_put(&dssdev->kobj);
-
- memset(&dssdev->kobj, 0, sizeof(dssdev->kobj));
- }
-}
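display-sysfs.c, deleted above, was built on the raw-kobject sysfs pattern: a custom attribute type embeds typed show/store callbacks, and a generic sysfs_ops recovers both the object and the attribute with container_of(). The skeleton of that dispatch, with illustrative type names:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

struct my_device {
	struct kobject kobj;
	/* ... device state ... */
};

struct my_attribute {
	struct attribute attr;
	ssize_t (*show)(struct my_device *, char *);
	ssize_t (*store)(struct my_device *, const char *, size_t);
};

static ssize_t my_attr_show(struct kobject *kobj, struct attribute *attr,
			    char *buf)
{
	/* recover the typed object and attribute from the generic ones */
	struct my_device *mydev = container_of(kobj, struct my_device, kobj);
	struct my_attribute *ma = container_of(attr, struct my_attribute, attr);

	if (!ma->show)
		return -ENOENT;

	return ma->show(mydev, buf);
}

static const struct sysfs_ops my_sysfs_ops = {
	.show = my_attr_show,
	/* .store follows the same container_of() dispatch */
};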
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index ef5b9027985d..9f3dd09b0a6c 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -78,55 +78,6 @@ void omapdss_default_get_timings(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(omapdss_default_get_timings);
-int dss_suspend_all_devices(void)
-{
- struct omap_dss_device *dssdev = NULL;
-
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- dssdev->driver->disable(dssdev);
- dssdev->activate_after_resume = true;
- } else {
- dssdev->activate_after_resume = false;
- }
- }
-
- return 0;
-}
-
-int dss_resume_all_devices(void)
-{
- struct omap_dss_device *dssdev = NULL;
-
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
-
- if (dssdev->activate_after_resume) {
- dssdev->driver->enable(dssdev);
- dssdev->activate_after_resume = false;
- }
- }
-
- return 0;
-}
-
-void dss_disable_all_devices(void)
-{
- struct omap_dss_device *dssdev = NULL;
-
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- dssdev->driver->disable(dssdev);
- }
-}
-
static LIST_HEAD(panel_list);
static DEFINE_MUTEX(panel_list_mutex);
static int disp_num_counter;
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 7953e6a52346..97ea60257884 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -334,7 +334,7 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
static int dpi_set_mode(struct dpi_data *dpi)
{
struct omap_dss_device *out = &dpi->output;
- struct omap_overlay_manager *mgr = out->manager;
+ enum omap_channel channel = out->dispc_channel;
struct omap_video_timings *t = &dpi->timings;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
@@ -342,7 +342,7 @@ static int dpi_set_mode(struct dpi_data *dpi)
int r = 0;
if (dpi->pll)
- r = dpi_set_dsi_clk(dpi, mgr->id, t->pixelclock, &fck,
+ r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck,
&lck_div, &pck_div);
else
r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
@@ -359,7 +359,7 @@ static int dpi_set_mode(struct dpi_data *dpi)
t->pixelclock = pck;
}
- dss_mgr_set_timings(mgr, t);
+ dss_mgr_set_timings(channel, t);
return 0;
}
@@ -367,7 +367,7 @@ static int dpi_set_mode(struct dpi_data *dpi)
static void dpi_config_lcd_manager(struct dpi_data *dpi)
{
struct omap_dss_device *out = &dpi->output;
- struct omap_overlay_manager *mgr = out->manager;
+ enum omap_channel channel = out->dispc_channel;
dpi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
@@ -378,13 +378,14 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dpi->mgr_config.lcden_sig_polarity = 0;
- dss_mgr_set_lcd_config(mgr, &dpi->mgr_config);
+ dss_mgr_set_lcd_config(channel, &dpi->mgr_config);
}
static int dpi_display_enable(struct omap_dss_device *dssdev)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
struct omap_dss_device *out = &dpi->output;
+ enum omap_channel channel = out->dispc_channel;
int r;
mutex_lock(&dpi->lock);
@@ -395,7 +396,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
goto err_no_reg;
}
- if (out->manager == NULL) {
+ if (!out->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
r = -ENODEV;
goto err_no_out_mgr;
@@ -411,7 +412,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(out->port_num, out->manager->id);
+ r = dss_dpi_select_source(out->port_num, channel);
if (r)
goto err_src_sel;
@@ -429,7 +430,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mdelay(2);
- r = dss_mgr_enable(out->manager);
+ r = dss_mgr_enable(channel);
if (r)
goto err_mgr_enable;
@@ -457,14 +458,14 @@ err_no_reg:
static void dpi_display_disable(struct omap_dss_device *dssdev)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- struct omap_overlay_manager *mgr = dpi->output.manager;
+ enum omap_channel channel = dpi->output.dispc_channel;
mutex_lock(&dpi->lock);
- dss_mgr_disable(mgr);
+ dss_mgr_disable(channel);
if (dpi->pll) {
- dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
dss_pll_disable(dpi->pll);
}
@@ -506,14 +507,17 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- struct omap_overlay_manager *mgr = dpi->output.manager;
+ enum omap_channel channel = dpi->output.dispc_channel;
int lck_div, pck_div;
unsigned long fck;
unsigned long pck;
struct dpi_clk_calc_ctx ctx;
bool ok;
- if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
+ if (timings->x_res % 8 != 0)
+ return -EINVAL;
+
+ if (!dispc_mgr_timings_ok(channel, timings))
return -EINVAL;
if (timings->pixelclock == 0)
@@ -660,7 +664,7 @@ static int dpi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- struct omap_overlay_manager *mgr;
+ enum omap_channel channel = dpi->output.dispc_channel;
int r;
r = dpi_init_regulator(dpi);
@@ -669,11 +673,7 @@ static int dpi_connect(struct omap_dss_device *dssdev,
dpi_init_pll(dpi);
- mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
- if (!mgr)
- return -ENODEV;
-
- r = dss_mgr_connect(mgr, dssdev);
+ r = dss_mgr_connect(channel, dssdev);
if (r)
return r;
@@ -681,7 +681,7 @@ static int dpi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(mgr, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
return r;
}
@@ -691,6 +691,9 @@ static int dpi_connect(struct omap_dss_device *dssdev,
static void dpi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
+ enum omap_channel channel = dpi->output.dispc_channel;
+
WARN_ON(dst != dssdev->dst);
if (dst != dssdev->dst)
@@ -698,8 +701,7 @@ static void dpi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- if (dssdev->manager)
- dss_mgr_disconnect(dssdev->manager, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
}
static const struct omapdss_dpi_ops dpi_ops = {
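The dpi.c hunks above show the conversion repeated in dsi.c, hdmi4.c and hdmi5.c below: every dss_mgr_*() call drops its struct omap_overlay_manager pointer in favour of the output's enum omap_channel, and the old "is a manager attached" test becomes the dispc_channel_connected flag. The shape of a converted enable path, as a sketch against the types used in this patch (error handling trimmed):

/* assumes <video/omapdss.h> and "dss.h" as in the files above */
struct out_priv {
	struct omap_dss_device output;
	struct omap_video_timings timings;
	struct dss_lcd_mgr_config mgr_config;
};

static int out_enable(struct out_priv *priv)
{
	enum omap_channel channel = priv->output.dispc_channel;

	/* replaces the old "out->manager == NULL" test */
	if (!priv->output.dispc_channel_connected)
		return -ENODEV;

	/* manager ops are now keyed by channel, not by manager pointer */
	dss_mgr_set_timings(channel, &priv->timings);
	dss_mgr_set_lcd_config(channel, &priv->mgr_config);

	return dss_mgr_enable(channel);
}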
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 43be4b2a7b05..8730646a0cbb 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -214,9 +214,9 @@ struct dsi_reg { u16 module; u16 idx; };
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
static int dsi_display_init_dispc(struct platform_device *dsidev,
- struct omap_overlay_manager *mgr);
+ enum omap_channel channel);
static void dsi_display_uninit_dispc(struct platform_device *dsidev,
- struct omap_overlay_manager *mgr);
+ enum omap_channel channel);
static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
@@ -3826,19 +3826,19 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dsi->output.manager;
+ enum omap_channel dispc_channel = dssdev->dispc_channel;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
struct omap_dss_device *out = &dsi->output;
u8 data_type;
u16 word_count;
int r;
- if (out->manager == NULL) {
+ if (!out->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
return -ENODEV;
}
- r = dsi_display_init_dispc(dsidev, mgr);
+ r = dsi_display_init_dispc(dsidev, dispc_channel);
if (r)
goto err_init_dispc;
@@ -3876,7 +3876,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
dsi_if_enable(dsidev, true);
}
- r = dss_mgr_enable(mgr);
+ r = dss_mgr_enable(dispc_channel);
if (r)
goto err_mgr_enable;
@@ -3888,7 +3888,7 @@ err_mgr_enable:
dsi_vc_enable(dsidev, channel, false);
}
err_pix_fmt:
- dsi_display_uninit_dispc(dsidev, mgr);
+ dsi_display_uninit_dispc(dsidev, dispc_channel);
err_init_dispc:
return r;
}
@@ -3897,7 +3897,7 @@ static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dsi->output.manager;
+ enum omap_channel dispc_channel = dssdev->dispc_channel;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_if_enable(dsidev, false);
@@ -3910,15 +3910,15 @@ static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel
dsi_if_enable(dsidev, true);
}
- dss_mgr_disable(mgr);
+ dss_mgr_disable(dispc_channel);
- dsi_display_uninit_dispc(dsidev, mgr);
+ dsi_display_uninit_dispc(dsidev, dispc_channel);
}
static void dsi_update_screen_dispc(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_overlay_manager *mgr = dsi->output.manager;
+ enum omap_channel dispc_channel = dsi->output.dispc_channel;
unsigned bytespp;
unsigned bytespl;
unsigned bytespf;
@@ -3980,9 +3980,9 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(mgr, &dsi->timings);
+ dss_mgr_set_timings(dispc_channel, &dsi->timings);
- dss_mgr_start_update(mgr);
+ dss_mgr_start_update(dispc_channel);
if (dsi->te_enabled) {
/* disable LP_RX_TO, so that we can receive TE. Time to wait
@@ -4105,17 +4105,17 @@ static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
}
static int dsi_display_init_dispc(struct platform_device *dsidev,
- struct omap_overlay_manager *mgr)
+ enum omap_channel channel)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
- dss_select_lcd_clk_source(mgr->id, dsi->module_id == 0 ?
+ dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
- r = dss_mgr_register_framedone_handler(mgr,
+ r = dss_mgr_register_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
if (r) {
DSSERR("can't register FRAMEDONE handler\n");
@@ -4140,7 +4140,7 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE;
- dss_mgr_set_timings(mgr, &dsi->timings);
+ dss_mgr_set_timings(channel, &dsi->timings);
r = dsi_configure_dispc_clocks(dsidev);
if (r)
@@ -4151,28 +4151,28 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
dsi_get_pixel_size(dsi->pix_fmt);
dsi->mgr_config.lcden_sig_polarity = 0;
- dss_mgr_set_lcd_config(mgr, &dsi->mgr_config);
+ dss_mgr_set_lcd_config(channel, &dsi->mgr_config);
return 0;
err1:
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
- dss_mgr_unregister_framedone_handler(mgr,
+ dss_mgr_unregister_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
err:
- dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
return r;
}
static void dsi_display_uninit_dispc(struct platform_device *dsidev,
- struct omap_overlay_manager *mgr)
+ enum omap_channel channel)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
- dss_mgr_unregister_framedone_handler(mgr,
+ dss_mgr_unregister_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
- dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
}
static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
@@ -4983,18 +4983,14 @@ static int dsi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct omap_overlay_manager *mgr;
+ enum omap_channel dispc_channel = dssdev->dispc_channel;
int r;
r = dsi_regulator_init(dsidev);
if (r)
return r;
- mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
- if (!mgr)
- return -ENODEV;
-
- r = dss_mgr_connect(mgr, dssdev);
+ r = dss_mgr_connect(dispc_channel, dssdev);
if (r)
return r;
@@ -5002,7 +4998,7 @@ static int dsi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dssdev->name);
- dss_mgr_disconnect(mgr, dssdev);
+ dss_mgr_disconnect(dispc_channel, dssdev);
return r;
}
@@ -5012,6 +5008,8 @@ static int dsi_connect(struct omap_dss_device *dssdev,
static void dsi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
+ enum omap_channel dispc_channel = dssdev->dispc_channel;
+
WARN_ON(dst != dssdev->dst);
if (dst != dssdev->dst)
@@ -5019,8 +5017,7 @@ static void dsi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- if (dssdev->manager)
- dss_mgr_disconnect(dssdev->manager, dssdev);
+ dss_mgr_disconnect(dispc_channel, dssdev);
}
static const struct omapdss_dsi_ops dsi_ops = {
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 9a6453235585..38e6ab50142d 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -25,6 +25,8 @@
#include <linux/interrupt.h>
+#include "omapdss.h"
+
#ifdef pr_fmt
#undef pr_fmt
#endif
@@ -205,29 +207,6 @@ void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
-/* display */
-int dss_suspend_all_devices(void);
-int dss_resume_all_devices(void);
-void dss_disable_all_devices(void);
-
-int display_init_sysfs(struct platform_device *pdev);
-void display_uninit_sysfs(struct platform_device *pdev);
-
-/* manager */
-int dss_init_overlay_managers(void);
-void dss_uninit_overlay_managers(void);
-int dss_init_overlay_managers_sysfs(struct platform_device *pdev);
-void dss_uninit_overlay_managers_sysfs(struct platform_device *pdev);
-int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
- const struct omap_overlay_manager_info *info);
-int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
- const struct omap_video_timings *timings);
-int dss_mgr_check(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info,
- const struct omap_video_timings *mgr_timings,
- const struct dss_lcd_mgr_config *config,
- struct omap_overlay_info **overlay_infos);
-
static inline bool dss_mgr_is_lcd(enum omap_channel id)
{
if (id == OMAP_DSS_CHANNEL_LCD || id == OMAP_DSS_CHANNEL_LCD2 ||
@@ -237,24 +216,6 @@ static inline bool dss_mgr_is_lcd(enum omap_channel id)
return false;
}
-int dss_manager_kobj_init(struct omap_overlay_manager *mgr,
- struct platform_device *pdev);
-void dss_manager_kobj_uninit(struct omap_overlay_manager *mgr);
-
-/* overlay */
-void dss_init_overlays(struct platform_device *pdev);
-void dss_uninit_overlays(struct platform_device *pdev);
-void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
-int dss_ovl_simple_check(struct omap_overlay *ovl,
- const struct omap_overlay_info *info);
-int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
- const struct omap_video_timings *mgr_timings);
-bool dss_ovl_use_replication(struct dss_lcd_mgr_config config,
- enum omap_color_mode mode);
-int dss_overlay_kobj_init(struct omap_overlay *ovl,
- struct platform_device *pdev);
-void dss_overlay_kobj_uninit(struct omap_overlay *ovl);
-
/* DSS */
int dss_init_platform_driver(void) __init;
void dss_uninit_platform_driver(void);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 7103c659a534..f892ae157ff3 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -165,9 +165,10 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
struct omap_video_timings *p;
- struct omap_overlay_manager *mgr = hdmi.output.manager;
+ enum omap_channel channel = dssdev->dispc_channel;
struct hdmi_wp_data *wp = &hdmi.wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
+ unsigned pc;
r = hdmi_power_on_core(dssdev);
if (r)
@@ -181,7 +182,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
- hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
+ pc = p->pixelclock;
+ if (p->double_pixel)
+ pc *= 2;
+
+ hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
r = dss_pll_enable(&hdmi.pll.pll);
if (r) {
@@ -212,24 +217,24 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
dispc_enable_gamma_table(0);
/* tv size */
- dss_mgr_set_timings(mgr, p);
+ dss_mgr_set_timings(channel, p);
- r = hdmi_wp_video_start(&hdmi.wp);
+ r = dss_mgr_enable(channel);
if (r)
- goto err_vid_enable;
+ goto err_mgr_enable;
- r = dss_mgr_enable(mgr);
+ r = hdmi_wp_video_start(&hdmi.wp);
if (r)
- goto err_mgr_enable;
+ goto err_vid_enable;
hdmi_wp_set_irqenable(wp,
HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
return 0;
-err_mgr_enable:
- hdmi_wp_video_stop(&hdmi.wp);
err_vid_enable:
+ dss_mgr_disable(channel);
+err_mgr_enable:
hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
err_phy_cfg:
@@ -242,14 +247,14 @@ err_pll_enable:
static void hdmi_power_off_full(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = hdmi.output.manager;
+ enum omap_channel channel = dssdev->dispc_channel;
hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
- dss_mgr_disable(mgr);
-
hdmi_wp_video_stop(&hdmi.wp);
+ dss_mgr_disable(channel);
+
hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
dss_pll_disable(&hdmi.pll.pll);
@@ -260,9 +265,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct omap_dss_device *out = &hdmi.output;
-
- if (!dispc_mgr_timings_ok(out->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
return -EINVAL;
return 0;
@@ -343,7 +346,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
- if (out->manager == NULL) {
+ if (!out->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
r = -ENODEV;
goto err0;
@@ -433,18 +436,14 @@ static void hdmi_core_disable(struct omap_dss_device *dssdev)
static int hdmi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- struct omap_overlay_manager *mgr;
+ enum omap_channel channel = dssdev->dispc_channel;
int r;
r = hdmi_init_regulator();
if (r)
return r;
- mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
- if (!mgr)
- return -ENODEV;
-
- r = dss_mgr_connect(mgr, dssdev);
+ r = dss_mgr_connect(channel, dssdev);
if (r)
return r;
@@ -452,7 +451,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(mgr, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
return r;
}
@@ -462,6 +461,8 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
static void hdmi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
+ enum omap_channel channel = dssdev->dispc_channel;
+
WARN_ON(dst != dssdev->dst);
if (dst != dssdev->dst)
@@ -469,8 +470,7 @@ static void hdmi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- if (dssdev->manager)
- dss_mgr_disconnect(dssdev->manager, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
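The hdmi4.c power-on hunk above swaps dss_mgr_enable() ahead of hdmi_wp_video_start() and reorders the error labels to match (hdmi5.c below gets the identical treatment), preserving the usual kernel unwind rule: goto labels undo steps in exactly the reverse order of the success path. Schematically, with hypothetical step_a/step_b stubs:

static int step_a(void);	/* dss_mgr_enable() in the diff */
static int step_b(void);	/* hdmi_wp_video_start() in the diff */
static void undo_a(void);	/* dss_mgr_disable() in the diff */

static int power_on(void)
{
	int r;

	r = step_a();
	if (r)
		goto err_a;

	r = step_b();
	if (r)
		goto err_b;

	return 0;

err_b:
	undo_a();	/* undo in reverse order of setup */
err_a:
	return r;
}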
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index a955a2c4c061..a43f7b10e113 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -182,8 +182,9 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
{
int r;
struct omap_video_timings *p;
- struct omap_overlay_manager *mgr = hdmi.output.manager;
+ enum omap_channel channel = dssdev->dispc_channel;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
+ unsigned pc;
r = hdmi_power_on_core(dssdev);
if (r)
@@ -193,7 +194,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
DSSDBG("hdmi_power_on x_res= %d y_res = %d\n", p->x_res, p->y_res);
- hdmi_pll_compute(&hdmi.pll, p->pixelclock, &hdmi_cinfo);
+ pc = p->pixelclock;
+ if (p->double_pixel)
+ pc *= 2;
+
+ hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
/* disable and clear irqs */
hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
@@ -229,24 +234,24 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
dispc_enable_gamma_table(0);
/* tv size */
- dss_mgr_set_timings(mgr, p);
+ dss_mgr_set_timings(channel, p);
- r = hdmi_wp_video_start(&hdmi.wp);
+ r = dss_mgr_enable(channel);
if (r)
- goto err_vid_enable;
+ goto err_mgr_enable;
- r = dss_mgr_enable(mgr);
+ r = hdmi_wp_video_start(&hdmi.wp);
if (r)
- goto err_mgr_enable;
+ goto err_vid_enable;
hdmi_wp_set_irqenable(&hdmi.wp,
HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
return 0;
-err_mgr_enable:
- hdmi_wp_video_stop(&hdmi.wp);
err_vid_enable:
+ dss_mgr_disable(channel);
+err_mgr_enable:
hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
err_phy_cfg:
@@ -259,14 +264,14 @@ err_pll_enable:
static void hdmi_power_off_full(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = hdmi.output.manager;
+ enum omap_channel channel = dssdev->dispc_channel;
hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
- dss_mgr_disable(mgr);
-
hdmi_wp_video_stop(&hdmi.wp);
+ dss_mgr_disable(channel);
+
hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
dss_pll_disable(&hdmi.pll.pll);
@@ -277,13 +282,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct omap_dss_device *out = &hdmi.output;
-
- /* TODO: proper interlace support */
- if (timings->interlace)
- return -EINVAL;
-
- if (!dispc_mgr_timings_ok(out->dispc_channel, timings))
+ if (!dispc_mgr_timings_ok(dssdev->dispc_channel, timings))
return -EINVAL;
return 0;
@@ -373,7 +372,7 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
- if (out->manager == NULL) {
+ if (!out->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
r = -ENODEV;
goto err0;
@@ -463,18 +462,14 @@ static void hdmi_core_disable(struct omap_dss_device *dssdev)
static int hdmi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- struct omap_overlay_manager *mgr;
+ enum omap_channel channel = dssdev->dispc_channel;
int r;
r = hdmi_init_regulator();
if (r)
return r;
- mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
- if (!mgr)
- return -ENODEV;
-
- r = dss_mgr_connect(mgr, dssdev);
+ r = dss_mgr_connect(channel, dssdev);
if (r)
return r;
@@ -482,7 +477,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(mgr, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
return r;
}
@@ -492,6 +487,8 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
static void hdmi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
+ enum omap_channel channel = dssdev->dispc_channel;
+
WARN_ON(dst != dssdev->dst);
if (dst != dssdev->dst)
@@ -499,8 +496,7 @@ static void hdmi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- if (dssdev->manager)
- dss_mgr_disconnect(dssdev->manager, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 8ea531d2652c..6a397520cae5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -292,25 +292,36 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
{
DSSDBG("hdmi_core_init\n");
+ video_cfg->v_fc_config.timings = cfg->timings;
+
/* video core */
video_cfg->data_enable_pol = 1; /* It is always 1*/
- video_cfg->v_fc_config.timings.hsync_level = cfg->timings.hsync_level;
- video_cfg->v_fc_config.timings.x_res = cfg->timings.x_res;
- video_cfg->v_fc_config.timings.hsw = cfg->timings.hsw - 1;
- video_cfg->v_fc_config.timings.hbp = cfg->timings.hbp;
- video_cfg->v_fc_config.timings.hfp = cfg->timings.hfp;
video_cfg->hblank = cfg->timings.hfp +
- cfg->timings.hbp + cfg->timings.hsw - 1;
- video_cfg->v_fc_config.timings.vsync_level = cfg->timings.vsync_level;
- video_cfg->v_fc_config.timings.y_res = cfg->timings.y_res;
- video_cfg->v_fc_config.timings.vsw = cfg->timings.vsw;
- video_cfg->v_fc_config.timings.vfp = cfg->timings.vfp;
- video_cfg->v_fc_config.timings.vbp = cfg->timings.vbp;
- video_cfg->vblank_osc = 0; /* Always 0 - need to confirm */
+ cfg->timings.hbp + cfg->timings.hsw;
+ video_cfg->vblank_osc = 0;
video_cfg->vblank = cfg->timings.vsw +
cfg->timings.vfp + cfg->timings.vbp;
video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode;
- video_cfg->v_fc_config.timings.interlace = cfg->timings.interlace;
+
+ if (cfg->timings.interlace) {
+ /* set vblank_osc if vblank is fractional */
+ if (video_cfg->vblank % 2 != 0)
+ video_cfg->vblank_osc = 1;
+
+ video_cfg->v_fc_config.timings.y_res /= 2;
+ video_cfg->vblank /= 2;
+ video_cfg->v_fc_config.timings.vfp /= 2;
+ video_cfg->v_fc_config.timings.vsw /= 2;
+ video_cfg->v_fc_config.timings.vbp /= 2;
+ }
+
+ if (cfg->timings.double_pixel) {
+ video_cfg->v_fc_config.timings.x_res *= 2;
+ video_cfg->hblank *= 2;
+ video_cfg->v_fc_config.timings.hfp *= 2;
+ video_cfg->v_fc_config.timings.hsw *= 2;
+ video_cfg->v_fc_config.timings.hbp *= 2;
+ }
}
/* DSS_HDMI_CORE_VIDEO_CONFIG */
@@ -377,6 +388,11 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
/* select DVI mode */
REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF,
cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
+
+ if (cfg->v_fc_config.timings.double_pixel)
+ REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 2, 7, 4);
+ else
+ REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, 1, 7, 4);
}
static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core)
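Note (illustrative arithmetic for the interlace/double_pixel handling
above; the mode values are hypothetical, not from the patch):

	/* interlace: frame-compositor timings become per-field values */
	unsigned vblank = 3 + 4 + 15;	/* vsw + vfp + vbp = 22 */
	/* 22 is even, so vblank_osc stays 0; an odd sum would set it */
	vblank /= 2;			/* 11 blanking lines per field */
	unsigned y_res = 480 / 2;	/* 240 active lines per field */

	/* double_pixel: horizontal timings track the repeated pixels */
	unsigned x_res = 720 * 2;	/* 1440 pixels sent per line */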
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 7c544bc56fb5..13442b9052d1 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -165,12 +165,24 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
{
u32 timing_h = 0;
u32 timing_v = 0;
+ unsigned hsw_offset = 1;
DSSDBG("Enter hdmi_wp_video_config_timing\n");
+ /*
+ * On OMAP4 and OMAP5 ES1 the HSW field is programmed as is. On OMAP5
+ * ES2+ (including DRA7/AM5 SoCs) HSW field is programmed to hsw-1.
+ * However, we don't support OMAP5 ES1 at all, so we can just check for
+ * OMAP4 here.
+ */
+ if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
+ omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
+ omapdss_get_version() == OMAPDSS_VER_OMAP4)
+ hsw_offset = 0;
+
timing_h |= FLD_VAL(timings->hbp, 31, 20);
timing_h |= FLD_VAL(timings->hfp, 19, 8);
- timing_h |= FLD_VAL(timings->hsw, 7, 0);
+ timing_h |= FLD_VAL(timings->hsw - hsw_offset, 7, 0);
hdmi_write_reg(wp->base, HDMI_WP_VIDEO_TIMING_H, timing_h);
timing_v |= FLD_VAL(timings->vbp, 31, 20);
@@ -187,8 +199,6 @@ void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
video_fmt->packing_mode = HDMI_PACK_10b_RGB_YUV444;
video_fmt->y_res = param->timings.y_res;
video_fmt->x_res = param->timings.x_res;
- if (param->timings.interlace)
- video_fmt->y_res /= 2;
timings->hbp = param->timings.hbp;
timings->hfp = param->timings.hfp;
@@ -196,9 +206,25 @@ void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
timings->vbp = param->timings.vbp;
timings->vfp = param->timings.vfp;
timings->vsw = param->timings.vsw;
+
timings->vsync_level = param->timings.vsync_level;
timings->hsync_level = param->timings.hsync_level;
timings->interlace = param->timings.interlace;
+ timings->double_pixel = param->timings.double_pixel;
+
+ if (param->timings.interlace) {
+ video_fmt->y_res /= 2;
+ timings->vbp /= 2;
+ timings->vfp /= 2;
+ timings->vsw /= 2;
+ }
+
+ if (param->timings.double_pixel) {
+ video_fmt->x_res *= 2;
+ timings->hfp *= 2;
+ timings->hsw *= 2;
+ timings->hbp *= 2;
+ }
}
void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
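Note (sketch of the HDMI_WP_VIDEO_TIMING_H packing above, using
hypothetical 1080p-style values; the bit ranges are the ones in the code):

	unsigned hsw = 44, hsw_offset = 1;	/* OMAP5 ES2+/DRA7 encoding */
	u32 timing_h = 0;

	timing_h |= FLD_VAL(148, 31, 20);		/* hbp */
	timing_h |= FLD_VAL(88, 19, 8);			/* hfp */
	timing_h |= FLD_VAL(hsw - hsw_offset, 7, 0);	/* 43 written, 44 meant */
	/* on OMAP4 hsw_offset is 0 and the raw 44 is written instead */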
diff --git a/drivers/gpu/drm/omapdrm/dss/manager-sysfs.c b/drivers/gpu/drm/omapdrm/dss/manager-sysfs.c
deleted file mode 100644
index a7414fb12830..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/manager-sysfs.c
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "MANAGER"
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/jiffies.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-
-static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name);
-}
-
-static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf)
-{
- struct omap_dss_device *dssdev = mgr->get_device(mgr);
-
- return snprintf(buf, PAGE_SIZE, "%s\n", dssdev ?
- dssdev->name : "<none>");
-}
-
-static int manager_display_match(struct omap_dss_device *dssdev, void *data)
-{
- const char *str = data;
-
- return sysfs_streq(dssdev->name, str);
-}
-
-static ssize_t manager_display_store(struct omap_overlay_manager *mgr,
- const char *buf, size_t size)
-{
- int r = 0;
- size_t len = size;
- struct omap_dss_device *dssdev = NULL;
- struct omap_dss_device *old_dssdev;
-
- if (buf[size-1] == '\n')
- --len;
-
- if (len > 0)
- dssdev = omap_dss_find_device((void *)buf,
- manager_display_match);
-
- if (len > 0 && dssdev == NULL)
- return -EINVAL;
-
- if (dssdev) {
- DSSDBG("display %s found\n", dssdev->name);
-
- if (omapdss_device_is_connected(dssdev)) {
- DSSERR("new display is already connected\n");
- r = -EINVAL;
- goto put_device;
- }
-
- if (omapdss_device_is_enabled(dssdev)) {
- DSSERR("new display is not disabled\n");
- r = -EINVAL;
- goto put_device;
- }
- }
-
- old_dssdev = mgr->get_device(mgr);
- if (old_dssdev) {
- if (omapdss_device_is_enabled(old_dssdev)) {
- DSSERR("old display is not disabled\n");
- r = -EINVAL;
- goto put_device;
- }
-
- old_dssdev->driver->disconnect(old_dssdev);
- }
-
- if (dssdev) {
- r = dssdev->driver->connect(dssdev);
- if (r) {
- DSSERR("failed to connect new device\n");
- goto put_device;
- }
-
- old_dssdev = mgr->get_device(mgr);
- if (old_dssdev != dssdev) {
- DSSERR("failed to connect device to this manager\n");
- dssdev->driver->disconnect(dssdev);
- goto put_device;
- }
-
- r = mgr->apply(mgr);
- if (r) {
- DSSERR("failed to apply dispc config\n");
- goto put_device;
- }
- }
-
-put_device:
- if (dssdev)
- omap_dss_put_device(dssdev);
-
- return r ? r : size;
-}
-
-static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
- char *buf)
-{
- struct omap_overlay_manager_info info;
-
- mgr->get_manager_info(mgr, &info);
-
- return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color);
-}
-
-static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
- const char *buf, size_t size)
-{
- struct omap_overlay_manager_info info;
- u32 color;
- int r;
-
- r = kstrtouint(buf, 0, &color);
- if (r)
- return r;
-
- mgr->get_manager_info(mgr, &info);
-
- info.default_color = color;
-
- r = mgr->set_manager_info(mgr, &info);
- if (r)
- return r;
-
- r = mgr->apply(mgr);
- if (r)
- return r;
-
- return size;
-}
-
-static const char *trans_key_type_str[] = {
- "gfx-destination",
- "video-source",
-};
-
-static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
- char *buf)
-{
- enum omap_dss_trans_key_type key_type;
- struct omap_overlay_manager_info info;
-
- mgr->get_manager_info(mgr, &info);
-
- key_type = info.trans_key_type;
- BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));
-
- return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
-}
-
-static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
- const char *buf, size_t size)
-{
- enum omap_dss_trans_key_type key_type;
- struct omap_overlay_manager_info info;
- int r;
-
- for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
- key_type < ARRAY_SIZE(trans_key_type_str); key_type++) {
- if (sysfs_streq(buf, trans_key_type_str[key_type]))
- break;
- }
-
- if (key_type == ARRAY_SIZE(trans_key_type_str))
- return -EINVAL;
-
- mgr->get_manager_info(mgr, &info);
-
- info.trans_key_type = key_type;
-
- r = mgr->set_manager_info(mgr, &info);
- if (r)
- return r;
-
- r = mgr->apply(mgr);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
- char *buf)
-{
- struct omap_overlay_manager_info info;
-
- mgr->get_manager_info(mgr, &info);
-
- return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key);
-}
-
-static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
- const char *buf, size_t size)
-{
- struct omap_overlay_manager_info info;
- u32 key_value;
- int r;
-
- r = kstrtouint(buf, 0, &key_value);
- if (r)
- return r;
-
- mgr->get_manager_info(mgr, &info);
-
- info.trans_key = key_value;
-
- r = mgr->set_manager_info(mgr, &info);
- if (r)
- return r;
-
- r = mgr->apply(mgr);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
- char *buf)
-{
- struct omap_overlay_manager_info info;
-
- mgr->get_manager_info(mgr, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled);
-}
-
-static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
- const char *buf, size_t size)
-{
- struct omap_overlay_manager_info info;
- bool enable;
- int r;
-
- r = strtobool(buf, &enable);
- if (r)
- return r;
-
- mgr->get_manager_info(mgr, &info);
-
- info.trans_enabled = enable;
-
- r = mgr->set_manager_info(mgr, &info);
- if (r)
- return r;
-
- r = mgr->apply(mgr);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t manager_alpha_blending_enabled_show(
- struct omap_overlay_manager *mgr, char *buf)
-{
- struct omap_overlay_manager_info info;
-
- if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
- return -ENODEV;
-
- mgr->get_manager_info(mgr, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- info.partial_alpha_enabled);
-}
-
-static ssize_t manager_alpha_blending_enabled_store(
- struct omap_overlay_manager *mgr,
- const char *buf, size_t size)
-{
- struct omap_overlay_manager_info info;
- bool enable;
- int r;
-
- if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
- return -ENODEV;
-
- r = strtobool(buf, &enable);
- if (r)
- return r;
-
- mgr->get_manager_info(mgr, &info);
-
- info.partial_alpha_enabled = enable;
-
- r = mgr->set_manager_info(mgr, &info);
- if (r)
- return r;
-
- r = mgr->apply(mgr);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
- char *buf)
-{
- struct omap_overlay_manager_info info;
-
- mgr->get_manager_info(mgr, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable);
-}
-
-static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
- const char *buf, size_t size)
-{
- struct omap_overlay_manager_info info;
- int r;
- bool enable;
-
- if (!dss_has_feature(FEAT_CPR))
- return -ENODEV;
-
- r = strtobool(buf, &enable);
- if (r)
- return r;
-
- mgr->get_manager_info(mgr, &info);
-
- if (info.cpr_enable == enable)
- return size;
-
- info.cpr_enable = enable;
-
- r = mgr->set_manager_info(mgr, &info);
- if (r)
- return r;
-
- r = mgr->apply(mgr);
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
- char *buf)
-{
- struct omap_overlay_manager_info info;
-
- mgr->get_manager_info(mgr, &info);
-
- return snprintf(buf, PAGE_SIZE,
- "%d %d %d %d %d %d %d %d %d\n",
- info.cpr_coefs.rr,
- info.cpr_coefs.rg,
- info.cpr_coefs.rb,
- info.cpr_coefs.gr,
- info.cpr_coefs.gg,
- info.cpr_coefs.gb,
- info.cpr_coefs.br,
- info.cpr_coefs.bg,
- info.cpr_coefs.bb);
-}
-
-static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
- const char *buf, size_t size)
-{
- struct omap_overlay_manager_info info;
- struct omap_dss_cpr_coefs coefs;
- int r, i;
- s16 *arr;
-
- if (!dss_has_feature(FEAT_CPR))
- return -ENODEV;
-
- if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
- &coefs.rr, &coefs.rg, &coefs.rb,
- &coefs.gr, &coefs.gg, &coefs.gb,
- &coefs.br, &coefs.bg, &coefs.bb) != 9)
- return -EINVAL;
-
- arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
- coefs.gr, coefs.gg, coefs.gb,
- coefs.br, coefs.bg, coefs.bb };
-
- for (i = 0; i < 9; ++i) {
- if (arr[i] < -512 || arr[i] > 511)
- return -EINVAL;
- }
-
- mgr->get_manager_info(mgr, &info);
-
- info.cpr_coefs = coefs;
-
- r = mgr->set_manager_info(mgr, &info);
- if (r)
- return r;
-
- r = mgr->apply(mgr);
- if (r)
- return r;
-
- return size;
-}
-
-struct manager_attribute {
- struct attribute attr;
- ssize_t (*show)(struct omap_overlay_manager *, char *);
- ssize_t (*store)(struct omap_overlay_manager *, const char *, size_t);
-};
-
-#define MANAGER_ATTR(_name, _mode, _show, _store) \
- struct manager_attribute manager_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-
-static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL);
-static MANAGER_ATTR(display, S_IRUGO|S_IWUSR,
- manager_display_show, manager_display_store);
-static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR,
- manager_default_color_show, manager_default_color_store);
-static MANAGER_ATTR(trans_key_type, S_IRUGO|S_IWUSR,
- manager_trans_key_type_show, manager_trans_key_type_store);
-static MANAGER_ATTR(trans_key_value, S_IRUGO|S_IWUSR,
- manager_trans_key_value_show, manager_trans_key_value_store);
-static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
- manager_trans_key_enabled_show,
- manager_trans_key_enabled_store);
-static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
- manager_alpha_blending_enabled_show,
- manager_alpha_blending_enabled_store);
-static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
- manager_cpr_enable_show,
- manager_cpr_enable_store);
-static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
- manager_cpr_coef_show,
- manager_cpr_coef_store);
-
-
-static struct attribute *manager_sysfs_attrs[] = {
- &manager_attr_name.attr,
- &manager_attr_display.attr,
- &manager_attr_default_color.attr,
- &manager_attr_trans_key_type.attr,
- &manager_attr_trans_key_value.attr,
- &manager_attr_trans_key_enabled.attr,
- &manager_attr_alpha_blending_enabled.attr,
- &manager_attr_cpr_enable.attr,
- &manager_attr_cpr_coef.attr,
- NULL
-};
-
-static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct omap_overlay_manager *manager;
- struct manager_attribute *manager_attr;
-
- manager = container_of(kobj, struct omap_overlay_manager, kobj);
- manager_attr = container_of(attr, struct manager_attribute, attr);
-
- if (!manager_attr->show)
- return -ENOENT;
-
- return manager_attr->show(manager, buf);
-}
-
-static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_overlay_manager *manager;
- struct manager_attribute *manager_attr;
-
- manager = container_of(kobj, struct omap_overlay_manager, kobj);
- manager_attr = container_of(attr, struct manager_attribute, attr);
-
- if (!manager_attr->store)
- return -ENOENT;
-
- return manager_attr->store(manager, buf, size);
-}
-
-static const struct sysfs_ops manager_sysfs_ops = {
- .show = manager_attr_show,
- .store = manager_attr_store,
-};
-
-static struct kobj_type manager_ktype = {
- .sysfs_ops = &manager_sysfs_ops,
- .default_attrs = manager_sysfs_attrs,
-};
-
-int dss_manager_kobj_init(struct omap_overlay_manager *mgr,
- struct platform_device *pdev)
-{
- return kobject_init_and_add(&mgr->kobj, &manager_ktype,
- &pdev->dev.kobj, "manager%d", mgr->id);
-}
-
-void dss_manager_kobj_uninit(struct omap_overlay_manager *mgr)
-{
- kobject_del(&mgr->kobj);
- kobject_put(&mgr->kobj);
-
- memset(&mgr->kobj, 0, sizeof(mgr->kobj));
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/manager.c b/drivers/gpu/drm/omapdrm/dss/manager.c
deleted file mode 100644
index 08a67f4f6a20..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/manager.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * linux/drivers/video/omap2/dss/manager.c
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "MANAGER"
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/jiffies.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-
-static int num_managers;
-static struct omap_overlay_manager *managers;
-
-int dss_init_overlay_managers(void)
-{
- int i;
-
- num_managers = dss_feat_get_num_mgrs();
-
- managers = kzalloc(sizeof(struct omap_overlay_manager) * num_managers,
- GFP_KERNEL);
-
- BUG_ON(managers == NULL);
-
- for (i = 0; i < num_managers; ++i) {
- struct omap_overlay_manager *mgr = &managers[i];
-
- switch (i) {
- case 0:
- mgr->name = "lcd";
- mgr->id = OMAP_DSS_CHANNEL_LCD;
- break;
- case 1:
- mgr->name = "tv";
- mgr->id = OMAP_DSS_CHANNEL_DIGIT;
- break;
- case 2:
- mgr->name = "lcd2";
- mgr->id = OMAP_DSS_CHANNEL_LCD2;
- break;
- case 3:
- mgr->name = "lcd3";
- mgr->id = OMAP_DSS_CHANNEL_LCD3;
- break;
- }
-
- mgr->caps = 0;
- mgr->supported_displays =
- dss_feat_get_supported_displays(mgr->id);
- mgr->supported_outputs =
- dss_feat_get_supported_outputs(mgr->id);
-
- INIT_LIST_HEAD(&mgr->overlays);
- }
-
- return 0;
-}
-
-int dss_init_overlay_managers_sysfs(struct platform_device *pdev)
-{
- int i, r;
-
- for (i = 0; i < num_managers; ++i) {
- struct omap_overlay_manager *mgr = &managers[i];
-
- r = dss_manager_kobj_init(mgr, pdev);
- if (r)
- DSSERR("failed to create sysfs file\n");
- }
-
- return 0;
-}
-
-void dss_uninit_overlay_managers(void)
-{
- kfree(managers);
- managers = NULL;
- num_managers = 0;
-}
-
-void dss_uninit_overlay_managers_sysfs(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < num_managers; ++i) {
- struct omap_overlay_manager *mgr = &managers[i];
-
- dss_manager_kobj_uninit(mgr);
- }
-}
-
-int omap_dss_get_num_overlay_managers(void)
-{
- return num_managers;
-}
-EXPORT_SYMBOL(omap_dss_get_num_overlay_managers);
-
-struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
-{
- if (num >= num_managers)
- return NULL;
-
- return &managers[num];
-}
-EXPORT_SYMBOL(omap_dss_get_overlay_manager);
-
-int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
- const struct omap_overlay_manager_info *info)
-{
- if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER)) {
- /*
- * OMAP3 supports only graphics source transparency color key
- * and alpha blending simultaneously. See TRM 15.4.2.4.2.2
- * Alpha Mode.
- */
- if (info->partial_alpha_enabled && info->trans_enabled
- && info->trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST) {
- DSSERR("check_manager: illegal transparency key\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
- struct omap_overlay_info **overlay_infos)
-{
- struct omap_overlay *ovl1, *ovl2;
- struct omap_overlay_info *info1, *info2;
-
- list_for_each_entry(ovl1, &mgr->overlays, list) {
- info1 = overlay_infos[ovl1->id];
-
- if (info1 == NULL)
- continue;
-
- list_for_each_entry(ovl2, &mgr->overlays, list) {
- if (ovl1 == ovl2)
- continue;
-
- info2 = overlay_infos[ovl2->id];
-
- if (info2 == NULL)
- continue;
-
- if (info1->zorder == info2->zorder) {
- DSSERR("overlays %d and %d have the same "
- "zorder %d\n",
- ovl1->id, ovl2->id, info1->zorder);
- return -EINVAL;
- }
- }
- }
-
- return 0;
-}
-
-int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
- const struct omap_video_timings *timings)
-{
- if (!dispc_mgr_timings_ok(mgr->id, timings)) {
- DSSERR("check_manager: invalid timings\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int dss_mgr_check_lcd_config(struct omap_overlay_manager *mgr,
- const struct dss_lcd_mgr_config *config)
-{
- struct dispc_clock_info cinfo = config->clock_info;
- int dl = config->video_port_width;
- bool stallmode = config->stallmode;
- bool fifohandcheck = config->fifohandcheck;
-
- if (cinfo.lck_div < 1 || cinfo.lck_div > 255)
- return -EINVAL;
-
- if (cinfo.pck_div < 1 || cinfo.pck_div > 255)
- return -EINVAL;
-
- if (dl != 12 && dl != 16 && dl != 18 && dl != 24)
- return -EINVAL;
-
- /* fifohandcheck should be used only with stallmode */
- if (!stallmode && fifohandcheck)
- return -EINVAL;
-
- /*
- * io pad mode can be only checked by using dssdev connected to the
- * manager. Ignore checking these for now, add checks when manager
- * is capable of holding information related to the connected interface
- */
-
- return 0;
-}
-
-int dss_mgr_check(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info,
- const struct omap_video_timings *mgr_timings,
- const struct dss_lcd_mgr_config *lcd_config,
- struct omap_overlay_info **overlay_infos)
-{
- struct omap_overlay *ovl;
- int r;
-
- if (dss_has_feature(FEAT_ALPHA_FREE_ZORDER)) {
- r = dss_mgr_check_zorder(mgr, overlay_infos);
- if (r)
- return r;
- }
-
- r = dss_mgr_check_timings(mgr, mgr_timings);
- if (r)
- return r;
-
- r = dss_mgr_check_lcd_config(mgr, lcd_config);
- if (r)
- return r;
-
- list_for_each_entry(ovl, &mgr->overlays, list) {
- struct omap_overlay_info *oi;
- int r;
-
- oi = overlay_infos[ovl->id];
-
- if (oi == NULL)
- continue;
-
- r = dss_ovl_check(ovl, oi, mgr_timings);
- if (r)
- return r;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
new file mode 100644
index 000000000000..d7e7c909bbc2
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2016 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRM_DSS_H
+#define __OMAP_DRM_DSS_H
+
+#include <video/omapdss.h>
+
+u32 dispc_read_irqstatus(void);
+void dispc_clear_irqstatus(u32 mask);
+u32 dispc_read_irqenable(void);
+void dispc_write_irqenable(u32 mask);
+
+int dispc_request_irq(irq_handler_t handler, void *dev_id);
+void dispc_free_irq(void *dev_id);
+
+int dispc_runtime_get(void);
+void dispc_runtime_put(void);
+
+void dispc_mgr_enable(enum omap_channel channel, bool enable);
+bool dispc_mgr_is_enabled(enum omap_channel channel);
+u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
+u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel);
+bool dispc_mgr_go_busy(enum omap_channel channel);
+void dispc_mgr_go(enum omap_channel channel);
+void dispc_mgr_set_lcd_config(enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+void dispc_mgr_set_timings(enum omap_channel channel,
+ const struct omap_video_timings *timings);
+void dispc_mgr_setup(enum omap_channel channel,
+ const struct omap_overlay_manager_info *info);
+
+int dispc_ovl_enable(enum omap_plane plane, bool enable);
+bool dispc_ovl_enabled(enum omap_plane plane);
+void dispc_ovl_set_channel_out(enum omap_plane plane,
+ enum omap_channel channel);
+int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
+ bool replication, const struct omap_video_timings *mgr_timings,
+ bool mem_to_mem);
+
+enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel);
+
+struct dss_mgr_ops {
+ int (*connect)(enum omap_channel channel,
+ struct omap_dss_device *dst);
+ void (*disconnect)(enum omap_channel channel,
+ struct omap_dss_device *dst);
+
+ void (*start_update)(enum omap_channel channel);
+ int (*enable)(enum omap_channel channel);
+ void (*disable)(enum omap_channel channel);
+ void (*set_timings)(enum omap_channel channel,
+ const struct omap_video_timings *timings);
+ void (*set_lcd_config)(enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+ int (*register_framedone_handler)(enum omap_channel channel,
+ void (*handler)(void *), void *data);
+ void (*unregister_framedone_handler)(enum omap_channel channel,
+ void (*handler)(void *), void *data);
+};
+
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops);
+void dss_uninstall_mgr_ops(void);
+
+int dss_mgr_connect(enum omap_channel channel,
+ struct omap_dss_device *dst);
+void dss_mgr_disconnect(enum omap_channel channel,
+ struct omap_dss_device *dst);
+void dss_mgr_set_timings(enum omap_channel channel,
+ const struct omap_video_timings *timings);
+void dss_mgr_set_lcd_config(enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+int dss_mgr_enable(enum omap_channel channel);
+void dss_mgr_disable(enum omap_channel channel);
+void dss_mgr_start_update(enum omap_channel channel);
+int dss_mgr_register_framedone_handler(enum omap_channel channel,
+ void (*handler)(void *), void *data);
+void dss_mgr_unregister_framedone_handler(enum omap_channel channel,
+ void (*handler)(void *), void *data);
+
+#endif /* __OMAP_DRM_DSS_H */
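Note (a minimal usage sketch, not part of the patch; example_enable and
example_timings are hypothetical names): with this header an output driver
addresses its manager purely by DISPC channel:

	static int example_enable(struct omap_dss_device *dssdev)
	{
		enum omap_channel channel = dssdev->dispc_channel;
		int r;

		dss_mgr_set_timings(channel, &example_timings);

		r = dss_mgr_enable(channel);
		if (r)
			return r;

		return 0;
	}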
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 16072159bd24..829232ad8c81 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -169,24 +169,6 @@ struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omapdss_find_output_from_display);
-struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *out;
- struct omap_overlay_manager *mgr;
-
- out = omapdss_find_output_from_display(dssdev);
-
- if (out == NULL)
- return NULL;
-
- mgr = out->manager;
-
- omap_dss_put_device(out);
-
- return mgr;
-}
-EXPORT_SYMBOL(omapdss_find_mgr_from_display);
-
static const struct dss_mgr_ops *dss_mgr_ops;
int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops)
@@ -206,62 +188,62 @@ void dss_uninstall_mgr_ops(void)
}
EXPORT_SYMBOL(dss_uninstall_mgr_ops);
-int dss_mgr_connect(struct omap_overlay_manager *mgr,
+int dss_mgr_connect(enum omap_channel channel,
struct omap_dss_device *dst)
{
- return dss_mgr_ops->connect(mgr, dst);
+ return dss_mgr_ops->connect(channel, dst);
}
EXPORT_SYMBOL(dss_mgr_connect);
-void dss_mgr_disconnect(struct omap_overlay_manager *mgr,
+void dss_mgr_disconnect(enum omap_channel channel,
struct omap_dss_device *dst)
{
- dss_mgr_ops->disconnect(mgr, dst);
+ dss_mgr_ops->disconnect(channel, dst);
}
EXPORT_SYMBOL(dss_mgr_disconnect);
-void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+void dss_mgr_set_timings(enum omap_channel channel,
const struct omap_video_timings *timings)
{
- dss_mgr_ops->set_timings(mgr, timings);
+ dss_mgr_ops->set_timings(channel, timings);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
-void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+void dss_mgr_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
- dss_mgr_ops->set_lcd_config(mgr, config);
+ dss_mgr_ops->set_lcd_config(channel, config);
}
EXPORT_SYMBOL(dss_mgr_set_lcd_config);
-int dss_mgr_enable(struct omap_overlay_manager *mgr)
+int dss_mgr_enable(enum omap_channel channel)
{
- return dss_mgr_ops->enable(mgr);
+ return dss_mgr_ops->enable(channel);
}
EXPORT_SYMBOL(dss_mgr_enable);
-void dss_mgr_disable(struct omap_overlay_manager *mgr)
+void dss_mgr_disable(enum omap_channel channel)
{
- dss_mgr_ops->disable(mgr);
+ dss_mgr_ops->disable(channel);
}
EXPORT_SYMBOL(dss_mgr_disable);
-void dss_mgr_start_update(struct omap_overlay_manager *mgr)
+void dss_mgr_start_update(enum omap_channel channel)
{
- dss_mgr_ops->start_update(mgr);
+ dss_mgr_ops->start_update(channel);
}
EXPORT_SYMBOL(dss_mgr_start_update);
-int dss_mgr_register_framedone_handler(struct omap_overlay_manager *mgr,
+int dss_mgr_register_framedone_handler(enum omap_channel channel,
void (*handler)(void *), void *data)
{
- return dss_mgr_ops->register_framedone_handler(mgr, handler, data);
+ return dss_mgr_ops->register_framedone_handler(channel, handler, data);
}
EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
-void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
+void dss_mgr_unregister_framedone_handler(enum omap_channel channel,
void (*handler)(void *), void *data)
{
- dss_mgr_ops->unregister_framedone_handler(mgr, handler, data);
+ dss_mgr_ops->unregister_framedone_handler(channel, handler, data);
}
EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
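Note (provider-side sketch; the table name is illustrative and only two
hooks are shown): the dss_mgr_* wrappers above are thin trampolines into
whatever ops table was installed, which omap_crtc.c below provides per
channel:

	static const struct dss_mgr_ops example_mgr_ops = {
		.connect	= omap_crtc_dss_connect,
		.disconnect	= omap_crtc_dss_disconnect,
		/* remaining hooks omitted; the real table fills them all in */
	};

	r = dss_install_mgr_ops(&example_mgr_ops);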
diff --git a/drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c b/drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c
deleted file mode 100644
index 4cc5ddebfb34..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/overlay-sysfs.c
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "OVERLAY"
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-#include <linux/platform_device.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-
-static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name);
-}
-
-static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n",
- ovl->manager ? ovl->manager->name : "<none>");
-}
-
-static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
- size_t size)
-{
- int i, r;
- struct omap_overlay_manager *mgr = NULL;
- struct omap_overlay_manager *old_mgr;
- int len = size;
-
- if (buf[size-1] == '\n')
- --len;
-
- if (len > 0) {
- for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
- mgr = omap_dss_get_overlay_manager(i);
-
- if (sysfs_streq(buf, mgr->name))
- break;
-
- mgr = NULL;
- }
- }
-
- if (len > 0 && mgr == NULL)
- return -EINVAL;
-
- if (mgr)
- DSSDBG("manager %s found\n", mgr->name);
-
- if (mgr == ovl->manager)
- return size;
-
- old_mgr = ovl->manager;
-
- r = dispc_runtime_get();
- if (r)
- return r;
-
- /* detach old manager */
- if (old_mgr) {
- r = ovl->unset_manager(ovl);
- if (r) {
- DSSERR("detach failed\n");
- goto err;
- }
-
- r = old_mgr->apply(old_mgr);
- if (r)
- goto err;
- }
-
- if (mgr) {
- r = ovl->set_manager(ovl, mgr);
- if (r) {
- DSSERR("Failed to attach overlay\n");
- goto err;
- }
-
- r = mgr->apply(mgr);
- if (r)
- goto err;
- }
-
- dispc_runtime_put();
-
- return size;
-
-err:
- dispc_runtime_put();
- return r;
-}
-
-static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
-{
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d,%d\n",
- info.width, info.height);
-}
-
-static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
-{
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", info.screen_width);
-}
-
-static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
-{
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d,%d\n",
- info.pos_x, info.pos_y);
-}
-
-static ssize_t overlay_position_store(struct omap_overlay *ovl,
- const char *buf, size_t size)
-{
- int r;
- char *last;
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
-
- info.pos_x = simple_strtoul(buf, &last, 10);
- ++last;
- if (last - buf >= size)
- return -EINVAL;
-
- info.pos_y = simple_strtoul(last, &last, 10);
-
- r = ovl->set_overlay_info(ovl, &info);
- if (r)
- return r;
-
- if (ovl->manager) {
- r = ovl->manager->apply(ovl->manager);
- if (r)
- return r;
- }
-
- return size;
-}
-
-static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
-{
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d,%d\n",
- info.out_width, info.out_height);
-}
-
-static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
- const char *buf, size_t size)
-{
- int r;
- char *last;
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
-
- info.out_width = simple_strtoul(buf, &last, 10);
- ++last;
- if (last - buf >= size)
- return -EINVAL;
-
- info.out_height = simple_strtoul(last, &last, 10);
-
- r = ovl->set_overlay_info(ovl, &info);
- if (r)
- return r;
-
- if (ovl->manager) {
- r = ovl->manager->apply(ovl->manager);
- if (r)
- return r;
- }
-
- return size;
-}
-
-static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", ovl->is_enabled(ovl));
-}
-
-static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
- size_t size)
-{
- int r;
- bool enable;
-
- r = strtobool(buf, &enable);
- if (r)
- return r;
-
- if (enable)
- r = ovl->enable(ovl);
- else
- r = ovl->disable(ovl);
-
- if (r)
- return r;
-
- return size;
-}
-
-static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
-{
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- info.global_alpha);
-}
-
-static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
- const char *buf, size_t size)
-{
- int r;
- u8 alpha;
- struct omap_overlay_info info;
-
- if ((ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
- return -ENODEV;
-
- r = kstrtou8(buf, 0, &alpha);
- if (r)
- return r;
-
- ovl->get_overlay_info(ovl, &info);
-
- info.global_alpha = alpha;
-
- r = ovl->set_overlay_info(ovl, &info);
- if (r)
- return r;
-
- if (ovl->manager) {
- r = ovl->manager->apply(ovl->manager);
- if (r)
- return r;
- }
-
- return size;
-}
-
-static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
- char *buf)
-{
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- info.pre_mult_alpha);
-}
-
-static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
- const char *buf, size_t size)
-{
- int r;
- u8 alpha;
- struct omap_overlay_info info;
-
- if ((ovl->caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
- return -ENODEV;
-
- r = kstrtou8(buf, 0, &alpha);
- if (r)
- return r;
-
- ovl->get_overlay_info(ovl, &info);
-
- info.pre_mult_alpha = alpha;
-
- r = ovl->set_overlay_info(ovl, &info);
- if (r)
- return r;
-
- if (ovl->manager) {
- r = ovl->manager->apply(ovl->manager);
- if (r)
- return r;
- }
-
- return size;
-}
-
-static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
-{
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", info.zorder);
-}
-
-static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
- const char *buf, size_t size)
-{
- int r;
- u8 zorder;
- struct omap_overlay_info info;
-
- if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
- return -ENODEV;
-
- r = kstrtou8(buf, 0, &zorder);
- if (r)
- return r;
-
- ovl->get_overlay_info(ovl, &info);
-
- info.zorder = zorder;
-
- r = ovl->set_overlay_info(ovl, &info);
- if (r)
- return r;
-
- if (ovl->manager) {
- r = ovl->manager->apply(ovl->manager);
- if (r)
- return r;
- }
-
- return size;
-}
-
-struct overlay_attribute {
- struct attribute attr;
- ssize_t (*show)(struct omap_overlay *, char *);
- ssize_t (*store)(struct omap_overlay *, const char *, size_t);
-};
-
-#define OVERLAY_ATTR(_name, _mode, _show, _store) \
- struct overlay_attribute overlay_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-
-static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL);
-static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR,
- overlay_manager_show, overlay_manager_store);
-static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL);
-static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL);
-static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR,
- overlay_position_show, overlay_position_store);
-static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR,
- overlay_output_size_show, overlay_output_size_store);
-static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
- overlay_enabled_show, overlay_enabled_store);
-static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
- overlay_global_alpha_show, overlay_global_alpha_store);
-static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
- overlay_pre_mult_alpha_show,
- overlay_pre_mult_alpha_store);
-static OVERLAY_ATTR(zorder, S_IRUGO|S_IWUSR,
- overlay_zorder_show, overlay_zorder_store);
-
-static struct attribute *overlay_sysfs_attrs[] = {
- &overlay_attr_name.attr,
- &overlay_attr_manager.attr,
- &overlay_attr_input_size.attr,
- &overlay_attr_screen_width.attr,
- &overlay_attr_position.attr,
- &overlay_attr_output_size.attr,
- &overlay_attr_enabled.attr,
- &overlay_attr_global_alpha.attr,
- &overlay_attr_pre_mult_alpha.attr,
- &overlay_attr_zorder.attr,
- NULL
-};
-
-static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct omap_overlay *overlay;
- struct overlay_attribute *overlay_attr;
-
- overlay = container_of(kobj, struct omap_overlay, kobj);
- overlay_attr = container_of(attr, struct overlay_attribute, attr);
-
- if (!overlay_attr->show)
- return -ENOENT;
-
- return overlay_attr->show(overlay, buf);
-}
-
-static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_overlay *overlay;
- struct overlay_attribute *overlay_attr;
-
- overlay = container_of(kobj, struct omap_overlay, kobj);
- overlay_attr = container_of(attr, struct overlay_attribute, attr);
-
- if (!overlay_attr->store)
- return -ENOENT;
-
- return overlay_attr->store(overlay, buf, size);
-}
-
-static const struct sysfs_ops overlay_sysfs_ops = {
- .show = overlay_attr_show,
- .store = overlay_attr_store,
-};
-
-static struct kobj_type overlay_ktype = {
- .sysfs_ops = &overlay_sysfs_ops,
- .default_attrs = overlay_sysfs_attrs,
-};
-
-int dss_overlay_kobj_init(struct omap_overlay *ovl,
- struct platform_device *pdev)
-{
- return kobject_init_and_add(&ovl->kobj, &overlay_ktype,
- &pdev->dev.kobj, "overlay%d", ovl->id);
-}
-
-void dss_overlay_kobj_uninit(struct omap_overlay *ovl)
-{
- kobject_del(&ovl->kobj);
- kobject_put(&ovl->kobj);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/overlay.c b/drivers/gpu/drm/omapdrm/dss/overlay.c
deleted file mode 100644
index 2f7cee985cdd..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/overlay.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * linux/drivers/video/omap2/dss/overlay.c
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "OVERLAY"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/sysfs.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-
-#include <video/omapdss.h>
-
-#include "dss.h"
-#include "dss_features.h"
-
-static int num_overlays;
-static struct omap_overlay *overlays;
-
-int omap_dss_get_num_overlays(void)
-{
- return num_overlays;
-}
-EXPORT_SYMBOL(omap_dss_get_num_overlays);
-
-struct omap_overlay *omap_dss_get_overlay(int num)
-{
- if (num >= num_overlays)
- return NULL;
-
- return &overlays[num];
-}
-EXPORT_SYMBOL(omap_dss_get_overlay);
-
-void dss_init_overlays(struct platform_device *pdev)
-{
- int i, r;
-
- num_overlays = dss_feat_get_num_ovls();
-
- overlays = kzalloc(sizeof(struct omap_overlay) * num_overlays,
- GFP_KERNEL);
-
- BUG_ON(overlays == NULL);
-
- for (i = 0; i < num_overlays; ++i) {
- struct omap_overlay *ovl = &overlays[i];
-
- switch (i) {
- case 0:
- ovl->name = "gfx";
- ovl->id = OMAP_DSS_GFX;
- break;
- case 1:
- ovl->name = "vid1";
- ovl->id = OMAP_DSS_VIDEO1;
- break;
- case 2:
- ovl->name = "vid2";
- ovl->id = OMAP_DSS_VIDEO2;
- break;
- case 3:
- ovl->name = "vid3";
- ovl->id = OMAP_DSS_VIDEO3;
- break;
- }
-
- ovl->caps = dss_feat_get_overlay_caps(ovl->id);
- ovl->supported_modes =
- dss_feat_get_supported_color_modes(ovl->id);
-
- r = dss_overlay_kobj_init(ovl, pdev);
- if (r)
- DSSERR("failed to create sysfs file\n");
- }
-}
-
-void dss_uninit_overlays(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < num_overlays; ++i) {
- struct omap_overlay *ovl = &overlays[i];
- dss_overlay_kobj_uninit(ovl);
- }
-
- kfree(overlays);
- overlays = NULL;
- num_overlays = 0;
-}
-
-int dss_ovl_simple_check(struct omap_overlay *ovl,
- const struct omap_overlay_info *info)
-{
- if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
- if (info->out_width != 0 && info->width != info->out_width) {
- DSSERR("check_overlay: overlay %d doesn't support "
- "scaling\n", ovl->id);
- return -EINVAL;
- }
-
- if (info->out_height != 0 && info->height != info->out_height) {
- DSSERR("check_overlay: overlay %d doesn't support "
- "scaling\n", ovl->id);
- return -EINVAL;
- }
- }
-
- if ((ovl->supported_modes & info->color_mode) == 0) {
- DSSERR("check_overlay: overlay %d doesn't support mode %d\n",
- ovl->id, info->color_mode);
- return -EINVAL;
- }
-
- if (info->zorder >= omap_dss_get_num_overlays()) {
- DSSERR("check_overlay: zorder %d too high\n", info->zorder);
- return -EINVAL;
- }
-
- if (dss_feat_rotation_type_supported(info->rotation_type) == 0) {
- DSSERR("check_overlay: rotation type %d not supported\n",
- info->rotation_type);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
- const struct omap_video_timings *mgr_timings)
-{
- u16 outw, outh;
- u16 dw, dh;
-
- dw = mgr_timings->x_res;
- dh = mgr_timings->y_res;
-
- if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
- outw = info->width;
- outh = info->height;
- } else {
- if (info->out_width == 0)
- outw = info->width;
- else
- outw = info->out_width;
-
- if (info->out_height == 0)
- outh = info->height;
- else
- outh = info->out_height;
- }
-
- if (dw < info->pos_x + outw) {
- DSSERR("overlay %d horizontally not inside the display area "
- "(%d + %d >= %d)\n",
- ovl->id, info->pos_x, outw, dw);
- return -EINVAL;
- }
-
- if (dh < info->pos_y + outh) {
- DSSERR("overlay %d vertically not inside the display area "
- "(%d + %d >= %d)\n",
- ovl->id, info->pos_y, outh, dh);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Checks if replication logic should be used. Only use when overlay is in
- * RGB12U or RGB16 mode, and video port width interface is 18bpp or 24bpp
- */
-bool dss_ovl_use_replication(struct dss_lcd_mgr_config config,
- enum omap_color_mode mode)
-{
- if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16)
- return false;
-
- return config.video_port_width > 16;
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index aea6a1d0fb20..3796576dfadf 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -880,7 +880,7 @@ static int rfbi_display_enable(struct omap_dss_device *dssdev)
struct omap_dss_device *out = &rfbi.output;
int r;
- if (out->manager == NULL) {
+ if (!out->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index d747cc6b59e1..cd6d3bfb041d 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -114,7 +114,7 @@ static int sdi_calc_clock_div(unsigned long pclk,
static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = sdi.output.manager;
+ enum omap_channel channel = dssdev->dispc_channel;
sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
@@ -124,19 +124,20 @@ static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
sdi.mgr_config.video_port_width = 24;
sdi.mgr_config.lcden_sig_polarity = 1;
- dss_mgr_set_lcd_config(mgr, &sdi.mgr_config);
+ dss_mgr_set_lcd_config(channel, &sdi.mgr_config);
}
static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &sdi.output;
+ enum omap_channel channel = dssdev->dispc_channel;
struct omap_video_timings *t = &sdi.timings;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
int r;
- if (out->manager == NULL) {
+ if (!out->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
return -ENODEV;
}
@@ -169,7 +170,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
}
- dss_mgr_set_timings(out->manager, t);
+ dss_mgr_set_timings(channel, t);
r = dss_set_fck_rate(fck);
if (r)
@@ -188,7 +189,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
* need to care about the shadow register mechanism for pck-free. The
* exact reason for this is unknown.
*/
- dispc_mgr_set_clock_div(out->manager->id, &sdi.mgr_config.clock_info);
+ dispc_mgr_set_clock_div(channel, &sdi.mgr_config.clock_info);
dss_sdi_init(sdi.datapairs);
r = dss_sdi_enable();
@@ -196,7 +197,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
goto err_sdi_enable;
mdelay(2);
- r = dss_mgr_enable(out->manager);
+ r = dss_mgr_enable(channel);
if (r)
goto err_mgr_enable;
@@ -216,9 +217,9 @@ err_reg_enable:
static void sdi_display_disable(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = sdi.output.manager;
+ enum omap_channel channel = dssdev->dispc_channel;
- dss_mgr_disable(mgr);
+ dss_mgr_disable(channel);
dss_sdi_disable();
@@ -242,9 +243,9 @@ static void sdi_get_timings(struct omap_dss_device *dssdev,
static int sdi_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct omap_overlay_manager *mgr = sdi.output.manager;
+ enum omap_channel channel = dssdev->dispc_channel;
- if (mgr && !dispc_mgr_timings_ok(mgr->id, timings))
+ if (!dispc_mgr_timings_ok(channel, timings))
return -EINVAL;
if (timings->pixelclock == 0)
@@ -280,18 +281,14 @@ static int sdi_init_regulator(void)
static int sdi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- struct omap_overlay_manager *mgr;
+ enum omap_channel channel = dssdev->dispc_channel;
int r;
r = sdi_init_regulator();
if (r)
return r;
- mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
- if (!mgr)
- return -ENODEV;
-
- r = dss_mgr_connect(mgr, dssdev);
+ r = dss_mgr_connect(channel, dssdev);
if (r)
return r;
@@ -299,7 +296,7 @@ static int sdi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(mgr, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
return r;
}
@@ -309,6 +306,8 @@ static int sdi_connect(struct omap_dss_device *dssdev,
static void sdi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
+ enum omap_channel channel = dssdev->dispc_channel;
+
WARN_ON(dst != dssdev->dst);
if (dst != dssdev->dst)
@@ -316,8 +315,7 @@ static void sdi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- if (dssdev->manager)
- dss_mgr_disconnect(dssdev->manager, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
}
static const struct omapdss_sdi_ops sdi_ops = {
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 08f9def76e27..08a2cc778ba9 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -443,7 +443,7 @@ static const struct venc_config *venc_timings_to_config(
static int venc_power_on(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = venc.output.manager;
+ enum omap_channel channel = dssdev->dispc_channel;
u32 l;
int r;
@@ -469,13 +469,13 @@ static int venc_power_on(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(mgr, &venc.timings);
+ dss_mgr_set_timings(channel, &venc.timings);
r = regulator_enable(venc.vdda_dac_reg);
if (r)
goto err1;
- r = dss_mgr_enable(mgr);
+ r = dss_mgr_enable(channel);
if (r)
goto err2;
@@ -494,12 +494,12 @@ err0:
static void venc_power_off(struct omap_dss_device *dssdev)
{
- struct omap_overlay_manager *mgr = venc.output.manager;
+ enum omap_channel channel = dssdev->dispc_channel;
venc_write_reg(VENC_OUTPUT_CONTROL, 0);
dss_set_dac_pwrdn_bgz(0);
- dss_mgr_disable(mgr);
+ dss_mgr_disable(channel);
regulator_disable(venc.vdda_dac_reg);
@@ -515,7 +515,7 @@ static int venc_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&venc.venc_lock);
- if (out->manager == NULL) {
+ if (!out->dispc_channel_connected) {
DSSERR("Failed to enable display: no output/manager\n");
r = -ENODEV;
goto err0;
@@ -742,18 +742,14 @@ static int venc_get_clocks(struct platform_device *pdev)
static int venc_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- struct omap_overlay_manager *mgr;
+ enum omap_channel channel = dssdev->dispc_channel;
int r;
r = venc_init_regulator();
if (r)
return r;
- mgr = omap_dss_get_overlay_manager(dssdev->dispc_channel);
- if (!mgr)
- return -ENODEV;
-
- r = dss_mgr_connect(mgr, dssdev);
+ r = dss_mgr_connect(channel, dssdev);
if (r)
return r;
@@ -761,7 +757,7 @@ static int venc_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(mgr, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
return r;
}
@@ -771,6 +767,8 @@ static int venc_connect(struct omap_dss_device *dssdev,
static void venc_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
+ enum omap_channel channel = dssdev->dispc_channel;
+
WARN_ON(dst != dssdev->dst);
if (dst != dssdev->dst)
@@ -778,8 +776,7 @@ static void venc_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- if (dssdev->manager)
- dss_mgr_disconnect(dssdev->manager, dssdev);
+ dss_mgr_disconnect(channel, dssdev);
}
static const struct omapdss_atv_ops venc_ops = {
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 83f2a9177c14..ce2d67b6a8c7 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -63,6 +63,9 @@ void copy_timings_omap_to_drm(struct drm_display_mode *mode,
if (timings->interlace)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (timings->double_pixel)
+ mode->flags |= DRM_MODE_FLAG_DBLCLK;
+
if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -90,6 +93,7 @@ void copy_timings_drm_to_omap(struct omap_video_timings *timings,
timings->vbp = mode->vtotal - mode->vsync_end;
timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ timings->double_pixel = !!(mode->flags & DRM_MODE_FLAG_DBLCLK);
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
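A quick invariant worth noting: copy_timings_omap_to_drm() and copy_timings_drm_to_omap() are intended to be inverses for the flags they handle, and these two hunks extend that symmetry to the new double_pixel field. A minimal sketch of the round trip (hypothetical values, other fields elided):

	struct omap_video_timings t = { .double_pixel = true, /* ... */ };
	struct drm_display_mode m = {};

	copy_timings_omap_to_drm(&m, &t);  /* sets DRM_MODE_FLAG_DBLCLK in m.flags */
	copy_timings_drm_to_omap(&t, &m);  /* recovers t.double_pixel == true */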
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 2ed0754ed19e..075f2bb44867 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -34,14 +34,6 @@ struct omap_crtc {
const char *name;
enum omap_channel channel;
- /*
- * Temporary: eventually this will go away, but it is needed
- * for now to keep the output's happy. (They only need
- * mgr->id.) Eventually this will be replaced w/ something
- * more common-panel-framework-y
- */
- struct omap_overlay_manager *mgr;
-
struct omap_video_timings timings;
struct omap_drm_irq vblank_irq;
@@ -80,9 +72,13 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ /*
+ * Timeout is set to a "sufficiently" high value, which should cover
+ * a single frame refresh even on slower displays.
+ */
return wait_event_timeout(omap_crtc->pending_wait,
!omap_crtc->pending,
- msecs_to_jiffies(50));
+ msecs_to_jiffies(250));
}
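For context, the wait above is the consumer half of a simple flag-and-waitqueue pairing; the producer side lives in the flush and interrupt paths and, as a sketch (only 'pending' and 'pending_wait' are taken from this file), looks roughly like:

	/* flush path: mark a frame update as in flight before kicking dispc */
	omap_crtc->pending = true;
	/* ... program dispc, enable the vsync/framedone interrupt ... */

	/* interrupt path: the frame completed, release any waiters */
	omap_crtc->pending = false;
	wake_up(&omap_crtc->pending_wait);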
/* -----------------------------------------------------------------------------
@@ -100,31 +96,32 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
/* ovl-mgr-id -> crtc */
static struct omap_crtc *omap_crtcs[8];
+static struct omap_dss_device *omap_crtc_output[8];
/* we can probably ignore these until we support command-mode panels: */
-static int omap_crtc_dss_connect(struct omap_overlay_manager *mgr,
+static int omap_crtc_dss_connect(enum omap_channel channel,
struct omap_dss_device *dst)
{
- if (mgr->output)
+ if (omap_crtc_output[channel])
return -EINVAL;
- if ((mgr->supported_outputs & dst->id) == 0)
+ if ((dispc_mgr_get_supported_outputs(channel) & dst->id) == 0)
return -EINVAL;
- dst->manager = mgr;
- mgr->output = dst;
+ omap_crtc_output[channel] = dst;
+ dst->dispc_channel_connected = true;
return 0;
}
-static void omap_crtc_dss_disconnect(struct omap_overlay_manager *mgr,
+static void omap_crtc_dss_disconnect(enum omap_channel channel,
struct omap_dss_device *dst)
{
- mgr->output->manager = NULL;
- mgr->output = NULL;
+ omap_crtc_output[channel] = NULL;
+ dst->dispc_channel_connected = false;
}
-static void omap_crtc_dss_start_update(struct omap_overlay_manager *mgr)
+static void omap_crtc_dss_start_update(enum omap_channel channel)
{
}
@@ -138,6 +135,11 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
u32 framedone_irq, vsync_irq;
int ret;
+ if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) {
+ dispc_mgr_enable(channel, enable);
+ return;
+ }
+
if (dispc_mgr_is_enabled(channel) == enable)
return;
@@ -186,9 +188,9 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
-static int omap_crtc_dss_enable(struct omap_overlay_manager *mgr)
+static int omap_crtc_dss_enable(enum omap_channel channel)
{
- struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
+ struct omap_crtc *omap_crtc = omap_crtcs[channel];
struct omap_overlay_manager_info info;
memset(&info, 0, sizeof(info));
@@ -205,38 +207,38 @@ static int omap_crtc_dss_enable(struct omap_overlay_manager *mgr)
return 0;
}
-static void omap_crtc_dss_disable(struct omap_overlay_manager *mgr)
+static void omap_crtc_dss_disable(enum omap_channel channel)
{
- struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
+ struct omap_crtc *omap_crtc = omap_crtcs[channel];
omap_crtc_set_enabled(&omap_crtc->base, false);
}
-static void omap_crtc_dss_set_timings(struct omap_overlay_manager *mgr,
+static void omap_crtc_dss_set_timings(enum omap_channel channel,
const struct omap_video_timings *timings)
{
- struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
+ struct omap_crtc *omap_crtc = omap_crtcs[channel];
DBG("%s", omap_crtc->name);
omap_crtc->timings = *timings;
}
-static void omap_crtc_dss_set_lcd_config(struct omap_overlay_manager *mgr,
+static void omap_crtc_dss_set_lcd_config(enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
- struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
+ struct omap_crtc *omap_crtc = omap_crtcs[channel];
DBG("%s", omap_crtc->name);
dispc_mgr_set_lcd_config(omap_crtc->channel, config);
}
static int omap_crtc_dss_register_framedone(
- struct omap_overlay_manager *mgr,
+ enum omap_channel channel,
void (*handler)(void *), void *data)
{
return 0;
}
static void omap_crtc_dss_unregister_framedone(
- struct omap_overlay_manager *mgr,
+ enum omap_channel channel,
void (*handler)(void *), void *data)
{
}
@@ -269,18 +271,7 @@ static void omap_crtc_complete_page_flip(struct drm_crtc *crtc)
return;
spin_lock_irqsave(&dev->event_lock, flags);
-
- list_del(&event->base.link);
-
- /*
- * Queue the event for delivery if it's still linked to a file
- * handle, otherwise just destroy it.
- */
- if (event->base.file_priv)
- drm_crtc_send_vblank_event(crtc, event);
- else
- event->base.destroy(&event->base);
-
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -341,13 +332,6 @@ static void omap_crtc_destroy(struct drm_crtc *crtc)
kfree(omap_crtc);
}
-static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void omap_crtc_enable(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -414,24 +398,40 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
+static bool omap_crtc_is_plane_prop(struct drm_device *dev,
+ struct drm_property *property)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+
+ return property == priv->zorder_prop ||
+ property == dev->mode_config.rotation_property;
+}
+
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct drm_property *property,
uint64_t val)
{
- struct drm_plane_state *plane_state;
- struct drm_plane *plane = crtc->primary;
+ struct drm_device *dev = crtc->dev;
- /*
- * Delegate property set to the primary plane. Get the plane state and
- * set the property directly.
- */
+ if (omap_crtc_is_plane_prop(dev, property)) {
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane = crtc->primary;
- plane_state = drm_atomic_get_plane_state(state->state, plane);
- if (!plane_state)
- return -EINVAL;
+ /*
+ * Delegate property set to the primary plane. Get the plane
+ * state and set the property directly.
+ */
+
+ plane_state = drm_atomic_get_plane_state(state->state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ return drm_atomic_plane_set_property(plane, plane_state,
+ property, val);
+ }
- return drm_atomic_plane_set_property(plane, plane_state, property, val);
+ return -EINVAL;
}
static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
@@ -439,14 +439,20 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t *val)
{
- /*
- * Delegate property get to the primary plane. The
- * drm_atomic_plane_get_property() function isn't exported, but can be
- * called through drm_object_property_get_value() as that will call
- * drm_atomic_get_property() for atomic drivers.
- */
- return drm_object_property_get_value(&crtc->primary->base, property,
- val);
+ struct drm_device *dev = crtc->dev;
+
+ if (omap_crtc_is_plane_prop(dev, property)) {
+ /*
+ * Delegate property get to the primary plane. The
+ * drm_atomic_plane_get_property() function isn't exported, but
+ * can be called through drm_object_property_get_value() as that
+ * will call drm_atomic_get_property() for atomic drivers.
+ */
+ return drm_object_property_get_value(&crtc->primary->base,
+ property, val);
+ }
+
+ return -EINVAL;
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
@@ -462,7 +468,6 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
- .mode_fixup = omap_crtc_mode_fixup,
.mode_set_nofb = omap_crtc_mode_set_nofb,
.disable = omap_crtc_disable,
.enable = omap_crtc_enable,
@@ -520,9 +525,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_crtc->error_irq.irq = omap_crtc_error_irq;
omap_irq_register(dev, &omap_crtc->error_irq);
- /* temporary: */
- omap_crtc->mgr = omap_dss_get_overlay_manager(channel);
-
ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
&omap_crtc_funcs, NULL);
if (ret < 0) {
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index dfebdc4aa0f2..de275a5be1db 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -79,6 +79,16 @@ static const uint32_t reg[][4] = {
DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};
+static u32 dmm_read(struct dmm *dmm, u32 reg)
+{
+ return readl(dmm->base + reg);
+}
+
+static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
+{
+ writel(val, dmm->base + reg);
+}
+
/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
@@ -108,7 +118,7 @@ static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
i = DMM_FIXED_RETRY_COUNT;
while (true) {
- r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
+ r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
err = r & DMM_PATSTATUS_ERR;
if (err)
return -EFAULT;
@@ -140,11 +150,11 @@ static void release_engine(struct refill_engine *engine)
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
struct dmm *dmm = arg;
- uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+ uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
int i;
/* ack IRQ */
- writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+ dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
for (i = 0; i < dmm->num_engines; i++) {
if (status & DMM_IRQSTAT_LST) {
@@ -264,7 +274,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
txn->last_pat->next_pa = 0;
/* write to PAT_DESCR to clear out any pending transaction */
- writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+ dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
/* wait for engine ready: */
ret = wait_status(engine, DMM_PATSTATUS_READY);
@@ -280,8 +290,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
smp_mb();
/* kick reload */
- writel(engine->refill_pa,
- dmm->base + reg[PAT_DESCR][engine->id]);
+ dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);
if (wait) {
if (!wait_for_completion_timeout(&engine->compl,
@@ -309,6 +318,21 @@ static int fill(struct tcm_area *area, struct page **pages,
struct tcm_area slice, area_s;
struct dmm_txn *txn;
+ /*
+ * FIXME
+ *
+ * Asynchronous fill does not work reliably, as the driver does not
+ * handle errors in the async code paths. The fill operation may
+ * silently fail, leading to leaking DMM engines, which may eventually
+ * lead to deadlock if we run out of DMM engines.
+ *
+ * For now, always set 'wait' so that we only use sync fills. Async
+ * fills should be fixed, or alternatively we could decide to support
+ * only sync fills, in which case the whole async code path could be
+ * removed.
+ */
+
+ wait = true;
+
txn = dmm_txn_init(omap_dmm, area->tcm);
if (IS_ERR_OR_NULL(txn))
return -ENOMEM;
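The FIXME above comes down to engine accounting: each fill claims one of a small pool of refill engines, and an async error path that skips release_engine() shrinks the pool permanently. A toy userspace illustration of the failure mode (not driver code):

	#include <stdio.h>

	#define NUM_ENGINES 2

	static int engines_free = NUM_ENGINES;

	static int claim_engine(void)
	{
		if (engines_free == 0)
			return -1;	/* the kernel would block here -> deadlock */
		engines_free--;
		return 0;
	}

	int main(void)
	{
		for (int i = 0; i <= NUM_ENGINES; i++) {
			if (claim_engine()) {
				printf("fill %d: engine pool exhausted\n", i);
				return 1;
			}
			/* buggy async path: release_engine() never runs */
		}
		return 0;
	}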
@@ -573,10 +597,9 @@ static int omap_dmm_remove(struct platform_device *dev)
kfree(omap_dmm->engines);
if (omap_dmm->refill_va)
- dma_free_writecombine(omap_dmm->dev,
- REFILL_BUFFER_SIZE * omap_dmm->num_engines,
- omap_dmm->refill_va,
- omap_dmm->refill_pa);
+ dma_free_wc(omap_dmm->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ omap_dmm->refill_va, omap_dmm->refill_pa);
if (omap_dmm->dummy_page)
__free_page(omap_dmm->dummy_page);
@@ -642,7 +665,7 @@ static int omap_dmm_probe(struct platform_device *dev)
omap_dmm->dev = &dev->dev;
- hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
+ hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
omap_dmm->container_width = 256;
@@ -651,7 +674,7 @@ static int omap_dmm_probe(struct platform_device *dev)
atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
/* read out actual LUT width and height */
- pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
+ pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
@@ -661,12 +684,12 @@ static int omap_dmm_probe(struct platform_device *dev)
omap_dmm->num_lut++;
/* initialize DMM registers */
- writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
- writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
- writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
- writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
- writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
- writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
+ dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
+ dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
+ dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
+ dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
+ dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
+ dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
"omap_dmm_irq_handler", omap_dmm);
@@ -684,7 +707,7 @@ static int omap_dmm_probe(struct platform_device *dev)
* buffers for accelerated pan/scroll) and FILL_DSC<n> which
* we just generally don't care about.
*/
- writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+ dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
if (!omap_dmm->dummy_page) {
@@ -701,9 +724,9 @@ static int omap_dmm_probe(struct platform_device *dev)
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
/* alloc refill memory */
- omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
- REFILL_BUFFER_SIZE * omap_dmm->num_engines,
- &omap_dmm->refill_pa, GFP_KERNEL);
+ omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ &omap_dmm->refill_pa, GFP_KERNEL);
if (!omap_dmm->refill_va) {
dev_err(&dev->dev, "could not allocate refill memory\n");
goto fail;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index dfafdb602ad2..80398a684cae 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -142,7 +142,6 @@ static int omap_atomic_commit(struct drm_device *dev,
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_atomic_state_commit *commit;
- unsigned long flags;
unsigned int i;
int ret;
@@ -175,17 +174,6 @@ static int omap_atomic_commit(struct drm_device *dev,
priv->commit.pending |= commit->crtcs;
spin_unlock(&priv->commit.lock);
- /* Keep track of all CRTC events to unlink them in preclose(). */
- spin_lock_irqsave(&dev->event_lock, flags);
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- struct drm_crtc_state *cstate = state->crtc_states[i];
-
- if (cstate && cstate->event)
- list_add_tail(&cstate->event->base.link,
- &priv->commit.events);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(dev, state);
@@ -352,7 +340,7 @@ static int omap_modeset_init(struct drm_device *dev)
struct drm_connector *connector;
struct drm_encoder *encoder;
enum omap_channel channel;
- struct omap_overlay_manager *mgr;
+ struct omap_dss_device *out;
if (!omapdss_device_is_connected(dssdev))
continue;
@@ -399,8 +387,10 @@ static int omap_modeset_init(struct drm_device *dev)
* not considered.
*/
- mgr = omapdss_find_mgr_from_display(dssdev);
- channel = mgr->id;
+ out = omapdss_find_output_from_display(dssdev);
+ channel = out->dispc_channel;
+ omap_dss_put_device(out);
+
/*
* if this channel hasn't already been taken by a previously
* allocated crtc, we create a new crtc for it
@@ -673,7 +663,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
init_waitqueue_head(&priv->commit.wait);
spin_lock_init(&priv->commit.lock);
- INIT_LIST_HEAD(&priv->commit.events);
spin_lock_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
@@ -787,33 +776,6 @@ static void dev_lastclose(struct drm_device *dev)
}
}
-static void dev_preclose(struct drm_device *dev, struct drm_file *file)
-{
- struct omap_drm_private *priv = dev->dev_private;
- struct drm_pending_event *event;
- unsigned long flags;
-
- DBG("preclose: dev=%p", dev);
-
- /*
- * Unlink all pending CRTC events to make sure they won't be queued up
- * by a pending asynchronous commit.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- list_for_each_entry(event, &priv->commit.events, link) {
- if (event->file_priv == file) {
- file->event_space += event->event->length;
- event->file_priv = NULL;
- }
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static void dev_postclose(struct drm_device *dev, struct drm_file *file)
-{
- DBG("postclose: dev=%p, file=%p", dev, file);
-}
-
static const struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
@@ -838,8 +800,6 @@ static struct drm_driver omap_drm_driver = {
.unload = dev_unload,
.open = dev_open,
.lastclose = dev_lastclose,
- .preclose = dev_preclose,
- .postclose = dev_postclose,
.set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = omap_irq_enable_vblank,
@@ -900,12 +860,52 @@ static int pdev_remove(struct platform_device *device)
}
#ifdef CONFIG_PM_SLEEP
+static int omap_drm_suspend_all_displays(void)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev) {
+ if (!dssdev->driver)
+ continue;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ dssdev->driver->disable(dssdev);
+ dssdev->activate_after_resume = true;
+ } else {
+ dssdev->activate_after_resume = false;
+ }
+ }
+
+ return 0;
+}
+
+static int omap_drm_resume_all_displays(void)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev) {
+ if (!dssdev->driver)
+ continue;
+
+ if (dssdev->activate_after_resume) {
+ dssdev->driver->enable(dssdev);
+ dssdev->activate_after_resume = false;
+ }
+ }
+
+ return 0;
+}
+
static int omap_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
drm_kms_helper_poll_disable(drm_dev);
+ drm_modeset_lock_all(drm_dev);
+ omap_drm_suspend_all_displays();
+ drm_modeset_unlock_all(drm_dev);
+
return 0;
}
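These helpers only take effect once referenced from the driver's dev_pm_ops; that wiring is outside this hunk, so the following is a sketch of the usual pattern (everything except omap_drm_suspend/omap_drm_resume and pdev_remove is an assumption here):

	static const struct dev_pm_ops omapdrm_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(omap_drm_suspend, omap_drm_resume)
	};

	static struct platform_driver pdev = {
		.driver = {
			.name = "omapdrm",
			.pm = &omapdrm_pm_ops,
		},
		.probe = pdev_probe,	/* assumed probe callback */
		.remove = pdev_remove,
	};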
@@ -913,6 +913,10 @@ static int omap_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ drm_modeset_lock_all(drm_dev);
+ omap_drm_resume_all_displays();
+ drm_modeset_unlock_all(drm_dev);
+
drm_kms_helper_poll_enable(drm_dev);
return omap_gem_resume(dev);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 9e0030731c37..0fbe17d0ec6f 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -31,6 +31,8 @@
#include <drm/drm_gem.h>
#include <drm/omap_drm.h>
+#include "dss/omapdss.h"
+
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
@@ -106,7 +108,6 @@ struct omap_drm_private {
/* atomic commit */
struct {
- struct list_head events;
wait_queue_head_t wait;
u32 pending;
spinlock_t lock; /* Protects commit.pending */
@@ -189,12 +190,15 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_framebuffer *fb, struct drm_connector *from);
+bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
void omap_gem_init(struct drm_device *dev);
void omap_gem_deinit(struct drm_device *dev);
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, uint32_t flags);
+struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+ struct sg_table *sgt);
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
void omap_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 61714e9670ae..0bbb9c59622e 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -139,11 +139,16 @@ static void omap_encoder_enable(struct drm_encoder *encoder)
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct omap_dss_device *dssdev = omap_encoder->dssdev;
struct omap_dss_driver *dssdrv = dssdev->driver;
+ int r;
omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc),
omap_crtc_timings(encoder->crtc));
- dssdrv->enable(dssdev);
+ r = dssdrv->enable(dssdev);
+ if (r)
+ dev_err(encoder->dev->dev,
+ "Failed to enable display '%s': %d\n",
+ dssdev->name, r);
}
static int omap_encoder_atomic_check(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index ad202dfc1a49..610962396eb0 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -145,6 +145,14 @@ static uint32_t get_linear_addr(struct plane *plane,
return plane->paddr + offset;
}
+bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ struct plane *plane = &omap_fb->planes[0];
+
+ return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
+}
+
/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
*/
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
@@ -449,6 +457,14 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
goto fail;
}
+ if (i > 0 && pitch != mode_cmd->pitches[i - 1]) {
+ dev_err(dev->dev,
+ "pitches are not the same between framebuffer planes %d != %d\n",
+ pitch, mode_cmd->pitches[i - 1]);
+ ret = -EINVAL;
+ goto fail;
+ }
+
plane->bo = bos[i];
plane->offset = mode_cmd->offsets[i];
plane->pitch = pitch;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 8495a1a4b617..907154f5b67c 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -31,9 +31,9 @@
*/
/* note: we use upper 8 bits of flags for driver-internal flags: */
-#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
-#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
-#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
+#define OMAP_BO_MEM_DMA_API 0x01000000 /* memory allocated with the dma_alloc_* API */
+#define OMAP_BO_MEM_SHMEM 0x02000000 /* memory allocated through shmem backing */
+#define OMAP_BO_MEM_DMABUF 0x08000000 /* memory imported from a dmabuf */
struct omap_gem_object {
struct drm_gem_object base;
@@ -49,17 +49,25 @@ struct omap_gem_object {
uint32_t roll;
/**
- * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
- * is set and the paddr is valid. Also if the buffer is remapped in
- * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
- * the physical address and OMAP_BO_DMA is not set, then you should
- * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
- * not removed from under your feet.
+ * paddr contains the buffer DMA address. It is valid for
*
- * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
- * buffer is requested, but doesn't mean that it is. Use the
- * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
- * physical address.
+ * - buffers allocated through the DMA mapping API (with the
+ * OMAP_BO_MEM_DMA_API flag set)
+ *
+ * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
+ * if they are physically contiguous (when sgt->orig_nents == 1)
+ *
+ * - buffers mapped through the TILER when paddr_cnt is not zero, in
+ * which case the DMA address points to the TILER aperture
+ *
+ * Physically contiguous buffers have their DMA address equal to the
+ * physical address as we don't remap those buffers through the TILER.
+ *
+ * Buffers mapped to the TILER have their DMA address pointing to the
+ * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
+ * the DMA address must be accessed through omap_gem_get_paddr() to
+ * ensure that the mapping won't disappear unexpectedly. References must
+ * be released with omap_gem_put_paddr().
*/
dma_addr_t paddr;
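Given the refcounting rules documented above, a typical consumer of the DMA address would follow this sketch (assuming the omap_gem_get_paddr()/omap_gem_put_paddr() pair named in the comment; error handling abbreviated):

	dma_addr_t paddr;
	int ret;

	ret = omap_gem_get_paddr(obj, &paddr, true);	/* pins; maps via TILER if needed */
	if (ret)
		return ret;

	/* ... program scanout or DMA with paddr ... */

	omap_gem_put_paddr(obj);	/* drop the reference once the hardware is done */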
@@ -69,6 +77,12 @@ struct omap_gem_object {
uint32_t paddr_cnt;
/**
+ * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF flag
+ * is set and the sgt field is valid.
+ */
+ struct sg_table *sgt;
+
+ /**
* tiler block used when buffer is remapped in DMM/TILER.
*/
struct tiler_block *block;
@@ -91,17 +105,7 @@ struct omap_gem_object {
* sync-object allocated on demand (if needed)
*
* Per-buffer sync-object for tracking pending and completed hw/dma
- * read and write operations. The layout in memory is dictated by
- * the SGX firmware, which uses this information to stall the command
- * stream if a surface is not ready yet.
- *
- * Note that when buffer is used by SGX, the sync-object needs to be
- * allocated from a special heap of sync-objects. This way many sync
- * objects can be packed in a page, and not waste GPU virtual address
- * space. Because of this we have to have a omap_gem_set_sync_object()
- * API to allow replacement of the syncobj after it has (potentially)
- * already been allocated. A bit ugly but I haven't thought of a
- * better alternative.
+ * read and write operations.
*/
struct {
uint32_t write_pending;
@@ -166,16 +170,15 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
return drm_vma_node_offset_addr(&obj->vma_node);
}
-/* GEM objects can either be allocated from contiguous memory (in which
- * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
- * contiguous buffers can be remapped in TILER/DMM if they need to be
- * contiguous... but we don't do this all the time to reduce pressure
- * on TILER/DMM space when we know at allocation time that the buffer
- * will need to be scanned out.
- */
-static inline bool is_shmem(struct drm_gem_object *obj)
+static bool is_contiguous(struct omap_gem_object *omap_obj)
{
- return obj->filp != NULL;
+ if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
+ return true;
+
+ if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
+ return true;
+
+ return false;
}
/* -----------------------------------------------------------------------------
@@ -264,6 +267,19 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
for (i = 0; i < npages; i++) {
addrs[i] = dma_map_page(dev->dev, pages[i],
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev->dev, addrs[i])) {
+ dev_warn(dev->dev,
+ "%s: failed to map page\n", __func__);
+
+ for (i = i - 1; i >= 0; --i) {
+ dma_unmap_page(dev->dev, addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ ret = -ENOMEM;
+ goto free_addrs;
+ }
}
} else {
addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
@@ -278,6 +294,8 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
return 0;
+free_addrs:
+ kfree(addrs);
free_pages:
drm_gem_put_pages(obj, pages, true, false);
@@ -292,7 +310,7 @@ static int get_pages(struct drm_gem_object *obj, struct page ***pages)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = 0;
- if (is_shmem(obj) && !omap_obj->pages) {
+ if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
ret = omap_gem_attach_pages(obj);
if (ret) {
dev_err(obj->dev->dev, "could not attach pages\n");
@@ -396,7 +414,7 @@ static int fault_1d(struct drm_gem_object *obj,
omap_gem_cpu_sync(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
- BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+ BUG_ON(!is_contiguous(omap_obj));
pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
}
@@ -560,6 +578,11 @@ fail:
case 0:
case -ERESTARTSYS:
case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
@@ -728,7 +751,8 @@ fail:
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- return is_shmem(obj) &&
+
+ return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
@@ -761,9 +785,20 @@ void omap_gem_dma_sync(struct drm_gem_object *obj,
for (i = 0; i < npages; i++) {
if (!omap_obj->addrs[i]) {
- omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
+ dma_addr_t addr;
+
+ addr = dma_map_page(dev->dev, pages[i], 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev->dev, addr)) {
+ dev_warn(dev->dev,
+ "%s: failed to map page\n",
+ __func__);
+ break;
+ }
+
dirty = true;
+ omap_obj->addrs[i] = addr;
}
}
@@ -787,7 +822,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
mutex_lock(&obj->dev->struct_mutex);
- if (remap && is_shmem(obj) && priv->has_dmm) {
+ if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
if (omap_obj->paddr_cnt == 0) {
struct page **pages;
uint32_t npages = obj->size >> PAGE_SHIFT;
@@ -834,7 +869,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
omap_obj->paddr_cnt++;
*paddr = omap_obj->paddr;
- } else if (omap_obj->flags & OMAP_BO_DMA) {
+ } else if (is_contiguous(omap_obj)) {
*paddr = omap_obj->paddr;
} else {
ret = -EINVAL;
@@ -1138,20 +1173,6 @@ unlock:
return ret;
}
-/* it is a bit lame to handle updates in this sort of polling way, but
- * in case of PVR, the GPU can directly update read/write complete
- * values, and not really tell us which ones it updated.. this also
- * means that sync_lock is not quite sufficient. So we'll need to
- * do something a bit better when it comes time to add support for
- * separate 2d hw..
- */
-void omap_gem_op_update(void)
-{
- spin_lock(&sync_lock);
- sync_op_update();
- spin_unlock(&sync_lock);
-}
-
/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
@@ -1219,7 +1240,7 @@ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
* is currently blocked.. fxn() can be called from any context
*
* (TODO for now fxn is called back from whichever context calls
- * omap_gem_op_update().. but this could be better defined later
+ * omap_gem_op_finish().. but this could be better defined later
* if needed)
*
* TODO more code in common w/ _sync()..
@@ -1261,50 +1282,10 @@ int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
return 0;
}
-/* special API so PVR can update the buffer to use a sync-object allocated
- * from it's sync-obj heap. Only used for a newly allocated (from PVR's
- * perspective) sync-object, so we overwrite the new syncobj w/ values
- * from the already allocated syncobj (if there is one)
- */
-int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
-
- spin_lock(&sync_lock);
-
- if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
- /* clearing a previously set syncobj */
- syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
- GFP_ATOMIC);
- if (!syncobj) {
- ret = -ENOMEM;
- goto unlock;
- }
- omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
- omap_obj->sync = syncobj;
- } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
- /* replacing an existing syncobj */
- if (omap_obj->sync) {
- memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
- kfree(omap_obj->sync);
- }
- omap_obj->flags |= OMAP_BO_EXT_SYNC;
- omap_obj->sync = syncobj;
- }
-
-unlock:
- spin_unlock(&sync_lock);
- return ret;
-}
-
/* -----------------------------------------------------------------------------
* Constructor & Destructor
*/
-/* don't call directly.. called from GEM core when it is time to actually
- * free the object..
- */
void omap_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -1324,22 +1305,23 @@ void omap_gem_free_object(struct drm_gem_object *obj)
*/
WARN_ON(omap_obj->paddr_cnt > 0);
- /* don't free externally allocated backing memory */
- if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
- if (omap_obj->pages)
+ if (omap_obj->pages) {
+ if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
+ kfree(omap_obj->pages);
+ else
omap_gem_detach_pages(obj);
+ }
- if (!is_shmem(obj)) {
- dma_free_writecombine(dev->dev, obj->size,
- omap_obj->vaddr, omap_obj->paddr);
- } else if (omap_obj->vaddr) {
- vunmap(omap_obj->vaddr);
- }
+ if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
+ dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
+ omap_obj->paddr);
+ } else if (omap_obj->vaddr) {
+ vunmap(omap_obj->vaddr);
+ } else if (obj->import_attach) {
+ drm_prime_gem_destroy(obj, omap_obj->sgt);
}
- /* don't free externally allocated syncobj */
- if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
- kfree(omap_obj->sync);
+ kfree(omap_obj->sync);
drm_gem_object_release(obj);
@@ -1357,84 +1339,160 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
size_t size;
int ret;
+ /* Validate the flags and compute the memory and cache flags. */
if (flags & OMAP_BO_TILED) {
if (!priv->usergart) {
dev_err(dev->dev, "Tiled buffers require DMM\n");
return NULL;
}
- /* tiled buffers are always shmem paged backed.. when they are
- * scanned out, they are remapped into DMM/TILER
+ /*
+ * Tiled buffers are always shmem paged backed. When they are
+ * scanned out, they are remapped into DMM/TILER.
*/
flags &= ~OMAP_BO_SCANOUT;
+ flags |= OMAP_BO_MEM_SHMEM;
- /* currently don't allow cached buffers.. there is some caching
- * stuff that needs to be handled better
+ /*
+ * Currently don't allow cached buffers. There is some caching
+ * stuff that needs to be handled better.
*/
flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
flags |= tiler_get_cpu_cache_flags();
-
- /* align dimensions to slot boundaries... */
- tiler_align(gem2fmt(flags),
- &gsize.tiled.width, &gsize.tiled.height);
-
- /* ...and calculate size based on aligned dimensions */
- size = tiler_size(gem2fmt(flags),
- gsize.tiled.width, gsize.tiled.height);
- } else {
- size = PAGE_ALIGN(gsize.bytes);
+ } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+ /*
+ * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
+ * tiled. However, to lower the pressure on memory allocation,
+ * use contiguous memory only if no TILER is available.
+ */
+ flags |= OMAP_BO_MEM_DMA_API;
+ } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
+ /*
+ * All other buffers not backed by dma_buf are shmem-backed.
+ */
+ flags |= OMAP_BO_MEM_SHMEM;
}
+ /* Allocate and initialize the OMAP GEM object. */
omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
if (!omap_obj)
return NULL;
obj = &omap_obj->base;
+ omap_obj->flags = flags;
- if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
- /* attempt to allocate contiguous memory if we don't
- * have DMM for remappign discontiguous buffers
+ if (flags & OMAP_BO_TILED) {
+ /*
+ * For tiled buffers align dimensions to slot boundaries and
+ * calculate size based on aligned dimensions.
*/
- omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
- &omap_obj->paddr, GFP_KERNEL);
- if (!omap_obj->vaddr) {
- kfree(omap_obj);
+ tiler_align(gem2fmt(flags), &gsize.tiled.width,
+ &gsize.tiled.height);
- return NULL;
- }
-
- flags |= OMAP_BO_DMA;
- }
+ size = tiler_size(gem2fmt(flags), gsize.tiled.width,
+ gsize.tiled.height);
- spin_lock(&priv->list_lock);
- list_add(&omap_obj->mm_list, &priv->obj_list);
- spin_unlock(&priv->list_lock);
-
- omap_obj->flags = flags;
-
- if (flags & OMAP_BO_TILED) {
omap_obj->width = gsize.tiled.width;
omap_obj->height = gsize.tiled.height;
+ } else {
+ size = PAGE_ALIGN(gsize.bytes);
}
- if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
+ /* Initialize the GEM object. */
+ if (!(flags & OMAP_BO_MEM_SHMEM)) {
drm_gem_private_object_init(dev, obj, size);
} else {
ret = drm_gem_object_init(dev, obj, size);
if (ret)
- goto fail;
+ goto err_free;
mapping = file_inode(obj->filp)->i_mapping;
mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
}
+ /* Allocate memory if needed. */
+ if (flags & OMAP_BO_MEM_DMA_API) {
+ omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
+ &omap_obj->paddr,
+ GFP_KERNEL);
+ if (!omap_obj->vaddr)
+ goto err_release;
+ }
+
+ spin_lock(&priv->list_lock);
+ list_add(&omap_obj->mm_list, &priv->obj_list);
+ spin_unlock(&priv->list_lock);
+
return obj;
-fail:
- omap_gem_free_object(obj);
+err_release:
+ drm_gem_object_release(obj);
+err_free:
+ kfree(omap_obj);
return NULL;
}
+struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+ struct sg_table *sgt)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_gem_object *omap_obj;
+ struct drm_gem_object *obj;
+ union omap_gem_size gsize;
+
+ /* Without a DMM only physically contiguous buffers can be supported. */
+ if (sgt->orig_nents != 1 && !priv->has_dmm)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&dev->struct_mutex);
+
+ gsize.bytes = PAGE_ALIGN(size);
+ obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
+ if (!obj) {
+ obj = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ omap_obj = to_omap_bo(obj);
+ omap_obj->sgt = sgt;
+
+ if (sgt->orig_nents == 1) {
+ omap_obj->paddr = sg_dma_address(sgt->sgl);
+ } else {
+ /* Create pages list from sgt */
+ struct sg_page_iter iter;
+ struct page **pages;
+ unsigned int npages;
+ unsigned int i = 0;
+
+ npages = DIV_ROUND_UP(size, PAGE_SIZE);
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ omap_gem_free_object(obj);
+ obj = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ omap_obj->pages = pages;
+
+ for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+ if (i >= npages) {
+ /* more pages than expected; flag it for the check below */
+ i++;
+ break;
+ }
+ pages[i++] = sg_page_iter_page(&iter);
+ }
+
+ if (WARN_ON(i != npages)) {
+ omap_gem_free_object(obj);
+ obj = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+ }
+
+done:
+ mutex_unlock(&dev->struct_mutex);
+ return obj;
+}
+
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 27c297672076..af267c35d813 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -21,6 +21,10 @@
#include "omap_drv.h"
+/* -----------------------------------------------------------------------------
+ * DMABUF Export
+ */
+
static struct sg_table *omap_gem_map_dma_buf(
struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
@@ -79,7 +83,7 @@ static void omap_gem_dmabuf_release(struct dma_buf *buffer)
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
- size_t start, size_t len, enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
@@ -93,11 +97,12 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
return omap_gem_get_pages(obj, &pages, true);
}
-static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
- size_t start, size_t len, enum dma_data_direction dir)
+static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
+ enum dma_data_direction dir)
{
struct drm_gem_object *obj = buffer->priv;
omap_gem_put_pages(obj);
+ return 0;
}
@@ -178,15 +183,20 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
return dma_buf_export(&exp_info);
}
+/* -----------------------------------------------------------------------------
+ * DMABUF Import
+ */
+
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
- struct dma_buf *buffer)
+ struct dma_buf *dma_buf)
{
+ struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
+ struct sg_table *sgt;
+ int ret;
- /* is this one of own objects? */
- if (buffer->ops == &omap_dmabuf_ops) {
- obj = buffer->priv;
- /* is it from our device? */
+ if (dma_buf->ops == &omap_dmabuf_ops) {
+ obj = dma_buf->priv;
if (obj->dev == dev) {
/*
* Importing dmabuf exported from our own gem increases
@@ -197,9 +207,33 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
}
}
- /*
- * TODO add support for importing buffers from other devices..
- * for now we don't need this but would be nice to add eventually
- */
- return ERR_PTR(-EINVAL);
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+ }
+
+ obj->import_attach = attach;
+
+ return obj;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index d75b197eff46..93ee538a99f5 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -177,6 +177,12 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
+ if (state->fb) {
+ if (state->rotation != BIT(DRM_ROTATE_0) &&
+ !omap_framebuffer_supports_rotation(state->fb))
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f88a631c43ab..ceb20486dacf 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -847,6 +847,7 @@ static const struct drm_display_mode innolux_g121x1_l03_mode = {
.vsync_end = 768 + 38 + 1,
.vtotal = 768 + 38 + 1 + 0,
.vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc innolux_g121x1_l03 = {
@@ -982,6 +983,29 @@ static const struct panel_desc lg_lb070wv8 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct drm_display_mode lg_lp120up1_mode = {
+ .clock = 162300,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 4,
+ .vsync_end = 1280 + 4 + 4,
+ .vtotal = 1280 + 4 + 4 + 12,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc lg_lp120up1 = {
+ .modes = &lg_lp120up1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 267,
+ .height = 183,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -1016,6 +1040,7 @@ static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
.vsync_end = 272 + 2 + 4,
.vtotal = 272 + 2 + 4 + 2,
.vrefresh = 74,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc nec_nl4827hc19_05b = {
@@ -1176,6 +1201,42 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct display_timing urt_umsh_8596md_timing = {
+ .pixelclock = { 33260000, 33260000, 33260000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 41, 41, 41 },
+ .hback_porch = { 216 - 128, 216 - 128, 216 - 128 },
+ .hsync_len = { 71, 128, 128 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 10, 10, 10 },
+ .vback_porch = { 35 - 2, 35 - 2, 35 - 2 },
+ .vsync_len = { 2, 2, 2 },
+ .flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_NEGEDGE |
+ DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc urt_umsh_8596md_lvds = {
+ .timings = &urt_umsh_8596md_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+};
+
+static const struct panel_desc urt_umsh_8596md_parallel = {
+ .timings = &urt_umsh_8596md_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct of_device_id platform_of_match[] = {
{
.compatible = "ampire,am800480r3tmqwa1h",
@@ -1256,6 +1317,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
}, {
+ .compatible = "lg,lp120up1",
+ .data = &lg_lp120up1,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
@@ -1280,6 +1344,24 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ .compatible = "urt,umsh-8596md-t",
+ .data = &urt_umsh_8596md_parallel,
+ }, {
+ .compatible = "urt,umsh-8596md-1t",
+ .data = &urt_umsh_8596md_parallel,
+ }, {
+ .compatible = "urt,umsh-8596md-7t",
+ .data = &urt_umsh_8596md_parallel,
+ }, {
+ .compatible = "urt,umsh-8596md-11t",
+ .data = &urt_umsh_8596md_lvds,
+ }, {
+ .compatible = "urt,umsh-8596md-19t",
+ .data = &urt_umsh_8596md_lvds,
+ }, {
+ .compatible = "urt,umsh-8596md-20t",
+ .data = &urt_umsh_8596md_parallel,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 86276519b2ef..030409a3ee4e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(user_bo);
+ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+ qcrtc->hot_spot_x = hot_x;
+ qcrtc->hot_spot_y = hot_y;
+
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = qcrtc->cur_x;
- cmd->u.set.position.y = qcrtc->cur_y;
+ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = qcrtc->cur_x;
- cmd->u.position.y = qcrtc->cur_y;
+ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
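The hotspot bookkeeping above keeps the visible cursor tip stationary when userspace swaps in an image with a different hotspot. A worked example with made-up numbers:

	/*
	 * The cursor sits at cur_x = 100 with hot_spot_x = 4, so the device
	 * position is 100 + 4 = 104. A new image arrives with hot_x = 12;
	 * cursor_set2 computes
	 *
	 *	cur_x = 100 + 4 - 12 = 92;	hot_spot_x = 12;
	 *
	 * and the device position stays 92 + 12 = 104, so the tip does not
	 * jump on screen.
	 */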
@@ -734,14 +739,6 @@ static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
DRM_DEBUG("\n");
}
-static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- DRM_DEBUG("\n");
- return true;
-}
-
static void qxl_enc_prepare(struct drm_encoder *encoder)
{
DRM_DEBUG("\n");
@@ -864,7 +861,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
.dpms = qxl_enc_dpms,
- .mode_fixup = qxl_enc_mode_fixup,
.prepare = qxl_enc_prepare,
.mode_set = qxl_enc_mode_set,
.commit = qxl_enc_commit,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 6e6b9b1519b8..3f3897eb458c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
int index;
int cur_x;
int cur_y;
+ int hot_spot_x;
+ int hot_spot_y;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ec1593a6a561..f66c33dd21a3 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -66,9 +66,10 @@ int atom_debug = 0;
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
-static uint32_t atom_arg_mask[8] =
- { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
-0xFF000000 };
+static uint32_t atom_arg_mask[8] = {
+ 0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
+ 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+};
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
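For reference, the mask/shift pair at a given alignment index extracts one aligned byte or word field from a 32-bit source; a small sketch (the helper name is illustrative, not part of atom.c):

	static uint32_t atom_get_src_field(uint32_t val, int align)
	{
		/* e.g. align == 2 selects bits 8..23: (val & 0x00FFFF00) >> 8 */
		return (val & atom_arg_mask[align]) >> atom_arg_shift[align];
	}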
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 801dd60ac192..532127c55de6 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -275,13 +275,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_on(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_off(dev, radeon_crtc->crtc_id);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -1665,11 +1667,11 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
}
int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
+ struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE4(rdev))
return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
@@ -1740,6 +1742,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
@@ -1749,6 +1752,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+ test_radeon_crtc->pll_id == ATOM_PPLL2)
+ continue;
/* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
@@ -1770,6 +1777,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock;
@@ -1785,6 +1793,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+ /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+ if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+ test_radeon_crtc->pll_id == ATOM_PPLL2)
+ continue;
/* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 44ee72e04df9..cead089a9e7d 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -37,10 +37,10 @@
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
static char *voltage_names[] = {
- "0.4V", "0.6V", "0.8V", "1.2V"
+ "0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
- "0dB", "3.5dB", "6dB", "9.5dB"
+ "0dB", "3.5dB", "6dB", "9.5dB"
};
/***** radeon AUX functions *****/
@@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
unsigned lane_num, i, max_pix_clock;
- for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
- for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
- max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ ENCODER_OBJECT_ID_NUTMEG) {
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ max_pix_clock = (lane_num * 270000 * 8) / bpp;
if (max_pix_clock >= pix_clock) {
*dp_lanes = lane_num;
- *dp_rate = link_rates[i];
+ *dp_rate = 270000;
return 0;
}
}
+ } else {
+ for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+ for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ if (max_pix_clock >= pix_clock) {
+ *dp_lanes = lane_num;
+ *dp_rate = link_rates[i];
+ return 0;
+ }
+ }
+ }
}
return -EINVAL;
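The restructured search also changes the iteration order for non-NUTMEG encoders: link rates are now walked in the outer loop, so the lowest rate that can carry the mode is chosen before lane count grows. A standalone sketch of that selection policy (illustrative only; the bandwidth formula matches the max_pix_clock computation above):

	static int pick_dp_config(const unsigned *rates, unsigned num_rates,
				  unsigned max_lanes, unsigned bpp,
				  unsigned pix_clock,
				  unsigned *dp_lanes, unsigned *dp_rate)
	{
		unsigned i, lanes;

		for (i = 0; i < num_rates; i++) {
			for (lanes = 1; lanes <= max_lanes; lanes <<= 1) {
				if ((lanes * rates[i] * 8) / bpp >= pix_clock) {
					*dp_lanes = lanes;
					*dp_rate = rates[i];
					return 0;
				}
			}
		}

		return -EINVAL;
	}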
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 01b20e14a247..587cae4e73c9 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* vertical FP must be at least 1 */
+ if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+ adjusted_mode->crtc_vsync_start++;
+
/* get the native mode for scaling */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
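The new check guards the vertical front porch, which is crtc_vsync_start - crtc_vdisplay:

	/*
	 * Illustrative arithmetic: with crtc_vdisplay = 1080 and
	 * crtc_vsync_start = 1080 the front porch is 1080 - 1080 = 0 lines;
	 * after the fixup vsync_start becomes 1081, giving the minimum
	 * one-line porch.
	 */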
@@ -892,8 +896,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
else
args.v1.ucLaneNum = 4;
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -910,6 +912,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
break;
case 2:
case 3:
@@ -2623,16 +2629,8 @@ radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
}
-static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
.dpms = radeon_atom_ext_dpms,
- .mode_fixup = radeon_atom_ext_mode_fixup,
.prepare = radeon_atom_ext_prepare,
.mode_set = radeon_atom_ext_mode_set,
.commit = radeon_atom_ext_commit,
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 69556f5e247e..38e5123708e7 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1163,12 +1163,11 @@ u32 btc_valid_sclk[40] =
155000, 160000, 165000, 170000, 175000, 180000, 185000, 190000, 195000, 200000
};
-static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
-{
- { 10000, 30000, RADEON_SCLK_UP },
- { 15000, 30000, RADEON_SCLK_UP },
- { 20000, 30000, RADEON_SCLK_UP },
- { 25000, 30000, RADEON_SCLK_UP }
+static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = {
+ { 10000, 30000, RADEON_SCLK_UP },
+ { 15000, 30000, RADEON_SCLK_UP },
+ { 20000, 30000, RADEON_SCLK_UP },
+ { 25000, 30000, RADEON_SCLK_UP }
};
void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
@@ -1637,14 +1636,14 @@ static int btc_init_smc_table(struct radeon_device *rdev,
cypress_populate_smc_voltage_tables(rdev, table);
switch (rdev->pm.int_thermal_type) {
- case THERMAL_TYPE_EVERGREEN:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+ case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
break;
- case THERMAL_TYPE_NONE:
+ case THERMAL_TYPE_NONE:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
break;
- default:
+ default:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
break;
}
@@ -1860,37 +1859,37 @@ static bool btc_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
case MC_SEQ_RAS_TIMING >> 2:
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
break;
- case MC_SEQ_CAS_TIMING >> 2:
+ case MC_SEQ_CAS_TIMING >> 2:
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING >> 2:
+ case MC_SEQ_MISC_TIMING >> 2:
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING2 >> 2:
+ case MC_SEQ_MISC_TIMING2 >> 2:
*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D0 >> 2:
+ case MC_SEQ_RD_CTL_D0 >> 2:
*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D1 >> 2:
+ case MC_SEQ_RD_CTL_D1 >> 2:
*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D0 >> 2:
+ case MC_SEQ_WR_CTL_D0 >> 2:
*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D1 >> 2:
+ case MC_SEQ_WR_CTL_D1 >> 2:
*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
break;
- case MC_PMG_CMD_EMRS >> 2:
+ case MC_PMG_CMD_EMRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS >> 2:
+ case MC_PMG_CMD_MRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS1 >> 2:
+ case MC_PMG_CMD_MRS1 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
break;
- default:
+ default:
result = false;
break;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 4a09947be244..35e0fc3ae8a7 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -192,9 +192,9 @@ static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
- struct ci_power_info *pi = rdev->pm.dpm.priv;
+ struct ci_power_info *pi = rdev->pm.dpm.priv;
- return pi;
+ return pi;
}
static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
@@ -1632,7 +1632,7 @@ static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
else
power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
- ci_set_power_limit(rdev, power_limit);
+ ci_set_power_limit(rdev, power_limit);
if (pi->caps_automatic_dc_transition) {
if (ac_power)
@@ -2017,9 +2017,9 @@ static void ci_enable_display_gap(struct radeon_device *rdev)
{
u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
- tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
- tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
- DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
+ tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
+ tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}
@@ -2938,8 +2938,8 @@ static int ci_populate_single_memory_level(struct radeon_device *rdev,
memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
- memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
- memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+ memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
@@ -3152,7 +3152,7 @@ static int ci_calculate_sclk_params(struct radeon_device *rdev,
spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 |= SPLL_DITHEN;
if (pi->caps_sclk_ss_support) {
struct radeon_atom_ss ss;
@@ -3229,7 +3229,7 @@ static int ci_populate_single_graphic_level(struct radeon_device *rdev,
graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
- graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+ graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
@@ -4393,7 +4393,7 @@ static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
break;
case MC_SEQ_CAS_TIMING >> 2:
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
- break;
+ break;
case MC_SEQ_MISC_TIMING >> 2:
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
break;
@@ -4625,7 +4625,7 @@ static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
if (ret)
goto init_mc_done;
- ret = ci_copy_vbios_mc_reg_table(table, ci_table);
+ ret = ci_copy_vbios_mc_reg_table(table, ci_table);
if (ret)
goto init_mc_done;
@@ -4916,7 +4916,7 @@ static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *
allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
return 0;
@@ -5517,7 +5517,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct ci_ps *ps;
@@ -5693,8 +5693,8 @@ int ci_dpm_init(struct radeon_device *rdev)
return ret;
}
- pi->dll_default_on = false;
- pi->sram_end = SMC_RAM_END;
+ pi->dll_default_on = false;
+ pi->sram_end = SMC_RAM_END;
pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
@@ -5734,9 +5734,9 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->caps_uvd_dpm = true;
pi->caps_vce_dpm = true;
- ci_get_leakage_voltages(rdev);
- ci_patch_dependency_tables_with_leakage(rdev);
- ci_set_private_data_variables_based_on_pptable(rdev);
+ ci_get_leakage_voltages(rdev);
+ ci_patch_dependency_tables_with_leakage(rdev);
+ ci_set_private_data_variables_based_on_pptable(rdev);
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
@@ -5839,7 +5839,7 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
else
rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
- }
+ }
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
@@ -5860,7 +5860,7 @@ int ci_dpm_init(struct radeon_device *rdev)
#endif
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
+ &frev, &crev, &data_offset)) {
pi->caps_sclk_ss_support = true;
pi->caps_mclk_ss_support = true;
pi->dynamic_ss = true;
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 35c6f648ba04..24760ee3063e 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -194,11 +194,11 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
return PPSMC_Result_OK;
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
- if ((tmp & CKEN) == 0)
+ tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ if ((tmp & CKEN) == 0)
break;
- udelay(1);
- }
+ udelay(1);
+ }
return PPSMC_Result_OK;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 4c30d8c65558..8ac82df2efde 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1712,7 +1712,7 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
*/
u32 cik_get_xclk(struct radeon_device *rdev)
{
- u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 reference_clock = rdev->clock.spll.reference_freq;
if (rdev->flags & RADEON_IS_IGP) {
if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
@@ -2343,9 +2343,13 @@ out:
*/
static void cik_tiling_mode_table_init(struct radeon_device *rdev)
{
- const u32 num_tile_mode_states = 32;
- const u32 num_secondary_tile_mode_states = 16;
- u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+ u32 *tile = rdev->config.cik.tile_mode_array;
+ u32 *macrotile = rdev->config.cik.macrotile_mode_array;
+ const u32 num_tile_mode_states =
+ ARRAY_SIZE(rdev->config.cik.tile_mode_array);
+ const u32 num_secondary_tile_mode_states =
+ ARRAY_SIZE(rdev->config.cik.macrotile_mode_array);
+ u32 reg_offset, split_equal_to_row_size;
u32 num_pipe_configs;
u32 num_rbs = rdev->config.cik.max_backends_per_se *
rdev->config.cik.max_shader_engines;
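
Editor's note: these declarations set up the rewrite visible in the rest of this hunk. Instead of recomputing every register value inside a 32-way switch per pipe configuration, the function now zeroes tile[]/macrotile[], assigns only the indices that carry a real encoding, and flushes the whole table to GB_TILE_MODE0/GB_MACROTILE_MODE0 in two short loops -- which also keeps the cached copies in rdev->config.cik in step with the hardware for free. The fill-then-flush shape, reduced to a stand-alone sketch (write_reg and the values are stand-ins, not the radeon macros):

    #include <string.h>

    #define NUM_TILE_MODES 32

    static void write_reg(unsigned offset, unsigned val)
    {
    	(void)offset; (void)val;	/* MMIO write stub */
    }

    static void program_tile_modes(void)
    {
    	unsigned tile[NUM_TILE_MODES];
    	unsigned i;

    	memset(tile, 0, sizeof(tile));	/* unassigned slots default to 0 */
    	tile[0]  = 0x11;		/* only meaningful entries are set */
    	tile[27] = 0x22;

    	for (i = 0; i < NUM_TILE_MODES; i++)	/* single bulk flush */
    		write_reg(i * 4, tile[i]);
    }
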
@@ -2367,1032 +2371,669 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
if (num_pipe_configs > 8)
num_pipe_configs = 16;
- if (num_pipe_configs == 16) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else if (num_pipe_configs == 8) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else if (num_pipe_configs == 4) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ tile[reg_offset] = 0;
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ macrotile[reg_offset] = 0;
+
+ switch (num_pipe_configs) {
+ case 16:
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+ break;
+
+ case 8:
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+ break;
+
+ case 4:
if (num_rbs == 4) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
} else if (num_rbs < 4) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else if (num_pipe_configs == 2) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P2);
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
}
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+ break;
+
+ case 2:
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P2);
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+ break;
+
+ default:
DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
+ }
}
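
The rewrite above replaces the old per-offset switch statements with sparse tile[] and macrotile[] assignments followed by one register-write loop per table. Below is a minimal standalone sketch of that pattern; the base address and values are illustrative stand-ins rather than the driver's real numbers, and it assumes the arrays are zeroed before the sparse assignments (not visible in this hunk), which is what keeps the indices the old switch handled with "default: gb_tile_moden = 0" (15 and 18-26 for tile, 7 for macrotile) writing 0.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_TILE_MODE_STATES 32
#define GB_TILE_MODE0        0x9910u  /* illustrative base, not verified */

/* stand-in for the kernel's WREG32() */
static void wreg32(uint32_t reg, uint32_t val)
{
	printf("reg 0x%05x <- 0x%08x\n", reg, val);
}

static void tiling_table_init(void)
{
	uint32_t tile[NUM_TILE_MODE_STATES];
	unsigned reg_offset;

	/* zero first: indices the old switch hit with
	 * "default: gb_tile_moden = 0" simply stay zero */
	memset(tile, 0, sizeof(tile));

	/* sparse assignments replace the per-offset switch cases */
	tile[0] = 0x00000011;  /* illustrative values only */
	tile[1] = 0x00000012;

	/* one loop writes the whole table */
	for (reg_offset = 0; reg_offset < NUM_TILE_MODE_STATES; reg_offset++)
		wreg32(GB_TILE_MODE0 + reg_offset * 4, tile[reg_offset]);
}

int main(void)
{
	tiling_table_init();
	return 0;
}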
/**
@@ -4219,13 +3860,20 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
}
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
+ r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+ RADEON_USEC_IB_TEST_TIMEOUT));
+ if (r < 0) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
+ } else if (r == 0) {
+ DRM_ERROR("radeon: fence wait timed out.\n");
+ radeon_scratch_free(rdev, scratch);
+ radeon_ib_free(rdev, &ib);
+ return -ETIMEDOUT;
}
+ r = 0;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
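
This hunk, and the matching ones below in cik_sdma_ib_test, r100_ib_test, r600_ib_test and r600_dma_ib_test, swap radeon_fence_wait() for radeon_fence_wait_timeout(), whose return convention differs: negative on error, 0 when the timeout expires, positive for the jiffies left. The trailing r = 0 resets the positive remainder so the later success path still returns 0. A self-contained sketch of the caller pattern follows; the usecs_to_jiffies stub assumes HZ = 250 just so it compiles standalone.

#include <errno.h>

#define RADEON_USEC_IB_TEST_TIMEOUT 1000000 /* 1 s, from the radeon.h hunk */

/* stubs so the sketch compiles standalone; the stub conversion
 * assumes HZ = 250, i.e. one jiffy = 4000 us */
static long usecs_to_jiffies(unsigned long us) { return (long)(us / 4000); }
static long radeon_fence_wait_timeout(void *fence, int intr, long timeout)
{
	(void)fence; (void)intr;
	return timeout; /* pretend the fence signaled immediately */
}

static int ib_test_wait(void *fence)
{
	long r = radeon_fence_wait_timeout(fence, 0,
			usecs_to_jiffies(RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0)
		return (int)r;      /* the wait itself failed */
	else if (r == 0)
		return -ETIMEDOUT;  /* fence never signaled in time */
	return 0;                   /* r > 0: signaled with time to spare */
}

int main(void)
{
	return ib_test_wait(0); /* 0 stands in for a real fence pointer */
}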
@@ -9702,13 +9350,13 @@ uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
- u32 cntl_reg, u32 status_reg)
+ u32 cntl_reg, u32 status_reg)
{
int r, i;
struct atom_clock_dividers dividers;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index d16f2eebd95e..9c351dc8a9e0 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -737,11 +737,16 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
}
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
+ r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+ RADEON_USEC_IB_TEST_TIMEOUT));
+ if (r < 0) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
return r;
+ } else if (r == 0) {
+ DRM_ERROR("radeon: fence wait timed out.\n");
+ return -ETIMEDOUT;
}
+ r = 0;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index ca058589ddef..a4edd0702718 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1620,14 +1620,14 @@ static int cypress_init_smc_table(struct radeon_device *rdev,
cypress_populate_smc_voltage_tables(rdev, table);
switch (rdev->pm.int_thermal_type) {
- case THERMAL_TYPE_EVERGREEN:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+ case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
break;
- case THERMAL_TYPE_NONE:
+ case THERMAL_TYPE_NONE:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
break;
- default:
+ default:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
break;
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 2ad462896896..34f7a29d9366 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1140,7 +1140,7 @@ static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
int r, i;
struct atom_clock_dividers dividers;
- r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
clock, false, &dividers);
if (r)
return r;
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
+static const unsigned ni_dig_offsets[] =
+{
+ NI_DIG0_REGISTER_OFFSET,
+ NI_DIG1_REGISTER_OFFSET,
+ NI_DIG2_REGISTER_OFFSET,
+ NI_DIG3_REGISTER_OFFSET,
+ NI_DIG4_REGISTER_OFFSET,
+ NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+ EVERGREEN_DP0_REGISTER_OFFSET,
+ EVERGREEN_DP1_REGISTER_OFFSET,
+ EVERGREEN_DP2_REGISTER_OFFSET,
+ EVERGREEN_DP3_REGISTER_OFFSET,
+ EVERGREEN_DP4_REGISTER_OFFSET,
+ EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested
+ * crtc. We go from crtc to connector, which is not reliable since it
+ * should really be the opposite direction. If the crtc is enabled, find
+ * the dig_fe which selects this crtc and ensure that it is enabled. If
+ * such a dig_fe is found, find the dig_be which selects the found dig_fe
+ * and ensure that it is enabled and in DP_SST mode.
+ * If UNIPHY_PLL_CONTROL1 is enabled, we should disconnect the timing
+ * from the dp symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+ unsigned crtc_id, unsigned *ret_dig_fe)
+{
+ unsigned i;
+ unsigned dig_fe;
+ unsigned dig_be;
+ unsigned dig_en_be;
+ unsigned uniphy_pll;
+ unsigned digs_fe_selected;
+ unsigned dig_be_mode;
+ unsigned dig_fe_mask;
+ bool is_enabled = false;
+ bool found_crtc = false;
+
+ /* loop through all running dig_fe to find selected crtc */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+ /* found running pipe */
+ found_crtc = true;
+ dig_fe_mask = 1 << i;
+ dig_fe = i;
+ break;
+ }
+ }
+
+ if (found_crtc) {
+ /* loop through all running dig_be to find selected dig_fe */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+			/* is our dig_fe selected by this dig_be? */
+ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+ if (dig_fe_mask & digs_fe_selected &&
+			    /* is the dig_be in sst mode? */
+ dig_be_mode == NI_DIG_BE_DPSST) {
+ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+ ni_dig_offsets[i]);
+ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+ ni_tx_offsets[i]);
+				/* dig_be enabled and tx running */
+ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+ is_enabled = true;
+ *ret_dig_fe = dig_fe;
+ break;
+ }
+ }
+ }
+ }
+
+ return is_enabled;
+}
+
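
The FE-to-BE match in evergreen_is_dp_sst_stream_enabled() relies on a one-hot mask: dig_fe_mask is 1 << i for the front end driving the crtc, while NI_DIG_BE_CNTL_FE_SOURCE_SELECT() extracts a bit-per-FE field from the back-end register, so a non-zero AND means that back end routes from our front end. A tiny worked example with made-up values:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned i = 2;                    /* FE index that drives the crtc */
	unsigned dig_fe_mask = 1u << i;    /* one-hot: 0x04 */
	unsigned digs_fe_selected = 0x04;  /* bit-per-FE field read from the BE */
	bool match = (dig_fe_mask & digs_fe_selected) != 0;

	printf("this BE routes from our FE: %s\n", match ? "yes" : "no");
	return 0;
}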
+/*
+ * Blank the dig when in dp sst mode.
+ * The dig ignores the crtc timing.
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+ unsigned dig_fe)
+{
+ unsigned stream_ctrl;
+ unsigned fifo_ctrl;
+ unsigned counter = 0;
+
+ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+		DRM_ERROR("dig %d should be enabled\n", dig_fe);
+ return;
+ }
+
+	stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+ msleep(1);
+ counter++;
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ }
+	if (counter >= 32)
+ DRM_ERROR("counter exceeds %d\n", counter);
+
+ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
+
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
+ unsigned dig_fe;
if (!ASIC_IS_NODCE(rdev)) {
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
-
+		/*
+		 * We should disable the dig if it drives a dp sst stream, but
+		 * we are in radeon_device_init and the topology is unknown; it
+		 * only becomes available after radeon_modeset_init.
+		 * radeon_atom_encoder_dpms_dig would do the job if we
+		 * initialized it properly; for now we do it manually.
+		 */
+ if (ASIC_IS_DCE5(rdev) &&
+ evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
+ evergreen_blank_dp_output(rdev, dig_fe);
+		/* we could remove the 6 lines below */
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 46f87d4aaf31..9e93205eb9e4 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1816,8 +1816,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (idx_value & 0xfffffff0) +
- ((u64)(tmp & 0xff) << 32);
+ (idx_value & 0xfffffff0) +
+ ((u64)(tmp & 0xff) << 32);
ib[idx + 0] = offset;
ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -1862,8 +1862,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- idx_value +
- ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
@@ -1897,8 +1897,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- idx_value +
- ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
@@ -1925,8 +1925,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- radeon_get_ib_value(p, idx+1) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ radeon_get_ib_value(p, idx+1) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -2098,8 +2098,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -2239,8 +2239,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffff8;
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -2261,8 +2261,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffffc;
ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -2283,8 +2283,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffffc;
ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 3cf04a2f44bb..f766c967a284 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -206,7 +206,7 @@ void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder,
* build a AVI Info Frame
*/
void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
- unsigned char *buffer, size_t size)
+ unsigned char *buffer, size_t size)
{
uint8_t *frame = buffer + 3;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
+/* DIG block */
+#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL 0x7000
+# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
+# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
+
+
+#define NI_DIG_BE_CNTL 0x7140
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)	(((x) >> 8) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x)			(((x) >> 16) & 0x7)
+
+#define NI_DIG_BE_EN_CNTL 0x7144
+# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
+# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
+# define NI_DIG_BE_DPSST 0
/* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
+# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
+# define EVERGREEN_DP_VID_STREAM_STATUS		(1 << 16)
+#define EVERGREEN_DP_STEER_FIFO 0x7310
+# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
+/* DCIO_UNIPHY block */
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1	(0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1	(0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
+# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
+
#endif
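
The new *_REGISTER_OFFSET macros above are deltas from each block's instance-0 address, so register instance i is simply base + offset; that is how the ni_dig_offsets[], ni_tx_offsets[] and evergreen_dp_offsets[] tables added in evergreen.c are indexed. A short sketch using the values from this hunk:

#include <stdio.h>

/* values copied from the hunk above */
#define NI_DIG_FE_CNTL          0x7000
#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)

int main(void)
{
	/* instance i of a register is base + per-block offset */
	printf("DIG0 FE_CNTL: 0x%05x\n", NI_DIG_FE_CNTL + NI_DIG0_REGISTER_OFFSET);
	printf("DIG1 FE_CNTL: 0x%05x\n", NI_DIG_FE_CNTL + NI_DIG1_REGISTER_OFFSET);
	return 0;
}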
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 2d71da448487..d0240743a17c 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2640,7 +2640,7 @@ static int kv_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct kv_ps *ps;
@@ -2738,7 +2738,7 @@ int kv_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- pi->sram_end = SMC_RAM_END;
+ pi->sram_end = SMC_RAM_END;
/* Enabling nb dpm on an asrock system prevents dpm from working */
if (rdev->pdev->subsystem_vendor == 0x1849)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 158872eb78e4..b88d63c9be99 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1257,7 +1257,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
tmp = RREG32_CG(CG_CGTT_LOCAL_0);
tmp &= ~0x00380000;
WREG32_CG(CG_CGTT_LOCAL_0, tmp);
- tmp = RREG32_CG(CG_CGTT_LOCAL_1);
+ tmp = RREG32_CG(CG_CGTT_LOCAL_1);
tmp &= ~0x0e000000;
WREG32_CG(CG_CGTT_LOCAL_1, tmp);
}
@@ -2634,7 +2634,7 @@ int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
struct atom_clock_dividers dividers;
int r, i;
- r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
ecclk, false, &dividers);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c3d531a1114b..4a601f990562 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -725,9 +725,9 @@ extern int ni_mc_load_microcode(struct radeon_device *rdev);
struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
{
- struct ni_power_info *pi = rdev->pm.dpm.priv;
+ struct ni_power_info *pi = rdev->pm.dpm.priv;
- return pi;
+ return pi;
}
struct ni_ps *ni_get_ps(struct radeon_ps *rps)
@@ -1096,9 +1096,9 @@ static void ni_stop_smc(struct radeon_device *rdev)
static int ni_process_firmware_header(struct radeon_device *rdev)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
u32 tmp;
int ret;
@@ -1202,14 +1202,14 @@ static int ni_enter_ulp_state(struct radeon_device *rdev)
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
if (pi->gfx_clock_gating) {
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
RREG32(GB_ADDR_CONFIG);
- }
+ }
WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
- ~HOST_SMC_MSG_MASK);
+ ~HOST_SMC_MSG_MASK);
udelay(25000);
@@ -1321,12 +1321,12 @@ static void ni_populate_mvdd_value(struct radeon_device *rdev,
u32 mclk,
NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
if (!pi->mvdd_control) {
voltage->index = eg_pi->mvdd_high_index;
- voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+ voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
return;
}
@@ -1510,47 +1510,47 @@ int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
u32 mc_cg_config;
switch (arb_freq_src) {
- case MC_CG_ARB_FREQ_F0:
+ case MC_CG_ARB_FREQ_F0:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
break;
- case MC_CG_ARB_FREQ_F1:
+ case MC_CG_ARB_FREQ_F1:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
break;
- case MC_CG_ARB_FREQ_F2:
+ case MC_CG_ARB_FREQ_F2:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
break;
- case MC_CG_ARB_FREQ_F3:
+ case MC_CG_ARB_FREQ_F3:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
break;
- default:
+ default:
return -EINVAL;
}
switch (arb_freq_dest) {
- case MC_CG_ARB_FREQ_F0:
+ case MC_CG_ARB_FREQ_F0:
WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
break;
- case MC_CG_ARB_FREQ_F1:
+ case MC_CG_ARB_FREQ_F1:
WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
break;
- case MC_CG_ARB_FREQ_F2:
+ case MC_CG_ARB_FREQ_F2:
WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
break;
- case MC_CG_ARB_FREQ_F3:
+ case MC_CG_ARB_FREQ_F3:
WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
@@ -1621,9 +1621,7 @@ static int ni_populate_memory_timing_parameters(struct radeon_device *rdev,
(u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);
- radeon_atom_set_engine_dram_timings(rdev,
- pl->sclk,
- pl->mclk);
+ radeon_atom_set_engine_dram_timings(rdev, pl->sclk, pl->mclk);
dram_timing = RREG32(MC_ARB_DRAM_TIMING);
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -1867,9 +1865,9 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
- if (pi->mem_gddr5)
- mpll_dq_func_cntl &= ~PDNB;
- mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
+ if (pi->mem_gddr5)
+ mpll_dq_func_cntl &= ~PDNB;
+ mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
@@ -1891,15 +1889,15 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
MRDCKD1_PDNB);
dll_cntl |= (MRDCKA0_BYPASS |
- MRDCKA1_BYPASS |
- MRDCKB0_BYPASS |
- MRDCKB1_BYPASS |
- MRDCKC0_BYPASS |
- MRDCKC1_BYPASS |
- MRDCKD0_BYPASS |
- MRDCKD1_BYPASS);
-
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+ MRDCKA1_BYPASS |
+ MRDCKB0_BYPASS |
+ MRDCKB1_BYPASS |
+ MRDCKC0_BYPASS |
+ MRDCKC1_BYPASS |
+ MRDCKD0_BYPASS |
+ MRDCKD1_BYPASS);
+
+ spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
@@ -2089,7 +2087,7 @@ static int ni_populate_sclk_value(struct radeon_device *rdev,
static int ni_init_smc_spll_table(struct radeon_device *rdev)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
NISLANDS_SMC_SCLK_VALUE sclk_params;
@@ -2311,8 +2309,8 @@ static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
int ret;
bool dll_state_on;
u16 std_vddc;
@@ -2391,8 +2389,8 @@ static int ni_populate_smc_t(struct radeon_device *rdev,
struct radeon_ps *radeon_state,
NISLANDS_SMC_SWSTATE *smc_state)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_ps *state = ni_get_ps(radeon_state);
u32 a_t;
u32 t_l, t_h;
@@ -2451,8 +2449,8 @@ static int ni_populate_power_containment_values(struct radeon_device *rdev,
struct radeon_ps *radeon_state,
NISLANDS_SMC_SWSTATE *smc_state)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
struct ni_ps *state = ni_get_ps(radeon_state);
u32 prev_sclk;
@@ -2595,7 +2593,7 @@ static int ni_enable_power_containment(struct radeon_device *rdev,
struct radeon_ps *radeon_new_state,
bool enable)
{
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
PPSMC_Result smc_result;
int ret = 0;
@@ -2625,7 +2623,7 @@ static int ni_convert_power_state_to_smc(struct radeon_device *rdev,
struct radeon_ps *radeon_state,
NISLANDS_SMC_SWSTATE *smc_state)
{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
struct ni_ps *state = ni_get_ps(radeon_state);
int i, ret;
@@ -2770,46 +2768,46 @@ static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
bool result = true;
switch (in_reg) {
- case MC_SEQ_RAS_TIMING >> 2:
+ case MC_SEQ_RAS_TIMING >> 2:
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
break;
- case MC_SEQ_CAS_TIMING >> 2:
+ case MC_SEQ_CAS_TIMING >> 2:
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING >> 2:
+ case MC_SEQ_MISC_TIMING >> 2:
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING2 >> 2:
+ case MC_SEQ_MISC_TIMING2 >> 2:
*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D0 >> 2:
+ case MC_SEQ_RD_CTL_D0 >> 2:
*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D1 >> 2:
+ case MC_SEQ_RD_CTL_D1 >> 2:
*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D0 >> 2:
+ case MC_SEQ_WR_CTL_D0 >> 2:
*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D1 >> 2:
+ case MC_SEQ_WR_CTL_D1 >> 2:
*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
break;
- case MC_PMG_CMD_EMRS >> 2:
+ case MC_PMG_CMD_EMRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS >> 2:
+ case MC_PMG_CMD_MRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS1 >> 2:
+ case MC_PMG_CMD_MRS1 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
break;
- case MC_SEQ_PMG_TIMING >> 2:
+ case MC_SEQ_PMG_TIMING >> 2:
*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
break;
- case MC_PMG_CMD_MRS2 >> 2:
+ case MC_PMG_CMD_MRS2 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
break;
- default:
+ default:
result = false;
break;
}
@@ -2876,9 +2874,9 @@ static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
u8 module_index = rv770_get_memory_module_index(rdev);
- table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
+ table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
@@ -2896,25 +2894,25 @@ static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
- if (ret)
- goto init_mc_done;
+ if (ret)
+ goto init_mc_done;
ret = ni_copy_vbios_mc_reg_table(table, ni_table);
- if (ret)
- goto init_mc_done;
+ if (ret)
+ goto init_mc_done;
ni_set_s0_mc_reg_index(ni_table);
ret = ni_set_mc_special_registers(rdev, ni_table);
- if (ret)
- goto init_mc_done;
+ if (ret)
+ goto init_mc_done;
ni_set_valid_flag(ni_table);
init_mc_done:
- kfree(table);
+ kfree(table);
return ret;
}
@@ -2994,7 +2992,7 @@ static int ni_populate_mc_reg_table(struct radeon_device *rdev,
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
@@ -3025,7 +3023,7 @@ static int ni_upload_mc_reg_table(struct radeon_device *rdev,
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
u16 address;
@@ -3142,7 +3140,7 @@ static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
struct ni_power_info *ni_pi = ni_get_pi(rdev);
PP_NIslands_CACTABLES *cac_tables = NULL;
int i, ret;
- u32 reg;
+ u32 reg;
if (ni_pi->enable_cac == false)
return 0;
@@ -3422,13 +3420,13 @@ static int ni_pcie_performance_request(struct radeon_device *rdev,
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
- (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
+ (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
if (eg_pi->pcie_performance_request_registered == false)
radeon_acpi_pcie_notify_device_ready(rdev);
eg_pi->pcie_performance_request_registered = true;
return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
- eg_pi->pcie_performance_request_registered) {
+ eg_pi->pcie_performance_request_registered) {
eg_pi->pcie_performance_request_registered = false;
return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
}
@@ -3441,12 +3439,12 @@ static int ni_advertise_gen2_capability(struct radeon_device *rdev)
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 tmp;
- tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
- (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
- pi->pcie_gen2 = true;
- else
+ if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
+ pi->pcie_gen2 = true;
+ else
pi->pcie_gen2 = false;
if (!pi->pcie_gen2)
@@ -3458,8 +3456,8 @@ static int ni_advertise_gen2_capability(struct radeon_device *rdev)
static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
bool enable)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- u32 tmp, bif;
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ u32 tmp, bif;
tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
@@ -3502,7 +3500,7 @@ static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
if (enable)
WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
else
- WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
+ WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}
void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
@@ -3563,7 +3561,7 @@ void ni_update_current_ps(struct radeon_device *rdev,
{
struct ni_ps *new_ps = ni_get_ps(rps);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
@@ -3575,7 +3573,7 @@ void ni_update_requested_ps(struct radeon_device *rdev,
{
struct ni_ps *new_ps = ni_get_ps(rps);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
@@ -3591,8 +3589,8 @@ int ni_dpm_enable(struct radeon_device *rdev)
if (pi->gfx_clock_gating)
ni_cg_clockgating_default(rdev);
- if (btc_dpm_enabled(rdev))
- return -EINVAL;
+ if (btc_dpm_enabled(rdev))
+ return -EINVAL;
if (pi->mg_clock_gating)
ni_mg_clockgating_default(rdev);
if (eg_pi->ls_clock_gating)
@@ -3991,7 +3989,7 @@ static int ni_parse_power_table(struct radeon_device *rdev)
union pplib_clock_info *clock_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
struct ni_ps *ps;
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index da310a70c0f0..827ccc87cbc3 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -109,6 +109,8 @@
#define NI_DP_MSE_SAT2 0x7398
#define NI_DP_MSE_SAT_UPDATE 0x739c
+# define NI_DP_MSE_SAT_UPDATE_MASK 0x3
+# define NI_DP_MSE_16_MTP_KEEPOUT 0x100
#define NI_DIG_BE_CNTL 0x7140
# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5eae0a88dd3e..6e478a248628 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3732,11 +3732,17 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
}
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
+ r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+ RADEON_USEC_IB_TEST_TIMEOUT));
+ if (r < 0) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
goto free_ib;
+ } else if (r == 0) {
+ DRM_ERROR("radeon: fence wait timed out.\n");
+ r = -ETIMEDOUT;
+ goto free_ib;
}
+ r = 0;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cc2fdf0be37a..f86ab695ee8f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -235,8 +235,8 @@ int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
fb_div |= 1;
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
+ if (r)
+ return r;
/* assert PLL_RESET */
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
@@ -1490,7 +1490,7 @@ static int r600_mc_init(struct radeon_device *rdev)
rdev->fastfb_working = true;
}
}
- }
+ }
}
radeon_update_bandwidth_info(rdev);
@@ -3381,11 +3381,17 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
}
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
+ r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+ RADEON_USEC_IB_TEST_TIMEOUT));
+ if (r < 0) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
goto free_ib;
+ } else if (r == 0) {
+ DRM_ERROR("radeon: fence wait timed out.\n");
+ r = -ETIMEDOUT;
+ goto free_ib;
}
+ r = 0;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
@@ -4568,7 +4574,7 @@ uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 2f36fa1576e0..b69c8de35bd3 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1671,8 +1671,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (idx_value & 0xfffffff0) +
- ((u64)(tmp & 0xff) << 32);
+ (idx_value & 0xfffffff0) +
+ ((u64)(tmp & 0xff) << 32);
ib[idx + 0] = offset;
ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -1712,8 +1712,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- idx_value +
- ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
@@ -1764,8 +1764,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -1876,8 +1876,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffff8;
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -1898,8 +1898,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffffc;
ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index d2dd29ab24fa..fb65e6fb5c4f 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -368,11 +368,16 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
}
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
+ r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
+ RADEON_USEC_IB_TEST_TIMEOUT));
+ if (r < 0) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
return r;
+ } else if (r == 0) {
+ DRM_ERROR("radeon: fence wait timed out.\n");
+ return -ETIMEDOUT;
}
+ r = 0;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa2154493cf1..6a4b020dd0b4 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -844,7 +844,7 @@ int r600_get_platform_caps(struct radeon_device *rdev)
struct radeon_mode_info *mode_info = &rdev->mode_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
@@ -874,7 +874,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
union fan_info *fan_info;
ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
int ret, i;
@@ -1070,7 +1070,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
ext_hdr->usVCETableOffset) {
VCEClockInfoArray *array = (VCEClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
(mode_info->atom_context->bios + data_offset +
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e85894ade95c..e82a99cb2459 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -215,7 +215,7 @@ void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset,
* build a HDMI Video Info Frame
*/
void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
- unsigned char *buffer, size_t size)
+ unsigned char *buffer, size_t size)
{
uint8_t *frame = buffer + 3;
@@ -312,7 +312,7 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
}
void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
struct radeon_encoder *radeon_encoder;
struct radeon_encoder_atom_dig *dig;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 78a51b3eda10..007be29a0020 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -120,6 +120,7 @@ extern int radeon_mst;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
+#define RADEON_USEC_IB_TEST_TIMEOUT 1000000 /* 1s */
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_COMPONENTS 32
@@ -382,6 +383,7 @@ void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
+long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, long timeout);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
index be4af76f213d..cd872f7953c6 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.h
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -291,6 +291,8 @@ int radeon_atif_handler(struct radeon_device *rdev,
# define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
# define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
# define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
+# define ATPX_DGPU_CAN_DRIVE_DISPLAYS (1 << 12)
+# define ATPX_MS_HYBRID_GFX_SUPPORTED (1 << 14)
#define ATPX_FUNCTION_POWER_CONTROL 0x2
/* ARG0: ATPX_FUNCTION_POWER_CONTROL
* ARG1:
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index de9a2ffcf5f7..f8097a0e7a79 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2095,7 +2095,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
struct radeon_i2c_bus_rec i2c_bus;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
@@ -2575,7 +2575,7 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
bool valid;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
@@ -2666,7 +2666,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
bool valid;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index c4b4f298a283..95f4fea89302 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -143,7 +143,10 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
/* make sure required functions are enabled */
/* dGPU power control is required */
- atpx->functions.power_cntl = true;
+ if (atpx->functions.power_cntl == false) {
+ printk("ATPX dGPU power cntl not present, forcing\n");
+ atpx->functions.power_cntl = true;
+ }
if (atpx->functions.px_params) {
union acpi_object *info;
@@ -551,13 +554,14 @@ static bool radeon_atpx_detect(void)
void radeon_register_atpx_handler(void)
{
bool r;
+ enum vga_switcheroo_handler_flags_t handler_flags = 0;
/* detect if we have any ATPX + 2 VGA in the system */
r = radeon_atpx_detect();
if (!r)
return;
- vga_switcheroo_register_handler(&radeon_atpx_handler);
+ vga_switcheroo_register_handler(&radeon_atpx_handler, handler_flags);
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index a9b01bcf7d0a..432480ff9d22 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -34,7 +34,6 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#endif /* CONFIG_PPC_PMAC */
/* from radeon_legacy_encoder.c */
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 340f3f549f29..81a63d7f5cd9 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -34,6 +34,7 @@
#include "atom.h"
#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
static int radeon_dp_handle_hpd(struct drm_connector *connector)
{
@@ -344,6 +345,11 @@ static void radeon_connector_get_edid(struct drm_connector *connector)
else if (radeon_connector->ddc_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
+ } else if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC &&
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+ radeon_connector->ddc_bus) {
+ radeon_connector->edid = drm_get_edid_switcheroo(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
} else if (radeon_connector->ddc_bus) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
@@ -1996,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
- if (radeon_audio != 0)
+ if (radeon_audio != 0) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
+ }
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.output_csc_property,
@@ -2124,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -2179,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
@@ -2231,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
+ radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (ASIC_IS_DCE5(rdev))
drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 902b59cebac5..d0826fb0434c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1155,9 +1155,9 @@ static void radeon_check_arguments(struct radeon_device *rdev)
radeon_vm_size = 4;
}
- /*
- * Max GPUVM size for Cayman, SI and CI are 40 bits.
- */
+ /*
+ * Max GPUVM size for Cayman, SI and CI are 40 bits.
+ */
if (radeon_vm_size > 1024) {
dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
@@ -1299,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
}
rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
- radeon_family_name[rdev->family], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device);
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues */
@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
- drm_helper_hpd_irq_event(dev);
/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
@@ -1896,7 +1895,7 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase "
- "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
+ "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
return -EINVAL;
}
rdev->debugfs[rdev->debugfs_count].files = files;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 2b9ba03a7c1a..fcc7483d3f7b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -407,7 +407,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
unsigned repcnt = 4;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
- down_read(&rdev->exclusive_lock);
+ down_read(&rdev->exclusive_lock);
if (work->fence) {
struct radeon_fence *fence;
@@ -455,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- while (radeon_crtc->enabled && repcnt--) {
+ while (radeon_crtc->enabled && --repcnt) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -471,13 +471,13 @@ static void radeon_flip_work_func(struct work_struct *__work)
break;
/* Sleep at least until estimated real start of hw vblank */
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
if (min_udelay > vblank->framedur_ns / 2000) {
/* Don't wait ridiculously long - something is wrong */
repcnt = 0;
break;
}
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
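/*
 * Editorial side note on the "--repcnt" change above: with an unsigned
 * counter, "while (cond && repcnt--)" still decrements on the final,
 * failing test, so repcnt wraps to UINT_MAX when the retry budget runs
 * out and a later "if (!repcnt)" check can never fire. Pre-decrement
 * exits with repcnt == 0 instead (sketch; repcnt must start > 0):
 */
static unsigned example_retry_budget(unsigned repcnt, bool (*again)(void))
{
	while (again() && --repcnt)
		;			/* keep retrying */
	return repcnt;			/* 0 now reliably means "exhausted" */
}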
@@ -919,7 +919,7 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
*den /= tmp;
/* make sure nominator is large enough */
- if (*nom < nom_min) {
+ if (*nom < nom_min) {
tmp = DIV_ROUND_UP(nom_min, *nom);
*nom *= tmp;
*den *= tmp;
@@ -959,7 +959,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
- if (*fb_div > fb_div_max) {
+ if (*fb_div > fb_div_max) {
*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
*fb_div = fb_div_max;
}
@@ -1683,10 +1683,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* setup afmt */
radeon_afmt_init(rdev);
- if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
- radeon_fbdev_init(rdev);
- drm_kms_helper_poll_init(rdev->ddev);
- }
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
/* do pm late init */
ret = radeon_pm_late_init(rdev);
@@ -1699,6 +1697,9 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_fbdev_fini(rdev);
kfree(rdev->mode_info.bios_hardcoded_edid);
+ /* free i2c buses */
+ radeon_i2c_fini(rdev);
+
if (rdev->mode_info.mode_config_initialized) {
radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
@@ -1706,8 +1707,6 @@ void radeon_modeset_fini(struct radeon_device *rdev)
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
- /* free i2c buses */
- radeon_i2c_fini(rdev);
}
static bool is_hdtv_mode(const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 3b0c229d7dcd..db64e0062689 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
tmp &= AUX_HPD_SEL(0x7);
tmp |= AUX_HPD_SEL(chan->rec.hpd);
- tmp |= AUX_EN | AUX_LS_READ_EN;
+ tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
WREG32(AUX_CONTROL + aux_offset[instance], tmp);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index df7a1719c841..de504ea29c06 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -89,8 +89,16 @@ static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
do {
+ unsigned value1, value2;
+ udelay(10);
temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
- } while ((temp & 0x1) && retries++ < 10000);
+
+ value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
+ value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
+
+ if (!value1 && !value2)
+ break;
+ } while (retries++ < 50);
if (retries > 50)
DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@ static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn
return 0;
}
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
{
struct drm_device *dev = mst->base.dev;
struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
uint32_t val, temp;
uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
int retries = 0;
+ uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
+ uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
@@ -165,6 +175,7 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, ui
do {
temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+ udelay(10);
} while ((temp & 0x1) && (retries++ < 10000));
if (retries >= 10000)
@@ -246,14 +257,8 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(radeon_connector);
}
-static int radeon_connector_dpms(struct drm_connector *connector, int mode)
-{
- DRM_DEBUG_KMS("\n");
- return 0;
-}
-
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
- .dpms = radeon_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
int ret, slots;
-
+ s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
if (!ASIC_IS_DCE5(rdev)) {
DRM_ERROR("got mst dpms on non-DCE5\n");
return;
@@ -456,7 +461,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
mst_enc->enc_active = true;
radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
- radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+ fixed_pbn = drm_int2fixp(mst_enc->pbn);
+ fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
+ avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
+ radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
mst_enc->fe);
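/*
 * Editorial worked example for the fixed-point VCP math above: drm_fixp
 * values are s64 with 32 fractional bits (include/drm/drm_fixed.h), and
 * the << 26 suggests the rate register takes an integer X plus a 26-bit
 * fraction Y. A sketch that isolates the fraction in fixed point (via
 * drm_int2fixp) before shifting it up:
 */
static void example_vcp_rate(int pbn, int pbn_per_slot, u32 *x, u32 *y)
{
	s64 avg = drm_fixp_div(drm_int2fixp(pbn),
			       drm_int2fixp(pbn_per_slot));

	*x = drm_fixp2int(avg);			/* whole slots per MTP */
	*y = drm_fixp2int_ceil((avg - drm_int2fixp(*x)) << 26); /* fraction */
}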
@@ -510,6 +519,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
{
struct radeon_encoder_mst *mst_enc;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector_atom_dig *dig_connector;
int bpp = 24;
mst_enc = radeon_encoder->enc_priv;
@@ -523,22 +533,11 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
- {
- struct radeon_connector_atom_dig *dig_connector;
- int ret;
-
- dig_connector = mst_enc->connector->con_priv;
- ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
- dig_connector->dpcd, adjusted_mode->clock,
- &dig_connector->dp_lane_count,
- &dig_connector->dp_clock);
- if (ret) {
- dig_connector->dp_lane_count = 0;
- dig_connector->dp_clock = 0;
- }
- DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
- dig_connector->dp_lane_count, dig_connector->dp_clock);
- }
+ dig_connector = mst_enc->connector->con_priv;
+ dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+ dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
+ DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+ dig_connector->dp_lane_count, dig_connector->dp_clock);
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e266ffc520d2..ccd4ad4ee592 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -34,9 +34,11 @@
#include "radeon_drv.h"
#include <drm/drm_pciids.h>
+#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_gem.h>
@@ -319,6 +321,23 @@ static int radeon_pci_probe(struct pci_dev *pdev,
{
int ret;
+ /*
+ * Initialize amdkfd before starting radeon. If it was not loaded yet,
+ * defer radeon probing
+ */
+ ret = radeon_kfd_init();
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /*
+ * apple-gmux is needed on dual GPU MacBook Pro
+ * to probe the panel if we're the inactive GPU.
+ */
+ if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
+ apple_gmux_present() && pdev != vga_default_device() &&
+ !vga_switcheroo_handler_flags())
+ return -EPROBE_DEFER;
+
/* Get rid of things like offb */
ret = radeon_kick_out_firmware_fb(pdev);
if (ret)
@@ -570,8 +589,6 @@ static int __init radeon_init(void)
return -EINVAL;
}
- radeon_kfd_init();
-
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
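/*
 * Editorial sketch of the deferral pattern used in radeon_pci_probe()
 * above: returning -EPROBE_DEFER asks the driver core to queue the
 * device for another probe pass once more drivers have loaded.
 * some_dependency_ready() is a hypothetical stand-in for the amdkfd and
 * apple-gmux checks.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (!some_dependency_ready())	/* hypothetical helper */
		return -EPROBE_DEFER;	/* driver core retries later */
	return 0;			/* proceed with normal init */
}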
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d2e628eea53d..0e3143acb565 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -38,9 +38,9 @@
#include <linux/vga_switcheroo.h>
/* object hierarchy -
- this contains a helper + a radeon fb
- the helper contains a pointer to radeon framebuffer baseclass.
-*/
+ * this contains a helper + a radeon fb
+ * the helper contains a pointer to radeon framebuffer baseclass.
+ */
struct radeon_fbdev {
struct drm_fb_helper helper;
struct radeon_framebuffer rfb;
@@ -292,7 +292,8 @@ out_unref:
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
- drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
@@ -325,6 +326,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
int bpp_sel = 32;
int ret;
+ /* don't enable fbdev if no connectors */
+ if (list_empty(&rdev->ddev->mode_config.connector_list))
+ return 0;
+
/* select 8 bpp console on RN50 or 16MB cards */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
@@ -377,11 +382,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
- fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+ if (rdev->mode_info.rfbdev)
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
+ if (!rdev->mode_info.rfbdev)
+ return false;
+
if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
@@ -389,12 +398,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
void radeon_fbdev_restore_mode(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 05815c47b246..7ef075acde9c 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -527,7 +527,7 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
}
/**
- * radeon_fence_wait - wait for a fence to signal
+ * radeon_fence_wait_timeout - wait for a fence to signal with timeout
*
* @fence: radeon fence object
* @intr: use interruptible sleep
@@ -535,12 +535,15 @@ static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
* Wait for the requested fence to signal (all asics).
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the fence.
- * Returns 0 if the fence has passed, error for all other cases.
+ * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
+ * Returns remaining time if the sequence number has passed, 0 if the
+ * wait timed out, or an error for all other cases.
*/
-int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
+ int r_sig;
/*
* This function should not be called on !radeon fences.
@@ -552,15 +555,36 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
return fence_wait(&fence->base, intr);
seq[fence->ring] = fence->seq;
- r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
- if (r < 0) {
+ r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
+ if (r <= 0) {
return r;
}
- r = fence_signal(&fence->base);
- if (!r)
+ r_sig = fence_signal(&fence->base);
+ if (!r_sig)
FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
- return 0;
+ return r;
+}
+
+/**
+ * radeon_fence_wait - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+{
+ long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+ if (r > 0) {
+ return 0;
+ } else {
+ return r;
+ }
}
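/*
 * Editorial sketch (not part of the patch): how callers map the new
 * long-valued radeon_fence_wait_timeout() onto the old 0/-errno
 * contract, as the IB tests in this series now do. A positive return
 * is the number of jiffies left before the timeout would have expired.
 */
static int example_bounded_wait(struct radeon_fence *fence)
{
	long r = radeon_fence_wait_timeout(fence, false,
			usecs_to_jiffies(RADEON_USEC_IB_TEST_TIMEOUT));

	if (r < 0)
		return r;		/* interrupted or wait error */
	if (r == 0)
		return -ETIMEDOUT;	/* fence never signaled in time */
	return 0;			/* signaled with time to spare */
}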
/**
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index c39ce1f05703..92ce0e533bc0 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -274,7 +274,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
if (i == RADEON_RING_TYPE_GFX_INDEX) {
/* oh, oh, that's really bad */
DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
- rdev->accel_working = false;
+ rdev->accel_working = false;
return r;
} else {
@@ -304,7 +304,7 @@ static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
}
static struct drm_info_list radeon_debugfs_sa_list[] = {
- {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 979f3bf65f2c..1e9304d1c88f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -291,6 +291,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
if (r) {
return r;
}
+ rdev->ddev->vblank_disable_allowed = true;
+
/* enable msi */
rdev->msi_enabled = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 9a4d69e59401..87a9ebb5f58f 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -132,35 +132,34 @@ static const struct kfd2kgd_calls kfd2kgd = {
static const struct kgd2kfd_calls *kgd2kfd;
-bool radeon_kfd_init(void)
+int radeon_kfd_init(void)
{
+ int ret;
+
#if defined(CONFIG_HSA_AMD_MODULE)
- bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+ int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
kgd2kfd_init_p = symbol_request(kgd2kfd_init);
if (kgd2kfd_init_p == NULL)
- return false;
+ return -ENOENT;
- if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+ ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
+ if (ret) {
symbol_put(kgd2kfd_init);
kgd2kfd = NULL;
-
- return false;
}
- return true;
#elif defined(CONFIG_HSA_AMD)
- if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+ ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
+ if (ret)
kgd2kfd = NULL;
- return false;
- }
-
- return true;
#else
- return false;
+ ret = -ENOENT;
#endif
+
+ return ret;
}
void radeon_kfd_fini(void)
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/drivers/gpu/drm/radeon/radeon_kfd.h
index 1103f9082f6b..9df1fea8e971 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.h
+++ b/drivers/gpu/drm/radeon/radeon_kfd.h
@@ -33,7 +33,7 @@
struct radeon_device;
-bool radeon_kfd_init(void);
+int radeon_kfd_init(void);
void radeon_kfd_fini(void);
void radeon_kfd_suspend(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 32b338ff436b..478d4099b0d0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -331,13 +331,15 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_on(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_off(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 88dc973fb209..868c3ba2efaa 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -818,52 +818,52 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
~(RADEON_TMDS_TRANSMITTER_PLLRST);
- if (rdev->family == CHIP_R200 ||
- rdev->family == CHIP_R100 ||
- ASIC_IS_R300(rdev))
- tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
- else /* RV chips got this bit reversed */
- tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
-
- fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
- (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
- RADEON_FP_CRTC_DONT_SHADOW_HEND));
-
- fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
-
- fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
- RADEON_FP_DFP_SYNC_SEL |
- RADEON_FP_CRT_SYNC_SEL |
- RADEON_FP_CRTC_LOCK_8DOT |
- RADEON_FP_USE_SHADOW_EN |
- RADEON_FP_CRTC_USE_SHADOW_VEND |
- RADEON_FP_CRT_SYNC_ALT);
-
- if (1) /* FIXME rgbBits == 8 */
- fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
- else
- fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
-
- if (radeon_crtc->crtc_id == 0) {
- if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
- fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
- if (radeon_encoder->rmx_type != RMX_OFF)
- fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
- else
- fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
- } else
- fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
- } else {
- if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
- fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
- fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
- } else
- fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
- }
-
- WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
- WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
- WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+ if (rdev->family == CHIP_R200 ||
+ rdev->family == CHIP_R100 ||
+ ASIC_IS_R300(rdev))
+ tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
+ else /* RV chips got this bit reversed */
+ tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
+
+ fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
+ (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
+ RADEON_FP_CRTC_DONT_SHADOW_HEND));
+
+ fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+
+ fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
+ RADEON_FP_DFP_SYNC_SEL |
+ RADEON_FP_CRT_SYNC_SEL |
+ RADEON_FP_CRTC_LOCK_8DOT |
+ RADEON_FP_USE_SHADOW_EN |
+ RADEON_FP_CRTC_USE_SHADOW_VEND |
+ RADEON_FP_CRT_SYNC_ALT);
+
+ if (1) /* FIXME rgbBits == 8 */
+ fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
+ else
+ fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+ if (radeon_encoder->rmx_type != RMX_OFF)
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
+ else
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
+ } else
+ fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
+ } else {
+ if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
+ } else
+ fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
+ }
+
+ WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
+ WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
+ WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
if (rdev->is_atom_bios)
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index fb6ad143873f..2d901bf28a94 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -214,8 +214,8 @@ int radeon_bo_create(struct radeon_device *rdev,
INIT_LIST_HEAD(&bo->list);
INIT_LIST_HEAD(&bo->va);
bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
- RADEON_GEM_DOMAIN_GTT |
- RADEON_GEM_DOMAIN_CPU);
+ RADEON_GEM_DOMAIN_GTT |
+ RADEON_GEM_DOMAIN_CPU);
bo->flags = flags;
/* PCI GART is always snooped */
@@ -799,6 +799,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if ((offset + size) <= rdev->mc.visible_vram_size)
return 0;
+ /* Can't move a pinned BO to visible VRAM */
+ if (rbo->pin_count > 0)
+ return -EINVAL;
+
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
@@ -848,7 +852,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
*
*/
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
- bool shared)
+ bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0f14d897baf9..38226d925a5b 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -79,7 +79,7 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
- } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
@@ -1079,6 +1079,8 @@ force:
/* update display watermarks based on new power state */
radeon_bandwidth_update(rdev);
+ /* update displays */
+ radeon_dpm_display_configuration_changed(rdev);
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1095,9 +1097,6 @@ force:
radeon_dpm_post_set_power_state(rdev);
- /* update displays */
- radeon_dpm_display_configuration_changed(rdev);
-
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
rdev->pm.dpm.single_display = single_display;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index e6ad54cdfa62..b0eb28e8fb73 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -56,7 +56,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
}
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
- struct radeon_semaphore *semaphore)
+ struct radeon_semaphore *semaphore)
{
struct radeon_ring *ring = &rdev->ring[ridx];
@@ -73,7 +73,7 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
}
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
- struct radeon_semaphore *semaphore)
+ struct radeon_semaphore *semaphore)
{
struct radeon_ring *ring = &rdev->ring[ridx];
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e06ac546a90f..90f739478a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ if (radeon_ttm_tt_has_userptr(bo->ttm))
+ return -EPERM;
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
@@ -397,9 +399,15 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
+ struct radeon_bo *rbo;
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
+ /* Can't move a pinned BO */
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ if (WARN_ON_ONCE(rbo->pin_count > 0))
+ return -EINVAL;
+
rdev = radeon_get_rdev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
radeon_move_null(bo, new_mem);
@@ -554,8 +562,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
struct page **pages = ttm->pages + pinned;
- r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
+ r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
if (r < 0)
goto release_pages;
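/*
 * Editorial note: this tracks the kernel-wide get_user_pages() API change
 * that dropped the explicit task/mm arguments for callers operating on
 * current->mm; the shorter call is equivalent for this caller.
 */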
@@ -610,7 +617,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6edcb5485092..6fe9e4e76284 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -722,9 +722,11 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
return r;
}
-/* multiple fence commands without any stream commands in between can
- crash the vcpu so just try to emmit a dummy create/destroy msg to
- avoid this */
+/*
+ * multiple fence commands without any stream commands in between can
+ * crash the vcpu, so just try to emit a dummy create/destroy msg to
+ * avoid this
+ */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence)
{
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 7eb1ae758906..c1c619facb47 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -166,7 +166,7 @@ int radeon_vce_init(struct radeon_device *rdev)
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
atomic_set(&rdev->vce.handles[i], 0);
rdev->vce.filp[i] = NULL;
- }
+ }
return 0;
}
@@ -389,7 +389,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
}
if (fence)
@@ -446,7 +446,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
}
if (fence)
@@ -769,18 +769,18 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
- if (vce_v1_0_get_rptr(rdev, ring) != rptr)
- break;
- DRM_UDELAY(1);
+ if (vce_v1_0_get_rptr(rdev, ring) != rptr)
+ break;
+ DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
} else {
- DRM_ERROR("radeon: ring %d test failed\n",
- ring->idx);
- r = -ETIMEDOUT;
+ DRM_ERROR("radeon: ring %d test failed\n",
+ ring->idx);
+ r = -ETIMEDOUT;
}
return r;
@@ -810,11 +810,16 @@ int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
goto error;
}
- r = radeon_fence_wait(fence, false);
- if (r) {
+ r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
+ RADEON_USEC_IB_TEST_TIMEOUT));
+ if (r < 0) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ } else if (r == 0) {
+ DRM_ERROR("radeon: fence wait timed out.\n");
+ r = -ETIMEDOUT;
} else {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
}
error:
radeon_fence_unref(&fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 3979632b9225..a1358748cea5 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -611,15 +611,16 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
*/
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
- uint32_t hw_flags = 0;
- hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
- hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
- hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- hw_flags |= R600_PTE_SYSTEM;
- hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
- }
- return hw_flags;
+ uint32_t hw_flags = 0;
+
+ hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ hw_flags |= R600_PTE_SYSTEM;
+ hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return hw_flags;
}
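/*
 * Editorial example for the translation above (the function is static,
 * so this is illustrative only): a valid, readable, writeable, snooped
 * system-memory mapping yields
 *
 *   R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_WRITEABLE |
 *   R600_PTE_SYSTEM | R600_PTE_SNOOPED
 *
 * and R600_PTE_SNOOPED is only honoured when RADEON_VM_PAGE_SYSTEM is
 * also set.
 */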
/**
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index cb0afe78abed..94b48fc1e266 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -795,7 +795,7 @@ static int rs780_parse_power_table(struct radeon_device *rdev)
union pplib_clock_info *clock_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
struct igp_ps *ps;
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 97e5a6f1ce58..25e29303b119 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -209,7 +209,7 @@ static struct rv6xx_sclk_stepping rv6xx_next_vco_step(struct radeon_device *rdev
static bool rv6xx_can_step_post_div(struct radeon_device *rdev,
struct rv6xx_sclk_stepping *cur,
- struct rv6xx_sclk_stepping *target)
+ struct rv6xx_sclk_stepping *target)
{
return (cur->post_divider > target->post_divider) &&
((cur->vco_frequency * target->post_divider) <=
@@ -239,7 +239,7 @@ static bool rv6xx_reached_stepping_target(struct radeon_device *rdev,
static void rv6xx_generate_steps(struct radeon_device *rdev,
u32 low, u32 high,
- u32 start_index, u8 *end_index)
+ u32 start_index, u8 *end_index)
{
struct rv6xx_sclk_stepping cur;
struct rv6xx_sclk_stepping target;
@@ -1356,23 +1356,23 @@ static void rv6xx_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
enum radeon_dpm_event_src dpm_event_src;
switch (sources) {
- case 0:
- default:
+ case 0:
+ default:
want_thermal_protection = false;
break;
- case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
break;
- case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
break;
- case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
(1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
- want_thermal_protection = true;
+ want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
break;
}
@@ -1879,7 +1879,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
union pplib_clock_info *clock_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
struct rv6xx_ps *ps;
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
index c4c8da501da8..4b850824fe06 100644
--- a/drivers/gpu/drm/radeon/rv740_dpm.c
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -36,28 +36,28 @@ u32 rv740_get_decoded_reference_divider(u32 encoded_ref)
u32 ref = 0;
switch (encoded_ref) {
- case 0:
+ case 0:
ref = 1;
break;
- case 16:
+ case 16:
ref = 2;
break;
- case 17:
+ case 17:
ref = 3;
break;
- case 18:
+ case 18:
ref = 2;
break;
- case 19:
+ case 19:
ref = 3;
break;
- case 20:
+ case 20:
ref = 4;
break;
- case 21:
+ case 21:
ref = 5;
break;
- default:
+ default:
DRM_ERROR("Invalid encoded Reference Divider\n");
ref = 0;
break;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index e830c8935db0..a010decf59af 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -345,27 +345,27 @@ static int rv770_encode_yclk_post_div(u32 postdiv, u32 *encoded_postdiv)
int ret = 0;
switch (postdiv) {
- case 1:
+ case 1:
*encoded_postdiv = 0;
break;
- case 2:
+ case 2:
*encoded_postdiv = 1;
break;
- case 4:
+ case 4:
*encoded_postdiv = 2;
break;
- case 8:
+ case 8:
*encoded_postdiv = 3;
break;
- case 16:
+ case 16:
*encoded_postdiv = 4;
break;
- default:
+ default:
ret = -EINVAL;
break;
}
- return ret;
+ return ret;
}
u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
@@ -1175,15 +1175,15 @@ static int rv770_init_smc_table(struct radeon_device *rdev,
rv770_populate_smc_mvdd_table(rdev, table);
switch (rdev->pm.int_thermal_type) {
- case THERMAL_TYPE_RV770:
- case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
+ case THERMAL_TYPE_RV770:
+ case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
break;
- case THERMAL_TYPE_NONE:
+ case THERMAL_TYPE_NONE:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
break;
- case THERMAL_TYPE_EXTERNAL_GPIO:
- default:
+ case THERMAL_TYPE_EXTERNAL_GPIO:
+ default:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
break;
}
@@ -1567,18 +1567,18 @@ void rv770_reset_smio_status(struct radeon_device *rdev)
sw_smio_index =
(RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT;
switch (sw_smio_index) {
- case 3:
+ case 3:
vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL);
break;
- case 2:
+ case 2:
vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL);
break;
- case 1:
+ case 1:
vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL);
break;
- case 0:
+ case 0:
return;
- default:
+ default:
vid_smio_cntl = pi->s0_vid_lower_smio_cntl;
break;
}
@@ -1817,21 +1817,21 @@ static void rv770_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
enum radeon_dpm_event_src dpm_event_src;
switch (sources) {
- case 0:
- default:
+ case 0:
+ default:
want_thermal_protection = false;
break;
- case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
break;
- case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
break;
- case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
(1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
@@ -2273,7 +2273,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
union pplib_clock_info *clock_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
struct rv7xx_ps *ps;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f878d6962da5..ae21550fe767 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1307,7 +1307,7 @@ int si_get_allowed_info_register(struct radeon_device *rdev,
*/
u32 si_get_xclk(struct radeon_device *rdev)
{
- u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 reference_clock = rdev->clock.spll.reference_freq;
u32 tmp;
tmp = RREG32(CG_CLKPIN_CNTL_2);
@@ -2442,8 +2442,10 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
*/
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
- const u32 num_tile_mode_states = 32;
- u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+ u32 *tile = rdev->config.si.tile_mode_array;
+ const u32 num_tile_mode_states =
+ ARRAY_SIZE(rdev->config.si.tile_mode_array);
+ u32 reg_offset, split_equal_to_row_size;
switch (rdev->config.si.mem_row_size_in_kb) {
case 1:
@@ -2458,491 +2460,442 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
break;
}
- if ((rdev->family == CHIP_TAHITI) ||
- (rdev->family == CHIP_PITCAIRN)) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0: /* non-AA compressed depth or any compressed stencil */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 1: /* 2xAA/4xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 2: /* 8xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 8: /* 1D and 1D Array Surfaces */
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 9: /* Displayable maps. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 10: /* Display 8bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 11: /* Display 16bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 12: /* Display 32bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 13: /* Thin. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 14: /* Thin 8 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 15: /* Thin 16 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 16: /* Thin 32 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 17: /* Thin 64 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 21: /* 8 bpp PRT. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 22: /* 16 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 23: /* 32 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 24: /* 64 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 25: /* 128 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else if ((rdev->family == CHIP_VERDE) ||
- (rdev->family == CHIP_OLAND) ||
- (rdev->family == CHIP_HAINAN)) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0: /* non-AA compressed depth or any compressed stencil */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 1: /* 2xAA/4xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 2: /* 8xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 8: /* 1D and 1D Array Surfaces */
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 9: /* Displayable maps. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 10: /* Display 8bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 11: /* Display 16bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 12: /* Display 32bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 13: /* Thin. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 14: /* Thin 8 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 15: /* Thin 16 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 16: /* Thin 32 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 17: /* Thin 64 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 21: /* 8 bpp PRT. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 22: /* 16 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 23: /* 32 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 24: /* 64 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 25: /* 128 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ tile[reg_offset] = 0;
+
+ switch(rdev->family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ /* non-AA compressed depth or any compressed stencil */
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 2xAA/4xAA compressed depth only */
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 8xAA compressed depth only */
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+ tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+ tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+ tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+ tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 1D and 1D Array Surfaces */
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Displayable maps. */
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 8bpp. */
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 16bpp. */
+ tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 32bpp. */
+ tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Thin. */
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 8 bpp. */
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Thin 16 bpp. */
+ tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Thin 32 bpp. */
+ tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Thin 64 bpp. */
+ tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* 8 bpp PRT. */
+ tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 16 bpp PRT */
+ tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 32 bpp PRT */
+ tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 64 bpp PRT */
+ tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 128 bpp PRT */
+ tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ break;
+
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ /* non-AA compressed depth or any compressed stencil */
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 2xAA/4xAA compressed depth only */
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 8xAA compressed depth only */
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+ tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+ tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+ tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+ tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 1D and 1D Array Surfaces */
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Displayable maps. */
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 8bpp. */
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* Display 16bpp. */
+ tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 32bpp. */
+ tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin. */
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 8 bpp. */
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 16 bpp. */
+ tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 32 bpp. */
+ tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 64 bpp. */
+ tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 8 bpp PRT. */
+ tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 16 bpp PRT */
+ tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 32 bpp PRT */
+ tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 64 bpp PRT */
+ tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 128 bpp PRT */
+ tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ break;
+
+ default:
DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
+ }
}
static void si_select_se_sh(struct radeon_device *rdev,
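
The hunk above replaces the long per-register switch with a zero-initialized tile[] table that is filled per ASIC family and then flushed to the GB_TILE_MODE registers in one loop at the end. A minimal standalone sketch of that pattern follows; every name and value here is an illustrative stand-in, not the driver's real symbols or encodings:

#include <stdint.h>

#define NUM_TILE_STATES 32

static void program_tile_table(void (*wreg)(unsigned off, uint32_t val),
			       int use_gen_a_layout)
{
	uint32_t tile[NUM_TILE_STATES] = { 0 };	/* unused slots stay 0 */
	unsigned i;

	if (use_gen_a_layout) {			/* per-family encodings */
		tile[0] = 0x02010068;		/* placeholder values */
		tile[1] = 0x02010069;
	} else {
		tile[0] = 0x02010040;
		tile[1] = 0x02010041;
	}

	for (i = 0; i < NUM_TILE_STATES; i++)	/* single write loop */
		wreg(i * 4, tile[i]);
}
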
@@ -7314,7 +7267,7 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
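
For context on the hunk above: the GPU clock counter is a 64-bit value exposed as two 32-bit registers, so the driver latches it (RLC_CAPTURE_GPU_CLOCK_COUNT) under a mutex and then combines the halves. A hedged sketch of the combine step, with rreg32() standing in for the real register accessor:

#include <stdint.h>

static uint64_t read_counter64(uint32_t (*rreg32)(unsigned reg),
			       unsigned lsb_reg, unsigned msb_reg)
{
	/* caller holds the lock and has already latched the counter,
	 * so the two halves form a consistent snapshot */
	return (uint64_t)rreg32(lsb_reg) |
	       ((uint64_t)rreg32(msb_reg) << 32);
}
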
@@ -7775,33 +7728,33 @@ static void si_program_aspm(struct radeon_device *rdev)
int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
{
- unsigned i;
+ unsigned i;
- /* make sure VCEPLL_CTLREQ is deasserted */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+ /* make sure VCEPLL_CTLREQ is deasserted */
+ WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
- mdelay(10);
+ mdelay(10);
- /* assert UPLL_CTLREQ */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+ /* assert UPLL_CTLREQ */
+ WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
- /* wait for CTLACK and CTLACK2 to get asserted */
- for (i = 0; i < 100; ++i) {
- uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
- if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
- break;
- mdelay(10);
- }
+ /* wait for CTLACK and CTLACK2 to get asserted */
+ for (i = 0; i < 100; ++i) {
+ uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+ if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
+ break;
+ mdelay(10);
+ }
- /* deassert UPLL_CTLREQ */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+ /* deassert UPLL_CTLREQ */
+ WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
- if (i == 100) {
- DRM_ERROR("Timeout setting UVD clocks!\n");
- return -ETIMEDOUT;
- }
+ if (i == 100) {
+ DRM_ERROR("Timeout setting UVD clocks!\n");
+ return -ETIMEDOUT;
+ }
- return 0;
+ return 0;
}
int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
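
The reindented si_vce_send_vcepll_ctlreq() above follows a common bounded-poll handshake: assert the request bit, poll for the ACK bits with a fixed iteration budget, deassert, and report -ETIMEDOUT if the budget ran out. A self-contained sketch of the same shape, where read_status(), ack_mask and delay_ms() are assumptions for illustration:

#include <errno.h>
#include <stdint.h>

static int wait_for_ack(uint32_t (*read_status)(void), uint32_t ack_mask,
			void (*delay_ms)(unsigned ms))
{
	unsigned i;

	for (i = 0; i < 100; ++i) {		/* 100 * 10 ms, as above */
		if ((read_status() & ack_mask) == ack_mask)
			return 0;		/* all ACK bits observed */
		delay_ms(10);
	}
	return -ETIMEDOUT;			/* budget exhausted */
}
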
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a82b891ae1fe..e6abc09b67e3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -499,7 +499,7 @@ static const struct si_cac_config_reg lcac_pitcairn[] =
static const struct si_cac_config_reg cac_override_pitcairn[] =
{
- { 0xFFFFFFFF }
+ { 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_pitcairn =
@@ -991,7 +991,7 @@ static const struct si_cac_config_reg lcac_cape_verde[] =
static const struct si_cac_config_reg cac_override_cape_verde[] =
{
- { 0xFFFFFFFF }
+ { 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_cape_verde =
@@ -1762,9 +1762,9 @@ static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev);
static struct si_power_info *si_get_pi(struct radeon_device *rdev)
{
- struct si_power_info *pi = rdev->pm.dpm.priv;
+ struct si_power_info *pi = rdev->pm.dpm.priv;
- return pi;
+ return pi;
}
static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
@@ -2926,9 +2926,12 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
{ 0, 0, 0, 0 },
};
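
The entries added above extend a zero-terminated quirk table keyed on PCI vendor/device/subsystem IDs; each hit caps sclk/mclk for a board known to be unstable at full clocks. A simplified sketch of how such a table is walked (the struct layout here is illustrative, not si_dpm_quirk itself):

#include <stddef.h>

struct dpm_quirk {
	unsigned chip_vendor, chip_device;
	unsigned subsys_vendor, subsys_device;
	unsigned max_sclk, max_mclk;		/* 0 means no cap */
};

static const struct dpm_quirk *find_quirk(const struct dpm_quirk *tbl,
					  unsigned ven, unsigned dev,
					  unsigned sven, unsigned sdev)
{
	for (; tbl->chip_vendor; ++tbl)		/* { 0, ... } is the sentinel */
		if (tbl->chip_vendor == ven && tbl->chip_device == dev &&
		    tbl->subsys_vendor == sven && tbl->subsys_device == sdev)
			return tbl;
	return NULL;				/* board has no quirk */
}
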
@@ -3008,6 +3011,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
+ /* limit mclk on all R7 370 parts for stability */
+ if (rdev->pdev->device == 0x6811 &&
+ rdev->pdev->revision == 0x81)
+ max_mclk = 120000;
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
@@ -3150,9 +3157,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
- for (i = 0; i < ps->performance_level_count; i++)
- btc_adjust_clock_combinations(rdev, max_limits,
- &ps->performance_levels[i]);
+ for (i = 0; i < ps->performance_level_count; i++)
+ btc_adjust_clock_combinations(rdev, max_limits,
+ &ps->performance_levels[i]);
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].vddc < min_vce_voltage)
@@ -3291,7 +3298,7 @@ static void si_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
case 0:
default:
want_thermal_protection = false;
- break;
+ break;
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
@@ -3493,7 +3500,7 @@ static int si_process_firmware_header(struct radeon_device *rdev)
if (ret)
return ret;
- si_pi->state_table_start = tmp;
+ si_pi->state_table_start = tmp;
ret = si_read_smc_sram_dword(rdev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
@@ -3652,7 +3659,7 @@ static void si_program_response_times(struct radeon_device *rdev)
si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
- backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
+ backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
if (voltage_response_time == 0)
voltage_response_time = 1000;
@@ -3760,7 +3767,7 @@ static void si_setup_bsp(struct radeon_device *rdev)
&pi->pbsu);
- pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+ pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
WREG32(CG_BSP, pi->dsp);
@@ -4308,7 +4315,7 @@ static int si_populate_memory_timing_parameters(struct radeon_device *rdev,
radeon_atom_set_engine_dram_timings(rdev,
pl->sclk,
- pl->mclk);
+ pl->mclk);
dram_timing = RREG32(MC_ARB_DRAM_TIMING);
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -4343,7 +4350,7 @@ static int si_do_program_memory_timing_parameters(struct radeon_device *rdev,
si_pi->sram_end);
if (ret)
break;
- }
+ }
return ret;
}
@@ -4821,9 +4828,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(2);
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+ spll_func_cntl_3 |= SPLL_DITHEN;
if (pi->sclk_ss) {
struct radeon_atom_ss ss;
@@ -4930,15 +4937,15 @@ static int si_populate_mclk_value(struct radeon_device *rdev,
tmp = freq_nom / reference_clock;
tmp = tmp * tmp;
if (radeon_atombios_get_asic_ss_info(rdev, &ss,
- ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+ ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
u32 clks = reference_clock * 5 / ss.rate;
u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
- mpll_ss1 &= ~CLKV_MASK;
- mpll_ss1 |= CLKV(clkv);
+ mpll_ss1 &= ~CLKV_MASK;
+ mpll_ss1 |= CLKV(clkv);
- mpll_ss2 &= ~CLKS_MASK;
- mpll_ss2 |= CLKS(clks);
+ mpll_ss2 &= ~CLKS_MASK;
+ mpll_ss2 |= CLKS(clks);
}
}
@@ -5265,7 +5272,7 @@ static int si_convert_power_state_to_smc(struct radeon_device *rdev,
ni_pi->enable_power_containment = false;
ret = si_populate_sq_ramping_values(rdev, radeon_state, smc_state);
- if (ret)
+ if (ret)
ni_pi->enable_sq_ramping = false;
return si_populate_smc_t(rdev, radeon_state, smc_state);
@@ -5436,46 +5443,46 @@ static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
case MC_SEQ_RAS_TIMING >> 2:
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
break;
- case MC_SEQ_CAS_TIMING >> 2:
+ case MC_SEQ_CAS_TIMING >> 2:
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING >> 2:
+ case MC_SEQ_MISC_TIMING >> 2:
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING2 >> 2:
+ case MC_SEQ_MISC_TIMING2 >> 2:
*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D0 >> 2:
+ case MC_SEQ_RD_CTL_D0 >> 2:
*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D1 >> 2:
+ case MC_SEQ_RD_CTL_D1 >> 2:
*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D0 >> 2:
+ case MC_SEQ_WR_CTL_D0 >> 2:
*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D1 >> 2:
+ case MC_SEQ_WR_CTL_D1 >> 2:
*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
break;
- case MC_PMG_CMD_EMRS >> 2:
+ case MC_PMG_CMD_EMRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS >> 2:
+ case MC_PMG_CMD_MRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS1 >> 2:
+ case MC_PMG_CMD_MRS1 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
break;
- case MC_SEQ_PMG_TIMING >> 2:
+ case MC_SEQ_PMG_TIMING >> 2:
*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
break;
- case MC_PMG_CMD_MRS2 >> 2:
+ case MC_PMG_CMD_MRS2 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
break;
- case MC_SEQ_WR_CTL_2 >> 2:
+ case MC_SEQ_WR_CTL_2 >> 2:
*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
break;
- default:
+ default:
result = false;
break;
}
@@ -5562,19 +5569,19 @@ static int si_initialize_mc_reg_table(struct radeon_device *rdev)
WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
- ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
- if (ret)
- goto init_mc_done;
+ ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+ if (ret)
+ goto init_mc_done;
- ret = si_copy_vbios_mc_reg_table(table, si_table);
- if (ret)
- goto init_mc_done;
+ ret = si_copy_vbios_mc_reg_table(table, si_table);
+ if (ret)
+ goto init_mc_done;
si_set_s0_mc_reg_index(si_table);
ret = si_set_mc_special_registers(rdev, si_table);
- if (ret)
- goto init_mc_done;
+ if (ret)
+ goto init_mc_done;
si_set_valid_flag(si_table);
@@ -5715,10 +5722,10 @@ static int si_upload_mc_reg_table(struct radeon_device *rdev,
static void si_enable_voltage_control(struct radeon_device *rdev, bool enable)
{
- if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
- else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+ if (enable)
+ WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ else
+ WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
}
static enum radeon_pcie_gen si_get_maximum_link_speed(struct radeon_device *rdev,
@@ -6820,7 +6827,7 @@ static int si_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct ni_ps *ps;
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index cd0862809adf..f0d5c1724f55 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -787,8 +787,8 @@ static void sumo_program_acpi_power_level(struct radeon_device *rdev)
struct atom_clock_dividers dividers;
int ret;
- ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- pi->acpi_pl.sclk,
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ pi->acpi_pl.sclk,
false, &dividers);
if (ret)
return;
@@ -1462,7 +1462,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct sumo_ps *ps;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index d34bfcdab9be..6730367ac228 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -369,8 +369,8 @@ static void trinity_gfx_powergating_initialize(struct radeon_device *rdev)
int ret;
u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
- ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- 25000, false, &dividers);
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ 25000, false, &dividers);
if (ret)
return;
@@ -587,8 +587,8 @@ static void trinity_set_divider_value(struct radeon_device *rdev,
u32 value;
u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
- ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- sclk, false, &dividers);
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ sclk, false, &dividers);
if (ret)
return;
@@ -597,8 +597,8 @@ static void trinity_set_divider_value(struct radeon_device *rdev,
value |= CLK_DIVIDER(dividers.post_div);
WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
- ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- sclk/2, false, &dividers);
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ sclk/2, false, &dividers);
if (ret)
return;
@@ -1045,14 +1045,14 @@ static int trinity_set_thermal_temperature_range(struct radeon_device *rdev,
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
- if (low_temp < min_temp)
+ if (low_temp < min_temp)
low_temp = min_temp;
- if (high_temp > max_temp)
+ if (high_temp > max_temp)
high_temp = max_temp;
- if (high_temp < low_temp) {
+ if (high_temp < low_temp) {
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
- return -EINVAL;
- }
+ return -EINVAL;
+ }
WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
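
The reindented range check above is the usual clamp-then-validate idiom: clamp both ends into the supported window, then reject the request if the clamped range is empty. A standalone sketch, with no driver symbols assumed:

#include <errno.h>

static int clamp_thermal_range(int *low, int *high, int min_temp, int max_temp)
{
	if (*low < min_temp)
		*low = min_temp;		/* raise the floor */
	if (*high > max_temp)
		*high = max_temp;		/* lower the ceiling */
	if (*high < *low)
		return -EINVAL;			/* window collapsed to nothing */
	return 0;
}
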
@@ -1737,7 +1737,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct sumo_ps *ps;
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index c6b1cbca47fc..12ddcfa82e20 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -522,11 +522,17 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
goto error;
}
- r = radeon_fence_wait(fence, false);
- if (r) {
+ r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
+ RADEON_USEC_IB_TEST_TIMEOUT));
+ if (r < 0) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
goto error;
+ } else if (r == 0) {
+ DRM_ERROR("radeon: fence wait timed out.\n");
+ r = -ETIMEDOUT;
+ goto error;
}
+ r = 0;
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
radeon_fence_unref(&fence);
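
The ib-test change above switches from an unbounded fence wait to radeon_fence_wait_timeout(), which follows the jiffies-wait convention: negative means error, zero means the timeout expired, positive means success with time to spare, hence the trailing r = 0 normalization. A sketch of that three-way handling, with wait_timeout() as a placeholder for the real call:

#include <errno.h>

static int wait_and_normalize(long (*wait_timeout)(void))
{
	long r = wait_timeout();

	if (r < 0)
		return (int)r;		/* propagate the error as-is */
	if (r == 0)
		return -ETIMEDOUT;	/* expired before the fence signaled */
	return 0;			/* success: discard leftover jiffies */
}
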
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
index cdeaab7c7b1e..fce214482e72 100644
--- a/drivers/gpu/drm/radeon/vce_v2_0.c
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -53,7 +53,7 @@ static void vce_v2_0_set_sw_cg(struct radeon_device *rdev, bool gated)
WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
- } else {
+ } else {
tmp = RREG32(VCE_CLOCK_GATING_B);
tmp |= 0xe7;
tmp &= ~0xe70000;
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 96dcd4a78951..1f10fa0928b4 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,6 +1,7 @@
config DRM_RCAR_DU
tristate "DRM Support for R-Car Display Unit"
- depends on DRM && ARM && OF
+ depends on DRM && OF
+ depends on ARM || ARM64
depends on ARCH_SHMOBILE || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
@@ -14,14 +15,18 @@ config DRM_RCAR_DU
config DRM_RCAR_HDMI
bool "R-Car DU HDMI Encoder Support"
depends on DRM_RCAR_DU
- depends on OF
help
Enable support for external HDMI encoders.
config DRM_RCAR_LVDS
bool "R-Car DU LVDS Encoder Support"
depends on DRM_RCAR_DU
- depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST
help
- Enable support for the R-Car Display Unit embedded LVDS encoders
- (currently only on R8A7790 and R8A7791).
+ Enable support for the R-Car Display Unit embedded LVDS encoders.
+
+config DRM_RCAR_VSP
+ bool "R-Car DU VSP Compositor Support"
+ depends on DRM_RCAR_DU
+ depends on VIDEO_RENESAS_VSP1
+ help
+ Enable support to expose the R-Car VSP Compositor as KMS planes.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 05de1c4097af..827711e28226 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -11,4 +11,6 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \
rcar_du_hdmienc.o
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
+
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 88a4b706be16..d9f06cc361fa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -1,7 +1,7 @@
/*
* rcar_du_crtc.c -- R-Car Display Unit CRTCs
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -28,6 +28,7 @@
#include "rcar_du_kms.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"
+#include "rcar_du_vsp.h"
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
@@ -150,7 +151,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
/* Signal polarities */
value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
| ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
- | DSMR_DIPM_DE | DSMR_CSPM;
+ | DSMR_DIPM_DISP | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);
/* Display timings */
@@ -207,6 +208,7 @@ plane_format(struct rcar_du_plane *plane)
static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
unsigned int num_planes = 0;
unsigned int dptsr_planes;
unsigned int hwplanes = 0;
@@ -250,6 +252,17 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
}
}
+ /* If VSP+DU integration is enabled the plane assignment is fixed. */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
+ if (rcdu->info->gen < 3) {
+ dspr = (rcrtc->index % 2) + 1;
+ hwplanes = 1 << (rcrtc->index % 2);
+ } else {
+ dspr = (rcrtc->index % 2) ? 3 : 1;
+ hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
+ }
+ }
+
/* Update the planes to display timing and dot clock generator
* associations.
*
@@ -272,6 +285,10 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
rcar_du_group_restart(rcrtc->group);
}
+ /* Restart the group if plane sources have changed. */
+ if (rcrtc->group->need_restart)
+ rcar_du_group_restart(rcrtc->group);
+
mutex_unlock(&rcrtc->group->lock);
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
@@ -282,26 +299,6 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
* Page Flip
*/
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
- struct drm_file *file)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = rcrtc->crtc.dev;
- unsigned long flags;
-
- /* Destroy the pending vertical blanking event associated with the
- * pending page flip, if any, and disable vertical blanking interrupts.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- event = rcrtc->event;
- if (event && event->base.file_priv == file) {
- rcrtc->event = NULL;
- event->base.destroy(&event->base);
- drm_crtc_vblank_put(&rcrtc->crtc);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
struct drm_pending_vblank_event *event;
@@ -385,6 +382,10 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
rcar_du_group_start_stop(rcrtc->group, true);
+ /* Enable the VSP compositor. */
+ if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ rcar_du_vsp_enable(rcrtc);
+
/* Turn vertical blanking interrupt reporting back on. */
drm_crtc_vblank_on(crtc);
@@ -418,6 +419,10 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
rcar_du_crtc_wait_page_flip(rcrtc);
drm_crtc_vblank_off(crtc);
+ /* Disable the VSP compositor. */
+ if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ rcar_du_vsp_disable(rcrtc);
+
/* Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
*/
@@ -430,6 +435,9 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
{
+ if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ rcar_du_vsp_disable(rcrtc);
+
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
}
@@ -438,20 +446,24 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
{
unsigned int i;
- if (!rcrtc->enabled)
+ if (!rcrtc->crtc.state->active)
return;
rcar_du_crtc_get(rcrtc);
rcar_du_crtc_start(rcrtc);
/* Commit the planes state. */
- for (i = 0; i < rcrtc->group->num_planes; ++i) {
- struct rcar_du_plane *plane = &rcrtc->group->planes[i];
+ if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) {
+ rcar_du_vsp_enable(rcrtc);
+ } else {
+ for (i = 0; i < rcrtc->group->num_planes; ++i) {
+ struct rcar_du_plane *plane = &rcrtc->group->planes[i];
- if (plane->plane.state->crtc != &rcrtc->crtc)
- continue;
+ if (plane->plane.state->crtc != &rcrtc->crtc)
+ continue;
- rcar_du_plane_setup(plane);
+ rcar_du_plane_setup(plane);
+ }
}
rcar_du_crtc_update_planes(rcrtc);
@@ -465,37 +477,20 @@ static void rcar_du_crtc_enable(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- if (rcrtc->enabled)
- return;
-
rcar_du_crtc_get(rcrtc);
rcar_du_crtc_start(rcrtc);
-
- rcrtc->enabled = true;
}
static void rcar_du_crtc_disable(struct drm_crtc *crtc)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- if (!rcrtc->enabled)
- return;
-
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
- rcrtc->enabled = false;
rcrtc->outputs = 0;
}
-static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* TODO Fixup modes */
- return true;
-}
-
static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -511,6 +506,9 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
rcrtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+
+ if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ rcar_du_vsp_atomic_begin(rcrtc);
}
static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
@@ -519,10 +517,12 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
rcar_du_crtc_update_planes(rcrtc);
+
+ if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ rcar_du_vsp_atomic_flush(rcrtc);
}
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
- .mode_fixup = rcar_du_crtc_mode_fixup,
.disable = rcar_du_crtc_disable,
.enable = rcar_du_crtc_enable,
.atomic_begin = rcar_du_crtc_atomic_begin,
@@ -567,13 +567,14 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
{
static const unsigned int mmio_offsets[] = {
- DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
+ DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
};
struct rcar_du_device *rcdu = rgrp->dev;
struct platform_device *pdev = to_platform_device(rcdu->dev);
struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
struct drm_crtc *crtc = &rcrtc->crtc;
+ struct drm_plane *primary;
unsigned int irqflags;
struct clk *clk;
char clk_name[9];
@@ -609,10 +610,13 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[index];
rcrtc->index = index;
- rcrtc->enabled = false;
- ret = drm_crtc_init_with_planes(rcdu->ddev, crtc,
- &rgrp->planes[index % 2].plane,
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
+ primary = &rcrtc->vsp->planes[0].plane;
+ else
+ primary = &rgrp->planes[index % 2].plane;
+
+ ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary,
NULL, &crtc_funcs, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 4b95d9d08c49..6f08b7e7db06 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -21,6 +21,7 @@
#include <drm/drm_crtc.h>
struct rcar_du_group;
+struct rcar_du_vsp;
/**
* struct rcar_du_crtc - the CRTC, representing a DU superposition processor
@@ -33,7 +34,6 @@ struct rcar_du_group;
* @event: event to post when the pending page flip completes
* @flip_wait: wait queue used to signal page flip completion
* @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
- * @enabled: whether the CRTC is enabled, used to control system resume
* @group: CRTC group this CRTC belongs to
*/
struct rcar_du_crtc {
@@ -49,9 +49,9 @@ struct rcar_du_crtc {
wait_queue_head_t flip_wait;
unsigned int outputs;
- bool enabled;
struct rcar_du_group *group;
+ struct rcar_du_vsp *vsp;
};
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
@@ -67,8 +67,6 @@ enum rcar_du_output {
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
- struct drm_file *file);
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 40422f6b645e..ed6006bf6bd8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -1,7 +1,7 @@
/*
* rcar_du_drv.c -- R-Car Display Unit DRM driver
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -36,6 +36,7 @@
*/
static const struct rcar_du_device_info rcar_du_r8a7779_info = {
+ .gen = 2,
.features = 0,
.num_crtcs = 2,
.routes = {
@@ -57,6 +58,7 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
};
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
+ .gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
| RCAR_DU_FEATURE_EXT_CTRL_REGS,
.quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
@@ -86,6 +88,7 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
/* M2-W (r8a7791) and M2-N (r8a7793) are identical */
static const struct rcar_du_device_info rcar_du_r8a7791_info = {
+ .gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
| RCAR_DU_FEATURE_EXT_CTRL_REGS,
.num_crtcs = 2,
@@ -108,6 +111,7 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
};
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
+ .gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
| RCAR_DU_FEATURE_EXT_CTRL_REGS,
.num_crtcs = 2,
@@ -129,12 +133,37 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.num_lvds = 0,
};
+static const struct rcar_du_device_info rcar_du_r8a7795_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_VSP1_SOURCE,
+ .num_crtcs = 4,
+ .routes = {
+ /* R8A7795 has one RGB output, one LVDS output and two
+ * (currently unsupported) HDMI outputs.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(3),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ .port = 3,
+ },
+ },
+ .num_lvds = 1,
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
{ .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info },
{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
+ { .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
{ }
};
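
The new r8a7795 entry above slots into a sentinel-terminated OF match table: the empty { } entry ends the scan, and each hit carries a pointer to the per-SoC info structure. A simplified lookup sketch, where the types are stand-ins for the real of_device_id machinery:

#include <stddef.h>
#include <string.h>

struct of_match {
	const char *compatible;
	const void *data;			/* per-SoC device info */
};

static const void *match_soc(const struct of_match *tbl, const char *compat)
{
	for (; tbl->compatible; ++tbl)		/* empty { } terminates */
		if (strcmp(tbl->compatible, compat) == 0)
			return tbl->data;
	return NULL;
}
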
@@ -144,91 +173,6 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
* DRM operations
*/
-static int rcar_du_unload(struct drm_device *dev)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
-
- if (rcdu->fbdev)
- drm_fbdev_cma_fini(rcdu->fbdev);
-
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
- drm_vblank_cleanup(dev);
-
- dev->irq_enabled = 0;
- dev->dev_private = NULL;
-
- return 0;
-}
-
-static int rcar_du_load(struct drm_device *dev, unsigned long flags)
-{
- struct platform_device *pdev = dev->platformdev;
- struct device_node *np = pdev->dev.of_node;
- struct rcar_du_device *rcdu;
- struct resource *mem;
- int ret;
-
- if (np == NULL) {
- dev_err(dev->dev, "no platform data\n");
- return -ENODEV;
- }
-
- rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
- if (rcdu == NULL) {
- dev_err(dev->dev, "failed to allocate private data\n");
- return -ENOMEM;
- }
-
- init_waitqueue_head(&rcdu->commit.wait);
-
- rcdu->dev = &pdev->dev;
- rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
- rcdu->ddev = dev;
- dev->dev_private = rcdu;
-
- /* I/O resources */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(rcdu->mmio))
- return PTR_ERR(rcdu->mmio);
-
- /* Initialize vertical blanking interrupts handling. Start with vblank
- * disabled for all CRTCs.
- */
- ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize vblank\n");
- goto done;
- }
-
- /* DRM/KMS objects */
- ret = rcar_du_modeset_init(rcdu);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
- goto done;
- }
-
- dev->irq_enabled = 1;
-
- platform_set_drvdata(pdev, rcdu);
-
-done:
- if (ret)
- rcar_du_unload(dev);
-
- return ret;
-}
-
-static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
- unsigned int i;
-
- for (i = 0; i < rcdu->num_crtcs; ++i)
- rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
-}
-
static void rcar_du_lastclose(struct drm_device *dev)
{
struct rcar_du_device *rcdu = dev->dev_private;
@@ -269,11 +213,7 @@ static const struct file_operations rcar_du_fops = {
static struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
| DRIVER_ATOMIC,
- .load = rcar_du_load,
- .unload = rcar_du_unload,
- .preclose = rcar_du_preclose,
.lastclose = rcar_du_lastclose,
- .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rcar_du_enable_vblank,
.disable_vblank = rcar_du_disable_vblank,
@@ -333,18 +273,116 @@ static const struct dev_pm_ops rcar_du_pm_ops = {
* Platform driver
*/
-static int rcar_du_probe(struct platform_device *pdev)
+static int rcar_du_remove(struct platform_device *pdev)
{
- return drm_platform_init(&rcar_du_driver, pdev);
+ struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+ struct drm_device *ddev = rcdu->ddev;
+
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_connector_unplug_all(ddev);
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ drm_dev_unregister(ddev);
+
+ if (rcdu->fbdev)
+ drm_fbdev_cma_fini(rcdu->fbdev);
+
+ drm_kms_helper_poll_fini(ddev);
+ drm_mode_config_cleanup(ddev);
+ drm_vblank_cleanup(ddev);
+
+ drm_dev_unref(ddev);
+
+ return 0;
}
-static int rcar_du_remove(struct platform_device *pdev)
+static int rcar_du_probe(struct platform_device *pdev)
{
- struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct rcar_du_device *rcdu;
+ struct drm_connector *connector;
+ struct drm_device *ddev;
+ struct resource *mem;
+ int ret;
+
+ if (np == NULL) {
+ dev_err(&pdev->dev, "no device tree node\n");
+ return -ENODEV;
+ }
+
+ /* Allocate and initialize the DRM and R-Car device structures. */
+ rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
+ if (rcdu == NULL)
+ return -ENOMEM;
+
+ init_waitqueue_head(&rcdu->commit.wait);
+
+ rcdu->dev = &pdev->dev;
+ rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
+
+ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
+ if (!ddev)
+ return -ENOMEM;
+
+ drm_dev_set_unique(ddev, dev_name(&pdev->dev));
+
+ rcdu->ddev = ddev;
+ ddev->dev_private = rcdu;
+
+ platform_set_drvdata(pdev, rcdu);
+
+ /* I/O resources */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rcdu->mmio)) {
+ ret = PTR_ERR(rcdu->mmio);
+ goto error;
+ }
- drm_put_dev(rcdu->ddev);
+ /* Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
+ goto error;
+ }
+
+ /* DRM/KMS objects */
+ ret = rcar_du_modeset_init(rcdu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
+ goto error;
+ }
+
+ ddev->irq_enabled = 1;
+
+ /* Register the DRM device with the core and the connectors with
+ * sysfs.
+ */
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto error;
+
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_for_each_connector(connector, ddev) {
+ ret = drm_connector_register(connector);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ if (ret < 0)
+ goto error;
+
+ DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
return 0;
+
+error:
+ rcar_du_remove(pdev);
+
+ return ret;
}
static struct platform_driver rcar_du_platform_driver = {
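[Note: the patch above replaces the legacy .load()/.unload() callbacks with explicit probe/remove, funnelling every probe failure through rcar_du_remove() via a single error label. A minimal standalone sketch of that ordering follows; the helper names are illustrative stand-ins for the DRM calls, not the real API.]

#include <stdio.h>

/* Stubs standing in for drm_dev_alloc(), drm_vblank_init(),
 * rcar_du_modeset_init() and drm_dev_register(). */
static int alloc_device(void)    { return 0; }
static int init_vblank(void)     { return 0; }
static int init_modeset(void)    { return 0; }
static int register_device(void) { return 0; }

static void remove_device(void)
{
	/* Tear down in reverse order. For this pattern to work each step
	 * must tolerate partial initialization, which is what lets the
	 * remove path double as the probe error path. */
	puts("teardown");
}

static int probe(void)
{
	int ret;

	ret = alloc_device();
	if (ret < 0)
		return ret;

	ret = init_vblank();
	if (ret < 0)
		goto error;

	ret = init_modeset();
	if (ret < 0)
		goto error;

	ret = register_device();
	if (ret < 0)
		goto error;

	return 0;

error:
	remove_device();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}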
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 9f34fc86436a..ed35467d96cf 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -1,7 +1,7 @@
/*
* rcar_du_drv.h -- R-Car Display Unit DRM driver
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -19,6 +19,7 @@
#include "rcar_du_crtc.h"
#include "rcar_du_group.h"
+#include "rcar_du_vsp.h"
struct clk;
struct device;
@@ -29,6 +30,7 @@ struct rcar_du_lvdsenc;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
#define RCAR_DU_FEATURE_EXT_CTRL_REGS (1 << 1) /* Has extended control registers */
+#define RCAR_DU_FEATURE_VSP1_SOURCE (1 << 2) /* Has inputs from VSP1 */
#define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */
#define RCAR_DU_QUIRK_LVDS_LANES (1 << 1) /* LVDS lanes 1 and 3 inverted */
@@ -51,6 +53,7 @@ struct rcar_du_output_routing {
/*
* struct rcar_du_device_info - DU model-specific information
+ * @gen: device generation (2 or 3)
* @features: device features (RCAR_DU_FEATURE_*)
* @quirks: device quirks (RCAR_DU_QUIRK_*)
* @num_crtcs: total number of CRTCs
@@ -58,6 +61,7 @@ struct rcar_du_output_routing {
* @num_lvds: number of internal LVDS encoders
*/
struct rcar_du_device_info {
+ unsigned int gen;
unsigned int features;
unsigned int quirks;
unsigned int num_crtcs;
@@ -65,9 +69,10 @@ struct rcar_du_device_info {
unsigned int num_lvds;
};
-#define RCAR_DU_MAX_CRTCS 3
+#define RCAR_DU_MAX_CRTCS 4
#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
#define RCAR_DU_MAX_LVDS 2
+#define RCAR_DU_MAX_VSPS 4
struct rcar_du_device {
struct device *dev;
@@ -82,6 +87,7 @@ struct rcar_du_device {
unsigned int num_crtcs;
struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
+ struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
struct {
struct drm_property *alpha;
@@ -90,6 +96,8 @@ struct rcar_du_device {
} props;
unsigned int dpad0_source;
+ unsigned int vspd1_sink;
+
struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS];
struct {
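[Note: the new gen field and the RCAR_DU_FEATURE_VSP1_SOURCE flag gate most of the Gen2/Gen3 divergence in the hunks below. A compilable sketch of how such bitmask feature tests work; the struct and helper names here are illustrative, not the driver's exact definitions.]

#include <stdbool.h>
#include <stdio.h>

#define FEATURE_CRTC_IRQ_CLOCK (1 << 0)
#define FEATURE_EXT_CTRL_REGS  (1 << 1)
#define FEATURE_VSP1_SOURCE    (1 << 2)

struct device_info {
	unsigned int gen;       /* device generation, 2 or 3 */
	unsigned int features;  /* FEATURE_* bitmask */
};

static bool has_feature(const struct device_info *info, unsigned int feature)
{
	return (info->features & feature) == feature;
}

int main(void)
{
	const struct device_info r8a7795 = {
		.gen = 3,
		.features = FEATURE_CRTC_IRQ_CLOCK | FEATURE_EXT_CTRL_REGS |
			    FEATURE_VSP1_SOURCE,
	};

	if (has_feature(&r8a7795, FEATURE_VSP1_SOURCE))
		printf("Gen%u device: planes come from the VSP compositor\n",
		       r8a7795.gen);
	return 0;
}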
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index c08700757feb..4e939e41f030 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -89,12 +89,8 @@ static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
/* The flat panel mode is fixed, just copy it to the adjusted mode. */
drm_mode_copy(adjusted_mode, panel_mode);
- /* The internal LVDS encoder has a clock frequency operating range of
- * 30MHz to 150MHz. Clamp the clock accordingly.
- */
if (renc->lvds)
- adjusted_mode->clock = clamp(adjusted_mode->clock,
- 30000, 150000);
+ rcar_du_lvdsenc_atomic_check(renc->lvds, adjusted_mode);
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 8e2ffe025153..33b2fc53da3e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -1,7 +1,7 @@
/*
* rcar_du_group.c -- R-Car Display Unit Channels Pair
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -44,29 +44,64 @@ void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data);
}
+static void rcar_du_group_setup_pins(struct rcar_du_group *rgrp)
+{
+ u32 defr6 = DEFR6_CODE | DEFR6_ODPM12_DISP;
+
+ if (rgrp->num_crtcs > 1)
+ defr6 |= DEFR6_ODPM22_DISP;
+
+ rcar_du_group_write(rgrp, DEFR6, defr6);
+}
+
static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
{
- u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
+ struct rcar_du_device *rcdu = rgrp->dev;
+ unsigned int possible_crtcs =
+ rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs;
+ u32 defr8 = DEFR8_CODE;
- /* The DEFR8 register for the first group also controls RGB output
- * routing to DPAD0 for DU instances that support it.
- */
- if (rgrp->dev->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs > 1 &&
- rgrp->index == 0)
- defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source);
+ if (rcdu->info->gen < 3) {
+ defr8 |= DEFR8_DEFE8;
+
+ /* On Gen2 the DEFR8 register for the first group also controls
+ * RGB output routing to DPAD0 and VSPD1 routing to DU0/1/2 for
+ * DU instances that support it.
+ */
+ if (rgrp->index == 0) {
+ if (possible_crtcs > 1)
+ defr8 |= DEFR8_DRGBS_DU(rcdu->dpad0_source);
+ if (rgrp->dev->vspd1_sink == 2)
+ defr8 |= DEFR8_VSCS;
+ }
+ } else {
+ /* On Gen3 VSPD routing can't be configured, but DPAD routing
+ * needs to be set despite having a single option available.
+ */
+ u32 crtc = ffs(possible_crtcs) - 1;
+
+ if (crtc / 2 == rgrp->index)
+ defr8 |= DEFR8_DRGBS_DU(crtc);
+ }
rcar_du_group_write(rgrp, DEFR8, defr8);
}
static void rcar_du_group_setup(struct rcar_du_group *rgrp)
{
+ struct rcar_du_device *rcdu = rgrp->dev;
+
/* Enable extended features */
rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
- rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
- rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
- rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
+ if (rcdu->info->gen < 3) {
+ rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
+ rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
+ rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
+ }
rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
+ rcar_du_group_setup_pins(rgrp);
+
if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) {
rcar_du_group_setup_defr8(rgrp);
@@ -82,6 +117,9 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
DIDSR_PDCS_CLK(0, 0));
}
+ if (rcdu->info->gen >= 3)
+ rcar_du_group_write(rgrp, DEFR10, DEFR10_CODE | DEFR10_DEFE10);
+
/* Use DS1PR and DS2PR to configure planes priorities and connects the
* superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
*/
@@ -158,21 +196,23 @@ void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
void rcar_du_group_restart(struct rcar_du_group *rgrp)
{
+ rgrp->need_restart = false;
+
__rcar_du_group_start_stop(rgrp, false);
__rcar_du_group_start_stop(rgrp, true);
}
-static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu)
+int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu)
{
int ret;
if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS))
return 0;
- /* RGB output routing to DPAD0 is configured in the DEFR8 register of
- * the first group. As this function can be called with the DU0 and DU1
- * CRTCs disabled, we need to enable the first group clock before
- * accessing the register.
+ /* RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are
+ * configured in the DEFR8 register of the first group. As this function
+ * can be called with the DU0 and DU1 CRTCs disabled, we need to enable
+ * the first group clock before accessing the register.
*/
ret = clk_prepare_enable(rcdu->crtcs[0].clock);
if (ret < 0)
@@ -203,5 +243,5 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
rcar_du_group_write(rgrp, DORCR, dorcr);
- return rcar_du_set_dpad0_routing(rgrp->dev);
+ return rcar_du_set_dpad0_vsp1_routing(rgrp->dev);
}
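[Note: the reworked DEFR8 logic splits cleanly by generation: Gen2 sets the extension bit and encodes both DPAD0 and VSPD1 routing in the first group, while Gen3 only programs the single-choice DPAD routing. A standalone model of that bit computation, assuming the register layout shown in the patch; the individual bit positions below are illustrative.]

#include <stdint.h>
#include <stdio.h>
#include <strings.h>  /* ffs() */

#define DEFR8_CODE        (0x7790u << 16)
#define DEFR8_DEFE8       (1u << 0)
#define DEFR8_VSCS        (1u << 4)
#define DEFR8_DRGBS_DU(n) ((uint32_t)(n) << 5)

static uint32_t compute_defr8(unsigned int gen, unsigned int group_index,
			      unsigned int possible_crtcs,
			      unsigned int dpad0_source,
			      unsigned int vspd1_sink)
{
	uint32_t defr8 = DEFR8_CODE;

	if (gen < 3) {
		defr8 |= DEFR8_DEFE8;
		/* Gen2: group 0 also routes DPAD0 and VSPD1. */
		if (group_index == 0) {
			if (possible_crtcs > 1)
				defr8 |= DEFR8_DRGBS_DU(dpad0_source);
			if (vspd1_sink == 2)
				defr8 |= DEFR8_VSCS;
		}
	} else {
		/* Gen3: a single routing option, still must be written. */
		unsigned int crtc = ffs(possible_crtcs) - 1;

		if (crtc / 2 == group_index)
			defr8 |= DEFR8_DRGBS_DU(crtc);
	}
	return defr8;
}

int main(void)
{
	printf("gen2 defr8 = 0x%08x\n", compute_defr8(2, 0, 0x7, 1, 2));
	printf("gen3 defr8 = 0x%08x\n", compute_defr8(3, 0, 0x1, 0, 0));
	return 0;
}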
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index d7318e1a6b00..5e3adc6b31b5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -32,6 +32,7 @@ struct rcar_du_device;
* @dptsr_planes: bitmask of planes driven by dot-clock and timing generator 1
* @num_planes: number of planes in the group
* @planes: planes handled by the group
+ * @need_restart: the group needs to be restarted due to a configuration change
*/
struct rcar_du_group {
struct rcar_du_device *dev;
@@ -47,6 +48,7 @@ struct rcar_du_group {
unsigned int num_planes;
struct rcar_du_plane planes[RCAR_DU_NUM_KMS_PLANES];
+ bool need_restart;
};
u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg);
@@ -58,4 +60,6 @@ void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start);
void rcar_du_group_restart(struct rcar_du_group *rgrp);
int rcar_du_group_set_routing(struct rcar_du_group *rgrp);
+int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu);
+
#endif /* __RCAR_DU_GROUP_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index a37b6e2fe51a..6c927144b5c9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -55,12 +55,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
.best_encoder = rcar_du_connector_best_encoder,
};
-static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static enum drm_connector_status
rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_hdmi_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -108,9 +102,6 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret < 0)
- return ret;
connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
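[Note: the same change repeats below for the LVDS and VGA connectors: drm_connector_register() is dropped from connector init and .destroy no longer unregisters, because registration now happens in one pass from rcar_du_probe() after drm_dev_register(). A small compilable model of that centralized loop; the connector struct here is illustrative and the locking is reduced to comments.]

#include <stdio.h>

struct connector { const char *name; int registered; };

static int register_connector(struct connector *c)
{
	c->registered = 1;
	printf("registered %s\n", c->name);
	return 0;
}

int main(void)
{
	struct connector connectors[] = {
		{ "HDMI-A-1", 0 }, { "LVDS-1", 0 }, { "VGA-1", 0 },
	};
	unsigned int i;
	int ret = 0;

	/* mutex_lock(&ddev->mode_config.mutex) in the driver */
	for (i = 0; i < sizeof(connectors) / sizeof(connectors[0]); ++i) {
		ret = register_connector(&connectors[i]);
		if (ret < 0)
			break;
	}
	/* mutex_unlock(&ddev->mode_config.mutex) */
	return ret ? 1 : 0;
}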
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 2567efcbee36..461662d231e2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -71,12 +71,9 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
const struct drm_display_mode *mode = &crtc_state->mode;
- /* The internal LVDS encoder has a clock frequency operating range of
- * 30MHz to 150MHz. Clamp the clock accordingly.
- */
if (hdmienc->renc->lvds)
- adjusted_mode->clock = clamp(adjusted_mode->clock,
- 30000, 150000);
+ rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds,
+ adjusted_mode);
if (sfuncs->mode_fixup == NULL)
return 0;
@@ -134,12 +131,19 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
/* Locate the slave I2C device and driver. */
i2c_slave = of_find_i2c_device_by_node(np);
- if (!i2c_slave || !i2c_get_clientdata(i2c_slave))
+ if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
+ dev_dbg(rcdu->dev,
+ "can't get I2C slave for %s, deferring probe\n",
+ of_node_full_name(np));
return -EPROBE_DEFER;
+ }
hdmienc->dev = &i2c_slave->dev;
if (hdmienc->dev->driver == NULL) {
+ dev_dbg(rcdu->dev,
+ "I2C slave %s not probed yet, deferring probe\n",
+ dev_name(hdmienc->dev));
ret = -EPROBE_DEFER;
goto error;
}
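[Note: both new dev_dbg() messages document a probe-deferral path: if the HDMI I2C slave isn't instantiated yet, or exists but has no bound driver, the DU probe returns -EPROBE_DEFER so the driver core retries it later. A compilable model of that decision; EPROBE_DEFER's value matches the kernel's errno extension.]

#include <stdbool.h>
#include <stdio.h>

#define EPROBE_DEFER 517  /* kernel-internal errno: retry probe later */

struct i2c_slave { bool present; bool has_driver; };

static int check_hdmi_slave(const struct i2c_slave *slave)
{
	if (!slave->present)
		return -EPROBE_DEFER;  /* device not instantiated yet */
	if (!slave->has_driver)
		return -EPROBE_DEFER;  /* driver not bound yet */
	return 0;
}

int main(void)
{
	const struct i2c_slave early = { .present = true, .has_driver = false };
	const struct i2c_slave ready = { .present = true, .has_driver = true };

	printf("early: %d\n", check_hdmi_slave(&early)); /* -517: defer */
	printf("ready: %d\n", check_hdmi_slave(&ready)); /* 0: proceed */
	return 0;
}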
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 43bce69d8560..24725bf859b4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -1,7 +1,7 @@
/*
* rcar_du_kms.c -- R-Car Display Unit Mode Setting
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -28,6 +28,7 @@
#include "rcar_du_kms.h"
#include "rcar_du_lvdsenc.h"
#include "rcar_du_regs.h"
+#include "rcar_du_vsp.h"
/* -----------------------------------------------------------------------------
* Format helpers
@@ -89,13 +90,44 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
- /* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
.fourcc = DRM_FORMAT_NV16,
.bpp = 16,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
},
+ /* The following formats are not supported on Gen2 and thus have no
+ * associated .pnmr or .edf settings.
+ */
+ {
+ .fourcc = DRM_FORMAT_NV61,
+ .bpp = 16,
+ .planes = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV420,
+ .bpp = 12,
+ .planes = 3,
+ }, {
+ .fourcc = DRM_FORMAT_YVU420,
+ .bpp = 12,
+ .planes = 3,
+ }, {
+ .fourcc = DRM_FORMAT_YUV422,
+ .bpp = 16,
+ .planes = 3,
+ }, {
+ .fourcc = DRM_FORMAT_YVU422,
+ .bpp = 16,
+ .planes = 3,
+ }, {
+ .fourcc = DRM_FORMAT_YUV444,
+ .bpp = 24,
+ .planes = 3,
+ }, {
+ .fourcc = DRM_FORMAT_YVU444,
+ .bpp = 24,
+ .planes = 3,
+ },
};
const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
@@ -143,6 +175,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
unsigned int max_pitch;
unsigned int align;
unsigned int bpp;
+ unsigned int i;
format = rcar_du_format_info(mode_cmd->pixel_format);
if (format == NULL) {
@@ -155,7 +188,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
* The pitch and alignment constraints are expressed in pixels on the
* hardware side and in bytes in the DRM API.
*/
- bpp = format->planes == 2 ? 1 : format->bpp / 8;
+ bpp = format->planes == 1 ? format->bpp / 8 : 1;
max_pitch = 4096 * bpp;
if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
@@ -170,8 +203,8 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- if (format->planes == 2) {
- if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
+ for (i = 1; i < format->planes; ++i) {
+ if (mode_cmd->pitches[i] != mode_cmd->pitches[0]) {
dev_dbg(dev->dev,
"luma and chroma pitches do not match\n");
return ERR_PTR(-EINVAL);
@@ -192,252 +225,20 @@ static void rcar_du_output_poll_changed(struct drm_device *dev)
* Atomic Check and Update
*/
-/*
- * Atomic hardware plane allocator
- *
- * The hardware plane allocator is solely based on the atomic plane states
- * without keeping any external state to avoid races between .atomic_check()
- * and .atomic_commit().
- *
- * The core idea is to avoid using a free planes bitmask that would need to be
- * shared between check and commit handlers with a collective knowledge based on
- * the allocated hardware plane(s) for each KMS plane. The allocator then loops
- * over all plane states to compute the free planes bitmask, allocates hardware
- * planes based on that bitmask, and stores the result back in the plane states.
- *
- * For this to work we need to access the current state of planes not touched by
- * the atomic update. To ensure that it won't be modified, we need to lock all
- * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
- * updates from .atomic_check() up to completion (when swapping the states if
- * the check step has succeeded) or rollback (when freeing the states if the
- * check step has failed).
- *
- * Allocation is performed in the .atomic_check() handler and applied
- * automatically when the core swaps the old and new states.
- */
-
-static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
- struct rcar_du_plane_state *state)
-{
- const struct rcar_du_format_info *cur_format;
-
- cur_format = to_rcar_plane_state(plane->plane.state)->format;
-
- /* Lowering the number of planes doesn't strictly require reallocation
- * as the extra hardware plane will be freed when committing, but doing
- * so could lead to more fragmentation.
- */
- return !cur_format || cur_format->planes != state->format->planes;
-}
-
-static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
-{
- unsigned int mask;
-
- if (state->hwindex == -1)
- return 0;
-
- mask = 1 << state->hwindex;
- if (state->format->planes == 2)
- mask |= 1 << ((state->hwindex + 1) % 8);
-
- return mask;
-}
-
-static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
-{
- unsigned int i;
-
- for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
- if (!(free & (1 << i)))
- continue;
-
- if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
- break;
- }
-
- return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
-}
-
static int rcar_du_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct rcar_du_device *rcdu = dev->dev_private;
- unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
- unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
- bool needs_realloc = false;
- unsigned int groups = 0;
- unsigned int i;
int ret;
ret = drm_atomic_helper_check(dev, state);
if (ret < 0)
return ret;
- /* Check if hardware planes need to be reallocated. */
- for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
- struct rcar_du_plane_state *plane_state;
- struct rcar_du_plane *plane;
- unsigned int index;
-
- if (!state->planes[i])
- continue;
-
- plane = to_rcar_plane(state->planes[i]);
- plane_state = to_rcar_plane_state(state->plane_states[i]);
-
- dev_dbg(rcdu->dev, "%s: checking plane (%u,%u)\n", __func__,
- plane->group->index, plane - plane->group->planes);
-
- /* If the plane is being disabled we don't need to go through
- * the full reallocation procedure. Just mark the hardware
- * plane(s) as freed.
- */
- if (!plane_state->format) {
- dev_dbg(rcdu->dev, "%s: plane is being disabled\n",
- __func__);
- index = plane - plane->group->planes;
- group_freed_planes[plane->group->index] |= 1 << index;
- plane_state->hwindex = -1;
- continue;
- }
-
- /* If the plane needs to be reallocated mark it as such, and
- * mark the hardware plane(s) as free.
- */
- if (rcar_du_plane_needs_realloc(plane, plane_state)) {
- dev_dbg(rcdu->dev, "%s: plane needs reallocation\n",
- __func__);
- groups |= 1 << plane->group->index;
- needs_realloc = true;
-
- index = plane - plane->group->planes;
- group_freed_planes[plane->group->index] |= 1 << index;
- plane_state->hwindex = -1;
- }
- }
-
- if (!needs_realloc)
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
return 0;
- /* Grab all plane states for the groups that need reallocation to ensure
- * locking and avoid racy updates. This serializes the update operation,
- * but there's not much we can do about it as that's the hardware
- * design.
- *
- * Compute the used planes mask for each group at the same time to avoid
- * looping over the planes separately later.
- */
- while (groups) {
- unsigned int index = ffs(groups) - 1;
- struct rcar_du_group *group = &rcdu->groups[index];
- unsigned int used_planes = 0;
-
- dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n",
- __func__, index);
-
- for (i = 0; i < group->num_planes; ++i) {
- struct rcar_du_plane *plane = &group->planes[i];
- struct rcar_du_plane_state *plane_state;
- struct drm_plane_state *s;
-
- s = drm_atomic_get_plane_state(state, &plane->plane);
- if (IS_ERR(s))
- return PTR_ERR(s);
-
- /* If the plane has been freed in the above loop its
- * hardware planes must not be added to the used planes
- * bitmask. However, the current state doesn't reflect
- * the free state yet, as we've modified the new state
- * above. Use the local freed planes list to check for
- * that condition instead.
- */
- if (group_freed_planes[index] & (1 << i)) {
- dev_dbg(rcdu->dev,
- "%s: plane (%u,%u) has been freed, skipping\n",
- __func__, plane->group->index,
- plane - plane->group->planes);
- continue;
- }
-
- plane_state = to_rcar_plane_state(plane->plane.state);
- used_planes |= rcar_du_plane_hwmask(plane_state);
-
- dev_dbg(rcdu->dev,
- "%s: plane (%u,%u) uses %u hwplanes (index %d)\n",
- __func__, plane->group->index,
- plane - plane->group->planes,
- plane_state->format ?
- plane_state->format->planes : 0,
- plane_state->hwindex);
- }
-
- group_free_planes[index] = 0xff & ~used_planes;
- groups &= ~(1 << index);
-
- dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
- __func__, index, group_free_planes[index]);
- }
-
- /* Reallocate hardware planes for each plane that needs it. */
- for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
- struct rcar_du_plane_state *plane_state;
- struct rcar_du_plane *plane;
- unsigned int crtc_planes;
- unsigned int free;
- int idx;
-
- if (!state->planes[i])
- continue;
-
- plane = to_rcar_plane(state->planes[i]);
- plane_state = to_rcar_plane_state(state->plane_states[i]);
-
- dev_dbg(rcdu->dev, "%s: allocating plane (%u,%u)\n", __func__,
- plane->group->index, plane - plane->group->planes);
-
- /* Skip planes that are being disabled or don't need to be
- * reallocated.
- */
- if (!plane_state->format ||
- !rcar_du_plane_needs_realloc(plane, plane_state))
- continue;
-
- /* Try to allocate the plane from the free planes currently
- * associated with the target CRTC to avoid restarting the CRTC
- * group and thus minimize flicker. If it fails fall back to
- * allocating from all free planes.
- */
- crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2
- ? plane->group->dptsr_planes
- : ~plane->group->dptsr_planes;
- free = group_free_planes[plane->group->index];
-
- idx = rcar_du_plane_hwalloc(plane_state->format->planes,
- free & crtc_planes);
- if (idx < 0)
- idx = rcar_du_plane_hwalloc(plane_state->format->planes,
- free);
- if (idx < 0) {
- dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
- __func__);
- return idx;
- }
-
- dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n",
- __func__, plane_state->format->planes, idx);
-
- plane_state->hwindex = idx;
-
- group_free_planes[plane->group->index] &=
- ~rcar_du_plane_hwmask(plane_state);
-
- dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
- __func__, plane->group->index,
- group_free_planes[plane->group->index]);
- }
-
- return 0;
+ return rcar_du_atomic_check_planes(dev, state);
}
struct rcar_du_commit {
@@ -456,7 +257,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, false);
+ drm_atomic_helper_commit_planes(dev, old_state, true);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -775,14 +576,34 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U);
/* If we have more than one CRTCs in this group pre-associate
- * planes 0-3 with CRTC 0 and planes 4-7 with CRTC 1 to minimize
- * flicker occurring when the association is changed.
+ * the low-order planes with CRTC 0 and the high-order planes
+ * with CRTC 1 to minimize flicker occurring when the
+ * association is changed.
*/
- rgrp->dptsr_planes = rgrp->num_crtcs > 1 ? 0xf0 : 0;
+ rgrp->dptsr_planes = rgrp->num_crtcs > 1
+ ? (rcdu->info->gen >= 3 ? 0x04 : 0xf0)
+ : 0;
- ret = rcar_du_planes_init(rgrp);
- if (ret < 0)
- return ret;
+ if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
+ ret = rcar_du_planes_init(rgrp);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Initialize the compositors. */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
+ for (i = 0; i < rcdu->num_crtcs; ++i) {
+ struct rcar_du_vsp *vsp = &rcdu->vsps[i];
+
+ vsp->index = i;
+ vsp->dev = rcdu;
+ rcdu->crtcs[i].vsp = vsp;
+
+ ret = rcar_du_vsp_init(vsp);
+ if (ret < 0)
+ return ret;
+ }
}
/* Create the CRTCs. */
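[Note: the rcar_du_fb_create() rework above generalizes the two-plane check to any multi-planar format: pitch limits are expressed in pixels on the hardware side, so bytes-per-pixel is 1 for multi-planar formats and bpp/8 otherwise, and every chroma plane must match the luma pitch. A standalone sketch of that validation under the 4096-pixel limit from the patch; the 128-byte alignment quirk is omitted.]

#include <stdio.h>

struct format_info { unsigned int bpp; unsigned int planes; };

static int validate_pitches(const struct format_info *fmt,
			    const unsigned int pitches[3])
{
	/* Pitches are in pixels on the hardware side, bytes in the API. */
	unsigned int bpp = fmt->planes == 1 ? fmt->bpp / 8 : 1;
	unsigned int max_pitch = 4096 * bpp;
	unsigned int i;

	if (pitches[0] > max_pitch)
		return -1;  /* -EINVAL in the driver */

	for (i = 1; i < fmt->planes; ++i)
		if (pitches[i] != pitches[0])
			return -1;  /* luma/chroma pitches must match */

	return 0;
}

int main(void)
{
	const struct format_info yuv420 = { .bpp = 12, .planes = 3 };
	unsigned int ok[3] = { 1920, 1920, 1920 };
	unsigned int bad[3] = { 1920, 960, 960 };

	printf("ok:  %d\n", validate_pitches(&yuv420, ok));
	printf("bad: %d\n", validate_pitches(&yuv420, bad));
	return 0;
}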
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 0c43032fc693..e905f5da7aaa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -62,12 +62,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
.best_encoder = rcar_du_connector_best_encoder,
};
-static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static enum drm_connector_status
rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
{
@@ -79,7 +73,7 @@ static const struct drm_connector_funcs connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_lvds_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -117,9 +111,6 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret < 0)
- return ret;
connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index 85043c5bad03..ef3a50321ecc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -38,35 +38,106 @@ static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
iowrite32(data, lvds->mmio + reg);
}
-static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
- struct rcar_du_crtc *rcrtc)
+static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
+ struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.mode;
unsigned int freq = mode->clock;
u32 lvdcr0;
- u32 lvdhcr;
u32 pllcr;
- int ret;
-
- if (lvds->enabled)
- return 0;
-
- ret = clk_prepare_enable(lvds->clock);
- if (ret < 0)
- return ret;
/* PLL clock configuration */
- if (freq <= 38000)
+ if (freq < 39000)
pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
- else if (freq <= 60000)
+ else if (freq < 61000)
pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
- else if (freq <= 121000)
+ else if (freq < 121000)
pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
else
pllcr = LVDPLLCR_PLLDLYCNT_150M;
rcar_lvds_write(lvds, LVDPLLCR, pllcr);
+ /* Select the input, hardcode mode 0, enable LVDS operation and turn
+ * bias circuitry on.
+ */
+ lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
+ if (rcrtc->index == 2)
+ lvdcr0 |= LVDCR0_DUSEL;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ /* Turn all the channels on. */
+ rcar_lvds_write(lvds, LVDCR1,
+ LVDCR1_CHSTBY_GEN2(3) | LVDCR1_CHSTBY_GEN2(2) |
+ LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) |
+ LVDCR1_CLKSTBY_GEN2);
+
+ /* Turn the PLL on, wait for the startup delay, and turn the output
+ * on.
+ */
+ lvdcr0 |= LVDCR0_PLLON;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ usleep_range(100, 150);
+
+ lvdcr0 |= LVDCR0_LVRES;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+}
+
+static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
+ struct rcar_du_crtc *rcrtc)
+{
+ const struct drm_display_mode *mode = &rcrtc->crtc.mode;
+ unsigned int freq = mode->clock;
+ u32 lvdcr0;
+ u32 pllcr;
+
+ /* PLL clock configuration */
+ if (freq < 42000)
+ pllcr = LVDPLLCR_PLLDIVCNT_42M;
+ else if (freq < 85000)
+ pllcr = LVDPLLCR_PLLDIVCNT_85M;
+ else if (freq < 128000)
+ pllcr = LVDPLLCR_PLLDIVCNT_128M;
+ else
+ pllcr = LVDPLLCR_PLLDIVCNT_148M;
+
+ rcar_lvds_write(lvds, LVDPLLCR, pllcr);
+
+ /* Turn the PLL on, set it to LVDS normal mode, wait for the startup
+ * delay and turn the output on.
+ */
+ lvdcr0 = LVDCR0_PLLON;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ lvdcr0 |= LVDCR0_PWD;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ usleep_range(100, 150);
+
+ lvdcr0 |= LVDCR0_LVRES;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ /* Turn all the channels on. */
+ rcar_lvds_write(lvds, LVDCR1,
+ LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
+ LVDCR1_CHSTBY_GEN3(1) | LVDCR1_CHSTBY_GEN3(0) |
+ LVDCR1_CLKSTBY_GEN3);
+}
+
+static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
+ struct rcar_du_crtc *rcrtc)
+{
+ u32 lvdhcr;
+ int ret;
+
+ if (lvds->enabled)
+ return 0;
+
+ ret = clk_prepare_enable(lvds->clock);
+ if (ret < 0)
+ return ret;
+
/* Hardcode the channels and control signals routing for now.
*
* HSYNC -> CTRL0
@@ -87,30 +158,14 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
- /* Select the input, hardcode mode 0, enable LVDS operation and turn
- * bias circuitry on.
- */
- lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
- if (rcrtc->index == 2)
- lvdcr0 |= LVDCR0_DUSEL;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- /* Turn all the channels on. */
- rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
- LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
-
- /* Turn the PLL on, wait for the startup delay, and turn the output
- * on.
- */
- lvdcr0 |= LVDCR0_PLLEN;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
-
- usleep_range(100, 150);
-
- lvdcr0 |= LVDCR0_LVRES;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ /* Perform generation-specific initialization. */
+ if (lvds->dev->info->gen < 3)
+ rcar_du_lvdsenc_start_gen2(lvds, rcrtc);
+ else
+ rcar_du_lvdsenc_start_gen3(lvds, rcrtc);
lvds->enabled = true;
+
return 0;
}
@@ -140,6 +195,21 @@ int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc,
return -EINVAL;
}
+void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
+ struct drm_display_mode *mode)
+{
+ struct rcar_du_device *rcdu = lvds->dev;
+
+ /* The internal LVDS encoder has a restricted clock frequency operating
+ * range (30MHz to 150MHz on Gen2, 25.175MHz to 148.5MHz on Gen3). Clamp
+ * the clock accordingly.
+ */
+ if (rcdu->info->gen < 3)
+ mode->clock = clamp(mode->clock, 30000, 150000);
+ else
+ mode->clock = clamp(mode->clock, 25175, 148500);
+}
+
static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
struct platform_device *pdev)
{
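[Note: the Gen2 and Gen3 LVDS start paths above differ mainly in the PLL table and the power-up sequencing. A standalone sketch of the frequency-range selection, using exactly the thresholds from the patch; the register values are replaced by readable enum names.]

#include <stdio.h>

enum pll_setting {
	PLL_38M, PLL_60M, PLL_121M, PLL_150M,  /* Gen2 delay counts */
	PLL_42M, PLL_85M, PLL_128M, PLL_148M,  /* Gen3 divider counts */
};

static enum pll_setting select_pll(unsigned int gen, unsigned int freq_khz)
{
	if (gen < 3) {
		if (freq_khz < 39000)  return PLL_38M;
		if (freq_khz < 61000)  return PLL_60M;
		if (freq_khz < 121000) return PLL_121M;
		return PLL_150M;
	}
	if (freq_khz < 42000)  return PLL_42M;
	if (freq_khz < 85000)  return PLL_85M;
	if (freq_khz < 128000) return PLL_128M;
	return PLL_148M;
}

int main(void)
{
	printf("gen2 @ 65 MHz -> setting %d\n", select_pll(2, 65000));
	printf("gen3 @ 65 MHz -> setting %d\n", select_pll(3, 65000));
	return 0;
}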
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index 9a6001c07303..dfdba746edf4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -30,6 +30,8 @@ enum rcar_lvds_input {
int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
struct drm_crtc *crtc, bool enable);
+void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
+ struct drm_display_mode *mode);
#else
static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
{
@@ -40,6 +42,10 @@ static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
{
return 0;
}
+static inline void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
+ struct drm_display_mode *mode)
+{
+}
#endif
#endif /* __RCAR_DU_LVDSENC_H__ */
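[Note: the empty inline stub above keeps callers generation-agnostic: when the LVDS encoder is compiled out, the clock clamp is a no-op. The clamp itself is simple enough to verify in isolation, using the ranges stated in the patch's comment.]

#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

static int clamp_lvds_clock(unsigned int gen, int clock_khz)
{
	/* 30-150 MHz on Gen2, 25.175-148.5 MHz on Gen3 */
	return gen < 3 ? clamp(clock_khz, 30000, 150000)
		       : clamp(clock_khz, 25175, 148500);
}

int main(void)
{
	printf("%d\n", clamp_lvds_clock(2, 20000));  /* -> 30000 */
	printf("%d\n", clamp_lvds_clock(3, 20000));  /* -> 25175 */
	printf("%d\n", clamp_lvds_clock(3, 160000)); /* -> 148500 */
	return 0;
}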
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index c3ed9522c0e1..8460ae1ffa4b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -1,7 +1,7 @@
/*
* rcar_du_plane.c -- R-Car Display Unit Planes
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -12,6 +12,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -20,21 +21,300 @@
#include <drm/drm_plane_helper.h>
#include "rcar_du_drv.h"
+#include "rcar_du_group.h"
#include "rcar_du_kms.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"
-#define RCAR_DU_COLORKEY_NONE (0 << 24)
-#define RCAR_DU_COLORKEY_SOURCE (1 << 24)
-#define RCAR_DU_COLORKEY_MASK (1 << 24)
+/* -----------------------------------------------------------------------------
+ * Atomic hardware plane allocator
+ *
+ * The hardware plane allocator is solely based on the atomic plane states
+ * without keeping any external state to avoid races between .atomic_check()
+ * and .atomic_commit().
+ *
+ * The core idea is to avoid using a free planes bitmask that would need to be
+ * shared between check and commit handlers with a collective knowledge based on
+ * the allocated hardware plane(s) for each KMS plane. The allocator then loops
+ * over all plane states to compute the free planes bitmask, allocates hardware
+ * planes based on that bitmask, and stores the result back in the plane states.
+ *
+ * For this to work we need to access the current state of planes not touched by
+ * the atomic update. To ensure that it won't be modified, we need to lock all
+ * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
+ * updates from .atomic_check() up to completion (when swapping the states if
+ * the check step has succeeded) or rollback (when freeing the states if the
+ * check step has failed).
+ *
+ * Allocation is performed in the .atomic_check() handler and applied
+ * automatically when the core swaps the old and new states.
+ */
+
+static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
+ struct rcar_du_plane_state *new_state)
+{
+ struct rcar_du_plane_state *cur_state;
+
+ cur_state = to_rcar_plane_state(plane->plane.state);
+
+ /* Lowering the number of planes doesn't strictly require reallocation
+ * as the extra hardware plane will be freed when committing, but doing
+ * so could lead to more fragmentation.
+ */
+ if (!cur_state->format ||
+ cur_state->format->planes != new_state->format->planes)
+ return true;
+
+ /* Reallocate hardware planes if the source has changed. */
+ if (cur_state->source != new_state->source)
+ return true;
-static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
- unsigned int index, u32 reg)
+ return false;
+}
+
+static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
+{
+ unsigned int mask;
+
+ if (state->hwindex == -1)
+ return 0;
+
+ mask = 1 << state->hwindex;
+ if (state->format->planes == 2)
+ mask |= 1 << ((state->hwindex + 1) % 8);
+
+ return mask;
+}
+
+/*
+ * The R8A7790 DU can source frames directly from the VSP1 devices VSPD0 and
+ * VSPD1. VSPD0 feeds DU0/1 plane 0, and VSPD1 feeds either DU2 plane 0 or
+ * DU0/1 plane 1.
+ *
+ * Allocate the correct fixed plane when sourcing frames from VSPD0 or VSPD1,
+ * and allocate planes in reverse index order otherwise to ensure maximum
+ * availability of planes 0 and 1.
+ *
+ * The caller is responsible for ensuring that the requested source is
+ * compatible with the DU revision.
+ */
+static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane,
+ struct rcar_du_plane_state *state,
+ unsigned int free)
{
- return rcar_du_read(rgrp->dev,
- rgrp->mmio_offset + index * PLANE_OFF + reg);
+ unsigned int num_planes = state->format->planes;
+ int fixed = -1;
+ int i;
+
+ if (state->source == RCAR_DU_PLANE_VSPD0) {
+ /* VSPD0 feeds plane 0 on DU0/1. */
+ if (plane->group->index != 0)
+ return -EINVAL;
+
+ fixed = 0;
+ } else if (state->source == RCAR_DU_PLANE_VSPD1) {
+ /* VSPD1 feeds plane 1 on DU0/1 or plane 0 on DU2. */
+ fixed = plane->group->index == 0 ? 1 : 0;
+ }
+
+ if (fixed >= 0)
+ return free & (1 << fixed) ? fixed : -EBUSY;
+
+ for (i = RCAR_DU_NUM_HW_PLANES - 1; i >= 0; --i) {
+ if (!(free & (1 << i)))
+ continue;
+
+ if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
+ break;
+ }
+
+ return i < 0 ? -EBUSY : i;
}
+int rcar_du_atomic_check_planes(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct rcar_du_device *rcdu = dev->dev_private;
+ unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+ unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
+ bool needs_realloc = false;
+ unsigned int groups = 0;
+ unsigned int i;
+
+ /* Check if hardware planes need to be reallocated. */
+ for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ struct rcar_du_plane_state *plane_state;
+ struct rcar_du_plane *plane;
+ unsigned int index;
+
+ if (!state->planes[i])
+ continue;
+
+ plane = to_rcar_plane(state->planes[i]);
+ plane_state = to_rcar_plane_state(state->plane_states[i]);
+
+ dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
+ plane->group->index, plane - plane->group->planes);
+
+ /* If the plane is being disabled we don't need to go through
+ * the full reallocation procedure. Just mark the hardware
+ * plane(s) as freed.
+ */
+ if (!plane_state->format) {
+ dev_dbg(rcdu->dev, "%s: plane is being disabled\n",
+ __func__);
+ index = plane - plane->group->planes;
+ group_freed_planes[plane->group->index] |= 1 << index;
+ plane_state->hwindex = -1;
+ continue;
+ }
+
+ /* If the plane needs to be reallocated mark it as such, and
+ * mark the hardware plane(s) as free.
+ */
+ if (rcar_du_plane_needs_realloc(plane, plane_state)) {
+ dev_dbg(rcdu->dev, "%s: plane needs reallocation\n",
+ __func__);
+ groups |= 1 << plane->group->index;
+ needs_realloc = true;
+
+ index = plane - plane->group->planes;
+ group_freed_planes[plane->group->index] |= 1 << index;
+ plane_state->hwindex = -1;
+ }
+ }
+
+ if (!needs_realloc)
+ return 0;
+
+ /* Grab all plane states for the groups that need reallocation to ensure
+ * locking and avoid racy updates. This serializes the update operation,
+ * but there's not much we can do about it as that's the hardware
+ * design.
+ *
+ * Compute the used planes mask for each group at the same time to avoid
+ * looping over the planes separately later.
+ */
+ while (groups) {
+ unsigned int index = ffs(groups) - 1;
+ struct rcar_du_group *group = &rcdu->groups[index];
+ unsigned int used_planes = 0;
+
+ dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n",
+ __func__, index);
+
+ for (i = 0; i < group->num_planes; ++i) {
+ struct rcar_du_plane *plane = &group->planes[i];
+ struct rcar_du_plane_state *plane_state;
+ struct drm_plane_state *s;
+
+ s = drm_atomic_get_plane_state(state, &plane->plane);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ /* If the plane has been freed in the above loop its
+ * hardware planes must not be added to the used planes
+ * bitmask. However, the current state doesn't reflect
+ * the free state yet, as we've modified the new state
+ * above. Use the local freed planes list to check for
+ * that condition instead.
+ */
+ if (group_freed_planes[index] & (1 << i)) {
+ dev_dbg(rcdu->dev,
+ "%s: plane (%u,%tu) has been freed, skipping\n",
+ __func__, plane->group->index,
+ plane - plane->group->planes);
+ continue;
+ }
+
+ plane_state = to_rcar_plane_state(plane->plane.state);
+ used_planes |= rcar_du_plane_hwmask(plane_state);
+
+ dev_dbg(rcdu->dev,
+ "%s: plane (%u,%tu) uses %u hwplanes (index %d)\n",
+ __func__, plane->group->index,
+ plane - plane->group->planes,
+ plane_state->format ?
+ plane_state->format->planes : 0,
+ plane_state->hwindex);
+ }
+
+ group_free_planes[index] = 0xff & ~used_planes;
+ groups &= ~(1 << index);
+
+ dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
+ __func__, index, group_free_planes[index]);
+ }
+
+ /* Reallocate hardware planes for each plane that needs it. */
+ for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ struct rcar_du_plane_state *plane_state;
+ struct rcar_du_plane *plane;
+ unsigned int crtc_planes;
+ unsigned int free;
+ int idx;
+
+ if (!state->planes[i])
+ continue;
+
+ plane = to_rcar_plane(state->planes[i]);
+ plane_state = to_rcar_plane_state(state->plane_states[i]);
+
+ dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
+ plane->group->index, plane - plane->group->planes);
+
+ /* Skip planes that are being disabled or don't need to be
+ * reallocated.
+ */
+ if (!plane_state->format ||
+ !rcar_du_plane_needs_realloc(plane, plane_state))
+ continue;
+
+ /* Try to allocate the plane from the free planes currently
+ * associated with the target CRTC to avoid restarting the CRTC
+ * group and thus minimize flicker. If it fails fall back to
+ * allocating from all free planes.
+ */
+ crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2
+ ? plane->group->dptsr_planes
+ : ~plane->group->dptsr_planes;
+ free = group_free_planes[plane->group->index];
+
+ idx = rcar_du_plane_hwalloc(plane, plane_state,
+ free & crtc_planes);
+ if (idx < 0)
+ idx = rcar_du_plane_hwalloc(plane, plane_state,
+ free);
+ if (idx < 0) {
+ dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
+ __func__);
+ return idx;
+ }
+
+ dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n",
+ __func__, plane_state->format->planes, idx);
+
+ plane_state->hwindex = idx;
+
+ group_free_planes[plane->group->index] &=
+ ~rcar_du_plane_hwmask(plane_state);
+
+ dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
+ __func__, plane->group->index,
+ group_free_planes[plane->group->index]);
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Plane Setup
+ */
+
+#define RCAR_DU_COLORKEY_NONE (0 << 24)
+#define RCAR_DU_COLORKEY_SOURCE (1 << 24)
+#define RCAR_DU_COLORKEY_MASK (1 << 24)
+
static void rcar_du_plane_write(struct rcar_du_group *rgrp,
unsigned int index, u32 reg, u32 data)
{
@@ -42,34 +322,45 @@ static void rcar_du_plane_write(struct rcar_du_group *rgrp,
data);
}
-static void rcar_du_plane_setup_fb(struct rcar_du_plane *plane)
+static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
+ const struct rcar_du_plane_state *state)
{
- struct rcar_du_plane_state *state =
- to_rcar_plane_state(plane->plane.state);
- struct drm_framebuffer *fb = plane->plane.state->fb;
- struct rcar_du_group *rgrp = plane->group;
unsigned int src_x = state->state.src_x >> 16;
unsigned int src_y = state->state.src_y >> 16;
unsigned int index = state->hwindex;
- struct drm_gem_cma_object *gem;
+ unsigned int pitch;
bool interlaced;
- u32 mwr;
+ u32 dma[2];
interlaced = state->state.crtc->state->adjusted_mode.flags
& DRM_MODE_FLAG_INTERLACE;
+ if (state->source == RCAR_DU_PLANE_MEMORY) {
+ struct drm_framebuffer *fb = state->state.fb;
+ struct drm_gem_cma_object *gem;
+ unsigned int i;
+
+ if (state->format->planes == 2)
+ pitch = fb->pitches[0];
+ else
+ pitch = fb->pitches[0] * 8 / state->format->bpp;
+
+ for (i = 0; i < state->format->planes; ++i) {
+ gem = drm_fb_cma_get_gem_obj(fb, i);
+ dma[i] = gem->paddr + fb->offsets[i];
+ }
+ } else {
+ pitch = state->state.src_w >> 16;
+ dma[0] = 0;
+ dma[1] = 0;
+ }
+
/* Memory pitch (expressed in pixels). Must be doubled for interlaced
* operation with 32bpp formats.
*/
- if (state->format->planes == 2)
- mwr = fb->pitches[0];
- else
- mwr = fb->pitches[0] * 8 / state->format->bpp;
-
- if (interlaced && state->format->bpp == 32)
- mwr *= 2;
-
- rcar_du_plane_write(rgrp, index, PnMWR, mwr);
+ rcar_du_plane_write(rgrp, index, PnMWR,
+ (interlaced && state->format->bpp == 32) ?
+ pitch * 2 : pitch);
/* The Y position is expressed in raster line units and must be doubled
* for 32bpp formats, according to the R8A7790 datasheet. No mention of
@@ -87,30 +378,25 @@ static void rcar_du_plane_setup_fb(struct rcar_du_plane *plane)
rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
(!interlaced && state->format->bpp == 32 ? 2 : 1));
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- rcar_du_plane_write(rgrp, index, PnDSA0R, gem->paddr + fb->offsets[0]);
+ rcar_du_plane_write(rgrp, index, PnDSA0R, dma[0]);
if (state->format->planes == 2) {
index = (index + 1) % 8;
- rcar_du_plane_write(rgrp, index, PnMWR, fb->pitches[0]);
+ rcar_du_plane_write(rgrp, index, PnMWR, pitch);
rcar_du_plane_write(rgrp, index, PnSPXR, src_x);
rcar_du_plane_write(rgrp, index, PnSPYR, src_y *
(state->format->bpp == 16 ? 2 : 1) / 2);
- gem = drm_fb_cma_get_gem_obj(fb, 1);
- rcar_du_plane_write(rgrp, index, PnDSA0R,
- gem->paddr + fb->offsets[1]);
+ rcar_du_plane_write(rgrp, index, PnDSA0R, dma[1]);
}
}
-static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
- unsigned int index)
+static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
+ unsigned int index,
+ const struct rcar_du_plane_state *state)
{
- struct rcar_du_plane_state *state =
- to_rcar_plane_state(plane->plane.state);
- struct rcar_du_group *rgrp = plane->group;
u32 colorkey;
u32 pnmr;
@@ -168,12 +454,10 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
}
}
-static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
- unsigned int index)
+static void rcar_du_plane_setup_format_gen2(struct rcar_du_group *rgrp,
+ unsigned int index,
+ const struct rcar_du_plane_state *state)
{
- struct rcar_du_plane_state *state =
- to_rcar_plane_state(plane->plane.state);
- struct rcar_du_group *rgrp = plane->group;
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
@@ -182,11 +466,8 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
* The data format is selected by the DDDF field in PnMR and the EDF
* field in DDCR4.
*/
- ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
- ddcr4 &= ~PnDDCR4_EDF_MASK;
- ddcr4 |= state->format->edf | PnDDCR4_CODE;
- rcar_du_plane_setup_mode(plane, index);
+ rcar_du_plane_setup_mode(rgrp, index, state);
if (state->format->planes == 2) {
if (state->hwindex != index) {
@@ -204,31 +485,72 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
}
rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
+
+ ddcr4 = state->format->edf | PnDDCR4_CODE;
+ if (state->source != RCAR_DU_PLANE_MEMORY)
+ ddcr4 |= PnDDCR4_VSPS;
+
rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
+}
+
+static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp,
+ unsigned int index,
+ const struct rcar_du_plane_state *state)
+{
+ rcar_du_plane_write(rgrp, index, PnMR,
+ PnMR_SPIM_TP_OFF | state->format->pnmr);
+
+ rcar_du_plane_write(rgrp, index, PnDDCR4,
+ state->format->edf | PnDDCR4_CODE);
+}
+
+static void rcar_du_plane_setup_format(struct rcar_du_group *rgrp,
+ unsigned int index,
+ const struct rcar_du_plane_state *state)
+{
+ struct rcar_du_device *rcdu = rgrp->dev;
+
+ if (rcdu->info->gen < 3)
+ rcar_du_plane_setup_format_gen2(rgrp, index, state);
+ else
+ rcar_du_plane_setup_format_gen3(rgrp, index, state);
/* Destination position and size */
- rcar_du_plane_write(rgrp, index, PnDSXR, plane->plane.state->crtc_w);
- rcar_du_plane_write(rgrp, index, PnDSYR, plane->plane.state->crtc_h);
- rcar_du_plane_write(rgrp, index, PnDPXR, plane->plane.state->crtc_x);
- rcar_du_plane_write(rgrp, index, PnDPYR, plane->plane.state->crtc_y);
-
- /* Wrap-around and blinking, disabled */
- rcar_du_plane_write(rgrp, index, PnWASPR, 0);
- rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
- rcar_du_plane_write(rgrp, index, PnBTR, 0);
- rcar_du_plane_write(rgrp, index, PnMLR, 0);
+ rcar_du_plane_write(rgrp, index, PnDSXR, state->state.crtc_w);
+ rcar_du_plane_write(rgrp, index, PnDSYR, state->state.crtc_h);
+ rcar_du_plane_write(rgrp, index, PnDPXR, state->state.crtc_x);
+ rcar_du_plane_write(rgrp, index, PnDPYR, state->state.crtc_y);
+
+ if (rcdu->info->gen < 3) {
+ /* Wrap-around and blinking, disabled */
+ rcar_du_plane_write(rgrp, index, PnWASPR, 0);
+ rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
+ rcar_du_plane_write(rgrp, index, PnBTR, 0);
+ rcar_du_plane_write(rgrp, index, PnMLR, 0);
+ }
}
-void rcar_du_plane_setup(struct rcar_du_plane *plane)
+void __rcar_du_plane_setup(struct rcar_du_group *rgrp,
+ const struct rcar_du_plane_state *state)
{
- struct rcar_du_plane_state *state =
- to_rcar_plane_state(plane->plane.state);
+ struct rcar_du_device *rcdu = rgrp->dev;
- __rcar_du_plane_setup(plane, state->hwindex);
+ rcar_du_plane_setup_format(rgrp, state->hwindex, state);
if (state->format->planes == 2)
- __rcar_du_plane_setup(plane, (state->hwindex + 1) % 8);
+ rcar_du_plane_setup_format(rgrp, (state->hwindex + 1) % 8,
+ state);
- rcar_du_plane_setup_fb(plane);
+ if (rcdu->info->gen < 3)
+ rcar_du_plane_setup_scanout(rgrp, state);
+
+ if (state->source == RCAR_DU_PLANE_VSPD1) {
+ unsigned int vspd1_sink = rgrp->index ? 2 : 0;
+
+ if (rcdu->vspd1_sink != vspd1_sink) {
+ rcdu->vspd1_sink = vspd1_sink;
+ rcar_du_set_dpad0_vsp1_routing(rcdu);
+ }
+ }
}
static int rcar_du_plane_atomic_check(struct drm_plane *plane,
@@ -263,9 +585,27 @@ static void rcar_du_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct rcar_du_plane *rplane = to_rcar_plane(plane);
+ struct rcar_du_plane_state *old_rstate;
+ struct rcar_du_plane_state *new_rstate;
+
+ if (!plane->state->crtc)
+ return;
+
+ rcar_du_plane_setup(rplane);
+
+ /* Check whether the source has changed from memory to live source or
+ * from live source to memory. The source has been configured by the
+ * VSPS bit in the PnDDCR4 register. Although the datasheet states that
+ * the bit is updated during vertical blanking, it seems that updates
+ * only occur when the DU group is held in reset through the DSYSR.DRES
+ * bit. We thus need to restart the group if the source changes.
+ */
+ old_rstate = to_rcar_plane_state(old_state);
+ new_rstate = to_rcar_plane_state(plane->state);
- if (plane->state->crtc)
- rcar_du_plane_setup(rplane);
+ if ((old_rstate->source == RCAR_DU_PLANE_MEMORY) !=
+ (new_rstate->source == RCAR_DU_PLANE_MEMORY))
+ rplane->group->need_restart = true;
}
static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = {
@@ -313,6 +653,7 @@ static void rcar_du_plane_reset(struct drm_plane *plane)
return;
state->hwindex = -1;
+ state->source = RCAR_DU_PLANE_MEMORY;
state->alpha = 255;
state->colorkey = RCAR_DU_COLORKEY_NONE;
state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
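[Note: the allocator moved into this file gains two behaviors worth demonstrating: two-plane formats occupy the adjacent (modulo-8) hardware plane as well, and VSPD sources pin the allocation to a fixed index while memory sources allocate in reverse index order to keep planes 0 and 1 free. A compilable model of the bitmask logic, with the group/DU topology simplified away (the VSPD0 group restriction is not modeled).]

#include <stdio.h>

#define NUM_HW_PLANES 8

enum source { SRC_MEMORY, SRC_VSPD0, SRC_VSPD1 };

/* Mask of hardware planes a state occupies; two-plane formats also take
 * the next plane modulo 8, matching rcar_du_plane_hwmask(). */
static unsigned int hwmask(int hwindex, unsigned int num_planes)
{
	unsigned int mask;

	if (hwindex < 0)
		return 0;
	mask = 1u << hwindex;
	if (num_planes == 2)
		mask |= 1u << ((hwindex + 1) % 8);
	return mask;
}

static int hwalloc(enum source src, unsigned int group_index,
		   unsigned int num_planes, unsigned int free)
{
	int fixed = -1, i;

	if (src == SRC_VSPD0)
		fixed = 0;                        /* DU0/1 plane 0 */
	else if (src == SRC_VSPD1)
		fixed = group_index == 0 ? 1 : 0; /* DU0/1 p1, DU2 p0 */

	if (fixed >= 0)
		return (free & (1u << fixed)) ? fixed : -1; /* -EBUSY */

	for (i = NUM_HW_PLANES - 1; i >= 0; --i) {
		if (!(free & (1u << i)))
			continue;
		if (num_planes == 1 || (free & (1u << ((i + 1) % 8))))
			return i;
	}
	return -1;
}

int main(void)
{
	unsigned int free = 0xff;
	int idx = hwalloc(SRC_MEMORY, 0, 2, free);

	printf("allocated %d, mask 0x%02x\n", idx, hwmask(idx, 2));
	free &= ~hwmask(idx, 2);
	printf("vspd1 -> %d\n", hwalloc(SRC_VSPD1, 0, 1, free));
	return 0;
}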
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 9732bff1911b..b18b7b25dbfa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -28,6 +28,12 @@ struct rcar_du_group;
#define RCAR_DU_NUM_KMS_PLANES 9
#define RCAR_DU_NUM_HW_PLANES 8
+enum rcar_du_plane_source {
+ RCAR_DU_PLANE_MEMORY,
+ RCAR_DU_PLANE_VSPD0,
+ RCAR_DU_PLANE_VSPD1,
+};
+
struct rcar_du_plane {
struct drm_plane plane;
struct rcar_du_group *group;
@@ -52,6 +58,7 @@ struct rcar_du_plane_state {
const struct rcar_du_format_info *format;
int hwindex;
+ enum rcar_du_plane_source source;
unsigned int alpha;
unsigned int colorkey;
@@ -64,8 +71,20 @@ to_rcar_plane_state(struct drm_plane_state *state)
return container_of(state, struct rcar_du_plane_state, state);
}
+int rcar_du_atomic_check_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
int rcar_du_planes_init(struct rcar_du_group *rgrp);
-void rcar_du_plane_setup(struct rcar_du_plane *plane);
+void __rcar_du_plane_setup(struct rcar_du_group *rgrp,
+ const struct rcar_du_plane_state *state);
+
+static inline void rcar_du_plane_setup(struct rcar_du_plane *plane)
+{
+ struct rcar_du_plane_state *state =
+ to_rcar_plane_state(plane->plane.state);
+
+ return __rcar_du_plane_setup(plane->group, state);
+}
#endif /* __RCAR_DU_PLANE_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 70fcbc471ebd..d2f66068e52c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -1,7 +1,7 @@
/*
* rcar_du_regs.h -- R-Car Display Unit Registers Definitions
*
- * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -16,6 +16,7 @@
#define DU0_REG_OFFSET 0x00000
#define DU1_REG_OFFSET 0x30000
#define DU2_REG_OFFSET 0x40000
+#define DU3_REG_OFFSET 0x70000
/* -----------------------------------------------------------------------------
* Display Control Registers
@@ -186,7 +187,7 @@
#define DEFR6 0x000e8
#define DEFR6_CODE (0x7778 << 16)
-#define DEFR6_ODPM22_D2SMR (0 << 10)
+#define DEFR6_ODPM22_DSMR (0 << 10)
#define DEFR6_ODPM22_DISP (2 << 10)
#define DEFR6_ODPM22_CDE (3 << 10)
#define DEFR6_ODPM22_MASK (3 << 10)
@@ -260,6 +261,21 @@
#define DIDSR_PDCS_CLK(n, clk) (clk << ((n) * 2))
#define DIDSR_PDCS_MASK(n) (3 << ((n) * 2))
+#define DEFR10 0x20038
+#define DEFR10_CODE (0x7795 << 16)
+#define DEFR10_VSPF1_RGB (0 << 14)
+#define DEFR10_VSPF1_YC (1 << 14)
+#define DEFR10_DOCF1_RGB (0 << 12)
+#define DEFR10_DOCF1_YC (1 << 12)
+#define DEFR10_YCDF0_YCBCR444 (0 << 11)
+#define DEFR10_YCDF0_YCBCR422 (1 << 11)
+#define DEFR10_VSPF0_RGB (0 << 10)
+#define DEFR10_VSPF0_YC (1 << 10)
+#define DEFR10_DOCF0_RGB (0 << 8)
+#define DEFR10_DOCF0_YC (1 << 8)
+#define DEFR10_TSEL_H3_TCON1 (0 << 1) /* DEFR102 register only (DU2/DU3) */
+#define DEFR10_DEFE10 (1 << 0)
+
/* -----------------------------------------------------------------------------
* Display Timing Generation Registers
*/
@@ -389,6 +405,7 @@
#define PnDDCR4 0x00190
#define PnDDCR4_CODE (0x7766 << 16)
+#define PnDDCR4_VSPS (1 << 13)
#define PnDDCR4_SDFS_RGB (0 << 4)
#define PnDDCR4_SDFS_YC (5 << 4)
#define PnDDCR4_SDFS_MASK (7 << 4)
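[Note: DEFR10 above follows the same convention as the other DU extension registers: the upper half of every write carries a magic code (the *_CODE constants), which to my understanding acts as a write key protecting the register, so values are always OR'ed with the code first. A tiny sketch of composing such a write.]

#include <stdint.h>
#include <stdio.h>

#define DEFR10_CODE   (0x7795u << 16)
#define DEFR10_DEFE10 (1u << 0)

/* Compose a DEFR10 write: payload bits in the low half, write-key code
 * in the high half. */
static uint32_t defr10_enable(void)
{
	return DEFR10_CODE | DEFR10_DEFE10;
}

int main(void)
{
	printf("DEFR10 <- 0x%08x\n", defr10_enable());
	return 0;
}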
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index e0a5d8f93963..9d7e5c99caf6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -31,12 +31,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
.best_encoder = rcar_du_connector_best_encoder,
};
-static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static enum drm_connector_status
rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
{
@@ -48,7 +42,7 @@ static const struct drm_connector_funcs connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_vga_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_vga_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -76,9 +70,6 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret < 0)
- return ret;
connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
new file mode 100644
index 000000000000..de7ef041182b
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -0,0 +1,384 @@
+/*
+ * rcar_du_vsp.c -- R-Car Display Unit VSP-Based Compositor
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/of_platform.h>
+#include <linux/videodev2.h>
+
+#include <media/vsp1.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_vsp.h"
+
+void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
+{
+ const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
+ struct rcar_du_device *rcdu = crtc->group->dev;
+ struct rcar_du_plane_state state = {
+ .state = {
+ .crtc = &crtc->crtc,
+ .crtc_x = 0,
+ .crtc_y = 0,
+ .crtc_w = mode->hdisplay,
+ .crtc_h = mode->vdisplay,
+ .src_x = 0,
+ .src_y = 0,
+ .src_w = mode->hdisplay << 16,
+ .src_h = mode->vdisplay << 16,
+ },
+ .format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
+ .source = RCAR_DU_PLANE_VSPD1,
+ .alpha = 255,
+ .colorkey = 0,
+ .zpos = 0,
+ };
+
+ if (rcdu->info->gen >= 3)
+ state.hwindex = (crtc->index % 2) ? 2 : 0;
+ else
+ state.hwindex = crtc->index % 2;
+
+ __rcar_du_plane_setup(crtc->group, &state);
+
+ /* Ensure that the plane source configuration takes effect by requesting
+ * a restart of the group. See rcar_du_plane_atomic_update() for a more
+ * detailed explanation.
+ *
+ * TODO: Check whether this is still needed on Gen3.
+ */
+ crtc->group->need_restart = true;
+
+ vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay);
+}
+
+void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
+{
+ vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0);
+}
+
+void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
+{
+ vsp1_du_atomic_begin(crtc->vsp->vsp);
+}
+
+void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
+{
+ vsp1_du_atomic_flush(crtc->vsp->vsp);
+}
+
+/* Keep the two format tables below in sync; entries pair up by index. */
+static const u32 formats_kms[] = {
+ DRM_FORMAT_RGB332,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU444,
+};
+
+static const u32 formats_v4l2[] = {
+ V4L2_PIX_FMT_RGB332,
+ V4L2_PIX_FMT_ARGB444,
+ V4L2_PIX_FMT_XRGB444,
+ V4L2_PIX_FMT_ARGB555,
+ V4L2_PIX_FMT_XRGB555,
+ V4L2_PIX_FMT_RGB565,
+ V4L2_PIX_FMT_RGB24,
+ V4L2_PIX_FMT_BGR24,
+ V4L2_PIX_FMT_ARGB32,
+ V4L2_PIX_FMT_XRGB32,
+ V4L2_PIX_FMT_ABGR32,
+ V4L2_PIX_FMT_XBGR32,
+ V4L2_PIX_FMT_UYVY,
+ V4L2_PIX_FMT_VYUY,
+ V4L2_PIX_FMT_YUYV,
+ V4L2_PIX_FMT_YVYU,
+ V4L2_PIX_FMT_NV12M,
+ V4L2_PIX_FMT_NV21M,
+ V4L2_PIX_FMT_NV16M,
+ V4L2_PIX_FMT_NV61M,
+ V4L2_PIX_FMT_YUV420M,
+ V4L2_PIX_FMT_YVU420M,
+ V4L2_PIX_FMT_YUV422M,
+ V4L2_PIX_FMT_YVU422M,
+ V4L2_PIX_FMT_YUV444M,
+ V4L2_PIX_FMT_YVU444M,
+};
+
+static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
+{
+ struct rcar_du_vsp_plane_state *state =
+ to_rcar_vsp_plane_state(plane->plane.state);
+ struct drm_framebuffer *fb = plane->plane.state->fb;
+ struct v4l2_rect src;
+ struct v4l2_rect dst;
+ dma_addr_t paddr[2] = { 0, };
+ u32 pixelformat = 0;
+ unsigned int i;
+
+ src.left = state->state.src_x >> 16;
+ src.top = state->state.src_y >> 16;
+ src.width = state->state.src_w >> 16;
+ src.height = state->state.src_h >> 16;
+
+ dst.left = state->state.crtc_x;
+ dst.top = state->state.crtc_y;
+ dst.width = state->state.crtc_w;
+ dst.height = state->state.crtc_h;
+
+ for (i = 0; i < state->format->planes; ++i) {
+ struct drm_gem_cma_object *gem;
+
+ gem = drm_fb_cma_get_gem_obj(fb, i);
+ paddr[i] = gem->paddr + fb->offsets[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) {
+ if (formats_kms[i] == state->format->fourcc) {
+ pixelformat = formats_v4l2[i];
+ break;
+ }
+ }
+
+ WARN_ON(!pixelformat);
+
+ vsp1_du_atomic_update(plane->vsp->vsp, plane->index, pixelformat,
+ fb->pitches[0], paddr, &src, &dst);
+}
+
+static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
+ struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
+ struct rcar_du_device *rcdu = rplane->vsp->dev;
+
+ if (!state->fb || !state->crtc) {
+ rstate->format = NULL;
+ return 0;
+ }
+
+ if (state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h) {
+ dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__);
+ return -EINVAL;
+ }
+
+ rstate->format = rcar_du_format_info(state->fb->pixel_format);
+ if (rstate->format == NULL) {
+ dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
+ state->fb->pixel_format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
+
+ if (plane->state->crtc)
+ rcar_du_vsp_plane_setup(rplane);
+ else
+ vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, 0, 0, 0,
+ NULL, NULL);
+}
+
+static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = {
+ .atomic_check = rcar_du_vsp_plane_atomic_check,
+ .atomic_update = rcar_du_vsp_plane_atomic_update,
+};
+
+static struct drm_plane_state *
+rcar_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct rcar_du_vsp_plane_state *state;
+ struct rcar_du_vsp_plane_state *copy;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ state = to_rcar_vsp_plane_state(plane->state);
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (copy == NULL)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
+
+ return &copy->state;
+}
+
+static void rcar_du_vsp_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(plane, state);
+ kfree(to_rcar_vsp_plane_state(state));
+}
+
+static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
+{
+ struct rcar_du_vsp_plane_state *state;
+
+ if (plane->state) {
+ rcar_du_vsp_plane_atomic_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return;
+
+ state->alpha = 255;
+
+ plane->state = &state->state;
+ plane->state->plane = plane;
+}
+
+static int rcar_du_vsp_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
+ struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
+
+ if (property == rcdu->props.alpha)
+ rstate->alpha = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rcar_du_vsp_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state, struct drm_property *property,
+ uint64_t *val)
+{
+ const struct rcar_du_vsp_plane_state *rstate =
+ container_of(state, const struct rcar_du_vsp_plane_state, state);
+ struct rcar_du_device *rcdu = to_rcar_vsp_plane(plane)->vsp->dev;
+
+ if (property == rcdu->props.alpha)
+ *val = rstate->alpha;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = rcar_du_vsp_plane_reset,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .destroy = drm_plane_cleanup,
+ .atomic_duplicate_state = rcar_du_vsp_plane_atomic_duplicate_state,
+ .atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
+ .atomic_set_property = rcar_du_vsp_plane_atomic_set_property,
+ .atomic_get_property = rcar_du_vsp_plane_atomic_get_property,
+};
+
+int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
+{
+ struct rcar_du_device *rcdu = vsp->dev;
+ struct platform_device *pdev;
+ struct device_node *np;
+ unsigned int i;
+ int ret;
+
+ /* Find the VSP device and initialize it. */
+ np = of_parse_phandle(rcdu->dev->of_node, "vsps", vsp->index);
+ if (!np) {
+ dev_err(rcdu->dev, "vsps node not found\n");
+ return -ENXIO;
+ }
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return -ENXIO;
+
+ vsp->vsp = &pdev->dev;
+
+ ret = vsp1_du_init(vsp->vsp);
+ if (ret < 0)
+ return ret;
+
+ /* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
+ * 4 RPFs.
+ */
+ vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4;
+
+ vsp->planes = devm_kcalloc(rcdu->dev, vsp->num_planes,
+ sizeof(*vsp->planes), GFP_KERNEL);
+ if (!vsp->planes)
+ return -ENOMEM;
+
+ for (i = 0; i < vsp->num_planes; ++i) {
+ enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY
+ : DRM_PLANE_TYPE_PRIMARY;
+ struct rcar_du_vsp_plane *plane = &vsp->planes[i];
+
+ plane->vsp = vsp;
+ plane->index = i;
+
+ ret = drm_universal_plane_init(rcdu->ddev, &plane->plane,
+ 1 << vsp->index,
+ &rcar_du_vsp_plane_funcs,
+ formats_kms,
+ ARRAY_SIZE(formats_kms), type,
+ NULL);
+ if (ret < 0)
+ return ret;
+
+ drm_plane_helper_add(&plane->plane,
+ &rcar_du_vsp_plane_helper_funcs);
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
+ drm_object_attach_property(&plane->plane.base,
+ rcdu->props.alpha, 255);
+ }
+
+ return 0;
+}
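
The two format tables in rcar_du_vsp.c above are index-paired: rcar_du_vsp_plane_setup() scans formats_kms for the plane's fourcc and takes the V4L2 pixel format at the same index, warning if nothing matches. A minimal user-space sketch of that lookup, using a hypothetical two-entry excerpt of the tables (FOURCC, demo_kms, demo_v4l2 and kms_to_v4l2 are illustrative names, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Little-endian fourcc packing, as used by both DRM and V4L2. */
#define FOURCC(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | \
	 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

/* Hypothetical two-entry excerpt of the index-paired tables. */
static const uint32_t demo_kms[] = {
	FOURCC('A', 'R', '2', '4'),	/* DRM_FORMAT_ARGB8888 */
	FOURCC('N', 'V', '1', '2'),	/* DRM_FORMAT_NV12 */
};

static const uint32_t demo_v4l2[] = {
	FOURCC('A', 'R', '2', '4'),	/* V4L2_PIX_FMT_ABGR32 */
	FOURCC('N', 'M', '1', '2'),	/* V4L2_PIX_FMT_NV12M */
};

/* Return the V4L2 format paired with a KMS fourcc, or 0 if unsupported. */
static uint32_t kms_to_v4l2(uint32_t fourcc)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_kms) / sizeof(demo_kms[0]); i++)
		if (demo_kms[i] == fourcc)
			return demo_v4l2[i];

	return 0;	/* mirrors the WARN_ON(!pixelformat) case above */
}

int main(void)
{
	printf("%#x\n", kms_to_v4l2(FOURCC('N', 'V', '1', '2')));
	return 0;
}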
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
new file mode 100644
index 000000000000..df3bf3805c69
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -0,0 +1,76 @@
+/*
+ * rcar_du_vsp.h -- R-Car Display Unit VSP-Based Compositor
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_VSP_H__
+#define __RCAR_DU_VSP_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+struct rcar_du_format_info;
+struct rcar_du_vsp;
+
+struct rcar_du_vsp_plane {
+ struct drm_plane plane;
+ struct rcar_du_vsp *vsp;
+ unsigned int index;
+};
+
+struct rcar_du_vsp {
+ unsigned int index;
+ struct device *vsp;
+ struct rcar_du_device *dev;
+ struct rcar_du_vsp_plane *planes;
+ unsigned int num_planes;
+};
+
+static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p)
+{
+ return container_of(p, struct rcar_du_vsp_plane, plane);
+}
+
+/**
+ * struct rcar_du_vsp_plane_state - Driver-specific plane state
+ * @state: base DRM plane state
+ * @format: information about the pixel format used by the plane
+ * @alpha: value of the plane alpha property
+ */
+struct rcar_du_vsp_plane_state {
+ struct drm_plane_state state;
+
+ const struct rcar_du_format_info *format;
+
+ unsigned int alpha;
+};
+
+static inline struct rcar_du_vsp_plane_state *
+to_rcar_vsp_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct rcar_du_vsp_plane_state, state);
+}
+
+#ifdef CONFIG_DRM_RCAR_VSP
+int rcar_du_vsp_init(struct rcar_du_vsp *vsp);
+void rcar_du_vsp_enable(struct rcar_du_crtc *crtc);
+void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
+void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
+void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
+#else
+static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return 0; }
+static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { }
+static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { }
+static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { }
+static inline void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { }
+#endif
+
+#endif /* __RCAR_DU_VSP_H__ */
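
The to_rcar_vsp_plane() and to_rcar_vsp_plane_state() helpers above are the usual container_of() upcast: the DRM core hands back a pointer to the embedded base object, and the driver recovers its wrapper by subtracting the member offset. A self-contained sketch of the same idiom, with hypothetical struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_state { int id; };		/* stands in for drm_plane_state */

struct wrapper_state {
	struct base_state state;	/* embedded base, as in the driver */
	unsigned int alpha;		/* driver-specific extension */
};

int main(void)
{
	struct wrapper_state w = { .state = { .id = 1 }, .alpha = 255 };
	struct base_state *base = &w.state;	/* what the core hands back */
	struct wrapper_state *back =
		container_of(base, struct wrapper_state, state);

	printf("alpha = %u\n", back->alpha);	/* prints 255 */
	return 0;
}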
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
index 77cf9289ab65..d7d294ba2dbe 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -1,7 +1,7 @@
/*
* rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions
*
- * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -15,28 +15,38 @@
#define LVDCR0 0x0000
#define LVDCR0_DUSEL (1 << 15)
-#define LVDCR0_DMD (1 << 12)
+#define LVDCR0_DMD (1 << 12) /* Gen2 only */
#define LVDCR0_LVMD_MASK (0xf << 8)
#define LVDCR0_LVMD_SHIFT 8
-#define LVDCR0_PLLEN (1 << 4)
-#define LVDCR0_BEN (1 << 2)
-#define LVDCR0_LVEN (1 << 1)
+#define LVDCR0_PLLON (1 << 4)
+#define LVDCR0_PWD (1 << 2) /* Gen3 only */
+#define LVDCR0_BEN (1 << 2) /* Gen2 only */
+#define LVDCR0_LVEN (1 << 1) /* Gen2 only */
#define LVDCR0_LVRES (1 << 0)
#define LVDCR1 0x0004
-#define LVDCR1_CKSEL (1 << 15)
-#define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2))
-#define LVDCR1_CLKSTBY (3 << 0)
+#define LVDCR1_CKSEL (1 << 15) /* Gen2 only */
+#define LVDCR1_CHSTBY_GEN2(n) (3 << (2 + (n) * 2)) /* Gen2 only */
+#define LVDCR1_CHSTBY_GEN3(n) (1 << (2 + (n) * 2)) /* Gen3 only */
+#define LVDCR1_CLKSTBY_GEN2 (3 << 0) /* Gen2 only */
+#define LVDCR1_CLKSTBY_GEN3 (1 << 0) /* Gen3 only */
#define LVDPLLCR 0x0008
#define LVDPLLCR_CEEN (1 << 14)
#define LVDPLLCR_FBEN (1 << 13)
#define LVDPLLCR_COSEL (1 << 12)
+/* Gen2 */
#define LVDPLLCR_PLLDLYCNT_150M (0x1bf << 0)
#define LVDPLLCR_PLLDLYCNT_121M (0x22c << 0)
#define LVDPLLCR_PLLDLYCNT_60M (0x77b << 0)
#define LVDPLLCR_PLLDLYCNT_38M (0x69a << 0)
#define LVDPLLCR_PLLDLYCNT_MASK (0x7ff << 0)
+/* Gen3 */
+#define LVDPLLCR_PLLDIVCNT_42M (0x014cb << 0)
+#define LVDPLLCR_PLLDIVCNT_85M (0x00a45 << 0)
+#define LVDPLLCR_PLLDIVCNT_128M (0x006c3 << 0)
+#define LVDPLLCR_PLLDIVCNT_148M (0x046c1 << 0)
+#define LVDPLLCR_PLLDIVCNT_MASK (0x7ffff << 0)
#define LVDCTRCR 0x000c
#define LVDCTRCR_CTR3SEL_ZERO (0 << 12)
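
The Gen2/Gen3 split introduced above means the same LVDCR1 offset uses two standby bits per channel on Gen2 but only one on Gen3, so a caller has to pick the macro variant by SoC generation. A hedged sketch of how the register value might be composed (lvdcr1_value and its gen/nlanes parameters are illustrative, not the driver's actual code):

#include <stdio.h>

#define LVDCR1_CHSTBY_GEN2(n)	(3 << (2 + (n) * 2))
#define LVDCR1_CHSTBY_GEN3(n)	(1 << (2 + (n) * 2))
#define LVDCR1_CLKSTBY_GEN2	(3 << 0)
#define LVDCR1_CLKSTBY_GEN3	(1 << 0)

/* Enable the clock lane and nlanes data channels for one generation. */
static unsigned int lvdcr1_value(unsigned int gen, unsigned int nlanes)
{
	unsigned int val = 0;
	unsigned int i;

	for (i = 0; i < nlanes; i++)
		val |= gen >= 3 ? LVDCR1_CHSTBY_GEN3(i)
				: LVDCR1_CHSTBY_GEN2(i);

	val |= gen >= 3 ? LVDCR1_CLKSTBY_GEN3 : LVDCR1_CLKSTBY_GEN2;
	return val;
}

int main(void)
{
	/* Gen3, 4 channels: bits 2, 4, 6, 8 plus bit 0 -> 0x155. */
	printf("%#x\n", lvdcr1_value(3, 4));
	return 0;
}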
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 85739859dffc..76b3362c5e59 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -35,3 +35,11 @@ config ROCKCHIP_DW_MIPI_DSI
for the Synopsys DesignWare HDMI driver. If you want to
enable MIPI DSI on RK3288 based SoC, you should select this
option.
+
+config ROCKCHIP_INNO_HDMI
+ tristate "Rockchip specific extensions for Innosilicon HDMI"
+ depends on DRM_ROCKCHIP
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the Innosilicon HDMI driver. If you want to enable
+ HDMI on RK3036 based SoC, you should select this option.
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index f6a809afceec..df8fbef17791 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -8,5 +8,6 @@ rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
+obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_vop_reg.o
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index f8f8f29fb7c3..7975158064e8 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -875,17 +875,10 @@ static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
clk_disable_unprepare(dsi->pclk);
}
-static bool dw_mipi_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder)
{
struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
- int mux = rockchip_drm_encoder_get_mux_id(dsi->dev->of_node, encoder);
+ int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder);
u32 interface_pix_fmt;
u32 val;
@@ -931,7 +924,6 @@ static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder)
static struct drm_encoder_helper_funcs
dw_mipi_dsi_encoder_helper_funcs = {
- .mode_fixup = dw_mipi_dsi_encoder_mode_fixup,
.commit = dw_mipi_dsi_encoder_commit,
.mode_set = dw_mipi_dsi_encoder_mode_set,
.disable = dw_mipi_dsi_encoder_disable,
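
The dw-mipi-dsi change above swaps the hand-rolled mux lookup for drm_of_encoder_active_endpoint_id(), which resolves the OF-graph endpoint whose remote port belongs to the encoder's active CRTC and returns that endpoint's id. A toy user-space model of the resolution (struct endpoint and active_endpoint_id are illustrative; the real helper walks the device tree):

#include <stdio.h>
#include <string.h>

/* Toy model: each encoder endpoint records the remote CRTC port it is
 * wired to; the mux id is the id of the endpoint whose remote port
 * matches the currently active CRTC. */
struct endpoint { int id; const char *remote_port; };

static int active_endpoint_id(const struct endpoint *eps, int n,
			      const char *active_crtc_port)
{
	int i;

	for (i = 0; i < n; i++)
		if (!strcmp(eps[i].remote_port, active_crtc_port))
			return eps[i].id;

	return -1;	/* no endpoint routed to that CRTC */
}

int main(void)
{
	const struct endpoint eps[] = {
		{ 0, "vopb-out" },	/* mux 0: big VOP */
		{ 1, "vopl-out" },	/* mux 1: little VOP */
	};

	printf("mux = %d\n", active_endpoint_id(eps, 2, "vopl-out"));
	return 0;
}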
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index c65ce8cb30d3..d5cfef75fc80 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -204,7 +204,7 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
ROCKCHIP_OUT_MODE_AAAA);
- mux = rockchip_drm_encoder_get_mux_id(hdmi->dev->of_node, encoder);
+ mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
if (mux)
val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16);
else
@@ -271,8 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (!iores)
return -ENXIO;
- platform_set_drvdata(pdev, hdmi);
-
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
@@ -293,7 +291,16 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+ ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+ /*
+ * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+ * which would have called the encoder cleanup. Do it manually.
+ */
+ if (ret)
+ drm_encoder_cleanup(encoder);
+
+ return ret;
}
static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
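
The bind-failure fix above encodes an ownership rule: once drm_encoder_init() has run, something must eventually call drm_encoder_cleanup(), and a failed dw_hdmi_bind() means dw_hdmi_unbind() will never do it. A condensed sketch of the pattern under that assumption (example_bind, example_encoder_funcs and subcomponent_bind are hypothetical names, not kernel API):

/* Sketch only: resources initialised before a fallible call need
 * explicit unwinding on its failure path, because the normal teardown
 * hook (unbind) only runs after a successful bind. */
static int example_bind(struct device *dev, struct drm_device *drm,
			struct drm_encoder *encoder)
{
	int ret;

	drm_encoder_init(drm, encoder, &example_encoder_funcs,
			 DRM_MODE_ENCODER_TMDS, NULL);

	ret = subcomponent_bind(dev);		/* hypothetical fallible step */
	if (ret)
		drm_encoder_cleanup(encoder);	/* unwind what we own */

	return ret;
}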
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
new file mode 100644
index 000000000000..10d62fff22f1
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -0,0 +1,938 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Zheng Yang <zhengyang@rock-chips.com>
+ * Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hdmi.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_of.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#include "inno_hdmi.h"
+
+#define to_inno_hdmi(x) container_of(x, struct inno_hdmi, x)
+
+struct hdmi_data_info {
+ int vic;
+ bool sink_is_hdmi;
+ bool sink_has_audio;
+ unsigned int enc_in_format;
+ unsigned int enc_out_format;
+ unsigned int colorimetry;
+};
+
+struct inno_hdmi_i2c {
+ struct i2c_adapter adap;
+
+ u8 ddc_addr;
+ u8 segment_addr;
+
+ struct mutex lock;
+ struct completion cmp;
+};
+
+struct inno_hdmi {
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ int irq;
+ struct clk *pclk;
+ void __iomem *regs;
+
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+
+ struct inno_hdmi_i2c *i2c;
+ struct i2c_adapter *ddc;
+
+ unsigned int tmds_rate;
+
+ struct hdmi_data_info hdmi_data;
+ struct drm_display_mode previous_mode;
+};
+
+enum {
+ CSC_ITU601_16_235_TO_RGB_0_255_8BIT,
+ CSC_ITU601_0_255_TO_RGB_0_255_8BIT,
+ CSC_ITU709_16_235_TO_RGB_0_255_8BIT,
+ CSC_RGB_0_255_TO_ITU601_16_235_8BIT,
+ CSC_RGB_0_255_TO_ITU709_16_235_8BIT,
+ CSC_RGB_0_255_TO_RGB_16_235_8BIT,
+};
+
+static const char coeff_csc[][24] = {
+ /*
+ * YUV2RGB:601 SD mode(Y[16:235], UV[16:240], RGB[0:255]):
+ * R = 1.164*Y + 1.596*V - 204
+ * G = 1.164*Y - 0.391*U - 0.813*V + 154
+ * B = 1.164*Y + 2.018*U - 258
+ */
+ {
+ 0x04, 0xa7, 0x00, 0x00, 0x06, 0x62, 0x02, 0xcc,
+ 0x04, 0xa7, 0x11, 0x90, 0x13, 0x40, 0x00, 0x9a,
+ 0x04, 0xa7, 0x08, 0x12, 0x00, 0x00, 0x03, 0x02
+ },
+ /*
+ * YUV2RGB:601 SD mode(YUV[0:255],RGB[0:255]):
+ * R = Y + 1.402*V - 248
+ * G = Y - 0.344*U - 0.714*V + 135
+ * B = Y + 1.772*U - 227
+ */
+ {
+ 0x04, 0x00, 0x00, 0x00, 0x05, 0x9b, 0x02, 0xf8,
+ 0x04, 0x00, 0x11, 0x60, 0x12, 0xdb, 0x00, 0x87,
+ 0x04, 0x00, 0x07, 0x16, 0x00, 0x00, 0x02, 0xe3
+ },
+ /*
+ * YUV2RGB:709 HD mode(Y[16:235],UV[16:240],RGB[0:255]):
+ * R = 1.164*Y + 1.793*V - 248
+ * G = 1.164*Y - 0.213*U - 0.534*V + 77
+ * B = 1.164*Y + 2.115*U - 289
+ */
+ {
+ 0x04, 0xa7, 0x00, 0x00, 0x07, 0x2c, 0x02, 0xf8,
+ 0x04, 0xa7, 0x10, 0xda, 0x12, 0x22, 0x00, 0x4d,
+ 0x04, 0xa7, 0x08, 0x74, 0x00, 0x00, 0x03, 0x21
+ },
+
+ /*
+ * RGB2YUV:601 SD mode:
+ * Cb = -0.291G - 0.148R + 0.439B + 128
+ * Y = 0.504G + 0.257R + 0.098B + 16
+ * Cr = -0.368G + 0.439R - 0.071B + 128
+ */
+ {
+ 0x11, 0x5f, 0x01, 0x82, 0x10, 0x23, 0x00, 0x80,
+ 0x02, 0x1c, 0x00, 0xa1, 0x00, 0x36, 0x00, 0x1e,
+ 0x11, 0x29, 0x10, 0x59, 0x01, 0x82, 0x00, 0x80
+ },
+ /*
+ * RGB2YUV:709 HD mode:
+ * Cb = - 0.338G - 0.101R + 0.439B + 128
+ * Y = 0.614G + 0.183R + 0.062B + 16
+ * Cr = - 0.399G + 0.439R - 0.040B + 128
+ */
+ {
+ 0x11, 0x98, 0x01, 0xc1, 0x10, 0x28, 0x00, 0x80,
+ 0x02, 0x74, 0x00, 0xbb, 0x00, 0x3f, 0x00, 0x10,
+ 0x11, 0x5a, 0x10, 0x67, 0x01, 0xc1, 0x00, 0x80
+ },
+ /*
+ * RGB[0:255]2RGB[16:235]:
+ * R' = R x (235-16)/255 + 16;
+ * G' = G x (235-16)/255 + 16;
+ * B' = B x (235-16)/255 + 16;
+ */
+ {
+ 0x00, 0x00, 0x03, 0x6F, 0x00, 0x00, 0x00, 0x10,
+ 0x03, 0x6F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x6F, 0x00, 0x10
+ },
+};
+
+static inline u8 hdmi_readb(struct inno_hdmi *hdmi, u16 offset)
+{
+ return readl_relaxed(hdmi->regs + (offset) * 0x04);
+}
+
+static inline void hdmi_writeb(struct inno_hdmi *hdmi, u16 offset, u32 val)
+{
+ writel_relaxed(val, hdmi->regs + (offset) * 0x04);
+}
+
+static inline void hdmi_modb(struct inno_hdmi *hdmi, u16 offset,
+ u32 msk, u32 val)
+{
+ u8 temp = hdmi_readb(hdmi, offset) & ~msk;
+
+ temp |= val & msk;
+ hdmi_writeb(hdmi, offset, temp);
+}
+
+static void inno_hdmi_i2c_init(struct inno_hdmi *hdmi)
+{
+ int ddc_bus_freq;
+
+ ddc_bus_freq = (hdmi->tmds_rate >> 2) / HDMI_SCL_RATE;
+
+ hdmi_writeb(hdmi, DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
+ hdmi_writeb(hdmi, DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);
+
+ /* Clear the EDID interrupt flag and mute the interrupt */
+ hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, 0);
+ hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
+}
+
+static void inno_hdmi_sys_power(struct inno_hdmi *hdmi, bool enable)
+{
+ if (enable)
+ hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER, v_PWR_ON);
+ else
+ hdmi_modb(hdmi, HDMI_SYS_CTRL, m_POWER, v_PWR_OFF);
+}
+
+static void inno_hdmi_set_pwr_mode(struct inno_hdmi *hdmi, int mode)
+{
+ switch (mode) {
+ case NORMAL:
+ inno_hdmi_sys_power(hdmi, false);
+
+ hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x6f);
+ hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0xbb);
+
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x14);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x10);
+ hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x0f);
+ hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_SYNC, 0x01);
+
+ inno_hdmi_sys_power(hdmi, true);
+ break;
+
+ case LOWER_PWR:
+ inno_hdmi_sys_power(hdmi, false);
+ hdmi_writeb(hdmi, HDMI_PHY_DRIVER, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_PRE_EMPHASIS, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_CHG_PWR, 0x00);
+ hdmi_writeb(hdmi, HDMI_PHY_SYS_CTL, 0x15);
+
+ break;
+
+ default:
+ dev_err(hdmi->dev, "Unknown power mode %d\n", mode);
+ }
+}
+
+static void inno_hdmi_reset(struct inno_hdmi *hdmi)
+{
+ u32 val;
+ u32 msk;
+
+ hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_DIGITAL, v_NOT_RST_DIGITAL);
+ udelay(100);
+
+ hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_ANALOG, v_NOT_RST_ANALOG);
+ udelay(100);
+
+ msk = m_REG_CLK_INV | m_REG_CLK_SOURCE | m_POWER | m_INT_POL;
+ val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH;
+ hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val);
+
+ inno_hdmi_set_pwr_mode(hdmi, NORMAL);
+}
+
+static int inno_hdmi_upload_frame(struct inno_hdmi *hdmi, int setup_rc,
+ union hdmi_infoframe *frame, u32 frame_index,
+ u32 mask, u32 disable, u32 enable)
+{
+ if (mask)
+ hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, disable);
+
+ hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_BUF_INDEX, frame_index);
+
+ if (setup_rc >= 0) {
+ u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
+ ssize_t rc, i;
+
+ rc = hdmi_infoframe_pack(frame, packed_frame,
+ sizeof(packed_frame));
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < rc; i++)
+ hdmi_writeb(hdmi, HDMI_CONTROL_PACKET_ADDR + i,
+ packed_frame[i]);
+
+ if (mask)
+ hdmi_modb(hdmi, HDMI_PACKET_SEND_AUTO, mask, enable);
+ }
+
+ return setup_rc;
+}
+
+static int inno_hdmi_config_video_vsi(struct inno_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int rc;
+
+ rc = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ mode);
+
+ return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_VSI,
+ m_PACKET_VSI_EN, v_PACKET_VSI_EN(0), v_PACKET_VSI_EN(1));
+}
+
+static int inno_hdmi_config_video_avi(struct inno_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int rc;
+
+ rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+
+ if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
+ else
+ frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+
+ return inno_hdmi_upload_frame(hdmi, rc, &frame, INFOFRAME_AVI, 0, 0, 0);
+}
+
+static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
+{
+ struct hdmi_data_info *data = &hdmi->hdmi_data;
+ int c0_c2_change = 0;
+ int csc_enable = 0;
+ int csc_mode = 0;
+ int auto_csc = 0;
+ int value;
+ int i;
+
+ /* Input video mode is SDR RGB 24-bit; the data enable signal comes from an external source */
+ hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL1, v_DE_EXTERNAL |
+ v_VIDEO_INPUT_FORMAT(VIDEO_INPUT_SDR_RGB444));
+
+ /* Input color is hardcoded to RGB, and the output color is hardcoded to RGB888 */
+ value = v_VIDEO_INPUT_BITS(VIDEO_INPUT_8BITS) |
+ v_VIDEO_OUTPUT_COLOR(0) |
+ v_VIDEO_INPUT_CSP(0);
+ hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL2, value);
+
+ if (data->enc_in_format == data->enc_out_format) {
+ if ((data->enc_in_format == HDMI_COLORSPACE_RGB) ||
+ (data->enc_in_format >= HDMI_COLORSPACE_YUV444)) {
+ value = v_SOF_DISABLE | v_COLOR_DEPTH_NOT_INDICATED(1);
+ hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value);
+
+ hdmi_modb(hdmi, HDMI_VIDEO_CONTRL,
+ m_VIDEO_AUTO_CSC | m_VIDEO_C0_C2_SWAP,
+ v_VIDEO_AUTO_CSC(AUTO_CSC_DISABLE) |
+ v_VIDEO_C0_C2_SWAP(C0_C2_CHANGE_DISABLE));
+ return 0;
+ }
+ }
+
+ if (data->colorimetry == HDMI_COLORIMETRY_ITU_601) {
+ if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
+ (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
+ csc_mode = CSC_RGB_0_255_TO_ITU601_16_235_8BIT;
+ auto_csc = AUTO_CSC_DISABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_ENABLE;
+ } else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
+ (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
+ csc_mode = CSC_ITU601_16_235_TO_RGB_0_255_8BIT;
+ auto_csc = AUTO_CSC_ENABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_DISABLE;
+ }
+ } else {
+ if ((data->enc_in_format == HDMI_COLORSPACE_RGB) &&
+ (data->enc_out_format == HDMI_COLORSPACE_YUV444)) {
+ csc_mode = CSC_RGB_0_255_TO_ITU709_16_235_8BIT;
+ auto_csc = AUTO_CSC_DISABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_ENABLE;
+ } else if ((data->enc_in_format == HDMI_COLORSPACE_YUV444) &&
+ (data->enc_out_format == HDMI_COLORSPACE_RGB)) {
+ csc_mode = CSC_ITU709_16_235_TO_RGB_0_255_8BIT;
+ auto_csc = AUTO_CSC_ENABLE;
+ c0_c2_change = C0_C2_CHANGE_DISABLE;
+ csc_enable = v_CSC_DISABLE;
+ }
+ }
+
+ for (i = 0; i < 24; i++)
+ hdmi_writeb(hdmi, HDMI_VIDEO_CSC_COEF + i,
+ coeff_csc[csc_mode][i]);
+
+ value = v_SOF_DISABLE | csc_enable | v_COLOR_DEPTH_NOT_INDICATED(1);
+ hdmi_writeb(hdmi, HDMI_VIDEO_CONTRL3, value);
+ hdmi_modb(hdmi, HDMI_VIDEO_CONTRL, m_VIDEO_AUTO_CSC |
+ m_VIDEO_C0_C2_SWAP, v_VIDEO_AUTO_CSC(auto_csc) |
+ v_VIDEO_C0_C2_SWAP(c0_c2_change));
+
+ return 0;
+}
+
+static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ int value;
+
+ /* Set the external video timing polarity and interlace mode */
+ value = v_EXTERANL_VIDEO(1);
+ value |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
+ v_HSYNC_POLARITY(1) : v_HSYNC_POLARITY(0);
+ value |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
+ v_VSYNC_POLARITY(1) : v_VSYNC_POLARITY(0);
+ value |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
+ v_INETLACE(1) : v_INETLACE(0);
+ hdmi_writeb(hdmi, HDMI_VIDEO_TIMING_CTL, value);
+
+ /* Set the detailed external video timing */
+ value = mode->htotal;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HTOTAL_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HTOTAL_H, (value >> 8) & 0xFF);
+
+ value = mode->htotal - mode->hdisplay;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
+
+ value = mode->hsync_start - mode->hdisplay;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
+
+ value = mode->hsync_end - mode->hsync_start;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDURATION_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDURATION_H, (value >> 8) & 0xFF);
+
+ value = mode->vtotal;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VTOTAL_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VTOTAL_H, (value >> 8) & 0xFF);
+
+ value = mode->vtotal - mode->vdisplay;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
+
+ value = mode->vsync_start - mode->vdisplay;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
+
+ value = mode->vsync_end - mode->vsync_start;
+ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDURATION, value & 0xFF);
+
+ hdmi_writeb(hdmi, HDMI_PHY_PRE_DIV_RATIO, 0x1e);
+ hdmi_writeb(hdmi, HDMI_PHY_FEEDBACK_DIV_RATIO_LOW, 0x2c);
+ hdmi_writeb(hdmi, HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH, 0x01);
+
+ return 0;
+}
+
+static int inno_hdmi_setup(struct inno_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
+
+ hdmi->hdmi_data.enc_in_format = HDMI_COLORSPACE_RGB;
+ hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
+
+ if ((hdmi->hdmi_data.vic == 6) || (hdmi->hdmi_data.vic == 7) ||
+ (hdmi->hdmi_data.vic == 21) || (hdmi->hdmi_data.vic == 22) ||
+ (hdmi->hdmi_data.vic == 2) || (hdmi->hdmi_data.vic == 3) ||
+ (hdmi->hdmi_data.vic == 17) || (hdmi->hdmi_data.vic == 18))
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ else
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
+
+ /* Mute video and audio output */
+ hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
+ v_AUDIO_MUTE(1) | v_VIDEO_MUTE(1));
+
+ /* Set HDMI Mode */
+ hdmi_writeb(hdmi, HDMI_HDCP_CTRL,
+ v_HDMI_DVI(hdmi->hdmi_data.sink_is_hdmi));
+
+ inno_hdmi_config_video_timing(hdmi, mode);
+
+ inno_hdmi_config_video_csc(hdmi);
+
+ if (hdmi->hdmi_data.sink_is_hdmi) {
+ inno_hdmi_config_video_avi(hdmi, mode);
+ inno_hdmi_config_video_vsi(hdmi, mode);
+ }
+
+ /*
+ * Once the IP controller has been configured with an accurate video
+ * timing, the TMDS clock source is switched to DCLK_LCDC, so we need
+ * to initialize the TMDS rate to the mode pixel clock rate and
+ * reconfigure the DDC clock.
+ */
+ hdmi->tmds_rate = mode->clock * 1000;
+ inno_hdmi_i2c_init(hdmi);
+
+ /* Unmute video and audio output */
+ hdmi_modb(hdmi, HDMI_AV_MUTE, m_AUDIO_MUTE | m_VIDEO_BLACK,
+ v_AUDIO_MUTE(0) | v_VIDEO_MUTE(0));
+
+ return 0;
+}
+
+static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct inno_hdmi *hdmi = to_inno_hdmi(encoder);
+
+ inno_hdmi_setup(hdmi, adj_mode);
+
+ /* Store the display mode for hotplug/DPMS power-on events */
+ memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
+}
+
+static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct inno_hdmi *hdmi = to_inno_hdmi(encoder);
+
+ rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
+ ROCKCHIP_OUT_MODE_P888);
+
+ inno_hdmi_set_pwr_mode(hdmi, NORMAL);
+}
+
+static void inno_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct inno_hdmi *hdmi = to_inno_hdmi(encoder);
+
+ inno_hdmi_set_pwr_mode(hdmi, LOWER_PWR);
+}
+
+static bool inno_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ return true;
+}
+
+static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
+ .enable = inno_hdmi_encoder_enable,
+ .disable = inno_hdmi_encoder_disable,
+ .mode_fixup = inno_hdmi_encoder_mode_fixup,
+ .mode_set = inno_hdmi_encoder_mode_set,
+};
+
+static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static enum drm_connector_status
+inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct inno_hdmi *hdmi = to_inno_hdmi(connector);
+
+ return (hdmi_readb(hdmi, HDMI_STATUS) & m_HOTPLUG) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct inno_hdmi *hdmi = to_inno_hdmi(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ if (!hdmi->ddc)
+ return 0;
+
+ edid = drm_get_edid(connector, hdmi->ddc);
+ if (edid) {
+ hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+ hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static enum drm_mode_status
+inno_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+inno_hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct inno_hdmi *hdmi = to_inno_hdmi(connector);
+
+ return &hdmi->encoder;
+}
+
+static int
+inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ return drm_helper_probe_single_connector_modes(connector, 1920, 1080);
+}
+
+static void inno_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs inno_hdmi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = inno_hdmi_probe_single_connector_modes,
+ .detect = inno_hdmi_connector_detect,
+ .destroy = inno_hdmi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
+ .get_modes = inno_hdmi_connector_get_modes,
+ .mode_valid = inno_hdmi_connector_mode_valid,
+ .best_encoder = inno_hdmi_connector_best_encoder,
+};
+
+static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder;
+ struct device *dev = hdmi->dev;
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_helper_add(&hdmi->connector,
+ &inno_hdmi_connector_helper_funcs);
+ drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+
+ drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+
+ return 0;
+}
+
+static irqreturn_t inno_hdmi_i2c_irq(struct inno_hdmi *hdmi)
+{
+ struct inno_hdmi_i2c *i2c = hdmi->i2c;
+ u8 stat;
+
+ stat = hdmi_readb(hdmi, HDMI_INTERRUPT_STATUS1);
+ if (!(stat & m_INT_EDID_READY))
+ return IRQ_NONE;
+
+ /* Clear HDMI EDID interrupt flag */
+ hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
+
+ complete(&i2c->cmp);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t inno_hdmi_hardirq(int irq, void *dev_id)
+{
+ struct inno_hdmi *hdmi = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ u8 interrupt;
+
+ if (hdmi->i2c)
+ ret = inno_hdmi_i2c_irq(hdmi);
+
+ interrupt = hdmi_readb(hdmi, HDMI_STATUS);
+ if (interrupt & m_INT_HOTPLUG) {
+ hdmi_modb(hdmi, HDMI_STATUS, m_INT_HOTPLUG, m_INT_HOTPLUG);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t inno_hdmi_irq(int irq, void *dev_id)
+{
+ struct inno_hdmi *hdmi = dev_id;
+
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+static int inno_hdmi_i2c_read(struct inno_hdmi *hdmi, struct i2c_msg *msgs)
+{
+ int length = msgs->len;
+ u8 *buf = msgs->buf;
+ int ret;
+
+ ret = wait_for_completion_timeout(&hdmi->i2c->cmp, HZ / 10);
+ if (!ret)
+ return -EAGAIN;
+
+ while (length--)
+ *buf++ = hdmi_readb(hdmi, HDMI_EDID_FIFO_ADDR);
+
+ return 0;
+}
+
+static int inno_hdmi_i2c_write(struct inno_hdmi *hdmi, struct i2c_msg *msgs)
+{
+ /*
+ * The DDC module only supports reading EDID messages, so we assume
+ * that each byte written to this I2C adapter is the offset of the
+ * EDID word address.
+ */
+ if ((msgs->len != 1) ||
+ ((msgs->addr != DDC_ADDR) && (msgs->addr != DDC_SEGMENT_ADDR)))
+ return -EINVAL;
+
+ reinit_completion(&hdmi->i2c->cmp);
+
+ if (msgs->addr == DDC_SEGMENT_ADDR)
+ hdmi->i2c->segment_addr = msgs->buf[0];
+ if (msgs->addr == DDC_ADDR)
+ hdmi->i2c->ddc_addr = msgs->buf[0];
+
+ /* Set the EDID FIFO start address */
+ hdmi_writeb(hdmi, HDMI_EDID_FIFO_OFFSET, 0x00);
+
+ /* Set the EDID word address (0x00/0x80) */
+ hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
+
+ /* Set the EDID segment pointer */
+ hdmi_writeb(hdmi, HDMI_EDID_SEGMENT_POINTER, hdmi->i2c->segment_addr);
+
+ return 0;
+}
+
+static int inno_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct inno_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct inno_hdmi_i2c *i2c = hdmi->i2c;
+ int i, ret = 0;
+
+ mutex_lock(&i2c->lock);
+
+ /* Clear the EDID interrupt flag and unmute the interrupt */
+ hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, m_INT_EDID_READY);
+ hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
+
+ for (i = 0; i < num; i++) {
+ dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = inno_hdmi_i2c_read(hdmi, &msgs[i]);
+ else
+ ret = inno_hdmi_i2c_write(hdmi, &msgs[i]);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Mute HDMI EDID interrupt */
+ hdmi_writeb(hdmi, HDMI_INTERRUPT_MASK1, 0);
+
+ mutex_unlock(&i2c->lock);
+
+ return ret;
+}
+
+static u32 inno_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm inno_hdmi_algorithm = {
+ .master_xfer = inno_hdmi_i2c_xfer,
+ .functionality = inno_hdmi_i2c_func,
+};
+
+static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct inno_hdmi_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&i2c->lock);
+ init_completion(&i2c->cmp);
+
+ adap = &i2c->adap;
+ adap->class = I2C_CLASS_DDC;
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = hdmi->dev;
+ adap->dev.of_node = hdmi->dev->of_node;
+ adap->algo = &inno_hdmi_algorithm;
+ strlcpy(adap->name, "Inno HDMI", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
+ devm_kfree(hdmi->dev, i2c);
+ return ERR_PTR(ret);
+ }
+
+ hdmi->i2c = i2c;
+
+ dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+ return adap;
+}
+
+static int inno_hdmi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct inno_hdmi *hdmi;
+ struct resource *iores;
+ int irq;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+ hdmi->drm_dev = drm;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -ENXIO;
+
+ hdmi->regs = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
+ if (IS_ERR(hdmi->pclk)) {
+ dev_err(hdmi->dev, "Unable to get HDMI pclk clk\n");
+ return PTR_ERR(hdmi->pclk);
+ }
+
+ ret = clk_prepare_enable(hdmi->pclk);
+ if (ret) {
+ dev_err(hdmi->dev, "Cannot enable HDMI pclk clock: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ inno_hdmi_reset(hdmi);
+
+ hdmi->ddc = inno_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->ddc)) {
+ hdmi->ddc = NULL;
+ return PTR_ERR(hdmi->ddc);
+ }
+
+ /*
+ * Before the IP controller has been configured with an accurate video
+ * timing, the TMDS clock source is switched to PCLK_HDMI, so we need
+ * to initialize the TMDS rate to the PCLK rate and reconfigure the
+ * DDC clock.
+ */
+ hdmi->tmds_rate = clk_get_rate(hdmi->pclk);
+ inno_hdmi_i2c_init(hdmi);
+
+ ret = inno_hdmi_register(drm, hdmi);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, hdmi);
+
+ /* Unmute hotplug interrupt */
+ hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1));
+
+ ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq,
+ inno_hdmi_irq, IRQF_SHARED,
+ dev_name(dev), hdmi);
+
+ return ret;
+}
+
+static void inno_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct inno_hdmi *hdmi = dev_get_drvdata(dev);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+
+ clk_disable_unprepare(hdmi->pclk);
+ i2c_put_adapter(hdmi->ddc);
+}
+
+static const struct component_ops inno_hdmi_ops = {
+ .bind = inno_hdmi_bind,
+ .unbind = inno_hdmi_unbind,
+};
+
+static int inno_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &inno_hdmi_ops);
+}
+
+static int inno_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &inno_hdmi_ops);
+
+ return 0;
+}
+
+static const struct of_device_id inno_hdmi_dt_ids[] = {
+ { .compatible = "rockchip,rk3036-inno-hdmi",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, inno_hdmi_dt_ids);
+
+static struct platform_driver inno_hdmi_driver = {
+ .probe = inno_hdmi_probe,
+ .remove = inno_hdmi_remove,
+ .driver = {
+ .name = "innohdmi-rockchip",
+ .of_match_table = inno_hdmi_dt_ids,
+ },
+};
+
+module_platform_driver(inno_hdmi_driver);
+
+MODULE_AUTHOR("Zheng Yang <zhengyang@rock-chips.com>");
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip Specific INNO-HDMI Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:innohdmi-rockchip");
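
inno_hdmi_i2c_init() above derives the DDC divider from the current TMDS rate: the DDC block is clocked at tmds_rate / 4, and the divider register holds that clock divided by the 100 kHz SCL target, split into a low and a high byte. A standalone sketch of the arithmetic (the 74.25 MHz rate is just an example value):

#include <stdio.h>

#define HDMI_SCL_RATE	(100 * 1000)	/* 100 kHz DDC target */

int main(void)
{
	unsigned long tmds_rate = 74250000;	/* example: 720p pixel clock */
	unsigned int div = (tmds_rate >> 2) / HDMI_SCL_RATE;

	/* The driver writes these to DDC_BUS_FREQ_L / DDC_BUS_FREQ_H. */
	printf("low = %#x, high = %#x\n", div & 0xff, (div >> 8) & 0xff);
	return 0;
}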
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
new file mode 100644
index 000000000000..aa7c415f8cc1
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Zheng Yang <zhengyang@rock-chips.com>
+ * Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __INNO_HDMI_H__
+#define __INNO_HDMI_H__
+
+#define DDC_SEGMENT_ADDR 0x30
+
+enum PWR_MODE {
+ NORMAL,
+ LOWER_PWR,
+};
+
+#define HDMI_SCL_RATE (100*1000)
+#define DDC_BUS_FREQ_L 0x4b
+#define DDC_BUS_FREQ_H 0x4c
+
+#define HDMI_SYS_CTRL 0x00
+#define m_RST_ANALOG (1 << 6)
+#define v_RST_ANALOG (0 << 6)
+#define v_NOT_RST_ANALOG (1 << 6)
+#define m_RST_DIGITAL (1 << 5)
+#define v_RST_DIGITAL (0 << 5)
+#define v_NOT_RST_DIGITAL (1 << 5)
+#define m_REG_CLK_INV (1 << 4)
+#define v_REG_CLK_NOT_INV (0 << 4)
+#define v_REG_CLK_INV (1 << 4)
+#define m_VCLK_INV (1 << 3)
+#define v_VCLK_NOT_INV (0 << 3)
+#define v_VCLK_INV (1 << 3)
+#define m_REG_CLK_SOURCE (1 << 2)
+#define v_REG_CLK_SOURCE_TMDS (0 << 2)
+#define v_REG_CLK_SOURCE_SYS (1 << 2)
+#define m_POWER (1 << 1)
+#define v_PWR_ON (0 << 1)
+#define v_PWR_OFF (1 << 1)
+#define m_INT_POL (1 << 0)
+#define v_INT_POL_HIGH 1
+#define v_INT_POL_LOW 0
+
+#define HDMI_VIDEO_CONTRL1 0x01
+#define m_VIDEO_INPUT_FORMAT (7 << 1)
+#define m_DE_SOURCE (1 << 0)
+#define v_VIDEO_INPUT_FORMAT(n) (n << 1)
+#define v_DE_EXTERNAL 1
+#define v_DE_INTERNAL 0
+enum {
+ VIDEO_INPUT_SDR_RGB444 = 0,
+ VIDEO_INPUT_DDR_RGB444 = 5,
+ VIDEO_INPUT_DDR_YCBCR422 = 6
+};
+
+#define HDMI_VIDEO_CONTRL2 0x02
+#define m_VIDEO_OUTPUT_COLOR (3 << 6)
+#define m_VIDEO_INPUT_BITS (3 << 4)
+#define m_VIDEO_INPUT_CSP (1 << 0)
+#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6)
+#define v_VIDEO_INPUT_BITS(n) (n << 4)
+#define v_VIDEO_INPUT_CSP(n) (n << 0)
+enum {
+ VIDEO_INPUT_12BITS = 0,
+ VIDEO_INPUT_10BITS = 1,
+ VIDEO_INPUT_REVERT = 2,
+ VIDEO_INPUT_8BITS = 3,
+};
+
+#define HDMI_VIDEO_CONTRL 0x03
+#define m_VIDEO_AUTO_CSC (1 << 7)
+#define v_VIDEO_AUTO_CSC(n) (n << 7)
+#define m_VIDEO_C0_C2_SWAP (1 << 0)
+#define v_VIDEO_C0_C2_SWAP(n) (n << 0)
+enum {
+ C0_C2_CHANGE_ENABLE = 0,
+ C0_C2_CHANGE_DISABLE = 1,
+ AUTO_CSC_DISABLE = 0,
+ AUTO_CSC_ENABLE = 1,
+};
+
+#define HDMI_VIDEO_CONTRL3 0x04
+#define m_COLOR_DEPTH_NOT_INDICATED (1 << 4)
+#define m_SOF (1 << 3)
+#define m_COLOR_RANGE (1 << 2)
+#define m_CSC (1 << 0)
+#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4)
+#define v_SOF_ENABLE (0 << 3)
+#define v_SOF_DISABLE (1 << 3)
+#define v_COLOR_RANGE_FULL (1 << 2)
+#define v_COLOR_RANGE_LIMITED (0 << 2)
+#define v_CSC_ENABLE 1
+#define v_CSC_DISABLE 0
+
+#define HDMI_AV_MUTE 0x05
+#define m_AVMUTE_CLEAR (1 << 7)
+#define m_AVMUTE_ENABLE (1 << 6)
+#define m_AUDIO_MUTE (1 << 1)
+#define m_VIDEO_BLACK (1 << 0)
+#define v_AVMUTE_CLEAR(n) (n << 7)
+#define v_AVMUTE_ENABLE(n) (n << 6)
+#define v_AUDIO_MUTE(n) (n << 1)
+#define v_VIDEO_MUTE(n) (n << 0)
+
+#define HDMI_VIDEO_TIMING_CTL 0x08
+#define v_HSYNC_POLARITY(n) (n << 3)
+#define v_VSYNC_POLARITY(n) (n << 2)
+#define v_INETLACE(n) (n << 1)
+#define v_EXTERANL_VIDEO(n) (n << 0)
+
+#define HDMI_VIDEO_EXT_HTOTAL_L 0x09
+#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a
+#define HDMI_VIDEO_EXT_HBLANK_L 0x0b
+#define HDMI_VIDEO_EXT_HBLANK_H 0x0c
+#define HDMI_VIDEO_EXT_HDELAY_L 0x0d
+#define HDMI_VIDEO_EXT_HDELAY_H 0x0e
+#define HDMI_VIDEO_EXT_HDURATION_L 0x0f
+#define HDMI_VIDEO_EXT_HDURATION_H 0x10
+#define HDMI_VIDEO_EXT_VTOTAL_L 0x11
+#define HDMI_VIDEO_EXT_VTOTAL_H 0x12
+#define HDMI_VIDEO_EXT_VBLANK 0x13
+#define HDMI_VIDEO_EXT_VDELAY 0x14
+#define HDMI_VIDEO_EXT_VDURATION 0x15
+
+#define HDMI_VIDEO_CSC_COEF 0x18
+
+#define HDMI_AUDIO_CTRL1 0x35
+enum {
+ CTS_SOURCE_INTERNAL = 0,
+ CTS_SOURCE_EXTERNAL = 1,
+};
+#define v_CTS_SOURCE(n) (n << 7)
+
+enum {
+ DOWNSAMPLE_DISABLE = 0,
+ DOWNSAMPLE_1_2 = 1,
+ DOWNSAMPLE_1_4 = 2,
+};
+#define v_DOWN_SAMPLE(n) (n << 5)
+
+enum {
+ AUDIO_SOURCE_IIS = 0,
+ AUDIO_SOURCE_SPDIF = 1,
+};
+#define v_AUDIO_SOURCE(n) (n << 3)
+
+#define v_MCLK_ENABLE(n) (n << 2)
+enum {
+ MCLK_128FS = 0,
+ MCLK_256FS = 1,
+ MCLK_384FS = 2,
+ MCLK_512FS = 3,
+};
+#define v_MCLK_RATIO(n) (n)
+
+#define AUDIO_SAMPLE_RATE 0x37
+enum {
+ AUDIO_32K = 0x3,
+ AUDIO_441K = 0x0,
+ AUDIO_48K = 0x2,
+ AUDIO_882K = 0x8,
+ AUDIO_96K = 0xa,
+ AUDIO_1764K = 0xc,
+ AUDIO_192K = 0xe,
+};
+
+#define AUDIO_I2S_MODE 0x38
+enum {
+ I2S_CHANNEL_1_2 = 1,
+ I2S_CHANNEL_3_4 = 3,
+ I2S_CHANNEL_5_6 = 7,
+ I2S_CHANNEL_7_8 = 0xf
+};
+#define v_I2S_CHANNEL(n) ((n) << 2)
+enum {
+ I2S_STANDARD = 0,
+ I2S_LEFT_JUSTIFIED = 1,
+ I2S_RIGHT_JUSTIFIED = 2,
+};
+#define v_I2S_MODE(n) (n)
+
+#define AUDIO_I2S_MAP 0x39
+#define AUDIO_I2S_SWAPS_SPDIF 0x3a
+#define v_SPIDF_FREQ(n) (n)
+
+#define N_32K 0x1000
+#define N_441K 0x1880
+#define N_882K 0x3100
+#define N_1764K 0x6200
+#define N_48K 0x1800
+#define N_96K 0x3000
+#define N_192K 0x6000
+
+#define HDMI_AUDIO_CHANNEL_STATUS 0x3e
+#define m_AUDIO_STATUS_NLPCM (1 << 7)
+#define m_AUDIO_STATUS_USE (1 << 6)
+#define m_AUDIO_STATUS_COPYRIGHT (1 << 5)
+#define m_AUDIO_STATUS_ADDITION (3 << 2)
+#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0)
+#define v_AUDIO_STATUS_NLPCM(n) ((n & 1) << 7)
+#define AUDIO_N_H 0x3f
+#define AUDIO_N_M 0x40
+#define AUDIO_N_L 0x41
+
+#define HDMI_AUDIO_CTS_H 0x45
+#define HDMI_AUDIO_CTS_M 0x46
+#define HDMI_AUDIO_CTS_L 0x47
+
+#define HDMI_DDC_CLK_L 0x4b
+#define HDMI_DDC_CLK_H 0x4c
+
+#define HDMI_EDID_SEGMENT_POINTER 0x4d
+#define HDMI_EDID_WORD_ADDR 0x4e
+#define HDMI_EDID_FIFO_OFFSET 0x4f
+#define HDMI_EDID_FIFO_ADDR 0x50
+
+#define HDMI_PACKET_SEND_MANUAL 0x9c
+#define HDMI_PACKET_SEND_AUTO 0x9d
+#define m_PACKET_GCP_EN (1 << 7)
+#define m_PACKET_MSI_EN (1 << 6)
+#define m_PACKET_SDI_EN (1 << 5)
+#define m_PACKET_VSI_EN (1 << 4)
+#define v_PACKET_GCP_EN(n) ((n & 1) << 7)
+#define v_PACKET_MSI_EN(n) ((n & 1) << 6)
+#define v_PACKET_SDI_EN(n) ((n & 1) << 5)
+#define v_PACKET_VSI_EN(n) ((n & 1) << 4)
+
+#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f
+enum {
+ INFOFRAME_VSI = 0x05,
+ INFOFRAME_AVI = 0x06,
+ INFOFRAME_AAI = 0x08,
+};
+
+#define HDMI_CONTROL_PACKET_ADDR 0xa0
+#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
+enum {
+ AVI_COLOR_MODE_RGB = 0,
+ AVI_COLOR_MODE_YCBCR422 = 1,
+ AVI_COLOR_MODE_YCBCR444 = 2,
+ AVI_COLORIMETRY_NO_DATA = 0,
+
+ AVI_COLORIMETRY_SMPTE_170M = 1,
+ AVI_COLORIMETRY_ITU709 = 2,
+ AVI_COLORIMETRY_EXTENDED = 3,
+
+ AVI_CODED_FRAME_ASPECT_NO_DATA = 0,
+ AVI_CODED_FRAME_ASPECT_4_3 = 1,
+ AVI_CODED_FRAME_ASPECT_16_9 = 2,
+
+ ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08,
+ ACTIVE_ASPECT_RATE_4_3 = 0x09,
+ ACTIVE_ASPECT_RATE_16_9 = 0x0A,
+ ACTIVE_ASPECT_RATE_14_9 = 0x0B,
+};
+
+#define HDMI_HDCP_CTRL 0x52
+#define m_HDMI_DVI (1 << 1)
+#define v_HDMI_DVI(n) (n << 1)
+
+#define HDMI_INTERRUPT_MASK1 0xc0
+#define HDMI_INTERRUPT_STATUS1 0xc1
+#define m_INT_ACTIVE_VSYNC (1 << 5)
+#define m_INT_EDID_READY (1 << 2)
+
+#define HDMI_INTERRUPT_MASK2 0xc2
+#define HDMI_INTERRUPT_STATUS2 0xc3
+#define m_INT_HDCP_ERR (1 << 7)
+#define m_INT_BKSV_FLAG (1 << 6)
+#define m_INT_HDCP_OK (1 << 4)
+
+#define HDMI_STATUS 0xc8
+#define m_HOTPLUG (1 << 7)
+#define m_MASK_INT_HOTPLUG (1 << 5)
+#define m_INT_HOTPLUG (1 << 1)
+#define v_MASK_INT_HOTPLUG(n) ((n & 0x1) << 5)
+
+#define HDMI_COLORBAR 0xc9
+
+#define HDMI_PHY_SYNC 0xce
+#define HDMI_PHY_SYS_CTL 0xe0
+#define m_TMDS_CLK_SOURCE (1 << 5)
+#define v_TMDS_FROM_PLL (0 << 5)
+#define v_TMDS_FROM_GEN (1 << 5)
+#define m_PHASE_CLK (1 << 4)
+#define v_DEFAULT_PHASE (0 << 4)
+#define v_SYNC_PHASE (1 << 4)
+#define m_TMDS_CURRENT_PWR (1 << 3)
+#define v_TURN_ON_CURRENT (0 << 3)
+#define v_CAT_OFF_CURRENT (1 << 3)
+#define m_BANDGAP_PWR (1 << 2)
+#define v_BANDGAP_PWR_UP (0 << 2)
+#define v_BANDGAP_PWR_DOWN (1 << 2)
+#define m_PLL_PWR (1 << 1)
+#define v_PLL_PWR_UP (0 << 1)
+#define v_PLL_PWR_DOWN (1 << 1)
+#define m_TMDS_CHG_PWR (1 << 0)
+#define v_TMDS_CHG_PWR_UP (0 << 0)
+#define v_TMDS_CHG_PWR_DOWN (1 << 0)
+
+#define HDMI_PHY_CHG_PWR 0xe1
+#define v_CLK_CHG_PWR(n) ((n & 1) << 3)
+#define v_DATA_CHG_PWR(n) ((n & 7) << 0)
+
+#define HDMI_PHY_DRIVER 0xe2
+#define v_CLK_MAIN_DRIVER(n) (n << 4)
+#define v_DATA_MAIN_DRIVER(n) (n << 0)
+
+#define HDMI_PHY_PRE_EMPHASIS 0xe3
+#define v_PRE_EMPHASIS(n) ((n & 7) << 4)
+#define v_CLK_PRE_DRIVER(n) ((n & 3) << 2)
+#define v_DATA_PRE_DRIVER(n) ((n & 3) << 0)
+
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7
+#define v_FEEDBACK_DIV_LOW(n) (n & 0xff)
+#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8
+#define v_FEEDBACK_DIV_HIGH(n) (n & 1)
+
+#define HDMI_PHY_PRE_DIV_RATIO 0xed
+#define v_PRE_DIV_RATIO(n) (n & 0x1f)
+
+#define HDMI_CEC_CTRL 0xd0
+#define m_ADJUST_FOR_HISENSE (1 << 6)
+#define m_REJECT_RX_BROADCAST (1 << 5)
+#define m_BUSFREETIME_ENABLE (1 << 2)
+#define m_REJECT_RX (1 << 1)
+#define m_START_TX (1 << 0)
+
+#define HDMI_CEC_DATA 0xd1
+#define HDMI_CEC_TX_OFFSET 0xd2
+#define HDMI_CEC_RX_OFFSET 0xd3
+#define HDMI_CEC_CLK_H 0xd4
+#define HDMI_CEC_CLK_L 0xd5
+#define HDMI_CEC_TX_LENGTH 0xd6
+#define HDMI_CEC_RX_LENGTH 0xd7
+#define HDMI_CEC_TX_INT_MASK 0xd8
+#define m_TX_DONE (1 << 3)
+#define m_TX_NOACK (1 << 2)
+#define m_TX_BROADCAST_REJ (1 << 1)
+#define m_TX_BUSNOTFREE (1 << 0)
+
+#define HDMI_CEC_RX_INT_MASK 0xd9
+#define m_RX_LA_ERR (1 << 4)
+#define m_RX_GLITCH (1 << 3)
+#define m_RX_DONE (1 << 0)
+
+#define HDMI_CEC_TX_INT 0xda
+#define HDMI_CEC_RX_INT 0xdb
+#define HDMI_CEC_BUSFREETIME_L 0xdc
+#define HDMI_CEC_BUSFREETIME_H 0xdd
+#define HDMI_CEC_LOGICADDR 0xde
+
+#endif /* __INNO_HDMI_H__ */
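
Throughout this header the m_ macros are field masks and the matching v_ macros are values pre-shifted into the same field, which is the contract that lets hdmi_modb() update one field without disturbing its neighbours. A user-space sketch of that read-modify-write (modb mirrors the shape of hdmi_modb, minus the MMIO access):

#include <stdio.h>

#define m_POWER		(1 << 1)	/* field mask */
#define v_PWR_ON	(0 << 1)	/* values pre-shifted into the field */
#define v_PWR_OFF	(1 << 1)

/* Same shape as hdmi_modb(): clear the field, then OR in the new value. */
static unsigned char modb(unsigned char reg, unsigned char msk,
			  unsigned char val)
{
	return (reg & ~msk) | (val & msk);
}

int main(void)
{
	unsigned char reg = 0xff;

	reg = modb(reg, m_POWER, v_PWR_ON);
	printf("%#x\n", reg);	/* bit 1 cleared, others untouched: 0xfd */
	return 0;
}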
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a0d51ccb6ea4..f556a8f4fde6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -251,6 +251,27 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
return 0;
}
+static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+ struct drm_file *file_priv)
+{
+ struct rockchip_drm_private *priv = crtc->dev->dev_private;
+ int pipe = drm_crtc_index(crtc);
+
+ if (pipe < ROCKCHIP_MAX_CRTC &&
+ priv->crtc_funcs[pipe] &&
+ priv->crtc_funcs[pipe]->cancel_pending_vblank)
+ priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
+}
+
+static void rockchip_drm_preclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+}
+
void rockchip_drm_lastclose(struct drm_device *dev)
{
struct rockchip_drm_private *priv = dev->dev_private;
@@ -281,6 +302,7 @@ static struct drm_driver rockchip_drm_driver = {
DRIVER_PRIME | DRIVER_ATOMIC,
.load = rockchip_drm_load,
.unload = rockchip_drm_unload,
+ .preclose = rockchip_drm_preclose,
.lastclose = rockchip_drm_lastclose,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rockchip_drm_crtc_enable_vblank,
@@ -384,36 +406,6 @@ static const struct dev_pm_ops rockchip_drm_pm_ops = {
rockchip_drm_sys_resume)
};
-/*
- * @node: device tree node containing encoder input ports
- * @encoder: drm_encoder
- */
-int rockchip_drm_encoder_get_mux_id(struct device_node *node,
- struct drm_encoder *encoder)
-{
- struct device_node *ep;
- struct drm_crtc *crtc = encoder->crtc;
- struct of_endpoint endpoint;
- struct device_node *port;
- int ret;
-
- if (!node || !crtc)
- return -EINVAL;
-
- for_each_endpoint_of_node(node, ep) {
- port = of_graph_get_remote_port(ep);
- of_node_put(port);
- if (port == crtc->port) {
- ret = of_graph_parse_endpoint(ep, &endpoint);
- of_node_put(ep);
- return ret ?: endpoint.id;
- }
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(rockchip_drm_encoder_get_mux_id);
-
static int compare_of(struct device *dev, void *data)
{
struct device_node *np = data;
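
The new preclose path above dispatches through the per-pipe crtc_funcs table registered by each CRTC driver and treats every callback as optional. A small self-contained sketch of that NULL-tolerant dispatch (all names here are illustrative stand-ins for the rockchip_drm_private bookkeeping):

#include <stdio.h>

#define MAX_CRTC 4

struct crtc_funcs {
	void (*cancel_pending_vblank)(int pipe);	/* optional hook */
};

static void demo_cancel(int pipe)
{
	printf("cancel pending vblank on pipe %d\n", pipe);
}

static const struct crtc_funcs demo_funcs = {
	.cancel_pending_vblank = demo_cancel,
};

/* Pipes 1..3 never registered: their NULL entries are simply skipped. */
static const struct crtc_funcs *crtc_funcs[MAX_CRTC] = { &demo_funcs };

int main(void)
{
	int pipe;

	for (pipe = 0; pipe < MAX_CRTC; pipe++)
		if (crtc_funcs[pipe] && crtc_funcs[pipe]->cancel_pending_vblank)
			crtc_funcs[pipe]->cancel_pending_vblank(pipe);

	return 0;
}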
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index bb8b076f1dbb..00d17d71aa4c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,6 +40,7 @@ struct rockchip_crtc_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
void (*wait_for_update)(struct drm_crtc *crtc);
+ void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
};
struct rockchip_atomic_commit {
@@ -67,8 +68,6 @@ void rockchip_drm_atomic_work(struct work_struct *work);
int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
const struct rockchip_crtc_funcs *crtc_funcs);
void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
-int rockchip_drm_encoder_get_mux_id(struct device_node *node,
- struct drm_encoder *encoder);
int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
int out_mode);
int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
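
The new cancel_pending_vblank hook is dispatched through the per-pipe crtc_funcs table with every level NULL-checked, because registering the hook is optional. A minimal standalone model of that guarded dispatch, with names simplified:

#include <stdio.h>

#define MAX_CRTC 4

struct crtc_funcs {
	void (*cancel)(int pipe);	/* optional, may be NULL */
};

static void vop_cancel(int pipe)
{
	printf("cancel pending vblank on pipe %d\n", pipe);
}

static const struct crtc_funcs *crtc_funcs[MAX_CRTC];

static void cancel_pending_vblank(int pipe)
{
	if (pipe < MAX_CRTC && crtc_funcs[pipe] && crtc_funcs[pipe]->cancel)
		crtc_funcs[pipe]->cancel(pipe);
}

int main(void)
{
	static const struct crtc_funcs vop = { .cancel = vop_cancel };

	crtc_funcs[0] = &vop;
	cancel_pending_vblank(0);	/* dispatches to vop_cancel() */
	cancel_pending_vblank(1);	/* silently skipped: not registered */
	return 0;
}
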
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fd370548d7d7..a619f120f801 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -499,10 +499,25 @@ err_disable_hclk:
static void vop_crtc_disable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
+ int i;
if (!vop->is_enabled)
return;
+ /*
+ * We need to make sure that all windows are disabled before we
+ * disable that crtc. Otherwise we might try to scan from a destroyed
+ * buffer later.
+ */
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
+
+ spin_lock(&vop->reg_lock);
+ VOP_WIN_SET(vop, win, enable, 0);
+ spin_unlock(&vop->reg_lock);
+ }
+
drm_crtc_vblank_off(crtc);
/*
@@ -549,6 +564,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb = state->fb;
struct vop_win *vop_win = to_vop_win(plane);
struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
@@ -563,12 +579,13 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
DRM_PLANE_HELPER_NO_SCALING;
- crtc = crtc ? crtc : plane->state->crtc;
- /*
- * Both crtc or plane->state->crtc can be null.
- */
if (!crtc || !fb)
goto out_disable;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
src->x1 = state->src_x;
src->y1 = state->src_y;
src->x2 = state->src_x + state->src_w;
@@ -580,8 +597,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
clip.x1 = 0;
clip.y1 = 0;
- clip.x2 = crtc->mode.hdisplay;
- clip.y2 = crtc->mode.vdisplay;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
ret = drm_plane_helper_check_update(plane, crtc, state->fb,
src, dest, &clip,
@@ -873,10 +890,30 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
}
+static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+ struct drm_file *file_priv)
+{
+ struct drm_device *drm = crtc->dev;
+ struct vop *vop = to_vop(crtc);
+ struct drm_pending_vblank_event *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ e = vop->event;
+ if (e && e->base.file_priv == file_priv) {
+ vop->event = NULL;
+
+ e->base.destroy(&e->base);
+ file_priv->event_space += sizeof(e->event);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
static const struct rockchip_crtc_funcs private_crtc_funcs = {
.enable_vblank = vop_crtc_enable_vblank,
.disable_vblank = vop_crtc_disable_vblank,
.wait_for_update = vop_crtc_wait_for_update,
+ .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
};
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -885,9 +922,6 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
{
struct vop *vop = to_vop(crtc);
- if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
- return false;
-
adjusted_mode->clock =
clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
@@ -1108,7 +1142,7 @@ static int vop_create_crtc(struct vop *vop)
const struct vop_data *vop_data = vop->data;
struct device *dev = vop->dev;
struct drm_device *drm_dev = vop->drm_dev;
- struct drm_plane *primary = NULL, *cursor = NULL, *plane;
+ struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
struct drm_crtc *crtc = &vop->crtc;
struct device_node *port;
int ret;
@@ -1148,7 +1182,7 @@ static int vop_create_crtc(struct vop *vop)
ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
&vop_crtc_funcs, NULL);
if (ret)
- return ret;
+ goto err_cleanup_planes;
drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
@@ -1181,6 +1215,7 @@ static int vop_create_crtc(struct vop *vop)
if (!port) {
DRM_ERROR("no port node found in %s\n",
dev->of_node->full_name);
+ ret = -ENOENT;
goto err_cleanup_crtc;
}
@@ -1194,7 +1229,8 @@ static int vop_create_crtc(struct vop *vop)
err_cleanup_crtc:
drm_crtc_cleanup(crtc);
err_cleanup_planes:
- list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
+ list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+ head)
drm_plane_cleanup(plane);
return ret;
}
@@ -1202,9 +1238,28 @@ err_cleanup_planes:
static void vop_destroy_crtc(struct vop *vop)
{
struct drm_crtc *crtc = &vop->crtc;
+ struct drm_device *drm_dev = vop->drm_dev;
+ struct drm_plane *plane, *tmp;
rockchip_unregister_crtc_funcs(crtc);
of_node_put(crtc->port);
+
+ /*
+ * We need to clean up the planes now. Why?
+ *
+ * The planes are "&vop->win[i].base". That means the memory is
+ * all part of the big "struct vop" chunk of memory. That memory
+ * was devm allocated and associated with this component. We need to
+ * free it ourselves before vop_unbind() finishes.
+ */
+ list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+ head)
+ vop_plane_destroy(plane);
+
+ /*
+ * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
+ * references the CRTC.
+ */
drm_crtc_cleanup(crtc);
}
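
vop_crtc_cancel_pending_vblank() above does two things: it drops the event and it credits sizeof(e->event) back to file_priv->event_space. Allocating a pending event debits that per-file budget, so destroying one that will never be delivered must refund it, or the file slowly loses the ability to queue events. A standalone model of the invariant, using simplified structs rather than the real DRM types:

#include <assert.h>
#include <stdio.h>

struct file_priv { int event_space; };
struct event { int size; };

/* allocate: debit the per-file budget, as the DRM core does */
static int event_alloc(struct file_priv *f, struct event *e, int size)
{
	if (f->event_space < size)
		return -1;
	f->event_space -= size;
	e->size = size;
	return 0;
}

/* cancel an event that will never be delivered: refund the budget */
static void event_cancel(struct file_priv *f, struct event *e)
{
	f->event_space += e->size;
}

int main(void)
{
	struct file_priv f = { .event_space = 4096 };
	struct event e;

	assert(event_alloc(&f, &e, 64) == 0);
	event_cancel(&f, &e);		/* what cancel_pending_vblank does */
	assert(f.event_space == 4096);	/* budget fully restored, no leak */
	printf("event_space=%d\n", f.event_space);
	return 0;
}
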
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index db0763794edc..88643ab160bf 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -359,13 +359,6 @@ static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
scrtc->dpms = mode;
}
-static bool shmob_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
{
shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -431,33 +424,12 @@ static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
.dpms = shmob_drm_crtc_dpms,
- .mode_fixup = shmob_drm_crtc_mode_fixup,
.prepare = shmob_drm_crtc_mode_prepare,
.commit = shmob_drm_crtc_mode_commit,
.mode_set = shmob_drm_crtc_mode_set,
.mode_set_base = shmob_drm_crtc_mode_set_base,
};
-void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
- struct drm_file *file)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = scrtc->crtc.dev;
- unsigned long flags;
-
- /* Destroy the pending vertical blanking event associated with the
- * pending page flip, if any, and disable vertical blanking interrupts.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- event = scrtc->event;
- if (event && event->base.file_priv == file) {
- scrtc->event = NULL;
- event->base.destroy(&event->base);
- drm_vblank_put(dev, 0);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
{
struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
index eddad6dcc88a..38ed4ff8aaf2 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -47,8 +47,6 @@ struct shmob_drm_connector {
int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable);
-void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
- struct drm_file *file);
void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 04e66e3751b4..7700ff172079 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -200,13 +200,6 @@ done:
return ret;
}
-static void shmob_drm_preclose(struct drm_device *dev, struct drm_file *file)
-{
- struct shmob_drm_device *sdev = dev->dev_private;
-
- shmob_drm_crtc_cancel_page_flip(&sdev->crtc, file);
-}
-
static irqreturn_t shmob_drm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -266,7 +259,6 @@ static struct drm_driver shmob_drm_driver = {
| DRIVER_PRIME,
.load = shmob_drm_load,
.unload = shmob_drm_unload,
- .preclose = shmob_drm_preclose,
.set_busid = drm_platform_set_busid,
.irq_handler = shmob_drm_irq,
.get_vblank_counter = drm_vblank_no_hw_counter,
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
index 00d0698be9d3..a516eb869f6f 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.c
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -7,6 +7,7 @@
#include "sti_awg_utils.h"
#define AWG_OPCODE_OFFSET 10
+#define AWG_MAX_ARG 0x3ff
enum opcode {
SET,
@@ -34,6 +35,8 @@ static int awg_generate_instr(enum opcode opcode,
/* skip, repeat and replay arg should not exceed 1023.
* If user wants to exceed this value, the instruction should be
* duplicate and arg should be adjust for each duplicated instruction.
+ *
+ * mux_sel is used in case of SAV/EAV synchronization.
*/
while (arg_tmp > 0) {
@@ -65,7 +68,7 @@ static int awg_generate_instr(enum opcode opcode,
mux = 0;
data_enable = 0;
- arg &= (0x3ff);
+ arg &= AWG_MAX_ARG;
break;
case REPEAT:
case REPLAY:
@@ -76,13 +79,13 @@ static int awg_generate_instr(enum opcode opcode,
mux = 0;
data_enable = 0;
- arg &= (0x3ff);
+ arg &= AWG_MAX_ARG;
break;
case JUMP:
mux = 0;
data_enable = 0;
arg |= 0x40; /* for jump instruction 7th bit is 1 */
- arg &= 0x3ff;
+ arg &= AWG_MAX_ARG;
break;
case STOP:
arg = 0;
@@ -110,68 +113,75 @@ static int awg_generate_instr(enum opcode opcode,
return 0;
}
-int sti_awg_generate_code_data_enable_mode(
+static int awg_generate_line_signal(
struct awg_code_generation_params *fwparams,
struct awg_timing *timing)
{
long int val;
- long int data_en;
int ret = 0;
- if (timing->trailing_lines > 0) {
- /* skip trailing lines */
- val = timing->blanking_level;
- data_en = 0;
- ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
-
- val = timing->trailing_lines - 1;
- data_en = 0;
- ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
- }
-
if (timing->trailing_pixels > 0) {
/* skip trailing pixel */
val = timing->blanking_level;
- data_en = 0;
- ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
val = timing->trailing_pixels - 1;
- data_en = 0;
- ret |= awg_generate_instr(SKIP, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
}
/* set DE signal high */
val = timing->blanking_level;
- data_en = 1;
ret |= awg_generate_instr((timing->trailing_pixels > 0) ? SET : RPLSET,
- val, 0, data_en, fwparams);
+ val, 0, 1, fwparams);
if (timing->blanking_pixels > 0) {
/* skip the number of active pixel */
val = timing->active_pixels - 1;
- data_en = 1;
- ret |= awg_generate_instr(SKIP, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(SKIP, val, 0, 1, fwparams);
/* set DE signal low */
val = timing->blanking_level;
- data_en = 0;
- ret |= awg_generate_instr(SET, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(SET, val, 0, 0, fwparams);
+ }
+
+ return ret;
+}
+
+int sti_awg_generate_code_data_enable_mode(
+ struct awg_code_generation_params *fwparams,
+ struct awg_timing *timing)
+{
+ long int val, tmp_val;
+ int ret = 0;
+
+ if (timing->trailing_lines > 0) {
+ /* skip trailing lines */
+ val = timing->blanking_level;
+ ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
+
+ val = timing->trailing_lines - 1;
+ ret |= awg_generate_instr(REPLAY, val, 0, 0, fwparams);
}
- /* replay the sequence as many active lines defined */
- val = timing->active_lines - 1;
- data_en = 0;
- ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
+ tmp_val = timing->active_lines - 1;
+
+ while (tmp_val > 0) {
+ /* generate DE signal for each line */
+ ret |= awg_generate_line_signal(fwparams, timing);
+ /* replay the sequence as many active lines defined */
+ ret |= awg_generate_instr(REPLAY,
+ min_t(int, AWG_MAX_ARG, tmp_val),
+ 0, 0, fwparams);
+ tmp_val -= AWG_MAX_ARG;
+ }
if (timing->blanking_lines > 0) {
/* skip blanking lines */
val = timing->blanking_level;
- data_en = 0;
- ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
val = timing->blanking_lines - 1;
- data_en = 0;
- ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(REPLAY, val, 0, 0, fwparams);
}
return ret;
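
The while loop above implements the duplication rule from the comment in awg_generate_instr(): a count larger than AWG_MAX_ARG (1023) is split across several REPLAY instructions. A runnable sketch of the arithmetic for a 2160-line mode:

#include <stdio.h>

#define AWG_MAX_ARG 0x3ff	/* 1023, largest immediate argument */

int main(void)
{
	int tmp_val = 2160 - 1;	/* active_lines - 1 for a 4K mode */

	while (tmp_val > 0) {
		int arg = tmp_val < AWG_MAX_ARG ? tmp_val : AWG_MAX_ARG;

		printf("REPLAY %d\n", arg);	/* one duplicated instruction */
		tmp_val -= AWG_MAX_ARG;
	}
	return 0;
}

This prints REPLAY 1023, REPLAY 1023, REPLAY 113: three instructions covering the 2159 replays a single instruction cannot encode.
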
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index afed2171beb9..3d2fa3ab33df 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -75,13 +75,13 @@ static int sti_compositor_bind(struct device *dev,
switch (desc[i].type) {
case STI_VID_SUBDEV:
compo->vid[vid_id++] =
- sti_vid_create(compo->dev, desc[i].id,
+ sti_vid_create(compo->dev, drm_dev, desc[i].id,
compo->regs + desc[i].offset);
break;
case STI_MIXER_MAIN_SUBDEV:
case STI_MIXER_AUX_SUBDEV:
compo->mixer[mixer_id++] =
- sti_mixer_create(compo->dev, desc[i].id,
+ sti_mixer_create(compo->dev, drm_dev, desc[i].id,
compo->regs + desc[i].offset);
break;
case STI_GPD_SUBDEV:
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index de11c7cfb02f..505620c7c2c8 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -56,6 +56,7 @@ static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
/* accept the provided drm_display_mode, do not fix it up */
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
return true;
}
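
drm_mode_set_crtcinfo() fills in the crtc_* timing fields, and with CRTC_INTERLACE_HALVE_V it halves the vertical values for interlaced modes so later code sees per-field timings. A simplified standalone model of that effect for 1080i; the real helper also handles doublescan and vscan:

#include <stdio.h>

int main(void)
{
	int vdisplay = 1080, vtotal = 1125;	/* 1080i frame timings */
	int interlaced = 1, halve_v = 1;
	int crtc_vdisplay = vdisplay, crtc_vtotal = vtotal;

	if (interlaced && halve_v) {
		crtc_vdisplay /= 2;		/* per-field values */
		crtc_vtotal /= 2;
	}
	printf("crtc_vdisplay=%d crtc_vtotal=%d\n",
	       crtc_vdisplay, crtc_vtotal);	/* 540, 562 */
	return 0;
}
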
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 807863106b8d..3abb400151ac 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -5,12 +5,10 @@
* for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
#include "sti_compositor.h"
#include "sti_cursor.h"
@@ -74,6 +72,82 @@ static const uint32_t cursor_supported_formats[] = {
#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(cursor->regs + reg))
+
+static void cursor_dbg_vpo(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+}
+
+static void cursor_dbg_size(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "\t%d x %d", val & 0x07FF, (val >> 16) & 0x07FF);
+}
+
+static void cursor_dbg_pml(struct seq_file *s,
+ struct sti_cursor *cursor, u32 val)
+{
+ if (cursor->pixmap.paddr == val)
+ seq_printf(s, "\tVirt @: %p", cursor->pixmap.base);
+}
+
+static void cursor_dbg_cml(struct seq_file *s,
+ struct sti_cursor *cursor, u32 val)
+{
+ if (cursor->clut_paddr == val)
+ seq_printf(s, "\tVirt @: %p", cursor->clut);
+}
+
+static int cursor_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "%s: (vaddr = 0x%p)",
+ sti_plane_to_str(&cursor->plane), cursor->regs);
+
+ DBGFS_DUMP(CUR_CTL);
+ DBGFS_DUMP(CUR_VPO);
+ cursor_dbg_vpo(s, readl(cursor->regs + CUR_VPO));
+ DBGFS_DUMP(CUR_PML);
+ cursor_dbg_pml(s, cursor, readl(cursor->regs + CUR_PML));
+ DBGFS_DUMP(CUR_PMP);
+ DBGFS_DUMP(CUR_SIZE);
+ cursor_dbg_size(s, readl(cursor->regs + CUR_SIZE));
+ DBGFS_DUMP(CUR_CML);
+ cursor_dbg_cml(s, cursor, readl(cursor->regs + CUR_CML));
+ DBGFS_DUMP(CUR_AWS);
+ DBGFS_DUMP(CUR_AWE);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list cursor_debugfs_files[] = {
+ { "cursor", cursor_dbg_show, 0, NULL },
+};
+
+static int cursor_debugfs_init(struct sti_cursor *cursor,
+ struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
+ cursor_debugfs_files[i].data = cursor;
+
+ return drm_debugfs_create_files(cursor_debugfs_files,
+ ARRAY_SIZE(cursor_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
{
u8 *dst = cursor->pixmap.base;
@@ -110,35 +184,31 @@ static void sti_cursor_init(struct sti_cursor *cursor)
(b * 5);
}
-static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
+ struct drm_plane_state *state)
{
- struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
struct drm_crtc *crtc = state->crtc;
- struct sti_mixer *mixer = to_sti_mixer(crtc);
struct drm_framebuffer *fb = state->fb;
- struct drm_display_mode *mode = &crtc->mode;
- int dst_x = state->crtc_x;
- int dst_y = state->crtc_y;
- int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ struct drm_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_w, src_h;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ mode = &crtc_state->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
/* src_x are in 16.16 format */
- int src_w = state->src_w >> 16;
- int src_h = state->src_h >> 16;
- bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
- struct drm_gem_cma_object *cma_obj;
- u32 y, x;
- u32 val;
-
- DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
- drm_plane->base.id, sti_plane_to_str(plane));
- DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
-
- dev_dbg(cursor->dev, "%s %s\n", __func__,
- sti_plane_to_str(plane));
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
if (src_w < STI_CURS_MIN_SIZE ||
src_h < STI_CURS_MIN_SIZE ||
@@ -146,7 +216,7 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
src_h > STI_CURS_MAX_SIZE) {
DRM_ERROR("Invalid cursor size (%dx%d)\n",
src_w, src_h);
- return;
+ return -EINVAL;
}
/* If the cursor size has changed, re-allocated the pixmap */
@@ -157,29 +227,57 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
cursor->height = src_h;
if (cursor->pixmap.base)
- dma_free_writecombine(cursor->dev,
- cursor->pixmap.size,
- cursor->pixmap.base,
- cursor->pixmap.paddr);
+ dma_free_wc(cursor->dev, cursor->pixmap.size,
+ cursor->pixmap.base, cursor->pixmap.paddr);
cursor->pixmap.size = cursor->width * cursor->height;
- cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
- cursor->pixmap.size,
- &cursor->pixmap.paddr,
- GFP_KERNEL | GFP_DMA);
+ cursor->pixmap.base = dma_alloc_wc(cursor->dev,
+ cursor->pixmap.size,
+ &cursor->pixmap.paddr,
+ GFP_KERNEL | GFP_DMA);
if (!cursor->pixmap.base) {
DRM_ERROR("Failed to allocate memory for pixmap\n");
- return;
+ return -EINVAL;
}
}
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
+ if (!drm_fb_cma_get_gem_obj(fb, 0)) {
DRM_ERROR("Can't get CMA GEM object for fb\n");
- return;
+ return -EINVAL;
}
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
+
+ return 0;
+}
+
+static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_cursor *cursor = to_sti_cursor(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y;
+ struct drm_gem_cma_object *cma_obj;
+ u32 y, x;
+ u32 val;
+
+ if (!crtc || !fb)
+ return;
+
+ mode = &crtc->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+
/* Convert ARGB8888 to CLUT8 */
sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
@@ -193,21 +291,21 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
val = y << 16 | x;
writel(val, cursor->regs + CUR_AWE);
- if (first_prepare) {
- /* Set and fetch CLUT */
- writel(cursor->clut_paddr, cursor->regs + CUR_CML);
- writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
- }
-
/* Set memory location, size, and position */
writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
writel(cursor->width, cursor->regs + CUR_PMP);
writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
y = sti_vtg_get_line_number(*mode, dst_y);
- x = sti_vtg_get_pixel_number(*mode, dst_y);
+ x = sti_vtg_get_pixel_number(*mode, dst_x);
writel((y << 16) | x, cursor->regs + CUR_VPO);
+ /* Set and fetch CLUT */
+ writel(cursor->clut_paddr, cursor->regs + CUR_CML);
+ writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
+
+ sti_plane_update_fps(plane, true, false);
+
plane->status = STI_PLANE_UPDATED;
}
@@ -215,7 +313,6 @@ static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
- struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
@@ -224,13 +321,15 @@ static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->crtc->base.id,
+ sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
+ .atomic_check = sti_cursor_atomic_check,
.atomic_update = sti_cursor_atomic_update,
.atomic_disable = sti_cursor_atomic_disable,
};
@@ -252,8 +351,8 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
/* Allocate clut buffer */
size = 0x100 * sizeof(unsigned short);
- cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
- GFP_KERNEL | GFP_DMA);
+ cursor->clut = dma_alloc_wc(dev, size, &cursor->clut_paddr,
+ GFP_KERNEL | GFP_DMA);
if (!cursor->clut) {
DRM_ERROR("Failed to allocate memory for cursor clut\n");
@@ -283,10 +382,13 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
+ if (cursor_debugfs_init(cursor, drm_dev->primary))
+ DRM_ERROR("CURSOR debugfs setup failed\n");
+
return &cursor->plane.drm_plane;
err_plane:
- dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
+ dma_free_wc(dev, size, cursor->clut, cursor->clut_paddr);
err_clut:
devm_kfree(dev, cursor);
return NULL;
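
The debugfs wiring above follows a pattern repeated for every STI sub-device in this series: the object pointer is stashed in each drm_info_list entry's .data before drm_debugfs_create_files(), and the show() callback recovers it through node->info_ent->data. A standalone model of that round trip, with simplified types:

#include <stdio.h>

struct info_list {
	const char *name;
	int (*show)(void *data);
	void *data;
};

static int cursor_show(void *data)
{
	printf("dumping cursor object @ %p\n", data);
	return 0;
}

static struct info_list cursor_files[] = {
	{ "cursor", cursor_show, NULL },
};

int main(void)
{
	int cursor_object;	/* stand-in for struct sti_cursor */

	/* registration side, as in cursor_debugfs_init() */
	cursor_files[0].data = &cursor_object;

	/* read side: the core hands .data back to the show() callback */
	return cursor_files[0].show(cursor_files[0].data);
}
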
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 506b5626f3ed..6bd6abaa5a70 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -20,6 +20,7 @@
#include "sti_crtc.h"
#include "sti_drv.h"
+#include "sti_plane.h"
#define DRIVER_NAME "sti"
#define DRIVER_DESC "STMicroelectronics SoC DRM"
@@ -30,6 +31,130 @@
#define STI_MAX_FB_HEIGHT 4096
#define STI_MAX_FB_WIDTH 4096
+static int sti_drm_fps_get(void *data, u64 *val)
+{
+ struct drm_device *drm_dev = data;
+ struct drm_plane *p;
+ unsigned int i = 0;
+
+ *val = 0;
+ list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ *val |= plane->fps_info.output << i;
+ i++;
+ }
+
+ return 0;
+}
+
+static int sti_drm_fps_set(void *data, u64 val)
+{
+ struct drm_device *drm_dev = data;
+ struct drm_plane *p;
+ unsigned int i = 0;
+
+ list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ plane->fps_info.output = (val >> i) & 1;
+ i++;
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sti_drm_fps_fops,
+ sti_drm_fps_get, sti_drm_fps_set, "%llu\n");
+
+static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_plane *p;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(p, &dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ seq_printf(s, "%s%s\n",
+ plane->fps_info.fps_str,
+ plane->fps_info.fips_str);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list sti_drm_dbg_list[] = {
+ {"fps_get", sti_drm_fps_dbg_show, 0},
+};
+
+static int sti_drm_debugfs_create(struct dentry *root,
+ struct drm_minor *minor,
+ const char *name,
+ const struct file_operations *fops)
+{
+ struct drm_device *dev = minor->dev;
+ struct drm_info_node *node;
+ struct dentry *ent;
+
+ ent = debugfs_create_file(name, S_IRUGO | S_IWUSR, root, dev, fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *)fops;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
+
+static int sti_drm_dbg_init(struct drm_minor *minor)
+{
+ int ret;
+
+ ret = drm_debugfs_create_files(sti_drm_dbg_list,
+ ARRAY_SIZE(sti_drm_dbg_list),
+ minor->debugfs_root, minor);
+ if (ret)
+ goto err;
+
+ ret = sti_drm_debugfs_create(minor->debugfs_root, minor, "fps_show",
+ &sti_drm_fps_fops);
+ if (ret)
+ goto err;
+
+ DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
+ return 0;
+err:
+ DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
+ return ret;
+}
+
+void sti_drm_dbg_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(sti_drm_dbg_list,
+ ARRAY_SIZE(sti_drm_dbg_list), minor);
+
+ drm_debugfs_remove_files((struct drm_info_list *)&sti_drm_fps_fops,
+ 1, minor);
+}
+
static void sti_atomic_schedule(struct sti_private *private,
struct drm_atomic_state *state)
{
@@ -181,18 +306,9 @@ static const struct file_operations sti_driver_fops = {
.release = drm_release,
};
-static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
-{
- /* we want to be able to write in mmapped buffer */
- flags |= O_RDWR;
- return drm_gem_prime_export(dev, obj, flags);
-}
-
static struct drm_driver sti_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
- DRIVER_GEM | DRIVER_PRIME,
+ DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
.load = sti_load,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -207,7 +323,7 @@ static struct drm_driver sti_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = sti_gem_prime_export,
+ .gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
@@ -215,6 +331,9 @@ static struct drm_driver sti_driver = {
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .debugfs_init = sti_drm_dbg_init,
+ .debugfs_cleanup = sti_drm_dbg_cleanup,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
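
sti_drm_fps_get() and sti_drm_fps_set() pack one enable bit per plane, in plane_list order, into the u64 exchanged through the debugfs file. A standalone sketch of the decoding; the plane order shown is an assumption for illustration, not guaranteed by the driver:

#include <stdio.h>

int main(void)
{
	const char *planes[] = { "GDP0", "GDP1", "GDP2", "GDP3",
				 "VID0", "CURSOR" };	/* assumed order */
	unsigned long long val = 0x05;	/* enable tracing on planes 0 and 2 */
	unsigned int i;

	for (i = 0; i < sizeof(planes) / sizeof(planes[0]); i++)
		printf("%-6s fps output: %d\n", planes[i],
		       (int)((val >> i) & 1));
	return 0;
}
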
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 45cbe2bf7dd6..25f76632002c 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
@@ -156,6 +157,69 @@ static void dvo_awg_configure(struct sti_dvo *dvo, u32 *awg_ram_code, int nb)
writel(DVO_AWG_CTRL_EN, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
}
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(dvo->regs + reg))
+
+static void dvo_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
+{
+ unsigned int i;
+
+ seq_puts(s, "\n\n");
+ seq_puts(s, " DVO AWG microcode:");
+ for (i = 0; i < AWG_MAX_INST; i++) {
+ if (i % 8 == 0)
+ seq_printf(s, "\n %04X:", i);
+ seq_printf(s, " %04X", readl(reg + i * 4));
+ }
+}
+
+static int dvo_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
+ DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
+ DBGFS_DUMP(DVO_DOF_CFG);
+ DBGFS_DUMP(DVO_LUT_PROG_LOW);
+ DBGFS_DUMP(DVO_LUT_PROG_MID);
+ DBGFS_DUMP(DVO_LUT_PROG_HIGH);
+ dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list dvo_debugfs_files[] = {
+ { "dvo", dvo_dbg_show, 0, NULL },
+};
+
+static void dvo_debugfs_exit(struct sti_dvo *dvo, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(dvo_debugfs_files,
+ ARRAY_SIZE(dvo_debugfs_files),
+ minor);
+}
+
+static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
+ dvo_debugfs_files[i].data = dvo;
+
+ return drm_debugfs_create_files(dvo_debugfs_files,
+ ARRAY_SIZE(dvo_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
static void sti_dvo_disable(struct drm_bridge *bridge)
{
struct sti_dvo *dvo = bridge->driver_private;
@@ -345,12 +409,14 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
DRM_DEBUG_DRIVER("\n");
- if (!dvo->panel)
+ if (!dvo->panel) {
dvo->panel = of_drm_find_panel(dvo->panel_node);
+ if (dvo->panel)
+ drm_panel_attach(dvo->panel, connector);
+ }
if (dvo->panel)
- if (!drm_panel_attach(dvo->panel, connector))
- return connector_status_connected;
+ return connector_status_connected;
return connector_status_disconnected;
}
@@ -453,6 +519,9 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ if (dvo_debugfs_init(dvo, drm_dev->primary))
+ DRM_ERROR("DVO debugfs setup failed\n");
+
return 0;
err_sysfs:
@@ -467,6 +536,9 @@ static void sti_dvo_unbind(struct device *dev,
struct device *master, void *data)
{
struct sti_dvo *dvo = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ dvo_debugfs_exit(dvo, drm_dev->primary);
drm_bridge_remove(dvo->bridge);
}
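
The detect() change above makes the panel lookup lazy and one-shot: of_drm_find_panel() is retried until it succeeds, drm_panel_attach() then runs exactly once, and the cached pointer answers every later detect. A standalone model of that state machine, with simplified types:

#include <stdio.h>

struct panel { int attached; };

static struct panel *find_panel(int present)
{
	static struct panel p;

	return present ? &p : NULL;	/* of_drm_find_panel() stand-in */
}

static const char *detect(struct panel **cached, int present)
{
	if (!*cached) {
		*cached = find_panel(present);
		if (*cached)
			(*cached)->attached = 1;  /* drm_panel_attach() */
	}
	return *cached ? "connected" : "disconnected";
}

int main(void)
{
	struct panel *p = NULL;

	printf("%s\n", detect(&p, 0));	/* disconnected, no panel yet */
	printf("%s\n", detect(&p, 1));	/* connected, attach runs once */
	printf("%s\n", detect(&p, 1));	/* connected, cached pointer */
	return 0;
}
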
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index f9a1d92c9d95..ff3d3e7e7704 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -6,9 +6,7 @@
* License terms: GNU General Public License (GPL), version 2
*/
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-
+#include <drm/drm_atomic.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -32,10 +30,23 @@
#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555 0x06
#define GDP_ARGB4444 0x07
-#define GDP_CLUT8 0x0B
-#define GDP_YCBR888 0x10
-#define GDP_YCBR422R 0x12
-#define GDP_AYCBR8888 0x15
+
+#define GDP2STR(fmt) { GDP_ ## fmt, #fmt }
+
+static struct gdp_format_to_str {
+ int format;
+ char name[20];
+} gdp_format_to_str[] = {
+ GDP2STR(RGB565),
+ GDP2STR(RGB888),
+ GDP2STR(RGB888_32),
+ GDP2STR(XBGR8888),
+ GDP2STR(ARGB8565),
+ GDP2STR(ARGB8888),
+ GDP2STR(ABGR8888),
+ GDP2STR(ARGB1555),
+ GDP2STR(ARGB4444)
+ };
#define GAM_GDP_CTL_OFFSET 0x00
#define GAM_GDP_AGC_OFFSET 0x04
@@ -97,6 +108,7 @@ struct sti_gdp_node_list {
* @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
* @is_curr_top: true if the current node processed is the top field
* @node_list: array of node list
+ * @vtg: registered vtg
*/
struct sti_gdp {
struct sti_plane plane;
@@ -108,6 +120,7 @@ struct sti_gdp {
struct notifier_block vtg_field_nb;
bool is_curr_top;
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
+ struct sti_vtg *vtg;
};
#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
@@ -121,12 +134,224 @@ static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
- DRM_FORMAT_AYUV,
- DRM_FORMAT_YUV444,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_C8,
};
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(gdp->regs + reg ## _OFFSET))
+
+static void gdp_dbg_ctl(struct seq_file *s, int val)
+{
+ int i;
+
+ seq_puts(s, "\tColor:");
+ for (i = 0; i < ARRAY_SIZE(gdp_format_to_str); i++) {
+ if (gdp_format_to_str[i].format == (val & 0x1F)) {
+ seq_puts(s, gdp_format_to_str[i].name);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(gdp_format_to_str))
+ seq_puts(s, "<UNKNOWN>");
+
+ seq_printf(s, "\tWaitNextVsync:%d", val & WAIT_NEXT_VSYNC ? 1 : 0);
+}
+
+static void gdp_dbg_vpo(struct seq_file *s, int val)
+{
+ seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+}
+
+static void gdp_dbg_vps(struct seq_file *s, int val)
+{
+ seq_printf(s, "\txds:%4d\tyds:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+}
+
+static void gdp_dbg_size(struct seq_file *s, int val)
+{
+ seq_printf(s, "\t%d x %d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+}
+
+static void gdp_dbg_nvn(struct seq_file *s, struct sti_gdp *gdp, int val)
+{
+ void *base = NULL;
+ unsigned int i;
+
+ for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+ if (gdp->node_list[i].top_field_paddr == val) {
+ base = gdp->node_list[i].top_field;
+ break;
+ }
+ if (gdp->node_list[i].btm_field_paddr == val) {
+ base = gdp->node_list[i].btm_field;
+ break;
+ }
+ }
+
+ if (base)
+ seq_printf(s, "\tVirt @: %p", base);
+}
+
+static void gdp_dbg_ppt(struct seq_file *s, int val)
+{
+ if (val & GAM_GDP_PPT_IGNORE)
+ seq_puts(s, "\tNot displayed on mixer!");
+}
+
+static void gdp_dbg_mst(struct seq_file *s, int val)
+{
+ if (val & 1)
+ seq_puts(s, "\tBUFFER UNDERFLOW!");
+}
+
+static int gdp_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_plane *drm_plane = &gdp->plane.drm_plane;
+ struct drm_crtc *crtc = drm_plane->crtc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "%s: (vaddr = 0x%p)",
+ sti_plane_to_str(&gdp->plane), gdp->regs);
+
+ DBGFS_DUMP(GAM_GDP_CTL);
+ gdp_dbg_ctl(s, readl(gdp->regs + GAM_GDP_CTL_OFFSET));
+ DBGFS_DUMP(GAM_GDP_AGC);
+ DBGFS_DUMP(GAM_GDP_VPO);
+ gdp_dbg_vpo(s, readl(gdp->regs + GAM_GDP_VPO_OFFSET));
+ DBGFS_DUMP(GAM_GDP_VPS);
+ gdp_dbg_vps(s, readl(gdp->regs + GAM_GDP_VPS_OFFSET));
+ DBGFS_DUMP(GAM_GDP_PML);
+ DBGFS_DUMP(GAM_GDP_PMP);
+ DBGFS_DUMP(GAM_GDP_SIZE);
+ gdp_dbg_size(s, readl(gdp->regs + GAM_GDP_SIZE_OFFSET));
+ DBGFS_DUMP(GAM_GDP_NVN);
+ gdp_dbg_nvn(s, gdp, readl(gdp->regs + GAM_GDP_NVN_OFFSET));
+ DBGFS_DUMP(GAM_GDP_KEY1);
+ DBGFS_DUMP(GAM_GDP_KEY2);
+ DBGFS_DUMP(GAM_GDP_PPT);
+ gdp_dbg_ppt(s, readl(gdp->regs + GAM_GDP_PPT_OFFSET));
+ DBGFS_DUMP(GAM_GDP_CML);
+ DBGFS_DUMP(GAM_GDP_MST);
+ gdp_dbg_mst(s, readl(gdp->regs + GAM_GDP_MST_OFFSET));
+
+ seq_puts(s, "\n\n");
+ if (!crtc)
+ seq_puts(s, " Not connected to any DRM CRTC\n");
+ else
+ seq_printf(s, " Connected to DRM CRTC #%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static void gdp_node_dump_node(struct seq_file *s, struct sti_gdp_node *node)
+{
+ seq_printf(s, "\t@:0x%p", node);
+ seq_printf(s, "\n\tCTL 0x%08X", node->gam_gdp_ctl);
+ gdp_dbg_ctl(s, node->gam_gdp_ctl);
+ seq_printf(s, "\n\tAGC 0x%08X", node->gam_gdp_agc);
+ seq_printf(s, "\n\tVPO 0x%08X", node->gam_gdp_vpo);
+ gdp_dbg_vpo(s, node->gam_gdp_vpo);
+ seq_printf(s, "\n\tVPS 0x%08X", node->gam_gdp_vps);
+ gdp_dbg_vps(s, node->gam_gdp_vps);
+ seq_printf(s, "\n\tPML 0x%08X", node->gam_gdp_pml);
+ seq_printf(s, "\n\tPMP 0x%08X", node->gam_gdp_pmp);
+ seq_printf(s, "\n\tSIZE 0x%08X", node->gam_gdp_size);
+ gdp_dbg_size(s, node->gam_gdp_size);
+ seq_printf(s, "\n\tNVN 0x%08X", node->gam_gdp_nvn);
+ seq_printf(s, "\n\tKEY1 0x%08X", node->gam_gdp_key1);
+ seq_printf(s, "\n\tKEY2 0x%08X", node->gam_gdp_key2);
+ seq_printf(s, "\n\tPPT 0x%08X", node->gam_gdp_ppt);
+ gdp_dbg_ppt(s, node->gam_gdp_ppt);
+ seq_printf(s, "\n\tCML 0x%08X", node->gam_gdp_cml);
+ seq_puts(s, "\n");
+}
+
+static int gdp_node_dbg_show(struct seq_file *s, void *arg)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ unsigned int b;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ for (b = 0; b < GDP_NODE_NB_BANK; b++) {
+ seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
+ gdp_node_dump_node(s, gdp->node_list[b].top_field);
+ seq_printf(s, "\n%s[%d].btm", sti_plane_to_str(&gdp->plane), b);
+ gdp_node_dump_node(s, gdp->node_list[b].btm_field);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list gdp0_debugfs_files[] = {
+ { "gdp0", gdp_dbg_show, 0, NULL },
+ { "gdp0_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list gdp1_debugfs_files[] = {
+ { "gdp1", gdp_dbg_show, 0, NULL },
+ { "gdp1_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list gdp2_debugfs_files[] = {
+ { "gdp2", gdp_dbg_show, 0, NULL },
+ { "gdp2_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list gdp3_debugfs_files[] = {
+ { "gdp3", gdp_dbg_show, 0, NULL },
+ { "gdp3_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
+{
+ unsigned int i;
+ struct drm_info_list *gdp_debugfs_files;
+ int nb_files;
+
+ switch (gdp->plane.desc) {
+ case STI_GDP_0:
+ gdp_debugfs_files = gdp0_debugfs_files;
+ nb_files = ARRAY_SIZE(gdp0_debugfs_files);
+ break;
+ case STI_GDP_1:
+ gdp_debugfs_files = gdp1_debugfs_files;
+ nb_files = ARRAY_SIZE(gdp1_debugfs_files);
+ break;
+ case STI_GDP_2:
+ gdp_debugfs_files = gdp2_debugfs_files;
+ nb_files = ARRAY_SIZE(gdp2_debugfs_files);
+ break;
+ case STI_GDP_3:
+ gdp_debugfs_files = gdp3_debugfs_files;
+ nb_files = ARRAY_SIZE(gdp3_debugfs_files);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_files; i++)
+ gdp_debugfs_files[i].data = gdp;
+
+ return drm_debugfs_create_files(gdp_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
+}
+
static int sti_gdp_fourcc2format(int fourcc)
{
switch (fourcc) {
@@ -146,14 +371,6 @@ static int sti_gdp_fourcc2format(int fourcc)
return GDP_RGB565;
case DRM_FORMAT_RGB888:
return GDP_RGB888;
- case DRM_FORMAT_AYUV:
- return GDP_AYCBR8888;
- case DRM_FORMAT_YUV444:
- return GDP_YCBR888;
- case DRM_FORMAT_VYUY:
- return GDP_YCBR422R;
- case DRM_FORMAT_C8:
- return GDP_CLUT8;
}
return -1;
}
@@ -163,7 +380,6 @@ static int sti_gdp_get_alpharange(int format)
switch (format) {
case GDP_ARGB8565:
case GDP_ARGB8888:
- case GDP_AYCBR8888:
case GDP_ABGR8888:
return GAM_GDP_ALPHARANGE_255;
}
@@ -240,9 +456,6 @@ end:
*/
static void sti_gdp_disable(struct sti_gdp *gdp)
{
- struct drm_plane *drm_plane = &gdp->plane.drm_plane;
- struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
- struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
unsigned int i;
DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
@@ -253,8 +466,7 @@ static void sti_gdp_disable(struct sti_gdp *gdp)
gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
}
- if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
+ if (sti_vtg_unregister_client(gdp->vtg, &gdp->vtg_field_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
if (gdp->clk_pix)
@@ -312,8 +524,7 @@ static void sti_gdp_init(struct sti_gdp *gdp)
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
- base = dma_alloc_writecombine(gdp->dev,
- size, &dma_addr, GFP_KERNEL | GFP_DMA);
+ base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -380,20 +591,140 @@ static void sti_gdp_init(struct sti_gdp *gdp)
}
}
-static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+/**
+ * sti_gdp_get_dst
+ * @dev: device
+ * @dst: requested destination size
+ * @src: source size
+ *
+ * Return the cropped / clamped destination size
+ *
+ * RETURNS:
+ * cropped / clamped destination size
+ */
+static int sti_gdp_get_dst(struct device *dev, int dst, int src)
+{
+ if (dst == src)
+ return dst;
+
+ if (dst < src) {
+ dev_dbg(dev, "WARNING: GDP scale not supported, will crop\n");
+ return dst;
+ }
+
+ dev_dbg(dev, "WARNING: GDP scale not supported, will clamp\n");
+ return src;
+}
+
+static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
+ struct drm_plane_state *state)
{
- struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
struct drm_crtc *crtc = state->crtc;
struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
struct drm_framebuffer *fb = state->fb;
bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+ struct drm_crtc_state *crtc_state;
struct sti_mixer *mixer;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
+ int format;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!crtc || !fb)
+ return 0;
+
+ mixer = to_sti_mixer(crtc);
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ mode = &crtc_state->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x are in 16.16 format */
+ src_x = state->src_x >> 16;
+ src_y = state->src_y >> 16;
+ src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+ src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+
+ format = sti_gdp_fourcc2format(fb->pixel_format);
+ if (format == -1) {
+ DRM_ERROR("Format not supported by GDP %.4s\n",
+ (char *)&fb->pixel_format);
+ return -EINVAL;
+ }
+
+ if (!drm_fb_cma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return -EINVAL;
+ }
+
+ if (first_prepare) {
+ /* Register gdp callback */
+ gdp->vtg = mixer->id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux;
+ if (sti_vtg_register_client(gdp->vtg,
+ &gdp->vtg_field_nb, crtc)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -EINVAL;
+ }
+
+ /* Set and enable gdp clock */
+ if (gdp->clk_pix) {
+ struct clk *clkp;
+ int rate = mode->clock * 1000;
+ int res;
+
+ /*
+ * According to the mixer used, the gdp pixel clock
+ * should have a different parent clock.
+ */
+ if (mixer->id == STI_MIXER_MAIN)
+ clkp = gdp->clk_main_parent;
+ else
+ clkp = gdp->clk_aux_parent;
+
+ if (clkp)
+ clk_set_parent(gdp->clk_pix, clkp);
+
+ res = clk_set_rate(gdp->clk_pix, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+ rate);
+ return -EINVAL;
+ }
+
+ if (clk_prepare_enable(gdp->clk_pix)) {
+ DRM_ERROR("Failed to prepare/enable gdp\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+ sti_plane_to_str(plane),
+ dst_w, dst_h, dst_x, dst_y,
+ src_w, src_h, src_x, src_y);
+
+ return 0;
+}
+
+static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_gdp *gdp = to_sti_gdp(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_x, src_y, src_w, src_h;
struct drm_gem_cma_object *cma_obj;
struct sti_gdp_node_list *list;
struct sti_gdp_node_list *curr_list;
@@ -403,13 +734,10 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
int format;
unsigned int depth, bpp;
u32 ydo, xdo, yds, xds;
- int res;
- /* Manage the case where crtc is null (disabled) */
- if (!crtc)
+ if (!crtc || !fb)
return;
- mixer = to_sti_mixer(crtc);
mode = &crtc->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
@@ -418,16 +746,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
-
- DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
- drm_plane->base.id, sti_plane_to_str(plane));
- DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
- sti_plane_to_str(plane),
- dst_w, dst_h, dst_x, dst_y,
- src_w, src_h, src_x, src_y);
+ src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+ src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
list = sti_gdp_get_free_nodes(gdp);
top_field = list->top_field;
@@ -440,20 +760,11 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
format = sti_gdp_fourcc2format(fb->pixel_format);
- if (format == -1) {
- DRM_ERROR("Format not supported by GDP %.4s\n",
- (char *)&fb->pixel_format);
- return;
- }
top_field->gam_gdp_ctl |= format;
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
- return;
- }
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->pixel_format,
@@ -465,12 +776,9 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
top_field->gam_gdp_pml += src_x * (bpp >> 3);
top_field->gam_gdp_pml += src_y * fb->pitches[0];
- /* input parameters */
- top_field->gam_gdp_pmp = fb->pitches[0];
- top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
- clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
-
- /* output parameters */
+ /* output parameters (clamped / cropped) */
+ dst_w = sti_gdp_get_dst(gdp->dev, dst_w, src_w);
+ dst_h = sti_gdp_get_dst(gdp->dev, dst_h, src_h);
ydo = sti_vtg_get_line_number(*mode, dst_y);
yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, dst_x);
@@ -478,6 +786,11 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
top_field->gam_gdp_vpo = (ydo << 16) | xdo;
top_field->gam_gdp_vps = (yds << 16) | xds;
+ /* input parameters */
+ src_w = dst_w;
+ top_field->gam_gdp_pmp = fb->pitches[0];
+ top_field->gam_gdp_size = src_h << 16 | src_w;
+
/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
top_field->gam_gdp_nvn = list->btm_field_paddr;
@@ -488,44 +801,6 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
fb->pitches[0];
- if (first_prepare) {
- /* Register gdp callback */
- if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux,
- &gdp->vtg_field_nb, crtc)) {
- DRM_ERROR("Cannot register VTG notifier\n");
- return;
- }
-
- /* Set and enable gdp clock */
- if (gdp->clk_pix) {
- struct clk *clkp;
- int rate = mode->clock * 1000;
-
- /* According to the mixer used, the gdp pixel clock
- * should have a different parent clock. */
- if (mixer->id == STI_MIXER_MAIN)
- clkp = gdp->clk_main_parent;
- else
- clkp = gdp->clk_aux_parent;
-
- if (clkp)
- clk_set_parent(gdp->clk_pix, clkp);
-
- res = clk_set_rate(gdp->clk_pix, rate);
- if (res < 0) {
- DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
- rate);
- return;
- }
-
- if (clk_prepare_enable(gdp->clk_pix)) {
- DRM_ERROR("Failed to prepare/enable gdp\n");
- return;
- }
- }
- }
-
/* Update the NVN field of the 'right' field of the current GDP node
* (being used by the HW) with the address of the updated ('free') top
* field GDP node.
@@ -574,6 +849,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
}
end:
+ sti_plane_update_fps(plane, true, false);
+
plane->status = STI_PLANE_UPDATED;
}
@@ -581,7 +858,6 @@ static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
- struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
@@ -590,13 +866,15 @@ static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->crtc->base.id,
+ sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
+ .atomic_check = sti_gdp_atomic_check,
.atomic_update = sti_gdp_atomic_update,
.atomic_disable = sti_gdp_atomic_disable,
};
@@ -640,6 +918,9 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
sti_plane_init_property(&gdp->plane, type);
+ if (gdp_debugfs_init(gdp, drm_dev->primary))
+ DRM_ERROR("GDP debugfs setup failed\n");
+
return &gdp->plane.drm_plane;
err:
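
sti_gdp_get_dst() encodes the fact that the GDP has no scaler: a destination larger than the source is clamped down to the source size, and a smaller destination simply crops. A userspace copy of the helper with two worked cases:

#include <stdio.h>

/* same decision as sti_gdp_get_dst(), minus the dev_dbg() warnings */
static int gdp_get_dst(int dst, int src)
{
	return dst <= src ? dst : src;
}

int main(void)
{
	printf("dst=1920 src=1280 -> %d (clamped)\n", gdp_get_dst(1920, 1280));
	printf("dst=720  src=1280 -> %d (cropped)\n", gdp_get_dst(720, 1280));
	return 0;
}
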
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 49cce833f2c8..ec0d017eaf1a 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -326,6 +326,103 @@ static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
}
}
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(hda->regs + reg))
+
+static void hda_dbg_cfg(struct seq_file *s, int val)
+{
+ seq_puts(s, "\tAWG ");
+ seq_puts(s, val & CFG_AWG_ASYNC_EN ? "enabled" : "disabled");
+}
+
+static void hda_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
+{
+ unsigned int i;
+
+ seq_puts(s, "\n\n");
+ seq_puts(s, " HDA AWG microcode:");
+ for (i = 0; i < AWG_MAX_INST; i++) {
+ if (i % 8 == 0)
+ seq_printf(s, "\n %04X:", i);
+ seq_printf(s, " %04X", readl(reg + i * 4));
+ }
+}
+
+static void hda_dbg_video_dacs_ctrl(struct seq_file *s, void __iomem *reg)
+{
+ u32 val = readl(reg);
+ u32 mask;
+
+ switch ((u32)reg & VIDEO_DACS_CONTROL_MASK) {
+ case VIDEO_DACS_CONTROL_SYSCFG2535:
+ mask = DAC_CFG_HD_OFF_MASK;
+ break;
+ case VIDEO_DACS_CONTROL_SYSCFG5072:
+ mask = DAC_CFG_HD_HZUVW_OFF_MASK;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Warning: DACS ctrl register not supported!");
+ return;
+ }
+
+ seq_puts(s, "\n");
+ seq_printf(s, "\n %-25s 0x%08X", "VIDEO_DACS_CONTROL", val);
+ seq_puts(s, "\tHD DACs ");
+ seq_puts(s, val & mask ? "disabled" : "enabled");
+}
+
+static int hda_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
+ DBGFS_DUMP(HDA_ANA_CFG);
+ hda_dbg_cfg(s, readl(hda->regs + HDA_ANA_CFG));
+ DBGFS_DUMP(HDA_ANA_SCALE_CTRL_Y);
+ DBGFS_DUMP(HDA_ANA_SCALE_CTRL_CB);
+ DBGFS_DUMP(HDA_ANA_SCALE_CTRL_CR);
+ DBGFS_DUMP(HDA_ANA_ANC_CTRL);
+ DBGFS_DUMP(HDA_ANA_SRC_Y_CFG);
+ DBGFS_DUMP(HDA_ANA_SRC_C_CFG);
+ hda_dbg_awg_microcode(s, hda->regs + HDA_SYNC_AWGI);
+ if (hda->video_dacs_ctrl)
+ hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list hda_debugfs_files[] = {
+ { "hda", hda_dbg_show, 0, NULL },
+};
+
+static void hda_debugfs_exit(struct sti_hda *hda, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(hda_debugfs_files,
+ ARRAY_SIZE(hda_debugfs_files),
+ minor);
+}
+
+static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
+ hda_debugfs_files[i].data = hda;
+
+ return drm_debugfs_create_files(hda_debugfs_files,
+ ARRAY_SIZE(hda_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
/**
* Configure AWG, writing instructions
*
@@ -685,6 +782,12 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ /* force the HD DACs off at startup */
+ hda_enable_hd_dacs(hda, false);
+
+ if (hda_debugfs_init(hda, drm_dev->primary))
+ DRM_ERROR("HDA debugfs setup failed\n");
+
return 0;
err_sysfs:
@@ -697,7 +800,10 @@ err_connector:
static void sti_hda_unbind(struct device *dev,
struct device *master, void *data)
{
- /* do nothing */
+ struct sti_hda *hda = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ hda_debugfs_exit(hda, drm_dev->primary);
}
static const struct component_ops sti_hda_ops = {
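
hda_dbg_video_dacs_ctrl() reads as active-low: a set bit under the HD-off mask means the DACs are disabled. A tiny worked example of the decoding; the mask value used here is an illustrative assumption, not the real DAC_CFG_HD_OFF_MASK:

#include <stdio.h>

int main(void)
{
	unsigned int val  = 0x00000020;	/* sample register read */
	unsigned int mask = 0x00000020;	/* assumed HD-off mask bit */

	printf("VIDEO_DACS_CONTROL 0x%08X\tHD DACs %s\n",
	       val, (val & mask) ? "disabled" : "enabled");
	return 0;
}
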
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index cd501563c0cc..6ef0715bd5b9 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/hdmi.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
@@ -51,9 +52,18 @@
#define HDMI_SW_DI_2_PKT_WORD4 0x0614
#define HDMI_SW_DI_2_PKT_WORD5 0x0618
#define HDMI_SW_DI_2_PKT_WORD6 0x061C
+#define HDMI_SW_DI_3_HEAD_WORD 0x0620
+#define HDMI_SW_DI_3_PKT_WORD0 0x0624
+#define HDMI_SW_DI_3_PKT_WORD1 0x0628
+#define HDMI_SW_DI_3_PKT_WORD2 0x062C
+#define HDMI_SW_DI_3_PKT_WORD3 0x0630
+#define HDMI_SW_DI_3_PKT_WORD4 0x0634
+#define HDMI_SW_DI_3_PKT_WORD5 0x0638
+#define HDMI_SW_DI_3_PKT_WORD6 0x063C
#define HDMI_IFRAME_SLOT_AVI 1
#define HDMI_IFRAME_SLOT_AUDIO 2
+#define HDMI_IFRAME_SLOT_VENDOR 3
#define XCAT(prefix, x, suffix) prefix ## x ## suffix
#define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD)
@@ -65,6 +75,8 @@
#define HDMI_SW_DI_N_PKT_WORD5(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD5)
#define HDMI_SW_DI_N_PKT_WORD6(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD6)
+#define HDMI_SW_DI_MAX_WORD 7
+
#define HDMI_IFRAME_DISABLED 0x0
#define HDMI_IFRAME_SINGLE_SHOT 0x1
#define HDMI_IFRAME_FIELD 0x2
@@ -117,6 +129,8 @@ struct sti_hdmi_connector {
struct drm_connector drm_connector;
struct drm_encoder *encoder;
struct sti_hdmi *hdmi;
+ struct drm_property *colorspace_property;
+ struct drm_property *hdmi_mode_property;
};
#define to_sti_hdmi_connector(x) \
@@ -217,8 +231,10 @@ static void hdmi_config(struct sti_hdmi *hdmi)
/* Clear overrun and underrun fifo */
conf = HDMI_CFG_FIFO_OVERRUN_CLR | HDMI_CFG_FIFO_UNDERRUN_CLR;
- /* Enable HDMI mode not DVI */
- conf |= HDMI_CFG_HDMI_NOT_DVI | HDMI_CFG_ESS_NOT_OESS;
+ /* Select encryption type and the framing mode */
+ conf |= HDMI_CFG_ESS_NOT_OESS;
+ if (hdmi->hdmi_mode == HDMI_MODE_HDMI)
+ conf |= HDMI_CFG_HDMI_NOT_DVI;
/* Enable sink term detection */
conf |= HDMI_CFG_SINK_TERM_DET_EN;
@@ -241,6 +257,47 @@ static void hdmi_config(struct sti_hdmi *hdmi)
hdmi_write(hdmi, conf, HDMI_CFG);
}
+/*
+ * Helper to reset an infoframe
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ * @slot: infoframe to reset
+ */
+static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
+ u32 slot)
+{
+ u32 val, i;
+ u32 head_offset, pack_offset;
+
+ switch (slot) {
+ case HDMI_IFRAME_SLOT_AVI:
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI);
+ break;
+ case HDMI_IFRAME_SLOT_AUDIO:
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO);
+ break;
+ case HDMI_IFRAME_SLOT_VENDOR:
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_VENDOR);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_VENDOR);
+ break;
+ default:
+ DRM_ERROR("unsupported infoframe slot: %#x\n", slot);
+ return;
+ }
+
+ /* Disable transmission for the selected slot */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, slot);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+ /* Reset info frame registers */
+ hdmi_write(hdmi, 0x0, head_offset);
+ for (i = 0; i < HDMI_SW_DI_MAX_WORD * sizeof(u32); i += sizeof(u32))
+ hdmi_write(hdmi, 0x0, pack_offset + i);
+}
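
HDMI_IFRAME_CFG_DI_N() is defined elsewhere in the driver; judging from the 4-bit shifts decoded in hdmi_dbg_sw_di_cfg() further down, each data-island slot appears to own a 4-bit field of HDMI_SW_DI_CFG. A sketch of that assumed layout (macro name hypothetical):

	/* Assumed field layout, inferred from the debugfs decoding below:
	 * slot n owns bits [4*(n-1)+3 : 4*(n-1)]. */
	#define EXAMPLE_IFRAME_CFG_DI_N(x, n)	((x) << (4 * ((n) - 1)))

	/* Disabling the vendor slot (n = 3) would then clear bits [11:8]:
	 *	val &= ~EXAMPLE_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, HDMI_IFRAME_SLOT_VENDOR);
	 */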
+
/**
* Helper to concatenate infoframe in 32 bits word
*
@@ -266,12 +323,13 @@ static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
* @data: infoframe to write
* @size: size to write
*/
-static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
+static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi,
+ const u8 *data,
+ size_t size)
{
const u8 *ptr = data;
u32 val, slot, mode, i;
u32 head_offset, pack_offset;
- size_t size;
switch (*ptr) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -279,17 +337,19 @@ static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
mode = HDMI_IFRAME_FIELD;
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI);
- size = HDMI_AVI_INFOFRAME_SIZE;
break;
-
case HDMI_INFOFRAME_TYPE_AUDIO:
slot = HDMI_IFRAME_SLOT_AUDIO;
mode = HDMI_IFRAME_FRAME;
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO);
- size = HDMI_AUDIO_INFOFRAME_SIZE;
break;
-
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ slot = HDMI_IFRAME_SLOT_VENDOR;
+ mode = HDMI_IFRAME_FRAME;
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_VENDOR);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_VENDOR);
+ break;
default:
DRM_ERROR("unsupported infoframe type: %#x\n", *ptr);
return;
@@ -308,8 +368,9 @@ static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
/*
* Each subpack contains 4 bytes
* The First Bytes of the first subpacket must contain the checksum
- * Packet size in increase by one.
+ * Packet size is increased by one (for the checksum byte).
*/
+ size = size - HDMI_INFOFRAME_HEADER_SIZE + 1;
for (i = 0; i < size; i += sizeof(u32)) {
size_t num;
@@ -321,7 +382,7 @@ static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
/* Enable transmission slot for updated infoframe */
val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
- val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, slot);
+ val |= HDMI_IFRAME_CFG_DI_N(mode, slot);
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
}
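
To make the size arithmetic concrete: for an AVI infoframe the pack helper returns HDMI_INFOFRAME_HEADER_SIZE (4 bytes: type, version, length, checksum) plus HDMI_AVI_INFOFRAME_SIZE (13 bytes of payload), i.e. 17. The head word carries the 3-byte type/version/length, so the PKT_WORDx registers must take size - 4 + 1 = 14 bytes, the checksum plus payload. A worked sketch, assuming those linux/hdmi.h constants:

	static size_t example_avi_subpack_words(void)
	{
		size_t packed   = HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE; /* 17 */
		size_t to_write = packed - HDMI_INFOFRAME_HEADER_SIZE + 1;              /* 14 */

		return DIV_ROUND_UP(to_write, sizeof(u32));	/* 4 subpack words */
	}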
@@ -352,7 +413,7 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
}
/* fixed infoframe configuration not linked to the mode */
- infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ infoframe.colorspace = hdmi->colorspace;
infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
@@ -362,7 +423,7 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
return ret;
}
- hdmi_infoframe_write_infopack(hdmi, buffer);
+ hdmi_infoframe_write_infopack(hdmi, buffer, ret);
return 0;
}
@@ -398,7 +459,49 @@ static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
return ret;
}
- hdmi_infoframe_write_infopack(hdmi, buffer);
+ hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+
+ return 0;
+}
+
+/*
+ * Prepare and configure the VS infoframe
+ *
+ * The Vendor Specific infoframe is transmitted once per frame and
+ * carries vendor-specific information.
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return a negative value if an error occurs
+ */
+#define HDMI_VENDOR_INFOFRAME_MAX_SIZE 6
+static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
+{
+ struct drm_display_mode *mode = &hdmi->mode;
+ struct hdmi_vendor_infoframe infoframe;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_VENDOR_INFOFRAME_MAX_SIZE];
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe, mode);
+ if (ret < 0) {
+ /*
+ * Reaching this point does not mean the vendor infoframe
+ * failed: it just means no vendor infoframe is needed for
+ * the selected mode. Only 4k and stereoscopic 3D modes
+ * require one, so simply return 0.
+ */
+ return 0;
+ }
+
+ ret = hdmi_vendor_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack VS infoframe: %d\n", ret);
+ return ret;
+ }
+
+ hdmi_infoframe_write_infopack(hdmi, buffer, ret);
return 0;
}
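
drm_hdmi_vendor_infoframe_from_display_mode() only fills the frame for modes carrying an HDMI VIC (4k) or a 3D structure; for ordinary 2D modes it returns a negative value, which is why the driver treats that return as "nothing to transmit" rather than a failure. A condensed sketch of that contract (helper name hypothetical):

	static bool example_needs_vs_infoframe(const struct drm_display_mode *mode)
	{
		struct hdmi_vendor_infoframe frame;

		/* negative return: no VS infoframe needed, not an error */
		return drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode) >= 0;
	}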
@@ -448,6 +551,172 @@ static void hdmi_swreset(struct sti_hdmi *hdmi)
clk_disable_unprepare(hdmi->clk_audio);
}
+#define DBGFS_PRINT_STR(str1, str2) seq_printf(s, "%-24s %s\n", str1, str2)
+#define DBGFS_PRINT_INT(str1, int2) seq_printf(s, "%-24s %d\n", str1, int2)
+#define DBGFS_DUMP(str, reg) seq_printf(s, "%s %-25s 0x%08X", str, #reg, \
+ hdmi_read(hdmi, reg))
+#define DBGFS_DUMP_DI(reg, slot) DBGFS_DUMP("\n", reg(slot))
+
+static void hdmi_dbg_cfg(struct seq_file *s, int val)
+{
+ int tmp;
+
+ seq_puts(s, "\t");
+ tmp = val & HDMI_CFG_HDMI_NOT_DVI;
+ DBGFS_PRINT_STR("mode:", tmp ? "HDMI" : "DVI");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_HDCP_EN;
+ DBGFS_PRINT_STR("HDCP:", tmp ? "enable" : "disable");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_ESS_NOT_OESS;
+ DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_SINK_TERM_DET_EN;
+ DBGFS_PRINT_STR("Sink term detection:", tmp ? "enable" : "disable");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_H_SYNC_POL_NEG;
+ DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_V_SYNC_POL_NEG;
+ DBGFS_PRINT_STR("Vsync polarity:", tmp ? "inverted" : "normal");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_422_EN;
+ DBGFS_PRINT_STR("YUV422 format:", tmp ? "enable" : "disable");
+}
+
+static void hdmi_dbg_sta(struct seq_file *s, int val)
+{
+ int tmp;
+
+ seq_puts(s, "\t");
+ tmp = (val & HDMI_STA_DLL_LCK);
+ DBGFS_PRINT_STR("pll:", tmp ? "locked" : "not locked");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_STA_HOT_PLUG);
+ DBGFS_PRINT_STR("hdmi cable:", tmp ? "connected" : "not connected");
+}
+
+static void hdmi_dbg_sw_di_cfg(struct seq_file *s, int val)
+{
+ int tmp;
+ char *const en_di[] = {"no transmission",
+ "single transmission",
+ "once every field",
+ "once every frame"};
+
+ seq_puts(s, "\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 1));
+ DBGFS_PRINT_STR("Data island 1:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 2)) >> 4;
+ DBGFS_PRINT_STR("Data island 2:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 3)) >> 8;
+ DBGFS_PRINT_STR("Data island 3:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 4)) >> 12;
+ DBGFS_PRINT_STR("Data island 4:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 5)) >> 16;
+ DBGFS_PRINT_STR("Data island 5:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 6)) >> 20;
+ DBGFS_PRINT_STR("Data island 6:", en_di[tmp]);
+}
+
+static int hdmi_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
+ DBGFS_DUMP("\n", HDMI_CFG);
+ hdmi_dbg_cfg(s, hdmi_read(hdmi, HDMI_CFG));
+ DBGFS_DUMP("", HDMI_INT_EN);
+ DBGFS_DUMP("\n", HDMI_STA);
+ hdmi_dbg_sta(s, hdmi_read(hdmi, HDMI_STA));
+ DBGFS_DUMP("", HDMI_ACTIVE_VID_XMIN);
+ seq_puts(s, "\t");
+ DBGFS_PRINT_INT("Xmin:", hdmi_read(hdmi, HDMI_ACTIVE_VID_XMIN));
+ DBGFS_DUMP("", HDMI_ACTIVE_VID_XMAX);
+ seq_puts(s, "\t");
+ DBGFS_PRINT_INT("Xmax:", hdmi_read(hdmi, HDMI_ACTIVE_VID_XMAX));
+ DBGFS_DUMP("", HDMI_ACTIVE_VID_YMIN);
+ seq_puts(s, "\t");
+ DBGFS_PRINT_INT("Ymin:", hdmi_read(hdmi, HDMI_ACTIVE_VID_YMIN));
+ DBGFS_DUMP("", HDMI_ACTIVE_VID_YMAX);
+ seq_puts(s, "\t");
+ DBGFS_PRINT_INT("Ymax:", hdmi_read(hdmi, HDMI_ACTIVE_VID_YMAX));
+ DBGFS_DUMP("", HDMI_SW_DI_CFG);
+ hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
+
+ seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
+ HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_AVI);
+ seq_puts(s, "\n");
+ seq_printf(s, "\n AUDIO Infoframe (Data Island slot N=%d):",
+ HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_AUDIO);
+ seq_puts(s, "\n");
+ seq_printf(s, "\n VENDOR SPECIFIC Infoframe (Data Island slot N=%d):",
+ HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list hdmi_debugfs_files[] = {
+ { "hdmi", hdmi_dbg_show, 0, NULL },
+};
+
+static void hdmi_debugfs_exit(struct sti_hdmi *hdmi, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(hdmi_debugfs_files,
+ ARRAY_SIZE(hdmi_debugfs_files),
+ minor);
+}
+
+static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
+ hdmi_debugfs_files[i].data = hdmi;
+
+ return drm_debugfs_create_files(hdmi_debugfs_files,
+ ARRAY_SIZE(hdmi_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
static void sti_hdmi_disable(struct drm_bridge *bridge)
{
struct sti_hdmi *hdmi = bridge->driver_private;
@@ -468,6 +737,11 @@ static void sti_hdmi_disable(struct drm_bridge *bridge)
/* Stop the phy */
hdmi->phy_ops->stop(hdmi);
+ /* Reset info frame transmission */
+ hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_AVI);
+ hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_AUDIO);
+ hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_VENDOR);
+
/* Set the default channel data to be a dark red */
hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL0_DAT);
hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL1_DAT);
@@ -523,6 +797,10 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
if (hdmi_audio_infoframe_config(hdmi))
DRM_ERROR("Unable to configure AUDIO infoframe\n");
+ /* Program VS infoframe */
+ if (hdmi_vendor_infoframe_config(hdmi))
+ DRM_ERROR("Unable to configure VS infoframe\n");
+
/* Sw reset */
hdmi_swreset(hdmi);
}
@@ -664,12 +942,97 @@ static void sti_hdmi_connector_destroy(struct drm_connector *connector)
kfree(hdmi_connector);
}
+static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
+ struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+ struct drm_property *prop;
+
+ /* colorspace property */
+ hdmi->colorspace = DEFAULT_COLORSPACE_MODE;
+ prop = drm_property_create_enum(drm_dev, 0, "colorspace",
+ colorspace_mode_names,
+ ARRAY_SIZE(colorspace_mode_names));
+ if (!prop) {
+ DRM_ERROR("failed to create colorspace property\n");
+ return;
+ }
+ hdmi_connector->colorspace_property = prop;
+ drm_object_attach_property(&connector->base, prop, hdmi->colorspace);
+
+ /* hdmi_mode property */
+ hdmi->hdmi_mode = DEFAULT_HDMI_MODE;
+ prop = drm_property_create_enum(drm_dev, 0, "hdmi_mode",
+ hdmi_mode_names,
+ ARRAY_SIZE(hdmi_mode_names));
+ if (!prop) {
+ DRM_ERROR("failed to create hdmi_mode property\n");
+ return;
+ }
+ hdmi_connector->hdmi_mode_property = prop;
+ drm_object_attach_property(&connector->base, prop, hdmi->hdmi_mode);
+}
+
+static int
+sti_hdmi_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ if (property == hdmi_connector->colorspace_property) {
+ hdmi->colorspace = val;
+ return 0;
+ }
+
+ if (property == hdmi_connector->hdmi_mode_property) {
+ hdmi->hdmi_mode = val;
+ return 0;
+ }
+
+ DRM_ERROR("failed to set hdmi connector property\n");
+ return -EINVAL;
+}
+
+static int
+sti_hdmi_connector_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ if (property == hdmi_connector->colorspace_property) {
+ *val = hdmi->colorspace;
+ return 0;
+ }
+
+ if (property == hdmi_connector->hdmi_mode_property) {
+ *val = hdmi->hdmi_mode;
+ return 0;
+ }
+
+ DRM_ERROR("failed to get hdmi connector property\n");
+ return -EINVAL;
+}
+
static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hdmi_connector_detect,
.destroy = sti_hdmi_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
+ .set_property = drm_atomic_helper_connector_set_property,
+ .atomic_set_property = sti_hdmi_connector_set_property,
+ .atomic_get_property = sti_hdmi_connector_get_property,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
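
From userspace these connector properties are driven through the generic property ioctl; a hedged libdrm sketch in C (connector_id and prop_id are hypothetical and would come from drmModeGetConnector()/drmModeGetProperty() enumeration):

	#include <stdint.h>
	#include <xf86drmMode.h>

	/* Switch the connector to DVI framing. */
	int example_set_dvi_framing(int fd, uint32_t connector_id, uint32_t prop_id)
	{
		/* enum value 1 = "dvi" in the hdmi_mode_names table below */
		return drmModeObjectSetProperty(fd, connector_id,
						DRM_MODE_OBJECT_CONNECTOR,
						prop_id, 1);
	}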
@@ -729,6 +1092,9 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_hdmi_connector_helper_funcs);
+ /* initialise property */
+ sti_hdmi_connector_init_property(drm_dev, drm_connector);
+
err = drm_connector_register(drm_connector);
if (err)
goto err_connector;
@@ -742,6 +1108,9 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
+ if (hdmi_debugfs_init(hdmi, drm_dev->primary))
+ DRM_ERROR("HDMI debugfs setup failed\n");
+
return 0;
err_sysfs:
@@ -755,7 +1124,10 @@ err_connector:
static void sti_hdmi_unbind(struct device *dev,
struct device *master, void *data)
{
- /* do nothing */
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ hdmi_debugfs_exit(hdmi, drm_dev->primary);
}
static const struct component_ops sti_hdmi_ops = {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 3d22390e1f3b..ef3a94583bbd 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -7,15 +7,14 @@
#ifndef _STI_HDMI_H_
#define _STI_HDMI_H_
+#include <linux/hdmi.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#define HDMI_STA 0x0010
#define HDMI_STA_DLL_LCK BIT(5)
-
-#define HDMI_STA_HOT_PLUG_SHIFT 4
-#define HDMI_STA_HOT_PLUG (1 << HDMI_STA_HOT_PLUG_SHIFT)
+#define HDMI_STA_HOT_PLUG BIT(4)
struct sti_hdmi;
@@ -24,6 +23,27 @@ struct hdmi_phy_ops {
void (*stop)(struct sti_hdmi *hdmi);
};
+/* values for the framing mode property */
+enum sti_hdmi_modes {
+ HDMI_MODE_HDMI,
+ HDMI_MODE_DVI,
+};
+
+static const struct drm_prop_enum_list hdmi_mode_names[] = {
+ { HDMI_MODE_HDMI, "hdmi" },
+ { HDMI_MODE_DVI, "dvi" },
+};
+
+#define DEFAULT_HDMI_MODE HDMI_MODE_HDMI
+
+static const struct drm_prop_enum_list colorspace_mode_names[] = {
+ { HDMI_COLORSPACE_RGB, "rgb" },
+ { HDMI_COLORSPACE_YUV422, "yuv422" },
+ { HDMI_COLORSPACE_YUV444, "yuv444" },
+};
+
+#define DEFAULT_COLORSPACE_MODE HDMI_COLORSPACE_RGB
+
/**
* STI hdmi structure
*
@@ -44,6 +64,9 @@ struct hdmi_phy_ops {
* @wait_event: wait event
* @event_received: wait event status
* @reset: reset control of the hdmi phy
+ * @ddc_adapt: i2c ddc adapter
+ * @colorspace: current colorspace selected
+ * @hdmi_mode: select framing for HDMI or DVI
*/
struct sti_hdmi {
struct device dev;
@@ -64,6 +87,8 @@ struct sti_hdmi {
bool event_received;
struct reset_control *reset;
struct i2c_adapter *ddc_adapt;
+ enum hdmi_colorspace colorspace;
+ enum sti_hdmi_modes hdmi_mode;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 43861b52261d..e05b0dc523ff 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -4,14 +4,11 @@
* License terms: GNU General Public License (GPL), version 2
*/
-#include <linux/clk.h>
#include <linux/component.h>
#include <linux/firmware.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -329,8 +326,6 @@ struct sti_hqvdp_cmd {
* @reset: reset control
* @vtg_nb: notifier to handle VTG Vsync
* @btm_field_pending: is there any bottom field (interlaced frame) to display
- * @curr_field_count: number of field updates
- * @last_field_count: number of field updates since last fps measure
* @hqvdp_cmd: buffer of commands
* @hqvdp_cmd_paddr: physical address of hqvdp_cmd
* @vtg: vtg for main data path
@@ -346,10 +341,8 @@ struct sti_hqvdp {
struct reset_control *reset;
struct notifier_block vtg_nb;
bool btm_field_pending;
- unsigned int curr_field_count;
- unsigned int last_field_count;
void *hqvdp_cmd;
- dma_addr_t hqvdp_cmd_paddr;
+ u32 hqvdp_cmd_paddr;
struct sti_vtg *vtg;
bool xp70_initialized;
};
@@ -372,8 +365,8 @@ static const uint32_t hqvdp_supported_formats[] = {
*/
static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
{
- int curr_cmd, next_cmd;
- dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ u32 curr_cmd, next_cmd;
+ u32 cmd = hqvdp->hqvdp_cmd_paddr;
int i;
curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
@@ -400,8 +393,8 @@ static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
*/
static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
{
- int curr_cmd;
- dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ u32 curr_cmd;
+ u32 cmd = hqvdp->hqvdp_cmd_paddr;
unsigned int i;
curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
@@ -417,6 +410,246 @@ static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
}
/**
+ * sti_hqvdp_get_next_cmd
+ * @hqvdp: hqvdp structure
+ *
+ * Look for the next hqvdp_cmd that will be used by the FW.
+ *
+ * RETURNS:
+ * the offset of the next command that will be used.
+ * -1 in error cases
+ */
+static int sti_hqvdp_get_next_cmd(struct sti_hqvdp *hqvdp)
+{
+ u32 next_cmd;
+ u32 cmd = hqvdp->hqvdp_cmd_paddr;
+ unsigned int i;
+
+ next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ for (i = 0; i < NB_VDP_CMD; i++) {
+ if (cmd == next_cmd)
+ return i * sizeof(struct sti_hqvdp_cmd);
+
+ cmd += sizeof(struct sti_hqvdp_cmd);
+ }
+
+ return -1;
+}
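
All three lookup helpers walk the same fixed ring of NB_VDP_CMD commands behind hqvdp_cmd_paddr; since the commands are contiguous, the walk is equivalent to a bounds-checked subtraction. A sketch (helper name hypothetical; assumes the register value lands on a command boundary, as the walk above also does):

	/* Returns the byte offset of the command that 'reg' points at,
	 * or -1 if it is outside the ring. */
	static int example_cmd_offset_from_reg(struct sti_hqvdp *hqvdp, u32 reg)
	{
		u32 base = hqvdp->hqvdp_cmd_paddr;
		u32 span = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);

		if (reg < base || reg >= base + span)
			return -1;
		return reg - base;
	}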
+
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(hqvdp->regs + reg))
+
+static const char *hqvdp_dbg_get_lut(u32 *coef)
+{
+ if (!memcmp(coef, coef_lut_a_legacy, 16))
+ return "LUT A";
+ if (!memcmp(coef, coef_lut_b, 16))
+ return "LUT B";
+ if (!memcmp(coef, coef_lut_c_y_legacy, 16))
+ return "LUT C Y";
+ if (!memcmp(coef, coef_lut_c_c_legacy, 16))
+ return "LUT C C";
+ if (!memcmp(coef, coef_lut_d_y_legacy, 16))
+ return "LUT D Y";
+ if (!memcmp(coef, coef_lut_d_c_legacy, 16))
+ return "LUT D C";
+ if (!memcmp(coef, coef_lut_e_y_legacy, 16))
+ return "LUT E Y";
+ if (!memcmp(coef, coef_lut_e_c_legacy, 16))
+ return "LUT E C";
+ if (!memcmp(coef, coef_lut_f_y_legacy, 16))
+ return "LUT F Y";
+ if (!memcmp(coef, coef_lut_f_c_legacy, 16))
+ return "LUT F C";
+ return "<UNKNOWN>";
+}
+
+static void hqvdp_dbg_dump_cmd(struct seq_file *s, struct sti_hqvdp_cmd *c)
+{
+ int src_w, src_h, dst_w, dst_h;
+
+ seq_puts(s, "\n\tTOP:");
+ seq_printf(s, "\n\t %-20s 0x%08X", "Config", c->top.config);
+ switch (c->top.config) {
+ case TOP_CONFIG_PROGRESSIVE:
+ seq_puts(s, "\tProgressive");
+ break;
+ case TOP_CONFIG_INTER_TOP:
+ seq_puts(s, "\tInterlaced, top field");
+ break;
+ case TOP_CONFIG_INTER_BTM:
+ seq_puts(s, "\tInterlaced, bottom field");
+ break;
+ default:
+ seq_puts(s, "\t<UNKNOWN>");
+ break;
+ }
+
+ seq_printf(s, "\n\t %-20s 0x%08X", "MemFormat", c->top.mem_format);
+ seq_printf(s, "\n\t %-20s 0x%08X", "CurrentY", c->top.current_luma);
+ seq_printf(s, "\n\t %-20s 0x%08X", "CurrentC", c->top.current_chroma);
+ seq_printf(s, "\n\t %-20s 0x%08X", "YSrcPitch", c->top.luma_src_pitch);
+ seq_printf(s, "\n\t %-20s 0x%08X", "CSrcPitch",
+ c->top.chroma_src_pitch);
+ seq_printf(s, "\n\t %-20s 0x%08X", "InputFrameSize",
+ c->top.input_frame_size);
+ seq_printf(s, "\t%dx%d",
+ c->top.input_frame_size & 0x0000FFFF,
+ c->top.input_frame_size >> 16);
+ seq_printf(s, "\n\t %-20s 0x%08X", "InputViewportSize",
+ c->top.input_viewport_size);
+ src_w = c->top.input_viewport_size & 0x0000FFFF;
+ src_h = c->top.input_viewport_size >> 16;
+ seq_printf(s, "\t%dx%d", src_w, src_h);
+
+ seq_puts(s, "\n\tHVSRC:");
+ seq_printf(s, "\n\t %-20s 0x%08X", "OutputPictureSize",
+ c->hvsrc.output_picture_size);
+ dst_w = c->hvsrc.output_picture_size & 0x0000FFFF;
+ dst_h = c->hvsrc.output_picture_size >> 16;
+ seq_printf(s, "\t%dx%d", dst_w, dst_h);
+ seq_printf(s, "\n\t %-20s 0x%08X", "ParamCtrl", c->hvsrc.param_ctrl);
+
+ seq_printf(s, "\n\t %-20s %s", "yh_coef",
+ hqvdp_dbg_get_lut(c->hvsrc.yh_coef));
+ seq_printf(s, "\n\t %-20s %s", "ch_coef",
+ hqvdp_dbg_get_lut(c->hvsrc.ch_coef));
+ seq_printf(s, "\n\t %-20s %s", "yv_coef",
+ hqvdp_dbg_get_lut(c->hvsrc.yv_coef));
+ seq_printf(s, "\n\t %-20s %s", "cv_coef",
+ hqvdp_dbg_get_lut(c->hvsrc.cv_coef));
+
+ seq_printf(s, "\n\t %-20s", "ScaleH");
+ if (dst_w > src_w)
+ seq_printf(s, " %d/1", dst_w / src_w);
+ else
+ seq_printf(s, " 1/%d", src_w / dst_w);
+
+ seq_printf(s, "\n\t %-20s", "ScaleV");
+ if (dst_h > src_h)
+ seq_printf(s, " %d/1", dst_h / src_h);
+ else
+ seq_printf(s, " 1/%d", src_h / dst_h);
+
+ seq_puts(s, "\n\tCSDI:");
+ seq_printf(s, "\n\t %-20s 0x%08X\t", "Config", c->csdi.config);
+ switch (c->csdi.config) {
+ case CSDI_CONFIG_PROG:
+ seq_puts(s, "Bypass");
+ break;
+ case CSDI_CONFIG_INTER_DIR:
+ seq_puts(s, "Deinterlace, directional");
+ break;
+ default:
+ seq_puts(s, "<UNKNOWN>");
+ break;
+ }
+
+ seq_printf(s, "\n\t %-20s 0x%08X", "Config2", c->csdi.config2);
+ seq_printf(s, "\n\t %-20s 0x%08X", "DcdiConfig", c->csdi.dcdi_config);
+}
+
+static int hqvdp_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int cmd, cmd_offset, infoxp70;
+ void *virt;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "%s: (vaddr = 0x%p)",
+ sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
+
+ DBGFS_DUMP(HQVDP_MBX_IRQ_TO_XP70);
+ DBGFS_DUMP(HQVDP_MBX_INFO_HOST);
+ DBGFS_DUMP(HQVDP_MBX_IRQ_TO_HOST);
+ DBGFS_DUMP(HQVDP_MBX_INFO_XP70);
+ infoxp70 = readl(hqvdp->regs + HQVDP_MBX_INFO_XP70);
+ seq_puts(s, "\tFirmware state: ");
+ if (infoxp70 & INFO_XP70_FW_READY)
+ seq_puts(s, "idle and ready");
+ else if (infoxp70 & INFO_XP70_FW_PROCESSING)
+ seq_puts(s, "processing a picture");
+ else if (infoxp70 & INFO_XP70_FW_INITQUEUES)
+ seq_puts(s, "programming queues");
+ else
+ seq_puts(s, "NOT READY");
+
+ DBGFS_DUMP(HQVDP_MBX_SW_RESET_CTRL);
+ DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL1);
+ if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
+ & STARTUP_CTRL1_RST_DONE)
+ seq_puts(s, "\tReset is done");
+ else
+ seq_puts(s, "\tReset is NOT done");
+ DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL2);
+ if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2)
+ & STARTUP_CTRL2_FETCH_EN)
+ seq_puts(s, "\tFetch is enabled");
+ else
+ seq_puts(s, "\tFetch is NOT enabled");
+ DBGFS_DUMP(HQVDP_MBX_GP_STATUS);
+ DBGFS_DUMP(HQVDP_MBX_NEXT_CMD);
+ DBGFS_DUMP(HQVDP_MBX_CURRENT_CMD);
+ DBGFS_DUMP(HQVDP_MBX_SOFT_VSYNC);
+ if (!(readl(hqvdp->regs + HQVDP_MBX_SOFT_VSYNC) & 3))
+ seq_puts(s, "\tHW Vsync");
+ else
+ seq_puts(s, "\tSW Vsync ?!?!");
+
+ /* Last command */
+ cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+ cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ seq_puts(s, "\n\n Last command: unknown");
+ } else {
+ virt = hqvdp->hqvdp_cmd + cmd_offset;
+ seq_printf(s, "\n\n Last command: address @ 0x%x (0x%p)",
+ cmd, virt);
+ hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
+ }
+
+ /* Next command */
+ cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+ cmd_offset = sti_hqvdp_get_next_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ seq_puts(s, "\n\n Next command: unknown");
+ } else {
+ virt = hqvdp->hqvdp_cmd + cmd_offset;
+ seq_printf(s, "\n\n Next command: address @ 0x%x (0x%p)",
+ cmd, virt);
+ hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
+ }
+
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list hqvdp_debugfs_files[] = {
+ { "hqvdp", hqvdp_dbg_show, 0, NULL },
+};
+
+static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
+ hqvdp_debugfs_files[i].data = hqvdp;
+
+ return drm_debugfs_create_files(hqvdp_debugfs_files,
+ ARRAY_SIZE(hqvdp_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
+/**
* sti_hqvdp_update_hvsrc
* @orient: horizontal or vertical
* @scale: scaling/zoom factor
@@ -580,7 +813,7 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
top_cmd_offest = sti_hqvdp_get_curr_cmd(hqvdp);
if ((btm_cmd_offset == -1) || (top_cmd_offest == -1)) {
- DRM_ERROR("Cannot get cmds, skip btm field\n");
+ DRM_DEBUG_DRIVER("Warning: no cmd, will skip field\n");
return -EBUSY;
}
@@ -599,11 +832,12 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
hqvdp->regs + HQVDP_MBX_NEXT_CMD);
- hqvdp->curr_field_count++;
hqvdp->btm_field_pending = false;
dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
__func__, hqvdp->hqvdp_cmd_paddr);
+
+ sti_plane_update_fps(&hqvdp->plane, false, true);
}
return 0;
@@ -612,19 +846,21 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
{
int size;
+ dma_addr_t dma_addr;
hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
/* Allocate memory for the VDP commands */
size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
- hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
- &hqvdp->hqvdp_cmd_paddr,
- GFP_KERNEL | GFP_DMA);
+ hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
+ &dma_addr,
+ GFP_KERNEL | GFP_DMA);
if (!hqvdp->hqvdp_cmd) {
DRM_ERROR("Failed to allocate memory for VDP cmd\n");
return;
}
+ hqvdp->hqvdp_cmd_paddr = (u32)dma_addr;
memset(hqvdp->hqvdp_cmd, 0, size);
}
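
dma_alloc_wc() is the renamed dma_alloc_writecombine(); allocations made this way must be released with the matching free helper. A hedged teardown sketch (helper name hypothetical; the driver keeps the handle as u32 for the 32-bit mailbox registers, so it is widened back for the free):

	static void example_hqvdp_free_cmds(struct sti_hqvdp *hqvdp)
	{
		int size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);

		if (hqvdp->hqvdp_cmd)
			dma_free_wc(hqvdp->dev, size, hqvdp->hqvdp_cmd,
				    (dma_addr_t)hqvdp->hqvdp_cmd_paddr);
	}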
@@ -670,7 +906,7 @@ static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
DRM_DEBUG_DRIVER("\n");
if (hqvdp->xp70_initialized) {
- DRM_INFO("HQVDP XP70 already initialized\n");
+ DRM_DEBUG_DRIVER("HQVDP XP70 already initialized\n");
return;
}
@@ -775,53 +1011,131 @@ out:
release_firmware(firmware);
}
-static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
+ struct drm_plane_state *state)
{
- struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
struct drm_crtc *crtc = state->crtc;
- struct sti_mixer *mixer = to_sti_mixer(crtc);
struct drm_framebuffer *fb = state->fb;
- struct drm_display_mode *mode = &crtc->mode;
- int dst_x = state->crtc_x;
- int dst_y = state->crtc_y;
- int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
- /* src_x are in 16.16 format */
- int src_x = state->src_x >> 16;
- int src_y = state->src_y >> 16;
- int src_w = state->src_w >> 16;
- int src_h = state->src_h >> 16;
bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
- struct drm_gem_cma_object *cma_obj;
- struct sti_hqvdp_cmd *cmd;
- int scale_h, scale_v;
- int cmd_offset;
+ struct drm_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_x, src_y, src_w, src_h;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ mode = &crtc_state->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x are in 16.16 format */
+ src_x = state->src_x >> 16;
+ src_y = state->src_y >> 16;
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
+ if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
+ src_w, src_h,
+ dst_w, dst_h)) {
+ DRM_ERROR("Scaling beyond HW capabilities\n");
+ return -EINVAL;
+ }
+
+ if (!drm_fb_cma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Input / output size
+ * Align to upper even value
+ */
+ dst_w = ALIGN(dst_w, 2);
+ dst_h = ALIGN(dst_h, 2);
+
+ if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
+ (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
+ (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
+ (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
+ DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+ src_w, src_h,
+ dst_w, dst_h);
+ return -EINVAL;
+ }
+
+ if (first_prepare) {
+ /* Start HQVDP XP70 coprocessor */
+ sti_hqvdp_start_xp70(hqvdp);
+
+ /* Prevent VTG shutdown */
+ if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Failed to prepare/enable pix main clk\n");
+ return -EINVAL;
+ }
+
+ /* Register VTG Vsync callback to handle bottom fields */
+ if (sti_vtg_register_client(hqvdp->vtg,
+ &hqvdp->vtg_nb,
+ crtc)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -EINVAL;
+ }
+ }
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
+ crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
sti_plane_to_str(plane),
dst_w, dst_h, dst_x, dst_y,
src_w, src_h, src_x, src_y);
+ return 0;
+}
+
+static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_x, src_y, src_w, src_h;
+ struct drm_gem_cma_object *cma_obj;
+ struct sti_hqvdp_cmd *cmd;
+ int scale_h, scale_v;
+ int cmd_offset;
+
+ if (!crtc || !fb)
+ return;
+
+ mode = &crtc->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x are in 16.16 format */
+ src_x = state->src_x >> 16;
+ src_y = state->src_y >> 16;
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
if (cmd_offset == -1) {
- DRM_ERROR("No available hqvdp_cmd now\n");
+ DRM_DEBUG_DRIVER("Warning: no cmd, will skip frame\n");
return;
}
cmd = hqvdp->hqvdp_cmd + cmd_offset;
- if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
- src_w, src_h,
- dst_w, dst_h)) {
- DRM_ERROR("Scaling beyond HW capabilities\n");
- return;
- }
-
/* Static parameters, defaulting to progressive mode */
cmd->top.config = TOP_CONFIG_PROGRESSIVE;
cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
@@ -836,10 +1150,6 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
- return;
- }
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->pixel_format,
@@ -860,16 +1170,6 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
dst_w = ALIGN(dst_w, 2);
dst_h = ALIGN(dst_h, 2);
- if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
- (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
- (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
- (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
- DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
- src_w, src_h,
- dst_w, dst_h);
- return;
- }
-
cmd->top.input_viewport_size = src_h << 16 | src_w;
cmd->top.input_frame_size = src_h << 16 | src_w;
cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
@@ -900,30 +1200,9 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
scale_v = SCALE_FACTOR * dst_h / src_h;
sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
- if (first_prepare) {
- /* Start HQVDP XP70 coprocessor */
- sti_hqvdp_start_xp70(hqvdp);
-
- /* Prevent VTG shutdown */
- if (clk_prepare_enable(hqvdp->clk_pix_main)) {
- DRM_ERROR("Failed to prepare/enable pix main clk\n");
- return;
- }
-
- /* Register VTG Vsync callback to handle bottom fields */
- if (sti_vtg_register_client(hqvdp->vtg,
- &hqvdp->vtg_nb,
- crtc)) {
- DRM_ERROR("Cannot register VTG notifier\n");
- return;
- }
- }
-
writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
hqvdp->regs + HQVDP_MBX_NEXT_CMD);
- hqvdp->curr_field_count++;
-
/* Interlaced : get ready to display the bottom field at next Vsync */
if (fb->flags & DRM_MODE_FB_INTERLACED)
hqvdp->btm_field_pending = true;
@@ -931,6 +1210,8 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+ sti_plane_update_fps(plane, true, true);
+
plane->status = STI_PLANE_UPDATED;
}
@@ -938,7 +1219,6 @@ static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
- struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
@@ -947,13 +1227,15 @@ static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->crtc->base.id,
+ sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
+ .atomic_check = sti_hqvdp_atomic_check,
.atomic_update = sti_hqvdp_atomic_update,
.atomic_disable = sti_hqvdp_atomic_disable,
};
@@ -983,6 +1265,9 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
+ if (hqvdp_debugfs_init(hqvdp, drm_dev->primary))
+ DRM_ERROR("HQVDP debugfs setup failed\n");
+
return &hqvdp->plane.drm_plane;
}
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 49db835dce03..e7425c38fc93 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -75,6 +75,145 @@ static inline void sti_mixer_reg_write(struct sti_mixer *mixer,
writel(val, mixer->regs + reg_id);
}
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ sti_mixer_reg_read(mixer, reg))
+
+static void mixer_dbg_ctl(struct seq_file *s, int val)
+{
+ unsigned int i;
+ int count = 0;
+ char *const disp_layer[] = {"BKG", "VID0", "VID1", "GDP0",
+ "GDP1", "GDP2", "GDP3"};
+
+ seq_puts(s, "\tEnabled: ");
+ for (i = 0; i < 7; i++) {
+ if (val & 1) {
+ seq_printf(s, "%s ", disp_layer[i]);
+ count++;
+ }
+ val = val >> 1;
+ }
+
+ val = val >> 2;
+ if (val & 1) {
+ seq_puts(s, "CURS ");
+ count++;
+ }
+ if (!count)
+ seq_puts(s, "Nothing");
+}
+
+static void mixer_dbg_crb(struct seq_file *s, int val)
+{
+ int i;
+
+ seq_puts(s, "\tDepth: ");
+ for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
+ switch (val & GAM_DEPTH_MASK_ID) {
+ case GAM_DEPTH_VID0_ID:
+ seq_puts(s, "VID0");
+ break;
+ case GAM_DEPTH_VID1_ID:
+ seq_puts(s, "VID1");
+ break;
+ case GAM_DEPTH_GDP0_ID:
+ seq_puts(s, "GDP0");
+ break;
+ case GAM_DEPTH_GDP1_ID:
+ seq_puts(s, "GDP1");
+ break;
+ case GAM_DEPTH_GDP2_ID:
+ seq_puts(s, "GDP2");
+ break;
+ case GAM_DEPTH_GDP3_ID:
+ seq_puts(s, "GDP3");
+ break;
+ default:
+ seq_puts(s, "---");
+ }
+
+ if (i < GAM_MIXER_NB_DEPTH_LEVEL - 1)
+ seq_puts(s, " < ");
+ val = val >> 3;
+ }
+}
+
+static void mixer_dbg_mxn(struct seq_file *s, void *addr)
+{
+ int i;
+
+ for (i = 1; i < 8; i++)
+ seq_printf(s, "-0x%08X", (int)readl(addr + i * 4));
+}
+
+static int mixer_dbg_show(struct seq_file *s, void *arg)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "%s: (vaddr = 0x%p)",
+ sti_mixer_to_str(mixer), mixer->regs);
+
+ DBGFS_DUMP(GAM_MIXER_CTL);
+ mixer_dbg_ctl(s, sti_mixer_reg_read(mixer, GAM_MIXER_CTL));
+ DBGFS_DUMP(GAM_MIXER_BKC);
+ DBGFS_DUMP(GAM_MIXER_BCO);
+ DBGFS_DUMP(GAM_MIXER_BCS);
+ DBGFS_DUMP(GAM_MIXER_AVO);
+ DBGFS_DUMP(GAM_MIXER_AVS);
+ DBGFS_DUMP(GAM_MIXER_CRB);
+ mixer_dbg_crb(s, sti_mixer_reg_read(mixer, GAM_MIXER_CRB));
+ DBGFS_DUMP(GAM_MIXER_ACT);
+ DBGFS_DUMP(GAM_MIXER_MBP);
+ DBGFS_DUMP(GAM_MIXER_MX0);
+ mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list mixer0_debugfs_files[] = {
+ { "mixer_main", mixer_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list mixer1_debugfs_files[] = {
+ { "mixer_aux", mixer_dbg_show, 0, NULL },
+};
+
+static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+{
+ unsigned int i;
+ struct drm_info_list *mixer_debugfs_files;
+ int nb_files;
+
+ switch (mixer->id) {
+ case STI_MIXER_MAIN:
+ mixer_debugfs_files = mixer0_debugfs_files;
+ nb_files = ARRAY_SIZE(mixer0_debugfs_files);
+ break;
+ case STI_MIXER_AUX:
+ mixer_debugfs_files = mixer1_debugfs_files;
+ nb_files = ARRAY_SIZE(mixer1_debugfs_files);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_files; i++)
+ mixer_debugfs_files[i].data = mixer;
+
+ return drm_debugfs_create_files(mixer_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
+}
+
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
{
u32 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
@@ -237,7 +376,9 @@ void sti_mixer_set_matrix(struct sti_mixer *mixer)
mixerColorSpaceMatIdentity[i]);
}
-struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+struct sti_mixer *sti_mixer_create(struct device *dev,
+ struct drm_device *drm_dev,
+ int id,
void __iomem *baseaddr)
{
struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
@@ -258,5 +399,8 @@ struct sti_mixer *sti_mixer_create(struct device *dev, int id,
DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
sti_mixer_to_str(mixer), mixer->regs);
+ if (mixer_debugfs_init(mixer, drm_dev->primary))
+ DRM_ERROR("MIXER debugfs setup failed\n");
+
return mixer;
}
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index efb1a9a5ba86..6f35fc086873 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -42,7 +42,9 @@ struct sti_mixer {
const char *sti_mixer_to_str(struct sti_mixer *mixer);
-struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+struct sti_mixer *sti_mixer_create(struct device *dev,
+ struct drm_device *drm_dev,
+ int id,
void __iomem *baseaddr);
int sti_mixer_set_plane_status(struct sti_mixer *mixer,
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index 2e5c751910c5..f10c98d3f012 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -43,6 +43,69 @@ const char *sti_plane_to_str(struct sti_plane *plane)
}
}
+#define STI_FPS_INTERVAL_MS 3000
+
+static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
+{
+ struct timespec tmp_ts = timespec_sub(lhs, rhs);
+ u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
+
+ do_div(tmp_ns, NSEC_PER_MSEC);
+
+ return (u32)tmp_ns;
+}
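
The same ns-to-ms reduction as a standalone sketch (do_div() divides its first argument in place and is what makes the 64-bit division safe on 32-bit ARM; helper name hypothetical):

	/* Milliseconds elapsed since 'start' on the raw monotonic clock. */
	static unsigned int example_elapsed_ms(struct timespec start)
	{
		struct timespec now, delta;
		u64 ns;

		getrawmonotonic(&now);
		delta = timespec_sub(now, start);
		ns = (u64)timespec_to_ns(&delta);
		do_div(ns, NSEC_PER_MSEC);	/* in place: ns now holds ms */
		return (unsigned int)ns;
	}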
+
+void sti_plane_update_fps(struct sti_plane *plane,
+ bool new_frame,
+ bool new_field)
+{
+ struct timespec now;
+ struct sti_fps_info *fps;
+ int fpks, fipks, ms_since_last, num_frames, num_fields;
+
+ getrawmonotonic(&now);
+
+ /* Compute number of frame updates */
+ fps = &plane->fps_info;
+
+ if (new_field)
+ fps->curr_field_counter++;
+
+ /* do not perform the fps calculation if new_frame is false */
+ if (!new_frame)
+ return;
+
+ fps->curr_frame_counter++;
+ ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp);
+ num_frames = fps->curr_frame_counter - fps->last_frame_counter;
+
+ if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS)
+ return;
+
+ fps->last_timestamp = now;
+ fps->last_frame_counter = fps->curr_frame_counter;
+ fpks = (num_frames * 1000000) / ms_since_last;
+ snprintf(plane->fps_info.fps_str, FPS_LENGTH, "%-6s @ %d.%.3d fps",
+ sti_plane_to_str(plane), fpks / 1000, fpks % 1000);
+
+ if (fps->curr_field_counter) {
+ /* Compute number of field updates */
+ num_fields = fps->curr_field_counter - fps->last_field_counter;
+ fps->last_field_counter = fps->curr_field_counter;
+ fipks = (num_fields * 1000000) / ms_since_last;
+ snprintf(plane->fps_info.fips_str,
+ FPS_LENGTH, " - %d.%.3d field/sec",
+ fipks / 1000, fipks % 1000);
+ } else {
+ plane->fps_info.fips_str[0] = '\0';
+ }
+
+ if (fps->output)
+ DRM_INFO("%s%s\n",
+ plane->fps_info.fps_str,
+ plane->fps_info.fips_str);
+}
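
The fpks/fipks values are fixed-point: frames (or fields) per second scaled by 1000, so the "%d.%.3d" format prints three decimals without floating point. A worked example with hypothetical numbers:

	/* Hypothetical: 150 frames counted over a 3000 ms window. */
	static int example_fpks(void)
	{
		int num_frames = 150, ms_since_last = 3000;

		/* 150 * 1000000 / 3000 = 50000 -> printed as "50.000 fps" */
		return (num_frames * 1000000) / ms_since_last;
	}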
+
static void sti_plane_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index 86f1e6fc81b9..c50a3b9f5d37 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -50,6 +50,18 @@ enum sti_plane_status {
STI_PLANE_DISABLED,
};
+#define FPS_LENGTH 64
+struct sti_fps_info {
+ bool output;
+ unsigned int curr_frame_counter;
+ unsigned int last_frame_counter;
+ unsigned int curr_field_counter;
+ unsigned int last_field_counter;
+ struct timespec last_timestamp;
+ char fps_str[FPS_LENGTH];
+ char fips_str[FPS_LENGTH];
+};
+
/**
* STI plane structure
*
@@ -57,15 +69,20 @@ enum sti_plane_status {
* @desc: plane type & id
* @status: to know the status of the plane
* @zorder: plane z-order
+ * @fps_info: frame per second info
*/
struct sti_plane {
struct drm_plane drm_plane;
enum sti_plane_desc desc;
enum sti_plane_status status;
int zorder;
+ struct sti_fps_info fps_info;
};
const char *sti_plane_to_str(struct sti_plane *plane);
+void sti_plane_update_fps(struct sti_plane *plane,
+ bool new_frame,
+ bool new_field);
void sti_plane_init_property(struct sti_plane *plane,
enum drm_plane_type type);
#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index f2afcf5438b8..2c99016443e5 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include "sti_crtc.h"
+#include "sti_vtg.h"
/* glue registers */
#define TVO_CSC_MAIN_M0 0x000
@@ -85,19 +86,7 @@
#define TVO_VIP_SEL_INPUT_BYPASSED 1
#define TVO_SYNC_MAIN_VTG_SET_REF 0x00
-#define TVO_SYNC_MAIN_VTG_SET_1 0x01
-#define TVO_SYNC_MAIN_VTG_SET_2 0x02
-#define TVO_SYNC_MAIN_VTG_SET_3 0x03
-#define TVO_SYNC_MAIN_VTG_SET_4 0x04
-#define TVO_SYNC_MAIN_VTG_SET_5 0x05
-#define TVO_SYNC_MAIN_VTG_SET_6 0x06
#define TVO_SYNC_AUX_VTG_SET_REF 0x10
-#define TVO_SYNC_AUX_VTG_SET_1 0x11
-#define TVO_SYNC_AUX_VTG_SET_2 0x12
-#define TVO_SYNC_AUX_VTG_SET_3 0x13
-#define TVO_SYNC_AUX_VTG_SET_4 0x14
-#define TVO_SYNC_AUX_VTG_SET_5 0x15
-#define TVO_SYNC_AUX_VTG_SET_6 0x16
#define TVO_SYNC_HD_DCS_SHIFT 8
@@ -106,6 +95,8 @@
#define ENCODER_CRTC_MASK (BIT(0) | BIT(1))
+#define TVO_MIN_HD_HEIGHT 720
+
/* enum listing the supported output data format */
enum sti_tvout_video_out_type {
STI_TVOUT_VIDEO_OUT_RGB,
@@ -269,6 +260,31 @@ static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
}
/**
+ * Set preformatter matrix
+ *
+ * @tvout: tvout structure
+ * @mode: display mode structure
+ */
+static void tvout_preformatter_set_matrix(struct sti_tvout *tvout,
+ struct drm_display_mode *mode)
+{
+ unsigned int i;
+ const u32 *pf_matrix;
+
+ if (mode->vdisplay >= TVO_MIN_HD_HEIGHT)
+ pf_matrix = rgb_to_ycbcr_709;
+ else
+ pf_matrix = rgb_to_ycbcr_601;
+
+ for (i = 0; i < 8; i++) {
+ tvout_write(tvout, *(pf_matrix + i),
+ TVO_CSC_MAIN_M0 + (i * 4));
+ tvout_write(tvout, *(pf_matrix + i),
+ TVO_CSC_AUX_M0 + (i * 4));
+ }
+}
+
+/**
* Start VIP block for DVO output
*
* @tvout: pointer on tvout structure
@@ -280,24 +296,26 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
struct device_node *node = tvout->dev->of_node;
bool sel_input_logic_inverted = false;
u32 tvo_in_vid_format;
- int val;
+ int val, tmp;
dev_dbg(tvout->dev, "%s\n", __func__);
if (main_path) {
DRM_DEBUG_DRIVER("main vip for DVO\n");
- /* Select the input sync for dvo = VTG set 4 */
- val = TVO_SYNC_MAIN_VTG_SET_4 << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
- val |= TVO_SYNC_MAIN_VTG_SET_4 << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
- val |= TVO_SYNC_MAIN_VTG_SET_4;
+ /* Select the input sync for dvo */
+ tmp = TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_DVO;
+ val = tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
+ val |= tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
+ val |= tmp;
tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for DVO\n");
- /* Select the input sync for dvo = VTG set 4 */
- val = TVO_SYNC_AUX_VTG_SET_4 << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
- val |= TVO_SYNC_AUX_VTG_SET_4 << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
- val |= TVO_SYNC_AUX_VTG_SET_4;
+ /* Select the input sync for dvo */
+ tmp = TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_DVO;
+ val = tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
+ val |= tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
+ val |= tmp;
tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
@@ -308,9 +326,8 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
- /* Set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout, TVO_VIP_DVO,
- TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+ /* Set clipping mode */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_DVO, TVO_VIP_CLIP_DISABLED);
/* Set round mode (rounded to 8-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_DVO, TVO_VIP_RND_8BIT_ROUNDED);
@@ -345,13 +362,17 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
if (main_path) {
DRM_DEBUG_DRIVER("main vip for hdmi\n");
- /* select the input sync for hdmi = VTG set 1 */
- tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ /* select the input sync for hdmi */
+ tvout_write(tvout,
+ TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDMI,
+ TVO_HDMI_SYNC_SEL);
tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for hdmi\n");
- /* select the input sync for hdmi = VTG set 1 */
- tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ /* select the input sync for hdmi */
+ tvout_write(tvout,
+ TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDMI,
+ TVO_HDMI_SYNC_SEL);
tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
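
The deleted TVO_SYNC_*_VTG_SET_n constants were just the set-reference base OR'ed with a sync identifier. A worked sketch, assuming the sti_vtg.h identifiers (not shown in this patch) are numbered VTG_SYNC_ID_HDMI = 1 ... VTG_SYNC_ID_DVO = 4:

	/*
	 * TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDMI = 0x00 | 1 = 0x01
	 *                                     (was TVO_SYNC_MAIN_VTG_SET_1)
	 * TVO_SYNC_AUX_VTG_SET_REF  | VTG_SYNC_ID_HDMI = 0x10 | 1 = 0x11
	 *                                     (was TVO_SYNC_AUX_VTG_SET_1)
	 * TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_DVO  = 0x00 | 4 = 0x04
	 *                                     (was TVO_SYNC_MAIN_VTG_SET_4)
	 */

Under that assumption the new expressions compose to exactly the register values the old per-output defines spelled out, which is why the defines could be dropped.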
@@ -361,9 +382,8 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
- /* set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
- TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+ /* set clipping mode */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI, TVO_VIP_CLIP_DISABLED);
/* set round mode (rounded to 8-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
@@ -397,13 +417,19 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
dev_dbg(tvout->dev, "%s\n", __func__);
if (main_path) {
- val = TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
- val |= TVO_SYNC_MAIN_VTG_SET_3;
+ DRM_DEBUG_DRIVER("main vip for HDF\n");
+ /* Select the input sync for HD analog and HD DCS */
+ val = TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDDCS;
+ val = val << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDF;
tvout_write(tvout, val, TVO_HD_SYNC_SEL);
tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
- val = TVO_SYNC_AUX_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
- val |= TVO_SYNC_AUX_VTG_SET_3;
+ DRM_DEBUG_DRIVER("aux vip for HDF\n");
+ /* Select the input sync for HD analog and HD DCS */
+ val = TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDDCS;
+ val = val << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDF;
tvout_write(tvout, val, TVO_HD_SYNC_SEL);
tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
@@ -414,8 +440,8 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
- /* set clipping mode (EAV/SAV clipping) */
- tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_EAV_SAV);
+ /* set clipping mode */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_DISABLED);
/* set round mode (rounded to 10-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
@@ -436,24 +462,164 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
}
-static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(tvout->regs + reg))
+
+static void tvout_dbg_vip(struct seq_file *s, int val)
{
+ int r, g, b, tmp, mask;
+ char *const reorder[] = {"Y_G", "Cb_B", "Cr_R"};
+ char *const clipping[] = {"No", "EAV/SAV", "Limited range RGB/Y",
+ "Limited range Cb/Cr", "decided by register"};
+ char *const round[] = {"8-bit", "10-bit", "12-bit"};
+ char *const input_sel[] = {"Main (color matrix enabled)",
+ "Main (color matrix by-passed)",
+ "", "", "", "", "", "",
+ "Aux (color matrix enabled)",
+ "Aux (color matrix by-passed)",
+ "", "", "", "", "", "Force value"};
+
+ seq_puts(s, "\t");
+ mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT;
+ r = (val & mask) >> TVO_VIP_REORDER_R_SHIFT;
+ mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT;
+ g = (val & mask) >> TVO_VIP_REORDER_G_SHIFT;
+ mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT;
+ b = (val & mask) >> TVO_VIP_REORDER_B_SHIFT;
+ seq_printf(s, "%-24s %s->%s %s->%s %s->%s\n", "Reorder:",
+ reorder[r], reorder[TVO_VIP_REORDER_CR_R_SEL],
+ reorder[g], reorder[TVO_VIP_REORDER_Y_G_SEL],
+ reorder[b], reorder[TVO_VIP_REORDER_CB_B_SEL]);
+ seq_puts(s, "\t\t\t\t\t");
+ mask = TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT;
+ tmp = (val & mask) >> TVO_VIP_CLIP_SHIFT;
+ seq_printf(s, "%-24s %s\n", "Clipping:", clipping[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ mask = TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT;
+ tmp = (val & mask) >> TVO_VIP_RND_SHIFT;
+ seq_printf(s, "%-24s input data rounded to %s per component\n",
+ "Round:", round[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & TVO_VIP_SEL_INPUT_MASK);
+ seq_printf(s, "%-24s %s", "Input selection:", input_sel[tmp]);
}
-static bool sti_tvout_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void tvout_dbg_hd_dac_cfg(struct seq_file *s, int val)
{
- return true;
+ seq_printf(s, "\t%-24s %s", "HD DAC:",
+ val & 1 ? "disabled" : "enabled");
}
-static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static int tvout_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
+
+ seq_puts(s, "\n\n HDMI encoder: ");
+ crtc = tvout->hdmi->crtc;
+ if (crtc) {
+ seq_printf(s, "connected to %s path",
+ sti_crtc_is_main(crtc) ? "main" : "aux");
+ DBGFS_DUMP(TVO_HDMI_SYNC_SEL);
+ DBGFS_DUMP(TVO_VIP_HDMI);
+ tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_HDMI));
+ } else {
+ seq_puts(s, "disabled");
+ }
+
+ seq_puts(s, "\n\n DVO encoder: ");
+ crtc = tvout->dvo->crtc;
+ if (crtc) {
+ seq_printf(s, "connected to %s path",
+ sti_crtc_is_main(crtc) ? "main" : "aux");
+ DBGFS_DUMP(TVO_DVO_SYNC_SEL);
+ DBGFS_DUMP(TVO_DVO_CONFIG);
+ DBGFS_DUMP(TVO_VIP_DVO);
+ tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_DVO));
+ } else {
+ seq_puts(s, "disabled");
+ }
+
+ seq_puts(s, "\n\n HDA encoder: ");
+ crtc = tvout->hda->crtc;
+ if (crtc) {
+ seq_printf(s, "connected to %s path",
+ sti_crtc_is_main(crtc) ? "main" : "aux");
+ DBGFS_DUMP(TVO_HD_SYNC_SEL);
+ DBGFS_DUMP(TVO_HD_DAC_CFG_OFF);
+ tvout_dbg_hd_dac_cfg(s,
+ readl(tvout->regs + TVO_HD_DAC_CFG_OFF));
+ DBGFS_DUMP(TVO_VIP_HDF);
+ tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_HDF));
+ } else {
+ seq_puts(s, "disabled");
+ }
+
+ seq_puts(s, "\n\n main path configuration");
+ DBGFS_DUMP(TVO_CSC_MAIN_M0);
+ DBGFS_DUMP(TVO_CSC_MAIN_M1);
+ DBGFS_DUMP(TVO_CSC_MAIN_M2);
+ DBGFS_DUMP(TVO_CSC_MAIN_M3);
+ DBGFS_DUMP(TVO_CSC_MAIN_M4);
+ DBGFS_DUMP(TVO_CSC_MAIN_M5);
+ DBGFS_DUMP(TVO_CSC_MAIN_M6);
+ DBGFS_DUMP(TVO_CSC_MAIN_M7);
+ DBGFS_DUMP(TVO_MAIN_IN_VID_FORMAT);
+
+ seq_puts(s, "\n\n auxiliary path configuration");
+ DBGFS_DUMP(TVO_CSC_AUX_M0);
+ DBGFS_DUMP(TVO_CSC_AUX_M2);
+ DBGFS_DUMP(TVO_CSC_AUX_M3);
+ DBGFS_DUMP(TVO_CSC_AUX_M4);
+ DBGFS_DUMP(TVO_CSC_AUX_M5);
+ DBGFS_DUMP(TVO_CSC_AUX_M6);
+ DBGFS_DUMP(TVO_CSC_AUX_M7);
+ DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list tvout_debugfs_files[] = {
+ { "tvout", tvout_dbg_show, 0, NULL },
+};
+
+static void tvout_debugfs_exit(struct sti_tvout *tvout, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(tvout_debugfs_files,
+ ARRAY_SIZE(tvout_debugfs_files),
+ minor);
+}
+
+static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
+ tvout_debugfs_files[i].data = tvout;
+
+ return drm_debugfs_create_files(tvout_debugfs_files,
+ ARRAY_SIZE(tvout_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
+static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}
-static void sti_tvout_encoder_prepare(struct drm_encoder *encoder)
+static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
}
@@ -469,10 +635,12 @@ static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
.destroy = sti_tvout_encoder_destroy,
};
-static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
+static void sti_dvo_encoder_enable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
+ tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
+
tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
}
@@ -486,10 +654,8 @@ static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs sti_dvo_encoder_helper_funcs = {
.dpms = sti_tvout_encoder_dpms,
- .mode_fixup = sti_tvout_encoder_mode_fixup,
.mode_set = sti_tvout_encoder_mode_set,
- .prepare = sti_tvout_encoder_prepare,
- .commit = sti_dvo_encoder_commit,
+ .enable = sti_dvo_encoder_enable,
.disable = sti_dvo_encoder_disable,
};
@@ -520,10 +686,12 @@ sti_tvout_create_dvo_encoder(struct drm_device *dev,
return drm_encoder;
}
-static void sti_hda_encoder_commit(struct drm_encoder *encoder)
+static void sti_hda_encoder_enable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
+ tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
+
tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
}
@@ -540,10 +708,8 @@ static void sti_hda_encoder_disable(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = {
.dpms = sti_tvout_encoder_dpms,
- .mode_fixup = sti_tvout_encoder_mode_fixup,
.mode_set = sti_tvout_encoder_mode_set,
- .prepare = sti_tvout_encoder_prepare,
- .commit = sti_hda_encoder_commit,
+ .commit = sti_hda_encoder_enable,
.disable = sti_hda_encoder_disable,
};
@@ -572,10 +738,12 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
return drm_encoder;
}
-static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
+static void sti_hdmi_encoder_enable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
+ tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
+
tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
}
@@ -589,10 +757,8 @@ static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = {
.dpms = sti_tvout_encoder_dpms,
- .mode_fixup = sti_tvout_encoder_mode_fixup,
.mode_set = sti_tvout_encoder_mode_set,
- .prepare = sti_tvout_encoder_prepare,
- .commit = sti_hdmi_encoder_commit,
+ .commit = sti_hdmi_encoder_enable,
.disable = sti_hdmi_encoder_disable,
};
@@ -638,26 +804,24 @@ static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
if (tvout->hda)
drm_encoder_cleanup(tvout->hda);
tvout->hda = NULL;
+
+ if (tvout->dvo)
+ drm_encoder_cleanup(tvout->dvo);
+ tvout->dvo = NULL;
}
static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
{
struct sti_tvout *tvout = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- unsigned int i;
tvout->drm_dev = drm_dev;
- /* set preformatter matrix */
- for (i = 0; i < 8; i++) {
- tvout_write(tvout, rgb_to_ycbcr_601[i],
- TVO_CSC_MAIN_M0 + (i * 4));
- tvout_write(tvout, rgb_to_ycbcr_601[i],
- TVO_CSC_AUX_M0 + (i * 4));
- }
-
sti_tvout_create_encoders(drm_dev, tvout);
+ if (tvout_debugfs_init(tvout, drm_dev->primary))
+ DRM_ERROR("TVOUT debugfs setup failed\n");
+
return 0;
}
@@ -665,8 +829,11 @@ static void sti_tvout_unbind(struct device *dev, struct device *master,
void *data)
{
struct sti_tvout *tvout = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
sti_tvout_destroy_encoders(tvout);
+
+ tvout_debugfs_exit(tvout, drm_dev->primary);
}
static const struct component_ops sti_tvout_ops = {
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index a8254cc362a1..5a2c5dc3687b 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -42,6 +42,104 @@
#define VID_MPR1_BT709 0x0AC50000
#define VID_MPR2_BT709 0x07150545
#define VID_MPR3_BT709 0x00000AE8
+/* YCbCr to RGB BT601:
+ * R = Y+1.3711Cr
+ * G = Y-0.6992Cr-0.3359Cb
+ * B = Y+1.7344Cb
+ */
+#define VID_MPR0_BT601 0x0A800000
+#define VID_MPR1_BT601 0x0AAF0000
+#define VID_MPR2_BT601 0x094E0754
+#define VID_MPR3_BT601 0x00000ADD
+
+#define VID_MIN_HD_HEIGHT 720
+
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(vid->regs + reg))
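
For reference, a sketch of what one DBGFS_DUMP() invocation expands to after preprocessing (the #reg stringification supplies the register name):

	/* Illustrative expansion only: DBGFS_DUMP(VID_CTL) becomes */
	seq_printf(s, "\n %-25s 0x%08X", "VID_CTL",
		   readl(vid->regs + VID_CTL));
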
+
+static void vid_dbg_ctl(struct seq_file *s, int val)
+{
+ val = val >> 30;
+ seq_puts(s, "\t");
+
+ if (!(val & 1))
+ seq_puts(s, "NOT ");
+ seq_puts(s, "ignored on main mixer - ");
+
+ if (!(val & 2))
+ seq_puts(s, "NOT ");
+ seq_puts(s, "ignored on aux mixer");
+}
+
+static void vid_dbg_vpo(struct seq_file *s, int val)
+{
+ seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+}
+
+static void vid_dbg_vps(struct seq_file *s, int val)
+{
+ seq_printf(s, "\txds:%4d\tyds:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+}
+
+static void vid_dbg_mst(struct seq_file *s, int val)
+{
+ if (val & 1)
+ seq_puts(s, "\tBUFFER UNDERFLOW!");
+}
+
+static int vid_dbg_show(struct seq_file *s, void *arg)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
+
+ DBGFS_DUMP(VID_CTL);
+ vid_dbg_ctl(s, readl(vid->regs + VID_CTL));
+ DBGFS_DUMP(VID_ALP);
+ DBGFS_DUMP(VID_CLF);
+ DBGFS_DUMP(VID_VPO);
+ vid_dbg_vpo(s, readl(vid->regs + VID_VPO));
+ DBGFS_DUMP(VID_VPS);
+ vid_dbg_vps(s, readl(vid->regs + VID_VPS));
+ DBGFS_DUMP(VID_KEY1);
+ DBGFS_DUMP(VID_KEY2);
+ DBGFS_DUMP(VID_MPR0);
+ DBGFS_DUMP(VID_MPR1);
+ DBGFS_DUMP(VID_MPR2);
+ DBGFS_DUMP(VID_MPR3);
+ DBGFS_DUMP(VID_MST);
+ vid_dbg_mst(s, readl(vid->regs + VID_MST));
+ DBGFS_DUMP(VID_BC);
+ DBGFS_DUMP(VID_TINT);
+ DBGFS_DUMP(VID_CSAT);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list vid_debugfs_files[] = {
+ { "vid", vid_dbg_show, 0, NULL },
+};
+
+static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
+ vid_debugfs_files[i].data = vid;
+
+ return drm_debugfs_create_files(vid_debugfs_files,
+ ARRAY_SIZE(vid_debugfs_files),
+ minor->debugfs_root, minor);
+}
void sti_vid_commit(struct sti_vid *vid,
struct drm_plane_state *state)
@@ -52,6 +150,7 @@ void sti_vid_commit(struct sti_vid *vid,
int dst_y = state->crtc_y;
int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ int src_h = state->src_h >> 16;
u32 val, ydo, xdo, yds, xds;
/* Input / output size
@@ -71,6 +170,19 @@ void sti_vid_commit(struct sti_vid *vid,
writel((ydo << 16) | xdo, vid->regs + VID_VPO);
writel((yds << 16) | xds, vid->regs + VID_VPS);
+
+ /* Color conversion parameters */
+ if (src_h >= VID_MIN_HD_HEIGHT) {
+ writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
+ writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
+ writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
+ writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
+ } else {
+ writel(VID_MPR0_BT601, vid->regs + VID_MPR0);
+ writel(VID_MPR1_BT601, vid->regs + VID_MPR1);
+ writel(VID_MPR2_BT601, vid->regs + VID_MPR2);
+ writel(VID_MPR3_BT601, vid->regs + VID_MPR3);
+ }
}
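
The selection above keys purely off the source height: anything of at least VID_MIN_HD_HEIGHT (720) lines is converted with the BT.709 matrix, smaller sources with BT.601. A minimal stand-alone sketch of the same rule; sti_vid_set_csc() is a hypothetical helper name, not part of the driver:

	/* Sketch only: pick the YCbCr->RGB matrix from the source height */
	static void sti_vid_set_csc(struct sti_vid *vid, int src_h)
	{
		bool hd = src_h >= VID_MIN_HD_HEIGHT;

		writel(hd ? VID_MPR0_BT709 : VID_MPR0_BT601, vid->regs + VID_MPR0);
		writel(hd ? VID_MPR1_BT709 : VID_MPR1_BT601, vid->regs + VID_MPR1);
		writel(hd ? VID_MPR2_BT709 : VID_MPR2_BT601, vid->regs + VID_MPR2);
		writel(hd ? VID_MPR3_BT709 : VID_MPR3_BT601, vid->regs + VID_MPR3);
	}
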
void sti_vid_disable(struct sti_vid *vid)
@@ -91,20 +203,14 @@ static void sti_vid_init(struct sti_vid *vid)
/* Opaque */
writel(VID_ALP_OPAQUE, vid->regs + VID_ALP);
- /* Color conversion parameters */
- writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
- writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
- writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
- writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
-
/* Brightness, contrast, tint, saturation */
writel(VID_BC_DFLT, vid->regs + VID_BC);
writel(VID_TINT_DFLT, vid->regs + VID_TINT);
writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
}
-struct sti_vid *sti_vid_create(struct device *dev, int id,
- void __iomem *baseaddr)
+struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
+ int id, void __iomem *baseaddr)
{
struct sti_vid *vid;
@@ -120,5 +226,8 @@ struct sti_vid *sti_vid_create(struct device *dev, int id,
sti_vid_init(vid);
+ if (vid_debugfs_init(vid, drm_dev->primary))
+ DRM_ERROR("VID debugfs setup failed\n");
+
return vid;
}
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 5dea4791f1d6..6c842344f3d8 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -23,7 +23,7 @@ struct sti_vid {
void sti_vid_commit(struct sti_vid *vid,
struct drm_plane_state *state);
void sti_vid_disable(struct sti_vid *vid);
-struct sti_vid *sti_vid_create(struct device *dev, int id,
- void __iomem *baseaddr);
+struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
+ int id, void __iomem *baseaddr);
#endif
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index d56630c60039..32c7986b63ab 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -15,8 +15,8 @@
#include "sti_vtg.h"
-#define VTG_TYPE_MASTER 0
-#define VTG_TYPE_SLAVE_BY_EXT0 1
+#define VTG_MODE_MASTER 0
+#define VTG_MODE_SLAVE_BY_EXT0 1
/* registers offset */
#define VTG_MODE 0x0000
@@ -64,6 +64,9 @@
/* Delay introduced by the HDMI in nb of pixel */
#define HDMI_DELAY (5)
+/* Delay introduced by the DVO in nb of pixel */
+#define DVO_DELAY (2)
+
/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
#define AWG_DELAY_HD (-9)
#define AWG_DELAY_ED (-8)
@@ -71,13 +74,61 @@
LIST_HEAD(vtg_lookup);
+/*
+ * STI VTG register offset structure
+ *
+ * @h_hd: stores the VTG_H_HD_x register offset
+ * @top_v_vd: stores the VTG_TOP_V_VD_x register offset
+ * @bot_v_vd: stores the VTG_BOT_V_VD_x register offset
+ * @top_v_hd: stores the VTG_TOP_V_HD_x register offset
+ * @bot_v_hd: stores the VTG_BOT_V_HD_x register offset
+ */
+struct sti_vtg_regs_offs {
+ u32 h_hd;
+ u32 top_v_vd;
+ u32 bot_v_vd;
+ u32 top_v_hd;
+ u32 bot_v_hd;
+};
+
+#define VTG_MAX_SYNC_OUTPUT 4
+static const struct sti_vtg_regs_offs vtg_regs_offs[VTG_MAX_SYNC_OUTPUT] = {
+ { VTG_H_HD_1,
+ VTG_TOP_V_VD_1, VTG_BOT_V_VD_1, VTG_TOP_V_HD_1, VTG_BOT_V_HD_1 },
+ { VTG_H_HD_2,
+ VTG_TOP_V_VD_2, VTG_BOT_V_VD_2, VTG_TOP_V_HD_2, VTG_BOT_V_HD_2 },
+ { VTG_H_HD_3,
+ VTG_TOP_V_VD_3, VTG_BOT_V_VD_3, VTG_TOP_V_HD_3, VTG_BOT_V_HD_3 },
+ { VTG_H_HD_4,
+ VTG_TOP_V_VD_4, VTG_BOT_V_VD_4, VTG_TOP_V_HD_4, VTG_BOT_V_HD_4 }
+};
+
+/*
+ * STI VTG synchronisation parameters structure
+ *
+ * @hsync: sample number falling and rising edge
+ * @vsync_line_top: vertical top field line number falling and rising edge
+ * @vsync_line_bot: vertical bottom field line number falling and rising edge
+ * @vsync_off_top: vertical top field sample number rising and falling edge
+ * @vsync_off_bot: vertical bottom field sample number rising and falling edge
+ */
+struct sti_vtg_sync_params {
+ u32 hsync;
+ u32 vsync_line_top;
+ u32 vsync_line_bot;
+ u32 vsync_off_top;
+ u32 vsync_off_bot;
+};
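
The offsets table and the parameters struct pair up one-to-one: a 1-based VTG_SYNC_ID_* value (see sti_vtg.h below) selects row id - 1 of vtg_regs_offs. A hedged sketch of that lookup; vtg_write_sync() is a hypothetical helper used only for illustration:

	/* Sketch only: program one sync output selected by its 1-based id */
	static void vtg_write_sync(struct sti_vtg *vtg, int sync_id,
				   const struct sti_vtg_sync_params *sp)
	{
		const struct sti_vtg_regs_offs *offs = &vtg_regs_offs[sync_id - 1];

		writel(sp->hsync, vtg->regs + offs->h_hd);
		writel(sp->vsync_line_top, vtg->regs + offs->top_v_vd);
		writel(sp->vsync_line_bot, vtg->regs + offs->bot_v_vd);
		writel(sp->vsync_off_top, vtg->regs + offs->top_v_hd);
		writel(sp->vsync_off_bot, vtg->regs + offs->bot_v_hd);
	}
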
+
/**
* STI VTG structure
*
* @dev: pointer to device driver
- * @data: data associated to the device
+ * @np: device node
+ * @regs: register mapping
+ * @sync_params: synchronisation parameters used to generate timings
* @irq: VTG irq
- * @type: VTG type (main or aux)
+ * @irq_status: stores the IRQ status value
* @notifier_list: notifier callback
* @crtc: the CRTC for vblank event
* @slave: slave vtg
@@ -87,6 +138,7 @@ struct sti_vtg {
struct device *dev;
struct device_node *np;
void __iomem *regs;
+ struct sti_vtg_sync_params sync_params[VTG_MAX_SYNC_OUTPUT];
int irq;
u32 irq_status;
struct raw_notifier_head notifier_list;
@@ -146,13 +198,69 @@ static void vtg_set_output_window(void __iomem *regs,
writel(video_bottom_field_stop, regs + VTG_VID_BFS);
}
+static void vtg_set_hsync_vsync_pos(struct sti_vtg_sync_params *sync,
+ int delay,
+ const struct drm_display_mode *mode)
+{
+ long clocksperline, start, stop;
+ u32 risesync_top, fallsync_top;
+ u32 risesync_offs_top, fallsync_offs_top;
+
+ clocksperline = mode->htotal;
+
+ /* Get the hsync position */
+ start = 0;
+ stop = mode->hsync_end - mode->hsync_start;
+
+ start += delay;
+ stop += delay;
+
+ if (start < 0)
+ start += clocksperline;
+ else if (start >= clocksperline)
+ start -= clocksperline;
+
+ if (stop < 0)
+ stop += clocksperline;
+ else if (stop >= clocksperline)
+ stop -= clocksperline;
+
+ sync->hsync = (stop << 16) | start;
+
+ /* Get the vsync position */
+ if (delay >= 0) {
+ risesync_top = 1;
+ fallsync_top = risesync_top;
+ fallsync_top += mode->vsync_end - mode->vsync_start;
+
+ fallsync_offs_top = (u32)delay;
+ risesync_offs_top = (u32)delay;
+ } else {
+ risesync_top = mode->vtotal;
+ fallsync_top = mode->vsync_end - mode->vsync_start;
+
+ fallsync_offs_top = clocksperline + delay;
+ risesync_offs_top = clocksperline + delay;
+ }
+
+ sync->vsync_line_top = (fallsync_top << 16) | risesync_top;
+ sync->vsync_off_top = (fallsync_offs_top << 16) | risesync_offs_top;
+
+ /* Only progressive supported for now */
+ sync->vsync_line_bot = sync->vsync_line_top;
+ sync->vsync_off_bot = sync->vsync_off_top;
+}
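
As a worked example of the wrap-around above, take CEA 1080p (htotal = 2200, hsync width = 44, vsync width = 5). For HDMI (delay = 5): start = 5, stop = 49, both already in [0, 2200), so hsync = (49 << 16) | 5. For the HDF output (delay = AWG_DELAY_HD = -9): start = -9 wraps to 2191 while stop = 44 - 9 = 35, giving hsync = (35 << 16) | 2191; the negative delay also takes the vsync branch that places risesync_top at vtotal (1125) with offsets of clocksperline - 9 = 2191.
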
+
static void vtg_set_mode(struct sti_vtg *vtg,
- int type, const struct drm_display_mode *mode)
+ int type,
+ struct sti_vtg_sync_params *sync,
+ const struct drm_display_mode *mode)
{
- u32 tmp;
+ unsigned int i;
if (vtg->slave)
- vtg_set_mode(vtg->slave, VTG_TYPE_SLAVE_BY_EXT0, mode);
+ vtg_set_mode(vtg->slave, VTG_MODE_SLAVE_BY_EXT0,
+ vtg->sync_params, mode);
/* Set the number of clock cycles per line */
writel(mode->htotal, vtg->regs + VTG_CLKLN);
@@ -163,57 +271,31 @@ static void vtg_set_mode(struct sti_vtg *vtg,
/* Program output window */
vtg_set_output_window(vtg->regs, mode);
- /* prepare VTG set 1 for HDMI */
- tmp = (mode->hsync_end - mode->hsync_start + HDMI_DELAY) << 16;
- tmp |= HDMI_DELAY;
- writel(tmp, vtg->regs + VTG_H_HD_1);
-
- tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
- tmp |= 1;
- writel(tmp, vtg->regs + VTG_TOP_V_VD_1);
- writel(tmp, vtg->regs + VTG_BOT_V_VD_1);
-
- tmp = HDMI_DELAY << 16;
- tmp |= HDMI_DELAY;
- writel(tmp, vtg->regs + VTG_TOP_V_HD_1);
- writel(tmp, vtg->regs + VTG_BOT_V_HD_1);
-
- /* prepare VTG set 2 for for HD DCS */
- tmp = (mode->hsync_end - mode->hsync_start) << 16;
- writel(tmp, vtg->regs + VTG_H_HD_2);
-
- tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
- tmp |= 1;
- writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
- writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
- writel(0, vtg->regs + VTG_TOP_V_HD_2);
- writel(0, vtg->regs + VTG_BOT_V_HD_2);
-
- /* prepare VTG set 3 for HD Analog in HD mode */
- tmp = (mode->hsync_end - mode->hsync_start + AWG_DELAY_HD) << 16;
- tmp |= mode->htotal + AWG_DELAY_HD;
- writel(tmp, vtg->regs + VTG_H_HD_3);
-
- tmp = (mode->vsync_end - mode->vsync_start) << 16;
- tmp |= mode->vtotal;
- writel(tmp, vtg->regs + VTG_TOP_V_VD_3);
- writel(tmp, vtg->regs + VTG_BOT_V_VD_3);
-
- tmp = (mode->htotal + AWG_DELAY_HD) << 16;
- tmp |= mode->htotal + AWG_DELAY_HD;
- writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
- writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
-
- /* Prepare VTG set 4 for DVO */
- tmp = (mode->hsync_end - mode->hsync_start) << 16;
- writel(tmp, vtg->regs + VTG_H_HD_4);
-
- tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
- tmp |= 1;
- writel(tmp, vtg->regs + VTG_TOP_V_VD_4);
- writel(tmp, vtg->regs + VTG_BOT_V_VD_4);
- writel(0, vtg->regs + VTG_TOP_V_HD_4);
- writel(0, vtg->regs + VTG_BOT_V_HD_4);
+ /* Set hsync and vsync position for HDMI */
+ vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDMI - 1], HDMI_DELAY, mode);
+
+ /* Set hsync and vsync position for HD DCS */
+ vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDDCS - 1], 0, mode);
+
+ /* Set hsync and vsync position for HDF */
+ vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDF - 1], AWG_DELAY_HD, mode);
+
+ /* Set hsync and vsync position for DVO */
+ vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_DVO - 1], DVO_DELAY, mode);
+
+ /* Program the sync outputs */
+ for (i = 0; i < VTG_MAX_SYNC_OUTPUT; i++) {
+ writel(sync[i].hsync,
+ vtg->regs + vtg_regs_offs[i].h_hd);
+ writel(sync[i].vsync_line_top,
+ vtg->regs + vtg_regs_offs[i].top_v_vd);
+ writel(sync[i].vsync_line_bot,
+ vtg->regs + vtg_regs_offs[i].bot_v_vd);
+ writel(sync[i].vsync_off_top,
+ vtg->regs + vtg_regs_offs[i].top_v_hd);
+ writel(sync[i].vsync_off_bot,
+ vtg->regs + vtg_regs_offs[i].bot_v_hd);
+ }
/* mode */
writel(type, vtg->regs + VTG_MODE);
@@ -231,7 +313,7 @@ void sti_vtg_set_config(struct sti_vtg *vtg,
const struct drm_display_mode *mode)
{
/* write configuration */
- vtg_set_mode(vtg, VTG_TYPE_MASTER, mode);
+ vtg_set_mode(vtg, VTG_MODE_MASTER, vtg->sync_params, mode);
vtg_reset(vtg);
diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h
index cd2439f89d05..f1dcdf9c2342 100644
--- a/drivers/gpu/drm/sti/sti_vtg.h
+++ b/drivers/gpu/drm/sti/sti_vtg.h
@@ -10,6 +10,11 @@
#define VTG_TOP_FIELD_EVENT 1
#define VTG_BOTTOM_FIELD_EVENT 2
+#define VTG_SYNC_ID_HDMI 1
+#define VTG_SYNC_ID_HDDCS 2
+#define VTG_SYNC_ID_HDF 3
+#define VTG_SYNC_ID_DVO 4
+
struct sti_vtg;
struct drm_display_mode;
struct notifier_block;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index dde6f208c347..fb2b4b0271a2 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -988,23 +988,6 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
spin_unlock_irqrestore(&drm->event_lock, flags);
}
-void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
- struct tegra_dc *dc = to_tegra_dc(crtc);
- struct drm_device *drm = crtc->dev;
- unsigned long flags;
-
- spin_lock_irqsave(&drm->event_lock, flags);
-
- if (dc->event && dc->event->base.file_priv == file) {
- dc->event->base.destroy(&dc->event->base);
- drm_crtc_vblank_put(crtc);
- dc->event = NULL;
- }
-
- spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
static void tegra_dc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index c5c856a0879d..8e6b18caa706 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -858,10 +858,6 @@ static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
struct tegra_drm_context *context, *tmp;
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- tegra_dc_cancel_page_flip(crtc, file);
list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
tegra_drm_context_free(context);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index c088f2f67eda..8a10f5b7d9dc 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -195,7 +195,6 @@ struct tegra_dc_window {
u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc);
void tegra_dc_enable_vblank(struct tegra_dc *dc);
void tegra_dc_disable_vblank(struct tegra_dc *dc);
-void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
void tegra_dc_commit(struct tegra_dc *dc);
int tegra_dc_state_setup_clock(struct tegra_dc *dc,
struct drm_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 33add93b4ed9..3b0d8c392b70 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -175,8 +175,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
- dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
- bo->paddr);
+ dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
}
@@ -233,8 +232,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
} else {
size_t size = bo->gem.size;
- bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
- GFP_KERNEL | __GFP_NOWARN);
+ bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
"failed to allocate buffer of size %zu\n",
@@ -472,8 +471,8 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
- bo->paddr, gem->size);
+ ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ gem->size);
if (ret) {
drm_gem_vm_close(vma);
return ret;
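
The dma_*_wc() calls adopted above are the renamed forms of the dma_*_writecombine() helpers with identical semantics. A minimal lifecycle sketch under that assumption; wc_buffer_demo() is a hypothetical function for illustration:

	#include <linux/dma-mapping.h>

	/* Sketch only: allocate, use and free a write-combined DMA buffer */
	static int wc_buffer_demo(struct device *dev, size_t size)
	{
		dma_addr_t paddr;
		void *vaddr;

		vaddr = dma_alloc_wc(dev, size, &paddr, GFP_KERNEL | __GFP_NOWARN);
		if (!vaddr)
			return -ENOMEM;

		/* ... point the scanout engine at paddr, fill pixels via vaddr ... */

		dma_free_wc(dev, size, vaddr, paddr);
		return 0;
	}
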
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 7d07733bdc86..051e5e1b7ad6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -21,25 +21,31 @@
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
+#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
+
struct tilcdc_crtc {
struct drm_crtc base;
const struct tilcdc_panel_info *info;
- uint32_t dirty;
- dma_addr_t start, end;
struct drm_pending_vblank_event *event;
int dpms;
wait_queue_head_t frame_done_wq;
bool frame_done;
+ spinlock_t irq_lock;
- /* fb currently set to scanout 0/1: */
- struct drm_framebuffer *scanout[2];
+ ktime_t last_vblank;
+
+ struct drm_framebuffer *curr_fb;
+ struct drm_framebuffer *next_fb;
/* for deferred fb unref's: */
struct drm_flip_work unref_work;
/* Only set if an external encoder is connected */
bool simulate_vesa_sync;
+
+ int sync_lost_count;
+ bool frame_intact;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
@@ -54,79 +60,53 @@ static void unref_worker(struct drm_flip_work *work, void *val)
mutex_unlock(&dev->mode_config.mutex);
}
-static void set_scanout(struct drm_crtc *crtc, int n)
-{
- static const uint32_t base_reg[] = {
- LCDC_DMA_FB_BASE_ADDR_0_REG,
- LCDC_DMA_FB_BASE_ADDR_1_REG,
- };
- static const uint32_t ceil_reg[] = {
- LCDC_DMA_FB_CEILING_ADDR_0_REG,
- LCDC_DMA_FB_CEILING_ADDR_1_REG,
- };
- static const uint32_t stat[] = {
- LCDC_END_OF_FRAME0, LCDC_END_OF_FRAME1,
- };
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct tilcdc_drm_private *priv = dev->dev_private;
-
- pm_runtime_get_sync(dev->dev);
- tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
- tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
- if (tilcdc_crtc->scanout[n]) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
- drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- }
- tilcdc_crtc->scanout[n] = crtc->primary->fb;
- drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
- tilcdc_crtc->dirty &= ~stat[n];
- pm_runtime_put_sync(dev->dev);
-}
-
-static void update_scanout(struct drm_crtc *crtc)
+static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_gem_cma_object *gem;
unsigned int depth, bpp;
+ dma_addr_t start, end;
drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
gem = drm_fb_cma_get_gem_obj(fb, 0);
- tilcdc_crtc->start = gem->paddr + fb->offsets[0] +
- (crtc->y * fb->pitches[0]) + (crtc->x * bpp/8);
+ start = gem->paddr + fb->offsets[0] +
+ crtc->y * fb->pitches[0] +
+ crtc->x * bpp / 8;
- tilcdc_crtc->end = tilcdc_crtc->start +
- (crtc->mode.vdisplay * fb->pitches[0]);
+ end = start + (crtc->mode.vdisplay * fb->pitches[0]);
- if (tilcdc_crtc->dpms == DRM_MODE_DPMS_ON) {
- /* already enabled, so just mark the frames that need
- * updating and they will be updated on vblank:
- */
- tilcdc_crtc->dirty |= LCDC_END_OF_FRAME0 | LCDC_END_OF_FRAME1;
- drm_vblank_get(dev, 0);
- } else {
- /* not enabled yet, so update registers immediately: */
- set_scanout(crtc, 0);
- set_scanout(crtc, 1);
- }
+ tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, start);
+ tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG, end);
+
+ if (tilcdc_crtc->curr_fb)
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->curr_fb);
+
+ tilcdc_crtc->curr_fb = fb;
}
-static void start(struct drm_crtc *crtc)
+static void reset(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
- if (priv->rev == 2) {
- tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
- msleep(1);
- tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
- msleep(1);
- }
+ if (priv->rev != 2)
+ return;
- tilcdc_set(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
+ tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+ usleep_range(250, 1000);
+ tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+}
+
+static void start(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+
+ reset(crtc);
+
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
}
@@ -138,17 +118,31 @@ static void stop(struct drm_crtc *crtc)
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
}
-static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode);
static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ of_node_put(crtc->port);
drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
+}
+
+static int tilcdc_verify_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int depth, bpp;
+
+ drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
- kfree(tilcdc_crtc);
+ if (fb->pitches[0] != crtc->mode.hdisplay * bpp / 8) {
+ dev_err(dev->dev,
+ "Invalid pitch: fb and crtc widths must be the same");
+ return -EINVAL;
+ }
+
+ return 0;
}
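
The check above enforces that the buffer stride matches the mode width exactly, e.g. an 800-pixel-wide mode at 16 bpp demands fb->pitches[0] == 800 * 16 / 8 == 1600 bytes. That follows from how set_scanout() programs the DMA engine with only a base and a ceiling address: there is no separate stride setting, so any per-line padding in the framebuffer would shear the image.
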
static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
@@ -158,20 +152,48 @@ static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ int r;
+ unsigned long flags;
+ s64 tdiff;
+ ktime_t next_vblank;
+
+ r = tilcdc_verify_fb(crtc, fb);
+ if (r)
+ return r;
if (tilcdc_crtc->event) {
dev_err(dev->dev, "already pending page flip!\n");
return -EBUSY;
}
+ drm_framebuffer_reference(fb);
+
crtc->primary->fb = fb;
+
+ pm_runtime_get_sync(dev->dev);
+
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+ next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+ 1000000 / crtc->hwmode.vrefresh);
+
+ tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
+
+ if (tdiff >= TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+ set_scanout(crtc, fb);
+ else
+ tilcdc_crtc->next_fb = fb;
+
tilcdc_crtc->event = event;
- update_scanout(crtc);
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+
+ pm_runtime_put_sync(dev->dev);
return 0;
}
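
The timing logic above estimates the next vblank as last_vblank + 1 s / vrefresh and programs the new scanout address immediately only when at least TILCDC_VBLANK_SAFETY_THRESHOLD_US (1 ms) remains; otherwise the flip is parked in next_fb for the END_OF_FRAME0 interrupt to pick up. The same decision in isolation; flip_now_or_defer() is a hypothetical helper name:

	/* Sketch only: true when it is safe to program the scanout registers
	 * immediately, false when the flip should wait for the irq handler.
	 */
	static bool flip_now_or_defer(ktime_t last_vblank, unsigned int vrefresh)
	{
		ktime_t next_vblank = ktime_add_us(last_vblank,
						   1000000 / vrefresh);
		s64 us_left = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		return us_left >= TILCDC_VBLANK_SAFETY_THRESHOLD_US;
	}
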
-static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
+void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -186,10 +208,8 @@ static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
tilcdc_crtc->dpms = mode;
- pm_runtime_get_sync(dev->dev);
-
if (mode == DRM_MODE_DPMS_ON) {
- pm_runtime_forbid(dev->dev);
+ pm_runtime_get_sync(dev->dev);
start(crtc);
} else {
tilcdc_crtc->frame_done = false;
@@ -207,10 +227,23 @@ static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ret == 0)
dev_err(dev->dev, "timeout waiting for framedone\n");
}
- pm_runtime_allow(dev->dev);
- }
- pm_runtime_put_sync(dev->dev);
+ pm_runtime_put_sync(dev->dev);
+
+ if (tilcdc_crtc->next_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->next_fb);
+ tilcdc_crtc->next_fb = NULL;
+ }
+
+ if (tilcdc_crtc->curr_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->curr_fb);
+ tilcdc_crtc->curr_fb = NULL;
+ }
+
+ drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
+ }
}
static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -272,6 +305,10 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
if (WARN_ON(!info))
return -EINVAL;
+ ret = tilcdc_verify_fb(crtc, crtc->primary->fb);
+ if (ret)
+ return ret;
+
pm_runtime_get_sync(dev->dev);
/* Configure the Burst Size and fifo threshold of DMA: */
@@ -419,8 +456,10 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
else
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
+ drm_framebuffer_reference(crtc->primary->fb);
+
+ set_scanout(crtc, crtc->primary->fb);
- update_scanout(crtc);
tilcdc_crtc_update_clk(crtc);
pm_runtime_put_sync(dev->dev);
@@ -431,7 +470,21 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- update_scanout(crtc);
+ struct drm_device *dev = crtc->dev;
+ int r;
+
+ r = tilcdc_verify_fb(crtc, crtc->primary->fb);
+ if (r)
+ return r;
+
+ drm_framebuffer_reference(crtc->primary->fb);
+
+ pm_runtime_get_sync(dev->dev);
+
+ set_scanout(crtc, crtc->primary->fb);
+
+ pm_runtime_put_sync(dev->dev);
+
return 0;
}
@@ -573,7 +626,8 @@ void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
int dpms = tilcdc_crtc->dpms;
- unsigned int lcd_clk, div;
+ unsigned long lcd_clk;
+ const unsigned clkdiv = 2; /* using a fixed divider of 2 */
int ret;
pm_runtime_get_sync(dev->dev);
@@ -581,22 +635,21 @@ void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
if (dpms == DRM_MODE_DPMS_ON)
tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- /* in raster mode, minimum divisor is 2: */
- ret = clk_set_rate(priv->disp_clk, crtc->mode.clock * 1000 * 2);
- if (ret) {
+ /* mode.clock is in KHz, set_rate wants parameter in Hz */
+ ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
+ if (ret < 0) {
dev_err(dev->dev, "failed to set display clock rate to: %d\n",
crtc->mode.clock);
goto out;
}
lcd_clk = clk_get_rate(priv->clk);
- div = lcd_clk / (crtc->mode.clock * 1000);
- DBG("lcd_clk=%u, mode clock=%d, div=%u", lcd_clk, crtc->mode.clock, div);
- DBG("fck=%lu, dpll_disp_ck=%lu", clk_get_rate(priv->clk), clk_get_rate(priv->disp_clk));
+ DBG("lcd_clk=%lu, mode clock=%d, div=%u",
+ lcd_clk, crtc->mode.clock, clkdiv);
/* Configure the LCD clock divisor. */
- tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(div) |
+ tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
LCDC_RASTER_MODE);
if (priv->rev == 2)
@@ -611,44 +664,58 @@ out:
pm_runtime_put_sync(dev->dev);
}
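
With the fixed divider the arithmetic above is straightforward: for, say, an 800x480 panel mode with mode->clock = 33500 (kHz), the functional clock is requested at 33500 * 1000 * 2 = 67 MHz and the LCDC divisor register is set to 2, so the raster engine again sees a 33.5 MHz pixel clock; the old code instead derived a variable divisor from clk_get_rate() after the fact.
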
+#define SYNC_LOST_COUNT_LIMIT 50
+
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
- uint32_t stat = tilcdc_read_irqstatus(dev);
+ uint32_t stat;
- if ((stat & LCDC_SYNC_LOST) && (stat & LCDC_FIFO_UNDERFLOW)) {
- stop(crtc);
- dev_err(dev->dev, "error: %08x\n", stat);
- tilcdc_clear_irqstatus(dev, stat);
- start(crtc);
- } else if (stat & LCDC_PL_LOAD_DONE) {
- tilcdc_clear_irqstatus(dev, stat);
- } else {
- struct drm_pending_vblank_event *event;
+ stat = tilcdc_read_irqstatus(dev);
+ tilcdc_clear_irqstatus(dev, stat);
+
+ if (stat & LCDC_END_OF_FRAME0) {
unsigned long flags;
- uint32_t dirty = tilcdc_crtc->dirty & stat;
+ bool skip_event = false;
+ ktime_t now;
- tilcdc_clear_irqstatus(dev, stat);
+ now = ktime_get();
- if (dirty & LCDC_END_OF_FRAME0)
- set_scanout(crtc, 0);
+ drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- if (dirty & LCDC_END_OF_FRAME1)
- set_scanout(crtc, 1);
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+ tilcdc_crtc->last_vblank = now;
+
+ if (tilcdc_crtc->next_fb) {
+ set_scanout(crtc, tilcdc_crtc->next_fb);
+ tilcdc_crtc->next_fb = NULL;
+ skip_event = true;
+ }
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
drm_handle_vblank(dev, 0);
- spin_lock_irqsave(&dev->event_lock, flags);
- event = tilcdc_crtc->event;
- tilcdc_crtc->event = NULL;
- if (event)
- drm_send_vblank_event(dev, 0, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ if (!skip_event) {
+ struct drm_pending_vblank_event *event;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ event = tilcdc_crtc->event;
+ tilcdc_crtc->event = NULL;
+ if (event)
+ drm_send_vblank_event(dev, 0, event);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
- if (dirty && !tilcdc_crtc->dirty)
- drm_vblank_put(dev, 0);
+ if (tilcdc_crtc->frame_intact)
+ tilcdc_crtc->sync_lost_count = 0;
+ else
+ tilcdc_crtc->frame_intact = true;
}
if (priv->rev == 2) {
@@ -659,36 +726,34 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
}
- return IRQ_HANDLED;
-}
+ if (stat & LCDC_SYNC_LOST) {
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost\n",
+ __func__, stat);
+ tilcdc_crtc->frame_intact = false;
+ if (tilcdc_crtc->sync_lost_count++ > SYNC_LOST_COUNT_LIMIT) {
+ dev_err(dev->dev,
+ "%s(0x%08x): Sync lost flood detected, disabling the interrupt",
+ __func__, stat);
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+ LCDC_SYNC_LOST);
+ }
+ }
-void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = crtc->dev;
- unsigned long flags;
+ if (stat & LCDC_FIFO_UNDERFLOW)
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow\n",
+ __func__, stat);
- /* Destroy the pending vertical blanking event associated with the
- * pending page flip, if any, and disable vertical blanking interrupts.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- event = tilcdc_crtc->event;
- if (event && event->base.file_priv == file) {
- tilcdc_crtc->event = NULL;
- event->base.destroy(&event->base);
- drm_vblank_put(dev, 0);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ return IRQ_HANDLED;
}
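
Note the two-step recovery encoded above: after a SYNC_LOST event the first clean END_OF_FRAME0 only sets frame_intact back to true, and it takes a second consecutive clean frame to zero sync_lost_count, so the flood guard is only disarmed once the output has demonstrably restabilised.
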
struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
{
+ struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_crtc *tilcdc_crtc;
struct drm_crtc *crtc;
int ret;
- tilcdc_crtc = kzalloc(sizeof(*tilcdc_crtc), GFP_KERNEL);
+ tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
if (!tilcdc_crtc) {
dev_err(dev->dev, "allocation failed\n");
return NULL;
@@ -702,12 +767,32 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
drm_flip_work_init(&tilcdc_crtc->unref_work,
"unref", unref_worker);
+ spin_lock_init(&tilcdc_crtc->irq_lock);
+
ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
if (ret < 0)
goto fail;
drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
+ if (priv->is_componentized) {
+ struct device_node *ports =
+ of_get_child_by_name(dev->dev->of_node, "ports");
+
+ if (ports) {
+ crtc->port = of_get_child_by_name(ports, "port");
+ of_node_put(ports);
+ } else {
+ crtc->port =
+ of_get_child_by_name(dev->dev->of_node, "port");
+ }
+ if (!crtc->port) { /* This should never happen */
+ dev_err(dev->dev, "Port node not found in %s\n",
+ dev->dev->of_node->full_name);
+ goto fail;
+ }
+ }
+
return crtc;
fail:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index d7f5b897c6c5..709bc903524d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -18,6 +18,8 @@
/* LCDC DRM driver, based on da8xx-fb */
#include <linux/component.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/suspend.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -110,6 +112,8 @@ static int tilcdc_unload(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
+ tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
+
tilcdc_remove_external_encoders(dev);
drm_fbdev_cma_fini(priv->fbdev);
@@ -139,11 +143,11 @@ static int tilcdc_unload(struct drm_device *dev)
pm_runtime_disable(dev->dev);
- kfree(priv);
-
return 0;
}
+static size_t tilcdc_num_regs(void);
+
static int tilcdc_load(struct drm_device *dev, unsigned long flags)
{
struct platform_device *pdev = dev->platformdev;
@@ -154,8 +158,12 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
u32 bpp = 0;
int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (priv)
+ priv->saved_register =
+ devm_kcalloc(dev->dev, tilcdc_num_regs(),
+ sizeof(*priv->saved_register), GFP_KERNEL);
+ if (!priv || !priv->saved_register) {
dev_err(dev->dev, "failed to allocate private data\n");
return -ENOMEM;
}
@@ -168,7 +176,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
priv->wq = alloc_ordered_workqueue("tilcdc", 0);
if (!priv->wq) {
ret = -ENOMEM;
- goto fail_free_priv;
+ goto fail_unset_priv;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -192,13 +200,6 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
goto fail_iounmap;
}
- priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
- if (IS_ERR(priv->clk)) {
- dev_err(dev->dev, "failed to get display clock\n");
- ret = -ENODEV;
- goto fail_put_clk;
- }
-
#ifdef CONFIG_CPU_FREQ
priv->lcd_fck_rate = clk_get_rate(priv->clk);
priv->freq_transition.notifier_call = cpufreq_transition;
@@ -206,7 +207,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
dev_err(dev->dev, "failed to register cpufreq notifier\n");
- goto fail_put_disp_clk;
+ goto fail_put_clk;
}
#endif
@@ -227,7 +228,6 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
pm_runtime_enable(dev->dev);
- pm_runtime_irq_safe(dev->dev);
/* Determine LCD IP Version */
pm_runtime_get_sync(dev->dev);
@@ -330,11 +330,9 @@ fail_cpufreq_unregister:
#ifdef CONFIG_CPU_FREQ
cpufreq_unregister_notifier(&priv->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
-fail_put_disp_clk:
- clk_put(priv->disp_clk);
-#endif
fail_put_clk:
+#endif
clk_put(priv->clk);
fail_iounmap:
@@ -344,17 +342,10 @@ fail_free_wq:
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
-fail_free_priv:
+fail_unset_priv:
dev->dev_private = NULL;
- kfree(priv);
- return ret;
-}
-static void tilcdc_preclose(struct drm_device *dev, struct drm_file *file)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
-
- tilcdc_crtc_cancel_page_flip(priv->crtc, file);
+ return ret;
}
static void tilcdc_lastclose(struct drm_device *dev)
@@ -380,10 +371,14 @@ static int tilcdc_irq_postinstall(struct drm_device *dev)
struct tilcdc_drm_private *priv = dev->dev_private;
/* enable FIFO underflow irq: */
- if (priv->rev == 1)
+ if (priv->rev == 1) {
tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
- else
- tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
+ } else {
+ tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
+ LCDC_V2_UNDERFLOW_INT_ENA |
+ LCDC_V2_END_OF_FRAME0_INT_ENA |
+ LCDC_FRAME_DONE | LCDC_SYNC_LOST);
+ }
return 0;
}
@@ -398,43 +393,21 @@ static void tilcdc_irq_uninstall(struct drm_device *dev)
LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
} else {
- tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
- LCDC_V2_END_OF_FRAME0_INT_ENA | LCDC_V2_END_OF_FRAME1_INT_ENA |
- LCDC_FRAME_DONE);
+ LCDC_V2_END_OF_FRAME0_INT_ENA |
+ LCDC_FRAME_DONE | LCDC_SYNC_LOST);
}
-
-}
-
-static void enable_vblank(struct drm_device *dev, bool enable)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
- u32 reg, mask;
-
- if (priv->rev == 1) {
- reg = LCDC_DMA_CTRL_REG;
- mask = LCDC_V1_END_OF_FRAME_INT_ENA;
- } else {
- reg = LCDC_INT_ENABLE_SET_REG;
- mask = LCDC_V2_END_OF_FRAME0_INT_ENA |
- LCDC_V2_END_OF_FRAME1_INT_ENA | LCDC_FRAME_DONE;
- }
-
- if (enable)
- tilcdc_set(dev, reg, mask);
- else
- tilcdc_clear(dev, reg, mask);
}
static int tilcdc_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- enable_vblank(dev, true);
return 0;
}
static void tilcdc_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- enable_vblank(dev, false);
+ return;
}
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
@@ -461,13 +434,22 @@ static const struct {
/* new in revision 2: */
REG(2, false, LCDC_RAW_STAT_REG),
REG(2, false, LCDC_MASKED_STAT_REG),
- REG(2, false, LCDC_INT_ENABLE_SET_REG),
+ REG(2, true, LCDC_INT_ENABLE_SET_REG),
REG(2, false, LCDC_INT_ENABLE_CLR_REG),
REG(2, false, LCDC_END_OF_INT_IND_REG),
REG(2, true, LCDC_CLK_ENABLE_REG),
- REG(2, true, LCDC_INT_ENABLE_SET_REG),
#undef REG
};
+
+static size_t tilcdc_num_regs(void)
+{
+ return ARRAY_SIZE(registers);
+}
+#else
+static size_t tilcdc_num_regs(void)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_DEBUG_FS
@@ -554,10 +536,10 @@ static const struct file_operations fops = {
};
static struct drm_driver tilcdc_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_PRIME),
.load = tilcdc_load,
.unload = tilcdc_unload,
- .preclose = tilcdc_preclose,
.lastclose = tilcdc_lastclose,
.set_busid = drm_platform_set_busid,
.irq_handler = tilcdc_irq,
@@ -572,6 +554,16 @@ static struct drm_driver tilcdc_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
.debugfs_cleanup = tilcdc_debugfs_cleanup,
@@ -597,11 +589,24 @@ static int tilcdc_pm_suspend(struct device *dev)
drm_kms_helper_poll_disable(ddev);
+ /* Select sleep pin state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ if (pm_runtime_suspended(dev)) {
+ priv->ctx_valid = false;
+ return 0;
+ }
+
+ /* Disable the LCDC controller, to avoid locking up the PRCM */
+ tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
+
/* Save register state: */
for (i = 0; i < ARRAY_SIZE(registers); i++)
if (registers[i].save && (priv->rev >= registers[i].rev))
priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
+ priv->ctx_valid = true;
+
return 0;
}
@@ -611,10 +616,17 @@ static int tilcdc_pm_resume(struct device *dev)
struct tilcdc_drm_private *priv = ddev->dev_private;
unsigned i, n = 0;
- /* Restore register state: */
- for (i = 0; i < ARRAY_SIZE(registers); i++)
- if (registers[i].save && (priv->rev >= registers[i].rev))
- tilcdc_write(ddev, registers[i].reg, priv->saved_register[n++]);
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(dev);
+
+ if (priv->ctx_valid) {
+ /* Restore register state: */
+ for (i = 0; i < ARRAY_SIZE(registers); i++)
+ if (registers[i].save &&
+ (priv->rev >= registers[i].rev))
+ tilcdc_write(ddev, registers[i].reg,
+ priv->saved_register[n++]);
+ }
drm_kms_helper_poll_enable(ddev);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index e863ad0d26fe..c1de18bae415 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -49,7 +49,6 @@
struct tilcdc_drm_private {
void __iomem *mmio;
- struct clk *disp_clk; /* display dpll */
struct clk *clk; /* functional clock */
int rev; /* IP revision */
@@ -67,7 +66,8 @@ struct tilcdc_drm_private {
uint32_t max_width;
/* register contents saved across suspend/resume: */
- u32 saved_register[12];
+ u32 *saved_register;
+ bool ctx_valid;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
@@ -163,7 +163,6 @@ struct tilcdc_panel_info {
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
-void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
@@ -172,5 +171,6 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
int tilcdc_crtc_max_width(struct drm_crtc *crtc);
+void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode);
#endif /* __TILCDC_DRV_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 4dda6e2f464b..ff7774c17d7c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -45,14 +45,6 @@ struct panel_encoder {
};
#define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
-
-static void panel_encoder_destroy(struct drm_encoder *encoder)
-{
- struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(panel_encoder);
-}
-
static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
@@ -70,14 +62,6 @@ static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
mode == DRM_MODE_DPMS_ON ? 1 : 0);
}
-static bool panel_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* nothing needed */
- return true;
-}
-
static void panel_encoder_prepare(struct drm_encoder *encoder)
{
struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
@@ -98,12 +82,11 @@ static void panel_encoder_mode_set(struct drm_encoder *encoder,
}
static const struct drm_encoder_funcs panel_encoder_funcs = {
- .destroy = panel_encoder_destroy,
+ .destroy = drm_encoder_cleanup,
};
static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
.dpms = panel_encoder_dpms,
- .mode_fixup = panel_encoder_mode_fixup,
.prepare = panel_encoder_prepare,
.commit = panel_encoder_commit,
.mode_set = panel_encoder_mode_set,
@@ -116,7 +99,8 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
struct drm_encoder *encoder;
int ret;
- panel_encoder = kzalloc(sizeof(*panel_encoder), GFP_KERNEL);
+ panel_encoder = devm_kzalloc(dev->dev, sizeof(*panel_encoder),
+ GFP_KERNEL);
if (!panel_encoder) {
dev_err(dev->dev, "allocation failed\n");
return NULL;
@@ -137,7 +121,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
return encoder;
fail:
- panel_encoder_destroy(encoder);
+ drm_encoder_cleanup(encoder);
return NULL;
}
@@ -156,10 +140,8 @@ struct panel_connector {
static void panel_connector_destroy(struct drm_connector *connector)
{
- struct panel_connector *panel_connector = to_panel_connector(connector);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- kfree(panel_connector);
}
static enum drm_connector_status panel_connector_detect(
@@ -232,7 +214,8 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
struct drm_connector *connector;
int ret;
- panel_connector = kzalloc(sizeof(*panel_connector), GFP_KERNEL);
+ panel_connector = devm_kzalloc(dev->dev, sizeof(*panel_connector),
+ GFP_KERNEL);
if (!panel_connector) {
dev_err(dev->dev, "allocation failed\n");
return NULL;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 5052a8af7ecb..7716f42f8aab 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -54,14 +54,6 @@ struct tfp410_encoder {
};
#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
-
-static void tfp410_encoder_destroy(struct drm_encoder *encoder)
-{
- struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(tfp410_encoder);
-}
-
static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
@@ -80,14 +72,6 @@ static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
tfp410_encoder->dpms = mode;
}
-static bool tfp410_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* nothing needed */
- return true;
-}
-
static void tfp410_encoder_prepare(struct drm_encoder *encoder)
{
tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
@@ -107,12 +91,11 @@ static void tfp410_encoder_mode_set(struct drm_encoder *encoder,
}
static const struct drm_encoder_funcs tfp410_encoder_funcs = {
- .destroy = tfp410_encoder_destroy,
+ .destroy = drm_encoder_cleanup,
};
static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
.dpms = tfp410_encoder_dpms,
- .mode_fixup = tfp410_encoder_mode_fixup,
.prepare = tfp410_encoder_prepare,
.commit = tfp410_encoder_commit,
.mode_set = tfp410_encoder_mode_set,
@@ -125,7 +108,8 @@ static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
struct drm_encoder *encoder;
int ret;
- tfp410_encoder = kzalloc(sizeof(*tfp410_encoder), GFP_KERNEL);
+ tfp410_encoder = devm_kzalloc(dev->dev, sizeof(*tfp410_encoder),
+ GFP_KERNEL);
if (!tfp410_encoder) {
dev_err(dev->dev, "allocation failed\n");
return NULL;
@@ -147,7 +131,7 @@ static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
return encoder;
fail:
- tfp410_encoder_destroy(encoder);
+ drm_encoder_cleanup(encoder);
return NULL;
}
@@ -166,10 +150,8 @@ struct tfp410_connector {
static void tfp410_connector_destroy(struct drm_connector *connector)
{
- struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- kfree(tfp410_connector);
}
static enum drm_connector_status tfp410_connector_detect(
@@ -237,7 +219,8 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
struct drm_connector *connector;
int ret;
- tfp410_connector = kzalloc(sizeof(*tfp410_connector), GFP_KERNEL);
+ tfp410_connector = devm_kzalloc(dev->dev, sizeof(*tfp410_connector),
+ GFP_KERNEL);
if (!tfp410_connector) {
dev_err(dev->dev, "allocation failed\n");
return NULL;
@@ -322,7 +305,7 @@ static int tfp410_probe(struct platform_device *pdev)
return -ENXIO;
}
- tfp410_mod = kzalloc(sizeof(*tfp410_mod), GFP_KERNEL);
+ tfp410_mod = devm_kzalloc(&pdev->dev, sizeof(*tfp410_mod), GFP_KERNEL);
if (!tfp410_mod)
return -ENOMEM;
@@ -375,7 +358,6 @@ fail_adapter:
i2c_put_adapter(tfp410_mod->i2c);
fail:
- kfree(tfp410_mod);
tilcdc_module_cleanup(mod);
return ret;
}
@@ -389,7 +371,6 @@ static int tfp410_remove(struct platform_device *pdev)
gpio_free(tfp410_mod->gpio);
tilcdc_module_cleanup(mod);
- kfree(tfp410_mod);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4cbf26555093..e3daafa1be13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
+ int put_count = 0;
lockdep_assert_held(&bo->resv->lock.base);
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- list_del_init(&bo->swap);
- list_del_init(&bo->lru);
-
- } else {
- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
- list_move_tail(&bo->swap, &bo->glob->swap_lru);
-
- man = &bdev->man[bo->mem.mem_type];
- list_move_tail(&bo->lru, &man->lru);
- }
+ put_count = ttm_bo_del_from_lru(bo);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4e19d0f9cc30..077ae9b2865d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
goto out_err;
copy_highpage(to_page, from_page);
- page_cache_release(from_page);
+ put_page(from_page);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
copy_highpage(to_page, from_page);
set_page_dirty(to_page);
mark_page_accessed(to_page);
- page_cache_release(to_page);
+ put_page(to_page);
}
ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index d5728ec85254..772ec9e1f590 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -125,17 +125,5 @@ static struct usb_driver udl_driver = {
.disconnect = udl_usb_disconnect,
.id_table = id_table,
};
-
-static int __init udl_init(void)
-{
- return usb_register(&udl_driver);
-}
-
-static void __exit udl_exit(void)
-{
- usb_deregister(&udl_driver);
-}
-
-module_init(udl_init);
-module_exit(udl_exit);
+module_usb_driver(udl_driver);
MODULE_LICENSE("GPL");
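
module_usb_driver() generates essentially the boilerplate deleted above; via module_driver() it expands to roughly the following (the init/exit names are derived from the driver symbol):

	/* Approximate expansion of module_usb_driver(udl_driver) */
	static int __init udl_driver_init(void)
	{
		return usb_register(&udl_driver);
	}
	module_init(udl_driver_init);

	static void __exit udl_driver_exit(void)
	{
		usb_deregister(&udl_driver);
	}
	module_exit(udl_driver_exit);
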
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index a181a647fcf9..59a4b34e87ed 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -26,13 +26,6 @@ static void udl_encoder_disable(struct drm_encoder *encoder)
{
}
-static bool udl_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void udl_encoder_prepare(struct drm_encoder *encoder)
{
}
@@ -54,7 +47,6 @@ udl_encoder_dpms(struct drm_encoder *encoder, int mode)
static const struct drm_encoder_helper_funcs udl_helper_funcs = {
.dpms = udl_encoder_dpms,
- .mode_fixup = udl_mode_fixup,
.prepare = udl_encoder_prepare,
.mode_set = udl_encoder_mode_set,
.commit = udl_encoder_commit,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 200419d4d43c..fd1eb9d03f0b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -409,7 +409,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
if (ufb->obj->base.import_attach) {
ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
- 0, ufb->obj->base.size,
DMA_FROM_DEVICE);
if (ret)
goto unlock;
@@ -424,9 +423,8 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
}
if (ufb->obj->base.import_attach) {
- dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
- 0, ufb->obj->base.size,
- DMA_FROM_DEVICE);
+ ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+ DMA_FROM_DEVICE);
}
unlock:
@@ -538,7 +536,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_gfree:
- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 2a0a784ab6ee..d7528e0d8442 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
*handle_p = handle;
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 160ef2a08b89..b87afee44995 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -279,14 +279,6 @@ static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
}
-static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-
-{
- return true;
-}
-
#if 0
static int
udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -402,7 +394,6 @@ static void udl_crtc_commit(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs udl_helper_funcs = {
.dpms = udl_crtc_dpms,
- .mode_fixup = udl_crtc_mode_fixup,
.mode_set = udl_crtc_mode_set,
.prepare = udl_crtc_prepare,
.commit = udl_crtc_commit,
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 22278bcfc60e..9807bc9d296e 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -398,9 +398,8 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
- bo->base.vaddr, bo->base.paddr,
- vma->vm_end - vma->vm_start);
+ ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
+ bo->base.paddr, vma->vm_end - vma->vm_start);
if (ret)
drm_gem_vm_close(vma);
@@ -499,11 +498,12 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
- ret = copy_from_user(bo->base.vaddr,
+ if (copy_from_user(bo->base.vaddr,
(void __user *)(uintptr_t)args->data,
- args->size);
- if (ret != 0)
+ args->size)) {
+ ret = -EFAULT;
goto fail;
+ }
/* Clear the rest of the memory from allocating from the BO
* cache.
*/
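
The fix above applies the standard idiom: copy_from_user() returns the number of bytes left uncopied rather than an errno, so a non-zero result must be translated to -EFAULT instead of being returned as-is. In miniature:

	/* copy_from_user() reports bytes NOT copied; map any shortfall to -EFAULT */
	if (copy_from_user(dst, user_ptr, len))
		return -EFAULT;
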
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 018145e0b87d..355ee4b091b3 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -49,22 +49,27 @@ struct vc4_crtc {
/* Which HVS channel we're using for our CRTC. */
int channel;
- /* Pointer to the actual hardware display list memory for the
- * crtc.
- */
- u32 __iomem *dlist;
-
- u32 dlist_size; /* in dwords */
-
struct drm_pending_vblank_event *event;
};
+struct vc4_crtc_state {
+ struct drm_crtc_state base;
+ /* Dlist area for this CRTC configuration. */
+ struct drm_mm_node mm;
+};
+
static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
return (struct vc4_crtc *)crtc;
}
+static inline struct vc4_crtc_state *
+to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
+{
+ return (struct vc4_crtc_state *)crtc_state;
+}
+
struct vc4_crtc_data {
/* Which channel of the HVS this pixelvalve sources from. */
int hvs_channel;
@@ -83,7 +88,7 @@ static const struct {
} crtc_regs[] = {
CRTC_REG(PV_CONTROL),
CRTC_REG(PV_V_CONTROL),
- CRTC_REG(PV_VSYNCD),
+ CRTC_REG(PV_VSYNCD_EVEN),
CRTC_REG(PV_HORZA),
CRTC_REG(PV_HORZB),
CRTC_REG(PV_VERTA),
@@ -183,6 +188,8 @@ static int vc4_get_clock_select(struct drm_crtc *crtc)
static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_crtc_state *state = crtc->state;
struct drm_display_mode *mode = &state->adjusted_mode;
@@ -212,6 +219,16 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
PV_HORZB_HFP) |
VC4_SET_FIELD(mode->hdisplay, PV_HORZB_HACTIVE));
+ CRTC_WRITE(PV_VERTA,
+ VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
+ PV_VERTA_VBP) |
+ VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+ PV_VERTA_VSYNC));
+ CRTC_WRITE(PV_VERTB,
+ VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+ PV_VERTB_VFP) |
+ VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));
+
if (interlace) {
CRTC_WRITE(PV_VERTA_EVEN,
VC4_SET_FIELD(mode->vtotal - mode->vsync_end - 1,
@@ -241,6 +258,10 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
PV_CONTROL_FIFO_CLR |
PV_CONTROL_EN);
+ HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
+ SCALER_DISPBKGND_AUTOHS |
+ (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
+
if (debug_dump_regs) {
DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
vc4_crtc_dump_regs(vc4_crtc);
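For reference, the PV_VERTA/PV_VERTB fields programmed above reduce to small integers for a standard mode; a worked example with CEA 1080p timings (vdisplay 1080, vsync_start 1084, vsync_end 1089, vtotal 1125):

/* Illustrative arithmetic only:
 *   PV_VERTA_VBP   = vtotal - vsync_end      = 1125 - 1089 = 36
 *   PV_VERTA_VSYNC = vsync_end - vsync_start = 1089 - 1084 = 5
 *   PV_VERTB_VFP   = vsync_start - vdisplay  = 1084 - 1080 = 4
 */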
@@ -319,11 +340,13 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane;
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ unsigned long flags;
u32 dlist_count = 0;
+ int ret;
/* The pixelvalve can only feed one encoder (and encoders are
* 1:1 with connectors.)
@@ -346,18 +369,12 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
dlist_count++; /* Account for SCALER_CTL0_END. */
- if (!vc4_crtc->dlist || dlist_count > vc4_crtc->dlist_size) {
- vc4_crtc->dlist = ((u32 __iomem *)vc4->hvs->dlist +
- HVS_BOOTLOADER_DLIST_END);
- vc4_crtc->dlist_size = ((SCALER_DLIST_SIZE >> 2) -
- HVS_BOOTLOADER_DLIST_END);
-
- if (dlist_count > vc4_crtc->dlist_size) {
- DRM_DEBUG_KMS("dlist too large for CRTC (%d > %d).\n",
- dlist_count, vc4_crtc->dlist_size);
- return -EINVAL;
- }
- }
+ spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
+ ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
+ dlist_count, 1, 0);
+ spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
+ if (ret)
+ return ret;
return 0;
}
@@ -368,47 +385,29 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
struct drm_plane *plane;
bool debug_dump_regs = false;
- u32 __iomem *dlist_next = vc4_crtc->dlist;
+ u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
+ u32 __iomem *dlist_next = dlist_start;
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
vc4_hvs_dump_state(dev);
}
- /* Copy all the active planes' dlist contents to the hardware dlist.
- *
- * XXX: If the new display list was large enough that it
- * overlapped a currently-read display list, we need to do
- * something like disable scanout before putting in the new
- * list. For now, we're safe because we only have the two
- * planes.
- */
+ /* Copy all the active planes' dlist contents to the hardware dlist. */
drm_atomic_crtc_for_each_plane(plane, crtc) {
dlist_next += vc4_plane_write_dlist(plane, dlist_next);
}
- if (dlist_next == vc4_crtc->dlist) {
- /* If no planes were enabled, use the SCALER_CTL0_END
- * at the start of the display list memory (in the
- * bootloader section). We'll rewrite that
- * SCALER_CTL0_END, just in case, though.
- */
- writel(SCALER_CTL0_END, vc4->hvs->dlist);
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 0);
- } else {
- writel(SCALER_CTL0_END, dlist_next);
- dlist_next++;
-
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- (u32 __iomem *)vc4_crtc->dlist -
- (u32 __iomem *)vc4->hvs->dlist);
-
- /* Make the next display list start after ours. */
- vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
- vc4_crtc->dlist = dlist_next;
- }
+ writel(SCALER_CTL0_END, dlist_next);
+ dlist_next++;
+
+ WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
@@ -544,6 +543,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
/* Make sure all other async modesets have landed. */
ret = down_interruptible(&vc4->async_modeset);
if (ret) {
+ drm_framebuffer_unreference(fb);
kfree(flip_state);
return ret;
}
@@ -573,6 +573,36 @@ static int vc4_page_flip(struct drm_crtc *crtc,
return drm_atomic_helper_page_flip(crtc, fb, event, flags);
}
+static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct vc4_crtc_state *vc4_state;
+
+ vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
+ if (!vc4_state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
+ return &vc4_state->base;
+}
+
+static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+
+ if (vc4_state->mm.allocated) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
+ drm_mm_remove_node(&vc4_state->mm);
+ spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
+ }
+
+ __drm_atomic_helper_crtc_destroy_state(crtc, state);
+}
+
static const struct drm_crtc_funcs vc4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = vc4_crtc_destroy,
@@ -581,8 +611,8 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
.cursor_move = NULL, /* handled by drm_mode_cursor_universal */
.reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = vc4_crtc_duplicate_state,
+ .atomic_destroy_state = vc4_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -593,26 +623,6 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.atomic_flush = vc4_crtc_atomic_flush,
};
-/* Frees the page flip event when the DRM device is closed with the
- * event still outstanding.
- */
-void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- if (vc4_crtc->event && vc4_crtc->event->base.file_priv == file) {
- vc4_crtc->event->base.destroy(&vc4_crtc->event->base);
- drm_crtc_vblank_put(crtc);
- vc4_crtc->event = NULL;
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static const struct vc4_crtc_data pv0_data = {
.hvs_channel = 0,
.encoder0_type = VC4_ENCODER_TYPE_DSI0,
@@ -664,9 +674,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_crtc *vc4_crtc;
struct drm_crtc *crtc;
- struct drm_plane *primary_plane, *cursor_plane;
+ struct drm_plane *primary_plane, *cursor_plane, *destroy_plane, *temp;
const struct of_device_id *match;
- int ret;
+ int ret, i;
vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL);
if (!vc4_crtc)
@@ -695,27 +705,49 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
goto err;
}
- cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
- if (IS_ERR(cursor_plane)) {
- dev_err(dev, "failed to construct cursor plane\n");
- ret = PTR_ERR(cursor_plane);
- goto err_primary;
- }
-
- drm_crtc_init_with_planes(drm, crtc, primary_plane, cursor_plane,
+ drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
&vc4_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
primary_plane->crtc = crtc;
- cursor_plane->crtc = crtc;
vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc;
vc4_crtc->channel = vc4_crtc->data->hvs_channel;
+ /* Set up some arbitrary number of planes. We're not limited
+ * by a set number of physical registers, just the space in
+ * the HVS (16k) and how small a plane can be (28 bytes).
+ * However, each plane we set up takes up some memory, and
+ * increases the cost of looping over planes, which atomic
+ * modesetting does quite a bit. As a result, we pick a
+ * modest number of planes to expose, which should hopefully
+ * still cover any sane use case.
+ */
+ for (i = 0; i < 8; i++) {
+ struct drm_plane *plane =
+ vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);
+
+ if (IS_ERR(plane))
+ continue;
+
+ plane->possible_crtcs = 1 << drm_crtc_index(crtc);
+ }
+
+ /* Set up the legacy cursor after overlay initialization,
+ * since we overlay planes on the CRTC in the order they were
+ * initialized.
+ */
+ cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
+ if (!IS_ERR(cursor_plane)) {
+ cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
+ cursor_plane->crtc = crtc;
+ crtc->cursor = cursor_plane;
+ }
+
CRTC_WRITE(PV_INTEN, 0);
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc);
if (ret)
- goto err_cursor;
+ goto err_destroy_planes;
vc4_set_crtc_possible_masks(drm, crtc);
@@ -723,10 +755,12 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
return 0;
-err_cursor:
- cursor_plane->funcs->destroy(cursor_plane);
-err_primary:
- primary_plane->funcs->destroy(primary_plane);
+err_destroy_planes:
+ list_for_each_entry_safe(destroy_plane, temp,
+ &drm->mode_config.plane_list, head) {
+ if (destroy_plane->possible_crtcs == 1 << drm_crtc_index(crtc))
+ destroy_plane->funcs->destroy(destroy_plane);
+ }
err:
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index f1655fff8425..b7d2ff0e6e1f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -43,14 +43,6 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
return map;
}
-static void vc4_drm_preclose(struct drm_device *dev, struct drm_file *file)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- vc4_cancel_page_flip(crtc, file);
-}
-
static void vc4_lastclose(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -91,8 +83,6 @@ static struct drm_driver vc4_drm_driver = {
DRIVER_HAVE_IRQ |
DRIVER_PRIME),
.lastclose = vc4_lastclose,
- .preclose = vc4_drm_preclose,
-
.irq_handler = vc4_irq,
.irq_preinstall = vc4_irq_preinstall,
.irq_postinstall = vc4_irq_postinstall,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 51a63330d4f8..fa2ad15d4f62 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -52,7 +52,7 @@ struct vc4_dev {
/* Protects bo_cache and the BO stats. */
struct mutex bo_lock;
- /* Sequence number for the last job queued in job_list.
+ /* Sequence number for the last job queued in bin_job_list.
* Starts at 0 (no jobs emitted).
*/
uint64_t emit_seqno;
@@ -62,11 +62,19 @@ struct vc4_dev {
*/
uint64_t finished_seqno;
- /* List of all struct vc4_exec_info for jobs to be executed.
- * The first job in the list is the one currently programmed
- * into ct0ca/ct1ca for execution.
+ /* List of all struct vc4_exec_info for jobs to be executed in
+ * the binner. The first job in the list is the one currently
+ * programmed into ct0ca for execution.
*/
- struct list_head job_list;
+ struct list_head bin_job_list;
+
+ /* List of all struct vc4_exec_info for jobs that have
+ * completed binning and are ready for rendering. The first
+ * job in the list is the one currently programmed into ct1ca
+ * for execution.
+ */
+ struct list_head render_job_list;
+
/* List of the finished vc4_exec_infos waiting to be freed by
* job_done_work.
*/
@@ -154,7 +162,17 @@ struct vc4_v3d {
struct vc4_hvs {
struct platform_device *pdev;
void __iomem *regs;
- void __iomem *dlist;
+ u32 __iomem *dlist;
+
+ /* Memory manager for CRTCs to allocate space in the display
+ * list. Units are dwords.
+ */
+ struct drm_mm dlist_mm;
+ /* Memory manager for the LBM memory used by HVS scaling. */
+ struct drm_mm lbm_mm;
+ spinlock_t mm_lock;
+
+ struct drm_mm_node mitchell_netravali_filter;
};
struct vc4_plane {
@@ -286,11 +304,20 @@ struct vc4_exec_info {
};
static inline struct vc4_exec_info *
-vc4_first_job(struct vc4_dev *vc4)
+vc4_first_bin_job(struct vc4_dev *vc4)
+{
+ if (list_empty(&vc4->bin_job_list))
+ return NULL;
+ return list_first_entry(&vc4->bin_job_list, struct vc4_exec_info, head);
+}
+
+static inline struct vc4_exec_info *
+vc4_first_render_job(struct vc4_dev *vc4)
{
- if (list_empty(&vc4->job_list))
+ if (list_empty(&vc4->render_job_list))
return NULL;
- return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
+ return list_first_entry(&vc4->render_job_list,
+ struct vc4_exec_info, head);
}
/**
@@ -386,7 +413,6 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
extern struct platform_driver vc4_crtc_driver;
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
-void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
/* vc4_debugfs.c */
@@ -405,7 +431,9 @@ int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vc4_submit_next_job(struct drm_device *dev);
+void vc4_submit_next_bin_job(struct drm_device *dev);
+void vc4_submit_next_render_job(struct drm_device *dev);
+void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 202aa1544acc..8d4384f8b78d 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -141,10 +141,10 @@ vc4_save_hang_state(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_get_hang_state *state;
struct vc4_hang_state *kernel_state;
- struct vc4_exec_info *exec;
+ struct vc4_exec_info *exec[2];
struct vc4_bo *bo;
unsigned long irqflags;
- unsigned int i, unref_list_count;
+ unsigned int i, j, unref_list_count, prev_idx;
kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
if (!kernel_state)
@@ -153,37 +153,55 @@ vc4_save_hang_state(struct drm_device *dev)
state = &kernel_state->user_state;
spin_lock_irqsave(&vc4->job_lock, irqflags);
- exec = vc4_first_job(vc4);
- if (!exec) {
+ exec[0] = vc4_first_bin_job(vc4);
+ exec[1] = vc4_first_render_job(vc4);
+ if (!exec[0] && !exec[1]) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
}
- unref_list_count = 0;
- list_for_each_entry(bo, &exec->unref_list, unref_head)
- unref_list_count++;
+ /* Get the bos from both binner and renderer into hang state. */
+ state->bo_count = 0;
+ for (i = 0; i < 2; i++) {
+ if (!exec[i])
+ continue;
+
+ unref_list_count = 0;
+ list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
+ unref_list_count++;
+ state->bo_count += exec[i]->bo_count + unref_list_count;
+ }
+
+ kernel_state->bo = kcalloc(state->bo_count,
+ sizeof(*kernel_state->bo), GFP_ATOMIC);
- state->bo_count = exec->bo_count + unref_list_count;
- kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo),
- GFP_ATOMIC);
if (!kernel_state->bo) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
}
- for (i = 0; i < exec->bo_count; i++) {
- drm_gem_object_reference(&exec->bo[i]->base);
- kernel_state->bo[i] = &exec->bo[i]->base;
- }
+ prev_idx = 0;
+ for (i = 0; i < 2; i++) {
+ if (!exec[i])
+ continue;
- list_for_each_entry(bo, &exec->unref_list, unref_head) {
- drm_gem_object_reference(&bo->base.base);
- kernel_state->bo[i] = &bo->base.base;
- i++;
+ for (j = 0; j < exec[i]->bo_count; j++) {
+ drm_gem_object_reference(&exec[i]->bo[j]->base);
+ kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+ }
+
+ list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
+ drm_gem_object_reference(&bo->base.base);
+ kernel_state->bo[j + prev_idx] = &bo->base.base;
+ j++;
+ }
+ prev_idx += j;
}
- state->start_bin = exec->ct0ca;
- state->start_render = exec->ct1ca;
+ if (exec[0])
+ state->start_bin = exec[0]->ct0ca;
+ if (exec[1])
+ state->start_render = exec[1]->ct1ca;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
@@ -267,13 +285,15 @@ vc4_hangcheck_elapsed(unsigned long data)
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
- struct vc4_exec_info *exec;
+ struct vc4_exec_info *bin_exec, *render_exec;
spin_lock_irqsave(&vc4->job_lock, irqflags);
- exec = vc4_first_job(vc4);
+
+ bin_exec = vc4_first_bin_job(vc4);
+ render_exec = vc4_first_render_job(vc4);
/* If idle, we can stop watching for hangs. */
- if (!exec) {
+ if (!bin_exec && !render_exec) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
}
@@ -284,9 +304,12 @@ vc4_hangcheck_elapsed(unsigned long data)
/* If we've made any progress in execution, rearm the timer
* and wait.
*/
- if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
- exec->last_ct0ca = ct0ca;
- exec->last_ct1ca = ct1ca;
+ if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
+ (render_exec && ct1ca != render_exec->last_ct1ca)) {
+ if (bin_exec)
+ bin_exec->last_ct0ca = ct0ca;
+ if (render_exec)
+ render_exec->last_ct1ca = ct1ca;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
vc4_queue_hangcheck(dev);
return;
@@ -386,11 +409,13 @@ vc4_flush_caches(struct drm_device *dev)
* The job_lock should be held during this.
*/
void
-vc4_submit_next_job(struct drm_device *dev)
+vc4_submit_next_bin_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_exec_info *exec = vc4_first_job(vc4);
+ struct vc4_exec_info *exec;
+again:
+ exec = vc4_first_bin_job(vc4);
if (!exec)
return;
@@ -400,11 +425,40 @@ vc4_submit_next_job(struct drm_device *dev)
V3D_WRITE(V3D_BPOA, 0);
V3D_WRITE(V3D_BPOS, 0);
- if (exec->ct0ca != exec->ct0ea)
+ /* Either put the job in the binner if it uses the binner, or
+ * immediately move it to the to-be-rendered queue.
+ */
+ if (exec->ct0ca != exec->ct0ea) {
submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
+ } else {
+ vc4_move_job_to_render(dev, exec);
+ goto again;
+ }
+}
+
+void
+vc4_submit_next_render_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_render_job(vc4);
+
+ if (!exec)
+ return;
+
submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}
+void
+vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ bool was_empty = list_empty(&vc4->render_job_list);
+
+ list_move_tail(&exec->head, &vc4->render_job_list);
+ if (was_empty)
+ vc4_submit_next_render_job(dev);
+}
+
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
@@ -443,14 +497,14 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
exec->seqno = seqno;
vc4_update_bo_seqnos(exec, seqno);
- list_add_tail(&exec->head, &vc4->job_list);
+ list_add_tail(&exec->head, &vc4->bin_job_list);
/* If no job was executing, kick ours off. Otherwise, it'll
- * get started when the previous job's frame done interrupt
+ * get started when the previous job's flush done interrupt
* occurs.
*/
- if (vc4_first_job(vc4) == exec) {
- vc4_submit_next_job(dev);
+ if (vc4_first_bin_job(vc4) == exec) {
+ vc4_submit_next_bin_job(dev);
vc4_queue_hangcheck(dev);
}
@@ -859,7 +913,8 @@ vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- INIT_LIST_HEAD(&vc4->job_list);
+ INIT_LIST_HEAD(&vc4->bin_job_list);
+ INIT_LIST_HEAD(&vc4->render_job_list);
INIT_LIST_HEAD(&vc4->job_done_list);
INIT_LIST_HEAD(&vc4->seqno_cb_list);
spin_lock_init(&vc4->job_lock);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index c69c0460196b..d8b864925fd3 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -47,6 +47,7 @@ struct vc4_hdmi {
void __iomem *hdmicore_regs;
void __iomem *hd_regs;
int hpd_gpio;
+ bool hpd_active_low;
struct clk *pixel_clock;
struct clk *hsm_clock;
@@ -95,6 +96,7 @@ static const struct {
HDMI_REG(VC4_HDMI_SW_RESET_CONTROL),
HDMI_REG(VC4_HDMI_HOTPLUG_INT),
HDMI_REG(VC4_HDMI_HOTPLUG),
+ HDMI_REG(VC4_HDMI_RAM_PACKET_CONFIG),
HDMI_REG(VC4_HDMI_HORZA),
HDMI_REG(VC4_HDMI_HORZB),
HDMI_REG(VC4_HDMI_FIFO_CTL),
@@ -165,7 +167,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_dev *vc4 = to_vc4_dev(dev);
if (vc4->hdmi->hpd_gpio) {
- if (gpio_get_value(vc4->hdmi->hpd_gpio))
+ if (gpio_get_value_cansleep(vc4->hdmi->hpd_gpio) ^
+ vc4->hdmi->hpd_active_low)
return connector_status_connected;
else
return connector_status_disconnected;
@@ -495,6 +498,16 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
goto err_put_i2c;
}
+ /* This is the rate that is set by the firmware. The number
+ * needs to be a bit higher than the pixel clock rate
+ * (generally 148.5 MHz).
+ */
+ ret = clk_set_rate(hdmi->hsm_clock, 163682864);
+ if (ret) {
+ DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
+ goto err_unprepare_pix;
+ }
+
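As a sanity check on that constant (assuming, per the comment above, that the only constraint is headroom over the pixel clock):

/* 163682864 / 148500000 ~= 1.102, i.e. roughly 10% above the
 * 148.5 MHz maximum pixel clock mentioned above.
 */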
ret = clk_prepare_enable(hdmi->hsm_clock);
if (ret) {
DRM_ERROR("Failed to turn on HDMI state machine clock: %d\n",
@@ -506,17 +519,40 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
* we'll use the HDMI core's register.
*/
if (of_find_property(dev->of_node, "hpd-gpios", &value)) {
- hdmi->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpios", 0);
+ enum of_gpio_flags hpd_gpio_flags;
+
+ hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node,
+ "hpd-gpios", 0,
+ &hpd_gpio_flags);
if (hdmi->hpd_gpio < 0) {
ret = hdmi->hpd_gpio;
goto err_unprepare_hsm;
}
+
+ hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
}
vc4->hdmi = hdmi;
/* HDMI core must be enabled. */
- WARN_ON_ONCE((HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE) == 0);
+ if (!(HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE)) {
+ HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_SW_RST);
+ udelay(1);
+ HD_WRITE(VC4_HD_M_CTL, 0);
+
+ HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_ENABLE);
+
+ HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL,
+ VC4_HDMI_SW_RESET_HDMI |
+ VC4_HDMI_SW_RESET_FORMAT_DETECT);
+
+ HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL, 0);
+
+ /* PHY should be in reset, like
+ * vc4_hdmi_encoder_disable() does.
+ */
+ HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16);
+ }
drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 8098c5b21ba4..6fbab1c82cb1 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -100,12 +100,76 @@ int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused)
}
#endif
+/* The filter kernel is composed of dwords each containing 3 9-bit
+ * signed integers packed next to each other.
+ */
+#define VC4_INT_TO_COEFF(coeff) ((coeff) & 0x1ff)
+#define VC4_PPF_FILTER_WORD(c0, c1, c2) \
+ ((((c0) & 0x1ff) << 0) | \
+ (((c1) & 0x1ff) << 9) | \
+ (((c2) & 0x1ff) << 18))
+
+/* The whole filter kernel is arranged as the coefficients 0-16 going
+ * up, then a pad, then 17-31 going down and reversed within the
+ * dwords. This means that a linear phase kernel (where it's
+ * symmetrical at the boundary between 15 and 16) has the last 5
+ * dwords matching the first 5, but reversed.
+ */
+#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8, \
+ c9, c10, c11, c12, c13, c14, c15) \
+ {VC4_PPF_FILTER_WORD(c0, c1, c2), \
+ VC4_PPF_FILTER_WORD(c3, c4, c5), \
+ VC4_PPF_FILTER_WORD(c6, c7, c8), \
+ VC4_PPF_FILTER_WORD(c9, c10, c11), \
+ VC4_PPF_FILTER_WORD(c12, c13, c14), \
+ VC4_PPF_FILTER_WORD(c15, c15, 0)}
+
+#define VC4_LINEAR_PHASE_KERNEL_DWORDS 6
+#define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1)
+
+/* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali.
+ * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf
+ */
+static const u32 mitchell_netravali_1_3_1_3_kernel[] =
+ VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18,
+ 50, 82, 119, 155, 187, 213, 227);
+
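A worked expansion of the packing may help; this stand-alone check (illustrative userspace C, not driver code) verifies the first dword of a kernel starting 0, -2, -6:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* VC4_PPF_FILTER_WORD(0, -2, -6):
	 * (0 & 0x1ff) = 0x000, (-2 & 0x1ff) = 0x1fe, (-6 & 0x1ff) = 0x1fa
	 */
	uint32_t word = (0x000 << 0) | (0x1fe << 9) | (0x1fa << 18);

	assert(word == 0x07ebfc00);
	return 0;
}

Note how the mirrored upload below writes dwords 6..10 from kernel[4..0], so the 11-dword kernel is symmetric about dword 5.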
+static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
+ struct drm_mm_node *space,
+ const u32 *kernel)
+{
+ int ret, i;
+ u32 __iomem *dst_kernel;
+
+ ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1,
+ 0);
+ if (ret) {
+ DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
+ ret);
+ return ret;
+ }
+
+ dst_kernel = hvs->dlist + space->start;
+
+ for (i = 0; i < VC4_KERNEL_DWORDS; i++) {
+ if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS)
+ writel(kernel[i], &dst_kernel[i]);
+ else {
+ writel(kernel[VC4_KERNEL_DWORDS - i - 1],
+ &dst_kernel[i]);
+ }
+ }
+
+ return 0;
+}
+
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
struct vc4_hvs *hvs = NULL;
+ int ret;
hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
@@ -119,6 +183,33 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
hvs->dlist = hvs->regs + SCALER_DLIST_START;
+ spin_lock_init(&hvs->mm_lock);
+
+ /* Set up the HVS display list memory manager. We never
+ * overwrite the setup from the bootloader (just 128 bytes out of
+ * our 16K), since we don't want to scramble the screen when
+ * transitioning from the firmware's boot setup to runtime.
+ */
+ drm_mm_init(&hvs->dlist_mm,
+ HVS_BOOTLOADER_DLIST_END,
+ (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
+
+ /* Set up the HVS LBM memory manager. We could have some more
+ * complicated data structure that allowed reuse of LBM areas
+ * between planes when they don't overlap on the screen, but
+ * for now we just allocate globally.
+ */
+ drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+
+ /* Upload filter kernels. We only have the one for now, so we
+ * keep it around for the lifetime of the driver.
+ */
+ ret = vc4_hvs_upload_linear_kernel(hvs,
+ &hvs->mitchell_netravali_filter,
+ mitchell_netravali_1_3_1_3_kernel);
+ if (ret)
+ return ret;
+
vc4->hvs = hvs;
return 0;
}
@@ -129,6 +220,12 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
+ if (vc4->hvs->mitchell_netravali_filter.allocated)
+ drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
+
+ drm_mm_takedown(&vc4->hvs->dlist_mm);
+ drm_mm_takedown(&vc4->hvs->lbm_mm);
+
vc4->hvs = NULL;
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 78a21357fb2d..b0104a346a74 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -30,6 +30,10 @@
* disables that specific interrupt, and 0s written are ignored
* (reading either one returns the set of enabled interrupts).
*
+ * When we take a binning flush done interrupt, we need to submit the
+ * next frame for binning and move the finished frame to the render
+ * thread.
+ *
* When we take a render frame interrupt, we need to wake the
* processes waiting for some frame to be done, and get the next frame
* submitted ASAP (so the hardware doesn't sit idle when there's work
@@ -44,6 +48,7 @@
#include "vc4_regs.h"
#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
+ V3D_INT_FLDONE | \
V3D_INT_FRDONE)
DECLARE_WAIT_QUEUE_HEAD(render_wait);
@@ -77,7 +82,7 @@ vc4_overflow_mem_work(struct work_struct *work)
unsigned long irqflags;
spin_lock_irqsave(&vc4->job_lock, irqflags);
- current_exec = vc4_first_job(vc4);
+ current_exec = vc4_first_bin_job(vc4);
if (current_exec) {
vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
list_add_tail(&vc4->overflow_mem->unref_head,
@@ -98,17 +103,43 @@ vc4_overflow_mem_work(struct work_struct *work)
}
static void
-vc4_irq_finish_job(struct drm_device *dev)
+vc4_irq_finish_bin_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
+
+ if (!exec)
+ return;
+
+ vc4_move_job_to_render(dev, exec);
+ vc4_submit_next_bin_job(dev);
+}
+
+static void
+vc4_cancel_bin_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
+
+ if (!exec)
+ return;
+
+ list_move_tail(&exec->head, &vc4->bin_job_list);
+ vc4_submit_next_bin_job(dev);
+}
+
+static void
+vc4_irq_finish_render_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_exec_info *exec = vc4_first_job(vc4);
+ struct vc4_exec_info *exec = vc4_first_render_job(vc4);
if (!exec)
return;
vc4->finished_seqno++;
list_move_tail(&exec->head, &vc4->job_done_list);
- vc4_submit_next_job(dev);
+ vc4_submit_next_render_job(dev);
wake_up_all(&vc4->job_wait_queue);
schedule_work(&vc4->job_done_work);
@@ -125,9 +156,10 @@ vc4_irq(int irq, void *arg)
barrier();
intctl = V3D_READ(V3D_INTCTL);
- /* Acknowledge the interrupts we're handling here. The render
- * frame done interrupt will be cleared, while OUTOMEM will
- * stay high until the underlying cause is cleared.
+ /* Acknowledge the interrupts we're handling here. The binner
+ * last flush / render frame done interrupt will be cleared,
+ * while OUTOMEM will stay high until the underlying cause is
+ * cleared.
*/
V3D_WRITE(V3D_INTCTL, intctl);
@@ -138,9 +170,16 @@ vc4_irq(int irq, void *arg)
status = IRQ_HANDLED;
}
+ if (intctl & V3D_INT_FLDONE) {
+ spin_lock(&vc4->job_lock);
+ vc4_irq_finish_bin_job(dev);
+ spin_unlock(&vc4->job_lock);
+ status = IRQ_HANDLED;
+ }
+
if (intctl & V3D_INT_FRDONE) {
spin_lock(&vc4->job_lock);
- vc4_irq_finish_job(dev);
+ vc4_irq_finish_render_job(dev);
spin_unlock(&vc4->job_lock);
status = IRQ_HANDLED;
}
@@ -205,6 +244,7 @@ void vc4_irq_reset(struct drm_device *dev)
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
spin_lock_irqsave(&vc4->job_lock, irqflags);
- vc4_irq_finish_job(dev);
+ vc4_cancel_bin_job(dev);
+ vc4_irq_finish_render_job(dev);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index f95f2df5f8d1..4718ae5176cc 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -49,6 +49,15 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
drm_atomic_helper_commit_modeset_enables(dev, state);
+ /* Make sure that drm_atomic_helper_wait_for_vblanks()
+ * actually waits for vblank. If we're doing a full atomic
+ * modeset (as opposed to a vc4_update_plane() short circuit),
+ * then we need to wait for scanout to be done with our
+ * display list before we free it and potentially reallocate
+ * and overwrite the dlist memory with a new modeset.
+ */
+ state->legacy_cursor_update = false;
+
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 0addbad15832..7b0c72ae02a0 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -24,19 +24,52 @@
#include "drm_fb_cma_helper.h"
#include "drm_plane_helper.h"
+enum vc4_scaling_mode {
+ VC4_SCALING_NONE,
+ VC4_SCALING_TPZ,
+ VC4_SCALING_PPF,
+};
+
struct vc4_plane_state {
struct drm_plane_state base;
+ /* System memory copy of the display list for this element, computed
+ * at atomic_check time.
+ */
u32 *dlist;
- u32 dlist_size; /* Number of dwords in allocated for the display list */
+ u32 dlist_size; /* Number of dwords allocated for the display list */
u32 dlist_count; /* Number of used dwords in the display list. */
- /* Offset in the dlist to pointer word 0. */
- u32 pw0_offset;
+ /* Offset in the dlist to various words, for pageflip or
+ * cursor updates.
+ */
+ u32 pos0_offset;
+ u32 pos2_offset;
+ u32 ptr0_offset;
/* Offset where the plane's dlist was last stored in the
- hardware at vc4_crtc_atomic_flush() time.
- */
- u32 *hw_dlist;
+ * hardware at vc4_crtc_atomic_flush() time.
+ */
+ u32 __iomem *hw_dlist;
+
+ /* Clipped coordinates of the plane on the display. */
+ int crtc_x, crtc_y, crtc_w, crtc_h;
+ /* Clipped area being scanned from in the FB. */
+ u32 src_x, src_y;
+
+ u32 src_w[2], src_h[2];
+
+ /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
+ enum vc4_scaling_mode x_scaling[2], y_scaling[2];
+ bool is_unity;
+ bool is_yuv;
+
+ /* Offset to start scanning out from the start of the plane's
+ * BO.
+ */
+ u32 offsets[3];
+
+ /* Our allocation in LBM for temporary storage during scaling. */
+ struct drm_mm_node lbm;
};
static inline struct vc4_plane_state *
@@ -50,6 +83,7 @@ static const struct hvs_format {
u32 hvs; /* HVS_FORMAT_* */
u32 pixel_order;
bool has_alpha;
+ bool flip_cbcr;
} hvs_formats[] = {
{
.drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
@@ -59,6 +93,48 @@ static const struct hvs_format {
.drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
},
+ {
+ .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
+ .pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
+ },
+ {
+ .drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
+ .pixel_order = HVS_PIXEL_ORDER_XBGR, .has_alpha = false,
+ },
+ {
+ .drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
+ },
+ {
+ .drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
+ },
+ {
+ .drm = DRM_FORMAT_YUV422,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
+ },
+ {
+ .drm = DRM_FORMAT_YVU422,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
+ .flip_cbcr = true,
+ },
+ {
+ .drm = DRM_FORMAT_YUV420,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
+ },
+ {
+ .drm = DRM_FORMAT_YVU420,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
+ .flip_cbcr = true,
+ },
+ {
+ .drm = DRM_FORMAT_NV12,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
+ },
+ {
+ .drm = DRM_FORMAT_NV16,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
+ },
};
static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
@@ -73,6 +149,16 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
return NULL;
}
+static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
+{
+ if (dst > src)
+ return VC4_SCALING_PPF;
+ else if (dst < src)
+ return VC4_SCALING_TPZ;
+ else
+ return VC4_SCALING_NONE;
+}
+
static bool plane_enabled(struct drm_plane_state *state)
{
return state->fb && state->crtc;
@@ -89,6 +175,8 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
if (!vc4_state)
return NULL;
+ memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+
__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
if (vc4_state->dlist) {
@@ -108,8 +196,17 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
static void vc4_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ if (vc4_state->lbm.allocated) {
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+ drm_mm_remove_node(&vc4_state->lbm);
+ spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+ }
+
kfree(vc4_state->dlist);
__drm_atomic_helper_plane_destroy_state(plane, &vc4_state->base);
kfree(state);
@@ -148,84 +245,400 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
vc4_state->dlist[vc4_state->dlist_count++] = val;
}
+/* Returns the scl0/scl1 field based on whether the dimensions need to
+ * be up/down/non-scaled.
+ *
+ * This is a replication of a table from the spec.
+ */
+static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
+ case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
+ return SCALER_CTL0_SCL_H_PPF_V_PPF;
+ case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
+ return SCALER_CTL0_SCL_H_TPZ_V_PPF;
+ case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
+ return SCALER_CTL0_SCL_H_PPF_V_TPZ;
+ case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
+ return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
+ case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
+ return SCALER_CTL0_SCL_H_PPF_V_NONE;
+ case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
+ return SCALER_CTL0_SCL_H_NONE_V_PPF;
+ case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
+ return SCALER_CTL0_SCL_H_NONE_V_TPZ;
+ case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
+ return SCALER_CTL0_SCL_H_TPZ_V_NONE;
+ default:
+ case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
+ /* The unity case is independently handled by
+ * SCALER_CTL0_UNITY.
+ */
+ return 0;
+ }
+}
+
+static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
+{
+ struct drm_plane *plane = state->plane;
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+ u32 subpixel_src_mask = (1 << 16) - 1;
+ u32 format = fb->pixel_format;
+ int num_planes = drm_format_num_planes(format);
+ u32 h_subsample = 1;
+ u32 v_subsample = 1;
+ int i;
+
+ for (i = 0; i < num_planes; i++)
+ vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
+
+ /* We don't support subpixel source positioning for scaling. */
+ if ((state->src_x & subpixel_src_mask) ||
+ (state->src_y & subpixel_src_mask) ||
+ (state->src_w & subpixel_src_mask) ||
+ (state->src_h & subpixel_src_mask)) {
+ return -EINVAL;
+ }
+
+ vc4_state->src_x = state->src_x >> 16;
+ vc4_state->src_y = state->src_y >> 16;
+ vc4_state->src_w[0] = state->src_w >> 16;
+ vc4_state->src_h[0] = state->src_h >> 16;
+
+ vc4_state->crtc_x = state->crtc_x;
+ vc4_state->crtc_y = state->crtc_y;
+ vc4_state->crtc_w = state->crtc_w;
+ vc4_state->crtc_h = state->crtc_h;
+
+ vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
+ vc4_state->crtc_w);
+ vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
+ vc4_state->crtc_h);
+
+ if (num_planes > 1) {
+ vc4_state->is_yuv = true;
+
+ h_subsample = drm_format_horz_chroma_subsampling(format);
+ v_subsample = drm_format_vert_chroma_subsampling(format);
+ vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
+ vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;
+
+ vc4_state->x_scaling[1] =
+ vc4_get_scaling_mode(vc4_state->src_w[1],
+ vc4_state->crtc_w);
+ vc4_state->y_scaling[1] =
+ vc4_get_scaling_mode(vc4_state->src_h[1],
+ vc4_state->crtc_h);
+
+ /* YUV conversion requires that scaling be enabled,
+ * even on a plane that's otherwise 1:1. Choose TPZ
+ * for simplicity.
+ */
+ if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
+ vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
+ if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
+ vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+ }
+
+ vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[1] == VC4_SCALING_NONE);
+
+ /* No configuring scaling on the cursor plane, since it gets
+ * non-vblank-synced updates, and scaling requires LBM changes
+ * which have to be vblank-synced.
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
+ return -EINVAL;
+
+ /* Clamp the on-screen start x/y to 0. The hardware doesn't
+ * support negative y, and negative x wastes bandwidth.
+ */
+ if (vc4_state->crtc_x < 0) {
+ for (i = 0; i < num_planes; i++) {
+ u32 cpp = drm_format_plane_cpp(fb->pixel_format, i);
+ u32 subs = ((i == 0) ? 1 : h_subsample);
+
+ vc4_state->offsets[i] += (cpp *
+ (-vc4_state->crtc_x) / subs);
+ }
+ vc4_state->src_w[0] += vc4_state->crtc_x;
+ vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample;
+ vc4_state->crtc_x = 0;
+ }
+
+ if (vc4_state->crtc_y < 0) {
+ for (i = 0; i < num_planes; i++) {
+ u32 subs = ((i == 0) ? 1 : v_subsample);
+
+ vc4_state->offsets[i] += (fb->pitches[i] *
+ (-vc4_state->crtc_y) / subs);
+ }
+ vc4_state->src_h[0] += vc4_state->crtc_y;
+ vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample;
+ vc4_state->crtc_y = 0;
+ }
+
+ return 0;
+}
+
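A worked example of the negative-x clamp above (illustrative numbers): an XRGB8888 plane (cpp = 4) placed at crtc_x = -100:

/* offsets[0] += 4 * 100 = 400 bytes (skip the clipped pixels at line start)
 * src_w[0]   -= 100     (scan out 100 fewer source pixels)
 * crtc_x      = 0
 */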
+static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
+{
+ u32 scale, recip;
+
+ scale = (1 << 16) * src / dst;
+
+ /* The specs note that while the reciprocal would be defined
+ * as (1<<32)/scale, ~0 is close enough.
+ */
+ recip = ~0 / scale;
+
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
+ VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
+}
+
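Plugging numbers into the TPZ math above (illustrative): a 1920 to 1280 horizontal downscale:

/* scale = (1 << 16) * 1920 / 1280 = 0x18000 (1.5 in 16.16 fixed point)
 * recip = 0xffffffff / 0x18000    = 0xaaaa  (~2/3 in 0.16 fixed point)
 * and ~0 / scale matches (1 << 32) / scale to within one LSB here.
 */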
+static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
+{
+ u32 scale = (1 << 16) * src / dst;
+
+ vc4_dlist_write(vc4_state,
+ SCALER_PPF_AGC |
+ VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
+ VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
+}
+
+static u32 vc4_lbm_size(struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ /* This is the worst case number. One of the two sizes will
+ * be used depending on the scaling configuration.
+ */
+ u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
+ u32 lbm;
+
+ if (!vc4_state->is_yuv) {
+ if (vc4_state->is_unity) {
+ return 0;
+ } else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ) {
+ lbm = pix_per_line * 8;
+ } else {
+ /* In special cases, this multiplier might be 12. */
+ lbm = pix_per_line * 16;
+ }
+ } else {
+ /* There are cases for this going down to a multiplier
+ * of 2, but according to the firmware source, the
+ * table in the docs is somewhat wrong.
+ */
+ lbm = pix_per_line * 16;
+ }
+
+ lbm = roundup(lbm, 32);
+
+ return lbm;
+}
+
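For a feel of the sizing above (illustrative): upscaling a 1280-wide source to 1920 wide with vertical PPF scaling:

/* pix_per_line = max(1280, 1920) = 1920
 * lbm          = 1920 * 16       = 30720, already a multiple of 32
 */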
+static void vc4_write_scaling_parameters(struct drm_plane_state *state,
+ int channel)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ /* Ch0 H-PPF Word 0: Scaling Parameters */
+ if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
+ vc4_write_ppf(vc4_state,
+ vc4_state->src_w[channel], vc4_state->crtc_w);
+ }
+
+ /* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
+ if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
+ vc4_write_ppf(vc4_state,
+ vc4_state->src_h[channel], vc4_state->crtc_h);
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+ }
+
+ /* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
+ if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
+ vc4_write_tpz(vc4_state,
+ vc4_state->src_w[channel], vc4_state->crtc_w);
+ }
+
+ /* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
+ if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
+ vc4_write_tpz(vc4_state,
+ vc4_state->src_h[channel], vc4_state->crtc_h);
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+ }
+}
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
static int vc4_plane_mode_set(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->pixel_format);
- uint32_t offset = fb->offsets[0];
- int crtc_x = state->crtc_x;
- int crtc_y = state->crtc_y;
- int crtc_w = state->crtc_w;
- int crtc_h = state->crtc_h;
-
- if (state->crtc_w << 16 != state->src_w ||
- state->crtc_h << 16 != state->src_h) {
- /* We don't support scaling yet, which involves
- * allocating the LBM memory for scaling temporary
- * storage, and putting filter kernels in the HVS
- * context.
- */
- return -EINVAL;
+ int num_planes = drm_format_num_planes(format->drm);
+ u32 scl0, scl1;
+ u32 lbm_size;
+ unsigned long irqflags;
+ int ret, i;
+
+ ret = vc4_plane_setup_clipping_and_scaling(state);
+ if (ret)
+ return ret;
+
+ /* Allocate the LBM memory that the HVS will use for temporary
+ * storage due to our scaling/format conversion.
+ */
+ lbm_size = vc4_lbm_size(state);
+ if (lbm_size) {
+ if (!vc4_state->lbm.allocated) {
+ spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+ ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
+ &vc4_state->lbm,
+ lbm_size, 32, 0);
+ spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+ } else {
+ WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
+ }
}
- if (crtc_x < 0) {
- offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
- crtc_w += crtc_x;
- crtc_x = 0;
- }
+ if (ret)
+ return ret;
- if (crtc_y < 0) {
- offset += fb->pitches[0] * -crtc_y;
- crtc_h += crtc_y;
- crtc_y = 0;
+ /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
+ * and 4:4:4, scl1 should be set to scl0 so both channels of
+ * the scaler do the same thing. For YUV, the Y plane needs
+ * to be put in channel 1 and Cb/Cr in channel 0, so we swap
+ * the scl fields here.
+ */
+ if (num_planes == 1) {
+ scl0 = vc4_get_scl_field(state, 0);
+ scl1 = scl0;
+ } else {
+ scl0 = vc4_get_scl_field(state, 1);
+ scl1 = vc4_get_scl_field(state, 0);
}
+ /* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
(format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
- SCALER_CTL0_UNITY);
+ (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
+ VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
+ VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));
/* Position Word 0: Image Positions and Alpha Value */
+ vc4_state->pos0_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) |
- VC4_SET_FIELD(crtc_x, SCALER_POS0_START_X) |
- VC4_SET_FIELD(crtc_y, SCALER_POS0_START_Y));
-
- /* Position Word 1: Scaled Image Dimensions.
- * Skipped due to SCALER_CTL0_UNITY scaling.
- */
+ VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
+ VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
+
+ /* Position Word 1: Scaled Image Dimensions. */
+ if (!vc4_state->is_unity) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->crtc_w,
+ SCALER_POS1_SCL_WIDTH) |
+ VC4_SET_FIELD(vc4_state->crtc_h,
+ SCALER_POS1_SCL_HEIGHT));
+ }
/* Position Word 2: Source Image Size, Alpha Mode */
+ vc4_state->pos2_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(format->has_alpha ?
SCALER_POS2_ALPHA_MODE_PIPELINE :
SCALER_POS2_ALPHA_MODE_FIXED,
SCALER_POS2_ALPHA_MODE) |
- VC4_SET_FIELD(crtc_w, SCALER_POS2_WIDTH) |
- VC4_SET_FIELD(crtc_h, SCALER_POS2_HEIGHT));
+ VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
+ VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));
/* Position Word 3: Context. Written by the HVS. */
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
- vc4_state->pw0_offset = vc4_state->dlist_count;
- /* Pointer Word 0: RGB / Y Pointer */
- vc4_dlist_write(vc4_state, bo->paddr + offset);
+ /* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
+ *
+ * The pointers may be any byte address.
+ */
+ vc4_state->ptr0_offset = vc4_state->dlist_count;
+ if (!format->flip_cbcr) {
+ for (i = 0; i < num_planes; i++)
+ vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
+ } else {
+ WARN_ON_ONCE(num_planes != 3);
+ vc4_dlist_write(vc4_state, vc4_state->offsets[0]);
+ vc4_dlist_write(vc4_state, vc4_state->offsets[2]);
+ vc4_dlist_write(vc4_state, vc4_state->offsets[1]);
+ }
- /* Pointer Context Word 0: Written by the HVS */
- vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+ /* Pointer Context Word 0/1/2: Written by the HVS */
+ for (i = 0; i < num_planes; i++)
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
- /* Pitch word 0: Pointer 0 Pitch */
- vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH));
+ /* Pitch word 0/1/2 */
+ for (i = 0; i < num_planes; i++) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
+ }
+
+ /* Colorspace conversion words */
+ if (vc4_state->is_yuv) {
+ vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5);
+ vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5);
+ vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
+ }
+
+ if (!vc4_state->is_unity) {
+ /* LBM Base Address. */
+ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ vc4_dlist_write(vc4_state, vc4_state->lbm.start);
+ }
+
+ if (num_planes > 1) {
+ /* Emit Cb/Cr as channel 0 and Y as channel
+ * 1. This matches how we set up scl0/scl1
+ * above.
+ */
+ vc4_write_scaling_parameters(state, 1);
+ }
+ vc4_write_scaling_parameters(state, 0);
+
+ /* If any PPF setup was done, then all the kernel
+ * pointers get uploaded.
+ */
+ if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
+ vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
+ vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
+ vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
+ u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
+ SCALER_PPF_KERNEL_OFFSET);
+
+ /* HPPF plane 0 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* VPPF plane 0 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* HPPF plane 1 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* VPPF plane 1 */
+ vc4_dlist_write(vc4_state, kernel);
+ }
+ }
vc4_state->dlist[ctl0_offset] |=
VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
@@ -303,13 +716,13 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
* scanout will start from this address as soon as the FIFO
* needs to refill with pixels.
*/
- writel(addr, &vc4_state->hw_dlist[vc4_state->pw0_offset]);
+ writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
/* Also update the CPU-side dlist copy, so that any later
* atomic updates that don't do a new modeset on our plane
* also use our updated address.
*/
- vc4_state->dlist[vc4_state->pw0_offset] = addr;
+ vc4_state->dlist[vc4_state->ptr0_offset] = addr;
}
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
@@ -325,8 +738,83 @@ static void vc4_plane_destroy(struct drm_plane *plane)
drm_plane_cleanup(plane);
}
+/* Implements immediate (non-vblank-synced) updates of the cursor
+ * position, or falls back to the atomic helper otherwise.
+ */
+static int
+vc4_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_plane_state *plane_state;
+ struct vc4_plane_state *vc4_state;
+
+ if (plane != crtc->cursor)
+ goto out;
+
+ plane_state = plane->state;
+ vc4_state = to_vc4_plane_state(plane_state);
+
+ if (!plane_state)
+ goto out;
+
+ /* If we're changing the cursor contents, do that in the
+ * normal vblank-synced atomic path.
+ */
+ if (fb != plane_state->fb)
+ goto out;
+
+ /* No configuring new scaling in the fast path. */
+ if (crtc_w != plane_state->crtc_w ||
+ crtc_h != plane_state->crtc_h ||
+ src_w != plane_state->src_w ||
+ src_h != plane_state->src_h) {
+ goto out;
+ }
+
+ /* Set the cursor's position on the screen. This is the
+ * expected change from the drm_mode_cursor_universal()
+ * helper.
+ */
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+
+ /* Allow changing the start position within the cursor BO, if
+ * that matters.
+ */
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+
+ /* Update the display list based on the new crtc_x/y. */
+ vc4_plane_atomic_check(plane, plane_state);
+
+ /* Note that we can't just call vc4_plane_write_dlist()
+ * because that would smash the context data that the HVS is
+ * currently using.
+ */
+ writel(vc4_state->dlist[vc4_state->pos0_offset],
+ &vc4_state->hw_dlist[vc4_state->pos0_offset]);
+ writel(vc4_state->dlist[vc4_state->pos2_offset],
+ &vc4_state->hw_dlist[vc4_state->pos2_offset]);
+ writel(vc4_state->dlist[vc4_state->ptr0_offset],
+ &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
+
+ return 0;
+
+out:
+ return drm_atomic_helper_update_plane(plane, crtc, fb,
+ crtc_x, crtc_y,
+ crtc_w, crtc_h,
+ src_x, src_y,
+ src_w, src_h);
+}
+
static const struct drm_plane_funcs vc4_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
+ .update_plane = vc4_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = vc4_plane_destroy,
.set_property = NULL,
@@ -341,6 +829,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
+ u32 num_formats = 0;
int ret = 0;
unsigned i;
@@ -351,12 +840,20 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
goto fail;
}
- for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
- formats[i] = hvs_formats[i].drm;
+ for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
+ /* Don't allow YUV in cursor planes, since that means
+ * turning on the scaler, which we don't allow for the
+ * cursor.
+ */
+ if (type != DRM_PLANE_TYPE_CURSOR ||
+ hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
+ formats[num_formats++] = hvs_formats[i].drm;
+ }
+ }
plane = &vc4_plane->base;
ret = drm_universal_plane_init(dev, plane, 0xff,
&vc4_plane_funcs,
- formats, ARRAY_SIZE(formats),
+ formats, num_formats,
type, NULL);
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 4e52a0a88551..bf42a8e87111 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -187,7 +187,7 @@
# define PV_VCONTROL_CONTINUOUS BIT(1)
# define PV_VCONTROL_VIDEN BIT(0)
-#define PV_VSYNCD 0x08
+#define PV_VSYNCD_EVEN 0x08
#define PV_HORZA 0x0c
# define PV_HORZA_HBP_MASK VC4_MASK(31, 16)
@@ -350,6 +350,17 @@
# define SCALER_DISPCTRLX_HEIGHT_SHIFT 0
#define SCALER_DISPBKGND0 0x00000044
+# define SCALER_DISPBKGND_AUTOHS BIT(31)
+# define SCALER_DISPBKGND_INTERLACE BIT(30)
+# define SCALER_DISPBKGND_GAMMA BIT(29)
+# define SCALER_DISPBKGND_TESTMODE_MASK VC4_MASK(28, 25)
+# define SCALER_DISPBKGND_TESTMODE_SHIFT 25
+/* Enables filling the scaler line with the RGB value in the low 24
+ * bits before compositing. Costs cycles, so should be skipped if
+ * opaque display planes will cover everything.
+ */
+# define SCALER_DISPBKGND_FILL BIT(24)
+
#define SCALER_DISPSTAT0 0x00000048
#define SCALER_DISPBASE0 0x0000004c
# define SCALER_DISPSTATX_MODE_MASK VC4_MASK(31, 30)
@@ -362,6 +373,9 @@
# define SCALER_DISPSTATX_EMPTY BIT(28)
#define SCALER_DISPCTRL1 0x00000050
#define SCALER_DISPBKGND1 0x00000054
+#define SCALER_DISPBKGNDX(x) (SCALER_DISPBKGND0 + \
+ (x) * (SCALER_DISPBKGND1 - \
+ SCALER_DISPBKGND0))
#define SCALER_DISPSTAT1 0x00000058
#define SCALER_DISPSTATX(x) (SCALER_DISPSTAT0 + \
(x) * (SCALER_DISPSTAT1 - \
@@ -456,6 +470,8 @@
#define VC4_HDMI_TX_PHY_RESET_CTL 0x2c0
#define VC4_HD_M_CTL 0x00c
+# define VC4_HD_M_REGISTER_FILE_STANDBY (3 << 6)
+# define VC4_HD_M_RAM_STANDBY (3 << 4)
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
@@ -503,7 +519,12 @@ enum hvs_pixel_format {
HVS_PIXEL_FORMAT_RGB888 = 5,
HVS_PIXEL_FORMAT_RGBA6666 = 6,
/* 32bpp */
- HVS_PIXEL_FORMAT_RGBA8888 = 7
+ HVS_PIXEL_FORMAT_RGBA8888 = 7,
+
+ HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE = 8,
+ HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE = 9,
+ HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE = 10,
+ HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE = 11,
};
/* Note: the LSB is the rightmost character shown. Only valid for
@@ -536,6 +557,21 @@ enum hvs_pixel_format {
#define SCALER_CTL0_ORDER_MASK VC4_MASK(14, 13)
#define SCALER_CTL0_ORDER_SHIFT 13
+#define SCALER_CTL0_SCL1_MASK VC4_MASK(10, 8)
+#define SCALER_CTL0_SCL1_SHIFT 8
+
+#define SCALER_CTL0_SCL0_MASK VC4_MASK(7, 5)
+#define SCALER_CTL0_SCL0_SHIFT 5
+
+#define SCALER_CTL0_SCL_H_PPF_V_PPF 0
+#define SCALER_CTL0_SCL_H_TPZ_V_PPF 1
+#define SCALER_CTL0_SCL_H_PPF_V_TPZ 2
+#define SCALER_CTL0_SCL_H_TPZ_V_TPZ 3
+#define SCALER_CTL0_SCL_H_PPF_V_NONE 4
+#define SCALER_CTL0_SCL_H_NONE_V_PPF 5
+#define SCALER_CTL0_SCL_H_NONE_V_TPZ 6
+#define SCALER_CTL0_SCL_H_TPZ_V_NONE 7
+
/* Set to indicate no scaling. */
#define SCALER_CTL0_UNITY BIT(4)
@@ -551,6 +587,12 @@ enum hvs_pixel_format {
#define SCALER_POS0_START_X_MASK VC4_MASK(11, 0)
#define SCALER_POS0_START_X_SHIFT 0
+#define SCALER_POS1_SCL_HEIGHT_MASK VC4_MASK(27, 16)
+#define SCALER_POS1_SCL_HEIGHT_SHIFT 16
+
+#define SCALER_POS1_SCL_WIDTH_MASK VC4_MASK(11, 0)
+#define SCALER_POS1_SCL_WIDTH_SHIFT 0
+
#define SCALER_POS2_ALPHA_MODE_MASK VC4_MASK(31, 30)
#define SCALER_POS2_ALPHA_MODE_SHIFT 30
#define SCALER_POS2_ALPHA_MODE_PIPELINE 0
@@ -564,6 +606,80 @@ enum hvs_pixel_format {
#define SCALER_POS2_WIDTH_MASK VC4_MASK(11, 0)
#define SCALER_POS2_WIDTH_SHIFT 0
+/* Color Space Conversion words. Some values are S2.8 signed
+ * fixed-point numbers, except that the 2 integer bits map as
+ * {0x0: 0, 0x1: 1, 0x2: 2, 0x3: -1}.
+ */
+/* bottom 8 bits of S2.8 contribution of Cr to Blue */
+#define SCALER_CSC0_COEF_CR_BLU_MASK VC4_MASK(31, 24)
+#define SCALER_CSC0_COEF_CR_BLU_SHIFT 24
+/* Signed offset to apply to Y before CSC. (Y' = Y + YY_OFS) */
+#define SCALER_CSC0_COEF_YY_OFS_MASK VC4_MASK(23, 16)
+#define SCALER_CSC0_COEF_YY_OFS_SHIFT 16
+/* Signed offset to apply to CB before CSC (Cb' = Cb - 128 + CB_OFS). */
+#define SCALER_CSC0_COEF_CB_OFS_MASK VC4_MASK(15, 8)
+#define SCALER_CSC0_COEF_CB_OFS_SHIFT 8
+/* Signed offset to apply to CR before CSC (Cr' = Cr - 128 + CR_OFS). */
+#define SCALER_CSC0_COEF_CR_OFS_MASK VC4_MASK(7, 0)
+#define SCALER_CSC0_COEF_CR_OFS_SHIFT 0
+#define SCALER_CSC0_ITR_R_601_5 0x00f00000
+#define SCALER_CSC0_ITR_R_709_3 0x00f00000
+#define SCALER_CSC0_JPEG_JFIF 0x00000000
+
+/* S2.8 contribution of Cb to Green */
+#define SCALER_CSC1_COEF_CB_GRN_MASK VC4_MASK(31, 22)
+#define SCALER_CSC1_COEF_CB_GRN_SHIFT 22
+/* S2.8 contribution of Cr to Green */
+#define SCALER_CSC1_COEF_CR_GRN_MASK VC4_MASK(21, 12)
+#define SCALER_CSC1_COEF_CR_GRN_SHIFT 12
+/* S2.8 contribution of Y to all of RGB */
+#define SCALER_CSC1_COEF_YY_ALL_MASK VC4_MASK(11, 2)
+#define SCALER_CSC1_COEF_YY_ALL_SHIFT 2
+/* top 2 bits of S2.8 contribution of Cr to Blue */
+#define SCALER_CSC1_COEF_CR_BLU_MASK VC4_MASK(1, 0)
+#define SCALER_CSC1_COEF_CR_BLU_SHIFT 0
+#define SCALER_CSC1_ITR_R_601_5 0xe73304a8
+#define SCALER_CSC1_ITR_R_709_3 0xf2b784a8
+#define SCALER_CSC1_JPEG_JFIF 0xea34a400
+
+/* S2.8 contribution of Cb to Red */
+#define SCALER_CSC2_COEF_CB_RED_MASK VC4_MASK(29, 20)
+#define SCALER_CSC2_COEF_CB_RED_SHIFT 20
+/* S2.8 contribution of Cr to Red */
+#define SCALER_CSC2_COEF_CR_RED_MASK VC4_MASK(19, 10)
+#define SCALER_CSC2_COEF_CR_RED_SHIFT 10
+/* S2.8 contribution of Cb to Blue */
+#define SCALER_CSC2_COEF_CB_BLU_MASK VC4_MASK(19, 10)
+#define SCALER_CSC2_COEF_CB_BLU_SHIFT 10
+#define SCALER_CSC2_ITR_R_601_5 0x00066204
+#define SCALER_CSC2_ITR_R_709_3 0x00072a1c
+#define SCALER_CSC2_JPEG_JFIF 0x000599c5
+
+#define SCALER_TPZ0_VERT_RECALC BIT(31)
+#define SCALER_TPZ0_SCALE_MASK VC4_MASK(28, 8)
+#define SCALER_TPZ0_SCALE_SHIFT 8
+#define SCALER_TPZ0_IPHASE_MASK VC4_MASK(7, 0)
+#define SCALER_TPZ0_IPHASE_SHIFT 0
+#define SCALER_TPZ1_RECIP_MASK VC4_MASK(15, 0)
+#define SCALER_TPZ1_RECIP_SHIFT 0
+
+/* Skips interpolating coefficients to 64 phases, so just 8 are used.
+ * Required for nearest neighbor.
+ */
+#define SCALER_PPF_NOINTERP BIT(31)
+/* Replaces the highest valued coefficient with one that makes all 4
+ * sum to unity.
+ */
+#define SCALER_PPF_AGC BIT(30)
+#define SCALER_PPF_SCALE_MASK VC4_MASK(24, 8)
+#define SCALER_PPF_SCALE_SHIFT 8
+#define SCALER_PPF_IPHASE_MASK VC4_MASK(6, 0)
+#define SCALER_PPF_IPHASE_SHIFT 0
+
+#define SCALER_PPF_KERNEL_OFFSET_MASK VC4_MASK(13, 0)
+#define SCALER_PPF_KERNEL_OFFSET_SHIFT 0
+#define SCALER_PPF_KERNEL_UNCACHED BIT(31)
+
#define SCALER_SRC_PITCH_MASK VC4_MASK(15, 0)
#define SCALER_SRC_PITCH_SHIFT 0
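A note on the S2.8 color-space-conversion coefficients defined in the hunk above: the two integer bits map as {0x0: 0, 0x1: 1, 0x2: 2, 0x3: -1}, which is exactly what plain two's-complement truncation to 10 bits produces. A minimal standalone sketch of the packing (the helper name and clamping policy are illustrative, not part of the driver):

    #include <stdint.h>

    /* Pack a coefficient given as value * 256 into the 10-bit S2.8 layout:
     * two integer bits ({0x0: 0, 0x1: 1, 0x2: 2, 0x3: -1}) followed by
     * eight fraction bits. Two's complement over 10 bits yields that
     * mapping directly, e.g. -1.0 (-256) packs as 0x300. */
    static inline uint32_t s2_8_pack(int coef_times_256)
    {
        if (coef_times_256 < -256)   /* smallest representable: -1.0 */
            coef_times_256 = -256;
        if (coef_times_256 > 767)    /* largest: 767/256, just under 3.0 */
            coef_times_256 = 767;
        return (uint32_t)coef_times_256 & 0x3ff;
    }

The packed value would then be shifted into place with the *_MASK/*_SHIFT pairs above; note that SCALER_CSC0_COEF_CR_BLU holds only the bottom 8 bits of its coefficient, with the top 2 bits living in SCALER_CSC1_COEF_CR_BLU.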
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 31de5d17bc85..e6d3c6028341 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -268,6 +268,7 @@ static int vc4_v3d_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id vc4_v3d_dt_match[] = {
+ { .compatible = "brcm,bcm2835-v3d" },
{ .compatible = "brcm,vc4-v3d" },
{}
};
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index d0cbd5ecd7f0..7e2a12c4fed2 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (NULL != (page = vsg->pages[i])) {
if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
- page_cache_release(page);
+ put_page(page);
}
}
case dr_via_pages_alloc:
@@ -239,8 +239,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
if (NULL == vsg->pages)
return -ENOMEM;
down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm,
- (unsigned long)xfer->mem_addr,
+ ret = get_user_pages((unsigned long)xfer->mem_addr,
vsg->num_pages,
(vsg->direction == DMA_FROM_DEVICE),
0, vsg->pages, NULL);
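Both via_dmablit.c hunks above track mm API renames rather than driver logic: page_cache_release() was folded into put_page(), and get_user_pages() dropped its explicit task/mm arguments in favor of implicitly operating on current->mm. A sketch of the signature migration as it applies to the kernel this patch targets (later kernels reworked the argument list again, folding write/force into gup_flags):

    /* Before: the caller named the task and mm explicitly. */
    ret = get_user_pages(current, current->mm, start, nr_pages,
                         write, force, pages, vmas);

    /* After: the helper always acts on current->mm. */
    ret = get_user_pages(start, nr_pages, write, force, pages, vmas);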
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index a165f03eaa79..5fd1fd06effc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -237,13 +237,6 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
return 0;
}
-static bool virtio_gpu_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -274,21 +267,25 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
+static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (crtc->state->event)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
.enable = virtio_gpu_crtc_enable,
.disable = virtio_gpu_crtc_disable,
- .mode_fixup = virtio_gpu_crtc_mode_fixup,
.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
.atomic_check = virtio_gpu_crtc_atomic_check,
+ .atomic_flush = virtio_gpu_crtc_atomic_flush,
};
-static bool virtio_gpu_enc_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -362,7 +359,6 @@ virtio_gpu_best_encoder(struct drm_connector *connector)
}
static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
- .mode_fixup = virtio_gpu_enc_mode_fixup,
.mode_set = virtio_gpu_enc_mode_set,
.enable = virtio_gpu_enc_enable,
.disable = virtio_gpu_enc_disable,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index b40ed6061f05..7f898cfdc746 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -118,7 +118,7 @@ static const struct file_operations virtio_gpu_driver_fops = {
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
.set_busid = drm_virtio_set_busid,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 572fb351feab..70b44a2345ab 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -68,10 +68,17 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
struct virtio_gpu_object *bo;
uint32_t handle;
- if (plane->fb) {
- vgfb = to_virtio_gpu_framebuffer(plane->fb);
+ if (plane->state->fb) {
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->obj);
handle = bo->hw_res_handle;
+ if (bo->dumb) {
+ virtio_gpu_cmd_transfer_to_host_2d
+ (vgdev, handle, 0,
+ cpu_to_le32(plane->state->crtc_w),
+ cpu_to_le32(plane->state->crtc_h),
+ plane->state->crtc_x, plane->state->crtc_y, NULL);
+ }
} else {
handle = 0;
}
@@ -84,6 +91,11 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
plane->state->crtc_h,
plane->state->crtc_x,
plane->state->crtc_y);
+ virtio_gpu_cmd_resource_flush(vgdev, handle,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
}
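The extra commands above reflect how virtio-gpu presents a dumb (guest-allocated) buffer: its pages live in guest memory, so the updated rectangle must first be transferred into the host-side resource before the host is told to redraw it; host-rendered resources skip the transfer. An illustrative wrapper for the sequence (the wrapper itself is hypothetical; the two command helpers are the ones called in the hunk):

    static void present_dumb_bo(struct virtio_gpu_device *vgdev,
                                uint32_t handle, uint32_t x, uint32_t y,
                                uint32_t w, uint32_t h)
    {
        /* 1. Sync the guest backing pages into the host resource. */
        virtio_gpu_cmd_transfer_to_host_2d(vgdev, handle, 0,
                                           cpu_to_le32(w), cpu_to_le32(h),
                                           x, y, NULL);
        /* 2. Ask the host to recomposite the updated rectangle. */
        virtio_gpu_cmd_resource_flush(vgdev, handle, x, y, w, h);
    }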
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 58704f0a4607..531d22025fec 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -25,6 +25,8 @@
*
**************************************************************************/
+#include <linux/kernel.h>
+
#ifdef __KERNEL__
#include <drm/vmwgfx_drm.h>
@@ -36,7 +38,6 @@
#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
#endif /* ARRAY_SIZE */
-#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
@@ -987,12 +988,12 @@ svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
const surf_size_struct *pixel_size,
surf_size_struct *block_size)
{
- block_size->width = DIV_ROUND_UP(pixel_size->width,
- desc->block_size.width);
- block_size->height = DIV_ROUND_UP(pixel_size->height,
- desc->block_size.height);
- block_size->depth = DIV_ROUND_UP(pixel_size->depth,
- desc->block_size.depth);
+ block_size->width = __KERNEL_DIV_ROUND_UP(pixel_size->width,
+ desc->block_size.width);
+ block_size->height = __KERNEL_DIV_ROUND_UP(pixel_size->height,
+ desc->block_size.height);
+ block_size->depth = __KERNEL_DIV_ROUND_UP(pixel_size->depth,
+ desc->block_size.depth);
}
static inline bool
@@ -1100,8 +1101,9 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
const u32 bw = desc->block_size.width, bh = desc->block_size.height;
const u32 bd = desc->block_size.depth;
- const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
- const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+ const u32 rowstride = __KERNEL_DIV_ROUND_UP(width, bw) *
+ desc->bytes_per_block;
+ const u32 imgstride = __KERNEL_DIV_ROUND_UP(height, bh) * rowstride;
const u32 offset = (z / bd * imgstride +
y / bh * rowstride +
x / bw * desc->bytes_per_block);
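The motivation for this hunk: svga3d_surfacedefs.h is shared with non-kernel builds, and now that it includes linux/kernel.h the header's local DIV_ROUND_UP would collide with the kernel's macro, so the local copy is dropped in favor of the UAPI-namespaced __KERNEL_DIV_ROUND_UP. The block arithmetic itself is unchanged; a small self-contained example of what the rowstride/imgstride math computes (the format parameters are hypothetical):

    #include <stdio.h>

    #define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))

    int main(void)
    {
        /* Hypothetical compressed format: 4x4-pixel blocks, 8 bytes each. */
        const unsigned bw = 4, bh = 4, bytes_per_block = 8;
        const unsigned width = 130, height = 66;

        unsigned rowstride = DIV_ROUND_UP(width, bw) * bytes_per_block;
        unsigned imgstride = DIV_ROUND_UP(height, bh) * rowstride;

        /* Prints rowstride=264 imgstride=4488: 33 blocks across, 17 down. */
        printf("rowstride=%u imgstride=%u\n", rowstride, imgstride);
        return 0;
    }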
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 24fb348a44e1..6cbb7d4bdd11 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -195,7 +195,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_MASTER | DRM_AUTH),
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
vmw_kms_update_layout_ioctl,
- DRM_MASTER),
+ DRM_MASTER | DRM_CONTROL_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
@@ -972,15 +972,6 @@ static int vmw_driver_unload(struct drm_device *dev)
return 0;
}
-static void vmw_preclose(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- struct vmw_private *dev_priv = vmw_priv(dev);
-
- vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
-}
-
static void vmw_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
@@ -1011,7 +1002,6 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp == NULL))
return ret;
- INIT_LIST_HEAD(&vmw_fp->fence_events);
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
@@ -1214,6 +1204,7 @@ static int vmw_master_set(struct drm_device *dev,
}
dev_priv->active_master = vmaster;
+ drm_sysfs_hotplug_event(dev);
return 0;
}
@@ -1501,7 +1492,6 @@ static struct drm_driver driver = {
.master_set = vmw_master_set,
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
- .preclose = vmw_preclose,
.postclose = vmw_postclose,
.set_busid = drm_pci_set_busid,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 469cdd520615..019a6ca3e8e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20150810"
+#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 9
+#define VMWGFX_DRIVER_MINOR 10
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -80,7 +80,6 @@
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
- struct list_head fence_events;
bool gb_aware;
};
@@ -408,8 +407,11 @@ struct vmw_private {
void *fb_info;
enum vmw_display_unit_type active_display_unit;
struct vmw_legacy_display *ldu_priv;
- struct vmw_screen_object_display *sou_priv;
struct vmw_overlay *overlay_priv;
+ struct drm_property *hotplug_mode_update_property;
+ struct drm_property *implicit_placement_property;
+ unsigned num_implicit;
+ struct vmw_framebuffer *implicit_fb;
/*
* Context and surface management.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5da5de0cb522..1a1a87cbf109 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3009,6 +3009,26 @@ out_unref:
return ret;
}
+/**
+ * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXGenMips body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_view_id_val_add(sw_context, vmw_view_sr,
+ cmd->body.shaderResourceViewId);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3273,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_cid_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
- &vmw_cmd_ok, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
true, false, true),
@@ -3297,7 +3317,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_clear_depthstencil_view, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
&vmw_cmd_dx_check_subresource, true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7862a5..679a4cb98ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
mode = old_mode;
old_mode = NULL;
} else if (!vmw_kms_validate_mode_vram(vmw_priv,
- mode->hdisplay *
- (var->bits_per_pixel + 7) / 8,
- mode->vdisplay)) {
+ mode->hdisplay *
+ DIV_ROUND_UP(var->bits_per_pixel, 8),
+ mode->vdisplay)) {
drm_mode_destroy(vmw_priv->dev, mode);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 8e689b439890..e959df6ede83 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -71,7 +71,6 @@ struct vmw_user_fence {
*/
struct vmw_event_fence_action {
struct vmw_fence_action action;
- struct list_head fpriv_head;
struct drm_pending_event *event;
struct vmw_fence_obj *fence;
@@ -808,44 +807,6 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
}
/**
- * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
- *
- * @fman: Pointer to a struct vmw_fence_manager
- * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
- * with pointers to a struct drm_file object about to be closed.
- *
- * This function removes all pending fence events with references to a
- * specific struct drm_file object about to be closed. The caller is required
- * to pass a list of all struct vmw_event_fence_action objects with such
- * events attached. This function is typically called before the
- * struct drm_file object's event management is taken down.
- */
-void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
- struct list_head *event_list)
-{
- struct vmw_event_fence_action *eaction;
- struct drm_pending_event *event;
- unsigned long irq_flags;
-
- while (1) {
- spin_lock_irqsave(&fman->lock, irq_flags);
- if (list_empty(event_list))
- goto out_unlock;
- eaction = list_first_entry(event_list,
- struct vmw_event_fence_action,
- fpriv_head);
- list_del_init(&eaction->fpriv_head);
- event = eaction->event;
- eaction->event = NULL;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
- event->destroy(event);
- }
-out_unlock:
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-}
-
-
-/**
* vmw_event_fence_action_seq_passed
*
* @action: The struct vmw_fence_action embedded in a struct
@@ -879,10 +840,8 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
*eaction->tv_usec = tv.tv_usec;
}
- list_del_init(&eaction->fpriv_head);
- list_add_tail(&eaction->event->link, &file_priv->event_list);
+ drm_send_event_locked(dev, eaction->event);
eaction->event = NULL;
- wake_up_all(&file_priv->event_wait);
spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}
@@ -899,12 +858,6 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
- struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
- unsigned long irq_flags;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
- list_del(&eaction->fpriv_head);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
vmw_fence_obj_unreference(&eaction->fence);
kfree(eaction);
@@ -984,8 +937,6 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
{
struct vmw_event_fence_action *eaction;
struct vmw_fence_manager *fman = fman_from_fence(fence);
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- unsigned long irq_flags;
eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
if (unlikely(eaction == NULL))
@@ -1002,10 +953,6 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
- spin_lock_irqsave(&fman->lock, irq_flags);
- list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
-
vmw_fence_obj_add_action(fence, &eaction->action);
return 0;
@@ -1025,38 +972,26 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
struct vmw_event_fence_pending *event;
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct drm_device *dev = fman->dev_priv->dev;
- unsigned long irq_flags;
int ret;
- spin_lock_irqsave(&dev->event_lock, irq_flags);
-
- ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
- if (likely(ret == 0))
- file_priv->event_space -= sizeof(event->event);
-
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate event space for this file.\n");
- goto out_no_space;
- }
-
-
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (unlikely(event == NULL)) {
DRM_ERROR("Failed to allocate an event.\n");
ret = -ENOMEM;
- goto out_no_event;
+ goto out_no_space;
}
event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
event->event.base.length = sizeof(*event);
event->event.user_data = user_data;
- event->base.event = &event->event.base;
- event->base.file_priv = file_priv;
- event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+ ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate event space for this file.\n");
+ kfree(event);
+ goto out_no_space;
+ }
if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
ret = vmw_event_fence_action_queue(file_priv, fence,
@@ -1076,11 +1011,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
return 0;
out_no_queue:
- event->base.destroy(&event->base);
-out_no_event:
- spin_lock_irqsave(&dev->event_lock, irq_flags);
- file_priv->event_space += sizeof(*event);
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+ drm_event_cancel_free(dev, &event->base);
out_no_space:
return ret;
}
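The rewrite above moves vmwgfx onto the DRM core's pending-event helpers: drm_event_reserve_init() charges the event against the file's event space and ties its lifetime to the file (which is why the per-file fence_events bookkeeping can be deleted wholesale), drm_event_cancel_free() unwinds on error paths, and drm_send_event_locked() delivers it under dev->event_lock. A hedged kernel-context sketch of the pattern from a driver's point of view (the wrapper struct and event type are illustrative):

    struct my_event {
        struct drm_pending_event base;
        struct drm_event event;           /* fixed-size payload header */
    };

    static int my_queue_event(struct drm_device *dev,
                              struct drm_file *file_priv)
    {
        struct my_event *e = kzalloc(sizeof(*e), GFP_KERNEL);
        int ret;

        if (!e)
            return -ENOMEM;

        e->event.type = 0x80000000;       /* illustrative driver event type */
        e->event.length = sizeof(e->event);

        /* Reserves file_priv->event_space and registers the event for
         * cleanup on file close -- replacing the open-coded accounting
         * this patch removes. */
        ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
        if (ret) {
            kfree(e);
            return ret;
        }

        /* Later: drm_send_event_locked(dev, &e->base) on success, or
         * drm_event_cancel_free(dev, &e->base) on an error path. */
        return 0;
    }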
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 8be6c29f5eb5..83ae301ee141 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -116,8 +116,6 @@ extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
- struct list_head *event_list);
extern int vmw_event_fence_action_queue(struct drm_file *file_priv,
struct vmw_fence_obj *fence,
struct drm_pending_event *event,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b221a8c40282..4742ec4ead27 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -236,8 +236,8 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
- du->cursor_x = x + crtc->x;
- du->cursor_y = y + crtc->y;
+ du->cursor_x = x + du->set_gui_x;
+ du->cursor_y = y + du->set_gui_y;
/*
* FIXME: Unclear whether there's any global state touched by the
@@ -663,9 +663,8 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
break;
case vmw_du_screen_object:
ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
- clips, num_clips, increment,
- true,
- NULL);
+ clips, NULL, num_clips,
+ increment, true, NULL);
break;
case vmw_du_legacy:
ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
@@ -1109,6 +1108,22 @@ int vmw_kms_present(struct vmw_private *dev_priv,
return 0;
}
+static void
+vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
+{
+ if (dev_priv->hotplug_mode_update_property)
+ return;
+
+ dev_priv->hotplug_mode_update_property =
+ drm_property_create_range(dev_priv->dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "hotplug_mode_update", 0, 1);
+
+ if (!dev_priv->hotplug_mode_update_property)
+ return;
+
+}
+
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -1121,6 +1136,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.max_width = dev_priv->texture_max_width;
dev->mode_config.max_height = dev_priv->texture_max_height;
+ drm_mode_create_suggested_offset_properties(dev);
+ vmw_kms_create_hotplug_mode_update_property(dev_priv);
+
ret = vmw_kms_stdu_init_display(dev_priv);
if (ret) {
ret = vmw_kms_sou_init_display(dev_priv);
@@ -1360,15 +1378,28 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
du->pref_active = true;
du->gui_x = rects[du->unit].x;
du->gui_y = rects[du->unit].y;
+ drm_object_property_set_value
+ (&con->base, dev->mode_config.suggested_x_property,
+ du->gui_x);
+ drm_object_property_set_value
+ (&con->base, dev->mode_config.suggested_y_property,
+ du->gui_y);
} else {
du->pref_width = 800;
du->pref_height = 600;
du->pref_active = false;
+ drm_object_property_set_value
+ (&con->base, dev->mode_config.suggested_x_property,
+ 0);
+ drm_object_property_set_value
+ (&con->base, dev->mode_config.suggested_y_property,
+ 0);
}
con->status = vmw_du_connector_detect(con, true);
}
mutex_unlock(&dev->mode_config.mutex);
+ drm_sysfs_hotplug_event(dev);
return 0;
}
@@ -1591,6 +1622,12 @@ int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
+ struct vmw_display_unit *du = vmw_connector_to_du(connector);
+ struct vmw_private *dev_priv = vmw_priv(connector->dev);
+
+ if (property == dev_priv->implicit_placement_property)
+ du->is_implicit = val;
+
return 0;
}
@@ -2096,3 +2133,119 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
return 0;
}
+
+/**
+ * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @du: The display unit of the crtc.
+ */
+void vmw_kms_del_active(struct vmw_private *dev_priv,
+ struct vmw_display_unit *du)
+{
+ lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+ if (du->active_implicit) {
+ if (--(dev_priv->num_implicit) == 0)
+ dev_priv->implicit_fb = NULL;
+ du->active_implicit = false;
+ }
+}
+
+/**
+ * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @du: The display unit of the crtc.
+ * @vfb: The implicit framebuffer.
+ *
+ * Registers a binding to an implicit framebuffer.
+ */
+void vmw_kms_add_active(struct vmw_private *dev_priv,
+ struct vmw_display_unit *du,
+ struct vmw_framebuffer *vfb)
+{
+ lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+ WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
+
+ if (!du->active_implicit && du->is_implicit) {
+ dev_priv->implicit_fb = vfb;
+ du->active_implicit = true;
+ dev_priv->num_implicit++;
+ }
+}
+
+/**
+ * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
+ *
+ * @dev_priv: Pointer to device-private struct.
+ * @crtc: The crtc we want to flip.
+ *
+ * Returns true or false depending on whether it's OK to flip this crtc
+ * based on the criterion that we must not have more than one implicit
+ * frame-buffer at any one time.
+ */
+bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+ if (!du->is_implicit)
+ return true;
+
+ if (dev_priv->num_implicit != 1)
+ return false;
+
+ return true;
+}
+
+/**
+ * vmw_kms_update_implicit_fb - Update the implicit fb.
+ *
+ * @dev_priv: Pointer to device-private struct.
+ * @crtc: The crtc the new implicit frame-buffer is bound to.
+ */
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_framebuffer *vfb;
+
+ lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+ if (!du->is_implicit)
+ return;
+
+ vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
+ WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
+ dev_priv->implicit_fb != vfb);
+
+ dev_priv->implicit_fb = vfb;
+}
+
+/**
+ * vmw_kms_create_implicit_placement_property - Set up the implicit placement
+ * property.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @immutable: Whether the property is immutable.
+ *
+ * Sets up the implicit placement property unless it's already set up.
+ */
+void
+vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
+ bool immutable)
+{
+ if (dev_priv->implicit_placement_property)
+ return;
+
+ dev_priv->implicit_placement_property =
+ drm_property_create_range(dev_priv->dev,
+ immutable ?
+ DRM_MODE_PROP_IMMUTABLE : 0,
+ "implicit_placement", 0, 1);
+
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index edd81503516d..57203212c501 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -178,6 +178,9 @@ struct vmw_display_unit {
int gui_x;
int gui_y;
bool is_implicit;
+ bool active_implicit;
+ int set_gui_x;
+ int set_gui_y;
};
#define vmw_crtc_to_du(x) \
@@ -254,6 +257,18 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
+void vmw_kms_del_active(struct vmw_private *dev_priv,
+ struct vmw_display_unit *du);
+void vmw_kms_add_active(struct vmw_private *dev_priv,
+ struct vmw_display_unit *du,
+ struct vmw_framebuffer *vfb);
+bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc);
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc);
+void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
+ bool immutable);
+
/*
* Legacy display unit functions - vmwgfx_ldu.c
@@ -287,6 +302,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
unsigned num_clips, int increment,
bool interruptible,
struct vmw_fence_obj **out_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b6fa44fe8929..63ccd9871ec9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -288,6 +288,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
crtc->y = set->y;
crtc->mode = *mode;
crtc->enabled = true;
+ ldu->base.set_gui_x = set->x;
+ ldu->base.set_gui_y = set->y;
vmw_ldu_add_active(dev_priv, ldu, vfb);
@@ -375,8 +377,19 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_object_attach_property(&connector->base,
- dev->mode_config.dirty_info_property,
- 1);
+ dev->mode_config.dirty_info_property,
+ 1);
+ drm_object_attach_property(&connector->base,
+ dev_priv->hotplug_mode_update_property, 1);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, 0);
+ if (dev_priv->implicit_placement_property)
+ drm_object_attach_property
+ (&connector->base,
+ dev_priv->implicit_placement_property,
+ 1);
return 0;
}
@@ -412,6 +425,8 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
if (ret != 0)
goto err_vblank_cleanup;
+ vmw_kms_create_implicit_placement_property(dev_priv, true);
+
if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_ldu_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index db082bea8daf..0ea22fd112c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -74,19 +74,6 @@ struct vmw_kms_sou_dirty_cmd {
SVGA3dCmdBlitSurfaceToScreen body;
};
-
-/*
- * Other structs.
- */
-
-struct vmw_screen_object_display {
- unsigned num_implicit;
-
- struct vmw_framebuffer *implicit_fb;
- SVGAFifoCmdDefineGMRFB cur;
- struct vmw_dma_buffer *pinned_gmrfb;
-};
-
/**
* Display unit using screen objects.
*/
@@ -97,7 +84,6 @@ struct vmw_screen_object_unit {
struct vmw_dma_buffer *buffer; /**< Backing store buffer */
bool defined;
- bool active_implicit;
};
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
@@ -116,33 +102,6 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}
-static void vmw_sou_del_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou)
-{
- struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-
- if (sou->active_implicit) {
- if (--(ld->num_implicit) == 0)
- ld->implicit_fb = NULL;
- sou->active_implicit = false;
- }
-}
-
-static void vmw_sou_add_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou,
- struct vmw_framebuffer *vfb)
-{
- struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-
- BUG_ON(!ld->num_implicit && ld->implicit_fb);
-
- if (!sou->active_implicit && sou->base.is_implicit) {
- ld->implicit_fb = vfb;
- sou->active_implicit = true;
- ld->num_implicit++;
- }
-}
-
/**
* Send the fifo command to create a screen.
*/
@@ -185,6 +144,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
cmd->obj.root.x = sou->base.gui_x;
cmd->obj.root.y = sou->base.gui_y;
}
+ sou->base.set_gui_x = cmd->obj.root.x;
+ sou->base.set_gui_y = cmd->obj.root.y;
/* Ok to assume that buffer is pinned in vram */
vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
@@ -323,13 +284,13 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return -EINVAL;
}
- /* sou only supports one fb active at the time */
+ /* Only one active implicit frame-buffer at a time. */
if (sou->base.is_implicit &&
- dev_priv->sou_priv->implicit_fb && vfb &&
- !(dev_priv->sou_priv->num_implicit == 1 &&
- sou->active_implicit) &&
- dev_priv->sou_priv->implicit_fb != vfb) {
- DRM_ERROR("Multiple framebuffers not supported\n");
+ dev_priv->implicit_fb && vfb &&
+ !(dev_priv->num_implicit == 1 &&
+ sou->base.active_implicit) &&
+ dev_priv->implicit_fb != vfb) {
+ DRM_ERROR("Multiple implicit framebuffers not supported.\n");
return -EINVAL;
}
@@ -351,7 +312,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->y = 0;
crtc->enabled = false;
- vmw_sou_del_active(dev_priv, sou);
+ vmw_kms_del_active(dev_priv, &sou->base);
vmw_sou_backing_free(dev_priv, sou);
@@ -415,7 +376,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return ret;
}
- vmw_sou_add_active(dev_priv, sou, vfb);
+ vmw_kms_add_active(dev_priv, &sou->base, vfb);
connector->encoder = encoder;
encoder->crtc = crtc;
@@ -428,39 +389,6 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return 0;
}
-/**
- * Returns if this unit can be page flipped.
- * Must be called with the mode_config mutex held.
- */
-static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
-
- if (!sou->base.is_implicit)
- return true;
-
- if (dev_priv->sou_priv->num_implicit != 1)
- return false;
-
- return true;
-}
-
-/**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
- */
-static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
-
- BUG_ON(!sou->base.is_implicit);
-
- dev_priv->sou_priv->implicit_fb =
- vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
-}
-
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -470,30 +398,27 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
struct vmw_fence_obj *fence = NULL;
- struct drm_clip_rect clips;
+ struct drm_vmw_rect vclips;
int ret;
- /* require ScreenObject support for page flipping */
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
- if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
+ if (!vmw_kms_crtc_flippable(dev_priv, crtc))
return -EINVAL;
crtc->primary->fb = fb;
/* do a full screen dirty update */
- clips.x1 = clips.y1 = 0;
- clips.x2 = fb->width;
- clips.y2 = fb->height;
+ vclips.x = crtc->x;
+ vclips.y = crtc->y;
+ vclips.w = crtc->mode.hdisplay;
+ vclips.h = crtc->mode.vdisplay;
if (vfb->dmabuf)
ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
- &clips, 1, 1,
+ NULL, &vclips, 1, 1,
true, &fence);
else
ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
- &clips, NULL, NULL,
+ NULL, &vclips, NULL,
0, 0, 1, 1, &fence);
@@ -521,7 +446,7 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
vmw_fence_obj_unreference(&fence);
if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_sou_update_implicit_fb(dev_priv, crtc);
+ vmw_kms_update_implicit_fb(dev_priv, crtc);
return ret;
@@ -563,6 +488,8 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
+ .detect = vmw_du_connector_detect,
+ .fill_modes = vmw_du_connector_fill_modes,
.set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
};
@@ -584,13 +511,12 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
encoder = &sou->base.encoder;
connector = &sou->base.connector;
- sou->active_implicit = false;
-
+ sou->base.active_implicit = false;
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
sou->base.pref_mode = NULL;
- sou->base.is_implicit = true;
+ sou->base.is_implicit = false;
drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -609,8 +535,19 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_object_attach_property(&connector->base,
- dev->mode_config.dirty_info_property,
- 1);
+ dev->mode_config.dirty_info_property,
+ 1);
+ drm_object_attach_property(&connector->base,
+ dev_priv->hotplug_mode_update_property, 1);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, 0);
+ if (dev_priv->implicit_placement_property)
+ drm_object_attach_property
+ (&connector->base,
+ dev_priv->implicit_placement_property,
+ sou->base.is_implicit);
return 0;
}
@@ -620,11 +557,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
int i, ret;
- if (dev_priv->sou_priv) {
- DRM_INFO("sou system already on\n");
- return -EINVAL;
- }
-
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
DRM_INFO("Not using screen objects,"
" missing cap SCREEN_OBJECT_2\n");
@@ -632,21 +564,19 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
}
ret = -ENOMEM;
- dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
- if (unlikely(!dev_priv->sou_priv))
- goto err_no_mem;
-
- dev_priv->sou_priv->num_implicit = 0;
- dev_priv->sou_priv->implicit_fb = NULL;
+ dev_priv->num_implicit = 0;
+ dev_priv->implicit_fb = NULL;
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
if (unlikely(ret != 0))
- goto err_free;
+ return ret;
ret = drm_mode_create_dirty_info_property(dev);
if (unlikely(ret != 0))
goto err_vblank_cleanup;
+ vmw_kms_create_implicit_placement_property(dev_priv, false);
+
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
@@ -658,10 +588,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
err_vblank_cleanup:
drm_vblank_cleanup(dev);
-err_free:
- kfree(dev_priv->sou_priv);
- dev_priv->sou_priv = NULL;
-err_no_mem:
return ret;
}
@@ -669,13 +595,8 @@ int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
drm_vblank_cleanup(dev);
- kfree(dev_priv->sou_priv);
-
return 0;
}
@@ -736,6 +657,11 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
int i;
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
cmd->header.size = sizeof(cmd->body) + region_size;
@@ -873,6 +799,11 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
*/
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
vmw_fifo_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_dmabuf_blit) *
dirty->num_hits);
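The num_hits == 0 early-outs added above look odd until you recall the FIFO contract these commit callbacks operate under: space reserved with vmw_fifo_reserve() must always be released by a matching vmw_fifo_commit(), even when no blits were generated, and committing zero bytes is how a reservation is released empty. A fragment-level sketch of the pairing (error handling elided):

    void *cmd = vmw_fifo_reserve(dev_priv, reserve_size);
    if (!cmd)
        return;                    /* reservation failed, nothing to undo */

    /* ... encode anywhere from 0 to reserve_size bytes of commands ... */

    vmw_fifo_commit(dev_priv, bytes_written);   /* bytes_written may be 0 */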
@@ -907,6 +838,8 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
* @dev_priv: Pointer to the device private structure.
* @framebuffer: Pointer to the dma-buffer backed framebuffer.
* @clips: Array of clip rects.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
* @num_clips: Number of clip rects in @clips.
* @increment: Increment to use when looping over @clips.
* @interruptible: Whether to perform waits interruptible if possible.
@@ -920,6 +853,7 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
unsigned num_clips, int increment,
bool interruptible,
struct vmw_fence_obj **out_fence)
@@ -943,7 +877,7 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
dirty.clip = vmw_sou_dmabuf_clip;
dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
num_clips;
- ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
0, 0, num_clips, increment, &dirty);
vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
@@ -965,6 +899,11 @@ out_revert:
*/
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
vmw_fifo_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_readback_blit) *
dirty->num_hits);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 4ef5ffd7189d..b949102ad864 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -96,7 +96,6 @@ struct vmw_stdu_surface_copy {
* content_vfbs dimensions, then this is a pointer into the
* corresponding field in content_vfbs. If not, then this
 * is a separate buffer to which content_vfbs will blit.
- * @content_fb: holds the rendered content, can be a surface or DMA buffer
* @content_type: content_fb type
* @defined: true if the current display unit has been initialized
*/
@@ -104,8 +103,6 @@ struct vmw_screen_target_display_unit {
struct vmw_display_unit base;
struct vmw_surface *display_srf;
- struct drm_framebuffer *content_fb;
-
enum stdu_content_type content_fb_type;
bool defined;
@@ -122,22 +119,6 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
*****************************************************************************/
/**
- * vmw_stdu_pin_display - pins the resource associated with the display surface
- *
- * @stdu: contains the display surface
- *
- * Since the display surface can either be a private surface allocated by us,
- * or it can point to the content surface, we use this function to not pin the
- * same resource twice.
- */
-static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
-{
- return vmw_resource_pin(&stdu->display_srf->res, false);
-}
-
-
-
-/**
* vmw_stdu_unpin_display - unpins the resource associated with display surface
*
* @stdu: contains the display surface
@@ -153,13 +134,7 @@ static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
struct vmw_resource *res = &stdu->display_srf->res;
vmw_resource_unpin(res);
-
- if (stdu->content_fb_type != SAME_AS_DISPLAY) {
- vmw_resource_unreference(&res);
- stdu->content_fb_type = SAME_AS_DISPLAY;
- }
-
- stdu->display_srf = NULL;
+ vmw_surface_unreference(&stdu->display_srf);
}
}
@@ -185,6 +160,9 @@ static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
*
* @dev_priv: VMW DRM device
* @stdu: display unit to create a Screen Target for
+ * @mode: The mode to set.
+ * @crtc_x: X coordinate of screen target relative to framebuffer origin.
+ * @crtc_y: Y coordinate of screen target relative to framebuffer origin.
*
 * Creates an STDU that we can use later. This function is called whenever the
* framebuffer size changes.
@@ -193,7 +171,9 @@ static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
* 0 on success, error code on failure
*/
static int vmw_stdu_define_st(struct vmw_private *dev_priv,
- struct vmw_screen_target_display_unit *stdu)
+ struct vmw_screen_target_display_unit *stdu,
+ struct drm_display_mode *mode,
+ int crtc_x, int crtc_y)
{
struct {
SVGA3dCmdHeader header;
@@ -211,17 +191,19 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.stid = stdu->base.unit;
- cmd->body.width = stdu->display_srf->base_size.width;
- cmd->body.height = stdu->display_srf->base_size.height;
+ cmd->body.width = mode->hdisplay;
+ cmd->body.height = mode->vdisplay;
cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
cmd->body.dpi = 0;
- cmd->body.xRoot = stdu->base.crtc.x;
- cmd->body.yRoot = stdu->base.crtc.y;
-
- if (!stdu->base.is_implicit) {
+ if (stdu->base.is_implicit) {
+ cmd->body.xRoot = crtc_x;
+ cmd->body.yRoot = crtc_y;
+ } else {
cmd->body.xRoot = stdu->base.gui_x;
cmd->body.yRoot = stdu->base.gui_y;
}
+ stdu->base.set_gui_x = cmd->body.xRoot;
+ stdu->base.set_gui_y = cmd->body.yRoot;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -392,126 +374,43 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
return ret;
}
-
-
/**
- * vmw_stdu_crtc_set_config - Sets a mode
+ * vmw_stdu_bind_fb - Bind an fb to a defined screen target
*
- * @set: mode parameters
- *
- * This function is the device-specific portion of the DRM CRTC mode set.
- * For the SVGA device, we do this by defining a Screen Target, binding a
- * GB Surface to that target, and finally update the screen target.
+ * @dev_priv: Pointer to a device private struct.
+ * @crtc: The crtc holding the screen target.
+ * @mode: The mode currently used by the screen target. Must be non-NULL.
+ * @new_fb: The new framebuffer to bind. Must be non-NULL.
*
* RETURNS:
- * 0 on success, error code otherwise
+ * 0 on success, error code on failure.
*/
-static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *new_fb)
{
- struct vmw_private *dev_priv;
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer *vfb;
+ struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
+ struct vmw_surface *new_display_srf = NULL;
+ enum stdu_content_type new_content_type;
struct vmw_framebuffer_surface *new_vfbs;
- struct drm_display_mode *mode;
- struct drm_framebuffer *new_fb;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
-
- if (!set || !set->crtc)
- return -EINVAL;
-
- crtc = set->crtc;
- crtc->x = set->x;
- crtc->y = set->y;
- stdu = vmw_crtc_to_stdu(crtc);
- mode = set->mode;
- new_fb = set->fb;
- dev_priv = vmw_priv(crtc->dev);
-
-
- if (set->num_connectors > 1) {
- DRM_ERROR("Too many connectors\n");
- return -EINVAL;
- }
-
- if (set->num_connectors == 1 &&
- set->connectors[0] != &stdu->base.connector) {
- DRM_ERROR("Connectors don't match %p %p\n",
- set->connectors[0], &stdu->base.connector);
- return -EINVAL;
- }
-
-
- /* Since they always map one to one these are safe */
- connector = &stdu->base.connector;
- encoder = &stdu->base.encoder;
-
-
- /*
- * After this point the CRTC will be considered off unless a new fb
- * is bound
- */
- if (stdu->defined) {
- /* Unbind current surface by binding an invalid one */
- ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
- if (unlikely(ret != 0))
- return ret;
-
- /* Update Screen Target, display will now be blank */
- if (crtc->primary->fb) {
- vmw_stdu_update_st(dev_priv, stdu);
- if (unlikely(ret != 0))
- return ret;
- }
-
- crtc->primary->fb = NULL;
- crtc->enabled = false;
- encoder->crtc = NULL;
- connector->encoder = NULL;
-
- vmw_stdu_unpin_display(stdu);
- stdu->content_fb = NULL;
- stdu->content_fb_type = SAME_AS_DISPLAY;
-
- ret = vmw_stdu_destroy_st(dev_priv, stdu);
- /* The hardware is hung, give up */
- if (unlikely(ret != 0))
- return ret;
- }
-
-
- /* Any of these conditions means the caller wants CRTC off */
- if (set->num_connectors == 0 || !mode || !new_fb)
- return 0;
-
-
- if (set->x + mode->hdisplay > new_fb->width ||
- set->y + mode->vdisplay > new_fb->height) {
- DRM_ERROR("Set outside of framebuffer\n");
- return -EINVAL;
- }
+ int ret;
- stdu->content_fb = new_fb;
- vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
+ WARN_ON_ONCE(!stdu->defined);
- if (vfb->dmabuf)
- stdu->content_fb_type = SEPARATE_DMA;
+ if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
+ new_fb->height == mode->vdisplay)
+ new_content_type = SAME_AS_DISPLAY;
+ else if (vfb->dmabuf)
+ new_content_type = SEPARATE_DMA;
+ else
+ new_content_type = SEPARATE_SURFACE;
- /*
- * If the requested mode is different than the width and height
- * of the FB or if the content buffer is a DMA buf, then allocate
- * a display FB that matches the dimension of the mode
- */
- if (mode->hdisplay != new_fb->width ||
- mode->vdisplay != new_fb->height ||
- stdu->content_fb_type != SAME_AS_DISPLAY) {
+ if (new_content_type != SAME_AS_DISPLAY &&
+ !stdu->display_srf) {
struct vmw_surface content_srf;
struct drm_vmw_size display_base_size = {0};
- struct vmw_surface *display_srf;
-
display_base_size.width = mode->hdisplay;
display_base_size.height = mode->vdisplay;
@@ -521,7 +420,7 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
* If content buffer is a DMA buf, then we have to construct
* surface info
*/
- if (stdu->content_fb_type == SEPARATE_DMA) {
+ if (new_content_type == SEPARATE_DMA) {
switch (new_fb->bits_per_pixel) {
case 32:
@@ -538,17 +437,13 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
default:
DRM_ERROR("Invalid format\n");
- ret = -EINVAL;
- goto err_unref_content;
+ return -EINVAL;
}
content_srf.flags = 0;
content_srf.mip_levels[0] = 1;
content_srf.multisample_count = 0;
} else {
-
- stdu->content_fb_type = SEPARATE_SURFACE;
-
new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
content_srf = *new_vfbs->surface;
}
@@ -563,26 +458,136 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
content_srf.multisample_count,
0,
display_base_size,
- &display_srf);
+ &new_display_srf);
if (unlikely(ret != 0)) {
- DRM_ERROR("Cannot allocate a display FB.\n");
- goto err_unref_content;
+ DRM_ERROR("Could not allocate screen target surface.\n");
+ return ret;
}
-
- stdu->display_srf = display_srf;
- } else {
+ } else if (new_content_type == SAME_AS_DISPLAY) {
new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
- stdu->display_srf = new_vfbs->surface;
+ new_display_srf = vmw_surface_reference(new_vfbs->surface);
}
+ if (new_display_srf) {
+ /* Pin new surface before flipping */
+ ret = vmw_resource_pin(&new_display_srf->res, false);
+ if (ret)
+ goto out_srf_unref;
+
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &new_display_srf->res);
+ if (ret)
+ goto out_srf_unpin;
+
+ /* Unpin and unreference old surface */
+ vmw_stdu_unpin_display(stdu);
- ret = vmw_stdu_pin_display(stdu);
- if (unlikely(ret != 0)) {
- stdu->display_srf = NULL;
- goto err_unref_content;
+ /* Transfer the reference */
+ stdu->display_srf = new_display_srf;
+ new_display_srf = NULL;
}
- vmw_svga_enable(dev_priv);
+ crtc->primary->fb = new_fb;
+ stdu->content_fb_type = new_content_type;
+ return 0;
+
+out_srf_unpin:
+ vmw_resource_unpin(&new_display_srf->res);
+out_srf_unref:
+ vmw_surface_unreference(&new_display_srf);
+ return ret;
+}
+
+/**
+ * vmw_stdu_crtc_set_config - Sets a mode
+ *
+ * @set: mode parameters
+ *
+ * This function is the device-specific portion of the DRM CRTC mode set.
+ * For the SVGA device, we do this by defining a Screen Target, binding a
+ * GB Surface to that target, and finally update the screen target.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_framebuffer *vfb;
+ struct vmw_screen_target_display_unit *stdu;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *new_fb;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ bool turning_off;
+ int ret;
+
+
+ if (!set || !set->crtc)
+ return -EINVAL;
+
+ crtc = set->crtc;
+ stdu = vmw_crtc_to_stdu(crtc);
+ mode = set->mode;
+ new_fb = set->fb;
+ dev_priv = vmw_priv(crtc->dev);
+ turning_off = set->num_connectors == 0 || !mode || !new_fb;
+ vfb = (new_fb) ? vmw_framebuffer_to_vfb(new_fb) : NULL;
+
+ if (set->num_connectors > 1) {
+ DRM_ERROR("Too many connectors\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &stdu->base.connector) {
+ DRM_ERROR("Connectors don't match %p %p\n",
+ set->connectors[0], &stdu->base.connector);
+ return -EINVAL;
+ }
+
+ if (!turning_off && (set->x + mode->hdisplay > new_fb->width ||
+ set->y + mode->vdisplay > new_fb->height)) {
+ DRM_ERROR("Set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ /* Only one active implicit frame-buffer at a time. */
+ if (!turning_off && stdu->base.is_implicit && dev_priv->implicit_fb &&
+ !(dev_priv->num_implicit == 1 && stdu->base.active_implicit)
+ && dev_priv->implicit_fb != vfb) {
+ DRM_ERROR("Multiple implicit framebuffers not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Since they always map one to one these are safe */
+ connector = &stdu->base.connector;
+ encoder = &stdu->base.encoder;
+
+ if (stdu->defined) {
+ ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (ret)
+ return ret;
+
+ vmw_stdu_unpin_display(stdu);
+ (void) vmw_stdu_update_st(dev_priv, stdu);
+ vmw_kms_del_active(dev_priv, &stdu->base);
+
+ ret = vmw_stdu_destroy_st(dev_priv, stdu);
+ if (ret)
+ return ret;
+
+ crtc->primary->fb = NULL;
+ crtc->enabled = false;
+ encoder->crtc = NULL;
+ connector->encoder = NULL;
+ stdu->content_fb_type = SAME_AS_DISPLAY;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ }
+
+ if (turning_off)
+ return 0;
/*
* Steps to displaying a surface, assume surface is already
@@ -592,35 +597,33 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
* 3. update that screen target (this is done later by
* vmw_kms_stdu_do_surface_dirty_or_present)
*/
- ret = vmw_stdu_define_st(dev_priv, stdu);
- if (unlikely(ret != 0))
- goto err_unpin_display_and_content;
+ /*
+ * Note on error handling: We can't really restore the crtc to
+	 * its original state on error, but we at least update the
+ * current state to what's submitted to hardware to enable
+ * future recovery.
+ */
+ vmw_svga_enable(dev_priv);
+ ret = vmw_stdu_define_st(dev_priv, stdu, mode, set->x, set->y);
+ if (ret)
+ return ret;
- ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
- if (unlikely(ret != 0))
- goto err_unpin_destroy_st;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ crtc->mode = *mode;
+ ret = vmw_stdu_bind_fb(dev_priv, crtc, mode, new_fb);
+ if (ret)
+ return ret;
+ vmw_kms_add_active(dev_priv, &stdu->base, vfb);
+ crtc->enabled = true;
connector->encoder = encoder;
encoder->crtc = crtc;
- crtc->mode = *mode;
- crtc->primary->fb = new_fb;
- crtc->enabled = true;
-
- return ret;
-
-err_unpin_destroy_st:
- vmw_stdu_destroy_st(dev_priv, stdu);
-err_unpin_display_and_content:
- vmw_stdu_unpin_display(stdu);
-err_unref_content:
- stdu->content_fb = NULL;
- return ret;
+ return 0;
}
-
-
/**
* vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
*
@@ -648,59 +651,34 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_screen_target_display_unit *stdu;
+ struct drm_vmw_rect vclips;
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
int ret;
- if (crtc == NULL)
- return -EINVAL;
-
dev_priv = vmw_priv(crtc->dev);
stdu = vmw_crtc_to_stdu(crtc);
- crtc->primary->fb = new_fb;
- stdu->content_fb = new_fb;
-
- if (stdu->display_srf) {
- /*
- * If the display surface is the same as the content surface
- * then remove the reference
- */
- if (stdu->content_fb_type == SAME_AS_DISPLAY) {
- if (stdu->defined) {
- /* Unbind the current surface */
- ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
- if (unlikely(ret != 0))
- goto err_out;
- }
- vmw_stdu_unpin_display(stdu);
- stdu->display_srf = NULL;
- }
- }
-
-
- if (!new_fb) {
- /* Blanks the display */
- (void) vmw_stdu_update_st(dev_priv, stdu);
-
- return 0;
- }
+ if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
+ return -EINVAL;
- if (stdu->content_fb_type == SAME_AS_DISPLAY) {
- stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
- ret = vmw_stdu_pin_display(stdu);
- if (ret) {
- stdu->display_srf = NULL;
- goto err_out;
- }
+ ret = vmw_stdu_bind_fb(dev_priv, crtc, &crtc->mode, new_fb);
+ if (ret)
+ return ret;
- /* Bind display surface */
- ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
- if (unlikely(ret != 0))
- goto err_unpin_display_and_content;
- }
+ if (stdu->base.is_implicit)
+ vmw_kms_update_implicit_fb(dev_priv, crtc);
- /* Update display surface: after this point everything is bound */
- ret = vmw_stdu_update_st(dev_priv, stdu);
- if (unlikely(ret != 0))
+ vclips.x = crtc->x;
+ vclips.y = crtc->y;
+ vclips.w = crtc->mode.hdisplay;
+ vclips.h = crtc->mode.vdisplay;
+ if (vfb->dmabuf)
+ ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL, &vclips,
+ 1, 1, true, false);
+ else
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, &vclips,
+ NULL, 0, 0, 1, 1, NULL);
+ if (ret)
return ret;
if (event) {
@@ -721,14 +699,7 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
vmw_fifo_flush(dev_priv, false);
}
- return ret;
-
-err_unpin_display_and_content:
- vmw_stdu_unpin_display(stdu);
-err_out:
- crtc->primary->fb = NULL;
- stdu->content_fb = NULL;
- return ret;
+ return 0;
}
@@ -1138,7 +1109,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
stdu->base.pref_active = (unit == 0);
stdu->base.pref_width = dev_priv->initial_width;
stdu->base.pref_height = dev_priv->initial_height;
- stdu->base.is_implicit = true;
+ stdu->base.is_implicit = false;
drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -1159,7 +1130,17 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
-
+ drm_object_attach_property(&connector->base,
+ dev_priv->hotplug_mode_update_property, 1);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, 0);
+ if (dev_priv->implicit_placement_property)
+ drm_object_attach_property
+ (&connector->base,
+ dev_priv->implicit_placement_property,
+ stdu->base.is_implicit);
return 0;
}
@@ -1224,6 +1205,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_target;
+ vmw_kms_create_implicit_placement_property(dev_priv, false);
+
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
ret = vmw_stdu_init(dev_priv, i);
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index dd2dbb9746ce..c27858ae0552 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -83,8 +83,10 @@ static int host1x_device_parse_dt(struct host1x_device *device,
if (of_match_node(driver->subdevs, np) &&
of_device_is_available(np)) {
err = host1x_subdev_add(device, np);
- if (err < 0)
+ if (err < 0) {
+ of_node_put(np);
return err;
+ }
}
}
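
The of_node_put() added here follows the usual OF iterator contract: for_each_child_of_node() takes a reference on the node it yields and drops it when advancing, so any early return from the loop body must release the current node's reference itself. A minimal sketch of the pattern (the wrapper function is hypothetical; host1x_subdev_add() is the callee patched above):

	static int add_available_subdevs(struct host1x_device *device,
					 struct device_node *parent)
	{
		struct device_node *np;
		int err;

		for_each_child_of_node(parent, np) {
			if (!of_device_is_available(np))
				continue;
			err = host1x_subdev_add(device, np);
			if (err < 0) {
				/* early exit: drop the reference held by the iterator */
				of_node_put(np);
				return err;
			}
		}

		return 0;
	}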
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 5a8c8d55317a..a18db4d5347c 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -52,8 +52,8 @@ static void host1x_pushbuffer_destroy(struct push_buffer *pb)
struct host1x *host1x = cdma_to_host1x(cdma);
if (pb->phys != 0)
- dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
- pb->mapped, pb->phys);
+ dma_free_wc(host1x->dev, pb->size_bytes + 4, pb->mapped,
+ pb->phys);
pb->mapped = NULL;
pb->phys = 0;
@@ -76,8 +76,8 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
pb->pos = 0;
/* allocate and map pushbuffer memory */
- pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
- &pb->phys, GFP_KERNEL);
+ pb->mapped = dma_alloc_wc(host1x->dev, pb->size_bytes + 4, &pb->phys,
+ GFP_KERNEL);
if (!pb->mapped)
goto fail;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 63bd63f3c7df..b4515d544039 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -225,7 +225,7 @@ unpin:
return 0;
}
-static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
+static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
{
int i = 0;
u32 last_page = ~0;
@@ -467,9 +467,8 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
size += g->words * sizeof(u32);
}
- job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
- &job->gather_copy,
- GFP_KERNEL);
+ job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
+ GFP_KERNEL);
if (!job->gather_copy_mapped) {
job->gather_copy_mapped = NULL;
return -ENOMEM;
@@ -578,9 +577,8 @@ void host1x_job_unpin(struct host1x_job *job)
job->num_unpins = 0;
if (job->gather_copy_size)
- dma_free_writecombine(job->channel->dev, job->gather_copy_size,
- job->gather_copy_mapped,
- job->gather_copy);
+ dma_free_wc(job->channel->dev, job->gather_copy_size,
+ job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
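
dma_alloc_wc() and dma_free_wc() are the renamed spellings of dma_alloc_writecombine() and dma_free_writecombine(); arguments and semantics are unchanged. A minimal sketch of the allocate/free pairing (buffer size hypothetical):

	dma_addr_t phys;
	void *cpu;

	cpu = dma_alloc_wc(dev, SZ_4K, &phys, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;
	/* ... fill the write-combined buffer, hand phys to the hardware ... */
	dma_free_wc(dev, SZ_4K, cpu, phys);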
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index f2e13eb8339f..abb98c77bad2 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1050,6 +1050,17 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
const struct ipu_platform_reg *reg = &client_reg[i];
struct platform_device *pdev;
+ struct device_node *of_node;
+
+ /* Associate subdevice with the corresponding port node */
+ of_node = of_graph_get_port_by_id(dev->of_node, i);
+ if (!of_node) {
+ dev_info(dev,
+ "no port@%d node in %s, not using %s%d\n",
+ i, dev->of_node->full_name,
+ (i / 2) ? "DI" : "CSI", i % 2);
+ continue;
+ }
pdev = platform_device_alloc(reg->name, id++);
if (!pdev) {
@@ -1059,15 +1070,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
pdev->dev.parent = dev;
- /* Associate subdevice with the corresponding port node */
- pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
- if (!pdev->dev.of_node) {
- dev_err(dev, "missing port@%d node in %s\n", i,
- dev->of_node->full_name);
- ret = -ENODEV;
- goto err_register;
- }
-
ret = platform_device_add_data(pdev, &reg->pdata,
sizeof(reg->pdata));
if (!ret)
@@ -1076,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
platform_device_put(pdev);
goto err_register;
}
+
+ /*
+ * Set of_node only after calling platform_device_add. Otherwise
+ * the platform:imx-ipuv3-crtc modalias won't be used.
+ */
+ pdev->dev.of_node = of_node;
}
return 0;
@@ -1289,10 +1297,6 @@ static int ipu_probe(struct platform_device *pdev)
ipu->irq_sync = irq_sync;
ipu->irq_err = irq_err;
- ret = ipu_irq_init(ipu);
- if (ret)
- goto out_failed_irq;
-
ret = device_reset(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to reset: %d\n", ret);
@@ -1302,6 +1306,10 @@ static int ipu_probe(struct platform_device *pdev)
if (ret)
goto out_failed_reset;
+ ret = ipu_irq_init(ipu);
+ if (ret)
+ goto out_failed_irq;
+
/* Set MCU_T to divide MCU access window into 2 */
ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
IPU_DISP_GEN);
@@ -1324,9 +1332,9 @@ static int ipu_probe(struct platform_device *pdev)
failed_add_clients:
ipu_submodules_exit(ipu);
failed_submodules_init:
-out_failed_reset:
ipu_irq_exit(ipu);
out_failed_irq:
+out_failed_reset:
clk_disable_unprepare(ipu->clk);
return ret;
}
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 883a314cd83a..6494a4d28171 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -395,60 +395,48 @@ void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
- u32 pixel_format, int stride,
- int u_offset, int v_offset)
+ unsigned int uv_stride,
+ unsigned int u_offset, unsigned int v_offset)
{
- switch (pixel_format) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YUV422P:
- ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
- break;
- case V4L2_PIX_FMT_YVU420:
- ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
- break;
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
- ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
- break;
- }
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
u32 pixel_format, int stride, int height)
{
- int u_offset, v_offset;
+ int fourcc, u_offset, v_offset;
int uv_stride = 0;
- switch (pixel_format) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
+ fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
+ switch (fourcc) {
+ case DRM_FORMAT_YUV420:
uv_stride = stride / 2;
u_offset = stride * height;
v_offset = u_offset + (uv_stride * height / 2);
- ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
- u_offset, v_offset);
break;
- case V4L2_PIX_FMT_YUV422P:
+ case DRM_FORMAT_YVU420:
+ uv_stride = stride / 2;
+ v_offset = stride * height;
+ u_offset = v_offset + (uv_stride * height / 2);
+ break;
+ case DRM_FORMAT_YUV422:
uv_stride = stride / 2;
u_offset = stride * height;
v_offset = u_offset + (uv_stride * height);
- ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
- u_offset, v_offset);
break;
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ uv_stride = stride;
u_offset = stride * height;
- ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
- u_offset, 0);
+ v_offset = 0;
break;
+ default:
+ return;
}
+ ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
@@ -684,17 +672,25 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = U_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
v_offset = V_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
+ case V4L2_PIX_FMT_YVU420:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = U_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = V_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+ v_offset, u_offset);
+ break;
case V4L2_PIX_FMT_YUV422P:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = U2_OFFSET(pix, image->rect.left,
@@ -702,8 +698,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
v_offset = V2_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV12:
@@ -712,8 +707,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
image->rect.top) - offset;
v_offset = 0;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV16:
@@ -722,8 +716,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
image->rect.top) - offset;
v_offset = 0;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_UYVY:
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index d3ad5347342c..2f29780e7c68 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -171,6 +171,7 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
u32 bus_format, u32 width)
{
struct ipu_dc_priv *priv = dc->priv;
+ int addr, sync;
u32 reg = 0;
int map;
@@ -182,41 +183,39 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
return map;
}
- if (interlaced) {
- int addr;
-
- if (dc->di)
- addr = 1;
- else
- addr = 0;
+ /*
+ * In interlaced mode we need more counters to create the asymmetric
+ * per-field VSYNC signals. The pixel active signal synchronising DC
+ * to DI moves to signal generator #6 (see ipu-di.c). In progressive
+ * mode counter #5 is used.
+ */
+ sync = interlaced ? 6 : 5;
+
+ /* Reserve 5 microcode template words for each DI */
+ if (dc->di)
+ addr = 5;
+ else
+ addr = 0;
+ if (interlaced) {
dc_link_event(dc, DC_EVT_NL, addr, 3);
dc_link_event(dc, DC_EVT_EOL, addr, 2);
dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1);
/* Init template microcode */
- dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, 6, 1);
+ dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
} else {
- if (dc->di) {
- dc_link_event(dc, DC_EVT_NL, 2, 3);
- dc_link_event(dc, DC_EVT_EOL, 3, 2);
- dc_link_event(dc, DC_EVT_NEW_DATA, 1, 1);
- /* Init template microcode */
- dc_write_tmpl(dc, 2, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
- dc_write_tmpl(dc, 3, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
- dc_write_tmpl(dc, 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
- dc_write_tmpl(dc, 1, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
- } else {
- dc_link_event(dc, DC_EVT_NL, 5, 3);
- dc_link_event(dc, DC_EVT_EOL, 6, 2);
- dc_link_event(dc, DC_EVT_NEW_DATA, 8, 1);
- /* Init template microcode */
- dc_write_tmpl(dc, 5, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
- dc_write_tmpl(dc, 6, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
- dc_write_tmpl(dc, 7, WRG, 0, map, NULL_WAVE, 0, 0, 1);
- dc_write_tmpl(dc, 8, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
- }
+ dc_link_event(dc, DC_EVT_NL, addr + 2, 3);
+ dc_link_event(dc, DC_EVT_EOL, addr + 3, 2);
+ dc_link_event(dc, DC_EVT_NEW_DATA, addr + 1, 1);
+
+ /* Init template microcode */
+ dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1);
+ dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0);
+ dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
+ dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1);
}
+
dc_link_event(dc, DC_EVT_NF, 0, 0);
dc_link_event(dc, DC_EVT_NFIELD, 0, 0);
dc_link_event(dc, DC_EVT_EOF, 0, 0);
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 042c3958e2a0..837b1ec22800 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -350,11 +350,13 @@ out:
}
EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
u32 dmfc_gen1;
+ mutex_lock(&priv->mutex);
+
dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
@@ -364,9 +366,9 @@ int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
- return 0;
+ mutex_unlock(&priv->mutex);
}
-EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel);
+EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
{
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 665ab9fd0e01..cbd7c986d926 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -74,9 +74,17 @@
* there can thus be up to three clients: Two vga clients (GPUs) and one audio
* client (on the discrete GPU). The code is mostly prepared to support
* machines with more than two GPUs should they become available.
+ *
* The GPU to which the outputs are currently switched is called the
* active client in vga_switcheroo parlance. The GPU not in use is the
- * inactive client.
+ * inactive client. When the inactive client's DRM driver is loaded,
+ * it will be unable to probe the panel's EDID and hence depends on
+ * VBIOS to provide its display modes. If the VBIOS modes are bogus or
+ * if there is no VBIOS at all (which is common on the MacBook Pro),
+ * a client may alternatively request that the DDC lines be temporarily
+ * switched to it, provided that the handler supports this. Switching
+ * only the DDC lines and not the entire output avoids unnecessary
+ * flickering.
*/
/**
@@ -126,6 +134,10 @@ static DEFINE_MUTEX(vgasr_mutex);
* (counting only vga clients, not audio clients)
* @clients: list of registered clients
* @handler: registered handler
+ * @handler_flags: flags of registered handler
+ * @mux_hw_lock: protects mux state
+ * (in particular while DDC lines are temporarily switched)
+ * @old_ddc_owner: client to which DDC lines will be switched back on unlock
*
* vga_switcheroo private data. Currently only one vga_switcheroo instance
* per system is supported.
@@ -142,6 +154,9 @@ struct vgasr_priv {
struct list_head clients;
const struct vga_switcheroo_handler *handler;
+ enum vga_switcheroo_handler_flags_t handler_flags;
+ struct mutex mux_hw_lock;
+ int old_ddc_owner;
};
#define ID_BIT_AUDIO 0x100
@@ -156,6 +171,7 @@ static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
/* only one switcheroo per system */
static struct vgasr_priv vgasr_priv = {
.clients = LIST_HEAD_INIT(vgasr_priv.clients),
+ .mux_hw_lock = __MUTEX_INITIALIZER(vgasr_priv.mux_hw_lock),
};
static bool vga_switcheroo_ready(void)
@@ -190,13 +206,15 @@ static void vga_switcheroo_enable(void)
/**
* vga_switcheroo_register_handler() - register handler
* @handler: handler callbacks
+ * @handler_flags: handler flags
*
* Register handler. Enable vga_switcheroo if two vga clients have already
* registered.
*
* Return: 0 on success, -EINVAL if a handler was already registered.
*/
-int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler)
+int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+ enum vga_switcheroo_handler_flags_t handler_flags)
{
mutex_lock(&vgasr_mutex);
if (vgasr_priv.handler) {
@@ -205,6 +223,7 @@ int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler
}
vgasr_priv.handler = handler;
+ vgasr_priv.handler_flags = handler_flags;
if (vga_switcheroo_ready()) {
pr_info("enabled\n");
vga_switcheroo_enable();
@@ -222,16 +241,33 @@ EXPORT_SYMBOL(vga_switcheroo_register_handler);
void vga_switcheroo_unregister_handler(void)
{
mutex_lock(&vgasr_mutex);
+ mutex_lock(&vgasr_priv.mux_hw_lock);
+ vgasr_priv.handler_flags = 0;
vgasr_priv.handler = NULL;
if (vgasr_priv.active) {
pr_info("disabled\n");
vga_switcheroo_debugfs_fini(&vgasr_priv);
vgasr_priv.active = false;
}
+ mutex_unlock(&vgasr_priv.mux_hw_lock);
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
+/**
+ * vga_switcheroo_handler_flags() - obtain handler flags
+ *
+ * Helper for clients to obtain the handler flags bitmask.
+ *
+ * Return: Handler flags. A value of 0 means that no handler is registered
+ * or that the handler has no special capabilities.
+ */
+enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void)
+{
+ return vgasr_priv.handler_flags;
+}
+EXPORT_SYMBOL(vga_switcheroo_handler_flags);
+
static int register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
enum vga_switcheroo_client_id id, bool active,
@@ -413,6 +449,76 @@ void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
/**
+ * vga_switcheroo_lock_ddc() - temporarily switch DDC lines to a given client
+ * @pdev: client pci device
+ *
+ * Temporarily switch DDC lines to the client identified by @pdev
+ * (but leave the outputs otherwise switched to where they are).
+ * This allows the inactive client to probe EDID. The DDC lines must
+ * afterwards be switched back by calling vga_switcheroo_unlock_ddc(),
+ * even if this function returns an error.
+ *
+ * Return: Previous DDC owner on success or a negative int on error.
+ * Specifically, %-ENODEV if no handler has registered or if the handler
+ * does not support switching the DDC lines. Also, a negative value
+ * returned by the handler is propagated back to the caller.
+ * The return value has merely an informational purpose for any caller
+ * which might be interested in it. It is acceptable to ignore the return
+ * value and simply rely on the result of the subsequent EDID probe,
+ * which will be %NULL if DDC switching failed.
+ */
+int vga_switcheroo_lock_ddc(struct pci_dev *pdev)
+{
+ enum vga_switcheroo_client_id id;
+
+ mutex_lock(&vgasr_priv.mux_hw_lock);
+ if (!vgasr_priv.handler || !vgasr_priv.handler->switch_ddc) {
+ vgasr_priv.old_ddc_owner = -ENODEV;
+ return -ENODEV;
+ }
+
+ id = vgasr_priv.handler->get_client_id(pdev);
+ vgasr_priv.old_ddc_owner = vgasr_priv.handler->switch_ddc(id);
+ return vgasr_priv.old_ddc_owner;
+}
+EXPORT_SYMBOL(vga_switcheroo_lock_ddc);
+
+/**
+ * vga_switcheroo_unlock_ddc() - switch DDC lines back to previous owner
+ * @pdev: client pci device
+ *
+ * Switch DDC lines back to the previous owner after calling
+ * vga_switcheroo_lock_ddc(). This must be called even if
+ * vga_switcheroo_lock_ddc() returned an error.
+ *
+ * Return: Previous DDC owner on success (i.e. the client identifier of @pdev)
+ * or a negative int on error.
+ * Specifically, %-ENODEV if no handler has registered or if the handler
+ * does not support switching the DDC lines. Also, a negative value
+ * returned by the handler is propagated back to the caller.
+ * Finally, invoking this function without calling vga_switcheroo_lock_ddc()
+ * first is not allowed and will result in %-EINVAL.
+ */
+int vga_switcheroo_unlock_ddc(struct pci_dev *pdev)
+{
+ enum vga_switcheroo_client_id id;
+ int ret = vgasr_priv.old_ddc_owner;
+
+ if (WARN_ON_ONCE(!mutex_is_locked(&vgasr_priv.mux_hw_lock)))
+ return -EINVAL;
+
+ if (vgasr_priv.old_ddc_owner >= 0) {
+ id = vgasr_priv.handler->get_client_id(pdev);
+ if (vgasr_priv.old_ddc_owner != id)
+ ret = vgasr_priv.handler->switch_ddc(
+ vgasr_priv.old_ddc_owner);
+ }
+ mutex_unlock(&vgasr_priv.mux_hw_lock);
+ return ret;
+}
+EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
+
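
From a client driver's point of view, the intended call sequence looks roughly like the sketch below (the VGA_SWITCHEROO_CAN_SWITCH_DDC flag and the drm_get_edid() call are assumptions about the surrounding API, not defined in this file):

	struct edid *edid = NULL;

	if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) {
		vga_switcheroo_lock_ddc(pdev);
		edid = drm_get_edid(connector, ddc_adapter);
		/* mandatory, even if locking the DDC lines failed */
		vga_switcheroo_unlock_ddc(pdev);
	}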
+/**
* DOC: Manual switching and manual power control
*
* In this mode of use, the file /sys/kernel/debug/vgaswitcheroo/switch
@@ -549,7 +655,9 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
console_unlock();
}
+ mutex_lock(&vgasr_priv.mux_hw_lock);
ret = vgasr_priv.handler->switchto(new_client->id);
+ mutex_unlock(&vgasr_priv.mux_hw_lock);
if (ret)
return ret;
@@ -664,7 +772,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
vgasr_priv.delayed_switch_active = false;
if (just_mux) {
+ mutex_lock(&vgasr_priv.mux_hw_lock);
ret = vgasr_priv.handler->switchto(client_id);
+ mutex_unlock(&vgasr_priv.mux_hw_lock);
goto out;
}
@@ -876,8 +986,11 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
if (ret)
return ret;
mutex_lock(&vgasr_mutex);
- if (vgasr_priv.handler->switchto)
+ if (vgasr_priv.handler->switchto) {
+ mutex_lock(&vgasr_priv.mux_hw_lock);
vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
+ mutex_unlock(&vgasr_priv.mux_hw_lock);
+ }
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
mutex_unlock(&vgasr_mutex);
return 0;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 513a16cc6e18..411722570035 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -196,6 +196,12 @@ config HID_PRODIKEYS
multimedia keyboard, but will lack support for the musical keyboard
and some additional multimedia keys.
+config HID_CMEDIA
+ tristate "CMedia CM6533 HID audio jack controls"
+ depends on HID
+ ---help---
+ Support for CMedia CM6533 HID audio jack controls.
+
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
depends on USB_HID && I2C && GPIOLIB
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 00011fee08b9..be56ab6f75a8 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
+obj-$(CONFIG_HID_CMEDIA) += hid-cmedia.o
obj-$(CONFIG_HID_CORSAIR) += hid-corsair.o
obj-$(CONFIG_HID_CP2112) += hid-cp2112.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
diff --git a/drivers/hid/hid-cmedia.c b/drivers/hid/hid-cmedia.c
new file mode 100644
index 000000000000..7230f8513681
--- /dev/null
+++ b/drivers/hid/hid-cmedia.c
@@ -0,0 +1,168 @@
+/*
+ * HID driver for CMedia CM6533 audio jack controls
+ *
+ * Copyright (C) 2015 Ben Chen <ben_chen@bizlinktech.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Ben Chen");
+MODULE_DESCRIPTION("CM6533 HID jack controls");
+MODULE_LICENSE("GPL");
+
+#define CM6533_JD_TYPE_COUNT 1
+#define CM6533_JD_RAWEV_LEN 16
+#define CM6533_JD_SFX_OFFSET 8
+
+/*
+ * CM6533 audio jack HID raw events:
+ *
+ * Plug in:
+ * 01000600 002083xx 080008c0 10000000
+ * about 3 seconds later...
+ * 01000a00 002083xx 08000380 10000000
+ * 01000600 002083xx 08000380 10000000
+ *
+ * Plug out:
+ * 01000400 002083xx 080008c0 x0000000
+ */
+
+static const u8 ji_sfx[] = { 0x08, 0x00, 0x08, 0xc0 };
+static const u8 ji_in[] = { 0x01, 0x00, 0x06, 0x00 };
+static const u8 ji_out[] = { 0x01, 0x00, 0x04, 0x00 };
+
+static int jack_switch_types[CM6533_JD_TYPE_COUNT] = {
+ SW_HEADPHONE_INSERT,
+};
+
+struct cmhid {
+ struct input_dev *input_dev;
+ struct hid_device *hid;
+ unsigned short switch_map[CM6533_JD_TYPE_COUNT];
+};
+
+static void hp_ev(struct hid_device *hid, struct cmhid *cm, int value)
+{
+ input_report_switch(cm->input_dev, SW_HEADPHONE_INSERT, value);
+ input_sync(cm->input_dev);
+}
+
+static int cmhid_raw_event(struct hid_device *hid, struct hid_report *report,
+ u8 *data, int len)
+{
+ struct cmhid *cm = hid_get_drvdata(hid);
+
+ if (len != CM6533_JD_RAWEV_LEN)
+ goto out;
+ if (memcmp(data+CM6533_JD_SFX_OFFSET, ji_sfx, sizeof(ji_sfx)))
+ goto out;
+
+ if (!memcmp(data, ji_out, sizeof(ji_out))) {
+ hp_ev(hid, cm, 0);
+ goto out;
+ }
+ if (!memcmp(data, ji_in, sizeof(ji_in))) {
+ hp_ev(hid, cm, 1);
+ goto out;
+ }
+
+out:
+ return 0;
+}
+
+static int cmhid_input_configured(struct hid_device *hid,
+ struct hid_input *hidinput)
+{
+ struct input_dev *input_dev = hidinput->input;
+ struct cmhid *cm = hid_get_drvdata(hid);
+ int i;
+
+ cm->input_dev = input_dev;
+ memcpy(cm->switch_map, jack_switch_types, sizeof(cm->switch_map));
+ input_dev->evbit[0] = BIT(EV_SW);
+ for (i = 0; i < CM6533_JD_TYPE_COUNT; i++)
+ input_set_capability(cm->input_dev,
+ EV_SW, jack_switch_types[i]);
+ return 0;
+}
+
+static int cmhid_input_mapping(struct hid_device *hid,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
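+ /* tell hid-input to ignore all usages; jack events are decoded in cmhid_raw_event() */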
+ return -1;
+}
+
+static int cmhid_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+ int ret;
+ struct cmhid *cm;
+
+ cm = kzalloc(sizeof(struct cmhid), GFP_KERNEL);
+ if (!cm) {
+ ret = -ENOMEM;
+ goto allocfail;
+ }
+
+ cm->hid = hid;
+
+ hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
+ hid_set_drvdata(hid, cm);
+
+ ret = hid_parse(hid);
+ if (ret) {
+ hid_err(hid, "parse failed\n");
+ goto fail;
+ }
+
+ ret = hid_hw_start(hid, HID_CONNECT_DEFAULT | HID_CONNECT_HIDDEV_FORCE);
+ if (ret) {
+ hid_err(hid, "hw start failed\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ kfree(cm);
+allocfail:
+ return ret;
+}
+
+static void cmhid_remove(struct hid_device *hid)
+{
+ struct cmhid *cm = hid_get_drvdata(hid);
+
+ hid_hw_stop(hid);
+ kfree(cm);
+}
+
+static const struct hid_device_id cmhid_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, cmhid_devices);
+
+static struct hid_driver cmhid_driver = {
+ .name = "cm6533_jd",
+ .id_table = cmhid_devices,
+ .raw_event = cmhid_raw_event,
+ .input_configured = cmhid_input_configured,
+ .probe = cmhid_probe,
+ .remove = cmhid_remove,
+ .input_mapping = cmhid_input_mapping,
+};
+module_hid_driver(cmhid_driver);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 7e89288b1537..4f9c5c6deaed 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1075,7 +1075,7 @@ static u32 s32ton(__s32 value, unsigned n)
* Extract/implement a data field from/to a little endian report (bit array).
*
* Code sort-of follows HID spec:
- * http://www.usb.org/developers/devclass_docs/HID1_11.pdf
+ * http://www.usb.org/developers/hidpage/HID1_11.pdf
*
* While the USB HID spec allows unlimited length bit fields in "report
* descriptors", most devices never use more than 16 bits.
@@ -1083,20 +1083,37 @@ static u32 s32ton(__s32 value, unsigned n)
* Search linux-kernel and linux-usb-devel archives for "hid-core extract".
*/
-__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
- unsigned offset, unsigned n)
-{
- u64 x;
+static u32 __extract(u8 *report, unsigned offset, int n)
+{
+ unsigned int idx = offset / 8;
+ unsigned int bit_nr = 0;
+ unsigned int bit_shift = offset % 8;
+ int bits_to_copy = 8 - bit_shift;
+ u32 value = 0;
+ u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
+
+ while (n > 0) {
+ value |= ((u32)report[idx] >> bit_shift) << bit_nr;
+ n -= bits_to_copy;
+ bit_nr += bits_to_copy;
+ bits_to_copy = 8;
+ bit_shift = 0;
+ idx++;
+ }
+
+ return value & mask;
+}
- if (n > 32)
+u32 hid_field_extract(const struct hid_device *hid, u8 *report,
+ unsigned offset, unsigned n)
+{
+ if (n > 32) {
hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
n, current->comm);
+ n = 32;
+ }
- report += offset >> 3; /* adjust byte index */
- offset &= 7; /* now only need bit offset into one byte */
- x = get_unaligned_le64(report);
- x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */
- return (u32) x;
+ return __extract(report, offset, n);
}
EXPORT_SYMBOL_GPL(hid_field_extract);
@@ -1106,31 +1123,56 @@ EXPORT_SYMBOL_GPL(hid_field_extract);
* The data mangled in the bit stream remains in little endian
* order the whole time. It makes more sense to talk about
* endianness of register values by considering a register
- * a "cached" copy of the little endiad bit stream.
+ * a "cached" copy of the little endian bit stream.
*/
-static void implement(const struct hid_device *hid, __u8 *report,
- unsigned offset, unsigned n, __u32 value)
+
+static void __implement(u8 *report, unsigned offset, int n, u32 value)
+{
+ unsigned int idx = offset / 8;
+ unsigned int size = offset + n;
+ unsigned int bit_shift = offset % 8;
+ int bits_to_set = 8 - bit_shift;
+ u8 bit_mask = 0xff << bit_shift;
+
+ while (n - bits_to_set >= 0) {
+ report[idx] &= ~bit_mask;
+ report[idx] |= value << bit_shift;
+ value >>= bits_to_set;
+ n -= bits_to_set;
+ bits_to_set = 8;
+ bit_mask = 0xff;
+ bit_shift = 0;
+ idx++;
+ }
+
+ /* last partial byte, if any */
+ if (n) {
+ if (size % 8)
+ bit_mask &= (1U << (size % 8)) - 1;
+ report[idx] &= ~bit_mask;
+ report[idx] |= (value << bit_shift) & bit_mask;
+ }
+}
+
+static void implement(const struct hid_device *hid, u8 *report,
+ unsigned offset, unsigned n, u32 value)
{
- u64 x;
- u64 m = (1ULL << n) - 1;
+ u64 m;
- if (n > 32)
+ if (n > 32) {
hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
__func__, n, current->comm);
+ n = 32;
+ }
+ m = (1ULL << n) - 1;
if (value > m)
hid_warn(hid, "%s() called with too large value %d! (%s)\n",
__func__, value, current->comm);
WARN_ON(value > m);
value &= m;
- report += offset >> 3;
- offset &= 7;
-
- x = get_unaligned_le64(report);
- x &= ~(m << offset);
- x |= ((u64)value) << offset;
- put_unaligned_le64(x, report);
+ __implement(report, offset, n, value);
}
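
As an illustrative round-trip through the two helpers (values hypothetical), a 12-bit field written at bit offset 5 is laid out LSB-first across three bytes and reads back unchanged:

	u8 report[4] = { 0 };

	__implement(report, 5, 12, 0xabc);
	/* 0xabc << 5 == 0x15780, stored little endian:
	 * report == { 0x80, 0x57, 0x01, 0x00 } */
	WARN_ON(__extract(report, 5, 12) != 0xabc);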
/*
@@ -1251,6 +1293,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
/* Ignore report if ErrorRollOver */
if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
value[n] >= min && value[n] <= max &&
+ value[n] - min < field->maxusage &&
field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
goto exit;
}
@@ -1263,11 +1306,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
}
if (field->value[n] >= min && field->value[n] <= max
+ && field->value[n] - min < field->maxusage
&& field->usage[field->value[n] - min].hid
&& search(value, field->value[n], count))
hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
if (value[n] >= min && value[n] <= max
+ && value[n] - min < field->maxusage
&& field->usage[value[n] - min].hid
&& search(field->value, value[n], count))
hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
@@ -1891,6 +1936,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
@@ -1919,6 +1965,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
@@ -1932,6 +1979,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
@@ -2003,6 +2053,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
@@ -2051,6 +2102,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
{ }
};
@@ -2615,9 +2667,10 @@ int hid_add_device(struct hid_device *hdev)
/*
* Scan generic devices for group information
*/
- if (hid_ignore_special_drivers ||
- (!hdev->group &&
- !hid_match_id(hdev, hid_have_special_driver))) {
+ if (hid_ignore_special_drivers) {
+ hdev->group = HID_GROUP_GENERIC;
+ } else if (!hdev->group &&
+ !hid_match_id(hdev, hid_have_special_driver)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 58551964ce86..717704e9ae07 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -595,6 +595,9 @@ static int corsair_input_mapping(struct hid_device *dev,
{
int gkey;
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_KEYBOARD)
+ return 0;
+
gkey = corsair_usage_to_gkey(usage->hid & HID_USAGE);
if (gkey != 0) {
hid_map_usage_clear(input, usage, bit, max, EV_KEY,
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 1d78ba3b799e..8fd4bf77f264 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -151,7 +151,7 @@ static inline int drff_init(struct hid_device *hid)
* descriptor. In any case, it's a wonder it works on Windows.
*
* Usage Page (Desktop), ; Generic desktop controls (01h)
- * Usage (Joystik), ; Joystik (04h, application collection)
+ * Usage (Joystick), ; Joystick (04h, application collection)
* Collection (Application),
* Collection (Logical),
* Report Size (8),
@@ -207,7 +207,7 @@ static inline int drff_init(struct hid_device *hid)
/* Fixed report descriptor for PID 0x011 joystick */
static __u8 pid0011_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
- 0x09, 0x04, /* Usage (Joystik), */
+ 0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x14, /* Logical Minimum (0), */
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6ff6e78ac54..0238f0169e48 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -61,6 +61,9 @@
#define USB_VENDOR_ID_AIREN 0x1a2c
#define USB_DEVICE_ID_AIREN_SLIMPLUS 0x0002
+#define USB_VENDOR_ID_AKAI 0x2011
+#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
+
#define USB_VENDOR_ID_ALCOR 0x058f
#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
@@ -246,6 +249,7 @@
#define USB_VENDOR_ID_CMEDIA 0x0d8c
#define USB_DEVICE_ID_CM109 0x000e
+#define USB_DEVICE_ID_CM6533 0x0022
#define USB_VENDOR_ID_CODEMERCS 0x07c0
#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
@@ -255,6 +259,7 @@
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
#define USB_VENDOR_ID_CREATIVELABS 0x041e
+#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
#define USB_VENDOR_ID_CVTOUCH 0x1ff7
@@ -672,6 +677,7 @@
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048
#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
#define USB_DEVICE_ID_MS_NE4K 0x00db
#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc
#define USB_DEVICE_ID_MS_LK6K 0x00f9
@@ -679,7 +685,10 @@
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
#define USB_DEVICE_ID_MS_NE7K 0x071d
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600 0x0750
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
+#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
@@ -800,6 +809,7 @@
#define USB_VENDOR_ID_QUANTA 0x0408
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
#define USB_VENDOR_ID_RAZER 0x1532
@@ -839,6 +849,7 @@
#define USB_VENDOR_ID_SEMICO 0x1a2c
#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD 0x0023
+#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2 0x0027
#define USB_VENDOR_ID_SENNHEISER 0x1395
#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
@@ -872,6 +883,9 @@
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
#define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER 0x1000
+#define USB_VENDOR_ID_SINO_LITE 0x1345
+#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
+
#define USB_VENDOR_ID_SOUNDGRAPH 0x15c2
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046
@@ -1047,7 +1061,7 @@
#define USB_DEVICE_ID_RI_KA_WEBMAIL 0x1320 /* Webmail Notifier */
#define USB_VENDOR_ID_MULTIPLE_1781 0x1781
-#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD 0x0a8d
+#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD 0x0a9d
#define USB_VENDOR_ID_DRACAL_RAPHNET 0x289b
#define USB_DEVICE_ID_RAPHNET_2NES2SNES 0x0002
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 0125e356bd8d..1ac4ff4d57a6 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -184,21 +184,31 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
unsigned char byte2, unsigned char byte3)
{
int ret;
- unsigned char buf[] = {0x18, byte2, byte3};
+ unsigned char *buf;
+
+ buf = kzalloc(3, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = 0x18;
+ buf[1] = byte2;
+ buf[2] = byte3;
switch (hdev->product) {
case USB_DEVICE_ID_LENOVO_CUSBKBD:
- ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
break;
case USB_DEVICE_ID_LENOVO_CBTKBD:
- ret = hid_hw_output_report(hdev, buf, sizeof(buf));
+ ret = hid_hw_output_report(hdev, buf, 3);
break;
default:
ret = -EINVAL;
break;
}
+ kfree(buf);
+
return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */
}
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c690fae02cf8..feb2be71f77c 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -61,7 +61,7 @@
*/
static __u8 df_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
@@ -127,7 +127,7 @@ static __u8 df_rdesc_fixed[] = {
static __u8 dfp_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
@@ -175,7 +175,7 @@ static __u8 dfp_rdesc_fixed[] = {
static __u8 fv_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
@@ -242,7 +242,7 @@ static __u8 fv_rdesc_fixed[] = {
static __u8 momo_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
@@ -288,7 +288,7 @@ static __u8 momo_rdesc_fixed[] = {
static __u8 momo2_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index bd2ab476c65e..2e2515a4c070 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -15,13 +15,19 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kfifo.h>
#include <linux/input/mt.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/fixp-arith.h>
#include <asm/unaligned.h>
+#include "usbhid/usbhid.h"
#include "hid-ids.h"
MODULE_LICENSE("GPL");
@@ -773,6 +779,589 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
}
}
+/* -------------------------------------------------------------------------- */
+/* 0x8123: Force feedback support */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_FF_GET_INFO 0x01
+#define HIDPP_FF_RESET_ALL 0x11
+#define HIDPP_FF_DOWNLOAD_EFFECT 0x21
+#define HIDPP_FF_SET_EFFECT_STATE 0x31
+#define HIDPP_FF_DESTROY_EFFECT 0x41
+#define HIDPP_FF_GET_APERTURE 0x51
+#define HIDPP_FF_SET_APERTURE 0x61
+#define HIDPP_FF_GET_GLOBAL_GAINS 0x71
+#define HIDPP_FF_SET_GLOBAL_GAINS 0x81
+
+#define HIDPP_FF_EFFECT_STATE_GET 0x00
+#define HIDPP_FF_EFFECT_STATE_STOP 0x01
+#define HIDPP_FF_EFFECT_STATE_PLAY 0x02
+#define HIDPP_FF_EFFECT_STATE_PAUSE 0x03
+
+#define HIDPP_FF_EFFECT_CONSTANT 0x00
+#define HIDPP_FF_EFFECT_PERIODIC_SINE 0x01
+#define HIDPP_FF_EFFECT_PERIODIC_SQUARE 0x02
+#define HIDPP_FF_EFFECT_PERIODIC_TRIANGLE 0x03
+#define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP 0x04
+#define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN 0x05
+#define HIDPP_FF_EFFECT_SPRING 0x06
+#define HIDPP_FF_EFFECT_DAMPER 0x07
+#define HIDPP_FF_EFFECT_FRICTION 0x08
+#define HIDPP_FF_EFFECT_INERTIA 0x09
+#define HIDPP_FF_EFFECT_RAMP 0x0A
+
+#define HIDPP_FF_EFFECT_AUTOSTART 0x80
+
+#define HIDPP_FF_EFFECTID_NONE -1
+#define HIDPP_FF_EFFECTID_AUTOCENTER -2
+
+#define HIDPP_FF_MAX_PARAMS 20
+#define HIDPP_FF_RESERVED_SLOTS 1
+
+struct hidpp_ff_private_data {
+ struct hidpp_device *hidpp;
+ u8 feature_index;
+ u8 version;
+ u16 gain;
+ s16 range;
+ u8 slot_autocenter;
+ u8 num_effects;
+ int *effect_ids;
+ struct workqueue_struct *wq;
+ atomic_t workqueue_size;
+};
+
+struct hidpp_ff_work_data {
+ struct work_struct work;
+ struct hidpp_ff_private_data *data;
+ int effect_id;
+ u8 command;
+ u8 params[HIDPP_FF_MAX_PARAMS];
+ u8 size;
+};
+
+static const signed short hiddpp_ff_effects[] = {
+ FF_CONSTANT,
+ FF_PERIODIC,
+ FF_SINE,
+ FF_SQUARE,
+ FF_SAW_UP,
+ FF_SAW_DOWN,
+ FF_TRIANGLE,
+ FF_SPRING,
+ FF_DAMPER,
+ FF_AUTOCENTER,
+ FF_GAIN,
+ -1
+};
+
+static const signed short hiddpp_ff_effects_v2[] = {
+ FF_RAMP,
+ FF_FRICTION,
+ FF_INERTIA,
+ -1
+};
+
+static const u8 HIDPP_FF_CONDITION_CMDS[] = {
+ HIDPP_FF_EFFECT_SPRING,
+ HIDPP_FF_EFFECT_FRICTION,
+ HIDPP_FF_EFFECT_DAMPER,
+ HIDPP_FF_EFFECT_INERTIA
+};
+
+static const char *HIDPP_FF_CONDITION_NAMES[] = {
+ "spring",
+ "friction",
+ "damper",
+ "inertia"
+};
+
+static u8 hidpp_ff_find_effect(struct hidpp_ff_private_data *data, int effect_id)
+{
+ int i;
+
+ for (i = 0; i < data->num_effects; i++)
+ if (data->effect_ids[i] == effect_id)
+ return i+1;
+
+ return 0;
+}
+
+static void hidpp_ff_work_handler(struct work_struct *w)
+{
+ struct hidpp_ff_work_data *wd = container_of(w, struct hidpp_ff_work_data, work);
+ struct hidpp_ff_private_data *data = wd->data;
+ struct hidpp_report response;
+ u8 slot;
+ int ret;
+
+ /* add slot number if needed */
+ switch (wd->effect_id) {
+ case HIDPP_FF_EFFECTID_AUTOCENTER:
+ wd->params[0] = data->slot_autocenter;
+ break;
+ case HIDPP_FF_EFFECTID_NONE:
+ /* leave slot as zero */
+ break;
+ default:
+ /* find current slot for effect */
+ wd->params[0] = hidpp_ff_find_effect(data, wd->effect_id);
+ break;
+ }
+
+ /* send command and wait for reply */
+ ret = hidpp_send_fap_command_sync(data->hidpp, data->feature_index,
+ wd->command, wd->params, wd->size, &response);
+
+ if (ret) {
+ hid_err(data->hidpp->hid_dev, "Failed to send command to device!\n");
+ goto out;
+ }
+
+ /* parse return data */
+ switch (wd->command) {
+ case HIDPP_FF_DOWNLOAD_EFFECT:
+ slot = response.fap.params[0];
+ if (slot > 0 && slot <= data->num_effects) {
+ if (wd->effect_id >= 0)
+ /* regular effect uploaded */
+ data->effect_ids[slot-1] = wd->effect_id;
+ else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
+ /* autocenter spring uploaded */
+ data->slot_autocenter = slot;
+ }
+ break;
+ case HIDPP_FF_DESTROY_EFFECT:
+ if (wd->effect_id >= 0)
+ /* regular effect destroyed */
+ data->effect_ids[wd->params[0]-1] = -1;
+ else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
+ /* autocenter spring destroyed */
+ data->slot_autocenter = 0;
+ break;
+ case HIDPP_FF_SET_GLOBAL_GAINS:
+ data->gain = (wd->params[0] << 8) + wd->params[1];
+ break;
+ case HIDPP_FF_SET_APERTURE:
+ data->range = (wd->params[0] << 8) + wd->params[1];
+ break;
+ default:
+ /* no action needed */
+ break;
+ }
+
+out:
+ atomic_dec(&data->workqueue_size);
+ kfree(wd);
+}
+
+static int hidpp_ff_queue_work(struct hidpp_ff_private_data *data, int effect_id, u8 command, u8 *params, u8 size)
+{
+ struct hidpp_ff_work_data *wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+ int s;
+
+ if (!wd)
+ return -ENOMEM;
+
+ INIT_WORK(&wd->work, hidpp_ff_work_handler);
+
+ wd->data = data;
+ wd->effect_id = effect_id;
+ wd->command = command;
+ wd->size = size;
+ memcpy(wd->params, params, size);
+
+ atomic_inc(&data->workqueue_size);
+ queue_work(data->wq, &wd->work);
+
+ /* warn about excessive queue size */
+ s = atomic_read(&data->workqueue_size);
+ if (s >= 20 && s % 20 == 0)
+ hid_warn(data->hidpp->hid_dev, "Force feedback command queue contains %d commands, causing substantial delays!", s);
+
+ return 0;
+}
+
+static int hidpp_ff_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 params[20];
+ u8 size;
+ int force;
+
+ /* set common parameters */
+ params[2] = effect->replay.length >> 8;
+ params[3] = effect->replay.length & 255;
+ params[4] = effect->replay.delay >> 8;
+ params[5] = effect->replay.delay & 255;
+
+ switch (effect->type) {
+ case FF_CONSTANT:
+ force = (effect->u.constant.level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+ params[1] = HIDPP_FF_EFFECT_CONSTANT;
+ params[6] = force >> 8;
+ params[7] = force & 255;
+ params[8] = effect->u.constant.envelope.attack_level >> 7;
+ params[9] = effect->u.constant.envelope.attack_length >> 8;
+ params[10] = effect->u.constant.envelope.attack_length & 255;
+ params[11] = effect->u.constant.envelope.fade_level >> 7;
+ params[12] = effect->u.constant.envelope.fade_length >> 8;
+ params[13] = effect->u.constant.envelope.fade_length & 255;
+ size = 14;
+ dbg_hid("Uploading constant force level=%d in dir %d = %d\n",
+ effect->u.constant.level,
+ effect->direction, force);
+ dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+ effect->u.constant.envelope.attack_level,
+ effect->u.constant.envelope.attack_length,
+ effect->u.constant.envelope.fade_level,
+ effect->u.constant.envelope.fade_length);
+ break;
+ case FF_PERIODIC:
+ {
+ switch (effect->u.periodic.waveform) {
+ case FF_SINE:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_SINE;
+ break;
+ case FF_SQUARE:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_SQUARE;
+ break;
+ case FF_SAW_UP:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP;
+ break;
+ case FF_SAW_DOWN:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN;
+ break;
+ case FF_TRIANGLE:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_TRIANGLE;
+ break;
+ default:
+ hid_err(data->hidpp->hid_dev, "Unexpected periodic waveform type %i!\n", effect->u.periodic.waveform);
+ return -EINVAL;
+ }
+ force = (effect->u.periodic.magnitude * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+ params[6] = effect->u.periodic.magnitude >> 8;
+ params[7] = effect->u.periodic.magnitude & 255;
+ params[8] = effect->u.periodic.offset >> 8;
+ params[9] = effect->u.periodic.offset & 255;
+ params[10] = effect->u.periodic.period >> 8;
+ params[11] = effect->u.periodic.period & 255;
+ params[12] = effect->u.periodic.phase >> 8;
+ params[13] = effect->u.periodic.phase & 255;
+ params[14] = effect->u.periodic.envelope.attack_level >> 7;
+ params[15] = effect->u.periodic.envelope.attack_length >> 8;
+ params[16] = effect->u.periodic.envelope.attack_length & 255;
+ params[17] = effect->u.periodic.envelope.fade_level >> 7;
+ params[18] = effect->u.periodic.envelope.fade_length >> 8;
+ params[19] = effect->u.periodic.envelope.fade_length & 255;
+ size = 20;
+ dbg_hid("Uploading periodic force mag=%d/dir=%d, offset=%d, period=%d ms, phase=%d\n",
+ effect->u.periodic.magnitude, effect->direction,
+ effect->u.periodic.offset,
+ effect->u.periodic.period,
+ effect->u.periodic.phase);
+ dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+ effect->u.periodic.envelope.attack_level,
+ effect->u.periodic.envelope.attack_length,
+ effect->u.periodic.envelope.fade_level,
+ effect->u.periodic.envelope.fade_length);
+ break;
+ }
+ case FF_RAMP:
+ params[1] = HIDPP_FF_EFFECT_RAMP;
+ force = (effect->u.ramp.start_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+ params[6] = force >> 8;
+ params[7] = force & 255;
+ force = (effect->u.ramp.end_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+ params[8] = force >> 8;
+ params[9] = force & 255;
+ params[10] = effect->u.ramp.envelope.attack_level >> 7;
+ params[11] = effect->u.ramp.envelope.attack_length >> 8;
+ params[12] = effect->u.ramp.envelope.attack_length & 255;
+ params[13] = effect->u.ramp.envelope.fade_level >> 7;
+ params[14] = effect->u.ramp.envelope.fade_length >> 8;
+ params[15] = effect->u.ramp.envelope.fade_length & 255;
+ size = 16;
+ dbg_hid("Uploading ramp force level=%d -> %d in dir %d = %d\n",
+ effect->u.ramp.start_level,
+ effect->u.ramp.end_level,
+ effect->direction, force);
+ dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+ effect->u.ramp.envelope.attack_level,
+ effect->u.ramp.envelope.attack_length,
+ effect->u.ramp.envelope.fade_level,
+ effect->u.ramp.envelope.fade_length);
+ break;
+ case FF_FRICTION:
+ case FF_INERTIA:
+ case FF_SPRING:
+ case FF_DAMPER:
+ params[1] = HIDPP_FF_CONDITION_CMDS[effect->type - FF_SPRING];
+ params[6] = effect->u.condition[0].left_saturation >> 9;
+ params[7] = (effect->u.condition[0].left_saturation >> 1) & 255;
+ params[8] = effect->u.condition[0].left_coeff >> 8;
+ params[9] = effect->u.condition[0].left_coeff & 255;
+ params[10] = effect->u.condition[0].deadband >> 9;
+ params[11] = (effect->u.condition[0].deadband >> 1) & 255;
+ params[12] = effect->u.condition[0].center >> 8;
+ params[13] = effect->u.condition[0].center & 255;
+ params[14] = effect->u.condition[0].right_coeff >> 8;
+ params[15] = effect->u.condition[0].right_coeff & 255;
+ params[16] = effect->u.condition[0].right_saturation >> 9;
+ params[17] = (effect->u.condition[0].right_saturation >> 1) & 255;
+ size = 18;
+ dbg_hid("Uploading %s force left coeff=%d, left sat=%d, right coeff=%d, right sat=%d\n",
+ HIDPP_FF_CONDITION_NAMES[effect->type - FF_SPRING],
+ effect->u.condition[0].left_coeff,
+ effect->u.condition[0].left_saturation,
+ effect->u.condition[0].right_coeff,
+ effect->u.condition[0].right_saturation);
+ dbg_hid(" deadband=%d, center=%d\n",
+ effect->u.condition[0].deadband,
+ effect->u.condition[0].center);
+ break;
+ default:
+ hid_err(data->hidpp->hid_dev, "Unexpected force type %i!\n", effect->type);
+ return -EINVAL;
+ }
+
+ return hidpp_ff_queue_work(data, effect->id, HIDPP_FF_DOWNLOAD_EFFECT, params, size);
+}
+
+static int hidpp_ff_playback(struct input_dev *dev, int effect_id, int value)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 params[2];
+
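+ /* params[0] is left for the slot number, presumably filled in when the command is sent */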
+ params[1] = value ? HIDPP_FF_EFFECT_STATE_PLAY : HIDPP_FF_EFFECT_STATE_STOP;
+
+ dbg_hid("St%sing playback of effect %d.\n", value?"art":"opp", effect_id);
+
+ return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_SET_EFFECT_STATE, params, ARRAY_SIZE(params));
+}
+
+static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 slot = 0;
+
+ dbg_hid("Erasing effect %d.\n", effect_id);
+
+ return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_DESTROY_EFFECT, &slot, 1);
+}
+
+static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 params[18];
+
+ dbg_hid("Setting autocenter to %d.\n", magnitude);
+
+ /* start a standard spring effect */
+ params[1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART;
+ /* zero delay and duration */
+ params[2] = params[3] = params[4] = params[5] = 0;
+ /* set coeff to 25% of saturation */
+ params[8] = params[14] = magnitude >> 11;
+ params[9] = params[15] = (magnitude >> 3) & 255;
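+ /* set saturation to the full magnitude, scaled to 15 bits */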
+ params[6] = params[16] = magnitude >> 9;
+ params[7] = params[17] = (magnitude >> 1) & 255;
+ /* zero deadband and center */
+ params[10] = params[11] = params[12] = params[13] = 0;
+
+ hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_AUTOCENTER, HIDPP_FF_DOWNLOAD_EFFECT, params, ARRAY_SIZE(params));
+}
+
+static void hidpp_ff_set_gain(struct input_dev *dev, u16 gain)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 params[4];
+
+ dbg_hid("Setting gain to %d.\n", gain);
+
+ params[0] = gain >> 8;
+ params[1] = gain & 255;
+ params[2] = 0; /* no boost */
+ params[3] = 0;
+
+ hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_NONE, HIDPP_FF_SET_GLOBAL_GAINS, params, ARRAY_SIZE(params));
+}
+
+static ssize_t hidpp_ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hid = to_hid_device(dev);
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ struct input_dev *idev = hidinput->input;
+ struct hidpp_ff_private_data *data = idev->ff->private;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", data->range);
+}
+
+static ssize_t hidpp_ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hid = to_hid_device(dev);
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ struct input_dev *idev = hidinput->input;
+ struct hidpp_ff_private_data *data = idev->ff->private;
+ u8 params[2];
+ int range = simple_strtoul(buf, NULL, 10);
+
+ range = clamp(range, 180, 900);
+
+ params[0] = range >> 8;
+ params[1] = range & 0x00FF;
+
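+ /* the aperture is a global setting, so no effect slot is passed */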
+ hidpp_ff_queue_work(data, -1, HIDPP_FF_SET_APERTURE, params, ARRAY_SIZE(params));
+
+ return count;
+}
+
+static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp_ff_range_show, hidpp_ff_range_store);
+
+static void hidpp_ff_destroy(struct ff_device *ff)
+{
+ struct hidpp_ff_private_data *data = ff->private;
+
+ kfree(data->effect_ids);
+}
+
+static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+{
+ struct hid_device *hid = hidpp->hid_dev;
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ struct input_dev *dev = hidinput->input;
+ const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
+ const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ struct ff_device *ff;
+ struct hidpp_report response;
+ struct hidpp_ff_private_data *data;
+ int error, j, num_slots;
+ u8 version;
+
+ if (!dev) {
+ hid_err(hid, "Struct input_dev not set!\n");
+ return -EINVAL;
+ }
+
+ /* Get firmware release */
+ version = bcdDevice & 255;
+
+ /* Set supported force feedback capabilities */
+ for (j = 0; hiddpp_ff_effects[j] >= 0; j++)
+ set_bit(hiddpp_ff_effects[j], dev->ffbit);
+ if (version > 1)
+ for (j = 0; hiddpp_ff_effects_v2[j] >= 0; j++)
+ set_bit(hiddpp_ff_effects_v2[j], dev->ffbit);
+
+ /* Read number of slots available in device */
+ error = hidpp_send_fap_command_sync(hidpp, feature_index,
+ HIDPP_FF_GET_INFO, NULL, 0, &response);
+ if (error) {
+ if (error < 0)
+ return error;
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, error);
+ return -EPROTO;
+ }
+
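+ /* the first GET_INFO byte is the slot count; some slots stay reserved for the driver, e.g. for autocenter */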
+ num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
+
+ error = input_ff_create(dev, num_slots);
+
+ if (error) {
+ hid_err(dev, "Failed to create FF device!\n");
+ return error;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL);
+ if (!data->effect_ids) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ data->hidpp = hidpp;
+ data->feature_index = feature_index;
+ data->version = version;
+ data->slot_autocenter = 0;
+ data->num_effects = num_slots;
+ for (j = 0; j < num_slots; j++)
+ data->effect_ids[j] = -1;
+
+ ff = dev->ff;
+ ff->private = data;
+
+ ff->upload = hidpp_ff_upload_effect;
+ ff->erase = hidpp_ff_erase_effect;
+ ff->playback = hidpp_ff_playback;
+ ff->set_gain = hidpp_ff_set_gain;
+ ff->set_autocenter = hidpp_ff_set_autocenter;
+ ff->destroy = hidpp_ff_destroy;
+
+ /* reset all forces */
+ error = hidpp_send_fap_command_sync(hidpp, feature_index,
+ HIDPP_FF_RESET_ALL, NULL, 0, &response);
+
+ /* Read current Range */
+ error = hidpp_send_fap_command_sync(hidpp, feature_index,
+ HIDPP_FF_GET_APERTURE, NULL, 0, &response);
+ if (error)
+ hid_warn(hidpp->hid_dev, "Failed to read range from device!\n");
+ data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]);
+
+ /* Create sysfs interface */
+ error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
+ if (error)
+ hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error);
+
+ /* Read the current gain values */
+ error = hidpp_send_fap_command_sync(hidpp, feature_index,
+ HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response);
+ if (error)
+ hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n");
+ data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]);
+ /* ignore boost value at response.fap.params[2] */
+
+ /* init the hardware command queue */
+ data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+ atomic_set(&data->workqueue_size, 0);
+
+ /* initialize with zero autocenter to get wheel in usable state */
+ hidpp_ff_set_autocenter(dev, 0);
+
+ hid_info(hid, "Force feeback support loaded (firmware release %d).\n", version);
+
+ return 0;
+}
+
+static int hidpp_ff_deinit(struct hid_device *hid)
+{
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ struct input_dev *dev = hidinput->input;
+ struct hidpp_ff_private_data *data;
+
+ if (!dev) {
+ hid_err(hid, "Struct input_dev not found!\n");
+ return -EINVAL;
+ }
+
+ hid_info(hid, "Unloading HID++ force feedback.\n");
+ data = dev->ff->private;
+ if (!data) {
+ hid_err(hid, "Private data not found!\n");
+ return -EINVAL;
+ }
+
+ destroy_workqueue(data->wq);
+ device_remove_file(&hid->dev, &dev_attr_range);
+
+ return 0;
+}
+
+
/* ************************************************************************** */
/* */
/* Device Support */
@@ -1301,121 +1890,22 @@ static int k400_connect(struct hid_device *hdev, bool connected)
#define HIDPP_PAGE_G920_FORCE_FEEDBACK 0x8123
-/* Using session ID = 1 */
-#define CMD_G920_FORCE_GET_APERTURE 0x51
-#define CMD_G920_FORCE_SET_APERTURE 0x61
-
-struct g920_private_data {
- u8 force_feature;
- u16 range;
-};
-
-static ssize_t g920_range_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct hid_device *hid = to_hid_device(dev);
- struct hidpp_device *hidpp = hid_get_drvdata(hid);
- struct g920_private_data *pdata;
-
- pdata = hidpp->private_data;
- if (!pdata) {
- hid_err(hid, "Private driver data not found!\n");
- return -EINVAL;
- }
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", pdata->range);
-}
-
-static ssize_t g920_range_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hid_device *hid = to_hid_device(dev);
- struct hidpp_device *hidpp = hid_get_drvdata(hid);
- struct g920_private_data *pdata;
- struct hidpp_report response;
- u8 params[2];
- int ret;
- u16 range = simple_strtoul(buf, NULL, 10);
-
- pdata = hidpp->private_data;
- if (!pdata) {
- hid_err(hid, "Private driver data not found!\n");
- return -EINVAL;
- }
-
- if (range < 180)
- range = 180;
- else if (range > 900)
- range = 900;
-
- params[0] = range >> 8;
- params[1] = range & 0x00FF;
-
- ret = hidpp_send_fap_command_sync(hidpp, pdata->force_feature,
- CMD_G920_FORCE_SET_APERTURE, params, 2, &response);
- if (ret)
- return ret;
-
- pdata->range = range;
- return count;
-}
-
-static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, g920_range_show, g920_range_store);
-
-static int g920_allocate(struct hid_device *hdev)
-{
- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct g920_private_data *pdata;
-
- pdata = devm_kzalloc(&hdev->dev, sizeof(struct g920_private_data),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- hidpp->private_data = pdata;
-
- return 0;
-}
-
static int g920_get_config(struct hidpp_device *hidpp)
{
- struct g920_private_data *pdata = hidpp->private_data;
- struct hidpp_report response;
u8 feature_type;
u8 feature_index;
int ret;
- pdata = hidpp->private_data;
- if (!pdata) {
- hid_err(hidpp->hid_dev, "Private driver data not found!\n");
- return -EINVAL;
- }
-
/* Find feature and store for later use */
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
&feature_index, &feature_type);
if (ret)
return ret;
- pdata->force_feature = feature_index;
-
- /* Read current Range */
- ret = hidpp_send_fap_command_sync(hidpp, feature_index,
- CMD_G920_FORCE_GET_APERTURE, NULL, 0, &response);
- if (ret > 0) {
- hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
- __func__, ret);
- return -EPROTO;
- }
- if (ret)
- return ret;
-
- pdata->range = get_unaligned_be16(&response.fap.params[0]);
-
- /* Create sysfs interface */
- ret = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
+ ret = hidpp_ff_init(hidpp, feature_index);
if (ret)
- hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d\n", ret);
+ hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n",
+ ret);
return 0;
}
@@ -1739,10 +2229,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = k400_allocate(hdev);
if (ret)
goto allocate_fail;
- } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- ret = g920_allocate(hdev);
- if (ret)
- goto allocate_fail;
}
INIT_WORK(&hidpp->work, delayed_work_cb);
@@ -1825,7 +2311,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hid_hw_open_failed:
hid_device_io_stop(hdev);
if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- device_remove_file(&hdev->dev, &dev_attr_range);
hid_hw_close(hdev);
hid_hw_stop(hdev);
}
@@ -1843,7 +2328,7 @@ static void hidpp_remove(struct hid_device *hdev)
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- device_remove_file(&hdev->dev, &dev_attr_range);
+ hidpp_ff_deinit(hdev);
hid_hw_close(hdev);
}
hid_hw_stop(hdev);
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 77a2cf3e4afe..e924d555536c 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -272,6 +272,12 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_PRESENTER },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
.driver_data = MS_ERGONOMY | MS_RDESC_3K },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
@@ -286,6 +292,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD),
+ .driver_data = MS_ERGONOMY},
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 296d4991560e..c741f5e50a66 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -396,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
td->is_buttonpad = true;
break;
+ case 0xff0000c5:
+ /* Retrieve the Win8 blob once to enable some devices */
+ if (usage->usage_index == 0)
+ mt_get_feature(hdev, field->report);
+ break;
}
}
@@ -1133,6 +1138,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
+ if (ret)
+ dev_warn(&hdev->dev, "Cannot allocate sysfs group for %s\n",
+ hdev->name);
mt_set_maxcontacts(hdev);
mt_set_input_mode(hdev);
@@ -1145,8 +1153,31 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
#ifdef CONFIG_PM
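+/* Release all active touch slots so no contact stays stuck across a reset */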
+static void mt_release_contacts(struct hid_device *hid)
+{
+ struct hid_input *hidinput;
+
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ struct input_dev *input_dev = hidinput->input;
+ struct input_mt *mt = input_dev->mt;
+ int i;
+
+ if (mt) {
+ for (i = 0; i < mt->num_slots; i++) {
+ input_mt_slot(input_dev, i);
+ input_mt_report_slot_state(input_dev,
+ MT_TOOL_FINGER,
+ false);
+ }
+ input_mt_sync_frame(input_dev);
+ input_sync(input_dev);
+ }
+ }
+}
+
static int mt_reset_resume(struct hid_device *hdev)
{
+ mt_release_contacts(hdev);
mt_set_maxcontacts(hdev);
mt_set_input_mode(hdev);
return 0;
diff --git a/drivers/hid/hid-penmount.c b/drivers/hid/hid-penmount.c
index c11dce85cd18..d90383f788e2 100644
--- a/drivers/hid/hid-penmount.c
+++ b/drivers/hid/hid-penmount.c
@@ -23,8 +23,12 @@ static int penmount_input_mapping(struct hid_device *hdev,
struct hid_usage *usage, unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
- hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
- return 1;
+ if (((usage->hid - 1) & HID_USAGE) == 0) {
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ } else {
+ return -1;
+ }
}
return 0;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 67cd059a8f46..9cd2ca34a6be 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -594,6 +594,9 @@ static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
int ret;
u8 buf[RMI_F11_CTRL_REG_COUNT];
+ if (!(data->device_flags & RMI_DEVICE))
+ return 0;
+
ret = rmi_read_block(hdev, data->f11.control_base_addr, buf,
RMI_F11_CTRL_REG_COUNT);
if (ret)
@@ -613,6 +616,9 @@ static int rmi_post_reset(struct hid_device *hdev)
struct rmi_data *data = hid_get_drvdata(hdev);
int ret;
+ if (!(data->device_flags & RMI_DEVICE))
+ return 0;
+
ret = rmi_reset_attn_mode(hdev);
if (ret) {
hid_err(hdev, "can not set rmi mode\n");
@@ -640,6 +646,11 @@ static int rmi_post_reset(struct hid_device *hdev)
static int rmi_post_resume(struct hid_device *hdev)
{
+ struct rmi_data *data = hid_get_drvdata(hdev);
+
+ if (!(data->device_flags & RMI_DEVICE))
+ return 0;
+
return rmi_reset_attn_mode(hdev);
}
#endif /* CONFIG_PM */
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 9b8db0e0ef1c..310436a54a3f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -50,6 +50,7 @@
#define MOTION_CONTROLLER_BT BIT(8)
#define NAVIGATION_CONTROLLER_USB BIT(9)
#define NAVIGATION_CONTROLLER_BT BIT(10)
+#define SINO_LITE_CONTROLLER BIT(11)
#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
#define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
@@ -74,7 +75,7 @@
* axis values. Additionally, the controller only has 20 actual, physical axes
* so there are several unused axes in between the used ones.
*/
-static __u8 sixaxis_rdesc[] = {
+static u8 sixaxis_rdesc[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -152,7 +153,7 @@ static __u8 sixaxis_rdesc[] = {
};
/* PS/3 Motion controller */
-static __u8 motion_rdesc[] = {
+static u8 motion_rdesc[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -249,9 +250,9 @@ static __u8 motion_rdesc[] = {
};
/* PS/3 Navigation controller */
-static __u8 navigation_rdesc[] = {
+static u8 navigation_rdesc[] = {
0x05, 0x01, /* Usage Page (Desktop), */
- 0x09, 0x04, /* Usage (Joystik), */
+ 0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x85, 0x01, /* Report ID (1), */
@@ -809,7 +810,7 @@ static u8 dualshock4_bt_rdesc[] = {
0xC0 /* End Collection */
};
-static __u8 ps3remote_rdesc[] = {
+static u8 ps3remote_rdesc[] = {
0x05, 0x01, /* GUsagePage Generic Desktop */
0x09, 0x05, /* LUsage 0x05 [Game Pad] */
0xA1, 0x01, /* MCollection Application (mouse, keyboard) */
@@ -817,14 +818,18 @@ static __u8 ps3remote_rdesc[] = {
/* Use collection 1 for joypad buttons */
0xA1, 0x02, /* MCollection Logical (interrelated data) */
- /* Ignore the 1st byte, maybe it is used for a controller
- * number but it's not needed for correct operation */
+ /*
+ * Ignore the 1st byte, maybe it is used for a controller
+ * number but it's not needed for correct operation
+ */
0x75, 0x08, /* GReportSize 0x08 [8] */
0x95, 0x01, /* GReportCount 0x01 [1] */
0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
- /* Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
- * buttons multiple keypresses are allowed */
+ /*
+ * Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
+ * buttons multiple keypresses are allowed
+ */
0x05, 0x09, /* GUsagePage Button */
0x19, 0x01, /* LUsageMinimum 0x01 [Button 1 (primary/trigger)] */
0x29, 0x18, /* LUsageMaximum 0x18 [Button 24] */
@@ -849,8 +854,10 @@ static __u8 ps3remote_rdesc[] = {
0x95, 0x01, /* GReportCount 0x01 [1] */
0x80, /* MInput */
- /* Ignore bytes from 6th to 11th, 6th to 10th are always constant at
- * 0xff and 11th is for press indication */
+ /*
+ * Ignore bytes from 6th to 11th, 6th to 10th are always constant at
+ * 0xff and 11th is for press indication
+ */
0x75, 0x08, /* GReportSize 0x08 [8] */
0x95, 0x06, /* GReportCount 0x06 [6] */
0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
@@ -929,7 +936,7 @@ static const unsigned int buzz_keymap[] = {
/*
* The controller has 4 remote buzzers, each with one LED and 5
* buttons.
- *
+ *
* We use the mapping chosen by the controller, which is:
*
* Key Offset
@@ -943,15 +950,15 @@ static const unsigned int buzz_keymap[] = {
* So, for example, the orange button on the third buzzer is mapped to
* BTN_TRIGGER_HAPPY14
*/
- [ 1] = BTN_TRIGGER_HAPPY1,
- [ 2] = BTN_TRIGGER_HAPPY2,
- [ 3] = BTN_TRIGGER_HAPPY3,
- [ 4] = BTN_TRIGGER_HAPPY4,
- [ 5] = BTN_TRIGGER_HAPPY5,
- [ 6] = BTN_TRIGGER_HAPPY6,
- [ 7] = BTN_TRIGGER_HAPPY7,
- [ 8] = BTN_TRIGGER_HAPPY8,
- [ 9] = BTN_TRIGGER_HAPPY9,
+ [1] = BTN_TRIGGER_HAPPY1,
+ [2] = BTN_TRIGGER_HAPPY2,
+ [3] = BTN_TRIGGER_HAPPY3,
+ [4] = BTN_TRIGGER_HAPPY4,
+ [5] = BTN_TRIGGER_HAPPY5,
+ [6] = BTN_TRIGGER_HAPPY6,
+ [7] = BTN_TRIGGER_HAPPY7,
+ [8] = BTN_TRIGGER_HAPPY8,
+ [9] = BTN_TRIGGER_HAPPY9,
[10] = BTN_TRIGGER_HAPPY10,
[11] = BTN_TRIGGER_HAPPY11,
[12] = BTN_TRIGGER_HAPPY12,
@@ -973,33 +980,33 @@ static enum power_supply_property sony_battery_props[] = {
};
struct sixaxis_led {
- __u8 time_enabled; /* the total time the led is active (0xff means forever) */
- __u8 duty_length; /* how long a cycle is in deciseconds (0 means "really fast") */
- __u8 enabled;
- __u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */
- __u8 duty_on; /* % of duty_length the led is on (0xff mean 100%) */
+ u8 time_enabled; /* the total time the led is active (0xff means forever) */
+ u8 duty_length; /* how long a cycle is in deciseconds (0 means "really fast") */
+ u8 enabled;
+ u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */
+ u8 duty_on; /* % of duty_length the led is on (0xff mean 100%) */
} __packed;
struct sixaxis_rumble {
- __u8 padding;
- __u8 right_duration; /* Right motor duration (0xff means forever) */
- __u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */
- __u8 left_duration; /* Left motor duration (0xff means forever) */
- __u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */
+ u8 padding;
+ u8 right_duration; /* Right motor duration (0xff means forever) */
+ u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */
+ u8 left_duration; /* Left motor duration (0xff means forever) */
+ u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */
} __packed;
struct sixaxis_output_report {
- __u8 report_id;
+ u8 report_id;
struct sixaxis_rumble rumble;
- __u8 padding[4];
- __u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */
+ u8 padding[4];
+ u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */
struct sixaxis_led led[4]; /* LEDx at (4 - x) */
struct sixaxis_led _reserved; /* LED5, not actually soldered */
} __packed;
union sixaxis_output_report_01 {
struct sixaxis_output_report data;
- __u8 buf[36];
+ u8 buf[36];
};
struct motion_output_report_02 {
@@ -1028,30 +1035,30 @@ struct sony_sc {
struct led_classdev *leds[MAX_LEDS];
unsigned long quirks;
struct work_struct state_worker;
- void(*send_output_report)(struct sony_sc*);
+ void (*send_output_report)(struct sony_sc *);
struct power_supply *battery;
struct power_supply_desc battery_desc;
int device_id;
- __u8 *output_report_dmabuf;
+ u8 *output_report_dmabuf;
#ifdef CONFIG_SONY_FF
- __u8 left;
- __u8 right;
+ u8 left;
+ u8 right;
#endif
- __u8 mac_address[6];
- __u8 worker_initialized;
- __u8 cable_state;
- __u8 battery_charging;
- __u8 battery_capacity;
- __u8 led_state[MAX_LEDS];
- __u8 resume_led_state[MAX_LEDS];
- __u8 led_delay_on[MAX_LEDS];
- __u8 led_delay_off[MAX_LEDS];
- __u8 led_count;
+ u8 mac_address[6];
+ u8 worker_initialized;
+ u8 cable_state;
+ u8 battery_charging;
+ u8 battery_capacity;
+ u8 led_state[MAX_LEDS];
+ u8 resume_led_state[MAX_LEDS];
+ u8 led_delay_on[MAX_LEDS];
+ u8 led_delay_off[MAX_LEDS];
+ u8 led_count;
};
-static __u8 *sixaxis_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *sixaxis_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
*rsize = sizeof(sixaxis_rdesc);
@@ -1072,7 +1079,7 @@ static u8 *navigation_fixup(struct hid_device *hdev, u8 *rdesc,
return navigation_rdesc;
}
-static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *ps3remote_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
*rsize = sizeof(ps3remote_rdesc);
@@ -1113,11 +1120,14 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
-static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
+ if (sc->quirks & SINO_LITE_CONTROLLER)
+ return rdesc;
+
/*
* Some Sony RF receivers wrongly declare the mouse pointer as a
* a constant non-data variable.
@@ -1164,12 +1174,12 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
-static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
- static const __u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
+ static const u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
unsigned long flags;
int offset;
- __u8 cable_state, battery_capacity, battery_charging;
+ u8 cable_state, battery_capacity, battery_charging;
/*
* The sixaxis is charging if the battery value is 0xee
@@ -1184,7 +1194,7 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
battery_charging = !(rd[offset] & 0x01);
cable_state = 1;
} else {
- __u8 index = rd[offset] <= 5 ? rd[offset] : 5;
+ u8 index = rd[offset] <= 5 ? rd[offset] : 5;
battery_capacity = sixaxis_battery_capacity[index];
battery_charging = 0;
cable_state = 0;
@@ -1197,14 +1207,14 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
spin_unlock_irqrestore(&sc->lock, flags);
}
-static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
unsigned long flags;
int n, offset;
- __u8 cable_state, battery_capacity, battery_charging;
+ u8 cable_state, battery_capacity, battery_charging;
/*
* Battery and touchpad data starts at byte 30 in the USB report and
@@ -1254,7 +1264,7 @@ static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
* follows the data for the first.
*/
for (n = 0; n < 2; n++) {
- __u16 x, y;
+ u16 x, y;
x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
@@ -1270,7 +1280,7 @@ static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
}
static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
- __u8 *rd, int size)
+ u8 *rd, int size)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
@@ -1394,7 +1404,7 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
{
const int buf_size =
max(SIXAXIS_REPORT_0xF2_SIZE, SIXAXIS_REPORT_0xF5_SIZE);
- __u8 *buf;
+ u8 *buf;
int ret;
buf = kmalloc(buf_size, GFP_KERNEL);
@@ -1420,8 +1430,10 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
}
ret = hid_hw_output_report(hdev, buf, 1);
- if (ret < 0)
- hid_err(hdev, "can't set operational mode: step 3\n");
+ if (ret < 0) {
+ hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
+ ret = 0;
+ }
out:
kfree(buf);
@@ -1431,8 +1443,8 @@ out:
static int sixaxis_set_operational_bt(struct hid_device *hdev)
{
- static const __u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
- __u8 *buf;
+ static const u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
+ u8 *buf;
int ret;
buf = kmemdup(report, sizeof(report), GFP_KERNEL);
@@ -1453,7 +1465,7 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev)
*/
static int dualshock4_set_operational_bt(struct hid_device *hdev)
{
- __u8 *buf;
+ u8 *buf;
int ret;
buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
@@ -1470,7 +1482,7 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
static void sixaxis_set_leds_from_id(struct sony_sc *sc)
{
- static const __u8 sixaxis_leds[10][4] = {
+ static const u8 sixaxis_leds[10][4] = {
{ 0x01, 0x00, 0x00, 0x00 },
{ 0x00, 0x01, 0x00, 0x00 },
{ 0x00, 0x00, 0x01, 0x00 },
@@ -1497,7 +1509,7 @@ static void sixaxis_set_leds_from_id(struct sony_sc *sc)
static void dualshock4_set_leds_from_id(struct sony_sc *sc)
{
/* The first 4 color/index entries match what the PS4 assigns */
- static const __u8 color_code[7][3] = {
+ static const u8 color_code[7][3] = {
/* Blue */ { 0x00, 0x00, 0x01 },
/* Red */ { 0x01, 0x00, 0x00 },
/* Green */ { 0x00, 0x01, 0x00 },
@@ -1525,7 +1537,7 @@ static void buzz_set_leds(struct sony_sc *sc)
&hdev->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next,
struct hid_report, list);
- __s32 *value = report->field[0]->value;
+ s32 *value = report->field[0]->value;
BUILD_BUG_ON(MAX_LEDS < 4);
@@ -1619,7 +1631,7 @@ static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on,
struct hid_device *hdev = to_hid_device(dev);
struct sony_sc *drv_data = hid_get_drvdata(hdev);
int n;
- __u8 new_on, new_off;
+ u8 new_on, new_off;
if (!drv_data) {
hid_err(hdev, "No device data\n");
@@ -1690,8 +1702,8 @@ static int sony_leds_init(struct sony_sc *sc)
const char *name_fmt;
static const char * const ds4_name_str[] = { "red", "green", "blue",
"global" };
- __u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
- __u8 use_hw_blink[MAX_LEDS] = { 0 };
+ u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
+ u8 use_hw_blink[MAX_LEDS] = { 0 };
BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
@@ -1719,7 +1731,7 @@ static int sony_leds_init(struct sony_sc *sc)
name_len = 0;
name_fmt = "%s:%s";
} else if (sc->quirks & NAVIGATION_CONTROLLER) {
- static const __u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00};
+ static const u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00};
memcpy(sc->led_state, navigation_leds, sizeof(navigation_leds));
sc->led_count = 1;
@@ -1796,7 +1808,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
static const union sixaxis_output_report_01 default_report = {
.buf = {
0x01,
- 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0x01, 0xff, 0x00, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0x27, 0x10, 0x00, 0x32,
0xff, 0x27, 0x10, 0x00, 0x32,
@@ -1842,7 +1854,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
}
}
- hid_hw_raw_request(sc->hdev, report->report_id, (__u8 *)report,
+ hid_hw_raw_request(sc->hdev, report->report_id, (u8 *)report,
sizeof(struct sixaxis_output_report),
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
}
@@ -1850,7 +1862,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
static void dualshock4_send_output_report(struct sony_sc *sc)
{
struct hid_device *hdev = sc->hdev;
- __u8 *buf = sc->output_report_dmabuf;
+ u8 *buf = sc->output_report_dmabuf;
int offset;
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
@@ -1910,7 +1922,7 @@ static void motion_send_output_report(struct sony_sc *sc)
report->rumble = max(sc->right, sc->left);
#endif
- hid_hw_output_report(hdev, (__u8 *)report, MOTION_REPORT_0x02_SIZE);
+ hid_hw_output_report(hdev, (u8 *)report, MOTION_REPORT_0x02_SIZE);
}
static inline void sony_send_output_report(struct sony_sc *sc)
@@ -1922,6 +1934,7 @@ static inline void sony_send_output_report(struct sony_sc *sc)
static void sony_state_worker(struct work_struct *work)
{
struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
+
sc->send_output_report(sc);
}
@@ -2142,7 +2155,7 @@ static int sony_get_bt_devaddr(struct sony_sc *sc)
static int sony_check_add(struct sony_sc *sc)
{
- __u8 *buf = NULL;
+ u8 *buf = NULL;
int n, ret;
if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) ||
@@ -2253,7 +2266,7 @@ static void sony_release_device_id(struct sony_sc *sc)
}
static inline void sony_init_output_report(struct sony_sc *sc,
- void(*send_output_report)(struct sony_sc*))
+ void (*send_output_report)(struct sony_sc *))
{
sc->send_output_report = send_output_report;
@@ -2441,7 +2454,7 @@ static int sony_suspend(struct hid_device *hdev, pm_message_t message)
/*
* On suspend save the current LED state,
* stop running force-feedback and blank the LEDS.
- */
+ */
if (SONY_LED_SUPPORT || SONY_FF_SUPPORT) {
struct sony_sc *sc = hid_get_drvdata(hdev);
@@ -2501,8 +2514,10 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = VAIO_RDESC_CONSTANT },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
.driver_data = VAIO_RDESC_CONSTANT },
- /* Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
- * Logitech joystick from the device descriptor. */
+ /*
+ * Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
+ * Logitech joystick from the device descriptor.
+ */
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER),
.driver_data = BUZZ_CONTROLLER },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER),
@@ -2521,6 +2536,9 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ /* Nyko Core Controller for PS3 */
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
+ .driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
index b95d3978c272..847a497cd472 100644
--- a/drivers/hid/hid-thingm.c
+++ b/drivers/hid/hid-thingm.c
@@ -14,7 +14,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include "hid-ids.h"
@@ -56,7 +55,6 @@ struct thingm_rgb {
struct thingm_led red;
struct thingm_led green;
struct thingm_led blue;
- struct work_struct work;
u8 num;
};
@@ -79,9 +77,13 @@ static int thingm_send(struct thingm_device *tdev, u8 buf[REPORT_SIZE])
buf[0], buf[1], buf[2], buf[3], buf[4],
buf[5], buf[6], buf[7], buf[8]);
+ mutex_lock(&tdev->lock);
+
ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ mutex_unlock(&tdev->lock);
+
return ret < 0 ? ret : 0;
}
@@ -89,16 +91,31 @@ static int thingm_recv(struct thingm_device *tdev, u8 buf[REPORT_SIZE])
{
int ret;
+ /*
+ * A read consists of two operations: sending the read command
+ * and the actual read from the device. Use the mutex to protect
+ * the full sequence of both operations.
+ */
+ mutex_lock(&tdev->lock);
+
+ ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret < 0)
+ goto err;
+
ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
if (ret < 0)
- return ret;
+ goto err;
+
+ ret = 0;
hid_dbg(tdev->hdev, "<- %d %c %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx\n",
buf[0], buf[1], buf[2], buf[3], buf[4],
buf[5], buf[6], buf[7], buf[8]);
-
- return 0;
+err:
+ mutex_unlock(&tdev->lock);
+ return ret;
}
static int thingm_version(struct thingm_device *tdev)
@@ -106,10 +123,6 @@ static int thingm_version(struct thingm_device *tdev)
u8 buf[REPORT_SIZE] = { REPORT_ID, 'v', 0, 0, 0, 0, 0, 0, 0 };
int err;
- err = thingm_send(tdev, buf);
- if (err)
- return err;
-
err = thingm_recv(tdev, buf);
if (err)
return err;
@@ -131,25 +144,17 @@ static int thingm_write_color(struct thingm_rgb *rgb)
return thingm_send(rgb->tdev, buf);
}
-static void thingm_work(struct work_struct *work)
-{
- struct thingm_rgb *rgb = container_of(work, struct thingm_rgb, work);
-
- mutex_lock(&rgb->tdev->lock);
-
- if (thingm_write_color(rgb))
- hid_err(rgb->tdev->hdev, "failed to write color\n");
-
- mutex_unlock(&rgb->tdev->lock);
-}
-
-static void thingm_led_set(struct led_classdev *ldev,
- enum led_brightness brightness)
+static int thingm_led_set(struct led_classdev *ldev,
+ enum led_brightness brightness)
{
struct thingm_led *led = container_of(ldev, struct thingm_led, ldev);
+ int ret;
+
+ ret = thingm_write_color(led->rgb);
+ if (ret)
+ hid_err(led->rgb->tdev->hdev, "failed to write color\n");
- /* the ledclass has already stored the brightness value */
- schedule_work(&led->rgb->work);
+ return ret;
}
static int thingm_init_rgb(struct thingm_rgb *rgb)
@@ -162,10 +167,11 @@ static int thingm_init_rgb(struct thingm_rgb *rgb)
"thingm%d:red:led%d", minor, rgb->num);
rgb->red.ldev.name = rgb->red.name;
rgb->red.ldev.max_brightness = 255;
- rgb->red.ldev.brightness_set = thingm_led_set;
+ rgb->red.ldev.brightness_set_blocking = thingm_led_set;
rgb->red.rgb = rgb;
- err = led_classdev_register(&rgb->tdev->hdev->dev, &rgb->red.ldev);
+ err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
+ &rgb->red.ldev);
if (err)
return err;
@@ -174,46 +180,27 @@ static int thingm_init_rgb(struct thingm_rgb *rgb)
"thingm%d:green:led%d", minor, rgb->num);
rgb->green.ldev.name = rgb->green.name;
rgb->green.ldev.max_brightness = 255;
- rgb->green.ldev.brightness_set = thingm_led_set;
+ rgb->green.ldev.brightness_set_blocking = thingm_led_set;
rgb->green.rgb = rgb;
- err = led_classdev_register(&rgb->tdev->hdev->dev, &rgb->green.ldev);
+ err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
+ &rgb->green.ldev);
if (err)
- goto unregister_red;
+ return err;
/* Register the blue diode */
snprintf(rgb->blue.name, sizeof(rgb->blue.name),
"thingm%d:blue:led%d", minor, rgb->num);
rgb->blue.ldev.name = rgb->blue.name;
rgb->blue.ldev.max_brightness = 255;
- rgb->blue.ldev.brightness_set = thingm_led_set;
+ rgb->blue.ldev.brightness_set_blocking = thingm_led_set;
rgb->blue.rgb = rgb;
- err = led_classdev_register(&rgb->tdev->hdev->dev, &rgb->blue.ldev);
- if (err)
- goto unregister_green;
-
- INIT_WORK(&rgb->work, thingm_work);
-
- return 0;
-
-unregister_green:
- led_classdev_unregister(&rgb->green.ldev);
-
-unregister_red:
- led_classdev_unregister(&rgb->red.ldev);
-
+ err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
+ &rgb->blue.ldev);
return err;
}
-static void thingm_remove_rgb(struct thingm_rgb *rgb)
-{
- led_classdev_unregister(&rgb->red.ldev);
- led_classdev_unregister(&rgb->green.ldev);
- led_classdev_unregister(&rgb->blue.ldev);
- flush_work(&rgb->work);
-}
-
static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct thingm_device *tdev;
@@ -229,17 +216,13 @@ static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
err = hid_parse(hdev);
if (err)
- goto error;
-
- err = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
- if (err)
- goto error;
+ return err;
mutex_init(&tdev->lock);
err = thingm_version(tdev);
if (err)
- goto stop;
+ return err;
hid_dbg(hdev, "firmware version: %c.%c\n",
tdev->version.major, tdev->version.minor);
@@ -250,17 +233,18 @@ static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!tdev->fwinfo) {
hid_err(hdev, "unsupported firmware %c\n", tdev->version.major);
- err = -ENODEV;
- goto stop;
+ return -ENODEV;
}
tdev->rgb = devm_kzalloc(&hdev->dev,
sizeof(struct thingm_rgb) * tdev->fwinfo->numrgb,
GFP_KERNEL);
- if (!tdev->rgb) {
- err = -ENOMEM;
- goto stop;
- }
+ if (!tdev->rgb)
+ return -ENOMEM;
+
+ err = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (err)
+ return err;
for (i = 0; i < tdev->fwinfo->numrgb; ++i) {
struct thingm_rgb *rgb = tdev->rgb + i;
@@ -269,28 +253,12 @@ static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
rgb->num = tdev->fwinfo->first + i;
err = thingm_init_rgb(rgb);
if (err) {
- while (--i >= 0)
- thingm_remove_rgb(tdev->rgb + i);
- goto stop;
+ hid_hw_stop(hdev);
+ return err;
}
}
return 0;
-stop:
- hid_hw_stop(hdev);
-error:
- return err;
-}
-
-static void thingm_remove(struct hid_device *hdev)
-{
- struct thingm_device *tdev = hid_get_drvdata(hdev);
- int i;
-
- hid_hw_stop(hdev);
-
- for (i = 0; i < tdev->fwinfo->numrgb; ++i)
- thingm_remove_rgb(tdev->rgb + i);
}
static const struct hid_device_id thingm_table[] = {
@@ -302,7 +270,6 @@ MODULE_DEVICE_TABLE(hid, thingm_table);
static struct hid_driver thingm_driver = {
.name = "thingm",
.probe = thingm_probe,
- .remove = thingm_remove,
.id_table = thingm_table,
};
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 4390eee2ce84..c830ed39348f 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -2049,9 +2049,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
* -----+------------------------------+-----+-----+
* The single bits Yaw, Roll, Pitch in the lower right corner specify
* whether the wiimote is rotating fast (0) or slow (1). Speed for slow
- * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a
- * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
- * and 9 for slow.
+ * rotation is 8192/440 units per deg/s and for fast rotation 8192/2000
+ * units per deg/s. To get a linear scale for fast rotation we multiply
+ * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
+ * previous scale reported by this driver.
+ * This leaves a linear scale with 8192*9/440 (~167.564) units per deg/s.
* If the wiimote is not rotating the sensor reports 2^13 = 8192.
* Ext specifies whether an extension is connected to the motionp.
* which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
z -= 8192;
if (!(ext[3] & 0x02))
- x *= 18;
+ x = (x * 2000 * 9) / 440;
else
x *= 9;
if (!(ext[4] & 0x02))
- y *= 18;
+ y = (y * 2000 * 9) / 440;
else
y *= 9;
if (!(ext[3] & 0x01))
- z *= 18;
+ z = (z * 2000 * 9) / 440;
else
z *= 9;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b9216938a718..2e021ba8ff05 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -283,17 +283,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
+ u16 size;
+ int args_len;
+ int index = 0;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
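+ /* also guard against reports larger than the driver's preallocated buffer */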
+ if (data_len > ihid->bufsize)
+ return -EINVAL;
- /* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
- u16 size = 2 /* size */ +
+ size = 2 /* size */ +
(reportID ? 1 : 0) /* reportID */ +
data_len /* buf */;
- int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
2 /* dataRegister */ +
size /* args */;
- int index = 0;
-
- i2c_hid_dbg(ihid, "%s\n", __func__);
if (!use_data && maxOutputLength == 0)
return -ENOSYS;
@@ -1108,13 +1112,30 @@ static int i2c_hid_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid = ihid->hid;
- int ret = 0;
+ int ret;
int wake_status;
- if (hid->driver && hid->driver->suspend)
+ if (hid->driver && hid->driver->suspend) {
+ /*
+ * Wake up the device so that IO issues in
+ * HID driver's suspend code can succeed.
+ */
+ ret = pm_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
ret = hid->driver->suspend(hid, PMSG_SUSPEND);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!pm_runtime_suspended(dev)) {
+ /* Save some power */
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+
+ disable_irq(ihid->irq);
+ }
- disable_irq(ihid->irq);
if (device_may_wakeup(&client->dev)) {
wake_status = enable_irq_wake(ihid->irq);
if (!wake_status)
@@ -1124,10 +1145,7 @@ static int i2c_hid_suspend(struct device *dev)
wake_status);
}
- /* Save some power */
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
-
- return ret;
+ return 0;
}
static int i2c_hid_resume(struct device *dev)
@@ -1138,11 +1156,6 @@ static int i2c_hid_resume(struct device *dev)
struct hid_device *hid = ihid->hid;
int wake_status;
- enable_irq(ihid->irq);
- ret = i2c_hid_hwreset(client);
- if (ret)
- return ret;
-
if (device_may_wakeup(&client->dev) && ihid->irq_wake_enabled) {
wake_status = disable_irq_wake(ihid->irq);
if (!wake_status)
@@ -1152,6 +1165,16 @@ static int i2c_hid_resume(struct device *dev)
wake_status);
}
+ /* We'll resume to full power */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ enable_irq(ihid->irq);
+ ret = i2c_hid_hwreset(client);
+ if (ret)
+ return ret;
+
if (hid->driver && hid->driver->reset_resume) {
ret = hid->driver->reset_resume(hid);
return ret;
@@ -1191,6 +1214,7 @@ static const struct dev_pm_ops i2c_hid_pm = {
static const struct i2c_device_id i2c_hid_id_table[] = {
{ "hid", 0 },
+ { "hid-over-i2c", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index e094c572b86e..16b6f11a0700 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -384,7 +384,7 @@ struct uhid_create_req_compat {
static int uhid_event_from_user(const char __user *buffer, size_t len,
struct uhid_event *event)
{
- if (is_compat_task()) {
+ if (in_compat_syscall()) {
u32 type;
if (get_user(type, buffer))
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad71160b9ea4..ae83af649a60 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
return ret;
}
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
- if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- usbhid_restart_out_queue(usbhid);
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- usbhid_restart_ctrl_queue(usbhid);
-}
-
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
usb_kill_urb(usbhid->urbout);
}
+static void hid_restart_io(struct hid_device *hid)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (clear_halt || reset_pending)
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+ spin_unlock_irq(&usbhid->lock);
+
+ if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+ return;
+
+ if (!clear_halt) {
+ if (hid_start_in(hid) < 0)
+ hid_io_error(hid);
+ }
+
+ spin_lock_irq(&usbhid->lock);
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+}
+
/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
return 1;
}
+ /* No need to do another reset or clear a halted endpoint */
spin_lock_irq(&usbhid->lock);
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+ clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
+
+ hid_restart_io(hid);
return 0;
}
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
{
- struct usbhid_device *usbhid = hid->driver_data;
- int status;
-
- spin_lock_irq(&usbhid->lock);
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
-
- usbhid_restart_queues(usbhid);
- spin_unlock_irq(&usbhid->lock);
-
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
+ int status = 0;
+ hid_restart_io(hid);
if (driver_suspended && hid->driver && hid->driver->resume)
status = hid->driver->resume(hid);
return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
static int hid_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata (intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- if (!test_bit(HID_STARTED, &usbhid->iofl))
- return 0;
-
status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 7dd0953cd70f..53fc856d6867 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -55,6 +55,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
@@ -70,6 +71,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
@@ -106,6 +108,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
@@ -140,6 +143,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5cb21dd91094..ccf1883318c3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -152,6 +152,25 @@ static void wacom_feature_mapping(struct hid_device *hdev,
hid_data->inputmode = field->report->id;
hid_data->inputmode_index = usage->usage_index;
break;
+
+ case HID_UP_DIGITIZER:
+ if (field->report->id == 0x0B &&
+ (field->application == WACOM_G9_DIGITIZER ||
+ field->application == WACOM_G11_DIGITIZER)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
+
+ case WACOM_G9_PAGE:
+ case WACOM_G11_PAGE:
+ if (field->report->id == 0x03 &&
+ (field->application == WACOM_G9_TOUCHSCREEN ||
+ field->application == WACOM_G11_TOUCHSCREEN)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
}
}
@@ -322,26 +341,41 @@ static int wacom_hid_set_device_mode(struct hid_device *hdev)
return 0;
}
-static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
- int length, int mode)
+static int wacom_set_device_mode(struct hid_device *hdev,
+ struct wacom_wac *wacom_wac)
{
- unsigned char *rep_data;
+ u8 *rep_data;
+ struct hid_report *r;
+ struct hid_report_enum *re;
+ int length;
int error = -ENOMEM, limit = 0;
- rep_data = kzalloc(length, GFP_KERNEL);
+ if (wacom_wac->mode_report < 0)
+ return 0;
+
+ re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+ r = re->report_id_hash[wacom_wac->mode_report];
+ if (!r)
+ return -EINVAL;
+
+ rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
if (!rep_data)
- return error;
+ return -ENOMEM;
+
+ length = hid_report_len(r);
do {
- rep_data[0] = report_id;
- rep_data[1] = mode;
+ rep_data[0] = wacom_wac->mode_report;
+ rep_data[1] = wacom_wac->mode_value;
error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
length, 1);
if (error >= 0)
error = wacom_get_report(hdev, HID_FEATURE_REPORT,
rep_data, length, 1);
- } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
+ } while (error >= 0 &&
+ rep_data[1] != wacom_wac->mode_value &&
+ limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
@@ -411,32 +445,41 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
static int wacom_query_tablet_data(struct hid_device *hdev,
struct wacom_features *features)
{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
if (hdev->bus == BUS_BLUETOOTH)
return wacom_bt_query_tablet_data(hdev, 1, features);
- if (features->type == HID_GENERIC)
- return wacom_hid_set_device_mode(hdev);
-
- if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
- if (features->type > TABLETPC) {
- /* MT Tablet PC touch */
- return wacom_set_device_mode(hdev, 3, 4, 4);
- }
- else if (features->type == WACOM_24HDT) {
- return wacom_set_device_mode(hdev, 18, 3, 2);
- }
- else if (features->type == WACOM_27QHDT) {
- return wacom_set_device_mode(hdev, 131, 3, 2);
- }
- else if (features->type == BAMBOO_PAD) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
- }
- } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
- if (features->type <= BAMBOO_PT) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
+ if (features->type != HID_GENERIC) {
+ if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
+ if (features->type > TABLETPC) {
+ /* MT Tablet PC touch */
+ wacom_wac->mode_report = 3;
+ wacom_wac->mode_value = 4;
+ } else if (features->type == WACOM_24HDT) {
+ wacom_wac->mode_report = 18;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == WACOM_27QHDT) {
+ wacom_wac->mode_report = 131;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == BAMBOO_PAD) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
+ } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
+ if (features->type <= BAMBOO_PT) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
}
}
+ wacom_set_device_mode(hdev, wacom_wac);
+
+ if (features->type == HID_GENERIC)
+ return wacom_hid_set_device_mode(hdev);
+
return 0;
}
@@ -1357,6 +1400,9 @@ static void wacom_clean_inputs(struct wacom *wacom)
wacom->wacom_wac.pen_input = NULL;
wacom->wacom_wac.touch_input = NULL;
wacom->wacom_wac.pad_input = NULL;
+ wacom->wacom_wac.pen_registered = false;
+ wacom->wacom_wac.touch_registered = false;
+ wacom->wacom_wac.pad_registered = false;
wacom_destroy_leds(wacom);
}
@@ -1494,123 +1540,6 @@ static void wacom_calculate_res(struct wacom_features *features)
features->unitExpo);
}
-static void wacom_wireless_work(struct work_struct *work)
-{
- struct wacom *wacom = container_of(work, struct wacom, work);
- struct usb_device *usbdev = wacom->usbdev;
- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct hid_device *hdev1, *hdev2;
- struct wacom *wacom1, *wacom2;
- struct wacom_wac *wacom_wac1, *wacom_wac2;
- int error;
-
- /*
- * Regardless if this is a disconnect or a new tablet,
- * remove any existing input and battery devices.
- */
-
- wacom_destroy_battery(wacom);
-
- /* Stylus interface */
- hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
- wacom1 = hid_get_drvdata(hdev1);
- wacom_wac1 = &(wacom1->wacom_wac);
- wacom_clean_inputs(wacom1);
-
- /* Touch interface */
- hdev2 = usb_get_intfdata(usbdev->config->interface[2]);
- wacom2 = hid_get_drvdata(hdev2);
- wacom_wac2 = &(wacom2->wacom_wac);
- wacom_clean_inputs(wacom2);
-
- if (wacom_wac->pid == 0) {
- hid_info(wacom->hdev, "wireless tablet disconnected\n");
- wacom_wac1->shared->type = 0;
- } else {
- const struct hid_device_id *id = wacom_ids;
-
- hid_info(wacom->hdev, "wireless tablet connected with PID %x\n",
- wacom_wac->pid);
-
- while (id->bus) {
- if (id->vendor == USB_VENDOR_ID_WACOM &&
- id->product == wacom_wac->pid)
- break;
- id++;
- }
-
- if (!id->bus) {
- hid_info(wacom->hdev, "ignoring unknown PID.\n");
- return;
- }
-
- /* Stylus interface */
- wacom_wac1->features =
- *((struct wacom_features *)id->driver_data);
- wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PEN;
- wacom_set_default_phy(&wacom_wac1->features);
- wacom_calculate_res(&wacom_wac1->features);
- snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
- wacom_wac1->features.name);
- if (wacom_wac1->features.type < BAMBOO_PEN ||
- wacom_wac1->features.type > BAMBOO_PT) {
- snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
- wacom_wac1->features.name);
- wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
- }
- wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max;
- wacom_wac1->shared->type = wacom_wac1->features.type;
- wacom_wac1->pid = wacom_wac->pid;
- error = wacom_allocate_inputs(wacom1) ||
- wacom_register_inputs(wacom1);
- if (error)
- goto fail;
-
- /* Touch interface */
- if (wacom_wac1->features.touch_max ||
- (wacom_wac1->features.type >= INTUOSHT &&
- wacom_wac1->features.type <= BAMBOO_PT)) {
- wacom_wac2->features =
- *((struct wacom_features *)id->driver_data);
- wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
- wacom_set_default_phy(&wacom_wac2->features);
- wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
- wacom_calculate_res(&wacom_wac2->features);
- snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
- "%s (WL) Finger",wacom_wac2->features.name);
- if (wacom_wac1->features.touch_max)
- wacom_wac2->features.device_type |= WACOM_DEVICETYPE_TOUCH;
- if (wacom_wac1->features.type >= INTUOSHT &&
- wacom_wac1->features.type <= BAMBOO_PT) {
- snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
- "%s (WL) Pad",wacom_wac2->features.name);
- wacom_wac2->features.device_type |= WACOM_DEVICETYPE_PAD;
- }
- wacom_wac2->pid = wacom_wac->pid;
- error = wacom_allocate_inputs(wacom2) ||
- wacom_register_inputs(wacom2);
- if (error)
- goto fail;
-
- if ((wacom_wac1->features.type == INTUOSHT ||
- wacom_wac1->features.type == INTUOSHT2) &&
- wacom_wac1->features.touch_max)
- wacom_wac->shared->touch_input = wacom_wac2->touch_input;
- }
-
- error = wacom_initialize_battery(wacom);
- if (error)
- goto fail;
- }
-
- return;
-
-fail:
- wacom_clean_inputs(wacom1);
- wacom_clean_inputs(wacom2);
- return;
-}
-
void wacom_battery_work(struct work_struct *work)
{
struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1642,7 +1571,7 @@ static size_t wacom_compute_pktlen(struct hid_device *hdev)
return size;
}
-static void wacom_update_name(struct wacom *wacom)
+static void wacom_update_name(struct wacom *wacom, const char *suffix)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom_wac->features;
@@ -1678,68 +1607,28 @@ static void wacom_update_name(struct wacom *wacom)
/* Append the device type to the name */
snprintf(wacom_wac->pen_name, sizeof(wacom_wac->pen_name),
- "%s Pen", name);
+ "%s%s Pen", name, suffix);
snprintf(wacom_wac->touch_name, sizeof(wacom_wac->touch_name),
- "%s Finger", name);
+ "%s%s Finger", name, suffix);
snprintf(wacom_wac->pad_name, sizeof(wacom_wac->pad_name),
- "%s Pad", name);
+ "%s%s Pad", name, suffix);
}
-static int wacom_probe(struct hid_device *hdev,
- const struct hid_device_id *id)
+static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct usb_device *dev = interface_to_usbdev(intf);
- struct wacom *wacom;
- struct wacom_wac *wacom_wac;
- struct wacom_features *features;
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct hid_device *hdev = wacom->hdev;
int error;
unsigned int connect_mask = HID_CONNECT_HIDRAW;
- if (!id->driver_data)
- return -EINVAL;
-
- hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
-
- /* hid-core sets this quirk for the boot interface */
- hdev->quirks &= ~HID_QUIRK_NOGET;
-
- wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
- if (!wacom)
- return -ENOMEM;
-
- hid_set_drvdata(hdev, wacom);
- wacom->hdev = hdev;
-
- /* ask for the report descriptor to be loaded by HID */
- error = hid_parse(hdev);
- if (error) {
- hid_err(hdev, "parse failed\n");
- goto fail_parse;
- }
-
- wacom_wac = &wacom->wacom_wac;
- wacom_wac->features = *((struct wacom_features *)id->driver_data);
- features = &wacom_wac->features;
features->pktlen = wacom_compute_pktlen(hdev);
- if (features->pktlen > WACOM_PKGLEN_MAX) {
- error = -EINVAL;
- goto fail_pktlen;
- }
-
- if (features->check_for_hid_type && features->hid_type != hdev->type) {
- error = -ENODEV;
- goto fail_type;
- }
-
- wacom->usbdev = dev;
- wacom->intf = intf;
- mutex_init(&wacom->lock);
- INIT_WORK(&wacom->work, wacom_wireless_work);
+ if (features->pktlen > WACOM_PKGLEN_MAX)
+ return -EINVAL;
error = wacom_allocate_inputs(wacom);
if (error)
- goto fail_allocate_inputs;
+ return error;
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
@@ -1752,7 +1641,7 @@ static int wacom_probe(struct hid_device *hdev,
} else if ((features->pktlen != WACOM_PKGLEN_BPAD_TOUCH) &&
(features->pktlen != WACOM_PKGLEN_BPAD_TOUCH_USB)) {
error = -ENODEV;
- goto fail_shared_data;
+ goto fail_allocate_inputs;
}
}
@@ -1772,14 +1661,14 @@ static int wacom_probe(struct hid_device *hdev,
error ? "Ignoring" : "Assuming pen");
if (error)
- goto fail_shared_data;
+ goto fail_parsed;
features->device_type |= WACOM_DEVICETYPE_PEN;
}
wacom_calculate_res(features);
- wacom_update_name(wacom);
+ wacom_update_name(wacom, wireless ? " (WL)" : "");
error = wacom_add_shared_data(hdev);
if (error)
@@ -1796,14 +1685,6 @@ static int wacom_probe(struct hid_device *hdev,
if (error)
goto fail_register_inputs;
- if (hdev->bus == BUS_BLUETOOTH) {
- error = device_create_file(&hdev->dev, &dev_attr_speed);
- if (error)
- hid_warn(hdev,
- "can't create sysfs speed attribute err: %d\n",
- error);
- }
-
if (features->type == HID_GENERIC)
connect_mask |= HID_CONNECT_DRIVER;
@@ -1814,8 +1695,10 @@ static int wacom_probe(struct hid_device *hdev,
goto fail_hw_start;
}
- /* Note that if query fails it is not a hard failure */
- wacom_query_tablet_data(hdev, features);
+ if (!wireless) {
+ /* Note that if query fails it is not a hard failure */
+ wacom_query_tablet_data(hdev, features);
+ }
/* touch only Bamboo doesn't support pen */
if ((features->type == BAMBOO_TOUCH) &&
@@ -1844,18 +1727,169 @@ static int wacom_probe(struct hid_device *hdev,
return 0;
fail_hw_start:
- if (hdev->bus == BUS_BLUETOOTH)
- device_remove_file(&hdev->dev, &dev_attr_speed);
+ hid_hw_stop(hdev);
fail_register_inputs:
wacom_clean_inputs(wacom);
wacom_destroy_battery(wacom);
fail_battery:
wacom_remove_shared_data(wacom);
fail_shared_data:
- wacom_clean_inputs(wacom);
+fail_parsed:
fail_allocate_inputs:
+ wacom_clean_inputs(wacom);
+ return error;
+}
+
+static void wacom_wireless_work(struct work_struct *work)
+{
+ struct wacom *wacom = container_of(work, struct wacom, work);
+ struct usb_device *usbdev = wacom->usbdev;
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct hid_device *hdev1, *hdev2;
+ struct wacom *wacom1, *wacom2;
+ struct wacom_wac *wacom_wac1, *wacom_wac2;
+ int error;
+
+ /*
+ * Regardless of whether this is a disconnect or a new tablet,
+ * remove any existing input and battery devices.
+ */
+
+ wacom_destroy_battery(wacom);
+
+ /* Stylus interface */
+ hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
+ wacom1 = hid_get_drvdata(hdev1);
+ wacom_wac1 = &(wacom1->wacom_wac);
+ wacom_clean_inputs(wacom1);
+
+ /* Touch interface */
+ hdev2 = usb_get_intfdata(usbdev->config->interface[2]);
+ wacom2 = hid_get_drvdata(hdev2);
+ wacom_wac2 = &(wacom2->wacom_wac);
+ wacom_clean_inputs(wacom2);
+
+ if (wacom_wac->pid == 0) {
+ hid_info(wacom->hdev, "wireless tablet disconnected\n");
+ wacom_wac1->shared->type = 0;
+ } else {
+ const struct hid_device_id *id = wacom_ids;
+
+ hid_info(wacom->hdev, "wireless tablet connected with PID %x\n",
+ wacom_wac->pid);
+
+ while (id->bus) {
+ if (id->vendor == USB_VENDOR_ID_WACOM &&
+ id->product == wacom_wac->pid)
+ break;
+ id++;
+ }
+
+ if (!id->bus) {
+ hid_info(wacom->hdev, "ignoring unknown PID.\n");
+ return;
+ }
+
+ /* Stylus interface */
+ wacom_wac1->features =
+ *((struct wacom_features *)id->driver_data);
+
+ wacom_wac1->pid = wacom_wac->pid;
+ hid_hw_stop(hdev1);
+ error = wacom_parse_and_register(wacom1, true);
+ if (error)
+ goto fail;
+
+ /* Touch interface */
+ if (wacom_wac1->features.touch_max ||
+ (wacom_wac1->features.type >= INTUOSHT &&
+ wacom_wac1->features.type <= BAMBOO_PT)) {
+ wacom_wac2->features =
+ *((struct wacom_features *)id->driver_data);
+ wacom_wac2->pid = wacom_wac->pid;
+ hid_hw_stop(hdev2);
+ error = wacom_parse_and_register(wacom2, true);
+ if (error)
+ goto fail;
+ }
+
+ error = wacom_initialize_battery(wacom);
+ if (error)
+ goto fail;
+ }
+
+ return;
+
+fail:
+ wacom_clean_inputs(wacom1);
+ wacom_clean_inputs(wacom2);
+ return;
+}
+
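The PID lookup loop in wacom_wireless_work() above could be factored as below; example_find_wacom_id is a hypothetical helper, not part of this patch:

/* Hypothetical refactoring of the wacom_ids scan above. */
static const struct hid_device_id *example_find_wacom_id(__u32 pid)
{
	const struct hid_device_id *id;

	for (id = wacom_ids; id->bus; id++)
		if (id->vendor == USB_VENDOR_ID_WACOM &&
		    id->product == pid)
			return id;
	return NULL;	/* unknown PID: caller should ignore the tablet */
}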
+static int wacom_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct wacom *wacom;
+ struct wacom_wac *wacom_wac;
+ struct wacom_features *features;
+ int error;
+
+ if (!id->driver_data)
+ return -EINVAL;
+
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+ /* hid-core sets this quirk for the boot interface */
+ hdev->quirks &= ~HID_QUIRK_NOGET;
+
+ wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
+ if (!wacom)
+ return -ENOMEM;
+
+ hid_set_drvdata(hdev, wacom);
+ wacom->hdev = hdev;
+
+ wacom_wac = &wacom->wacom_wac;
+ wacom_wac->features = *((struct wacom_features *)id->driver_data);
+ features = &wacom_wac->features;
+
+ if (features->check_for_hid_type && features->hid_type != hdev->type) {
+ error = -ENODEV;
+ goto fail_type;
+ }
+
+ wacom_wac->hid_data.inputmode = -1;
+ wacom_wac->mode_report = -1;
+
+ wacom->usbdev = dev;
+ wacom->intf = intf;
+ mutex_init(&wacom->lock);
+ INIT_WORK(&wacom->work, wacom_wireless_work);
+
+ /* ask for the report descriptor to be loaded by HID */
+ error = hid_parse(hdev);
+ if (error) {
+ hid_err(hdev, "parse failed\n");
+ goto fail_parse;
+ }
+
+ error = wacom_parse_and_register(wacom, false);
+ if (error)
+ goto fail_parse;
+
+ if (hdev->bus == BUS_BLUETOOTH) {
+ error = device_create_file(&hdev->dev, &dev_attr_speed);
+ if (error)
+ hid_warn(hdev,
+ "can't create sysfs speed attribute err: %d\n",
+ error);
+ }
+
+ return 0;
+
fail_type:
-fail_pktlen:
fail_parse:
kfree(wacom);
hid_set_drvdata(hdev, NULL);
@@ -1865,6 +1899,11 @@ fail_parse:
static void wacom_remove(struct hid_device *hdev)
{
struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+
+ if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
+ hid_hw_close(hdev);
hid_hw_stop(hdev);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 99ef77fcfb80..cf2ba43453fd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -575,16 +575,102 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
return 1;
}
+static int wacom_intuos_get_tool_type(int tool_id)
+{
+ int tool_type;
+
+ switch (tool_id) {
+ case 0x812: /* Inking pen */
+ case 0x801: /* Intuos3 Inking pen */
+ case 0x120802: /* Intuos4/5 Inking Pen */
+ case 0x012:
+ tool_type = BTN_TOOL_PENCIL;
+ break;
+
+ case 0x822: /* Pen */
+ case 0x842:
+ case 0x852:
+ case 0x823: /* Intuos3 Grip Pen */
+ case 0x813: /* Intuos3 Classic Pen */
+ case 0x885: /* Intuos3 Marker Pen */
+ case 0x802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
+ case 0x8e2: /* IntuosHT2 pen */
+ case 0x022:
+ case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x160802: /* Cintiq 13HD Pro Pen */
+ case 0x180802: /* DTH2242 Pen */
+ case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
+ tool_type = BTN_TOOL_PEN;
+ break;
+
+ case 0x832: /* Stroke pen */
+ case 0x032:
+ tool_type = BTN_TOOL_BRUSH;
+ break;
+
+ case 0x007: /* Mouse 4D and 2D */
+ case 0x09c:
+ case 0x094:
+ case 0x017: /* Intuos3 2D Mouse */
+ case 0x806: /* Intuos4 Mouse */
+ tool_type = BTN_TOOL_MOUSE;
+ break;
+
+ case 0x096: /* Lens cursor */
+ case 0x097: /* Intuos3 Lens cursor */
+ case 0x006: /* Intuos4 Lens cursor */
+ tool_type = BTN_TOOL_LENS;
+ break;
+
+ case 0x82a: /* Eraser */
+ case 0x85a:
+ case 0x91a:
+ case 0xd1a:
+ case 0x0fa:
+ case 0x82b: /* Intuos3 Grip Pen Eraser */
+ case 0x81b: /* Intuos3 Classic Pen Eraser */
+ case 0x91b: /* Intuos3 Airbrush Eraser */
+ case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
+ case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+ case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+ case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
+ case 0x18080a: /* DTH2242 Eraser */
+ case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ tool_type = BTN_TOOL_RUBBER;
+ break;
+
+ case 0xd12:
+ case 0x912:
+ case 0x112:
+ case 0x913: /* Intuos3 Airbrush */
+ case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
+ case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
+ tool_type = BTN_TOOL_AIRBRUSH;
+ break;
+
+ default: /* Unknown tool */
+ tool_type = BTN_TOOL_PEN;
+ break;
+ }
+ return tool_type;
+}
+
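A caller uses the new helper exactly as the rewritten prox handler below does; a minimal sketch (the function name is an assumption):

/*
 * Minimal caller sketch: cache the decoded tool for later reports.
 * Unrecognized IDs deliberately fall back to BTN_TOOL_PEN.
 */
static void example_mark_tool(struct wacom_wac *wacom, int idx)
{
	wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
}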
static int wacom_intuos_inout(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
struct input_dev *input = wacom->pen_input;
- int idx = 0;
+ int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
- /* tool number */
- if (features->type == INTUOS)
- idx = data[1] & 0x01;
+ if (!(((data[1] & 0xfc) == 0xc0) || /* in prox */
+ ((data[1] & 0xfe) == 0x20) || /* in range */
+ ((data[1] & 0xfe) == 0x80))) /* out prox */
+ return 0;
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
@@ -596,116 +682,25 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
- switch (wacom->id[idx]) {
- case 0x812: /* Inking pen */
- case 0x801: /* Intuos3 Inking pen */
- case 0x120802: /* Intuos4/5 Inking Pen */
- case 0x012:
- wacom->tool[idx] = BTN_TOOL_PENCIL;
- break;
-
- case 0x822: /* Pen */
- case 0x842:
- case 0x852:
- case 0x823: /* Intuos3 Grip Pen */
- case 0x813: /* Intuos3 Classic Pen */
- case 0x885: /* Intuos3 Marker Pen */
- case 0x802: /* Intuos4/5 13HD/24HD General Pen */
- case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
- case 0x022:
- case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
- case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
- case 0x160802: /* Cintiq 13HD Pro Pen */
- case 0x180802: /* DTH2242 Pen */
- case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
- wacom->tool[idx] = BTN_TOOL_PEN;
- break;
-
- case 0x832: /* Stroke pen */
- case 0x032:
- wacom->tool[idx] = BTN_TOOL_BRUSH;
- break;
-
- case 0x007: /* Mouse 4D and 2D */
- case 0x09c:
- case 0x094:
- case 0x017: /* Intuos3 2D Mouse */
- case 0x806: /* Intuos4 Mouse */
- wacom->tool[idx] = BTN_TOOL_MOUSE;
- break;
-
- case 0x096: /* Lens cursor */
- case 0x097: /* Intuos3 Lens cursor */
- case 0x006: /* Intuos4 Lens cursor */
- wacom->tool[idx] = BTN_TOOL_LENS;
- break;
-
- case 0x82a: /* Eraser */
- case 0x85a:
- case 0x91a:
- case 0xd1a:
- case 0x0fa:
- case 0x82b: /* Intuos3 Grip Pen Eraser */
- case 0x81b: /* Intuos3 Classic Pen Eraser */
- case 0x91b: /* Intuos3 Airbrush Eraser */
- case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
- case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
- case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
- case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
- case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
- case 0x18080a: /* DTH2242 Eraser */
- case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
- wacom->tool[idx] = BTN_TOOL_RUBBER;
- break;
-
- case 0xd12:
- case 0x912:
- case 0x112:
- case 0x913: /* Intuos3 Airbrush */
- case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
- case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
- wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
- break;
-
- default: /* Unknown tool */
- wacom->tool[idx] = BTN_TOOL_PEN;
- break;
- }
+ wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
+
+ wacom->shared->stylus_in_proximity = true;
return 1;
}
- /*
- * don't report events for invalid data
- */
- /* older I4 styli don't work with new Cintiqs */
- if ((!((wacom->id[idx] >> 20) & 0x01) &&
- (features->type == WACOM_21UX2)) ||
- /* Only large Intuos support Lense Cursor */
- (wacom->tool[idx] == BTN_TOOL_LENS &&
- (features->type == INTUOS3 ||
- features->type == INTUOS3S ||
- features->type == INTUOS4 ||
- features->type == INTUOS4S ||
- features->type == INTUOS5 ||
- features->type == INTUOS5S ||
- features->type == INTUOSPM ||
- features->type == INTUOSPS)) ||
- /* Cintiq doesn't send data when RDY bit isn't set */
- (features->type == CINTIQ && !(data[1] & 0x40)))
- return 1;
+ /* in Range */
+ if ((data[1] & 0xfe) == 0x20) {
+ if (features->type != INTUOSHT2)
+ wacom->shared->stylus_in_proximity = true;
- wacom->shared->stylus_in_proximity = true;
- if (wacom->shared->touch_down)
+ /* in Range while exiting */
+ if (wacom->reporting_data) {
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
+ return 2;
+ }
return 1;
-
- /* in Range while exiting */
- if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
- input_report_key(input, BTN_TOUCH, 0);
- input_report_abs(input, ABS_PRESSURE, 0);
- input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
- return 2;
}
/* Exit report */
@@ -750,13 +745,6 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
return 2;
}
- /* don't report other events if we don't know the ID */
- if (!wacom->id[idx]) {
- /* but reschedule a read of the current tool */
- wacom_intuos_schedule_prox_event(wacom);
- return 1;
- }
-
return 0;
}
@@ -897,6 +885,36 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
data[0] != WACOM_REPORT_INTUOS_PEN)
return 0;
+ if (wacom->shared->touch_down)
+ return 1;
+
+ /* don't report events if we don't know the tool ID */
+ if (!wacom->id[idx]) {
+ /* but reschedule a read of the current tool */
+ wacom_intuos_schedule_prox_event(wacom);
+ return 1;
+ }
+
+ /*
+ * don't report events for invalid data
+ */
+ /* older I4 styli don't work with new Cintiqs */
+ if ((!((wacom->id[idx] >> 20) & 0x01) &&
+ (features->type == WACOM_21UX2)) ||
+ /* Only large Intuos models support the Lens Cursor */
+ (wacom->tool[idx] == BTN_TOOL_LENS &&
+ (features->type == INTUOS3 ||
+ features->type == INTUOS3S ||
+ features->type == INTUOS4 ||
+ features->type == INTUOS4S ||
+ features->type == INTUOS5 ||
+ features->type == INTUOS5S ||
+ features->type == INTUOSPM ||
+ features->type == INTUOSPS)) ||
+ /* Cintiq doesn't send data when RDY bit isn't set */
+ (features->type == CINTIQ && !(data[1] & 0x40)))
+ return 1;
+
x = (be16_to_cpup((__be16 *)&data[2]) << 1) | ((data[9] >> 1) & 1);
y = (be16_to_cpup((__be16 *)&data[4]) << 1) | (data[9] & 1);
distance = data[9] >> 2;
@@ -2409,6 +2427,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
}
/*
+ * Hack for the Bamboo One:
+ * the device presents a PAD/Touch interface like most Bamboos and even
+ * sends ghost PAD data on it. However, later, we must disable this
+ * ghost interface, and we cannot detect it unless we set it here
+ * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+ */
+ if (features->type == BAMBOO_PEN &&
+ features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
+ /*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
* (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
@@ -3367,6 +3396,10 @@ static const struct wacom_features wacom_features_0x33E =
{ "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x343 =
+ { "Wacom DTK1651", 34616, 19559, 1023, 0,
+ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC };
@@ -3532,6 +3565,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x33C) },
{ USB_DEVICE_WACOM(0x33D) },
{ USB_DEVICE_WACOM(0x33E) },
+ { USB_DEVICE_WACOM(0x343) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 25baa7f29599..e2084d914c14 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -84,6 +84,12 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_VENDORDEFINED_PEN 0xff0d0001
+#define WACOM_G9_PAGE 0xff090000
+#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
+#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
+#define WACOM_G11_PAGE 0xff110000
+#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
+#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
@@ -238,6 +244,8 @@ struct wacom_wac {
int ps_connected;
u8 bt_features;
u8 bt_high_speed;
+ int mode_report;
+ int mode_value;
struct hid_data hid_data;
};
diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c
index 7f82c911ad74..c000780d931f 100644
--- a/drivers/hsi/clients/nokia-modem.c
+++ b/drivers/hsi/clients/nokia-modem.c
@@ -281,6 +281,8 @@ static int nokia_modem_remove(struct device *dev)
#ifdef CONFIG_OF
static const struct of_device_id nokia_modem_of_match[] = {
{ .compatible = "nokia,n900-modem", },
+ { .compatible = "nokia,n950-modem", },
+ { .compatible = "nokia,n9-modem", },
{},
};
MODULE_DEVICE_TABLE(of, nokia_modem_of_match);
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index a38af68cf326..6595d2091268 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -521,13 +521,7 @@ static void ssip_start_rx(struct hsi_client *cl)
* high transition. Therefore we need to ignore the second UP event.
*/
if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
- if (ssi->main_state == INIT) {
- ssi->main_state = HANDSHAKE;
- spin_unlock(&ssi->lock);
- ssip_send_bootinfo_req_cmd(cl);
- } else {
- spin_unlock(&ssi->lock);
- }
+ spin_unlock(&ssi->lock);
return;
}
ssip_set_rxstate(ssi, RECV_READY);
@@ -671,6 +665,7 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
ssip_error(cl);
/* Fall through */
case INIT:
+ case HANDSHAKE:
spin_lock(&ssi->lock);
ssi->main_state = HANDSHAKE;
if (!ssi->waketest) {
@@ -688,9 +683,6 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
msg->complete = ssip_release_cmd;
hsi_async_write(cl, msg);
break;
- case HANDSHAKE:
- /* Ignore */
- break;
default:
dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
break;
@@ -939,9 +931,11 @@ static int ssip_pn_open(struct net_device *dev)
ssi->waketest = 1;
ssi_waketest(cl, 1); /* FIXME: To be removed */
}
- ssi->main_state = INIT;
+ ssi->main_state = HANDSHAKE;
spin_unlock_bh(&ssi->lock);
+ ssip_send_bootinfo_req_cmd(cl);
+
return 0;
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 1161d68a1863..56dd261f7142 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -219,6 +219,21 @@ error0:
}
EXPORT_SYMBOL_GPL(vmbus_open);
+/* Used for Hyper-V Socket: a guest client's connect() to the host */
+int vmbus_send_tl_connect_request(const uuid_le *shv_guest_service_id,
+ const uuid_le *shv_host_service_id)
+{
+ struct vmbus_channel_tl_connect_request conn_msg;
+
+ memset(&conn_msg, 0, sizeof(conn_msg));
+ conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
+ conn_msg.guest_endpoint_id = *shv_guest_service_id;
+ conn_msg.host_service_id = *shv_host_service_id;
+
+ return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
+}
+EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
+
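A guest-side consumer would call the new export roughly as follows; both GUIDs below are placeholders (NULL_UUID_LE), not real service IDs, and the function name is hypothetical:

/* Hypothetical caller; NULL_UUID_LE stands in for real service GUIDs. */
static int example_hvsock_connect(void)
{
	uuid_le guest_id = NULL_UUID_LE;	/* placeholder endpoint */
	uuid_le host_id = NULL_UUID_LE;		/* placeholder service */

	return vmbus_send_tl_connect_request(&guest_id, &host_id);
}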
/*
* create_gpadl_header - Creates a gpadl for the specified buffer
*/
@@ -624,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u64 aligned_data = 0;
int ret;
bool signal = false;
+ bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -643,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
- &signal);
+ &signal, lock);
/*
* Signalling the host is conditional on many factors:
@@ -659,6 +675,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
* If we cannot write to the ring-buffer; signal the host
* even if we may not have written anything. This is a rare
* enough condition that it should not matter.
+ * NOTE: in this case, the hvsock channel is an exception, because
+ * it appears the host side's hvsock implementation has a throttling
+ * mechanism which can otherwise hurt performance.
*/
if (channel->signal_policy)
@@ -666,7 +685,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
else
kick_q = true;
- if (((ret == 0) && kick_q && signal) || (ret))
+ if (((ret == 0) && kick_q && signal) ||
+ (ret && !is_hvsock_channel(channel)))
vmbus_setevent(channel);
return ret;
@@ -719,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
+ bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
@@ -755,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+ &signal, lock);
/*
* Signalling the host is conditional on many factors:
@@ -818,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
+ bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen;
packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -837,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+ &signal, lock);
if (ret == 0 && signal)
vmbus_setevent(channel);
@@ -862,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
+ bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -900,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+ &signal, lock);
if (ret == 0 && signal)
vmbus_setevent(channel);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 1c1ad47042c5..38b682bab85a 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -28,12 +28,127 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/hyperv.h>
#include "hyperv_vmbus.h"
-static void init_vp_index(struct vmbus_channel *channel,
- const uuid_le *type_guid);
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
+
+static const struct vmbus_device vmbus_devs[] = {
+ /* IDE */
+ { .dev_type = HV_IDE,
+ HV_IDE_GUID,
+ .perf_device = true,
+ },
+
+ /* SCSI */
+ { .dev_type = HV_SCSI,
+ HV_SCSI_GUID,
+ .perf_device = true,
+ },
+
+ /* Fibre Channel */
+ { .dev_type = HV_FC,
+ HV_SYNTHFC_GUID,
+ .perf_device = true,
+ },
+
+ /* Synthetic NIC */
+ { .dev_type = HV_NIC,
+ HV_NIC_GUID,
+ .perf_device = true,
+ },
+
+ /* Network Direct */
+ { .dev_type = HV_ND,
+ HV_ND_GUID,
+ .perf_device = true,
+ },
+
+ /* PCIE */
+ { .dev_type = HV_PCIE,
+ HV_PCIE_GUID,
+ .perf_device = true,
+ },
+
+ /* Synthetic Frame Buffer */
+ { .dev_type = HV_FB,
+ HV_SYNTHVID_GUID,
+ .perf_device = false,
+ },
+
+ /* Synthetic Keyboard */
+ { .dev_type = HV_KBD,
+ HV_KBD_GUID,
+ .perf_device = false,
+ },
+
+ /* Synthetic MOUSE */
+ { .dev_type = HV_MOUSE,
+ HV_MOUSE_GUID,
+ .perf_device = false,
+ },
+
+ /* KVP */
+ { .dev_type = HV_KVP,
+ HV_KVP_GUID,
+ .perf_device = false,
+ },
+
+ /* Time Synch */
+ { .dev_type = HV_TS,
+ HV_TS_GUID,
+ .perf_device = false,
+ },
+
+ /* Heartbeat */
+ { .dev_type = HV_HB,
+ HV_HEART_BEAT_GUID,
+ .perf_device = false,
+ },
+
+ /* Shutdown */
+ { .dev_type = HV_SHUTDOWN,
+ HV_SHUTDOWN_GUID,
+ .perf_device = false,
+ },
+
+ /* File copy */
+ { .dev_type = HV_FCOPY,
+ HV_FCOPY_GUID,
+ .perf_device = false,
+ },
+
+ /* Backup */
+ { .dev_type = HV_BACKUP,
+ HV_VSS_GUID,
+ .perf_device = false,
+ },
+
+ /* Dynamic Memory */
+ { .dev_type = HV_DM,
+ HV_DM_GUID,
+ .perf_device = false,
+ },
+
+ /* Unknown GUID */
+ { .dev_type = HV_UNKOWN,
+ .perf_device = false,
+ },
+};
+
+static u16 hv_get_dev_type(const uuid_le *guid)
+{
+ u16 i;
+
+ for (i = HV_IDE; i < HV_UNKOWN; i++) {
+ if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
+ return i;
+ }
+ pr_info("Unknown GUID: %pUl\n", guid);
+ return i;
+}
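One consequence of the new table is that the perf_device flag can be looked up directly from a GUID; an illustrative helper (the name is an assumption, not part of this patch):

/* Illustrative only: GUID -> "is this a performance-critical device?" */
static bool example_is_perf_device(const uuid_le *guid)
{
	return vmbus_devs[hv_get_dev_type(guid)].perf_device;
}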
/**
* vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
@@ -144,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void)
return NULL;
channel->id = atomic_inc_return(&chan_num);
+ channel->acquire_ring_lock = true;
spin_lock_init(&channel->inbound_lock);
spin_lock_init(&channel->lock);
@@ -195,6 +311,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
vmbus_release_relid(relid);
BUG_ON(!channel->rescind);
+ BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
if (channel->target_cpu != get_cpu()) {
put_cpu();
@@ -206,9 +323,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
}
if (channel->primary_channel == NULL) {
- mutex_lock(&vmbus_connection.channel_mutex);
list_del(&channel->listentry);
- mutex_unlock(&vmbus_connection.channel_mutex);
primary_channel = channel;
} else {
@@ -251,6 +366,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
struct vmbus_channel *channel;
bool fnew = true;
unsigned long flags;
+ u16 dev_type;
+ int ret;
/* Make sure this is a new offer */
mutex_lock(&vmbus_connection.channel_mutex);
@@ -288,7 +405,9 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
goto err_free_chan;
}
- init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);
+ dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type);
+
+ init_vp_index(newchannel, dev_type);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
@@ -325,12 +444,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
if (!newchannel->device_obj)
goto err_deq_chan;
+ newchannel->device_obj->device_id = dev_type;
/*
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
*/
- if (vmbus_device_register(newchannel->device_obj) != 0) {
+ mutex_lock(&vmbus_connection.channel_mutex);
+ ret = vmbus_device_register(newchannel->device_obj);
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
newchannel->offermsg.child_relid);
kfree(newchannel->device_obj);
@@ -358,37 +482,6 @@ err_free_chan:
free_channel(newchannel);
}
-enum {
- IDE = 0,
- SCSI,
- FC,
- NIC,
- ND_NIC,
- PCIE,
- MAX_PERF_CHN,
-};
-
-/*
- * This is an array of device_ids (device types) that are performance critical.
- * We attempt to distribute the interrupt load for these devices across
- * all available CPUs.
- */
-static const struct hv_vmbus_device_id hp_devs[] = {
- /* IDE */
- { HV_IDE_GUID, },
- /* Storage - SCSI */
- { HV_SCSI_GUID, },
- /* Storage - FC */
- { HV_SYNTHFC_GUID, },
- /* Network */
- { HV_NIC_GUID, },
- /* NetworkDirect Guest RDMA */
- { HV_ND_GUID, },
- /* PCI Express Pass Through */
- { HV_PCIE_GUID, },
-};
-
-
/*
* We use this state to statically distribute the channel interrupt load.
*/
@@ -405,22 +498,15 @@ static int next_numa_node_id;
* For pre-win8 hosts or non-performance critical channels we assign the
* first CPU in the first NUMA node.
*/
-static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
u32 cur_cpu;
- int i;
- bool perf_chn = false;
+ bool perf_chn = vmbus_devs[dev_type].perf_device;
struct vmbus_channel *primary = channel->primary_channel;
int next_node;
struct cpumask available_mask;
struct cpumask *alloced_mask;
- for (i = IDE; i < MAX_PERF_CHN; i++) {
- if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) {
- perf_chn = true;
- break;
- }
- }
if ((vmbus_proto_version == VERSION_WS2008) ||
(vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
/*
@@ -469,6 +555,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
cpumask_of_node(primary->numa_node));
cur_cpu = -1;
+
+ /*
+ * Normally the Hyper-V host doesn't create more subchannels than there
+ * are VCPUs on the node, but it is possible when not all present VCPUs
+ * on the node are initialized by the guest. Clear alloced_cpus_in_node
+ * to start over.
+ */
+ if (cpumask_equal(&primary->alloced_cpus_in_node,
+ cpumask_of_node(primary->numa_node)))
+ cpumask_clear(&primary->alloced_cpus_in_node);
+
while (true) {
cur_cpu = cpumask_next(cur_cpu, &available_mask);
if (cur_cpu >= nr_cpu_ids) {
@@ -498,6 +595,32 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
channel->target_vp = hv_context.vp_index[cur_cpu];
}
+static void vmbus_wait_for_unload(void)
+{
+ int cpu = smp_processor_id();
+ void *page_addr = hv_context.synic_message_page[cpu];
+ struct hv_message *msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ struct vmbus_channel_message_header *hdr;
+ bool unloaded = false;
+
+ while (1) {
+ if (READ_ONCE(msg->header.message_type) == HVMSG_NONE) {
+ mdelay(10);
+ continue;
+ }
+
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+ unloaded = true;
+
+ vmbus_signal_eom(msg);
+
+ if (unloaded)
+ break;
+ }
+}
+
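The crash-safe busy-wait above generalizes to any awaited message type; a sketch under the same assumptions (the helper name is hypothetical):

/*
 * Spin on the per-CPU SynIC message slot, consuming each message with
 * vmbus_signal_eom(), until the awaited message type arrives.
 */
static void example_poll_for_msgtype(struct hv_message *msg, u32 msgtype)
{
	struct vmbus_channel_message_header *hdr;
	bool done = false;

	while (!done) {
		if (READ_ONCE(msg->header.message_type) == HVMSG_NONE) {
			mdelay(10);
			continue;
		}
		hdr = (struct vmbus_channel_message_header *)msg->u.payload;
		done = (hdr->msgtype == msgtype);
		vmbus_signal_eom(msg);
	}
}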
/*
* vmbus_unload_response - Handler for the unload response.
*/
@@ -510,7 +633,7 @@ static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
complete(&vmbus_connection.unload_event);
}
-void vmbus_initiate_unload(void)
+void vmbus_initiate_unload(bool crash)
{
struct vmbus_channel_message_header hdr;
@@ -523,7 +646,14 @@ void vmbus_initiate_unload(void)
hdr.msgtype = CHANNELMSG_UNLOAD;
vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
- wait_for_completion(&vmbus_connection.unload_event);
+ /*
+ * vmbus_initiate_unload() is also called on crash and the crash can be
+ * happening in an interrupt context, where scheduling is impossible.
+ */
+ if (!crash)
+ wait_for_completion(&vmbus_connection.unload_event);
+ else
+ vmbus_wait_for_unload();
}
/*
@@ -592,6 +722,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
struct device *dev;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
+
+ mutex_lock(&vmbus_connection.channel_mutex);
channel = relid2channel(rescind->child_relid);
if (channel == NULL) {
@@ -600,7 +732,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* vmbus_process_offer(), we have already invoked
* vmbus_release_relid() on error.
*/
- return;
+ goto out;
}
spin_lock_irqsave(&channel->lock, flags);
@@ -608,6 +740,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
spin_unlock_irqrestore(&channel->lock, flags);
if (channel->device_obj) {
+ if (channel->chn_rescind_callback) {
+ channel->chn_rescind_callback(channel);
+ goto out;
+ }
/*
* We will have to unregister this device from the
* driver core.
@@ -621,7 +757,24 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
hv_process_channel_removal(channel,
channel->offermsg.child_relid);
}
+
+out:
+ mutex_unlock(&vmbus_connection.channel_mutex);
+}
+
+void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
+{
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ BUG_ON(!is_hvsock_channel(channel));
+
+ channel->rescind = true;
+ vmbus_device_unregister(channel->device_obj);
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
+EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
+
/*
* vmbus_onoffers_delivered -
@@ -825,6 +978,10 @@ struct vmbus_channel_message_table_entry
{CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response},
{CHANNELMSG_UNLOAD, 0, NULL},
{CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response},
+ {CHANNELMSG_18, 0, NULL},
+ {CHANNELMSG_19, 0, NULL},
+ {CHANNELMSG_20, 0, NULL},
+ {CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL},
};
/*
@@ -973,3 +1130,10 @@ bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
+
+void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
+ void (*chn_rescind_cb)(struct vmbus_channel *))
+{
+ channel->chn_rescind_callback = chn_rescind_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
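
A driver opting in to rescind handling would register its callback roughly like this; both functions below are hypothetical:

/* Hypothetical hv_sock-style driver registering a rescind handler. */
static void example_on_rescind(struct vmbus_channel *chan)
{
	/* tear down any driver state tied to chan */
}

static void example_channel_setup(struct vmbus_channel *chan)
{
	vmbus_set_chn_rescind_callback(chan, example_on_rescind);
}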
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 3dc5a9c7fad6..d02f1373dd98 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -88,8 +88,16 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
* This has been the behavior pre-win8. This is not
* perf issue and having all channel messages delivered on CPU 0
* would be ok.
+ * For post-win8 hosts, we support receiving channel messages on
+ * all the CPUs. This is needed for kexec to work correctly where
+ * the CPU attempting to connect may not be CPU 0.
*/
- msg->target_vcpu = 0;
+ if (version >= VERSION_WIN8_1) {
+ msg->target_vcpu = hv_context.vp_index[get_cpu()];
+ put_cpu();
+ } else {
+ msg->target_vcpu = 0;
+ }
/*
* Add to list before we send the request since we may
@@ -236,7 +244,7 @@ void vmbus_disconnect(void)
/*
* First send the unload request to the host.
*/
- vmbus_initiate_unload();
+ vmbus_initiate_unload(false);
if (vmbus_connection.work_queue) {
drain_workqueue(vmbus_connection.work_queue);
@@ -288,7 +296,8 @@ struct vmbus_channel *relid2channel(u32 relid)
struct list_head *cur, *tmp;
struct vmbus_channel *cur_sc;
- mutex_lock(&vmbus_connection.channel_mutex);
+ BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (channel->offermsg.child_relid == relid) {
found_channel = channel;
@@ -307,7 +316,6 @@ struct vmbus_channel *relid2channel(u32 relid)
}
}
}
- mutex_unlock(&vmbus_connection.channel_mutex);
return found_channel;
}
@@ -474,7 +482,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
/*
* vmbus_set_event - Send an event notification to the parent
*/
-int vmbus_set_event(struct vmbus_channel *channel)
+void vmbus_set_event(struct vmbus_channel *channel)
{
u32 child_relid = channel->offermsg.child_relid;
@@ -485,5 +493,5 @@ int vmbus_set_event(struct vmbus_channel *channel)
(child_relid >> 5));
}
- return hv_signal_event(channel->sig_event);
+ hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 11bca51ef5ff..a1c086ba3b9a 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -204,6 +204,8 @@ int hv_init(void)
sizeof(int) * NR_CPUS);
memset(hv_context.event_dpc, 0,
sizeof(void *) * NR_CPUS);
+ memset(hv_context.msg_dpc, 0,
+ sizeof(void *) * NR_CPUS);
memset(hv_context.clk_evt, 0,
sizeof(void *) * NR_CPUS);
@@ -295,8 +297,14 @@ void hv_cleanup(void)
* Cleanup the TSC page based CS.
*/
if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
- clocksource_change_rating(&hyperv_cs_tsc, 10);
- clocksource_unregister(&hyperv_cs_tsc);
+ /*
+ * Crash can happen in an interrupt context and unregistering
+ * a clocksource is impossible and redundant in this case.
+ */
+ if (!oops_in_progress) {
+ clocksource_change_rating(&hyperv_cs_tsc, 10);
+ clocksource_unregister(&hyperv_cs_tsc);
+ }
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
@@ -337,22 +345,6 @@ int hv_post_message(union hv_connection_id connection_id,
return status & 0xFFFF;
}
-
-/*
- * hv_signal_event -
- * Signal an event on the specified connection using the hypervisor event IPC.
- *
- * This involves a hypercall.
- */
-int hv_signal_event(void *con_id)
-{
- u64 status;
-
- status = hv_do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL);
-
- return status & 0xFFFF;
-}
-
static int hv_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -425,6 +417,13 @@ int hv_synic_alloc(void)
}
tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+ hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
+ if (hv_context.msg_dpc[cpu] == NULL) {
+ pr_err("Unable to allocate event dpc\n");
+ goto err;
+ }
+ tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);
+
hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
if (hv_context.clk_evt[cpu] == NULL) {
pr_err("Unable to allocate clock event device\n");
@@ -466,6 +465,7 @@ err:
static void hv_synic_free_cpu(int cpu)
{
kfree(hv_context.event_dpc[cpu]);
+ kfree(hv_context.msg_dpc[cpu]);
kfree(hv_context.clk_evt[cpu]);
if (hv_context.synic_event_page[cpu])
free_page((unsigned long)hv_context.synic_event_page[cpu]);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index c37a71e13de0..23c70799ad8a 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -251,7 +251,6 @@ void hv_fcopy_onchannelcallback(void *context)
*/
fcopy_transaction.recv_len = recvlen;
- fcopy_transaction.recv_channel = channel;
fcopy_transaction.recv_req_id = requestid;
fcopy_transaction.fcopy_msg = fcopy_msg;
@@ -317,6 +316,7 @@ static void fcopy_on_reset(void)
int hv_fcopy_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
+ fcopy_transaction.recv_channel = srv->channel;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index d4ab81bcd515..9b9b370fe22a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -639,7 +639,6 @@ void hv_kvp_onchannelcallback(void *context)
*/
kvp_transaction.recv_len = recvlen;
- kvp_transaction.recv_channel = channel;
kvp_transaction.recv_req_id = requestid;
kvp_transaction.kvp_msg = kvp_msg;
@@ -688,6 +687,7 @@ int
hv_kvp_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
+ kvp_transaction.recv_channel = srv->channel;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 67def4a831c8..3fba14e88f03 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -263,7 +263,6 @@ void hv_vss_onchannelcallback(void *context)
*/
vss_transaction.recv_len = recvlen;
- vss_transaction.recv_channel = channel;
vss_transaction.recv_req_id = requestid;
vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
@@ -337,6 +336,7 @@ hv_vss_init(struct hv_util_service *srv)
return -ENOTSUPP;
}
recv_buffer = srv->recv_buffer;
+ vss_transaction.recv_channel = srv->channel;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 7994ec2e4151..d5acaa2d8e61 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -322,6 +322,7 @@ static int util_probe(struct hv_device *dev,
srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
+ srv->channel = dev->channel;
if (srv->util_init) {
ret = srv->util_init(srv);
if (ret) {
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 4f42c0e20c20..9a9983fa4531 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -310,6 +310,9 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
return hvt;
err_free_hvt:
+ spin_lock(&hvt_list_lock);
+ list_del(&hvt->list);
+ spin_unlock(&hvt_list_lock);
kfree(hvt);
return NULL;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 4ebc796b4f33..12321b93a756 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -256,12 +256,6 @@ struct hv_monitor_page {
u8 rsvdz4[1984];
};
-/* Declare the various hypercall operations. */
-enum hv_call_code {
- HVCALL_POST_MESSAGE = 0x005c,
- HVCALL_SIGNAL_EVENT = 0x005d,
-};
-
/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
union hv_connection_id connectionid;
@@ -449,10 +443,11 @@ struct hv_context {
u32 vp_index[NR_CPUS];
/*
* Starting with win8, we can take channel interrupts on any CPU;
- * we will manage the tasklet that handles events on a per CPU
+ * we will manage the tasklets that handle events and messages on a per CPU
* basis.
*/
struct tasklet_struct *event_dpc[NR_CPUS];
+ struct tasklet_struct *msg_dpc[NR_CPUS];
/*
* To optimize the mapping of relid to channel, maintain
* per-cpu list of the channels based on their CPU affinity.
@@ -501,8 +496,6 @@ extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size);
-extern int hv_signal_event(void *con_id);
-
extern int hv_synic_alloc(void);
extern void hv_synic_free(void);
@@ -531,7 +524,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct kvec *kv_list,
- u32 kv_count, bool *signal);
+ u32 kv_count, bool *signal, bool lock);
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
void *buffer, u32 buflen, u32 *buffer_actual_len,
@@ -626,6 +619,30 @@ struct vmbus_channel_message_table_entry {
extern struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT];
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg)
+{
+ msg->header.message_type = HVMSG_NONE;
+
+ /*
+ * Make sure the write to MessageType (i.e. set to
+ * HVMSG_NONE) happens before we read the
+ * MessagePending and EOMing. Otherwise, the EOMing
+ * will not deliver any more messages since there is
+ * no empty slot.
+ */
+ mb();
+
+ if (msg->header.message_flags.msg_pending) {
+ /*
+ * This will cause message queue rescan to
+ * possibly deliver another msg from the
+ * hypervisor
+ */
+ wrmsrl(HV_X64_MSR_EOM, 0);
+ }
+}
+
/* General vmbus interface */
struct hv_device *vmbus_device_create(const uuid_le *type,
@@ -650,9 +667,10 @@ void vmbus_disconnect(void);
int vmbus_post_msg(void *buffer, size_t buflen);
-int vmbus_set_event(struct vmbus_channel *channel);
+void vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);
+void vmbus_on_msg_dpc(unsigned long data);
int hv_kvp_init(struct hv_util_service *);
void hv_kvp_deinit(void);
@@ -665,7 +683,7 @@ void hv_vss_onchannelcallback(void *);
int hv_fcopy_init(struct hv_util_service *);
void hv_fcopy_deinit(void);
void hv_fcopy_onchannelcallback(void *);
-void vmbus_initiate_unload(void);
+void vmbus_initiate_unload(bool crash);
static inline void hv_poll_channel(struct vmbus_channel *channel,
void (*cb)(void *))
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index b53702ce692f..a40a73a7b71d 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
* there is room for the producer to send the pending packet.
*/
-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
- struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
{
u32 cur_write_sz;
u32 r_size;
- u32 write_loc = rbi->ring_buffer->write_index;
+ u32 write_loc;
u32 read_loc = rbi->ring_buffer->read_index;
- u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+ u32 pending_sz;
+ /*
+ * Issue a full memory barrier before making the signaling decision.
+ * If the read of pending_sz (below) were reordered before the caller
+ * commits the new read index, we could have a problem: if the host
+ * sets pending_sz after we have sampled it and then goes to sleep
+ * before we commit the read index, we would miss sending the
+ * interrupt.
+ */
+ mb();
+
+ pending_sz = rbi->ring_buffer->pending_send_sz;
+ write_loc = rbi->ring_buffer->write_index;
/* If the other end is not blocked on write don't bother. */
if (pending_sz == 0)
return false;
@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
read_loc - write_loc;
- if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+ if (cur_write_sz >= pending_sz)
return true;
return false;
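
The free-space computation used above, isolated for clarity; a hedged sketch (names are assumptions):

/*
 * Free bytes available to the writer of a ring of ring_size bytes,
 * as computed above; illustrative only.
 */
static u32 example_free_space(u32 ring_size, u32 write_loc, u32 read_loc)
{
	return write_loc >= read_loc ?
		ring_size - (write_loc - read_loc) :
		read_loc - write_loc;
}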
@@ -314,7 +328,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct kvec *kv_list, u32 kv_count, bool *signal)
+ struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -324,14 +338,15 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
u32 next_write_location;
u32 old_write;
u64 prev_indices = 0;
- unsigned long flags;
+ unsigned long flags = 0;
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
totalbytes_towrite += sizeof(u64);
- spin_lock_irqsave(&outring_info->ring_lock, flags);
+ if (lock)
+ spin_lock_irqsave(&outring_info->ring_lock, flags);
hv_get_ringbuffer_availbytes(outring_info,
&bytes_avail_toread,
@@ -343,7 +358,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
* is empty since the read index == write index.
*/
if (bytes_avail_towrite <= totalbytes_towrite) {
- spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ if (lock)
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
@@ -374,7 +390,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
hv_set_next_write_location(outring_info, next_write_location);
- spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ if (lock)
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
*signal = hv_need_to_signal(old_write, outring_info);
return 0;
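
Stripped of the copy logic, the new lock-optional shape of the writer looks like the sketch below; channels that guarantee a single writer (hv_sock) set acquire_ring_lock to false and skip the spinlock entirely:

/* Shape of the lock-optional write path; payload copy elided. */
static int example_write(struct hv_ring_buffer_info *ring, bool lock)
{
	unsigned long flags = 0;

	if (lock)
		spin_lock_irqsave(&ring->ring_lock, flags);
	/* ... copy payload and publish the new write index ... */
	if (lock)
		spin_unlock_irqrestore(&ring->ring_lock, flags);
	return 0;
}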
@@ -388,7 +405,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
u32 bytes_avail_toread;
u32 next_read_location = 0;
u64 prev_indices = 0;
- unsigned long flags;
struct vmpacket_descriptor desc;
u32 offset;
u32 packetlen;
@@ -397,7 +413,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
if (buflen <= 0)
return -EINVAL;
- spin_lock_irqsave(&inring_info->ring_lock, flags);
*buffer_actual_len = 0;
*requestid = 0;
@@ -412,7 +427,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
* No error is set when there is even no header, drivers are
* supposed to analyze buffer_actual_len.
*/
- goto out_unlock;
+ return ret;
}
next_read_location = hv_get_next_read_location(inring_info);
@@ -425,15 +440,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
*buffer_actual_len = packetlen;
*requestid = desc.trans_id;
- if (bytes_avail_toread < packetlen + offset) {
- ret = -EAGAIN;
- goto out_unlock;
- }
+ if (bytes_avail_toread < packetlen + offset)
+ return -EAGAIN;
- if (packetlen > buflen) {
- ret = -ENOBUFS;
- goto out_unlock;
- }
+ if (packetlen > buflen)
+ return -ENOBUFS;
next_read_location =
hv_get_next_readlocation_withoffset(inring_info, offset);
@@ -458,9 +469,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
- *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
+ *signal = hv_need_to_signal_on_read(inring_info);
-out_unlock:
- spin_unlock_irqrestore(&inring_info->ring_lock, flags);
return ret;
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 328e4c3808e0..64713ff47e36 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,7 +45,6 @@
static struct acpi_device *hv_acpi_dev;
-static struct tasklet_struct msg_dpc;
static struct completion probe_event;
@@ -477,6 +476,24 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
}
static DEVICE_ATTR_RO(channel_vp_mapping);
+static ssize_t vendor_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t device_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ return sprintf(buf, "0x%x\n", hv_dev->device_id);
+}
+static DEVICE_ATTR_RO(device);
+
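From userspace the two new attributes read back as hex strings; a sketch (the device path is a placeholder, not a real instance):

/* Userspace sketch; replace <uuid> with a real vmbus device instance. */
#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/bus/vmbus/devices/<uuid>/vendor", "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("vendor: %s", buf);	/* e.g. 0x1414 */
		fclose(f);
	}
	return 0;
}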
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
&dev_attr_id.attr,
@@ -502,6 +519,8 @@ static struct attribute *vmbus_attrs[] = {
&dev_attr_in_read_bytes_avail.attr,
&dev_attr_in_write_bytes_avail.attr,
&dev_attr_channel_vp_mapping.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
NULL,
};
ATTRIBUTE_GROUPS(vmbus);
@@ -562,6 +581,10 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
struct hv_driver *drv = drv_to_hv_drv(driver);
struct hv_device *hv_dev = device_to_hv_device(device);
+ /* The hv_sock driver handles all hv_sock offers. */
+ if (is_hvsock_channel(hv_dev->channel))
+ return drv->hvsock;
+
if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
return 1;
@@ -685,28 +708,10 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
if (dev->event_handler)
dev->event_handler(dev);
- msg->header.message_type = HVMSG_NONE;
-
- /*
- * Make sure the write to MessageType (ie set to
- * HVMSG_NONE) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- mb();
-
- if (msg->header.message_flags.msg_pending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- wrmsrl(HV_X64_MSR_EOM, 0);
- }
+ vmbus_signal_eom(msg);
}
-static void vmbus_on_msg_dpc(unsigned long data)
+void vmbus_on_msg_dpc(unsigned long data)
{
int cpu = smp_processor_id();
void *page_addr = hv_context.synic_message_page[cpu];
@@ -716,52 +721,32 @@ static void vmbus_on_msg_dpc(unsigned long data)
struct vmbus_channel_message_table_entry *entry;
struct onmessage_work_context *ctx;
- while (1) {
- if (msg->header.message_type == HVMSG_NONE)
- /* no msg */
- break;
+ if (msg->header.message_type == HVMSG_NONE)
+ /* no msg */
+ return;
- hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
- if (hdr->msgtype >= CHANNELMSG_COUNT) {
- WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
- goto msg_handled;
- }
+ if (hdr->msgtype >= CHANNELMSG_COUNT) {
+ WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
+ goto msg_handled;
+ }
- entry = &channel_message_table[hdr->msgtype];
- if (entry->handler_type == VMHT_BLOCKING) {
- ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
- if (ctx == NULL)
- continue;
+ entry = &channel_message_table[hdr->msgtype];
+ if (entry->handler_type == VMHT_BLOCKING) {
+ ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (ctx == NULL)
+ return;
- INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, msg, sizeof(*msg));
+ INIT_WORK(&ctx->work, vmbus_onmessage_work);
+ memcpy(&ctx->msg, msg, sizeof(*msg));
- queue_work(vmbus_connection.work_queue, &ctx->work);
- } else
- entry->message_handler(hdr);
+ queue_work(vmbus_connection.work_queue, &ctx->work);
+ } else
+ entry->message_handler(hdr);
msg_handled:
- msg->header.message_type = HVMSG_NONE;
-
- /*
- * Make sure the write to MessageType (ie set to
- * HVMSG_NONE) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- mb();
-
- if (msg->header.message_flags.msg_pending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- wrmsrl(HV_X64_MSR_EOM, 0);
- }
- }
+ vmbus_signal_eom(msg);
}
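Both call sites above now funnel into the new vmbus_signal_eom() helper. Its definition is not part of these hunks; a minimal sketch, reconstructed from the deleted lines (the real helper is expected to live elsewhere in this series, likely hyperv_vmbus.h):

    /* Sketch reconstructed from the code removed above; not the
     * authoritative definition of the helper. */
    static inline void vmbus_signal_eom(struct hv_message *msg)
    {
    	msg->header.message_type = HVMSG_NONE;

    	/*
    	 * Make sure the write to message_type (i.e. set to HVMSG_NONE)
    	 * happens before we read msg_pending and EOM. Otherwise the
    	 * EOMing will not deliver any more messages since there is no
    	 * empty slot.
    	 */
    	mb();

    	if (msg->header.message_flags.msg_pending) {
    		/* Rescan the queue; may deliver another hypervisor msg */
    		wrmsrl(HV_X64_MSR_EOM, 0);
    	}
    }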
static void vmbus_isr(void)
@@ -814,7 +799,7 @@ static void vmbus_isr(void)
if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
hv_process_timer_expiration(msg, cpu);
else
- tasklet_schedule(&msg_dpc);
+ tasklet_schedule(hv_context.msg_dpc[cpu]);
}
}
@@ -838,8 +823,6 @@ static int vmbus_bus_init(void)
return ret;
}
- tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
-
ret = bus_register(&hv_bus);
if (ret)
goto err_cleanup;
@@ -957,6 +940,7 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
memcpy(&child_device_obj->dev_instance, instance,
sizeof(uuid_le));
+ child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
return child_device_obj;
@@ -1268,7 +1252,7 @@ static void hv_kexec_handler(void)
int cpu;
hv_synic_clockevents_cleanup();
- vmbus_initiate_unload();
+ vmbus_initiate_unload(false);
for_each_online_cpu(cpu)
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
hv_cleanup();
@@ -1276,7 +1260,7 @@ static void hv_kexec_handler(void)
static void hv_crash_handler(struct pt_regs *regs)
{
- vmbus_initiate_unload();
+ vmbus_initiate_unload(true);
/*
* In crash handler we can't schedule synic cleanup for all CPUs,
* doing the cleanup for current CPU only. This should be sufficient
@@ -1334,7 +1318,8 @@ static void __exit vmbus_exit(void)
hv_synic_clockevents_cleanup();
vmbus_disconnect();
hv_remove_vmbus_irq();
- tasklet_kill(&msg_dpc);
+ for_each_online_cpu(cpu)
+ tasklet_kill(hv_context.msg_dpc[cpu]);
vmbus_free_channels();
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
unregister_die_notifier(&hyperv_die_block);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 60fb80bd353d..5c2d13a687aa 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -685,6 +685,20 @@ config SENSORS_LTC2945
This driver can also be built as a module. If so, the module will
be called ltc2945.
+config SENSORS_LTC2990
+ tristate "Linear Technology LTC2990 (current monitoring mode only)"
+ depends on I2C
+ help
+ If you say yes here you get support for Linear Technology LTC2990
+ I2C System Monitor. The LTC2990 supports a combination of voltage,
+ current and temperature monitoring. In addition to the Vcc supply
+ voltage and chip temperature, this driver currently only supports
+ reading two currents, measured as two differential voltages across
+ series resistors.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2990.
+
config SENSORS_LTC4151
tristate "Linear Technology LTC4151"
depends on I2C
@@ -1127,7 +1141,7 @@ config SENSORS_NTC_THERMISTOR
Currently, this driver supports
NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, NCP15WL333,
- and NCP03WF104 from Murata and B57330V2103 from EPCOS.
+ NCP03WF104 and NCP15XH103 from Murata and B57330V2103 from EPCOS.
This driver can also be built as a module. If so, the module
will be called ntc-thermistor.
@@ -1176,6 +1190,21 @@ config SENSORS_NCT7904
This driver can also be built as a module. If so, the module
will be called nct7904.
+config SENSORS_NSA320
+ tristate "ZyXEL NSA320 and compatible fan speed and temperature sensors"
+ depends on GPIOLIB && OF
+ depends on MACH_KIRKWOOD || COMPILE_TEST
+ help
+ If you say yes here you get support for hardware monitoring
+ for the ZyXEL NSA320 Media Server and other compatible devices
+ (probably the NSA325 and some NSA310 variants).
+
+ The sensor data is taken from a Holtek HT46R065 microcontroller
+ connected to GPIO lines.
+
+ This driver can also be built as a module. If so, the module
+ will be called nsa320-hwmon.
+
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 30c94df31465..58cc3acba7e7 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_SENSORS_LM95234) += lm95234.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
obj-$(CONFIG_SENSORS_LTC2945) += ltc2945.o
+obj-$(CONFIG_SENSORS_LTC2990) += ltc2990.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
obj-$(CONFIG_SENSORS_LTC4222) += ltc4222.o
@@ -123,6 +124,7 @@ obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o
obj-$(CONFIG_SENSORS_NCT7904) += nct7904.o
+obj-$(CONFIG_SENSORS_NSA320) += nsa320-hwmon.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
@@ -149,7 +151,7 @@ obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
-obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress.o
+obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 17ae2eb26ce2..b550ba5fa58a 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -67,6 +67,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
enum iio_chan_type type;
struct iio_channel *channels;
const char *name = "iio_hwmon";
+ char *sname;
if (dev->of_node && dev->of_node->name)
name = dev->of_node->name;
@@ -144,7 +145,15 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->attr_group.attrs = st->attrs;
st->groups[0] = &st->attr_group;
- st->hwmon_dev = hwmon_device_register_with_groups(dev, name, st,
+
+ sname = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!sname) {
+ ret = -ENOMEM;
+ goto error_release_channels;
+ }
+
+ strreplace(sname, '-', '_');
+ st->hwmon_dev = hwmon_device_register_with_groups(dev, sname, st,
st->groups);
if (IS_ERR(st->hwmon_dev)) {
ret = PTR_ERR(st->hwmon_dev);
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
new file mode 100644
index 000000000000..8f8fe059ab48
--- /dev/null
+++ b/drivers/hwmon/ltc2990.c
@@ -0,0 +1,161 @@
+/*
+ * Driver for Linear Technology LTC2990 power monitor
+ *
+ * Copyright (C) 2014 Topic Embedded Products
+ * Author: Mike Looijmans <mike.looijmans@topic.nl>
+ *
+ * License: GPLv2
+ *
+ * This driver assumes the chip is wired as a dual current monitor, and
+ * reports the voltage drop across two series resistors. It also reports
+ * the chip's internal temperature and Vcc power supply voltage.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define LTC2990_STATUS 0x00
+#define LTC2990_CONTROL 0x01
+#define LTC2990_TRIGGER 0x02
+#define LTC2990_TINT_MSB 0x04
+#define LTC2990_V1_MSB 0x06
+#define LTC2990_V2_MSB 0x08
+#define LTC2990_V3_MSB 0x0A
+#define LTC2990_V4_MSB 0x0C
+#define LTC2990_VCC_MSB 0x0E
+
+#define LTC2990_CONTROL_KELVIN BIT(7)
+#define LTC2990_CONTROL_SINGLE BIT(6)
+#define LTC2990_CONTROL_MEASURE_ALL (0x3 << 3)
+#define LTC2990_CONTROL_MODE_CURRENT 0x06
+#define LTC2990_CONTROL_MODE_VOLTAGE 0x07
+
+/* convert raw register value to sign-extended integer in 16-bit range */
+static int ltc2990_voltage_to_int(int raw)
+{
+ if (raw & BIT(14))
+ return -(0x4000 - (raw & 0x3FFF)) << 2;
+ else
+ return (raw & 0x3FFF) << 2;
+}
+
+/* Return the converted value from the given register in uV or mC */
+static int ltc2990_get_value(struct i2c_client *i2c, u8 reg, int *result)
+{
+ int val;
+
+ val = i2c_smbus_read_word_swapped(i2c, reg);
+ if (unlikely(val < 0))
+ return val;
+
+ switch (reg) {
+ case LTC2990_TINT_MSB:
+ /* internal temp, 0.0625 degrees/LSB, 13-bit */
+ val = (val & 0x1FFF) << 3;
+ *result = (val * 1000) >> 7;
+ break;
+ case LTC2990_V1_MSB:
+ case LTC2990_V3_MSB:
+ /* Vx-Vy, 19.42uV/LSB. Depends on mode. */
+ *result = ltc2990_voltage_to_int(val) * 1942 / (4 * 100);
+ break;
+ case LTC2990_VCC_MSB:
+ /* Vcc, 305.18μV/LSB, 2.5V offset */
+ *result = (ltc2990_voltage_to_int(val) * 30518 /
+ (4 * 100 * 1000)) + 2500;
+ break;
+ default:
+ return -EINVAL; /* won't happen, keep compiler happy */
+ }
+
+ return 0;
+}
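To make the fixed-point scaling above concrete, here is a small stand-alone mirror of the conversions with invented raw values (illustrative only, not part of the driver):

    #include <stdio.h>

    /* Same math as ltc2990_voltage_to_int() above */
    static int voltage_to_int(int raw)
    {
    	if (raw & (1 << 14))
    		return -(0x4000 - (raw & 0x3FFF)) << 2;
    	return (raw & 0x3FFF) << 2;
    }

    int main(void)
    {
    	int traw = 0x0190;	/* 400 LSB * 0.0625 C/LSB = 25 C */
    	int vraw = 0x1000;	/* 4096 LSB * 19.42 uV/LSB = ~79.5 mV */

    	/* internal temperature: 400 * 62.5 = 25000 mC */
    	printf("temp = %d mC\n", (((traw & 0x1FFF) << 3) * 1000) >> 7);
    	/* V1 differential input: 16384 * 1942 / 400 = 79544 uV */
    	printf("v1   = %d uV\n", voltage_to_int(vraw) * 1942 / (4 * 100));
    	return 0;
    }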
+
+static ssize_t ltc2990_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int value;
+ int ret;
+
+ ret = ltc2990_get_value(dev_get_drvdata(dev), attr->index, &value);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ltc2990_show_value, NULL,
+ LTC2990_TINT_MSB);
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc2990_show_value, NULL,
+ LTC2990_V1_MSB);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc2990_show_value, NULL,
+ LTC2990_V3_MSB);
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ltc2990_show_value, NULL,
+ LTC2990_VCC_MSB);
+
+static struct attribute *ltc2990_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr2_input.dev_attr.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc2990);
+
+static int ltc2990_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct device *hwmon_dev;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ /* Setup continuous mode, current monitor */
+ ret = i2c_smbus_write_byte_data(i2c, LTC2990_CONTROL,
+ LTC2990_CONTROL_MEASURE_ALL |
+ LTC2990_CONTROL_MODE_CURRENT);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Error: Failed to set control mode.\n");
+ return ret;
+ }
+ /* Trigger once to start continuous conversion */
+ ret = i2c_smbus_write_byte_data(i2c, LTC2990_TRIGGER, 1);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Error: Failed to start acquisition.\n");
+ return ret;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&i2c->dev,
+ i2c->name,
+ i2c,
+ ltc2990_groups);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc2990_i2c_id[] = {
+ { "ltc2990", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2990_i2c_id);
+
+static struct i2c_driver ltc2990_i2c_driver = {
+ .driver = {
+ .name = "ltc2990",
+ },
+ .probe = ltc2990_i2c_probe,
+ .id_table = ltc2990_i2c_id,
+};
+
+module_i2c_driver(ltc2990_i2c_driver);
+
+MODULE_DESCRIPTION("LTC2990 Sensor Driver");
+MODULE_AUTHOR("Topic Embedded Products");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 36544c4f653c..303d0c9df907 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
int max1111_read_channel(int channel)
{
+ if (!the_max1111 || !the_max1111->spi)
+ return -ENODEV;
+
return max1111_read(&the_max1111->spi->dev, channel);
}
EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
+#ifdef CONFIG_SHARPSL_PM
+ the_max1111 = NULL;
+#endif
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
diff --git a/drivers/hwmon/nsa320-hwmon.c b/drivers/hwmon/nsa320-hwmon.c
new file mode 100644
index 000000000000..0517a265741f
--- /dev/null
+++ b/drivers/hwmon/nsa320-hwmon.c
@@ -0,0 +1,215 @@
+/*
+ * drivers/hwmon/nsa320-hwmon.c
+ *
+ * ZyXEL NSA320 Media Servers
+ * hardware monitoring
+ *
+ * Copyright (C) 2016 Adam Baker <linux@baker-net.org.uk>
+ * based on a board file driver
+ * Copyright (C) 2012 Peter Schildmann <linux@schildmann.info>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* Tests for error return values rely upon this value being < 0x80 */
+#define MAGIC_NUMBER 0x55
+
+/*
+ * The Zyxel hwmon MCU is a Holtek HT46R065 that is factory programmed
+ * to perform temperature and fan speed monitoring. It is read by taking
+ * the active pin low. The 32 bit output word is then clocked onto the
+ * data line. The MSB of the data word is a magic number to indicate it
+ * has been read correctly, the next byte is the fan speed (in hundreds
+ * of RPM) and the last two bytes are the temperature (in tenths of a
+ * degree)
+ */
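For example, one invented MCU word decodes as follows (value chosen only for illustration):

    /*
     * mcu_data = 0x550F0122
     *   bits 31-24: 0x55   magic number, word was clocked in correctly
     *   bits 23-16: 0x0F   fan speed, 15 * 100 = 1500 RPM
     *   bits 15-0:  0x0122 temperature, 290 tenths = 29.0 degrees C
     *
     * show_fan() and show_temp() below report these as 1500 and
     * 29000 (millidegrees) respectively.
     */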
+
+struct nsa320_hwmon {
+ struct mutex update_lock; /* lock GPIO operations */
+ unsigned long last_updated; /* jiffies */
+ unsigned long mcu_data;
+ struct gpio_desc *act;
+ struct gpio_desc *clk;
+ struct gpio_desc *data;
+};
+
+enum nsa320_inputs {
+ NSA320_TEMP = 0,
+ NSA320_FAN = 1,
+};
+
+static const char * const nsa320_input_names[] = {
+ [NSA320_TEMP] = "System Temperature",
+ [NSA320_FAN] = "Chassis Fan",
+};
+
+/*
+ * Although this protocol looks similar to SPI, the long delay
+ * between the active (aka chip select) signal and the shorter
+ * delay between clock pulses are needed for reliable operation.
+ * The delays provided are taken from the manufacturer's kernel;
+ * testing suggests they incorporate a reasonable safety
+ * margin. (The single device tested became unreliable if the
+ * delay was reduced to 1/10th of this value.)
+ */
+static s32 nsa320_hwmon_update(struct device *dev)
+{
+ u32 mcu_data;
+ u32 mask;
+ struct nsa320_hwmon *hwmon = dev_get_drvdata(dev);
+
+ mutex_lock(&hwmon->update_lock);
+
+ mcu_data = hwmon->mcu_data;
+
+ if (time_after(jiffies, hwmon->last_updated + HZ) || mcu_data == 0) {
+ gpiod_set_value(hwmon->act, 1);
+ msleep(100);
+
+ mcu_data = 0;
+ for (mask = BIT(31); mask; mask >>= 1) {
+ gpiod_set_value(hwmon->clk, 0);
+ usleep_range(100, 200);
+ gpiod_set_value(hwmon->clk, 1);
+ usleep_range(100, 200);
+ if (gpiod_get_value(hwmon->data))
+ mcu_data |= mask;
+ }
+
+ gpiod_set_value(hwmon->act, 0);
+ dev_dbg(dev, "Read raw MCU data %08x\n", mcu_data);
+
+ if ((mcu_data >> 24) != MAGIC_NUMBER) {
+ dev_dbg(dev, "Read invalid MCU data %08x\n", mcu_data);
+ mcu_data = -EIO;
+ } else {
+ hwmon->mcu_data = mcu_data;
+ hwmon->last_updated = jiffies;
+ }
+ }
+
+ mutex_unlock(&hwmon->update_lock);
+
+ return mcu_data;
+}
+
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int channel = to_sensor_dev_attr(attr)->index;
+
+ return sprintf(buf, "%s\n", nsa320_input_names[channel]);
+}
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ s32 mcu_data = nsa320_hwmon_update(dev);
+
+ if (mcu_data < 0)
+ return mcu_data;
+
+ return sprintf(buf, "%d\n", (mcu_data & 0xffff) * 100);
+}
+
+static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ s32 mcu_data = nsa320_hwmon_update(dev);
+
+ if (mcu_data < 0)
+ return mcu_data;
+
+ return sprintf(buf, "%d\n", ((mcu_data & 0xff0000) >> 16) * 100);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, NSA320_TEMP);
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, show_label, NULL, NSA320_FAN);
+static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
+
+static struct attribute *nsa320_attrs[] = {
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &dev_attr_temp1_input.attr,
+ &sensor_dev_attr_fan1_label.dev_attr.attr,
+ &dev_attr_fan1_input.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(nsa320);
+
+static const struct of_device_id of_nsa320_hwmon_match[] = {
+ { .compatible = "zyxel,nsa320-mcu", },
+ { },
+};
+
+static int nsa320_hwmon_probe(struct platform_device *pdev)
+{
+ struct nsa320_hwmon *hwmon;
+ struct device *classdev;
+
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ /* Look up the GPIO pins to use */
+ hwmon->act = devm_gpiod_get(&pdev->dev, "act", GPIOD_OUT_LOW);
+ if (IS_ERR(hwmon->act))
+ return PTR_ERR(hwmon->act);
+
+ hwmon->clk = devm_gpiod_get(&pdev->dev, "clk", GPIOD_OUT_HIGH);
+ if (IS_ERR(hwmon->clk))
+ return PTR_ERR(hwmon->clk);
+
+ hwmon->data = devm_gpiod_get(&pdev->dev, "data", GPIOD_IN);
+ if (IS_ERR(hwmon->data))
+ return PTR_ERR(hwmon->data);
+
+ mutex_init(&hwmon->update_lock);
+
+ classdev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "nsa320", hwmon, nsa320_groups);
+
+ return PTR_ERR_OR_ZERO(classdev);
+}
+
+/* All allocations use devres so remove() is not needed. */
+
+static struct platform_driver nsa320_hwmon_driver = {
+ .probe = nsa320_hwmon_probe,
+ .driver = {
+ .name = "nsa320-hwmon",
+ .of_match_table = of_match_ptr(of_nsa320_hwmon_match),
+ },
+};
+
+module_platform_driver(nsa320_hwmon_driver);
+
+MODULE_DEVICE_TABLE(of, of_nsa320_hwmon_match);
+MODULE_AUTHOR("Peter Schildmann <linux@schildmann.info>");
+MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>");
+MODULE_DESCRIPTION("NSA320 Hardware Monitoring");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:nsa320-hwmon");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index feed30646d91..faa6e8dfbaaf 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -54,6 +54,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
{ "ncp15wl333", TYPE_NCPXXWL333 },
{ "b57330v2103", TYPE_B57330V2103},
{ "ncp03wf104", TYPE_NCPXXWF104 },
+ { "ncp15xh103", TYPE_NCPXXXH103 },
{ },
};
@@ -173,6 +174,43 @@ static const struct ntc_compensation ncpXXwf104[] = {
{ .temp_c = 125, .ohm = 2522 },
};
+static const struct ntc_compensation ncpXXxh103[] = {
+ { .temp_c = -40, .ohm = 247565 },
+ { .temp_c = -35, .ohm = 181742 },
+ { .temp_c = -30, .ohm = 135128 },
+ { .temp_c = -25, .ohm = 101678 },
+ { .temp_c = -20, .ohm = 77373 },
+ { .temp_c = -15, .ohm = 59504 },
+ { .temp_c = -10, .ohm = 46222 },
+ { .temp_c = -5, .ohm = 36244 },
+ { .temp_c = 0, .ohm = 28674 },
+ { .temp_c = 5, .ohm = 22878 },
+ { .temp_c = 10, .ohm = 18399 },
+ { .temp_c = 15, .ohm = 14910 },
+ { .temp_c = 20, .ohm = 12169 },
+ { .temp_c = 25, .ohm = 10000 },
+ { .temp_c = 30, .ohm = 8271 },
+ { .temp_c = 35, .ohm = 6883 },
+ { .temp_c = 40, .ohm = 5762 },
+ { .temp_c = 45, .ohm = 4851 },
+ { .temp_c = 50, .ohm = 4105 },
+ { .temp_c = 55, .ohm = 3492 },
+ { .temp_c = 60, .ohm = 2985 },
+ { .temp_c = 65, .ohm = 2563 },
+ { .temp_c = 70, .ohm = 2211 },
+ { .temp_c = 75, .ohm = 1915 },
+ { .temp_c = 80, .ohm = 1666 },
+ { .temp_c = 85, .ohm = 1454 },
+ { .temp_c = 90, .ohm = 1275 },
+ { .temp_c = 95, .ohm = 1121 },
+ { .temp_c = 100, .ohm = 990 },
+ { .temp_c = 105, .ohm = 876 },
+ { .temp_c = 110, .ohm = 779 },
+ { .temp_c = 115, .ohm = 694 },
+ { .temp_c = 120, .ohm = 620 },
+ { .temp_c = 125, .ohm = 556 },
+};
+
/*
* The following compensation table is from the specification of EPCOS NTC
* Thermistors Datasheet
@@ -260,6 +298,8 @@ static const struct of_device_id ntc_match[] = {
.data = &ntc_thermistor_id[5]},
{ .compatible = "murata,ncp03wf104",
.data = &ntc_thermistor_id[6] },
+ { .compatible = "murata,ncp15xh103",
+ .data = &ntc_thermistor_id[7] },
/* Usage of vendor name "ntc" is deprecated */
{ .compatible = "ntc,ncp15wb473",
@@ -609,6 +649,10 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
data->comp = ncpXXwf104;
data->n_comp = ARRAY_SIZE(ncpXXwf104);
break;
+ case TYPE_NCPXXXH103:
+ data->comp = ncpXXxh103;
+ data->n_comp = ARRAY_SIZE(ncpXXxh103);
+ break;
default:
dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n",
pdev_id->driver_data, pdev_id->name);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 7e5cc3d025ef..054d3d863802 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -31,8 +31,8 @@ config SENSORS_ADM1275
default n
help
If you say yes here you get hardware monitoring support for Analog
- Devices ADM1075, ADM1275, ADM1276, ADM1293, and ADM1294 Hot-Swap
- Controller and Digital Power Monitors.
+ Devices ADM1075, ADM1275, ADM1276, ADM1278, ADM1293, and ADM1294
+ Hot-Swap Controller and Digital Power Monitors.
This driver can also be built as a module. If so, the module will
be called adm1275.
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 188af4c89f40..3baa4f4a8c5e 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -24,7 +24,7 @@
#include <linux/bitops.h>
#include "pmbus.h"
-enum chips { adm1075, adm1275, adm1276, adm1293, adm1294 };
+enum chips { adm1075, adm1275, adm1276, adm1278, adm1293, adm1294 };
#define ADM1275_MFR_STATUS_IOUT_WARN2 BIT(0)
#define ADM1293_MFR_STATUS_VAUX_UV_WARN BIT(5)
@@ -41,6 +41,10 @@ enum chips { adm1075, adm1275, adm1276, adm1293, adm1294 };
#define ADM1075_IRANGE_25 BIT(3)
#define ADM1075_IRANGE_MASK (BIT(3) | BIT(4))
+#define ADM1278_TEMP1_EN BIT(3)
+#define ADM1278_VIN_EN BIT(2)
+#define ADM1278_VOUT_EN BIT(1)
+
#define ADM1293_IRANGE_25 0
#define ADM1293_IRANGE_50 BIT(6)
#define ADM1293_IRANGE_100 BIT(7)
@@ -54,6 +58,7 @@ enum chips { adm1075, adm1275, adm1276, adm1293, adm1294 };
#define ADM1293_VAUX_EN BIT(1)
+#define ADM1278_PEAK_TEMP 0xd7
#define ADM1275_IOUT_WARN2_LIMIT 0xd7
#define ADM1275_DEVICE_CONFIG 0xd8
@@ -80,6 +85,7 @@ struct adm1275_data {
bool have_iout_min;
bool have_pin_min;
bool have_pin_max;
+ bool have_temp_max;
struct pmbus_driver_info info;
};
@@ -113,6 +119,13 @@ static const struct coefficients adm1276_coefficients[] = {
[4] = { 2115, 0, -1 }, /* power, vrange not set */
};
+static const struct coefficients adm1278_coefficients[] = {
+ [0] = { 19599, 0, -2 }, /* voltage */
+ [1] = { 800, 20475, -1 }, /* current */
+ [2] = { 6123, 0, -2 }, /* power */
+ [3] = { 42, 31880, -1 }, /* temperature */
+};
+
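The triplets are PMBus "direct" format coefficients (m, b, R), where a real-world value X is recovered from a raw register word Y as X = (Y * 10^-R - b) / m. A stand-alone sanity check of the temperature entry above, with an invented raw reading:

    #include <stdio.h>

    int main(void)
    {
    	/* temperature coefficients above: m = 42, b = 31880, R = -1 */
    	long m = 42, b = 31880;
    	long Y = 3314;		/* invented raw register reading */

    	/* X = (Y * 10^-R - b) / m = (33140 - 31880) / 42 = 30 */
    	printf("temp = %ld C\n", (Y * 10 - b) / m);	/* 10 == 10^-R */
    	return 0;
    }

The pmbus core performs this conversion at run time; the table only supplies the coefficients.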
static const struct coefficients adm1293_coefficients[] = {
[0] = { 3333, -1, 0 }, /* voltage, vrange 1.2V */
[1] = { 5552, -5, -1 }, /* voltage, vrange 7.4V */
@@ -196,6 +209,11 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
return -ENXIO;
ret = pmbus_read_word_data(client, 0, ADM1276_PEAK_PIN);
break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ if (!data->have_temp_max)
+ return -ENXIO;
+ ret = pmbus_read_word_data(client, 0, ADM1278_PEAK_TEMP);
+ break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
case PMBUS_VIRT_RESET_VOUT_HISTORY:
case PMBUS_VIRT_RESET_VIN_HISTORY:
@@ -204,6 +222,10 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
if (!data->have_pin_max)
return -ENXIO;
break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ if (!data->have_temp_max)
+ return -ENXIO;
+ break;
default:
ret = -ENODATA;
break;
@@ -245,6 +267,9 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
ret = pmbus_write_word_data(client, 0,
ADM1293_PIN_MIN, 0);
break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = pmbus_write_word_data(client, 0, ADM1278_PEAK_TEMP, 0);
+ break;
default:
ret = -ENODATA;
break;
@@ -312,6 +337,7 @@ static const struct i2c_device_id adm1275_id[] = {
{ "adm1075", adm1075 },
{ "adm1275", adm1275 },
{ "adm1276", adm1276 },
+ { "adm1278", adm1278 },
{ "adm1293", adm1293 },
{ "adm1294", adm1294 },
{ }
@@ -329,6 +355,7 @@ static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *mid;
const struct coefficients *coefficients;
int vindex = -1, voindex = -1, cindex = -1, pindex = -1;
+ int tindex = -1;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA
@@ -386,6 +413,7 @@ static int adm1275_probe(struct i2c_client *client,
info->format[PSC_VOLTAGE_OUT] = direct;
info->format[PSC_CURRENT_OUT] = direct;
info->format[PSC_POWER] = direct;
+ info->format[PSC_TEMPERATURE] = direct;
info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
info->read_word_data = adm1275_read_word_data;
@@ -460,6 +488,27 @@ static int adm1275_probe(struct i2c_client *client,
info->func[0] |=
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
+ case adm1278:
+ data->have_vout = true;
+ data->have_pin_max = true;
+ data->have_temp_max = true;
+
+ coefficients = adm1278_coefficients;
+ vindex = 0;
+ cindex = 1;
+ pindex = 2;
+ tindex = 3;
+
+ info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT;
+ if (config & ADM1278_TEMP1_EN)
+ info->func[0] |=
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ if (config & ADM1278_VIN_EN)
+ info->func[0] |= PMBUS_HAVE_VIN;
+ if (config & ADM1278_VOUT_EN)
+ info->func[0] |=
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+ break;
case adm1293:
case adm1294:
data->have_iout_min = true;
@@ -537,6 +586,11 @@ static int adm1275_probe(struct i2c_client *client,
info->b[PSC_POWER] = coefficients[pindex].b;
info->R[PSC_POWER] = coefficients[pindex].R;
}
+ if (tindex >= 0) {
+ info->m[PSC_TEMPERATURE] = coefficients[tindex].m;
+ info->b[PSC_TEMPERATURE] = coefficients[tindex].b;
+ info->R[PSC_TEMPERATURE] = coefficients[tindex].R;
+ }
return pmbus_do_probe(client, id, info);
}
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 7e20567bc369..912b449c8303 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -52,7 +52,7 @@ static int scpi_read_temp(void *dev, int *temp)
struct scpi_sensors *scpi_sensors = zone->scpi_sensors;
struct scpi_ops *scpi_ops = scpi_sensors->scpi_ops;
struct sensor_data *sensor = &scpi_sensors->data[zone->sensor_id];
- u32 value;
+ u64 value;
int ret;
ret = scpi_ops->sensor_get_value(sensor->info.sensor_id, &value);
@@ -70,7 +70,7 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
struct scpi_sensors *scpi_sensors = dev_get_drvdata(dev);
struct scpi_ops *scpi_ops = scpi_sensors->scpi_ops;
struct sensor_data *sensor;
- u32 value;
+ u64 value;
int ret;
sensor = container_of(attr, struct sensor_data, dev_attr_input);
@@ -79,7 +79,7 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
if (ret)
return ret;
- return sprintf(buf, "%u\n", value);
+ return sprintf(buf, "%llu\n", value);
}
static ssize_t
@@ -114,6 +114,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
{
u16 nr_sensors, i;
int num_temp = 0, num_volt = 0, num_current = 0, num_power = 0;
+ int num_energy = 0;
struct scpi_ops *scpi_ops;
struct device *hwdev, *dev = &pdev->dev;
struct scpi_sensors *scpi_sensors;
@@ -182,6 +183,13 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
"power%d_label", num_power + 1);
num_power++;
break;
+ case ENERGY:
+ snprintf(sensor->input, sizeof(sensor->input),
+ "energy%d_input", num_energy + 1);
+ snprintf(sensor->label, sizeof(sensor->label),
+ "energy%d_label", num_energy + 1);
+ num_energy++;
+ break;
default:
continue;
}
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress-hwmon.c
index 8ba419d343f8..8ba419d343f8 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress-hwmon.c
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index c85935f3525a..db0541031c72 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -4,6 +4,7 @@
menuconfig CORESIGHT
bool "CoreSight Tracing Support"
select ARM_AMBA
+ select PERF_EVENTS
help
This framework provides a kernel interface for the CoreSight debug
and trace drivers to register themselves with. It's intended to build
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 99f8e5f6256e..cf8c6d689747 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
coresight-replicator.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
+ coresight-etm3x-sysfs.o \
+ coresight-etm-perf.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 77d0f9c1118d..acbce79934d6 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Embedded Trace Buffer driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -10,8 +12,8 @@
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -27,6 +29,11 @@
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
+#include <linux/circ_buf.h>
+#include <linux/mm.h>
+#include <linux/perf_event.h>
+
+#include <asm/local.h>
#include "coresight-priv.h"
@@ -64,6 +71,26 @@
#define ETB_FRAME_SIZE_WORDS 4
/**
+ * struct cs_buffers - keep track of a recording session's specifics
+ * @cur: index of the current buffer
+ * @nr_pages: max number of pages granted to us
+ * @offset: offset within the current buffer
+ * @data_size: how much we collected in this run
+ * @lost: non-zero if we had a HW buffer wrap around
+ * @snapshot: is this run in snapshot mode
+ * @data_pages: a handle to the ring buffer
+ */
+struct cs_buffers {
+ unsigned int cur;
+ unsigned int nr_pages;
+ unsigned long offset;
+ local_t data_size;
+ local_t lost;
+ bool snapshot;
+ void **data_pages;
+};
+
+/**
* struct etb_drvdata - specifics associated to an ETB component
* @base: memory mapped base address for this component.
* @dev: the device entity associated to this component.
@@ -71,10 +98,10 @@
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.etb" entry.
* @spinlock: only one at a time pls.
- * @in_use: synchronise user space access to etb buffer.
+ * @reading: synchronise user space access to etb buffer.
+ * @mode: how this ETB is being used: sysFS, perf or disabled.
* @buf: area of memory where ETB buffer content gets sent.
* @buffer_depth: size of @buf.
- * @enable: this ETB is being used.
* @trigger_cntr: amount of words to store after a trigger.
*/
struct etb_drvdata {
@@ -84,10 +111,10 @@ struct etb_drvdata {
struct coresight_device *csdev;
struct miscdevice miscdev;
spinlock_t spinlock;
- atomic_t in_use;
+ local_t reading;
+ local_t mode;
u8 *buf;
u32 buffer_depth;
- bool enable;
u32 trigger_cntr;
};
@@ -132,18 +159,31 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int etb_enable(struct coresight_device *csdev)
+static int etb_enable(struct coresight_device *csdev, u32 mode)
{
- struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ u32 val;
unsigned long flags;
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
+ val = local_cmpxchg(&drvdata->mode,
+ CS_MODE_DISABLED, mode);
+ /*
+ * When accessing from Perf, a HW buffer can be handled
+ * by a single trace entity. In sysFS mode many tracers
+ * can be logging to the same HW buffer.
+ */
+ if (val == CS_MODE_PERF)
+ return -EBUSY;
+
+ /* Nothing to do, the tracer is already enabled. */
+ if (val == CS_MODE_SYSFS)
+ goto out;
spin_lock_irqsave(&drvdata->spinlock, flags);
etb_enable_hw(drvdata);
- drvdata->enable = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+out:
dev_info(drvdata->dev, "ETB enabled\n");
return 0;
}
@@ -244,17 +284,225 @@ static void etb_disable(struct coresight_device *csdev)
spin_lock_irqsave(&drvdata->spinlock, flags);
etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
- drvdata->enable = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
dev_info(drvdata->dev, "ETB disabled\n");
}
+static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
+ void **pages, int nr_pages, bool overwrite)
+{
+ int node;
+ struct cs_buffers *buf;
+
+ if (cpu == -1)
+ cpu = smp_processor_id();
+ node = cpu_to_node(cpu);
+
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+ if (!buf)
+ return NULL;
+
+ buf->snapshot = overwrite;
+ buf->nr_pages = nr_pages;
+ buf->data_pages = pages;
+
+ return buf;
+}
+
+static void etb_free_buffer(void *config)
+{
+ struct cs_buffers *buf = config;
+
+ kfree(buf);
+}
+
+static int etb_set_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config)
+{
+ int ret = 0;
+ unsigned long head;
+ struct cs_buffers *buf = sink_config;
+
+ /* wrap head around to the amount of space we have */
+ head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+
+ /* find the page to write to */
+ buf->cur = head / PAGE_SIZE;
+
+ /* and offset within that page */
+ buf->offset = head % PAGE_SIZE;
+
+ local_set(&buf->data_size, 0);
+
+ return ret;
+}
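Since perf AUX areas are a power-of-two number of pages, the masking above folds the free-running handle->head into the buffer. With invented values:

    /*
     * nr_pages = 4 (16 KiB AUX area, 4 KiB pages),
     * handle->head = 0x12345 (free-running byte count from perf):
     *
     *   head   = 0x12345 & 0x3fff = 0x2345   folded into the area
     *   cur    = 0x2345 / 4096    = 2        third page
     *   offset = 0x2345 % 4096    = 0x345    byte offset in that page
     */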
+
+static unsigned long etb_reset_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config, bool *lost)
+{
+ unsigned long size = 0;
+ struct cs_buffers *buf = sink_config;
+
+ if (buf) {
+ /*
+ * In snapshot mode ->data_size holds the new address of the
+ * ring buffer's head. The size itself is the whole address
+ * range since we want the latest information.
+ */
+ if (buf->snapshot)
+ handle->head = local_xchg(&buf->data_size,
+ buf->nr_pages << PAGE_SHIFT);
+
+ /*
+ * Tell the tracer PMU how much we got in this run and if
+ * something went wrong along the way. Nobody else can use
+ * this cs_buffers instance until we are done. As such
+ * resetting parameters here and squaring off with the ring
+ * buffer API in the tracer PMU is fine.
+ */
+ *lost = !!local_xchg(&buf->lost, 0);
+ size = local_xchg(&buf->data_size, 0);
+ }
+
+ return size;
+}
+
+static void etb_update_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config)
+{
+ int i, cur;
+ u8 *buf_ptr;
+ u32 read_ptr, write_ptr, capacity;
+ u32 status, read_data, to_read;
+ unsigned long offset;
+ struct cs_buffers *buf = sink_config;
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (!buf)
+ return;
+
+ capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
+
+ CS_UNLOCK(drvdata->base);
+ etb_disable_hw(drvdata);
+
+ /* unit is in words, not bytes */
+ read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+ write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+
+ /*
+ * Entries should be aligned to the frame size. If they are not,
+ * go back to the last alignment point to give decoding tools a
+ * chance to fix things.
+ */
+ if (write_ptr % ETB_FRAME_SIZE_WORDS) {
+ dev_err(drvdata->dev,
+ "write_ptr: %lu not aligned to formatter frame size\n",
+ (unsigned long)write_ptr);
+
+ write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
+ local_inc(&buf->lost);
+ }
+
+ /*
+ * Get a hold of the status register and see if a wrap around
+ * has occurred. If so adjust things accordingly. Otherwise
+ * start at the beginning and go until the write pointer has
+ * been reached.
+ */
+ status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
+ if (status & ETB_STATUS_RAM_FULL) {
+ local_inc(&buf->lost);
+ to_read = capacity;
+ read_ptr = write_ptr;
+ } else {
+ to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
+ to_read *= ETB_FRAME_SIZE_WORDS;
+ }
+
+ /*
+ * Make sure we don't overwrite data that hasn't been consumed yet.
+ * It is entirely possible that the HW buffer has more data than the
+ * ring buffer can currently handle. If so adjust the start address
+ * to take only the last traces.
+ *
+ * In snapshot mode we are looking to get the latest traces only and as
+ * such, we don't care about not overwriting data that hasn't been
+ * processed by user space.
+ */
+ if (!buf->snapshot && to_read > handle->size) {
+ u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);
+
+ /* The new read pointer must be frame size aligned */
+ to_read = handle->size & mask;
+ /*
+ * Move the RAM read pointer up, keeping in mind that
+ * everything is in frame size units.
+ */
+ read_ptr = (write_ptr + drvdata->buffer_depth) -
+ to_read / ETB_FRAME_SIZE_WORDS;
+ /* Wrap around if need be */
+ if (read_ptr > (drvdata->buffer_depth - 1))
+ read_ptr -= drvdata->buffer_depth;
+ /* let the decoder know we've skipped ahead */
+ local_inc(&buf->lost);
+ }
+
+ /* finally tell HW where we want to start reading from */
+ writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+ cur = buf->cur;
+ offset = buf->offset;
+ for (i = 0; i < to_read; i += 4) {
+ buf_ptr = buf->data_pages[cur] + offset;
+ read_data = readl_relaxed(drvdata->base +
+ ETB_RAM_READ_DATA_REG);
+ *buf_ptr++ = read_data >> 0;
+ *buf_ptr++ = read_data >> 8;
+ *buf_ptr++ = read_data >> 16;
+ *buf_ptr++ = read_data >> 24;
+
+ offset += 4;
+ if (offset >= PAGE_SIZE) {
+ offset = 0;
+ cur++;
+ /* wrap around at the end of the buffer */
+ cur &= buf->nr_pages - 1;
+ }
+ }
+
+ /* reset ETB buffer for next run */
+ writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+ writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+
+ /*
+ * In snapshot mode all we have to do is communicate to
+ * perf_aux_output_end() the address of the current head. In full
+ * trace mode the same function expects a size to move rb->aux_head
+ * forward.
+ */
+ if (buf->snapshot)
+ local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
+ else
+ local_add(to_read, &buf->data_size);
+
+ etb_enable_hw(drvdata);
+ CS_LOCK(drvdata->base);
+}
+
static const struct coresight_ops_sink etb_sink_ops = {
.enable = etb_enable,
.disable = etb_disable,
+ .alloc_buffer = etb_alloc_buffer,
+ .free_buffer = etb_free_buffer,
+ .set_buffer = etb_set_buffer,
+ .reset_buffer = etb_reset_buffer,
+ .update_buffer = etb_update_buffer,
};
static const struct coresight_ops etb_cs_ops = {
@@ -266,7 +514,7 @@ static void etb_dump(struct etb_drvdata *drvdata)
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->enable) {
+ if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
etb_enable_hw(drvdata);
@@ -281,7 +529,7 @@ static int etb_open(struct inode *inode, struct file *file)
struct etb_drvdata *drvdata = container_of(file->private_data,
struct etb_drvdata, miscdev);
- if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+ if (local_cmpxchg(&drvdata->reading, 0, 1))
return -EBUSY;
dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
@@ -317,7 +565,7 @@ static int etb_release(struct inode *inode, struct file *file)
{
struct etb_drvdata *drvdata = container_of(file->private_data,
struct etb_drvdata, miscdev);
- atomic_set(&drvdata->in_use, 0);
+ local_set(&drvdata->reading, 0);
dev_dbg(drvdata->dev, "%s: released\n", __func__);
return 0;
@@ -489,15 +737,6 @@ err_misc_register:
return ret;
}
-static int etb_remove(struct amba_device *adev)
-{
- struct etb_drvdata *drvdata = amba_get_drvdata(adev);
-
- misc_deregister(&drvdata->miscdev);
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
@@ -537,14 +776,10 @@ static struct amba_driver etb_driver = {
.name = "coresight-etb10",
.owner = THIS_MODULE,
.pm = &etb_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = etb_probe,
- .remove = etb_remove,
.id_table = etb_ids,
};
-
-module_amba_driver(etb_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
+builtin_amba_driver(etb_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
new file mode 100644
index 000000000000..755125f7917f
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "coresight-priv.h"
+
+static struct pmu etm_pmu;
+static bool etm_perf_up;
+
+/**
+ * struct etm_event_data - Coresight specifics associated to an event
+ * @work: Handle to free allocated memory outside IRQ context.
+ * @mask: Hold the CPU(s) this event was set for.
+ * @snk_config: The sink configuration.
+ * @path: An array of paths, one slot per CPU.
+ */
+struct etm_event_data {
+ struct work_struct work;
+ cpumask_t mask;
+ void *snk_config;
+ struct list_head **path;
+};
+
+static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
+
+/* ETMv3.5/PTM's ETMCR is 'config' */
+PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
+PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
+
+static struct attribute *etm_config_formats_attr[] = {
+ &format_attr_cycacc.attr,
+ &format_attr_timestamp.attr,
+ NULL,
+};
+
+static struct attribute_group etm_pmu_format_group = {
+ .name = "format",
+ .attrs = etm_config_formats_attr,
+};
+
+static const struct attribute_group *etm_pmu_attr_groups[] = {
+ &etm_pmu_format_group,
+ NULL,
+};
+
+static void etm_event_read(struct perf_event *event) {}
+
+static int etm_event_init(struct perf_event *event)
+{
+ if (event->attr.type != etm_pmu.type)
+ return -ENOENT;
+
+ return 0;
+}
+
+static void free_event_data(struct work_struct *work)
+{
+ int cpu;
+ cpumask_t *mask;
+ struct etm_event_data *event_data;
+ struct coresight_device *sink;
+
+ event_data = container_of(work, struct etm_event_data, work);
+ mask = &event_data->mask;
+ /*
+ * First deal with the sink configuration. See comment in
+ * etm_setup_aux() about why we take the first available path.
+ */
+ if (event_data->snk_config) {
+ cpu = cpumask_first(mask);
+ sink = coresight_get_sink(event_data->path[cpu]);
+ if (sink_ops(sink)->free_buffer)
+ sink_ops(sink)->free_buffer(event_data->snk_config);
+ }
+
+ for_each_cpu(cpu, mask) {
+ if (event_data->path[cpu])
+ coresight_release_path(event_data->path[cpu]);
+ }
+
+ kfree(event_data->path);
+ kfree(event_data);
+}
+
+static void *alloc_event_data(int cpu)
+{
+ int size;
+ cpumask_t *mask;
+ struct etm_event_data *event_data;
+
+ /* First get memory for the session's data */
+ event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
+ if (!event_data)
+ return NULL;
+
+ /* Make sure nothing disappears under us */
+ get_online_cpus();
+ size = num_online_cpus();
+
+ mask = &event_data->mask;
+ if (cpu != -1)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_copy(mask, cpu_online_mask);
+ put_online_cpus();
+
+ /*
+ * Each CPU has a single path between source and destination. As such
+ * allocate an array using CPU numbers as indexes. That way a path
+ * for any CPU can easily be accessed at any given time. We proceed
+ * the same way for sessions involving a single CPU. The cost of
+ * unused memory when dealing with single CPU trace scenarios is small
+ * compared to the cost of searching through an optimized array.
+ */
+ event_data->path = kcalloc(size,
+ sizeof(struct list_head *), GFP_KERNEL);
+ if (!event_data->path) {
+ kfree(event_data);
+ return NULL;
+ }
+
+ return event_data;
+}
+
+static void etm_free_aux(void *data)
+{
+ struct etm_event_data *event_data = data;
+
+ schedule_work(&event_data->work);
+}
+
+static void *etm_setup_aux(int event_cpu, void **pages,
+ int nr_pages, bool overwrite)
+{
+ int cpu;
+ cpumask_t *mask;
+ struct coresight_device *sink;
+ struct etm_event_data *event_data = NULL;
+
+ event_data = alloc_event_data(event_cpu);
+ if (!event_data)
+ return NULL;
+
+ INIT_WORK(&event_data->work, free_event_data);
+
+ mask = &event_data->mask;
+
+ /* Setup the path for each CPU in a trace session */
+ for_each_cpu(cpu, mask) {
+ struct coresight_device *csdev;
+
+ csdev = per_cpu(csdev_src, cpu);
+ if (!csdev)
+ goto err;
+
+ /*
+ * Building a path doesn't enable it; it simply builds a
+ * list of devices from source to sink that can be
+ * referenced later when the path is actually needed.
+ */
+ event_data->path[cpu] = coresight_build_path(csdev);
+ if (!event_data->path[cpu])
+ goto err;
+ }
+
+ /*
+ * In theory nothing prevents tracers in a trace session from being
+ * associated with different sinks, nor having a sink per tracer. But
+ * until we have HW with this kind of topology and a way to convey
+ * sink assignment from the perf cmd line we need to assume tracers
+ * in a trace session are using the same sink. Therefore pick the sink
+ * found at the end of the first available path.
+ */
+ cpu = cpumask_first(mask);
+ /* Grab the sink at the end of the path */
+ sink = coresight_get_sink(event_data->path[cpu]);
+ if (!sink)
+ goto err;
+
+ if (!sink_ops(sink)->alloc_buffer)
+ goto err;
+
+ /* Get the AUX specific data from the sink buffer */
+ event_data->snk_config =
+ sink_ops(sink)->alloc_buffer(sink, cpu, pages,
+ nr_pages, overwrite);
+ if (!event_data->snk_config)
+ goto err;
+
+out:
+ return event_data;
+
+err:
+ etm_free_aux(event_data);
+ event_data = NULL;
+ goto out;
+}
+
+static void etm_event_start(struct perf_event *event, int flags)
+{
+ int cpu = smp_processor_id();
+ struct etm_event_data *event_data;
+ struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+ struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+
+ if (!csdev)
+ goto fail;
+
+ /*
+ * Deal with the ring buffer API and get a handle on the
+ * session's information.
+ */
+ event_data = perf_aux_output_begin(handle, event);
+ if (!event_data)
+ goto fail;
+
+ /* We need a sink, no need to continue without one */
+ sink = coresight_get_sink(event_data->path[cpu]);
+ if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
+ goto fail_end_stop;
+
+ /* Configure the sink */
+ if (sink_ops(sink)->set_buffer(sink, handle,
+ event_data->snk_config))
+ goto fail_end_stop;
+
+ /* Nothing will happen without a path */
+ if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+ goto fail_end_stop;
+
+ /* Tell the perf core the event is alive */
+ event->hw.state = 0;
+
+ /* Finally enable the tracer */
+ if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF))
+ goto fail_end_stop;
+
+out:
+ return;
+
+fail_end_stop:
+ perf_aux_output_end(handle, 0, true);
+fail:
+ event->hw.state = PERF_HES_STOPPED;
+ goto out;
+}
+
+static void etm_event_stop(struct perf_event *event, int mode)
+{
+ bool lost;
+ int cpu = smp_processor_id();
+ unsigned long size;
+ struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+ struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+ struct etm_event_data *event_data = perf_get_aux(handle);
+
+ if (event->hw.state == PERF_HES_STOPPED)
+ return;
+
+ if (!csdev)
+ return;
+
+ sink = coresight_get_sink(event_data->path[cpu]);
+ if (!sink)
+ return;
+
+ /* stop tracer */
+ source_ops(csdev)->disable(csdev);
+
+ /* tell the core */
+ event->hw.state = PERF_HES_STOPPED;
+
+ if (mode & PERF_EF_UPDATE) {
+ if (WARN_ON_ONCE(handle->event != event))
+ return;
+
+ /* update trace information */
+ if (!sink_ops(sink)->update_buffer)
+ return;
+
+ sink_ops(sink)->update_buffer(sink, handle,
+ event_data->snk_config);
+
+ if (!sink_ops(sink)->reset_buffer)
+ return;
+
+ size = sink_ops(sink)->reset_buffer(sink, handle,
+ event_data->snk_config,
+ &lost);
+
+ perf_aux_output_end(handle, size, lost);
+ }
+
+ /* Disabling the path makes its elements available to other sessions */
+ coresight_disable_path(event_data->path[cpu]);
+}
+
+static int etm_event_add(struct perf_event *event, int mode)
+{
+ int ret = 0;
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (mode & PERF_EF_START) {
+ etm_event_start(event, 0);
+ if (hwc->state & PERF_HES_STOPPED)
+ ret = -EINVAL;
+ } else {
+ hwc->state = PERF_HES_STOPPED;
+ }
+
+ return ret;
+}
+
+static void etm_event_del(struct perf_event *event, int mode)
+{
+ etm_event_stop(event, PERF_EF_UPDATE);
+}
+
+int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{
+ char entry[sizeof("cpu9999999")];
+ int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
+ struct device *pmu_dev = etm_pmu.dev;
+ struct device *cs_dev = &csdev->dev;
+
+ sprintf(entry, "cpu%d", cpu);
+
+ if (!etm_perf_up)
+ return -EPROBE_DEFER;
+
+ if (link) {
+ ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
+ if (ret)
+ return ret;
+ per_cpu(csdev_src, cpu) = csdev;
+ } else {
+ sysfs_remove_link(&pmu_dev->kobj, entry);
+ per_cpu(csdev_src, cpu) = NULL;
+ }
+
+ return 0;
+}
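When a tracer registers, this creates a per-CPU symlink under the PMU's sysfs directory so the perf tool can map a CPU to its tracer. Assuming the PMU registers under the name carried by CORESIGHT_ETM_PMU_NAME (target paths are illustrative):

    /*
     * /sys/bus/event_source/devices/<pmu-name>/cpu0 -> ...etm0's coresight device
     * /sys/bus/event_source/devices/<pmu-name>/cpu1 -> ...etm1's coresight device
     */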
+
+static int __init etm_perf_init(void)
+{
+ int ret;
+
+ etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE;
+
+ etm_pmu.attr_groups = etm_pmu_attr_groups;
+ etm_pmu.task_ctx_nr = perf_sw_context;
+ etm_pmu.read = etm_event_read;
+ etm_pmu.event_init = etm_event_init;
+ etm_pmu.setup_aux = etm_setup_aux;
+ etm_pmu.free_aux = etm_free_aux;
+ etm_pmu.start = etm_event_start;
+ etm_pmu.stop = etm_event_stop;
+ etm_pmu.add = etm_event_add;
+ etm_pmu.del = etm_event_del;
+
+ ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
+ if (ret == 0)
+ etm_perf_up = true;
+
+ return ret;
+}
+device_initcall(etm_perf_init);
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc-compat.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index 14a69b3d4fb0..87f5a134eb6f 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc-compat.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -1,6 +1,6 @@
/*
- * Copyright (C) 2012 Texas Instruments
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -15,16 +15,18 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __OMAP2_DSS_DISPC_COMPAT_H
-#define __OMAP2_DSS_DISPC_COMPAT_H
+#ifndef _CORESIGHT_ETM_PERF_H
+#define _CORESIGHT_ETM_PERF_H
-void dispc_mgr_enable_sync(enum omap_channel channel);
-void dispc_mgr_disable_sync(enum omap_channel channel);
+struct coresight_device;
-int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
- unsigned long timeout);
+#ifdef CONFIG_CORESIGHT
+int etm_perf_symlink(struct coresight_device *csdev, bool link);
-int dss_dispc_initialize_irq(void);
-void dss_dispc_uninitialize_irq(void);
+#else
+static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{ return -EINVAL; }
+
+#endif /* CONFIG_CORESIGHT */
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index b4481eb29304..51597cb2c08a 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -13,6 +13,7 @@
#ifndef _CORESIGHT_CORESIGHT_ETM_H
#define _CORESIGHT_CORESIGHT_ETM_H
+#include <asm/local.h>
#include <linux/spinlock.h>
#include "coresight-priv.h"
@@ -109,7 +110,10 @@
#define ETM_MODE_STALL BIT(2)
#define ETM_MODE_TIMESTAMP BIT(3)
#define ETM_MODE_CTXID BIT(4)
-#define ETM_MODE_ALL 0x1f
+#define ETM_MODE_ALL (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
+ ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+ ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
+ ETM_MODE_EXCL_USER)
#define ETM_SQR_MASK 0x3
#define ETM_TRACEID_MASK 0x3f
@@ -136,35 +140,16 @@
#define ETM_DEFAULT_EVENT_VAL (ETM_HARD_WIRE_RES_A | \
ETM_ADD_COMP_0 | \
ETM_EVENT_NOT_A)
+
/**
- * struct etm_drvdata - specifics associated to an ETM component
- * @base: memory mapped base address for this component.
- * @dev: the device entity associated to this component.
- * @atclk: optional clock for the core parts of the ETM.
- * @csdev: component vitals needed by the framework.
- * @spinlock: only one at a time pls.
- * @cpu: the cpu this component is affined to.
- * @port_size: port size as reported by ETMCR bit 4-6 and 21.
- * @arch: ETM/PTM version number.
- * @use_cpu14: true if management registers need to be accessed via CP14.
- * @enable: is this ETM/PTM currently tracing.
- * @sticky_enable: true if ETM base configuration has been done.
- * @boot_enable:true if we should start tracing at boot time.
- * @os_unlock: true if access to management registers is allowed.
- * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
- * @nr_cntr: Number of counters as found in ETMCCR bit 13-15.
- * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19.
- * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22.
- * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
- * @etmccr: value of register ETMCCR.
- * @etmccer: value of register ETMCCER.
- * @traceid: value of the current ID for this component.
+ * struct etm_config - configuration information related to an ETM
* @mode: controls various modes supported by this ETM/PTM.
* @ctrl: used in conjunction with @mode.
* @trigger_event: setting for register ETMTRIGGER.
* @startstop_ctrl: setting for register ETMTSSCR.
* @enable_event: setting for register ETMTEEVR.
* @enable_ctrl1: setting for register ETMTECR1.
+ * @enable_ctrl2: setting for register ETMTECR2.
* @fifofull_level: setting for register ETMFFLR.
* @addr_idx: index for the address comparator selection.
* @addr_val: value for address comparator register.
@@ -189,36 +174,16 @@
* @ctxid_mask: mask applicable to all the context IDs.
* @sync_freq: Synchronisation frequency.
* @timestamp_event: Defines an event that requests the insertion
- of a timestamp into the trace stream.
+ * of a timestamp into the trace stream.
*/
-struct etm_drvdata {
- void __iomem *base;
- struct device *dev;
- struct clk *atclk;
- struct coresight_device *csdev;
- spinlock_t spinlock;
- int cpu;
- int port_size;
- u8 arch;
- bool use_cp14;
- bool enable;
- bool sticky_enable;
- bool boot_enable;
- bool os_unlock;
- u8 nr_addr_cmp;
- u8 nr_cntr;
- u8 nr_ext_inp;
- u8 nr_ext_out;
- u8 nr_ctxid_cmp;
- u32 etmccr;
- u32 etmccer;
- u32 traceid;
+struct etm_config {
u32 mode;
u32 ctrl;
u32 trigger_event;
u32 startstop_ctrl;
u32 enable_event;
u32 enable_ctrl1;
+ u32 enable_ctrl2;
u32 fifofull_level;
u8 addr_idx;
u32 addr_val[ETM_MAX_ADDR_CMP];
@@ -244,6 +209,56 @@ struct etm_drvdata {
u32 timestamp_event;
};
+/**
+ * struct etm_drvdata - specifics associated to an ETM component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated to this component.
+ * @atclk: optional clock for the core parts of the ETM.
+ * @csdev: component vitals needed by the framework.
+ * @spinlock:	serialize accesses to this component's configuration.
+ * @cpu: the cpu this component is affined to.
+ * @port_size: port size as reported by ETMCR bit 4-6 and 21.
+ * @arch: ETM/PTM version number.
+ * @use_cp14:	true if management registers need to be accessed via CP14.
+ * @mode:	this tracer's mode, i.e. sysFS, Perf or disabled.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable: true if we should start tracing at boot time.
+ * @os_unlock:	true if access to management registers is allowed.
+ * @nr_addr_cmp: Number of pairs of address comparators as found in ETMCCR.
+ * @nr_cntr: Number of counters as found in ETMCCR bit 13-15.
+ * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19.
+ * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22.
+ * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
+ * @etmccr: value of register ETMCCR.
+ * @etmccer: value of register ETMCCER.
+ * @traceid: value of the current ID for this component.
+ * @config: structure holding configuration parameters.
+ */
+struct etm_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *atclk;
+ struct coresight_device *csdev;
+ spinlock_t spinlock;
+ int cpu;
+ int port_size;
+ u8 arch;
+ bool use_cp14;
+ local_t mode;
+ bool sticky_enable;
+ bool boot_enable;
+ bool os_unlock;
+ u8 nr_addr_cmp;
+ u8 nr_cntr;
+ u8 nr_ext_inp;
+ u8 nr_ext_out;
+ u8 nr_ctxid_cmp;
+ u32 etmccr;
+ u32 etmccer;
+ u32 traceid;
+ struct etm_config config;
+};
+
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
ETM_ADDR_TYPE_SINGLE,
@@ -251,4 +266,39 @@ enum etm_addr_type {
ETM_ADDR_TYPE_START,
ETM_ADDR_TYPE_STOP,
};
+
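+/*
+ * Accessors for the ETM management registers: depending on the core,
+ * these are reached either through a memory-mapped window or via the
+ * CP14 coprocessor interface, selected at runtime by drvdata->use_cp14.
+ */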
+static inline void etm_writel(struct etm_drvdata *drvdata,
+ u32 val, u32 off)
+{
+ if (drvdata->use_cp14) {
+ if (etm_writel_cp14(off, val)) {
+ dev_err(drvdata->dev,
+ "invalid CP14 access to ETM reg: %#x", off);
+ }
+ } else {
+ writel_relaxed(val, drvdata->base + off);
+ }
+}
+
+static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
+{
+ u32 val;
+
+ if (drvdata->use_cp14) {
+ if (etm_readl_cp14(off, &val)) {
+ dev_err(drvdata->dev,
+ "invalid CP14 access to ETM reg: %#x", off);
+ }
+ } else {
+ val = readl_relaxed(drvdata->base + off);
+ }
+
+ return val;
+}
+
+extern const struct attribute_group *coresight_etm_groups[];
+int etm_get_trace_id(struct etm_drvdata *drvdata);
+void etm_set_default(struct etm_config *config);
+void etm_config_trace_mode(struct etm_config *config);
+struct etm_config *get_etm_config(struct etm_drvdata *drvdata);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
new file mode 100644
index 000000000000..cbb4046c1070
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -0,0 +1,1272 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+#include "coresight-etm.h"
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_addr_cmp;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_cntr;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ctxid_cmp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_ctxid_cmp;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ctxid_cmp);
+
+static ssize_t etmsr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long flags, val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ pm_runtime_get_sync(drvdata->dev);
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ CS_UNLOCK(drvdata->base);
+
+ val = etm_readl(drvdata, ETMSR);
+
+ CS_LOCK(drvdata->base);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ pm_runtime_put(drvdata->dev);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(etmsr);
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int i, ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
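+	/* Writing any non-zero value restores the default configuration */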
+ if (val) {
+ spin_lock(&drvdata->spinlock);
+ memset(config, 0, sizeof(struct etm_config));
+ config->mode = ETM_MODE_EXCLUDE;
+ config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+		for (i = 0; i < drvdata->nr_addr_cmp; i++)
+			config->addr_type[i] = ETM_ADDR_TYPE_NONE;
+
+ etm_set_default(config);
+ spin_unlock(&drvdata->spinlock);
+ }
+
+ return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->mode;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
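+	/* Map the user-visible mode bits onto the ETMCR/ETMTECR1 control bits */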
+ spin_lock(&drvdata->spinlock);
+ config->mode = val & ETM_MODE_ALL;
+
+ if (config->mode & ETM_MODE_EXCLUDE)
+ config->enable_ctrl1 |= ETMTECR1_INC_EXC;
+ else
+ config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
+
+ if (config->mode & ETM_MODE_CYCACC)
+ config->ctrl |= ETMCR_CYC_ACC;
+ else
+ config->ctrl &= ~ETMCR_CYC_ACC;
+
+ if (config->mode & ETM_MODE_STALL) {
+ if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
+ dev_warn(drvdata->dev, "stall mode not supported\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ config->ctrl |= ETMCR_STALL_MODE;
+	} else {
+		config->ctrl &= ~ETMCR_STALL_MODE;
+	}
+
+ if (config->mode & ETM_MODE_TIMESTAMP) {
+ if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
+ dev_warn(drvdata->dev, "timestamp not supported\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ config->ctrl |= ETMCR_TIMESTAMP_EN;
+	} else {
+		config->ctrl &= ~ETMCR_TIMESTAMP_EN;
+	}
+
+ if (config->mode & ETM_MODE_CTXID)
+ config->ctrl |= ETMCR_CTXID_SIZE;
+ else
+ config->ctrl &= ~ETMCR_CTXID_SIZE;
+
+ if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+ etm_config_trace_mode(config);
+
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+
+err_unlock:
+ spin_unlock(&drvdata->spinlock);
+ return ret;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t trigger_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->trigger_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->trigger_event = val & ETM_EVENT_MASK;
+
+ return size;
+}
+static DEVICE_ATTR_RW(trigger_event);
+
+static ssize_t enable_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->enable_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t enable_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->enable_event = val & ETM_EVENT_MASK;
+
+ return size;
+}
+static DEVICE_ATTR_RW(enable_event);
+
+static ssize_t fifofull_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->fifofull_level;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t fifofull_level_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->fifofull_level = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(fifofull_level);
+
+static ssize_t addr_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->addr_idx;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val >= drvdata->nr_addr_cmp)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->addr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_single_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+
+ val = config->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+
+ config->addr_val[idx] = val;
+ config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val1 = config->addr_val[idx];
+ val2 = config->addr_val[idx + 1];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+ /* Lower address comparator cannot have a higher address value */
+ if (val1 > val2)
+ return -EINVAL;
+
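+	/* Address range comparators operate on even/odd register pairs */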
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = val1;
+ config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+ config->addr_val[idx + 1] = val2;
+ config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+ config->enable_ctrl1 |= (1 << (idx/2));
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_range);
+
+static ssize_t addr_start_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = config->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = val;
+ config->addr_type[idx] = ETM_ADDR_TYPE_START;
+ config->startstop_ctrl |= (1 << idx);
+	config->enable_ctrl1 |= ETMTECR1_START_STOP;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = config->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = val;
+ config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+ config->startstop_ctrl |= (1 << (idx + 16));
+ config->enable_ctrl1 |= ETMTECR1_START_STOP;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+static ssize_t addr_acctype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->addr_acctype[config->addr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_acctype_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->addr_acctype[config->addr_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_acctype);
+
+static ssize_t cntr_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->cntr_idx;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val >= drvdata->nr_cntr)
+ return -EINVAL;
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->cntr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntr_rld_val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->cntr_rld_val[config->cntr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->cntr_rld_val[config->cntr_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_val);
+
+static ssize_t cntr_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->cntr_event[config->cntr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_event);
+
+static ssize_t cntr_rld_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->cntr_rld_event[config->cntr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_event);
+
+static ssize_t cntr_val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret = 0;
+ u32 val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
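+	/* Report cached values when the tracer is idle, live registers otherwise */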
+ if (!local_read(&drvdata->mode)) {
+ spin_lock(&drvdata->spinlock);
+ for (i = 0; i < drvdata->nr_cntr; i++)
+			ret += sprintf(buf + ret, "counter %d: %x\n",
+ i, config->cntr_val[i]);
+ spin_unlock(&drvdata->spinlock);
+ return ret;
+ }
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ val = etm_readl(drvdata, ETMCNTVRn(i));
+		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
+ }
+
+ return ret;
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->cntr_val[config->cntr_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t seq_12_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_12_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_12_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_12_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_12_event);
+
+static ssize_t seq_21_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_21_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_21_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_21_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_21_event);
+
+static ssize_t seq_23_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_23_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_23_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_23_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_23_event);
+
+static ssize_t seq_31_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_31_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_31_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_31_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_31_event);
+
+static ssize_t seq_32_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_32_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_32_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_32_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_32_event);
+
+static ssize_t seq_13_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_13_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_13_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_13_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_13_event);
+
+static ssize_t seq_curr_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val, flags;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ if (!local_read(&drvdata->mode)) {
+ val = config->seq_curr_state;
+ goto out;
+ }
+
+ pm_runtime_get_sync(drvdata->dev);
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ CS_UNLOCK(drvdata->base);
+ val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+ CS_LOCK(drvdata->base);
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ pm_runtime_put(drvdata->dev);
+out:
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_curr_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val > ETM_SEQ_STATE_MAX_VAL)
+ return -EINVAL;
+
+ config->seq_curr_state = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(seq_curr_state);
+
+static ssize_t ctxid_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->ctxid_idx;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val >= drvdata->nr_ctxid_cmp)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->ctxid_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_pid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ctxid_vpid[config->ctxid_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_pid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long vpid, pid;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &vpid);
+ if (ret)
+ return ret;
+
+ pid = coresight_vpid_to_pid(vpid);
+
+ spin_lock(&drvdata->spinlock);
+ config->ctxid_pid[config->ctxid_idx] = pid;
+ config->ctxid_vpid[config->ctxid_idx] = vpid;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_pid);
+
+static ssize_t ctxid_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->ctxid_mask;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->ctxid_mask = val;
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_mask);
+
+static ssize_t sync_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->sync_freq;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t sync_freq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->sync_freq = val & ETM_SYNC_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(sync_freq);
+
+static ssize_t timestamp_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->timestamp_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t timestamp_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->timestamp_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(timestamp_event);
+
+static ssize_t cpu_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->cpu;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+static DEVICE_ATTR_RO(cpu);
+
+static ssize_t traceid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = etm_get_trace_id(drvdata);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->traceid = val & ETM_TRACEID_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(traceid);
+
+static struct attribute *coresight_etm_attrs[] = {
+ &dev_attr_nr_addr_cmp.attr,
+ &dev_attr_nr_cntr.attr,
+ &dev_attr_nr_ctxid_cmp.attr,
+ &dev_attr_etmsr.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_trigger_event.attr,
+ &dev_attr_enable_event.attr,
+ &dev_attr_fifofull_level.attr,
+ &dev_attr_addr_idx.attr,
+ &dev_attr_addr_single.attr,
+ &dev_attr_addr_range.attr,
+ &dev_attr_addr_start.attr,
+ &dev_attr_addr_stop.attr,
+ &dev_attr_addr_acctype.attr,
+ &dev_attr_cntr_idx.attr,
+ &dev_attr_cntr_rld_val.attr,
+ &dev_attr_cntr_event.attr,
+ &dev_attr_cntr_rld_event.attr,
+ &dev_attr_cntr_val.attr,
+ &dev_attr_seq_12_event.attr,
+ &dev_attr_seq_21_event.attr,
+ &dev_attr_seq_23_event.attr,
+ &dev_attr_seq_31_event.attr,
+ &dev_attr_seq_32_event.attr,
+ &dev_attr_seq_13_event.attr,
+ &dev_attr_seq_curr_state.attr,
+ &dev_attr_ctxid_idx.attr,
+ &dev_attr_ctxid_pid.attr,
+ &dev_attr_ctxid_mask.attr,
+ &dev_attr_sync_freq.attr,
+ &dev_attr_timestamp_event.attr,
+ &dev_attr_traceid.attr,
+ &dev_attr_cpu.attr,
+ NULL,
+};
+
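+/*
+ * Generate a read-only sysfs attribute exposing the raw value of a
+ * single management register.
+ */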
+#define coresight_simple_func(name, offset) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
+ readl_relaxed(drvdata->base + offset)); \
+} \
+static DEVICE_ATTR_RO(name)
+
+coresight_simple_func(etmccr, ETMCCR);
+coresight_simple_func(etmccer, ETMCCER);
+coresight_simple_func(etmscr, ETMSCR);
+coresight_simple_func(etmidr, ETMIDR);
+coresight_simple_func(etmcr, ETMCR);
+coresight_simple_func(etmtraceidr, ETMTRACEIDR);
+coresight_simple_func(etmteevr, ETMTEEVR);
+coresight_simple_func(etmtssvr, ETMTSSCR);
+coresight_simple_func(etmtecr1, ETMTECR1);
+coresight_simple_func(etmtecr2, ETMTECR2);
+
+static struct attribute *coresight_etm_mgmt_attrs[] = {
+ &dev_attr_etmccr.attr,
+ &dev_attr_etmccer.attr,
+ &dev_attr_etmscr.attr,
+ &dev_attr_etmidr.attr,
+ &dev_attr_etmcr.attr,
+ &dev_attr_etmtraceidr.attr,
+ &dev_attr_etmteevr.attr,
+ &dev_attr_etmtssvr.attr,
+ &dev_attr_etmtecr1.attr,
+ &dev_attr_etmtecr2.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_etm_group = {
+ .attrs = coresight_etm_attrs,
+};
+
+static const struct attribute_group coresight_etm_mgmt_group = {
+ .attrs = coresight_etm_mgmt_attrs,
+ .name = "mgmt",
+};
+
+const struct attribute_group *coresight_etm_groups[] = {
+ &coresight_etm_group,
+ &coresight_etm_mgmt_group,
+ NULL,
+};
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index d630b7ece735..d83ab82672e4 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Program Flow Trace driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -27,14 +29,21 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
+#include <linux/perf_event.h>
#include <asm/sections.h>
#include "coresight-etm.h"
+#include "coresight-etm-perf.h"
+/*
+ * Not really modular but using module_param is the easiest way to
+ * remain consistent with existing use cases for now.
+ */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);
@@ -42,45 +51,16 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
static int etm_count;
static struct etm_drvdata *etmdrvdata[NR_CPUS];
-static inline void etm_writel(struct etm_drvdata *drvdata,
- u32 val, u32 off)
-{
- if (drvdata->use_cp14) {
- if (etm_writel_cp14(off, val)) {
- dev_err(drvdata->dev,
- "invalid CP14 access to ETM reg: %#x", off);
- }
- } else {
- writel_relaxed(val, drvdata->base + off);
- }
-}
-
-static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
-{
- u32 val;
-
- if (drvdata->use_cp14) {
- if (etm_readl_cp14(off, &val)) {
- dev_err(drvdata->dev,
- "invalid CP14 access to ETM reg: %#x", off);
- }
- } else {
- val = readl_relaxed(drvdata->base + off);
- }
-
- return val;
-}
-
/*
* Memory mapped writes to clear os lock are not supported on some processors
* and OS lock must be unlocked before any memory mapped access on such
* processors, otherwise memory mapped reads/writes will be invalid.
*/
-static void etm_os_unlock(void *info)
+static void etm_os_unlock(struct etm_drvdata *drvdata)
{
- struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
/* Writing any value to ETMOSLAR unlocks the trace registers */
etm_writel(drvdata, 0x0, ETMOSLAR);
+ drvdata->os_unlock = true;
isb();
}
@@ -215,36 +195,156 @@ static void etm_clr_prog(struct etm_drvdata *drvdata)
}
}
-static void etm_set_default(struct etm_drvdata *drvdata)
+void etm_set_default(struct etm_config *config)
{
int i;
- drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->enable_event = ETM_HARD_WIRE_RES_A;
+ if (WARN_ON_ONCE(!config))
+ return;
- drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
+ /*
+ * Taken verbatim from the TRM:
+ *
+ * To trace all memory:
+ * set bit [24] in register 0x009, the ETMTECR1, to 1
+ * set all other bits in register 0x009, the ETMTECR1, to 0
+ * set all bits in register 0x007, the ETMTECR2, to 0
+ * set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
+ */
+ config->enable_ctrl1 = BIT(24);
+ config->enable_ctrl2 = 0x0;
+ config->enable_event = ETM_HARD_WIRE_RES_A;
- for (i = 0; i < drvdata->nr_cntr; i++) {
- drvdata->cntr_rld_val[i] = 0x0;
- drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
- drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
- drvdata->cntr_val[i] = 0x0;
+	config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+
+ config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
+ config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
+
+ for (i = 0; i < ETM_MAX_CNTR; i++) {
+ config->cntr_rld_val[i] = 0x0;
+ config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
+ config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
+ config->cntr_val[i] = 0x0;
}
- drvdata->seq_curr_state = 0x0;
- drvdata->ctxid_idx = 0x0;
- for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
- drvdata->ctxid_pid[i] = 0x0;
- drvdata->ctxid_vpid[i] = 0x0;
+ config->seq_curr_state = 0x0;
+ config->ctxid_idx = 0x0;
+ for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+ config->ctxid_pid[i] = 0x0;
+ config->ctxid_vpid[i] = 0x0;
}
- drvdata->ctxid_mask = 0x0;
+ config->ctxid_mask = 0x0;
+}
+
+void etm_config_trace_mode(struct etm_config *config)
+{
+ u32 flags, mode;
+
+ mode = config->mode;
+
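+	/* Only the exclusion bits are of interest here */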
+ mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
+
+ /* excluding kernel AND user space doesn't make sense */
+ if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+ return;
+
+	/* nothing to do if neither flag is set */
+ if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
+ return;
+
+ flags = (1 << 0 | /* instruction execute */
+ 3 << 3 | /* ARM instruction */
+ 0 << 5 | /* No data value comparison */
+		 0 << 7 | /* No exact match */
+ 0 << 8); /* Ignore context ID */
+
+ /* No need to worry about single address comparators. */
+ config->enable_ctrl2 = 0x0;
+
+ /* Bit 0 is address range comparator 1 */
+ config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
+
+ /*
+ * On ETMv3.5:
+ * ETMACTRn[13,11] == Non-secure state comparison control
+ * ETMACTRn[12,10] == Secure state comparison control
+ *
+ * b00 == Match in all modes in this state
+	 * b01 == Do not match in any mode in this state
+	 * b10 == Match in all modes except user mode in this state
+ * b11 == Match only in user mode in this state
+ */
+
+ /* Tracing in secure mode is not supported at this time */
+ flags |= (0 << 12 | 1 << 10);
+
+ if (mode & ETM_MODE_EXCL_USER) {
+ /* exclude user, match all modes except user mode */
+ flags |= (1 << 13 | 0 << 11);
+ } else {
+ /* exclude kernel, match only in user mode */
+ flags |= (1 << 13 | 1 << 11);
+ }
+
+ /*
+	 * The ETMTEEVR register is already set to "hard wire A". As such
+ * all there is to do is setup an address comparator that spans
+ * the entire address range and configure the state and mode bits.
+ */
+ config->addr_val[0] = (u32) 0x0;
+ config->addr_val[1] = (u32) ~0x0;
+ config->addr_acctype[0] = flags;
+ config->addr_acctype[1] = flags;
+ config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+ config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+}
+
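+/* Perf event options currently honoured when programming ETMCR */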
+#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
+
+static int etm_parse_event_config(struct etm_drvdata *drvdata,
+ struct perf_event_attr *attr)
+{
+ struct etm_config *config = &drvdata->config;
+
+ if (!attr)
+ return -EINVAL;
+
+ /* Clear configuration from previous run */
+ memset(config, 0, sizeof(struct etm_config));
+
+ if (attr->exclude_kernel)
+ config->mode = ETM_MODE_EXCL_KERN;
+
+ if (attr->exclude_user)
+ config->mode = ETM_MODE_EXCL_USER;
+
+ /* Always start from the default config */
+ etm_set_default(config);
+
+ /*
+ * By default the tracers are configured to trace the whole address
+ * range. Narrow the field only if requested by user space.
+ */
+ if (config->mode)
+ etm_config_trace_mode(config);
+
+ /*
+ * At this time only cycle accurate and timestamp options are
+ * available.
+ */
+ if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
+ return -EINVAL;
+
+ config->ctrl = attr->config;
+
+ return 0;
}
static void etm_enable_hw(void *info)
@@ -252,6 +352,7 @@ static void etm_enable_hw(void *info)
int i;
u32 etmcr;
struct etm_drvdata *drvdata = info;
+ struct etm_config *config = &drvdata->config;
CS_UNLOCK(drvdata->base);
@@ -265,65 +366,74 @@ static void etm_enable_hw(void *info)
etm_set_prog(drvdata);
etmcr = etm_readl(drvdata, ETMCR);
- etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
+	/* Clear settings from a previous run if need be */
+ etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
etmcr |= drvdata->port_size;
- etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
- etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
- etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
- etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
- etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
- etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
+ etmcr |= ETMCR_ETM_EN;
+ etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
+ etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
+ etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
+ etm_writel(drvdata, config->enable_event, ETMTEEVR);
+ etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
+ etm_writel(drvdata, config->fifofull_level, ETMFFLR);
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
- etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
- etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
+ etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
+ etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
}
for (i = 0; i < drvdata->nr_cntr; i++) {
- etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
- etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
- etm_writel(drvdata, drvdata->cntr_rld_event[i],
+ etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
+ etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
+ etm_writel(drvdata, config->cntr_rld_event[i],
ETMCNTRLDEVRn(i));
- etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
- }
- etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
- etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
- etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
- etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
- etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
- etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
- etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
+ etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
+ }
+ etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
+ etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
+ etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
+ etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
+ etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
+ etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
+ etm_writel(drvdata, config->seq_curr_state, ETMSQR);
for (i = 0; i < drvdata->nr_ext_out; i++)
etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
- etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
- etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
- etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
+ etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
+ etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
+ etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
/* No external input selected */
etm_writel(drvdata, 0x0, ETMEXTINSELR);
- etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
+ etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
/* No auxiliary control selected */
etm_writel(drvdata, 0x0, ETMAUXCR);
etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
/* No VMID comparator value selected */
etm_writel(drvdata, 0x0, ETMVMIDCVR);
- /* Ensures trace output is enabled from this ETM */
- etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
-
etm_clr_prog(drvdata);
CS_LOCK(drvdata->base);
dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
-static int etm_trace_id(struct coresight_device *csdev)
+static int etm_cpu_id(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->cpu;
+}
+
+int etm_get_trace_id(struct etm_drvdata *drvdata)
+{
unsigned long flags;
int trace_id = -1;
- if (!drvdata->enable)
+ if (!drvdata)
+ goto out;
+
+ if (!local_read(&drvdata->mode))
return drvdata->traceid;
- pm_runtime_get_sync(csdev->dev.parent);
+
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -332,17 +442,41 @@ static int etm_trace_id(struct coresight_device *csdev)
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(csdev->dev.parent);
+ pm_runtime_put(drvdata->dev);
+out:
return trace_id;
+}
+
+static int etm_trace_id(struct coresight_device *csdev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return etm_get_trace_id(drvdata);
}
-static int etm_enable(struct coresight_device *csdev)
+static int etm_enable_perf(struct coresight_device *csdev,
+ struct perf_event_attr *attr)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+ return -EINVAL;
+
+ /* Configure the tracer based on the session's specifics */
+	if (etm_parse_event_config(drvdata, attr))
+		return -EINVAL;
+ /* And enable it */
+ etm_enable_hw(drvdata);
+
+ return 0;
+}
+
+static int etm_enable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
- pm_runtime_get_sync(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
/*
@@ -357,16 +491,45 @@ static int etm_enable(struct coresight_device *csdev)
goto err;
}
- drvdata->enable = true;
drvdata->sticky_enable = true;
-
spin_unlock(&drvdata->spinlock);
dev_info(drvdata->dev, "ETM tracing enabled\n");
return 0;
+
err:
spin_unlock(&drvdata->spinlock);
- pm_runtime_put(csdev->dev.parent);
+ return ret;
+}
+
+static int etm_enable(struct coresight_device *csdev,
+ struct perf_event_attr *attr, u32 mode)
+{
+ int ret;
+ u32 val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
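+	/*
+	 * Claim the tracer atomically: it can be driven either from sysFS
+	 * or from perf, but never by both at the same time.
+	 */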
+ val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
+
+ /* Someone is already using the tracer */
+ if (val)
+ return -EBUSY;
+
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ ret = etm_enable_sysfs(csdev);
+ break;
+ case CS_MODE_PERF:
+ ret = etm_enable_perf(csdev, attr);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ /* The tracer didn't start */
+ if (ret)
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
+
return ret;
}
@@ -374,18 +537,16 @@ static void etm_disable_hw(void *info)
{
int i;
struct etm_drvdata *drvdata = info;
+ struct etm_config *config = &drvdata->config;
CS_UNLOCK(drvdata->base);
etm_set_prog(drvdata);
- /* Program trace enable to low by using always false event */
- etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
-
/* Read back sequencer and counters for post trace analysis */
- drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+ config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
for (i = 0; i < drvdata->nr_cntr; i++)
- drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
+ config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
etm_set_pwrdwn(drvdata);
CS_LOCK(drvdata->base);
@@ -393,7 +554,28 @@ static void etm_disable_hw(void *info)
dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
-static void etm_disable(struct coresight_device *csdev)
+static void etm_disable_perf(struct coresight_device *csdev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+ return;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Setting the prog bit disables tracing immediately */
+ etm_set_prog(drvdata);
+
+ /*
+ * There is no way to know when the tracer will be used again so
+ * power down the tracer.
+ */
+ etm_set_pwrdwn(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void etm_disable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -411,1235 +593,52 @@ static void etm_disable(struct coresight_device *csdev)
* ensures that register writes occur when cpu is powered.
*/
smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
- drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
put_online_cpus();
- pm_runtime_put(csdev->dev.parent);
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
-static const struct coresight_ops_source etm_source_ops = {
- .trace_id = etm_trace_id,
- .enable = etm_enable,
- .disable = etm_disable,
-};
-
-static const struct coresight_ops etm_cs_ops = {
- .source_ops = &etm_source_ops,
-};
-
-static ssize_t nr_addr_cmp_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_addr_cmp;
- return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_addr_cmp);
-
-static ssize_t nr_cntr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{ unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_cntr;
- return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_cntr);
-
-static ssize_t nr_ctxid_cmp_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_ctxid_cmp;
- return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ctxid_cmp);
-
-static ssize_t etmsr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long flags, val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- pm_runtime_get_sync(drvdata->dev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
- CS_UNLOCK(drvdata->base);
-
- val = etm_readl(drvdata, ETMSR);
-
- CS_LOCK(drvdata->base);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
-
- return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(etmsr);
-
-static ssize_t reset_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int i, ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val) {
- spin_lock(&drvdata->spinlock);
- drvdata->mode = ETM_MODE_EXCLUDE;
- drvdata->ctrl = 0x0;
- drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->startstop_ctrl = 0x0;
- drvdata->addr_idx = 0x0;
- for (i = 0; i < drvdata->nr_addr_cmp; i++) {
- drvdata->addr_val[i] = 0x0;
- drvdata->addr_acctype[i] = 0x0;
- drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
- }
- drvdata->cntr_idx = 0x0;
-
- etm_set_default(drvdata);
- spin_unlock(&drvdata->spinlock);
- }
-
- return size;
-}
-static DEVICE_ATTR_WO(reset);
-
-static ssize_t mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->mode;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->mode = val & ETM_MODE_ALL;
-
- if (drvdata->mode & ETM_MODE_EXCLUDE)
- drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
- else
- drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
-
- if (drvdata->mode & ETM_MODE_CYCACC)
- drvdata->ctrl |= ETMCR_CYC_ACC;
- else
- drvdata->ctrl &= ~ETMCR_CYC_ACC;
-
- if (drvdata->mode & ETM_MODE_STALL) {
- if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
- dev_warn(drvdata->dev, "stall mode not supported\n");
- ret = -EINVAL;
- goto err_unlock;
- }
- drvdata->ctrl |= ETMCR_STALL_MODE;
- } else
- drvdata->ctrl &= ~ETMCR_STALL_MODE;
-
- if (drvdata->mode & ETM_MODE_TIMESTAMP) {
- if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
- dev_warn(drvdata->dev, "timestamp not supported\n");
- ret = -EINVAL;
- goto err_unlock;
- }
- drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
- } else
- drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
-
- if (drvdata->mode & ETM_MODE_CTXID)
- drvdata->ctrl |= ETMCR_CTXID_SIZE;
- else
- drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-
-err_unlock:
- spin_unlock(&drvdata->spinlock);
- return ret;
-}
-static DEVICE_ATTR_RW(mode);
-
-static ssize_t trigger_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->trigger_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t trigger_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->trigger_event = val & ETM_EVENT_MASK;
-
- return size;
-}
-static DEVICE_ATTR_RW(trigger_event);
-
-static ssize_t enable_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->enable_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t enable_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->enable_event = val & ETM_EVENT_MASK;
-
- return size;
-}
-static DEVICE_ATTR_RW(enable_event);
-
-static ssize_t fifofull_level_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->fifofull_level;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t fifofull_level_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->fifofull_level = val;
-
- return size;
-}
-static DEVICE_ATTR_RW(fifofull_level);
-
-static ssize_t addr_idx_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->addr_idx;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val >= drvdata->nr_addr_cmp)
- return -EINVAL;
-
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->addr_idx = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_idx);
-
-static ssize_t addr_single_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
- return -EINVAL;
- }
-
- val = drvdata->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_single_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
- return -EINVAL;
- }
-
- drvdata->addr_val[idx] = val;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_single);
-
-static ssize_t addr_range_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 idx;
- unsigned long val1, val2;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
- if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
- (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- val1 = drvdata->addr_val[idx];
- val2 = drvdata->addr_val[idx + 1];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx %#lx\n", val1, val2);
-}
-
-static ssize_t addr_range_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val1, val2;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
- return -EINVAL;
- /* Lower address comparator cannot have a higher address value */
- if (val1 > val2)
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
- if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
- (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = val1;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
- drvdata->addr_val[idx + 1] = val2;
- drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
- drvdata->enable_ctrl1 |= (1 << (idx/2));
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_range);
-
-static ssize_t addr_start_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- val = drvdata->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_start_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = val;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
- drvdata->startstop_ctrl |= (1 << idx);
- drvdata->enable_ctrl1 |= BIT(25);
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_start);
-
-static ssize_t addr_stop_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- val = drvdata->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_stop_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = val;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
- drvdata->startstop_ctrl |= (1 << (idx + 16));
- drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_stop);
-
-static ssize_t addr_acctype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->addr_acctype[drvdata->addr_idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_acctype_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->addr_acctype[drvdata->addr_idx] = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_acctype);
-
-static ssize_t cntr_idx_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->cntr_idx;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val >= drvdata->nr_cntr)
- return -EINVAL;
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_idx = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_idx);
-
-static ssize_t cntr_rld_val_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->cntr_rld_val[drvdata->cntr_idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_val_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_val);
-
-static ssize_t cntr_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->cntr_event[drvdata->cntr_idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_event);
-
-static ssize_t cntr_rld_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->cntr_rld_event[drvdata->cntr_idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_event);
-
-static ssize_t cntr_val_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int i, ret = 0;
- u32 val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (!drvdata->enable) {
- spin_lock(&drvdata->spinlock);
- for (i = 0; i < drvdata->nr_cntr; i++)
- ret += sprintf(buf, "counter %d: %x\n",
- i, drvdata->cntr_val[i]);
- spin_unlock(&drvdata->spinlock);
- return ret;
- }
-
- for (i = 0; i < drvdata->nr_cntr; i++) {
- val = etm_readl(drvdata, ETMCNTVRn(i));
- ret += sprintf(buf, "counter %d: %x\n", i, val);
- }
-
- return ret;
-}
-
-static ssize_t cntr_val_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_val[drvdata->cntr_idx] = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_val);
-
-static ssize_t seq_12_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_12_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_12_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_12_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_12_event);
-
-static ssize_t seq_21_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_21_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_21_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_21_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_21_event);
-
-static ssize_t seq_23_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_23_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_23_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_23_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_23_event);
-
-static ssize_t seq_31_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_31_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_31_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_31_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_31_event);
-
-static ssize_t seq_32_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_32_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_32_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_32_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_32_event);
-
-static ssize_t seq_13_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_13_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_13_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_13_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_13_event);
-
-static ssize_t seq_curr_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val, flags;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (!drvdata->enable) {
- val = drvdata->seq_curr_state;
- goto out;
- }
-
- pm_runtime_get_sync(drvdata->dev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
-
- CS_UNLOCK(drvdata->base);
- val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
- CS_LOCK(drvdata->base);
-
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
-out:
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_curr_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val > ETM_SEQ_STATE_MAX_VAL)
- return -EINVAL;
-
- drvdata->seq_curr_state = val;
-
- return size;
-}
-static DEVICE_ATTR_RW(seq_curr_state);
-
-static ssize_t ctxid_idx_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->ctxid_idx;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t ctxid_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static void etm_disable(struct coresight_device *csdev)
{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val >= drvdata->nr_ctxid_cmp)
- return -EINVAL;
+ u32 mode;
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
+	 * For as long as the tracer isn't disabled, another entity can't
+ * change its status. As such we can read the status here without
+ * fearing it will change under us.
*/
- spin_lock(&drvdata->spinlock);
- drvdata->ctxid_idx = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(ctxid_idx);
-
-static ssize_t ctxid_pid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
- spin_unlock(&drvdata->spinlock);
+ mode = local_read(&drvdata->mode);
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t ctxid_pid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long vpid, pid;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &vpid);
- if (ret)
- return ret;
-
- pid = coresight_vpid_to_pid(vpid);
-
- spin_lock(&drvdata->spinlock);
- drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
- drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(ctxid_pid);
-
-static ssize_t ctxid_mask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->ctxid_mask;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t ctxid_mask_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->ctxid_mask = val;
- return size;
-}
-static DEVICE_ATTR_RW(ctxid_mask);
-
-static ssize_t sync_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->sync_freq;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t sync_freq_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->sync_freq = val & ETM_SYNC_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(sync_freq);
-
-static ssize_t timestamp_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->timestamp_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t timestamp_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->timestamp_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(timestamp_event);
-
-static ssize_t cpu_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->cpu;
- return scnprintf(buf, PAGE_SIZE, "%d\n", val);
-
-}
-static DEVICE_ATTR_RO(cpu);
-
-static ssize_t traceid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val, flags;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (!drvdata->enable) {
- val = drvdata->traceid;
- goto out;
+ switch (mode) {
+ case CS_MODE_DISABLED:
+ break;
+ case CS_MODE_SYSFS:
+ etm_disable_sysfs(csdev);
+ break;
+ case CS_MODE_PERF:
+ etm_disable_perf(csdev);
+ break;
+ default:
+ WARN_ON_ONCE(mode);
+ return;
}
- pm_runtime_get_sync(drvdata->dev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
- CS_UNLOCK(drvdata->base);
-
- val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
-
- CS_LOCK(drvdata->base);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
-out:
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t traceid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->traceid = val & ETM_TRACEID_MASK;
- return size;
+ if (mode)
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
}
-static DEVICE_ATTR_RW(traceid);
-
-static struct attribute *coresight_etm_attrs[] = {
- &dev_attr_nr_addr_cmp.attr,
- &dev_attr_nr_cntr.attr,
- &dev_attr_nr_ctxid_cmp.attr,
- &dev_attr_etmsr.attr,
- &dev_attr_reset.attr,
- &dev_attr_mode.attr,
- &dev_attr_trigger_event.attr,
- &dev_attr_enable_event.attr,
- &dev_attr_fifofull_level.attr,
- &dev_attr_addr_idx.attr,
- &dev_attr_addr_single.attr,
- &dev_attr_addr_range.attr,
- &dev_attr_addr_start.attr,
- &dev_attr_addr_stop.attr,
- &dev_attr_addr_acctype.attr,
- &dev_attr_cntr_idx.attr,
- &dev_attr_cntr_rld_val.attr,
- &dev_attr_cntr_event.attr,
- &dev_attr_cntr_rld_event.attr,
- &dev_attr_cntr_val.attr,
- &dev_attr_seq_12_event.attr,
- &dev_attr_seq_21_event.attr,
- &dev_attr_seq_23_event.attr,
- &dev_attr_seq_31_event.attr,
- &dev_attr_seq_32_event.attr,
- &dev_attr_seq_13_event.attr,
- &dev_attr_seq_curr_state.attr,
- &dev_attr_ctxid_idx.attr,
- &dev_attr_ctxid_pid.attr,
- &dev_attr_ctxid_mask.attr,
- &dev_attr_sync_freq.attr,
- &dev_attr_timestamp_event.attr,
- &dev_attr_traceid.attr,
- &dev_attr_cpu.attr,
- NULL,
-};
-
-#define coresight_simple_func(name, offset) \
-static ssize_t name##_show(struct device *_dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
- readl_relaxed(drvdata->base + offset)); \
-} \
-DEVICE_ATTR_RO(name)
-
-coresight_simple_func(etmccr, ETMCCR);
-coresight_simple_func(etmccer, ETMCCER);
-coresight_simple_func(etmscr, ETMSCR);
-coresight_simple_func(etmidr, ETMIDR);
-coresight_simple_func(etmcr, ETMCR);
-coresight_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_simple_func(etmteevr, ETMTEEVR);
-coresight_simple_func(etmtssvr, ETMTSSCR);
-coresight_simple_func(etmtecr1, ETMTECR1);
-coresight_simple_func(etmtecr2, ETMTECR2);
-
-static struct attribute *coresight_etm_mgmt_attrs[] = {
- &dev_attr_etmccr.attr,
- &dev_attr_etmccer.attr,
- &dev_attr_etmscr.attr,
- &dev_attr_etmidr.attr,
- &dev_attr_etmcr.attr,
- &dev_attr_etmtraceidr.attr,
- &dev_attr_etmteevr.attr,
- &dev_attr_etmtssvr.attr,
- &dev_attr_etmtecr1.attr,
- &dev_attr_etmtecr2.attr,
- NULL,
-};
-static const struct attribute_group coresight_etm_group = {
- .attrs = coresight_etm_attrs,
-};
-
-
-static const struct attribute_group coresight_etm_mgmt_group = {
- .attrs = coresight_etm_mgmt_attrs,
- .name = "mgmt",
+static const struct coresight_ops_source etm_source_ops = {
+ .cpu_id = etm_cpu_id,
+ .trace_id = etm_trace_id,
+ .enable = etm_enable,
+ .disable = etm_disable,
};
-static const struct attribute_group *coresight_etm_groups[] = {
- &coresight_etm_group,
- &coresight_etm_mgmt_group,
- NULL,
+static const struct coresight_ops etm_cs_ops = {
+ .source_ops = &etm_source_ops,
};
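
The source ops above route enable/disable through the new mode-aware entry points. etm_enable() itself is outside this hunk; a minimal sketch of its claiming side, assuming the same local_t 'mode' field and CS_MODE values used in etm_disable() above (function name and body are illustrative, not the commit's code):

static int etm_enable_sketch(struct coresight_device *csdev,
			     struct perf_event_attr *attr, u32 mode)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	u32 val;

	/* Claim the tracer: succeeds only if it was CS_MODE_DISABLED. */
	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
	if (val)
		return -EBUSY;

	/* ... sysFS- or perf-specific programming would follow ... */
	return 0;
}

Once a non-disabled mode is claimed this way, no other entity can change it, which is why etm_disable() above can do a bare local_read() without taking the spinlock.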
static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
@@ -1658,7 +657,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
etmdrvdata[cpu]->os_unlock = true;
}
- if (etmdrvdata[cpu]->enable)
+ if (local_read(&etmdrvdata[cpu]->mode))
etm_enable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
break;
@@ -1671,7 +670,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
case CPU_DYING:
spin_lock(&etmdrvdata[cpu]->spinlock);
- if (etmdrvdata[cpu]->enable)
+ if (local_read(&etmdrvdata[cpu]->mode))
etm_disable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
break;
@@ -1707,6 +706,9 @@ static void etm_init_arch_data(void *info)
u32 etmccr;
struct etm_drvdata *drvdata = info;
+ /* Make sure all registers are accessible */
+ etm_os_unlock(drvdata);
+
CS_UNLOCK(drvdata->base);
/* First dummy read */
@@ -1743,40 +745,9 @@ static void etm_init_arch_data(void *info)
CS_LOCK(drvdata->base);
}
-static void etm_init_default_data(struct etm_drvdata *drvdata)
+static void etm_init_trace_id(struct etm_drvdata *drvdata)
{
- /*
- * A trace ID of value 0 is invalid, so let's start at some
- * random value that fits in 7 bits and will be just as good.
- */
- static int etm3x_traceid = 0x10;
-
- u32 flags = (1 << 0 | /* instruction execute*/
- 3 << 3 | /* ARM instruction */
- 0 << 5 | /* No data value comparison */
- 0 << 7 | /* No exact mach */
- 0 << 8 | /* Ignore context ID */
- 0 << 10); /* Security ignored */
-
- /*
- * Initial configuration only - guarantees sources handled by
- * this driver have a unique ID at startup time but not between
- * all other types of sources. For that we lean on the core
- * framework.
- */
- drvdata->traceid = etm3x_traceid++;
- drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
- drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
- if (drvdata->nr_addr_cmp >= 2) {
- drvdata->addr_val[0] = (u32) _stext;
- drvdata->addr_val[1] = (u32) _etext;
- drvdata->addr_acctype[0] = flags;
- drvdata->addr_acctype[1] = flags;
- drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
- drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
- }
-
- etm_set_default(drvdata);
+ drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
}
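
etm_init_trace_id() now leans on the core framework instead of the static etm3x_traceid counter removed above. For reference, the helper most plausibly reduces to a per-CPU formula along these lines (the seed constant is an assumption drawn from the companion coresight-pmu header of this series, not from this hunk):

static inline int coresight_get_trace_id(int cpu)
{
	/*
	 * 0x10 avoids the invalid trace ID 0 and the reserved low IDs;
	 * spacing by 2 leaves a gap per CPU for other source types.
	 */
	return 0x10 + cpu * 2;
}

The upshot is that a CPU's tracer gets the same ID across the sysFS and perf interfaces, rather than an ID that depends on probe order.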
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
@@ -1831,9 +802,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
get_online_cpus();
etmdrvdata[drvdata->cpu] = drvdata;
- if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
- drvdata->os_unlock = true;
-
if (smp_call_function_single(drvdata->cpu,
etm_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
@@ -1847,7 +815,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
ret = -EINVAL;
goto err_arch_supported;
}
- etm_init_default_data(drvdata);
+
+ etm_init_trace_id(drvdata);
+ etm_set_default(&drvdata->config);
desc->type = CORESIGHT_DEV_TYPE_SOURCE;
desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -1861,6 +831,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
goto err_arch_supported;
}
+ ret = etm_perf_symlink(drvdata->csdev, true);
+ if (ret) {
+ coresight_unregister(drvdata->csdev);
+ goto err_arch_supported;
+ }
+
pm_runtime_put(&adev->dev);
dev_info(dev, "%s initialized\n", (char *)id->data);
@@ -1877,17 +853,6 @@ err_arch_supported:
return ret;
}
-static int etm_remove(struct amba_device *adev)
-{
- struct etm_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- if (--etm_count == 0)
- unregister_hotcpu_notifier(&etm_cpu_notifier);
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int etm_runtime_suspend(struct device *dev)
{
@@ -1948,13 +913,9 @@ static struct amba_driver etm_driver = {
.name = "coresight-etm3x",
.owner = THIS_MODULE,
.pm = &etm_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = etm_probe,
- .remove = etm_remove,
.id_table = etm_ids,
};
-
-module_amba_driver(etm_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
+builtin_amba_driver(etm_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a6707642bb23..1c59bd36834c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
@@ -32,6 +31,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
+#include <linux/perf_event.h>
#include <asm/sections.h>
#include "coresight-etm4x.h"
@@ -63,6 +63,13 @@ static bool etm4_arch_supported(u8 arch)
return true;
}
+static int etm4_cpu_id(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->cpu;
+}
+
static int etm4_trace_id(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -72,7 +79,6 @@ static int etm4_trace_id(struct coresight_device *csdev)
if (!drvdata->enable)
return drvdata->trcid;
- pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -81,7 +87,6 @@ static int etm4_trace_id(struct coresight_device *csdev)
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
return trace_id;
}
@@ -182,12 +187,12 @@ static void etm4_enable_hw(void *info)
dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
-static int etm4_enable(struct coresight_device *csdev)
+static int etm4_enable(struct coresight_device *csdev,
+ struct perf_event_attr *attr, u32 mode)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
- pm_runtime_get_sync(drvdata->dev);
spin_lock(&drvdata->spinlock);
/*
@@ -207,7 +212,6 @@ static int etm4_enable(struct coresight_device *csdev)
return 0;
err:
spin_unlock(&drvdata->spinlock);
- pm_runtime_put(drvdata->dev);
return ret;
}
@@ -256,12 +260,11 @@ static void etm4_disable(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
put_online_cpus();
- pm_runtime_put(drvdata->dev);
-
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
static const struct coresight_ops_source etm4_source_ops = {
+ .cpu_id = etm4_cpu_id,
.trace_id = etm4_trace_id,
.enable = etm4_enable,
.disable = etm4_disable,
@@ -2219,7 +2222,7 @@ static ssize_t name##_show(struct device *_dev, \
return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
readl_relaxed(drvdata->base + offset)); \
} \
-DEVICE_ATTR_RO(name)
+static DEVICE_ATTR_RO(name)
coresight_simple_func(trcoslsr, TRCOSLSR);
coresight_simple_func(trcpdcr, TRCPDCR);
@@ -2684,17 +2687,6 @@ err_coresight_register:
return ret;
}
-static int etm4_remove(struct amba_device *adev)
-{
- struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- if (--etm4_count == 0)
- unregister_hotcpu_notifier(&etm4_cpu_notifier);
-
- return 0;
-}
-
static struct amba_id etm4_ids[] = {
{ /* ETM 4.0 - Qualcomm */
.id = 0x0003b95d,
@@ -2712,10 +2704,9 @@ static struct amba_id etm4_ids[] = {
static struct amba_driver etm4x_driver = {
.drv = {
.name = "coresight-etm4x",
+ .suppress_bind_attrs = true,
},
.probe = etm4_probe,
- .remove = etm4_remove,
.id_table = etm4_ids,
};
-
-module_amba_driver(etm4x_driver);
+builtin_amba_driver(etm4x_driver);
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 2e36bde7fcb4..0600ca30649d 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Funnel driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -69,7 +70,6 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
funnel_enable_hw(drvdata, inport);
dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
@@ -95,7 +95,6 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
funnel_disable_hw(drvdata, inport);
- pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
}
@@ -226,14 +225,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static int funnel_remove(struct amba_device *adev)
-{
- struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int funnel_runtime_suspend(struct device *dev)
{
@@ -273,13 +264,9 @@ static struct amba_driver funnel_driver = {
.name = "coresight-funnel",
.owner = THIS_MODULE,
.pm = &funnel_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = funnel_probe,
- .remove = funnel_remove,
.id_table = funnel_ids,
};
-
-module_amba_driver(funnel_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Funnel driver");
+builtin_amba_driver(funnel_driver);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 62fcd98cc7cf..333eddaed339 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -34,6 +34,15 @@
#define TIMEOUT_US 100
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
+#define ETM_MODE_EXCL_KERN BIT(30)
+#define ETM_MODE_EXCL_USER BIT(31)
+
+enum cs_mode {
+ CS_MODE_DISABLED,
+ CS_MODE_SYSFS,
+ CS_MODE_PERF,
+};
+
static inline void CS_LOCK(void __iomem *addr)
{
do {
@@ -52,6 +61,12 @@ static inline void CS_UNLOCK(void __iomem *addr)
} while (0);
}
+void coresight_disable_path(struct list_head *path);
+int coresight_enable_path(struct list_head *path, u32 mode);
+struct coresight_device *coresight_get_sink(struct list_head *path);
+struct list_head *coresight_build_path(struct coresight_device *csdev);
+void coresight_release_path(struct list_head *path);
+
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
extern int etm_readl_cp14(u32 off, unsigned int *val);
extern int etm_writel_cp14(u32 off, u32 val);
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
index 584059e9e866..700f710e4bfa 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -15,7 +15,6 @@
#include <linux/clk.h>
#include <linux/coresight.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -48,8 +47,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
-
CS_UNLOCK(drvdata->base);
/*
@@ -86,8 +83,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
CS_LOCK(drvdata->base);
- pm_runtime_put(drvdata->dev);
-
dev_info(drvdata->dev, "REPLICATOR disabled\n");
}
@@ -156,15 +151,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static int replicator_remove(struct amba_device *adev)
-{
- struct replicator_state *drvdata = amba_get_drvdata(adev);
-
- pm_runtime_disable(&adev->dev);
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int replicator_runtime_suspend(struct device *dev)
{
@@ -206,10 +192,9 @@ static struct amba_driver replicator_driver = {
.drv = {
.name = "coresight-replicator-qcom",
.pm = &replicator_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = replicator_probe,
- .remove = replicator_remove,
.id_table = replicator_ids,
};
-
-module_amba_driver(replicator_driver);
+builtin_amba_driver(replicator_driver);
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 963ac197c253..4299c0569340 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Replicator driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -41,7 +42,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
dev_info(drvdata->dev, "REPLICATOR enabled\n");
return 0;
}
@@ -51,7 +51,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "REPLICATOR disabled\n");
}
@@ -127,20 +126,6 @@ out_disable_pm:
return ret;
}
-static int replicator_remove(struct platform_device *pdev)
-{
- struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
-
- coresight_unregister(drvdata->csdev);
- pm_runtime_get_sync(&pdev->dev);
- if (!IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int replicator_runtime_suspend(struct device *dev)
{
@@ -175,15 +160,11 @@ static const struct of_device_id replicator_match[] = {
static struct platform_driver replicator_driver = {
.probe = replicator_probe,
- .remove = replicator_remove,
.driver = {
.name = "coresight-replicator",
.of_match_table = replicator_match,
.pm = &replicator_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
};
-
builtin_platform_driver(replicator_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Replicator driver");
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index a57c7ec1661f..1be191f5d39c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Trace Memory Controller driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -124,7 +125,7 @@ struct tmc_drvdata {
bool reading;
char *buf;
dma_addr_t paddr;
- void __iomem *vaddr;
+ void *vaddr;
u32 size;
bool enable;
enum tmc_config_type config_type;
@@ -242,12 +243,9 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
unsigned long flags;
- pm_runtime_get_sync(drvdata->dev);
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
return -EBUSY;
}
@@ -268,7 +266,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
return 0;
}
-static int tmc_enable_sink(struct coresight_device *csdev)
+static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -381,8 +379,6 @@ out:
drvdata->enable = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
-
dev_info(drvdata->dev, "TMC disabled\n");
}
@@ -766,23 +762,10 @@ err_misc_register:
err_devm_kzalloc:
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
dma_free_coherent(dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
+ drvdata->vaddr, drvdata->paddr);
return ret;
}
-static int tmc_remove(struct amba_device *adev)
-{
- struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
-
- misc_deregister(&drvdata->miscdev);
- coresight_unregister(drvdata->csdev);
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
- dma_free_coherent(drvdata->dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
-
- return 0;
-}
-
static struct amba_id tmc_ids[] = {
{
.id = 0x0003b961,
@@ -795,13 +778,9 @@ static struct amba_driver tmc_driver = {
.drv = {
.name = "coresight-tmc",
.owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
},
.probe = tmc_probe,
- .remove = tmc_remove,
.id_table = tmc_ids,
};
-
-module_amba_driver(tmc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
+builtin_amba_driver(tmc_driver);
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 7214efd10db5..8fb09d9237ab 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Trace Port Interface Unit driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -70,11 +71,10 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int tpiu_enable(struct coresight_device *csdev)
+static int tpiu_enable(struct coresight_device *csdev, u32 mode)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(csdev->dev.parent);
tpiu_enable_hw(drvdata);
dev_info(drvdata->dev, "TPIU enabled\n");
@@ -98,7 +98,6 @@ static void tpiu_disable(struct coresight_device *csdev)
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
tpiu_disable_hw(drvdata);
- pm_runtime_put(csdev->dev.parent);
dev_info(drvdata->dev, "TPIU disabled\n");
}
@@ -172,14 +171,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static int tpiu_remove(struct amba_device *adev)
-{
- struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int tpiu_runtime_suspend(struct device *dev)
{
@@ -223,13 +214,9 @@ static struct amba_driver tpiu_driver = {
.name = "coresight-tpiu",
.owner = THIS_MODULE,
.pm = &tpiu_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = tpiu_probe,
- .remove = tpiu_remove,
.id_table = tpiu_ids,
};
-
-module_amba_driver(tpiu_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
+builtin_amba_driver(tpiu_driver);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 93738dfbf631..2ea5961092c1 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -11,7 +11,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -24,11 +23,28 @@
#include <linux/coresight.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include "coresight-priv.h"
static DEFINE_MUTEX(coresight_mutex);
+/**
+ * struct coresight_node - elements of a path, from source to sink
+ * @csdev: Address of an element.
+ * @link: hook to the list.
+ */
+struct coresight_node {
+ struct coresight_device *csdev;
+ struct list_head link;
+};
+
+/*
+ * When operating Coresight drivers from the sysFS interface, only a single
+ * path can exist from a tracer (associated with a CPU) to a sink.
+ */
+static DEFINE_PER_CPU(struct list_head *, sysfs_path);
+
static int coresight_id_match(struct device *dev, void *data)
{
int trace_id, i_trace_id;
@@ -68,15 +84,12 @@ static int coresight_source_is_unique(struct coresight_device *csdev)
csdev, coresight_id_match);
}
-static int coresight_find_link_inport(struct coresight_device *csdev)
+static int coresight_find_link_inport(struct coresight_device *csdev,
+ struct coresight_device *parent)
{
int i;
- struct coresight_device *parent;
struct coresight_connection *conn;
- parent = container_of(csdev->path_link.next,
- struct coresight_device, path_link);
-
for (i = 0; i < parent->nr_outport; i++) {
conn = &parent->conns[i];
if (conn->child_dev == csdev)
@@ -89,15 +102,12 @@ static int coresight_find_link_inport(struct coresight_device *csdev)
return 0;
}
-static int coresight_find_link_outport(struct coresight_device *csdev)
+static int coresight_find_link_outport(struct coresight_device *csdev,
+ struct coresight_device *child)
{
int i;
- struct coresight_device *child;
struct coresight_connection *conn;
- child = container_of(csdev->path_link.prev,
- struct coresight_device, path_link);
-
for (i = 0; i < csdev->nr_outport; i++) {
conn = &csdev->conns[i];
if (conn->child_dev == child)
@@ -110,13 +120,13 @@ static int coresight_find_link_outport(struct coresight_device *csdev)
return 0;
}
-static int coresight_enable_sink(struct coresight_device *csdev)
+static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
{
int ret;
if (!csdev->enable) {
if (sink_ops(csdev)->enable) {
- ret = sink_ops(csdev)->enable(csdev);
+ ret = sink_ops(csdev)->enable(csdev, mode);
if (ret)
return ret;
}
@@ -138,14 +148,19 @@ static void coresight_disable_sink(struct coresight_device *csdev)
}
}
-static int coresight_enable_link(struct coresight_device *csdev)
+static int coresight_enable_link(struct coresight_device *csdev,
+ struct coresight_device *parent,
+ struct coresight_device *child)
{
int ret;
int link_subtype;
int refport, inport, outport;
- inport = coresight_find_link_inport(csdev);
- outport = coresight_find_link_outport(csdev);
+ if (!parent || !child)
+ return -EINVAL;
+
+ inport = coresight_find_link_inport(csdev, parent);
+ outport = coresight_find_link_outport(csdev, child);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
@@ -168,14 +183,19 @@ static int coresight_enable_link(struct coresight_device *csdev)
return 0;
}
-static void coresight_disable_link(struct coresight_device *csdev)
+static void coresight_disable_link(struct coresight_device *csdev,
+ struct coresight_device *parent,
+ struct coresight_device *child)
{
int i, nr_conns;
int link_subtype;
int refport, inport, outport;
- inport = coresight_find_link_inport(csdev);
- outport = coresight_find_link_outport(csdev);
+ if (!parent || !child)
+ return;
+
+ inport = coresight_find_link_inport(csdev, parent);
+ outport = coresight_find_link_outport(csdev, child);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
@@ -201,7 +221,7 @@ static void coresight_disable_link(struct coresight_device *csdev)
csdev->enable = false;
}
-static int coresight_enable_source(struct coresight_device *csdev)
+static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
{
int ret;
@@ -213,7 +233,7 @@ static int coresight_enable_source(struct coresight_device *csdev)
if (!csdev->enable) {
if (source_ops(csdev)->enable) {
- ret = source_ops(csdev)->enable(csdev);
+ ret = source_ops(csdev)->enable(csdev, NULL, mode);
if (ret)
return ret;
}
@@ -235,109 +255,188 @@ static void coresight_disable_source(struct coresight_device *csdev)
}
}
-static int coresight_enable_path(struct list_head *path)
+void coresight_disable_path(struct list_head *path)
{
- int ret = 0;
- struct coresight_device *cd;
-
- /*
- * At this point we have a full @path, from source to sink. The
- * sink is the first entry and the source the last one. Go through
- * all the components and enable them one by one.
- */
- list_for_each_entry(cd, path, path_link) {
- if (cd == list_first_entry(path, struct coresight_device,
- path_link)) {
- ret = coresight_enable_sink(cd);
- } else if (list_is_last(&cd->path_link, path)) {
- /*
- * Don't enable the source just yet - this needs to
- * happen at the very end when all links and sink
- * along the path have been configured properly.
- */
- ;
- } else {
- ret = coresight_enable_link(cd);
+ struct coresight_node *nd;
+ struct coresight_device *csdev, *parent, *child;
+
+ list_for_each_entry(nd, path, link) {
+ csdev = nd->csdev;
+
+ switch (csdev->type) {
+ case CORESIGHT_DEV_TYPE_SINK:
+ case CORESIGHT_DEV_TYPE_LINKSINK:
+ coresight_disable_sink(csdev);
+ break;
+ case CORESIGHT_DEV_TYPE_SOURCE:
+ /* sources are disabled from either sysFS or Perf */
+ break;
+ case CORESIGHT_DEV_TYPE_LINK:
+ parent = list_prev_entry(nd, link)->csdev;
+ child = list_next_entry(nd, link)->csdev;
+ coresight_disable_link(csdev, parent, child);
+ break;
+ default:
+ break;
}
- if (ret)
- goto err;
}
+}
- return 0;
-err:
- list_for_each_entry_continue_reverse(cd, path, path_link) {
- if (cd == list_first_entry(path, struct coresight_device,
- path_link)) {
- coresight_disable_sink(cd);
- } else if (list_is_last(&cd->path_link, path)) {
- ;
- } else {
- coresight_disable_link(cd);
+int coresight_enable_path(struct list_head *path, u32 mode)
+{
+ int ret = 0;
+ struct coresight_node *nd;
+ struct coresight_device *csdev, *parent, *child;
+
+ list_for_each_entry_reverse(nd, path, link) {
+ csdev = nd->csdev;
+
+ switch (csdev->type) {
+ case CORESIGHT_DEV_TYPE_SINK:
+ case CORESIGHT_DEV_TYPE_LINKSINK:
+ ret = coresight_enable_sink(csdev, mode);
+ if (ret)
+ goto err;
+ break;
+ case CORESIGHT_DEV_TYPE_SOURCE:
+ /* sources are enabled from either sysFS or Perf */
+ break;
+ case CORESIGHT_DEV_TYPE_LINK:
+ parent = list_prev_entry(nd, link)->csdev;
+ child = list_next_entry(nd, link)->csdev;
+ ret = coresight_enable_link(csdev, parent, child);
+ if (ret)
+ goto err;
+ break;
+ default:
+ goto err;
}
}
+out:
return ret;
+err:
+ coresight_disable_path(path);
+ goto out;
}
-static int coresight_disable_path(struct list_head *path)
+struct coresight_device *coresight_get_sink(struct list_head *path)
{
- struct coresight_device *cd;
+ struct coresight_device *csdev;
- list_for_each_entry_reverse(cd, path, path_link) {
- if (cd == list_first_entry(path, struct coresight_device,
- path_link)) {
- coresight_disable_sink(cd);
- } else if (list_is_last(&cd->path_link, path)) {
- /*
- * The source has already been stopped, no need
- * to do it again here.
- */
- ;
- } else {
- coresight_disable_link(cd);
+ if (!path)
+ return NULL;
+
+ csdev = list_last_entry(path, struct coresight_node, link)->csdev;
+ if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+ csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+ return NULL;
+
+ return csdev;
+}
+
+/**
+ * _coresight_build_path - recursively build a path from a @csdev to a sink.
+ * @csdev: The device to start from.
+ * @path: The list to add devices to.
+ *
+ * The tree of Coresight devices is traversed until an activated sink is
+ * found. From there the sink is added to the list along with all the
+ * devices that led to that point - the end result is a list from source
+ * to sink. In that list the source is the first device and the sink the
+ * last one.
+ */
+static int _coresight_build_path(struct coresight_device *csdev,
+ struct list_head *path)
+{
+ int i;
+ bool found = false;
+ struct coresight_node *node;
+ struct coresight_connection *conn;
+
+ /* An activated sink has been found. Enqueue the element */
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+ goto out;
+
+ /* Not a sink - recursively explore each port found on this element */
+ for (i = 0; i < csdev->nr_outport; i++) {
+ conn = &csdev->conns[i];
+ if (_coresight_build_path(conn->child_dev, path) == 0) {
+ found = true;
+ break;
}
}
+ if (!found)
+ return -ENODEV;
+
+out:
+ /*
+ * A path from this element to a sink has been found. The elements
+	 * leading to the sink are already enqueued; all that is left to do
+	 * is add a node for this element and tell the PM runtime core that
+	 * we need it.
+ */
+ node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->csdev = csdev;
+ list_add(&node->link, path);
+ pm_runtime_get_sync(csdev->dev.parent);
+
return 0;
}
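
_coresight_build_path() is a depth-first search: it recurses through each output port until it reaches an activated sink, then allocates a node per element as the stack unwinds. Because list_add() prepends, the source ends up at the head of the list and the sink at the tail — the ordering the kernel-doc above promises and that coresight_get_sink() and coresight_enable_path() rely on. A tiny walker over the result, for illustration only (not part of the commit):

static void coresight_print_path(struct list_head *path)
{
	struct coresight_node *nd;

	/* Nodes run from source to sink, per the build order above. */
	list_for_each_entry(nd, path, link)
		pr_info("coresight: -> %s\n", dev_name(&nd->csdev->dev));
}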
-static int coresight_build_paths(struct coresight_device *csdev,
- struct list_head *path,
- bool enable)
+struct list_head *coresight_build_path(struct coresight_device *csdev)
{
- int i, ret = -EINVAL;
- struct coresight_connection *conn;
+ struct list_head *path;
- list_add(&csdev->path_link, path);
+ path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!path)
+ return NULL;
- if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
- csdev->activated) {
- if (enable)
- ret = coresight_enable_path(path);
- else
- ret = coresight_disable_path(path);
- } else {
- for (i = 0; i < csdev->nr_outport; i++) {
- conn = &csdev->conns[i];
- if (coresight_build_paths(conn->child_dev,
- path, enable) == 0)
- ret = 0;
- }
+ INIT_LIST_HEAD(path);
+
+ if (_coresight_build_path(csdev, path)) {
+ kfree(path);
+ path = NULL;
}
- if (list_first_entry(path, struct coresight_device, path_link) != csdev)
- dev_err(&csdev->dev, "wrong device in %s\n", __func__);
+ return path;
+}
- list_del(&csdev->path_link);
+/**
+ * coresight_release_path - release a previously built path.
+ * @path: the path to release.
+ *
+ * Go through all the elements of a path and 1) remove them from the list and
+ * 2) free the memory allocated for each node.
+ */
+void coresight_release_path(struct list_head *path)
+{
+ struct coresight_device *csdev;
+ struct coresight_node *nd, *next;
- return ret;
+ list_for_each_entry_safe(nd, next, path, link) {
+ csdev = nd->csdev;
+
+ pm_runtime_put_sync(csdev->dev.parent);
+ list_del(&nd->link);
+ kfree(nd);
+ }
+
+ kfree(path);
+ path = NULL;
}
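
Taken together, coresight_build_path() and coresight_release_path() bracket a
tracing session: build takes a PM runtime reference per node, release drops
them and frees the nodes. A minimal caller sketch, with a hypothetical
function name and assuming the coresight_enable_path() signature used further
down in this patch:

static int example_session_start(struct coresight_device *source)
{
	struct list_head *path;
	int ret;

	/* source-to-sink list; NULL if no activated sink is reachable */
	path = coresight_build_path(source);
	if (!path)
		return -EINVAL;

	ret = coresight_enable_path(path, CS_MODE_SYSFS);
	if (ret)
		/* drops the per-node PM runtime refs and frees the nodes */
		coresight_release_path(path);

	return ret;
}
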
int coresight_enable(struct coresight_device *csdev)
{
int ret = 0;
- LIST_HEAD(path);
+ int cpu;
+ struct list_head *path;
mutex_lock(&coresight_mutex);
if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -348,22 +447,47 @@ int coresight_enable(struct coresight_device *csdev)
if (csdev->enable)
goto out;
- if (coresight_build_paths(csdev, &path, true)) {
- dev_err(&csdev->dev, "building path(s) failed\n");
+ path = coresight_build_path(csdev);
+ if (!path) {
+ pr_err("building path(s) failed\n");
goto out;
}
- if (coresight_enable_source(csdev))
- dev_err(&csdev->dev, "source enable failed\n");
+ ret = coresight_enable_path(path, CS_MODE_SYSFS);
+ if (ret)
+ goto err_path;
+
+ ret = coresight_enable_source(csdev, CS_MODE_SYSFS);
+ if (ret)
+ goto err_source;
+
+ /*
+ * When working from sysFS it is important to keep track
+ * of the paths that were created so that they can be
+ * undone in 'coresight_disable()'. Since there can only
+ * be a single session per tracer (when working from sysFS)
+ * a per-cpu variable will do just fine.
+ */
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ per_cpu(sysfs_path, cpu) = path;
+
out:
mutex_unlock(&coresight_mutex);
return ret;
+
+err_source:
+ coresight_disable_path(path);
+
+err_path:
+ coresight_release_path(path);
+ goto out;
}
EXPORT_SYMBOL_GPL(coresight_enable);
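
The sysfs_path variable used above is a per-cpu pointer declared elsewhere in
this file, outside the hunks shown; a sketch of what such a declaration looks
like, with the name assumed from this hunk:

#include <linux/percpu.h>

/* one path per tracer session; indexed by the CPU the tracer serves */
static DEFINE_PER_CPU(struct list_head *, sysfs_path);
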
void coresight_disable(struct coresight_device *csdev)
{
- LIST_HEAD(path);
+ int cpu;
+ struct list_head *path;
mutex_lock(&coresight_mutex);
if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -373,9 +497,12 @@ void coresight_disable(struct coresight_device *csdev)
if (!csdev->enable)
goto out;
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ path = per_cpu(sysfs_path, cpu);
coresight_disable_source(csdev);
- if (coresight_build_paths(csdev, &path, false))
- dev_err(&csdev->dev, "releasing path(s) failed\n");
+ coresight_disable_path(path);
+ coresight_release_path(path);
+ per_cpu(sysfs_path, cpu) = NULL;
out:
mutex_unlock(&coresight_mutex);
@@ -481,6 +608,8 @@ static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
+ kfree(csdev->conns);
+ kfree(csdev->refcnt);
kfree(csdev);
}
@@ -536,7 +665,7 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
* are hooked-up with each newly added component.
*/
bus_for_each_dev(&coresight_bustype, NULL,
- csdev, coresight_orphan_match);
+ csdev, coresight_orphan_match);
}
@@ -568,6 +697,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
if (dev) {
conn->child_dev = to_coresight_device(dev);
+ /* and put reference from 'bus_find_device()' */
+ put_device(dev);
} else {
csdev->orphan = true;
conn->child_dev = NULL;
@@ -575,6 +706,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
}
}
+static int coresight_remove_match(struct device *dev, void *data)
+{
+ int i;
+ struct coresight_device *csdev, *iterator;
+ struct coresight_connection *conn;
+
+ csdev = data;
+ iterator = to_coresight_device(dev);
+
+ /* No need to check oneself */
+ if (csdev == iterator)
+ return 0;
+
+ /*
+ * Cycle through all the connections of that component. If we find
+ * a connection whose name matches @csdev, remove it.
+ */
+ for (i = 0; i < iterator->nr_outport; i++) {
+ conn = &iterator->conns[i];
+
+ if (conn->child_dev == NULL)
+ continue;
+
+ if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
+ iterator->orphan = true;
+ conn->child_dev = NULL;
+ /* No need to continue */
+ break;
+ }
+ }
+
+ /*
+ * Returning '0' ensures that all known components on the
+ * bus will be checked.
+ */
+ return 0;
+}
+
+static void coresight_remove_conns(struct coresight_device *csdev)
+{
+ bus_for_each_dev(&coresight_bustype, NULL,
+ csdev, coresight_remove_match);
+}
+
/**
* coresight_timeout - loop until a bit has changed to a specific state.
* @addr: base address of the area of interest.
@@ -713,13 +888,8 @@ EXPORT_SYMBOL_GPL(coresight_register);
void coresight_unregister(struct coresight_device *csdev)
{
- mutex_lock(&coresight_mutex);
-
- kfree(csdev->conns);
+ /* Remove references of that device in the topology */
+ coresight_remove_conns(csdev);
device_unregister(&csdev->dev);
-
- mutex_unlock(&coresight_mutex);
}
EXPORT_SYMBOL_GPL(coresight_unregister);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index b0973617826f..b68da1888fd5 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -10,7 +10,6 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -86,7 +85,7 @@ static int of_coresight_alloc_memory(struct device *dev,
return -ENOMEM;
/* Children connected to this component via @outports */
- pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
+ pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
sizeof(*pdata->child_names),
GFP_KERNEL);
if (!pdata->child_names)
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig
index b7a9073d968b..1b412f8a56b5 100644
--- a/drivers/hwtracing/intel_th/Kconfig
+++ b/drivers/hwtracing/intel_th/Kconfig
@@ -1,5 +1,6 @@
config INTEL_TH
tristate "Intel(R) Trace Hub controller"
+ depends on HAS_DMA && HAS_IOMEM
help
Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that
produce, switch and output trace data from multiple hardware and
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 165d3001c301..4272f2ce5f6e 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -124,17 +124,34 @@ static struct device_type intel_th_source_device_type = {
.release = intel_th_device_release,
};
+static struct intel_th *to_intel_th(struct intel_th_device *thdev)
+{
+ /*
+ * subdevice tree is flat: if this one is not a switch, its
+ * parent must be
+ */
+ if (thdev->type != INTEL_TH_SWITCH)
+ thdev = to_intel_th_hub(thdev);
+
+ if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH))
+ return NULL;
+
+ return dev_get_drvdata(thdev->dev.parent);
+}
+
static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
struct intel_th_device *thdev = to_intel_th_device(dev);
+ struct intel_th *th = to_intel_th(thdev);
char *node;
if (thdev->id >= 0)
- node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", 0, thdev->name,
- thdev->id);
+ node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", th->id,
+ thdev->name, thdev->id);
else
- node = kasprintf(GFP_KERNEL, "intel_th%d/%s", 0, thdev->name);
+ node = kasprintf(GFP_KERNEL, "intel_th%d/%s", th->id,
+ thdev->name);
return node;
}
@@ -319,6 +336,7 @@ static struct intel_th_subdevice {
unsigned nres;
unsigned type;
unsigned otype;
+ unsigned scrpd;
int id;
} intel_th_subdevices[TH_SUBDEVICE_MAX] = {
{
@@ -352,6 +370,7 @@ static struct intel_th_subdevice {
.id = 0,
.type = INTEL_TH_OUTPUT,
.otype = GTH_MSU,
+ .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED,
},
{
.nres = 2,
@@ -371,6 +390,7 @@ static struct intel_th_subdevice {
.id = 1,
.type = INTEL_TH_OUTPUT,
.otype = GTH_MSU,
+ .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED,
},
{
.nres = 2,
@@ -403,6 +423,7 @@ static struct intel_th_subdevice {
.name = "pti",
.type = INTEL_TH_OUTPUT,
.otype = GTH_PTI,
+ .scrpd = SCRPD_PTI_IS_PRIM_DEST,
},
{
.nres = 1,
@@ -477,6 +498,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
thdev->dev.devt = MKDEV(th->major, i);
thdev->output.type = subdev->otype;
thdev->output.port = -1;
+ thdev->output.scratchpad = subdev->scrpd;
}
err = device_add(&thdev->dev);
@@ -579,6 +601,8 @@ intel_th_alloc(struct device *dev, struct resource *devres,
}
th->dev = dev;
+ dev_set_drvdata(dev, th);
+
err = intel_th_populate(th, devres, ndevres, irq);
if (err)
goto err_chrdev;
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 2dc5378ccd3a..9beea0b54231 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -146,24 +146,6 @@ gth_master_set(struct gth_device *gth, unsigned int master, int port)
iowrite32(val, gth->base + reg);
}
-/*static int gth_master_get(struct gth_device *gth, unsigned int master)
-{
- unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
- unsigned int shift = (master & 0x7) * 4;
- u32 val;
-
- if (master >= 256) {
- reg = REG_GTH_GSWTDEST;
- shift = 0;
- }
-
- val = ioread32(gth->base + reg);
- val &= (0xf << shift);
- val >>= shift;
-
- return val ? val & 0x7 : -1;
- }*/
-
static ssize_t master_attr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -304,6 +286,10 @@ static int intel_th_gth_reset(struct gth_device *gth)
if (scratchpad & SCRPD_DEBUGGER_IN_USE)
return -EBUSY;
+ /* Always save/restore STH and TU registers in S0ix entry/exit */
+ scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
+ iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0);
+
/* output ports */
for (port = 0; port < 8; port++) {
if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
@@ -506,6 +492,10 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
if (!count)
dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
output->port);
+
+ reg = ioread32(gth->base + REG_GTH_SCRPD0);
+ reg &= ~output->scratchpad;
+ iowrite32(reg, gth->base + REG_GTH_SCRPD0);
}
/**
@@ -520,7 +510,7 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
struct intel_th_output *output)
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
- u32 scr = 0xfc0000;
+ u32 scr = 0xfc0000, scrpd;
int master;
spin_lock(&gth->gth_lock);
@@ -535,6 +525,10 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
output->active = true;
spin_unlock(&gth->gth_lock);
+ scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
+ scrpd |= output->scratchpad;
+ iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
+
iowrite32(scr, gth->base + REG_GTH_SCR);
iowrite32(0, gth->base + REG_GTH_SCR2);
}
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
index 3b714b7a61db..56f0d2620577 100644
--- a/drivers/hwtracing/intel_th/gth.h
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -57,9 +57,6 @@ enum {
REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */
};
-/* Externall debugger is using Intel TH */
-#define SCRPD_DEBUGGER_IN_USE BIT(24)
-
/* waiting for Pipeline Empty bit(s) to assert for GTH */
#define GTH_PLE_WAITLOOP_DEPTH 10000
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 57fd72b20fae..eedd09332db6 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -30,6 +30,7 @@ enum {
* struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices
* @port: output port number, assigned by the switch
* @type: GTH_{MSU,CTP,PTI}
+ * @scratchpad: scratchpad bits to flag when this output is enabled
* @multiblock: true for multiblock output configuration
* @active: true when this output is enabled
*
@@ -41,6 +42,7 @@ enum {
struct intel_th_output {
int port;
unsigned int type;
+ unsigned int scratchpad;
bool multiblock;
bool active;
};
@@ -241,4 +243,43 @@ enum {
GTH_PTI = 4, /* MIPI-PTI */
};
+/*
+ * Scratchpad bits: tell firmware and external debuggers
+ * what we are up to.
+ */
+enum {
+ /* Memory is the primary destination */
+ SCRPD_MEM_IS_PRIM_DEST = BIT(0),
+ /* XHCI DbC is the primary destination */
+ SCRPD_DBC_IS_PRIM_DEST = BIT(1),
+ /* PTI is the primary destination */
+ SCRPD_PTI_IS_PRIM_DEST = BIT(2),
+ /* BSSB is the primary destination */
+ SCRPD_BSSB_IS_PRIM_DEST = BIT(3),
+ /* PTI is the alternate destination */
+ SCRPD_PTI_IS_ALT_DEST = BIT(4),
+ /* BSSB is the alternate destination */
+ SCRPD_BSSB_IS_ALT_DEST = BIT(5),
+ /* DeepSx exit occurred */
+ SCRPD_DEEPSX_EXIT = BIT(6),
+ /* S4 exit occurred */
+ SCRPD_S4_EXIT = BIT(7),
+ /* S5 exit occurred */
+ SCRPD_S5_EXIT = BIT(8),
+ /* MSU controller 0/1 is enabled */
+ SCRPD_MSC0_IS_ENABLED = BIT(9),
+ SCRPD_MSC1_IS_ENABLED = BIT(10),
+ /* Sx exit occurred */
+ SCRPD_SX_EXIT = BIT(11),
+ /* Trigger Unit is enabled */
+ SCRPD_TRIGGER_IS_ENABLED = BIT(12),
+ SCRPD_ODLA_IS_ENABLED = BIT(13),
+ SCRPD_SOCHAP_IS_ENABLED = BIT(14),
+ SCRPD_STH_IS_ENABLED = BIT(15),
+ SCRPD_DCIH_IS_ENABLED = BIT(16),
+ SCRPD_VER_IS_ENABLED = BIT(17),
+ /* External debugger is using Intel TH */
+ SCRPD_DEBUGGER_IN_USE = BIT(24),
+};
+
#endif
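
The gth.c hunks above set these bits on enable and clear them on disable with
a read-modify-write of REG_GTH_SCRPD0. The same pattern as a standalone
sketch (the helper name is hypothetical):

static void scrpd_update(void __iomem *base, u32 set, u32 clear)
{
	u32 val = ioread32(base + REG_GTH_SCRPD0);

	val |= set;	/* e.g. output->scratchpad on enable */
	val &= ~clear;	/* e.g. output->scratchpad on disable */
	iowrite32(val, base + REG_GTH_SCRPD0);
}
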
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 70ca27e45602..d9d6022c5aca 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -408,7 +408,7 @@ msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
* Second time (wrap_count==1), it's just like any other block,
* containing data in the range of [MSC_BDESC..data_bytes].
*/
- if (iter->block == iter->start_block && iter->wrap_count) {
+ if (iter->block == iter->start_block && iter->wrap_count == 2) {
tocopy = DATA_IN_PAGE - data_bytes;
src += data_bytes;
}
@@ -1112,12 +1112,11 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
size = msc->nr_pages << PAGE_SHIFT;
if (!size)
- return 0;
+ goto put_count;
- if (off >= size) {
- len = 0;
+ if (off >= size)
goto put_count;
- }
+
if (off + len >= size)
len = size - off;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 641e87936064..bca7a2ac00d6 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -46,8 +46,6 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
if (IS_ERR(th))
return PTR_ERR(th);
- pci_set_drvdata(pdev, th);
-
return 0;
}
@@ -67,6 +65,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Apollo Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a8e),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
+ /* Broxton */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index 56101c33e10f..e1aee61dd7b3 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -94,10 +94,13 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
case STP_PACKET_TRIG:
if (flags & STP_PACKET_TIMESTAMPED)
reg += 4;
- iowrite8(*payload, sth->base + reg);
+ writeb_relaxed(*payload, sth->base + reg);
break;
case STP_PACKET_MERR:
+ if (size > 4)
+ size = 4;
+
sth_iowrite(&out->MERR, payload, size);
break;
@@ -107,8 +110,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
else
outp = (u64 __iomem *)&out->FLAG;
- size = 1;
- sth_iowrite(outp, payload, size);
+ size = 0;
+ writeb_relaxed(0, outp);
break;
case STP_PACKET_USER:
@@ -129,6 +132,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
sth_iowrite(outp, payload, size);
break;
+ default:
+ return -ENOTSUPP;
}
return size;
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 83e9f591a54b..847a39b35307 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -1,6 +1,7 @@
config STM
tristate "System Trace Module devices"
select CONFIGFS_FS
+ select SRCU
help
A System Trace Module (STM) is a device exporting data in System
Trace Protocol (STP) format as defined by MIPI STP standards.
@@ -8,6 +9,8 @@ config STM
Say Y here to enable System Trace Module device support.
+if STM
+
config STM_DUMMY
tristate "Dummy STM driver"
help
@@ -24,3 +27,16 @@ config STM_SOURCE_CONSOLE
If you want to send kernel console messages over STM devices,
say Y.
+
+config STM_SOURCE_HEARTBEAT
+ tristate "Heartbeat over STM devices"
+ help
+ This is a kernel space trace source that sends periodic
+ heartbeat messages to trace hosts over STM devices. It is
+ also useful for testing stm class drivers and the stm class
+ framework itself.
+
+ If you want to send heartbeat messages over STM devices,
+ say Y.
+
+endif
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index f9312c38dd7a..a9ce3d487e57 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -5,5 +5,7 @@ stm_core-y := core.o policy.o
obj-$(CONFIG_STM_DUMMY) += dummy_stm.o
obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o
+obj-$(CONFIG_STM_SOURCE_HEARTBEAT) += stm_heartbeat.o
stm_console-y := console.o
+stm_heartbeat-y := heartbeat.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index b6445d9e5453..de80d45d8df9 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -113,6 +113,7 @@ struct stm_device *stm_find_device(const char *buf)
stm = to_stm_device(dev);
if (!try_module_get(stm->owner)) {
+ /* matches class_find_device() above */
put_device(dev);
return NULL;
}
@@ -125,7 +126,7 @@ struct stm_device *stm_find_device(const char *buf)
* @stm: stm device, previously acquired by stm_find_device()
*
* This drops the module reference and device reference taken by
- * stm_find_device().
+ * stm_find_device() or stm_char_open().
*/
void stm_put_device(struct stm_device *stm)
{
@@ -185,6 +186,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
+ lockdep_assert_held(&stm->mc_lock);
+ lockdep_assert_held(&output->lock);
+
if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
return;
@@ -199,6 +203,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
+ lockdep_assert_held(&stm->mc_lock);
+ lockdep_assert_held(&output->lock);
+
bitmap_release_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
@@ -233,7 +240,7 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
return -1;
}
-static unsigned int
+static int
stm_find_master_chan(struct stm_device *stm, unsigned int width,
unsigned int *mstart, unsigned int mend,
unsigned int *cstart, unsigned int cend)
@@ -288,12 +295,13 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
}
spin_lock(&stm->mc_lock);
+ spin_lock(&output->lock);
/* output is already assigned -- shouldn't happen */
if (WARN_ON_ONCE(output->nr_chans))
goto unlock;
ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
- if (ret)
+ if (ret < 0)
goto unlock;
output->master = midx;
@@ -304,6 +312,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
ret = 0;
unlock:
+ spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
return ret;
@@ -312,11 +321,18 @@ unlock:
static void stm_output_free(struct stm_device *stm, struct stm_output *output)
{
spin_lock(&stm->mc_lock);
+ spin_lock(&output->lock);
if (output->nr_chans)
stm_output_disclaim(stm, output);
+ spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
}
+static void stm_output_init(struct stm_output *output)
+{
+ spin_lock_init(&output->lock);
+}
+
static int major_match(struct device *dev, const void *data)
{
unsigned int major = *(unsigned int *)data;
@@ -339,6 +355,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
if (!stmf)
return -ENOMEM;
+ stm_output_init(&stmf->output);
stmf->stm = to_stm_device(dev);
if (!try_module_get(stmf->stm->owner))
@@ -349,6 +366,8 @@ static int stm_char_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
err_free:
+ /* matches class_find_device() above */
+ put_device(dev);
kfree(stmf);
return err;
@@ -357,9 +376,19 @@ err_free:
static int stm_char_release(struct inode *inode, struct file *file)
{
struct stm_file *stmf = file->private_data;
+ struct stm_device *stm = stmf->stm;
+
+ if (stm->data->unlink)
+ stm->data->unlink(stm->data, stmf->output.master,
+ stmf->output.channel);
- stm_output_free(stmf->stm, &stmf->output);
- stm_put_device(stmf->stm);
+ stm_output_free(stm, &stmf->output);
+
+ /*
+ * matches the stm_char_open()'s
+ * class_find_device() + try_module_get()
+ */
+ stm_put_device(stm);
kfree(stmf);
return 0;
@@ -380,8 +409,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
return ret;
}
-static void stm_write(struct stm_data *data, unsigned int master,
- unsigned int channel, const char *buf, size_t count)
+static ssize_t stm_write(struct stm_data *data, unsigned int master,
+ unsigned int channel, const char *buf, size_t count)
{
unsigned int flags = STP_PACKET_TIMESTAMPED;
const unsigned char *p = buf, nil = 0;
@@ -393,9 +422,14 @@ static void stm_write(struct stm_data *data, unsigned int master,
sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
sz, p);
flags = 0;
+
+ if (sz < 0)
+ break;
}
data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
+
+ return pos;
}
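
With this change stm_write() stops on the first error from the packet()
callback and returns the number of bytes already emitted. A hypothetical
callback sketching the contract (the parameter list follows struct stm_data's
packet() prototype):

static ssize_t example_packet(struct stm_data *data, unsigned int master,
			      unsigned int channel, unsigned int packet,
			      unsigned int flags, unsigned int size,
			      const unsigned char *payload)
{
	/* hypothetical transport limit, for illustration only */
	if (packet == STP_PACKET_DATA && size > 8)
		return -EINVAL;	/* makes stm_write() stop mid-buffer */

	/* ... emit the packet ... */
	return size;		/* bytes consumed on success */
}
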
static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -406,6 +440,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
char *kbuf;
int err;
+ if (count + 1 > PAGE_SIZE)
+ count = PAGE_SIZE - 1;
+
/*
* if no m/c have been assigned to this writer up to this
* point, use "default" policy entry
@@ -430,8 +467,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
return -EFAULT;
}
- stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf,
- count);
+ count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
+ kbuf, count);
kfree(kbuf);
@@ -515,10 +552,8 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
ret = stm->data->link(stm->data, stmf->output.master,
stmf->output.channel);
- if (ret) {
+ if (ret)
stm_output_free(stmf->stm, &stmf->output);
- stm_put_device(stmf->stm);
- }
err_free:
kfree(id);
@@ -618,7 +653,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
if (!stm_data->packet || !stm_data->sw_nchannels)
return -EINVAL;
- nmasters = stm_data->sw_end - stm_data->sw_start;
+ nmasters = stm_data->sw_end - stm_data->sw_start + 1;
stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
if (!stm)
return -ENOMEM;
@@ -641,6 +676,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
if (err)
goto err_device;
+ mutex_init(&stm->link_mutex);
spin_lock_init(&stm->link_lock);
INIT_LIST_HEAD(&stm->link_list);
@@ -654,6 +690,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
return 0;
err_device:
+ /* matches device_initialize() above */
put_device(&stm->dev);
err_free:
kfree(stm);
@@ -662,20 +699,28 @@ err_free:
}
EXPORT_SYMBOL_GPL(stm_register_device);
-static void __stm_source_link_drop(struct stm_source_device *src,
- struct stm_device *stm);
+static int __stm_source_link_drop(struct stm_source_device *src,
+ struct stm_device *stm);
void stm_unregister_device(struct stm_data *stm_data)
{
struct stm_device *stm = stm_data->stm;
struct stm_source_device *src, *iter;
- int i;
+ int i, ret;
- spin_lock(&stm->link_lock);
+ mutex_lock(&stm->link_mutex);
list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
- __stm_source_link_drop(src, stm);
+ ret = __stm_source_link_drop(src, stm);
+ /*
+ * The src <-> stm link must not change while stm::link_mutex
+ * is held, so complain loudly if it has; in that situation
+ * ret != 0 means this src is not connected to this stm, and it
+ * should otherwise be safe to proceed with the tear-down of stm.
+ */
+ WARN_ON_ONCE(ret);
}
- spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
synchronize_srcu(&stm_source_srcu);
@@ -686,7 +731,7 @@ void stm_unregister_device(struct stm_data *stm_data)
stp_policy_unbind(stm->policy);
mutex_unlock(&stm->policy_mutex);
- for (i = 0; i < stm->sw_nmasters; i++)
+ for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
stp_master_free(stm, i);
device_unregister(&stm->dev);
@@ -694,6 +739,17 @@ void stm_unregister_device(struct stm_data *stm_data)
}
EXPORT_SYMBOL_GPL(stm_unregister_device);
+/*
+ * stm::link_list access serialization uses a spinlock and a mutex; holding
+ * either of them guarantees that the list is stable; modification requires
+ * holding both of them.
+ *
+ * Lock ordering is as follows:
+ * stm::link_mutex
+ * stm::link_lock
+ * src::link_lock
+ */
+
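
In code, the documented ordering is the one stm_source_link_add() below
follows; condensed:

	mutex_lock(&stm->link_mutex);	/* keeps the list stable */
	spin_lock(&stm->link_lock);	/* modification needs both... */
	spin_lock(&src->link_lock);	/* ...plus the source's own lock */

	/* ... modify stm::link_list and src::link ... */

	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);
	mutex_unlock(&stm->link_mutex);
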
/**
* stm_source_link_add() - connect an stm_source device to an stm device
* @src: stm_source device
@@ -710,6 +766,7 @@ static int stm_source_link_add(struct stm_source_device *src,
char *id;
int err;
+ mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
@@ -719,6 +776,7 @@ static int stm_source_link_add(struct stm_source_device *src,
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
id = kstrdup(src->data->name, GFP_KERNEL);
if (id) {
@@ -753,9 +811,9 @@ static int stm_source_link_add(struct stm_source_device *src,
fail_free_output:
stm_output_free(stm, &src->output);
- stm_put_device(stm);
fail_detach:
+ mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
@@ -764,6 +822,7 @@ fail_detach:
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
return err;
}
@@ -776,28 +835,55 @@ fail_detach:
* If @stm is @src::link, disconnect them from one another and put the
* reference on the @stm device.
*
- * Caller must hold stm::link_lock.
+ * Caller must hold stm::link_mutex.
*/
-static void __stm_source_link_drop(struct stm_source_device *src,
- struct stm_device *stm)
+static int __stm_source_link_drop(struct stm_source_device *src,
+ struct stm_device *stm)
{
struct stm_device *link;
+ int ret = 0;
+
+ lockdep_assert_held(&stm->link_mutex);
+ /* for stm::link_list modification, we hold both mutex and spinlock */
+ spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
- if (WARN_ON_ONCE(link != stm)) {
- spin_unlock(&src->link_lock);
- return;
+
+ /*
+ * The linked device may have changed since we last looked, because
+ * we weren't holding the src::link_lock back then; if this is the
+ * case, tell the caller to retry.
+ */
+ if (link != stm) {
+ ret = -EAGAIN;
+ goto unlock;
}
stm_output_free(link, &src->output);
- /* caller must hold stm::link_lock */
list_del_init(&src->link_entry);
/* matches stm_find_device() from stm_source_link_store() */
stm_put_device(link);
rcu_assign_pointer(src->link, NULL);
+unlock:
spin_unlock(&src->link_lock);
+ spin_unlock(&stm->link_lock);
+
+ /*
+ * Call the unlink callbacks for both source and stm, when we know
+ * that we have actually performed the unlinking.
+ */
+ if (!ret) {
+ if (src->data->unlink)
+ src->data->unlink(src->data);
+
+ if (stm->data->unlink)
+ stm->data->unlink(stm->data, src->output.master,
+ src->output.channel);
+ }
+
+ return ret;
}
/**
@@ -813,21 +899,29 @@ static void __stm_source_link_drop(struct stm_source_device *src,
static void stm_source_link_drop(struct stm_source_device *src)
{
struct stm_device *stm;
- int idx;
+ int idx, ret;
+retry:
idx = srcu_read_lock(&stm_source_srcu);
+ /*
+ * The stm device will be valid for the duration of this
+ * read section, but the link may change before we grab
+ * the src::link_lock in __stm_source_link_drop().
+ */
stm = srcu_dereference(src->link, &stm_source_srcu);
+ ret = 0;
if (stm) {
- if (src->data->unlink)
- src->data->unlink(src->data);
-
- spin_lock(&stm->link_lock);
- __stm_source_link_drop(src, stm);
- spin_unlock(&stm->link_lock);
+ mutex_lock(&stm->link_mutex);
+ ret = __stm_source_link_drop(src, stm);
+ mutex_unlock(&stm->link_mutex);
}
srcu_read_unlock(&stm_source_srcu, idx);
+
+ /* if it did change, retry */
+ if (ret == -EAGAIN)
+ goto retry;
}
static ssize_t stm_source_link_show(struct device *dev,
@@ -862,8 +956,10 @@ static ssize_t stm_source_link_store(struct device *dev,
return -EINVAL;
err = stm_source_link_add(src, link);
- if (err)
+ if (err) {
+ /* matches the stm_find_device() above */
stm_put_device(link);
+ }
return err ? : count;
}
@@ -925,6 +1021,7 @@ int stm_source_register_device(struct device *parent,
if (err)
goto err;
+ stm_output_init(&src->output);
spin_lock_init(&src->link_lock);
INIT_LIST_HEAD(&src->link_entry);
src->data = data;
@@ -973,9 +1070,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan,
stm = srcu_dereference(src->link, &stm_source_srcu);
if (stm)
- stm_write(stm->data, src->output.master,
- src->output.channel + chan,
- buf, count);
+ count = stm_write(stm->data, src->output.master,
+ src->output.channel + chan,
+ buf, count);
else
count = -ENODEV;
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
index 3709bef0b21f..310adf57e7a1 100644
--- a/drivers/hwtracing/stm/dummy_stm.c
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -40,22 +40,75 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
return size;
}
-static struct stm_data dummy_stm = {
- .name = "dummy_stm",
- .sw_start = 0x0000,
- .sw_end = 0xffff,
- .sw_nchannels = 0xffff,
- .packet = dummy_stm_packet,
-};
+#define DUMMY_STM_MAX 32
+
+static struct stm_data dummy_stm[DUMMY_STM_MAX];
+
+static int nr_dummies = 4;
+
+module_param(nr_dummies, int, 0600);
+
+static unsigned int dummy_stm_nr;
+
+static unsigned int fail_mode;
+
+module_param(fail_mode, int, 0600);
+
+static int dummy_stm_link(struct stm_data *data, unsigned int master,
+ unsigned int channel)
+{
+ if (fail_mode && (channel & fail_mode))
+ return -EINVAL;
+
+ return 0;
+}
static int dummy_stm_init(void)
{
- return stm_register_device(NULL, &dummy_stm, THIS_MODULE);
+ int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies);
+
+ if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < __nr_dummies; i++) {
+ dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
+ if (!dummy_stm[i].name)
+ goto fail_unregister;
+
+ dummy_stm[i].sw_start = 0x0000;
+ dummy_stm[i].sw_end = 0xffff;
+ dummy_stm[i].sw_nchannels = 0xffff;
+ dummy_stm[i].packet = dummy_stm_packet;
+ dummy_stm[i].link = dummy_stm_link;
+
+ ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE);
+ if (ret)
+ goto fail_free;
+ }
+
+ dummy_stm_nr = __nr_dummies;
+
+ return 0;
+
+fail_unregister:
+ for (i--; i >= 0; i--) {
+ stm_unregister_device(&dummy_stm[i]);
+fail_free:
+ kfree(dummy_stm[i].name);
+ }
+
+ return ret;
+
}
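
The interleaved fail_unregister/fail_free labels unwind in two flavors: on a
kasprintf() failure only the earlier, fully set up instances are rolled back,
while on a registration failure the current instance's name is freed first.
The registration-failure path, written without the shared labels:

	if (ret) {
		/* current instance: name allocated but never registered */
		kfree(dummy_stm[i].name);
		while (--i >= 0) {
			stm_unregister_device(&dummy_stm[i]);
			kfree(dummy_stm[i].name);
		}
		return ret;
	}
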
static void dummy_stm_exit(void)
{
- stm_unregister_device(&dummy_stm);
+ int i;
+
+ for (i = 0; i < dummy_stm_nr; i++) {
+ stm_unregister_device(&dummy_stm[i]);
+ kfree(dummy_stm[i].name);
+ }
}
module_init(dummy_stm_init);
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
new file mode 100644
index 000000000000..0133571b506f
--- /dev/null
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -0,0 +1,130 @@
+/*
+ * Simple heartbeat STM source driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Heartbeat STM source will send repetitive messages over STM devices to a
+ * trace host.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/stm.h>
+
+#define STM_HEARTBEAT_MAX 32
+
+static int nr_devs = 4;
+static int interval_ms = 10;
+
+module_param(nr_devs, int, 0600);
+module_param(interval_ms, int, 0600);
+
+static struct stm_heartbeat {
+ struct stm_source_data data;
+ struct hrtimer hrtimer;
+ unsigned int active;
+} stm_heartbeat[STM_HEARTBEAT_MAX];
+
+static unsigned int nr_instances;
+
+static const char str[] = "heartbeat stm source driver is here to serve you";
+
+static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
+{
+ struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat,
+ hrtimer);
+
+ stm_source_write(&heartbeat->data, 0, str, sizeof str);
+ if (heartbeat->active)
+ hrtimer_forward_now(hr, ms_to_ktime(interval_ms));
+
+ return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART;
+}
+
+static int stm_heartbeat_link(struct stm_source_data *data)
+{
+ struct stm_heartbeat *heartbeat =
+ container_of(data, struct stm_heartbeat, data);
+
+ heartbeat->active = 1;
+ hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms),
+ HRTIMER_MODE_ABS);
+
+ return 0;
+}
+
+static void stm_heartbeat_unlink(struct stm_source_data *data)
+{
+ struct stm_heartbeat *heartbeat =
+ container_of(data, struct stm_heartbeat, data);
+
+ heartbeat->active = 0;
+ hrtimer_cancel(&heartbeat->hrtimer);
+}
+
+static int stm_heartbeat_init(void)
+{
+ int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs);
+
+ if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < __nr_instances; i++) {
+ stm_heartbeat[i].data.name =
+ kasprintf(GFP_KERNEL, "heartbeat.%d", i);
+ if (!stm_heartbeat[i].data.name)
+ goto fail_unregister;
+
+ stm_heartbeat[i].data.nr_chans = 1;
+ stm_heartbeat[i].data.link = stm_heartbeat_link;
+ stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
+ hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
+ stm_heartbeat[i].hrtimer.function =
+ stm_heartbeat_hrtimer_handler;
+
+ ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
+ if (ret)
+ goto fail_free;
+ }
+
+ nr_instances = __nr_instances;
+
+ return 0;
+
+fail_unregister:
+ for (i--; i >= 0; i--) {
+ stm_source_unregister_device(&stm_heartbeat[i].data);
+fail_free:
+ kfree(stm_heartbeat[i].data.name);
+ }
+
+ return ret;
+}
+
+static void stm_heartbeat_exit(void)
+{
+ int i;
+
+ for (i = 0; i < nr_instances; i++) {
+ stm_source_unregister_device(&stm_heartbeat[i].data);
+ kfree(stm_heartbeat[i].data.name);
+ }
+}
+
+module_init(stm_heartbeat_init);
+module_exit(stm_heartbeat_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_heartbeat driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 11ab6d01adf6..1db189657b2b 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy)
{
struct stm_device *stm = policy->stm;
+ /*
+ * stp_policy_release() will not get here if the policy is already
+ * unbound; other users should not either, as no link exists between
+ * this policy and anything else in that case
+ */
if (WARN_ON_ONCE(!policy->stm))
return;
- mutex_lock(&stm->policy_mutex);
- stm->policy = NULL;
- mutex_unlock(&stm->policy_mutex);
+ lockdep_assert_held(&stm->policy_mutex);
+ stm->policy = NULL;
policy->stm = NULL;
stm_put_device(stm);
@@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy)
static void stp_policy_release(struct config_item *item)
{
struct stp_policy *policy = to_stp_policy(item);
+ struct stm_device *stm = policy->stm;
+ /* a policy *can* be unbound and still exist in configfs tree */
+ if (!stm)
+ return;
+
+ mutex_lock(&stm->policy_mutex);
stp_policy_unbind(policy);
+ mutex_unlock(&stm->policy_mutex);
+
kfree(policy);
}
@@ -320,10 +332,11 @@ stp_policies_make(struct config_group *group, const char *name)
/*
* node must look like <device_name>.<policy_name>, where
- * <device_name> is the name of an existing stm device and
- * <policy_name> is an arbitrary string
+ * <device_name> is the name of an existing stm device; may
+ * contain dots;
+ * <policy_name> is an arbitrary string; may not contain dots
*/
- p = strchr(devname, '.');
+ p = strrchr(devname, '.');
if (!p) {
kfree(devname);
return ERR_PTR(-EINVAL);
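
With dummy devices now registering under names like dummy_stm.0, splitting at
the last dot keeps the parse unambiguous; an illustration with hypothetical
names:

/*
 * "dummy_stm.0.user"
 *             ^--- p = strrchr(devname, '.')
 * device name: "dummy_stm.0"  (may contain dots)
 * policy name: "user"         (may not contain dots)
 */
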
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
index 95ece0292c99..4e8c6926260f 100644
--- a/drivers/hwtracing/stm/stm.h
+++ b/drivers/hwtracing/stm/stm.h
@@ -45,6 +45,7 @@ struct stm_device {
int major;
unsigned int sw_nmasters;
struct stm_data *data;
+ struct mutex link_mutex;
spinlock_t link_lock;
struct list_head link_list;
/* master allocation */
@@ -56,6 +57,7 @@ struct stm_device {
container_of((_d), struct stm_device, dev)
struct stm_output {
+ spinlock_t lock;
unsigned int master;
unsigned int channel;
unsigned int nr_chans;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 0299dfa746a3..0967e1a5b3a2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -789,7 +789,7 @@ config I2C_QUP
config I2C_RIIC
tristate "Renesas RIIC adapter"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
If you say yes to this option, support will be included for the
Renesas RIIC I2C interface.
@@ -833,7 +833,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
depends on HAS_DMA
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Renesas SH-Mobile processor.
@@ -908,7 +908,7 @@ config I2C_TEGRA
config I2C_UNIPHIER
tristate "UniPhier FIFO-less I2C controller"
- depends on ARCH_UNIPHIER
+ depends on ARCH_UNIPHIER || COMPILE_TEST
help
If you say yes to this option, support will be included for
the UniPhier FIFO-less I2C interface embedded in PH1-LD4, PH1-sLD8,
@@ -916,7 +916,7 @@ config I2C_UNIPHIER
config I2C_UNIPHIER_F
tristate "UniPhier FIFO-builtin I2C controller"
- depends on ARCH_UNIPHIER
+ depends on ARCH_UNIPHIER || COMPILE_TEST
help
If you say yes to this option, support will be included for
the UniPhier FIFO-builtin I2C interface embedded in PH1-Pro4,
@@ -975,17 +975,17 @@ config I2C_XLR
config I2C_XLP9XX
tristate "XLP9XX I2C support"
- depends on CPU_XLP || COMPILE_TEST
+ depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
- the Broadcom XLP9xx/XLP5xx MIPS processors.
+ the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
This driver can also be built as a module. If so, the module will
be called i2c-xlp9xx.
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select I2C_SLAVE
help
If you say yes to this option, support will be included for the
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 0419f5284609..b9f0fff4e723 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -58,11 +58,13 @@
#define IE_M_RX_FIFO_FULL_SHIFT 31
#define IE_M_RX_THLD_SHIFT 30
#define IE_M_START_BUSY_SHIFT 28
+#define IE_M_TX_UNDERRUN_SHIFT 27
#define IS_OFFSET 0x3c
#define IS_M_RX_FIFO_FULL_SHIFT 31
#define IS_M_RX_THLD_SHIFT 30
#define IS_M_START_BUSY_SHIFT 28
+#define IS_M_TX_UNDERRUN_SHIFT 27
#define M_TX_OFFSET 0x40
#define M_TX_WR_STATUS_SHIFT 31
@@ -76,7 +78,7 @@
#define M_RX_DATA_SHIFT 0
#define M_RX_DATA_MASK 0xff
-#define I2C_TIMEOUT_MESC 100
+#define I2C_TIMEOUT_MSEC 50000
#define M_TX_RX_FIFO_SIZE 64
enum bus_speed_index {
@@ -95,12 +97,17 @@ struct bcm_iproc_i2c_dev {
struct completion done;
int xfer_is_done;
+
+ struct i2c_msg *msg;
+
+ /* bytes that have been transferred */
+ unsigned int tx_bytes;
};
/*
* Can be expanded in the future if more interrupt status bits are utilized
*/
-#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT)
+#define ISR_MASK (BIT(IS_M_START_BUSY_SHIFT) | BIT(IS_M_TX_UNDERRUN_SHIFT))
static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
{
@@ -112,13 +119,95 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
if (!status)
return IRQ_NONE;
+ /* TX FIFO is empty and we have more data to send */
+ if (status & BIT(IS_M_TX_UNDERRUN_SHIFT)) {
+ struct i2c_msg *msg = iproc_i2c->msg;
+ unsigned int tx_bytes = msg->len - iproc_i2c->tx_bytes;
+ unsigned int i;
+ u32 val;
+
+ /* can only fill up to the FIFO size */
+ tx_bytes = min_t(unsigned int, tx_bytes, M_TX_RX_FIFO_SIZE);
+ for (i = 0; i < tx_bytes; i++) {
+ /* start from where we left over */
+ unsigned int idx = iproc_i2c->tx_bytes + i;
+
+ val = msg->buf[idx];
+
+ /* mark the last byte */
+ if (idx == msg->len - 1) {
+ u32 tmp;
+
+ val |= BIT(M_TX_WR_STATUS_SHIFT);
+
+ /*
+ * Since this is the last byte, we should
+ * now disable TX FIFO underrun interrupt
+ */
+ tmp = readl(iproc_i2c->base + IE_OFFSET);
+ tmp &= ~BIT(IE_M_TX_UNDERRUN_SHIFT);
+ writel(tmp, iproc_i2c->base + IE_OFFSET);
+ }
+
+ /* load data into TX FIFO */
+ writel(val, iproc_i2c->base + M_TX_OFFSET);
+ }
+ /* update number of transferred bytes */
+ iproc_i2c->tx_bytes += tx_bytes;
+ }
+
+ if (status & BIT(IS_M_START_BUSY_SHIFT)) {
+ iproc_i2c->xfer_is_done = 1;
+ complete_all(&iproc_i2c->done);
+ }
+
writel(status, iproc_i2c->base + IS_OFFSET);
- iproc_i2c->xfer_is_done = 1;
- complete_all(&iproc_i2c->done);
return IRQ_HANDLED;
}
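
A worked example of the refill flow with the 64-byte FIFO from
M_TX_RX_FIFO_SIZE:

/*
 * 100-byte write, 64-byte FIFO:
 *   xfer:  slave address + 63 data bytes loaded, tx_bytes = 63,
 *          TX underrun interrupt enabled (63 < msg->len)
 *   ISR:   underrun fires with the FIFO empty; 37 bytes remain and
 *          all fit, so idx reaches msg->len - 1, the last byte gets
 *          M_TX_WR_STATUS and the underrun interrupt is disabled
 */
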
+static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ u32 val;
+
+ /* put controller in reset */
+ val = readl(iproc_i2c->base + CFG_OFFSET);
+ val |= 1 << CFG_RESET_SHIFT;
+ val &= ~(1 << CFG_EN_SHIFT);
+ writel(val, iproc_i2c->base + CFG_OFFSET);
+
+ /* wait 100 usec per spec */
+ udelay(100);
+
+ /* bring controller out of reset */
+ val &= ~(1 << CFG_RESET_SHIFT);
+ writel(val, iproc_i2c->base + CFG_OFFSET);
+
+ /* flush TX/RX FIFOs and set RX FIFO threshold to zero */
+ val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
+ writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+ /* disable all interrupts */
+ writel(0, iproc_i2c->base + IE_OFFSET);
+
+ /* clear all pending interrupts */
+ writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
+
+ return 0;
+}
+
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(iproc_i2c->base + CFG_OFFSET);
+ if (enable)
+ val |= BIT(CFG_EN_SHIFT);
+ else
+ val &= ~BIT(CFG_EN_SHIFT);
+ writel(val, iproc_i2c->base + CFG_OFFSET);
+}
+
static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg)
{
@@ -149,6 +238,12 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
default:
dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+
+ /* re-initialize i2c for recovery */
+ bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+ bcm_iproc_i2c_init(iproc_i2c);
+ bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+
return -EIO;
}
}
@@ -159,7 +254,8 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
int ret, i;
u8 addr;
u32 val;
- unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC);
+ unsigned int tx_bytes;
+ unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MSEC);
/* check if bus is busy */
if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
@@ -168,13 +264,20 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
return -EBUSY;
}
+ iproc_i2c->msg = msg;
+
/* format and load slave address into the TX FIFO */
addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
writel(addr, iproc_i2c->base + M_TX_OFFSET);
- /* for a write transaction, load data into the TX FIFO */
+ /*
+ * For a write transaction, load data into the TX FIFO. Only allow
+ * loading up to TX FIFO size - 1 bytes of data since the first byte
+ * has been used up by the slave address
+ */
+ tx_bytes = min_t(unsigned int, msg->len, M_TX_RX_FIFO_SIZE - 1);
if (!(msg->flags & I2C_M_RD)) {
- for (i = 0; i < msg->len; i++) {
+ for (i = 0; i < tx_bytes; i++) {
val = msg->buf[i];
/* mark the last byte */
@@ -183,6 +286,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
writel(val, iproc_i2c->base + M_TX_OFFSET);
}
+ iproc_i2c->tx_bytes = tx_bytes;
}
/* mark as incomplete before starting the transaction */
@@ -194,13 +298,24 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
* transaction is done, i.e., the internal start_busy bit, transitions
* from 1 to 0.
*/
- writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET);
+ val = BIT(IE_M_START_BUSY_SHIFT);
+
+ /*
+ * If the TX data size is larger than the TX FIFO, enable the TX
+ * underrun interrupt, which will be triggered when the TX FIFO is
+ * empty. When that happens we can then pump more data into the FIFO
+ */
+ if (!(msg->flags & I2C_M_RD) &&
+ msg->len > iproc_i2c->tx_bytes)
+ val |= BIT(IE_M_TX_UNDERRUN_SHIFT);
+
+ writel(val, iproc_i2c->base + IE_OFFSET);
/*
* Now we can activate the transfer. For a read operation, specify the
* number of bytes to read
*/
- val = 1 << M_CMD_START_BUSY_SHIFT;
+ val = BIT(M_CMD_START_BUSY_SHIFT);
if (msg->flags & I2C_M_RD) {
val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
(msg->len << M_CMD_RD_CNT_SHIFT);
@@ -283,7 +398,6 @@ static const struct i2c_algorithm bcm_iproc_algo = {
static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
/* need to reserve one byte in the FIFO for the slave address */
.max_read_len = M_TX_RX_FIFO_SIZE - 1,
- .max_write_len = M_TX_RX_FIFO_SIZE - 1,
};
static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
@@ -321,49 +435,6 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
return 0;
}
-static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
-{
- u32 val;
-
- /* put controller in reset */
- val = readl(iproc_i2c->base + CFG_OFFSET);
- val |= 1 << CFG_RESET_SHIFT;
- val &= ~(1 << CFG_EN_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
-
- /* wait 100 usec per spec */
- udelay(100);
-
- /* bring controller out of reset */
- val &= ~(1 << CFG_RESET_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
-
- /* flush TX/RX FIFOs and set RX FIFO threshold to zero */
- val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
- writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
-
- /* disable all interrupts */
- writel(0, iproc_i2c->base + IE_OFFSET);
-
- /* clear all pending interrupts */
- writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
-
- return 0;
-}
-
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable)
-{
- u32 val;
-
- val = readl(iproc_i2c->base + CFG_OFFSET);
- if (enable)
- val |= BIT(CFG_EN_SHIFT);
- else
- val &= ~BIT(CFG_EN_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
-}
-
static int bcm_iproc_i2c_probe(struct platform_device *pdev)
{
int irq, ret = 0;
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 6b08d1607b7a..90bbd9f9dd8f 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -124,6 +124,8 @@
/**
* struct cdns_i2c - I2C device private data structure
+ *
+ * @dev: Pointer to device structure
* @membase: Base address of the I2C device
* @adap: I2C adapter instance
* @p_msg: Message pointer
@@ -171,7 +173,7 @@ struct cdns_platform_data {
clk_rate_change_nb)
/**
- * cdns_i2c_clear_bus_hold() - Clear bus hold bit
+ * cdns_i2c_clear_bus_hold - Clear bus hold bit
* @id: Pointer to driver data struct
*
* Helper to clear the controller's bus hold bit.
@@ -815,8 +817,8 @@ static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
}
/**
- * cdns_i2c_suspend - Suspend method for the driver
- * @_dev: Address of the platform_device structure
+ * cdns_i2c_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the platform_device structure
*
* Put the driver into low power mode.
*
@@ -833,10 +835,10 @@ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev)
}
/**
- * cdns_i2c_resume - Resume from suspend
- * @_dev: Address of the platform_device structure
+ * cdns_i2c_runtime_resume - Runtime resume
+ * @dev: Address of the platform_device structure
*
- * Resume operation after suspend.
+ * Runtime resume callback.
*
* Return: 0 on success and error value on error
*/
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc837769..b167ab25310a 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
cbd_t __iomem *rbase;
u_char *txbuf[CPM_MAXBD];
u_char *rxbuf[CPM_MAXBD];
- u32 txdma[CPM_MAXBD];
- u32 rxdma[CPM_MAXBD];
+ dma_addr_t txdma[CPM_MAXBD];
+ dma_addr_t rxdma[CPM_MAXBD];
};
static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index e38c2bbba940..1590ad0a8081 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -11,7 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/acpi.h>
@@ -151,7 +150,3 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
return 0;
}
-
-MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
-MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 10fbd6d841e0..99b54be6ba73 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -634,7 +634,6 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
- mutex_lock(&dev->lock);
pm_runtime_get_sync(dev->dev);
reinit_completion(&dev->cmd_complete);
@@ -673,11 +672,12 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
}
/*
- * We must disable the adapter before unlocking the &dev->lock mutex
- * below. Otherwise the hardware might continue generating interrupts
- * which in turn causes a race condition with the following transfer.
- * Needs some more investigation if the additional interrupts are
- * a hardware bug or this driver doesn't handle them correctly yet.
+ * We must disable the adapter before returning and signaling the end
+ * of the current transfer. Otherwise the hardware might continue
+ * generating interrupts which in turn causes a race condition with
+ * the following transfer. Needs some more investigation if the
+ * additional interrupts are a hardware bug or this driver doesn't
+ * handle them correctly yet.
*/
__i2c_dw_enable(dev, false);
@@ -706,7 +706,6 @@ done:
done_nolock:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
- mutex_unlock(&dev->lock);
return ret;
}
@@ -860,7 +859,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
int r;
init_completion(&dev->cmd_complete);
- mutex_init(&dev->lock);
r = i2c_dw_init(dev);
if (r)
@@ -883,9 +881,17 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
return r;
}
+ /*
+ * Increment PM usage count during adapter registration in order to
+ * avoid possible spurious runtime suspend when adapter device is
+ * registered to the device core and immediate resume in case bus has
+ * registered I2C slaves that do I2C transfers in their probe.
+ */
+ pm_runtime_get_noresume(dev->dev);
r = i2c_add_numbered_adapter(adap);
if (r)
dev_err(dev->dev, "failure adding adapter: %d\n", r);
+ pm_runtime_put_noidle(dev->dev);
return r;
}
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 9ffb63a60f95..cd409e7fbc71 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -36,7 +36,6 @@
* @dev: driver model device node
* @base: IO registers pointer
* @cmd_complete: tx completion indicator
- * @lock: protect this struct and IO registers
* @clk: input reference clock
* @cmd_err: run time hadware error code
* @msgs: points to an array of messages currently being transfered
@@ -73,7 +72,6 @@ struct dw_i2c_dev {
struct device *dev;
void __iomem *base;
struct completion cmd_complete;
- struct mutex lock;
struct clk *clk;
u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
struct dw_pci_controller *controller;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 438f1b4964c0..d656657b805c 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -123,6 +123,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "80860F41", 0 },
{ "808622C1", 0 },
{ "AMD0010", ACCESS_INTR_MASK },
+ { "AMDI0010", ACCESS_INTR_MASK },
{ "AMDI0510", 0 },
{ "APMC0D0F", 0 },
{ }
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c7500461a..f54ece8fce78 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
return -EIO;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (i = 0; i < num; i++, msgs++) {
stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
}
out:
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
return ret;
}
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOENT;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
+ clk_disable(i2c->clk);
+
+ return 0;
+
err_clk:
clk_disable_unprepare(i2c->clk);
return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
i2c->suspended = 1;
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
int ret = 0;
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
ret = exynos5_hsi2c_clock_setup(i2c);
if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
}
exynos5_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
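
The clock handling is now split so that the sleepable prepare step happens
once at probe/resume while the cheap enable/disable pair brackets each
transfer. The pattern as a standalone sketch with hypothetical helpers:

static int example_probe(struct clk *clk)
{
	int ret = clk_prepare_enable(clk);	/* may sleep; probe only */

	if (ret)
		return ret;
	/* ... one-time setup with the clock running ... */
	clk_disable(clk);			/* clock stays prepared */
	return 0;
}

static int example_xfer(struct clk *clk)
{
	int ret = clk_enable(clk);		/* no sleeping required */

	if (ret)
		return ret;
	/* ... perform the transfer ... */
	clk_disable(clk);
	return 0;
}
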
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 27fa0cb09538..585a3b7915bd 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -184,7 +184,7 @@
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
-#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS 0x2292
+#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
@@ -193,9 +193,11 @@
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c
+#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS 0x2292
#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
+#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
@@ -204,10 +206,8 @@
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2
-#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23
-#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
-#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
+#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index a2b132cef717..1ca7ef2314f7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -212,7 +212,7 @@ struct imx_i2c_struct {
struct imx_i2c_dma *dma;
};
-static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
+static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
.devtype = IMX1_I2C,
.regshift = IMX_I2C_REGSHIFT,
.clk_div = imx_i2c_clk_div,
@@ -222,7 +222,7 @@ static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
};
-static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
+static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
.devtype = IMX21_I2C,
.regshift = IMX_I2C_REGSHIFT,
.clk_div = imx_i2c_clk_div,
@@ -871,7 +871,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
if ((!i) && block_data)
msgs->buf[0] = len;
else
- msgs->buf[i] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+ msgs->buf[i] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
dev_dbg(&i2c_imx->adapter.dev,
"<%s> read byte: B%d=0x%X\n",
__func__, i, msgs->buf[i]);
@@ -916,7 +916,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_RSTA;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- result = i2c_imx_bus_busy(i2c_imx, 1);
+ result = i2c_imx_bus_busy(i2c_imx, 1);
if (result)
goto fail0;
}
@@ -1192,7 +1192,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int i2c_imx_runtime_suspend(struct device *dev)
{
- struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
+ struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
clk_disable_unprepare(i2c_imx->clk);
@@ -1201,7 +1201,7 @@ static int i2c_imx_runtime_suspend(struct device *dev)
static int i2c_imx_runtime_resume(struct device *dev)
{
- struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
+ struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(i2c_imx->clk);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..1c8707710098 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -75,6 +75,7 @@
/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
@@ -180,6 +181,7 @@ struct ismt_priv {
static const struct pci_device_id ismt_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
{ 0, }
};
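
Supporting a new chip in a PCI driver such as i2c-ismt usually needs only the two lines added above: a device-ID #define and a matching pci_device_id entry. The PCI core compares vendor/device IDs against this table at enumeration time, and MODULE_DEVICE_TABLE() exports it so userspace can autoload the module. The shape of the mechanism, with a placeholder driver name:

    static const struct pci_device_id foo_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19ac) },    /* the new device */
        { 0, }                                          /* terminator */
    };
    MODULE_DEVICE_TABLE(pci, foo_ids);    /* emits modalias for autoloading */

    static struct pci_driver foo_driver = {
        .name     = "foo",
        .id_table = foo_ids,
        .probe    = foo_probe,
    };
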
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..ba14a863b451 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -771,11 +771,16 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&clk_freq);
if (ret) {
- dev_err(&pdev->dev, "clock-frequency not specified in DT");
+ dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
goto err;
}
i2c->speed = clk_freq / 1000;
+ if (i2c->speed == 0) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
+ goto err;
+ }
jz4780_i2c_set_speed(i2c);
dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index aec8e6ce38a4..453358b4d9ca 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -60,6 +60,7 @@
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
#define I2C_DMA_HARD_RST 0x0002
+#define I2C_DMA_4G_MODE 0x0001
#define I2C_DEFAULT_SPEED 100000 /* hz */
#define MAX_FS_MODE_SPEED 400000
@@ -88,6 +89,8 @@ enum DMA_REGS_OFFSET {
OFFSET_RX_MEM_ADDR = 0x20,
OFFSET_TX_LEN = 0x24,
OFFSET_RX_LEN = 0x28,
+ OFFSET_TX_4G_MODE = 0x54,
+ OFFSET_RX_4G_MODE = 0x58,
};
enum i2c_trans_st_rs {
@@ -133,6 +136,7 @@ struct mtk_i2c_compatible {
unsigned char dcm: 1;
unsigned char auto_restart: 1;
unsigned char aux_len_reg: 1;
+ unsigned char support_33bits: 1;
};
struct mtk_i2c {
@@ -182,6 +186,7 @@ static const struct mtk_i2c_compatible mt6577_compat = {
.dcm = 1,
.auto_restart = 0,
.aux_len_reg = 0,
+ .support_33bits = 0,
};
static const struct mtk_i2c_compatible mt6589_compat = {
@@ -190,6 +195,7 @@ static const struct mtk_i2c_compatible mt6589_compat = {
.dcm = 0,
.auto_restart = 0,
.aux_len_reg = 0,
+ .support_33bits = 0,
};
static const struct mtk_i2c_compatible mt8173_compat = {
@@ -198,6 +204,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
+ .support_33bits = 1,
};
static const struct of_device_id mtk_i2c_of_match[] = {
@@ -366,6 +373,11 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk,
return 0;
}
+static inline u32 mtk_i2c_set_4g_mode(dma_addr_t addr)
+{
+ return (addr & BIT_ULL(32)) ? I2C_DMA_4G_MODE : I2C_DMA_CLR_FLAG;
+}
+
static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
int num, int left_num)
{
@@ -373,6 +385,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
u16 start_reg;
u16 control_reg;
u16 restart_flag = 0;
+ u32 reg_4g_mode;
dma_addr_t rpaddr = 0;
dma_addr_t wpaddr = 0;
int ret;
@@ -439,6 +452,12 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
msgs->len, DMA_FROM_DEVICE);
if (dma_mapping_error(i2c->dev, rpaddr))
return -ENOMEM;
+
+ if (i2c->dev_comp->support_33bits) {
+ reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr);
+ writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE);
+ }
+
writel((u32)rpaddr, i2c->pdmabase + OFFSET_RX_MEM_ADDR);
writel(msgs->len, i2c->pdmabase + OFFSET_RX_LEN);
} else if (i2c->op == I2C_MASTER_WR) {
@@ -448,6 +467,12 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
msgs->len, DMA_TO_DEVICE);
if (dma_mapping_error(i2c->dev, wpaddr))
return -ENOMEM;
+
+ if (i2c->dev_comp->support_33bits) {
+ reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr);
+ writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE);
+ }
+
writel((u32)wpaddr, i2c->pdmabase + OFFSET_TX_MEM_ADDR);
writel(msgs->len, i2c->pdmabase + OFFSET_TX_LEN);
} else {
@@ -465,6 +490,15 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
msgs->len, DMA_TO_DEVICE);
return -ENOMEM;
}
+
+ if (i2c->dev_comp->support_33bits) {
+ reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr);
+ writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE);
+
+ reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr);
+ writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE);
+ }
+
writel((u32)wpaddr, i2c->pdmabase + OFFSET_TX_MEM_ADDR);
writel((u32)rpaddr, i2c->pdmabase + OFFSET_RX_MEM_ADDR);
writel(msgs->len, i2c->pdmabase + OFFSET_TX_LEN);
@@ -729,6 +763,14 @@ static int mtk_i2c_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (i2c->dev_comp->support_33bits) {
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(33));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_mask return error.\n");
+ return ret;
+ }
+ }
+
ret = mtk_i2c_clock_enable(i2c);
if (ret) {
dev_err(&pdev->dev, "clock enable failed!\n");
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 32914ab42a19..46fb6c42934f 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -2,7 +2,7 @@
* (C) Copyright 2009-2010
* Nokia Siemens Networks, michael.lawnick.ext@nsn.com
*
- * Portions Copyright (C) 2010, 2011 Cavium Networks, Inc.
+ * Portions Copyright (C) 2010 - 2016 Cavium, Inc.
*
* This is a driver for the i2c adapter in Cavium Networks' OCTEON processors.
*
@@ -26,39 +26,48 @@
#define DRV_NAME "i2c-octeon"
-/* The previous out-of-tree version was implicitly version 1.0. */
-#define DRV_VERSION "2.0"
-
-/* register offsets */
-#define SW_TWSI 0x00
-#define TWSI_INT 0x10
+/* Register offsets */
+#define SW_TWSI 0x00
+#define TWSI_INT 0x10
/* Controller command patterns */
-#define SW_TWSI_V 0x8000000000000000ull
-#define SW_TWSI_EOP_TWSI_DATA 0x0C00000100000000ull
-#define SW_TWSI_EOP_TWSI_CTL 0x0C00000200000000ull
-#define SW_TWSI_EOP_TWSI_CLKCTL 0x0C00000300000000ull
-#define SW_TWSI_EOP_TWSI_STAT 0x0C00000300000000ull
-#define SW_TWSI_EOP_TWSI_RST 0x0C00000700000000ull
-#define SW_TWSI_OP_TWSI_CLK 0x0800000000000000ull
-#define SW_TWSI_R 0x0100000000000000ull
+#define SW_TWSI_V BIT_ULL(63) /* Valid bit */
+#define SW_TWSI_R BIT_ULL(56) /* Result or read bit */
+
+/* Controller opcode word (bits 60:57) */
+#define SW_TWSI_OP_SHIFT 57
+#define SW_TWSI_OP_TWSI_CLK (4ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_EOP (6ULL << SW_TWSI_OP_SHIFT) /* Extended opcode */
+
+/* Controller extended opcode word (bits 34:32) */
+#define SW_TWSI_EOP_SHIFT 32
+#define SW_TWSI_EOP_TWSI_DATA (SW_TWSI_OP_EOP | 1ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_CTL (SW_TWSI_OP_EOP | 2ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_CLKCTL (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_STAT (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_RST (SW_TWSI_OP_EOP | 7ULL << SW_TWSI_EOP_SHIFT)
/* Controller command and status bits */
-#define TWSI_CTL_CE 0x80
-#define TWSI_CTL_ENAB 0x40
-#define TWSI_CTL_STA 0x20
-#define TWSI_CTL_STP 0x10
-#define TWSI_CTL_IFLG 0x08
-#define TWSI_CTL_AAK 0x04
+#define TWSI_CTL_CE 0x80
+#define TWSI_CTL_ENAB 0x40 /* Bus enable */
+#define TWSI_CTL_STA 0x20 /* Master-mode start, HW clears when done */
+#define TWSI_CTL_STP 0x10 /* Master-mode stop, HW clears when done */
+#define TWSI_CTL_IFLG 0x08 /* HW event, SW writes 0 to ACK */
+#define TWSI_CTL_AAK 0x04 /* Assert ACK */
/* Some status values */
-#define STAT_START 0x08
-#define STAT_RSTART 0x10
-#define STAT_TXADDR_ACK 0x18
-#define STAT_TXDATA_ACK 0x28
-#define STAT_RXADDR_ACK 0x40
-#define STAT_RXDATA_ACK 0x50
-#define STAT_IDLE 0xF8
+#define STAT_START 0x08
+#define STAT_RSTART 0x10
+#define STAT_TXADDR_ACK 0x18
+#define STAT_TXDATA_ACK 0x28
+#define STAT_RXADDR_ACK 0x40
+#define STAT_RXDATA_ACK 0x50
+#define STAT_IDLE 0xF8
+
+/* TWSI_INT values */
+#define TWSI_INT_CORE_EN BIT_ULL(6)
+#define TWSI_INT_SDA_OVR BIT_ULL(8)
+#define TWSI_INT_SCL_OVR BIT_ULL(9)
struct octeon_i2c {
wait_queue_head_t queue;
@@ -66,23 +75,19 @@ struct octeon_i2c {
int irq;
u32 twsi_freq;
int sys_freq;
- resource_size_t twsi_phys;
void __iomem *twsi_base;
- resource_size_t regsize;
struct device *dev;
};
/**
- * octeon_i2c_write_sw - write an I2C core register.
- * @i2c: The struct octeon_i2c.
- * @eop_reg: Register selector.
- * @data: Value to be written.
+ * octeon_i2c_write_sw - write an I2C core register
+ * @i2c: The struct octeon_i2c
+ * @eop_reg: Register selector
+ * @data: Value to be written
*
* The I2C core registers are accessed indirectly via the SW_TWSI CSR.
*/
-static void octeon_i2c_write_sw(struct octeon_i2c *i2c,
- u64 eop_reg,
- u8 data)
+static void octeon_i2c_write_sw(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
{
u64 tmp;
@@ -93,9 +98,9 @@ static void octeon_i2c_write_sw(struct octeon_i2c *i2c,
}
/**
- * octeon_i2c_read_sw - write an I2C core register.
- * @i2c: The struct octeon_i2c.
- * @eop_reg: Register selector.
+ * octeon_i2c_read_sw - read lower bits of an I2C core register
+ * @i2c: The struct octeon_i2c
+ * @eop_reg: Register selector
*
* Returns the data.
*
@@ -115,8 +120,8 @@ static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
/**
* octeon_i2c_write_int - write the TWSI_INT register
- * @i2c: The struct octeon_i2c.
- * @data: Value to be written.
+ * @i2c: The struct octeon_i2c
+ * @data: Value to be written
*/
static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
{
@@ -125,57 +130,52 @@ static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
}
/**
- * octeon_i2c_int_enable - enable the TS interrupt.
- * @i2c: The struct octeon_i2c.
+ * octeon_i2c_int_enable - enable the CORE interrupt
+ * @i2c: The struct octeon_i2c
*
* The interrupt will be asserted when there is non-STAT_IDLE state in
* the SW_TWSI_EOP_TWSI_STAT register.
*/
static void octeon_i2c_int_enable(struct octeon_i2c *i2c)
{
- octeon_i2c_write_int(i2c, 0x40);
+ octeon_i2c_write_int(i2c, TWSI_INT_CORE_EN);
}
-/**
- * octeon_i2c_int_disable - disable the TS interrupt.
- * @i2c: The struct octeon_i2c.
- */
+/* disable the CORE interrupt */
static void octeon_i2c_int_disable(struct octeon_i2c *i2c)
{
+ /* clear TS/ST/IFLG events */
octeon_i2c_write_int(i2c, 0);
}
/**
- * octeon_i2c_unblock - unblock the bus.
- * @i2c: The struct octeon_i2c.
+ * octeon_i2c_unblock - unblock the bus
+ * @i2c: The struct octeon_i2c
*
- * If there was a reset while a device was driving 0 to bus,
- * bus is blocked. We toggle it free manually by some clock
- * cycles and send a stop.
+ * If there was a reset while a device was driving 0 to bus, bus is blocked.
+ * We toggle it free manually by some clock cycles and send a stop.
*/
static void octeon_i2c_unblock(struct octeon_i2c *i2c)
{
int i;
dev_dbg(i2c->dev, "%s\n", __func__);
+
for (i = 0; i < 9; i++) {
- octeon_i2c_write_int(i2c, 0x0);
+ octeon_i2c_write_int(i2c, 0);
udelay(5);
- octeon_i2c_write_int(i2c, 0x200);
+ octeon_i2c_write_int(i2c, TWSI_INT_SCL_OVR);
udelay(5);
}
- octeon_i2c_write_int(i2c, 0x300);
+ /* hand-crank a STOP */
+ octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR | TWSI_INT_SCL_OVR);
udelay(5);
- octeon_i2c_write_int(i2c, 0x100);
+ octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR);
udelay(5);
- octeon_i2c_write_int(i2c, 0x0);
+ octeon_i2c_write_int(i2c, 0);
}
-/**
- * octeon_i2c_isr - the interrupt service routine.
- * @int: The irq, unused.
- * @dev_id: Our struct octeon_i2c.
- */
+/* interrupt service routine */
static irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
{
struct octeon_i2c *i2c = dev_id;
@@ -193,24 +193,20 @@ static int octeon_i2c_test_iflg(struct octeon_i2c *i2c)
}
/**
- * octeon_i2c_wait - wait for the IFLG to be set.
- * @i2c: The struct octeon_i2c.
+ * octeon_i2c_wait - wait for the IFLG to be set
+ * @i2c: The struct octeon_i2c
*
* Returns 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_wait(struct octeon_i2c *i2c)
{
- long result;
+ long time_left;
octeon_i2c_int_enable(i2c);
-
- result = wait_event_timeout(i2c->queue,
- octeon_i2c_test_iflg(i2c),
- i2c->adap.timeout);
-
+ time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c),
+ i2c->adap.timeout);
octeon_i2c_int_disable(i2c);
-
- if (result == 0) {
+ if (!time_left) {
dev_dbg(i2c->dev, "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
@@ -219,18 +215,18 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
}
/**
- * octeon_i2c_start - send START to the bus.
- * @i2c: The struct octeon_i2c.
+ * octeon_i2c_start - send START to the bus
+ * @i2c: The struct octeon_i2c
*
* Returns 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_start(struct octeon_i2c *i2c)
{
- u8 data;
int result;
+ u8 data;
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB | TWSI_CTL_STA);
+ TWSI_CTL_ENAB | TWSI_CTL_STA);
result = octeon_i2c_wait(i2c);
if (result) {
@@ -243,7 +239,6 @@ static int octeon_i2c_start(struct octeon_i2c *i2c)
octeon_i2c_unblock(i2c);
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
TWSI_CTL_ENAB | TWSI_CTL_STA);
-
result = octeon_i2c_wait(i2c);
}
if (result)
@@ -259,34 +254,19 @@ static int octeon_i2c_start(struct octeon_i2c *i2c)
return 0;
}
-/**
- * octeon_i2c_stop - send STOP to the bus.
- * @i2c: The struct octeon_i2c.
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_stop(struct octeon_i2c *i2c)
+/* send STOP to the bus */
+static void octeon_i2c_stop(struct octeon_i2c *i2c)
{
- u8 data;
-
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
TWSI_CTL_ENAB | TWSI_CTL_STP);
-
- data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
-
- if (data != STAT_IDLE) {
- dev_err(i2c->dev, "%s: bad status(0x%x)\n", __func__, data);
- return -EIO;
- }
- return 0;
}
/**
- * octeon_i2c_write - send data to the bus.
- * @i2c: The struct octeon_i2c.
- * @target: Target address.
- * @data: Pointer to the data to be sent.
- * @length: Length of the data.
+ * octeon_i2c_write - send data to the bus via low-level controller
+ * @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the data to be sent
+ * @length: Length of the data
*
* The address is sent over the bus, then the data.
*
@@ -311,6 +291,7 @@ static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
for (i = 0; i < length; i++) {
tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
+
if ((tmp != STAT_TXADDR_ACK) && (tmp != STAT_TXDATA_ACK)) {
dev_err(i2c->dev,
"%s: bad status before write (0x%x)\n",
@@ -330,20 +311,21 @@ static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
}
/**
- * octeon_i2c_read - receive data from the bus.
- * @i2c: The struct octeon_i2c.
- * @target: Target address.
- * @data: Pointer to the location to store the datae .
- * @length: Length of the data.
+ * octeon_i2c_read - receive data from the bus via low-level controller
+ * @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the location to store the data
+ * @rlength: Length of the data
+ * @recv_len: flag for length byte
*
* The address is sent over the bus, then the data is read.
*
* Returns 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
- u8 *data, int length)
+ u8 *data, u16 *rlength, bool recv_len)
{
- int i, result;
+ int i, result, length = *rlength;
u8 tmp;
if (length < 1)
@@ -353,7 +335,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, (target<<1) | 1);
+ octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, (target << 1) | 1);
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
result = octeon_i2c_wait(i2c);
@@ -362,6 +344,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
for (i = 0; i < length; i++) {
tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
+
if ((tmp != STAT_RXDATA_ACK) && (tmp != STAT_RXADDR_ACK)) {
dev_err(i2c->dev,
"%s: bad status before read (0x%x)\n",
@@ -369,52 +352,59 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
return -EIO;
}
- if (i+1 < length)
+ if (i + 1 < length)
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB | TWSI_CTL_AAK);
+ TWSI_CTL_ENAB | TWSI_CTL_AAK);
else
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB);
+ TWSI_CTL_ENAB);
result = octeon_i2c_wait(i2c);
if (result)
return result;
data[i] = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_DATA);
+ if (recv_len && i == 0) {
+ if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
+ dev_err(i2c->dev,
+ "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
+ __func__, data[i]);
+ return -EPROTO;
+ }
+ length += data[i];
+ }
}
+ *rlength = length;
return 0;
}
/**
- * octeon_i2c_xfer - The driver's master_xfer function.
- * @adap: Pointer to the i2c_adapter structure.
- * @msgs: Pointer to the messages to be processed.
- * @num: Length of the MSGS array.
+ * octeon_i2c_xfer - The driver's master_xfer function
+ * @adap: Pointer to the i2c_adapter structure
+ * @msgs: Pointer to the messages to be processed
+ * @num: Length of the MSGS array
*
- * Returns the number of messages processed, or a negative errno on
- * failure.
+ * Returns the number of messages processed, or a negative errno on failure.
*/
-static int octeon_i2c_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs,
+static int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
int num)
{
- struct i2c_msg *pmsg;
- int i;
- int ret = 0;
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ int i, ret = 0;
for (i = 0; ret == 0 && i < num; i++) {
- pmsg = &msgs[i];
+ struct i2c_msg *pmsg = &msgs[i];
+
dev_dbg(i2c->dev,
"Doing %s %d byte(s) to/from 0x%02x - %d of %d messages\n",
pmsg->flags & I2C_M_RD ? "read" : "write",
pmsg->len, pmsg->addr, i + 1, num);
if (pmsg->flags & I2C_M_RD)
ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
- pmsg->len);
+ &pmsg->len, pmsg->flags & I2C_M_RECV_LEN);
else
ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
- pmsg->len);
+ pmsg->len);
}
octeon_i2c_stop(i2c);
@@ -423,7 +413,8 @@ static int octeon_i2c_xfer(struct i2c_adapter *adap,
static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_SMBUS_BLOCK_PROC_CALL;
}
static const struct i2c_algorithm octeon_i2c_algo = {
@@ -438,10 +429,8 @@ static struct i2c_adapter octeon_i2c_ops = {
.timeout = HZ / 50,
};
-/**
- * octeon_i2c_setclock - Calculate and set clock divisors.
- */
-static int octeon_i2c_setclock(struct octeon_i2c *i2c)
+/* calculate and set clock divisors */
+static void octeon_i2c_set_clock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
@@ -449,8 +438,7 @@ static int octeon_i2c_setclock(struct octeon_i2c *i2c)
for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
/*
* An mdiv value of less than 2 seems to not work well
- * with ds1337 RTCs, so we constrain it to larger
- * values.
+ * with ds1337 RTCs, so we constrain it to larger values.
*/
for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) {
/*
@@ -460,6 +448,7 @@ static int octeon_i2c_setclock(struct octeon_i2c *i2c)
tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10;
tclk *= (1 << ndiv_idx);
thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
+
for (inc = 0; inc <= 1; inc++) {
thp_idx = thp_base + inc;
if (thp_idx < 5 || thp_idx > 0xff)
@@ -480,11 +469,9 @@ static int octeon_i2c_setclock(struct octeon_i2c *i2c)
}
octeon_i2c_write_sw(i2c, SW_TWSI_OP_TWSI_CLK, thp);
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
-
- return 0;
}
-static int octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
+static int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
{
u8 status;
int tries;
@@ -507,9 +494,10 @@ static int octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
static int octeon_i2c_probe(struct platform_device *pdev)
{
- int irq, result = 0;
- struct octeon_i2c *i2c;
+ struct device_node *node = pdev->dev.of_node;
struct resource *res_mem;
+ struct octeon_i2c *i2c;
+ int irq, result = 0;
/* All adaptors have an irq. */
irq = platform_get_irq(pdev, 0);
@@ -518,31 +506,25 @@ static int octeon_i2c_probe(struct platform_device *pdev)
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c) {
- dev_err(&pdev->dev, "kzalloc failed\n");
result = -ENOMEM;
goto out;
}
i2c->dev = &pdev->dev;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (res_mem == NULL) {
- dev_err(i2c->dev, "found no memory resource\n");
- result = -ENXIO;
+ i2c->twsi_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(i2c->twsi_base)) {
+ result = PTR_ERR(i2c->twsi_base);
goto out;
}
- i2c->twsi_phys = res_mem->start;
- i2c->regsize = resource_size(res_mem);
/*
* "clock-rate" is a legacy binding, the official binding is
* "clock-frequency". Try the official one first and then
* fall back if it doesn't exist.
*/
- if (of_property_read_u32(pdev->dev.of_node,
- "clock-frequency", &i2c->twsi_freq) &&
- of_property_read_u32(pdev->dev.of_node,
- "clock-rate", &i2c->twsi_freq)) {
+ if (of_property_read_u32(node, "clock-frequency", &i2c->twsi_freq) &&
+ of_property_read_u32(node, "clock-rate", &i2c->twsi_freq)) {
dev_err(i2c->dev,
"no I2C 'clock-rate' or 'clock-frequency' property\n");
result = -ENXIO;
@@ -551,13 +533,6 @@ static int octeon_i2c_probe(struct platform_device *pdev)
i2c->sys_freq = octeon_get_io_clock_rate();
- if (!devm_request_mem_region(&pdev->dev, i2c->twsi_phys, i2c->regsize,
- res_mem->name)) {
- dev_err(i2c->dev, "request_mem_region failed\n");
- goto out;
- }
- i2c->twsi_base = devm_ioremap(&pdev->dev, i2c->twsi_phys, i2c->regsize);
-
init_waitqueue_head(&i2c->queue);
i2c->irq = irq;
@@ -569,21 +544,17 @@ static int octeon_i2c_probe(struct platform_device *pdev)
goto out;
}
- result = octeon_i2c_initlowlevel(i2c);
+ result = octeon_i2c_init_lowlevel(i2c);
if (result) {
dev_err(i2c->dev, "init low level failed\n");
goto out;
}
- result = octeon_i2c_setclock(i2c);
- if (result) {
- dev_err(i2c->dev, "clock init failed\n");
- goto out;
- }
+ octeon_i2c_set_clock(i2c);
i2c->adap = octeon_i2c_ops;
i2c->adap.dev.parent = &pdev->dev;
- i2c->adap.dev.of_node = pdev->dev.of_node;
+ i2c->adap.dev.of_node = node;
i2c_set_adapdata(&i2c->adap, i2c);
platform_set_drvdata(pdev, i2c);
@@ -592,8 +563,7 @@ static int octeon_i2c_probe(struct platform_device *pdev)
dev_err(i2c->dev, "failed to add adapter\n");
goto out;
}
- dev_info(i2c->dev, "version %s\n", DRV_VERSION);
-
+ dev_info(i2c->dev, "probed\n");
return 0;
out:
@@ -608,10 +578,8 @@ static int octeon_i2c_remove(struct platform_device *pdev)
return 0;
};
-static struct of_device_id octeon_i2c_match[] = {
- {
- .compatible = "cavium,octeon-3860-twsi",
- },
+static const struct of_device_id octeon_i2c_match[] = {
+ { .compatible = "cavium,octeon-3860-twsi", },
{},
};
MODULE_DEVICE_TABLE(of, octeon_i2c_match);
@@ -630,4 +598,3 @@ module_platform_driver(octeon_i2c_driver);
MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
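
Two substantive changes sit among the octeon cleanups. First, the 64-bit SW_TWSI command constants are rebuilt from BIT_ULL() and shifted opcode fields, so each former magic number now documents which bits it occupies. Second, the driver gains SMBus block reads: with I2C_M_RECV_LEN the first byte the slave returns is the byte count, so octeon_i2c_read() now takes the length by pointer and extends it on the fly. The shape of that logic, condensed from the hunk above:

    /* inside the read loop, after data[i] has been fetched */
    if (recv_len && i == 0) {
        if (data[0] > I2C_SMBUS_BLOCK_MAX + 1)
            return -EPROTO;    /* slave announced an impossible count */
        length += data[0];     /* grow the loop by the announced count */
    }
    ...
    *rlength = length;         /* hand the final length back to the caller */

One flag in the functionality hunk looks suspect: I2C_SMBUS_BLOCK_PROC_CALL is a transaction-type constant, not a capability bit, so I2C_FUNC_SMBUS_BLOCK_PROC_CALL was presumably intended there.
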
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 93f2895383ee..23d1c167b5d7 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -85,8 +85,14 @@
/* SB800 constants */
#define SB800_PIIX4_SMB_IDX 0xcd6
-/* SB800 port is selected by bits 2:1 of the smb_en register (0x2c) */
+/*
+ * SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
+ * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
+ * Hudson-2/Bolton port is always selected by bits 2:1 of register 0x2f.
+ */
#define SB800_PIIX4_PORT_IDX 0x2c
+#define SB800_PIIX4_PORT_IDX_ALT 0x2e
+#define SB800_PIIX4_PORT_IDX_SEL 0x2f
#define SB800_PIIX4_PORT_IDX_MASK 0x06
/* insmod parameters */
@@ -136,8 +142,13 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
{ },
};
-/* SB800 globals */
+/*
+ * SB800 globals
+ * piix4_mutex_sb800 protects piix4_port_sel_sb800 and the pair
+ * of I/O ports at SB800_PIIX4_SMB_IDX.
+ */
static DEFINE_MUTEX(piix4_mutex_sb800);
+static u8 piix4_port_sel_sb800;
static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
" port 0", " port 2", " port 3", " port 4"
};
@@ -148,7 +159,7 @@ struct i2c_piix4_adapdata {
/* SB800 */
bool sb800_main;
- unsigned short port;
+ u8 port; /* Port number, shifted */
};
static int piix4_setup(struct pci_dev *PIIX4_dev,
@@ -254,7 +265,7 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id, u8 aux)
{
unsigned short piix4_smba;
- u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status;
+ u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status, port_sel;
u8 i2ccfg, i2ccfg_offset = 0x10;
/* SB800 and later SMBus does not support forcing address */
@@ -334,6 +345,23 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
"SMBus Host Controller at 0x%x, revision %d\n",
piix4_smba, i2ccfg >> 4);
+ /* Find which register is used for port selection */
+ if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+ } else {
+ mutex_lock(&piix4_mutex_sb800);
+ outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
+ port_sel = inb_p(SB800_PIIX4_SMB_IDX + 1);
+ piix4_port_sel_sb800 = (port_sel & 0x01) ?
+ SB800_PIIX4_PORT_IDX_ALT :
+ SB800_PIIX4_PORT_IDX;
+ mutex_unlock(&piix4_mutex_sb800);
+ }
+
+ dev_info(&PIIX4_dev->dev,
+ "Using register 0x%02x for SMBus port selection\n",
+ (unsigned int)piix4_port_sel_sb800);
+
return piix4_smba;
}
@@ -563,12 +591,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
mutex_lock(&piix4_mutex_sb800);
- outb_p(SB800_PIIX4_PORT_IDX, SB800_PIIX4_SMB_IDX);
+ outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
port = adapdata->port;
- if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != (port << 1))
- outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | (port << 1),
+ if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
+ outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
SB800_PIIX4_SMB_IDX + 1);
retval = piix4_access(adap, addr, flags, read_write,
@@ -627,7 +655,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
static struct i2c_adapter *piix4_aux_adapter;
static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
- bool sb800_main, unsigned short port,
+ bool sb800_main, u8 port,
const char *name, struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
@@ -654,7 +682,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
adapdata->smba = smba;
adapdata->sb800_main = sb800_main;
- adapdata->port = port;
+ adapdata->port = port << 1;
/* set up the sysfs linkage to our parent device */
adap->dev.parent = &dev->dev;
@@ -790,7 +818,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
if (adapdata->smba) {
i2c_del_adapter(adap);
- if (adapdata->port == 0) {
+ if (adapdata->port == (0 << 1)) {
release_region(adapdata->smba, SMBIOSIZE);
if (adapdata->sb800_main)
release_region(SB800_PIIX4_SMB_IDX, 2);
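
The piix4 changes deal with SB800-family chips whose port-select bits moved: the selection always lives behind the index/data pair at 0xcd6/0xcd7, but bit 0 of indexed register 0x2f decides whether the bits-2:1 field is in register 0x2c or 0x2e, and AMD parts use 0x2e unconditionally. The driver probes this once at setup and caches the answer; it also stores the port number pre-shifted so the hot path can compare the masked register value directly. The detection, roughly as in piix4_setup_sb800 above:

    outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);  /* index 0x2f */
    port_sel = inb_p(SB800_PIIX4_SMB_IDX + 1);              /* read data */
    piix4_port_sel_sb800 = (port_sel & 0x01) ?
                           SB800_PIIX4_PORT_IDX_ALT :       /* 0x2e */
                           SB800_PIIX4_PORT_IDX;            /* 0x2c */
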
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index fdcbdab808e9..23eaabb19f96 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -14,8 +14,12 @@
*
*/
+#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -24,6 +28,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
/* QUP Registers */
#define QUP_CONFIG 0x000
@@ -33,6 +38,7 @@
#define QUP_OPERATIONAL 0x018
#define QUP_ERROR_FLAGS 0x01c
#define QUP_ERROR_FLAGS_EN 0x020
+#define QUP_OPERATIONAL_MASK 0x028
#define QUP_HW_VERSION 0x030
#define QUP_MX_OUTPUT_CNT 0x100
#define QUP_OUT_FIFO_BASE 0x110
@@ -42,6 +48,7 @@
#define QUP_IN_FIFO_BASE 0x218
#define QUP_I2C_CLK_CTL 0x400
#define QUP_I2C_STATUS 0x404
+#define QUP_I2C_MASTER_GEN 0x408
/* QUP States and reset values */
#define QUP_RESET_STATE 0
@@ -51,6 +58,7 @@
#define QUP_STATE_VALID BIT(2)
#define QUP_I2C_MAST_GEN BIT(4)
+#define QUP_I2C_FLUSH BIT(6)
#define QUP_OPERATIONAL_RESET 0x000ff0
#define QUP_I2C_STATUS_RESET 0xfffffc
@@ -69,16 +77,22 @@
#define QUP_CLOCK_AUTO_GATE BIT(13)
#define I2C_MINI_CORE (2 << 8)
#define I2C_N_VAL 15
+#define I2C_N_VAL_V2 7
+
/* Most significant word offset in FIFO port */
#define QUP_MSW_SHIFT (I2C_N_VAL + 1)
/* Packing/Unpacking words in FIFOs, and IO modes */
#define QUP_OUTPUT_BLK_MODE (1 << 10)
+#define QUP_OUTPUT_BAM_MODE (3 << 10)
#define QUP_INPUT_BLK_MODE (1 << 12)
+#define QUP_INPUT_BAM_MODE (3 << 12)
+#define QUP_BAM_MODE (QUP_OUTPUT_BAM_MODE | QUP_INPUT_BAM_MODE)
#define QUP_UNPACK_EN BIT(14)
#define QUP_PACK_EN BIT(15)
#define QUP_REPACK_EN (QUP_UNPACK_EN | QUP_PACK_EN)
+#define QUP_V2_TAGS_EN 1
#define QUP_OUTPUT_BLOCK_SIZE(x)(((x) >> 0) & 0x03)
#define QUP_OUTPUT_FIFO_SIZE(x) (((x) >> 2) & 0x07)
@@ -90,6 +104,15 @@
#define QUP_TAG_DATA (2 << 8)
#define QUP_TAG_STOP (3 << 8)
#define QUP_TAG_REC (4 << 8)
+#define QUP_BAM_INPUT_EOT 0x93
+#define QUP_BAM_FLUSH_STOP 0x96
+
+/* QUP v2 tags */
+#define QUP_TAG_V2_START 0x81
+#define QUP_TAG_V2_DATAWR 0x82
+#define QUP_TAG_V2_DATAWR_STOP 0x83
+#define QUP_TAG_V2_DATARD 0x85
+#define QUP_TAG_V2_DATARD_STOP 0x87
/* Status, Error flags */
#define I2C_STATUS_WR_BUFFER_FULL BIT(0)
@@ -98,6 +121,36 @@
#define QUP_STATUS_ERROR_FLAGS 0x7c
#define QUP_READ_LIMIT 256
+#define SET_BIT 0x1
+#define RESET_BIT 0x0
+#define ONE_BYTE 0x1
+#define QUP_I2C_MX_CONFIG_DURING_RUN BIT(31)
+
+#define MX_TX_RX_LEN SZ_64K
+#define MX_BLOCKS (MX_TX_RX_LEN / QUP_READ_LIMIT)
+
+/* Max timeout in ms for 32k bytes */
+#define TOUT_MAX 300
+
+struct qup_i2c_block {
+ int count;
+ int pos;
+ int tx_tag_len;
+ int rx_tag_len;
+ int data_len;
+ u8 tags[6];
+};
+
+struct qup_i2c_tag {
+ u8 *start;
+ dma_addr_t addr;
+};
+
+struct qup_i2c_bam {
+ struct qup_i2c_tag tag;
+ struct dma_chan *dma;
+ struct scatterlist *sg;
+};
struct qup_i2c_dev {
struct device *dev;
@@ -114,6 +167,7 @@ struct qup_i2c_dev {
int in_blk_sz;
unsigned long one_byte_t;
+ struct qup_i2c_block blk;
struct i2c_msg *msg;
/* Current posion in user message buffer */
@@ -123,6 +177,19 @@ struct qup_i2c_dev {
/* QUP core errors */
u32 qup_err;
+ /* To check if this is the last msg */
+ bool is_last;
+
+ /* To configure when bus is in run state */
+ int config_run;
+
+ /* dma parameters */
+ bool is_dma;
+ struct dma_pool *dpool;
+ struct qup_i2c_tag start_tag;
+ struct qup_i2c_bam brx;
+ struct qup_i2c_bam btx;
+
struct completion xfer;
};
@@ -199,6 +266,14 @@ static int qup_i2c_poll_state(struct qup_i2c_dev *qup, u32 req_state)
return qup_i2c_poll_state_mask(qup, req_state, QUP_STATE_MASK);
}
+static void qup_i2c_flush(struct qup_i2c_dev *qup)
+{
+ u32 val = readl(qup->base + QUP_STATE);
+
+ val |= QUP_I2C_FLUSH;
+ writel(val, qup->base + QUP_STATE);
+}
+
static int qup_i2c_poll_state_valid(struct qup_i2c_dev *qup)
{
return qup_i2c_poll_state_mask(qup, 0, 0);
@@ -221,26 +296,62 @@ static int qup_i2c_change_state(struct qup_i2c_dev *qup, u32 state)
return 0;
}
-static int qup_i2c_wait_writeready(struct qup_i2c_dev *qup)
+/**
+ * qup_i2c_wait_ready - wait for a give number of bytes in tx/rx path
+ * @qup: The qup_i2c_dev device
+ * @op: The bit/event to wait on
+ * @val: value of the bit to wait on, 0 or 1
+ * @len: The length the bytes to be transferred
+ */
+static int qup_i2c_wait_ready(struct qup_i2c_dev *qup, int op, bool val,
+ int len)
{
unsigned long timeout;
u32 opflags;
u32 status;
+ u32 shift = __ffs(op);
- timeout = jiffies + HZ;
+ len *= qup->one_byte_t;
+ /* timeout after a wait of twice the max time */
+ timeout = jiffies + len * 4;
for (;;) {
opflags = readl(qup->base + QUP_OPERATIONAL);
status = readl(qup->base + QUP_I2C_STATUS);
- if (!(opflags & QUP_OUT_NOT_EMPTY) &&
- !(status & I2C_STATUS_BUS_ACTIVE))
- return 0;
+ if (((opflags & op) >> shift) == val) {
+ if ((op == QUP_OUT_NOT_EMPTY) && qup->is_last) {
+ if (!(status & I2C_STATUS_BUS_ACTIVE))
+ return 0;
+ } else {
+ return 0;
+ }
+ }
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
- usleep_range(qup->one_byte_t, qup->one_byte_t * 2);
+ usleep_range(len, len * 2);
+ }
+}
+
+static void qup_i2c_set_write_mode_v2(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ /* Number of entries to shift out, including the tags */
+ int total = msg->len + qup->blk.tx_tag_len;
+
+ total |= qup->config_run;
+
+ if (total < qup->out_fifo_sz) {
+ /* FIFO mode */
+ writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
+ writel(total, qup->base + QUP_MX_WRITE_CNT);
+ } else {
+ /* BLOCK mode (transfer data on chunks) */
+ writel(QUP_OUTPUT_BLK_MODE | QUP_REPACK_EN,
+ qup->base + QUP_IO_MODE);
+ writel(total, qup->base + QUP_MX_OUTPUT_CNT);
}
}
@@ -261,13 +372,45 @@ static void qup_i2c_set_write_mode(struct qup_i2c_dev *qup, struct i2c_msg *msg)
}
}
-static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int check_for_fifo_space(struct qup_i2c_dev *qup)
+{
+ int ret;
+
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ goto out;
+
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_FULL,
+ RESET_BIT, 4 * ONE_BYTE);
+ if (ret) {
+ /* Fifo is full. Drain out the fifo */
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto out;
+
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY,
+ RESET_BIT, 256 * ONE_BYTE);
+ if (ret) {
+ dev_err(qup->dev, "timeout for fifo out full");
+ goto out;
+ }
+
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
u32 addr = msg->addr << 1;
u32 qup_tag;
- u32 opflags;
int idx;
u32 val;
+ int ret = 0;
if (qup->pos == 0) {
val = QUP_TAG_START | addr;
@@ -279,9 +422,9 @@ static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
while (qup->pos < msg->len) {
/* Check that there's space in the FIFO for our pair */
- opflags = readl(qup->base + QUP_OPERATIONAL);
- if (opflags & QUP_OUT_FULL)
- break;
+ ret = check_for_fifo_space(qup);
+ if (ret)
+ return ret;
if (qup->pos == msg->len - 1)
qup_tag = QUP_TAG_STOP;
@@ -300,11 +443,501 @@ static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
qup->pos++;
idx++;
}
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+
+ return ret;
}
-static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static void qup_i2c_set_blk_data(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ memset(&qup->blk, 0, sizeof(qup->blk));
+
+ qup->blk.data_len = msg->len;
+ qup->blk.count = (msg->len + QUP_READ_LIMIT - 1) / QUP_READ_LIMIT;
+
+ /* 4 bytes for first block and 2 writes for rest */
+ qup->blk.tx_tag_len = 4 + (qup->blk.count - 1) * 2;
+
+ /* There are 2 tag bytes that are read in to fifo for every block */
+ if (msg->flags & I2C_M_RD)
+ qup->blk.rx_tag_len = qup->blk.count * 2;
+}
+
+static int qup_i2c_send_data(struct qup_i2c_dev *qup, int tlen, u8 *tbuf,
+ int dlen, u8 *dbuf)
+{
+ u32 val = 0, idx = 0, pos = 0, i = 0, t;
+ int len = tlen + dlen;
+ u8 *buf = tbuf;
+ int ret = 0;
+
+ while (len > 0) {
+ ret = check_for_fifo_space(qup);
+ if (ret)
+ return ret;
+
+ t = (len >= 4) ? 4 : len;
+
+ while (idx < t) {
+ if (!i && (pos >= tlen)) {
+ buf = dbuf;
+ pos = 0;
+ i = 1;
+ }
+ val |= buf[pos++] << (idx++ * 8);
+ }
+
+ writel(val, qup->base + QUP_OUT_FIFO_BASE);
+ idx = 0;
+ val = 0;
+ len -= 4;
+ }
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+
+ return ret;
+}
+
+static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
+{
+ int data_len;
+
+ if (qup->blk.data_len > QUP_READ_LIMIT)
+ data_len = QUP_READ_LIMIT;
+ else
+ data_len = qup->blk.data_len;
+
+ return data_len;
+}
+
+static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
+ struct i2c_msg *msg, int is_dma)
+{
+ u16 addr = (msg->addr << 1) | ((msg->flags & I2C_M_RD) == I2C_M_RD);
+ int len = 0;
+ int data_len;
+
+ int last = (qup->blk.pos == (qup->blk.count - 1)) && (qup->is_last);
+
+ if (qup->blk.pos == 0) {
+ tags[len++] = QUP_TAG_V2_START;
+ tags[len++] = addr & 0xff;
+
+ if (msg->flags & I2C_M_TEN)
+ tags[len++] = addr >> 8;
+ }
+
+ /* Send _STOP commands for the last block */
+ if (last) {
+ if (msg->flags & I2C_M_RD)
+ tags[len++] = QUP_TAG_V2_DATARD_STOP;
+ else
+ tags[len++] = QUP_TAG_V2_DATAWR_STOP;
+ } else {
+ if (msg->flags & I2C_M_RD)
+ tags[len++] = QUP_TAG_V2_DATARD;
+ else
+ tags[len++] = QUP_TAG_V2_DATAWR;
+ }
+
+ data_len = qup_i2c_get_data_len(qup);
+
+ /* 0 implies 256 bytes */
+ if (data_len == QUP_READ_LIMIT)
+ tags[len++] = 0;
+ else
+ tags[len++] = data_len;
+
+ if ((msg->flags & I2C_M_RD) && last && is_dma) {
+ tags[len++] = QUP_BAM_INPUT_EOT;
+ tags[len++] = QUP_BAM_FLUSH_STOP;
+ }
+
+ return len;
+}
+
+static int qup_i2c_issue_xfer_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int data_len = 0, tag_len, index;
+ int ret;
+
+ tag_len = qup_i2c_set_tags(qup->blk.tags, qup, msg, 0);
+ index = msg->len - qup->blk.data_len;
+
+ /* only tags are written for read */
+ if (!(msg->flags & I2C_M_RD))
+ data_len = qup_i2c_get_data_len(qup);
+
+ ret = qup_i2c_send_data(qup, tag_len, qup->blk.tags,
+ data_len, &msg->buf[index]);
+ qup->blk.data_len -= data_len;
+
+ return ret;
+}
+
+static void qup_i2c_bam_cb(void *data)
+{
+ struct qup_i2c_dev *qup = data;
+
+ complete(&qup->xfer);
+}
+
+static int qup_sg_set_buf(struct scatterlist *sg, void *buf,
+ struct qup_i2c_tag *tg, unsigned int buflen,
+ struct qup_i2c_dev *qup, int map, int dir)
+{
+ int ret;
+
+ sg_set_buf(sg, buf, buflen);
+ ret = dma_map_sg(qup->dev, sg, 1, dir);
+ if (!ret)
+ return -EINVAL;
+
+ if (!map)
+ sg_dma_address(sg) = tg->addr + ((u8 *)buf - tg->start);
+
+ return 0;
+}
+
+static void qup_i2c_rel_dma(struct qup_i2c_dev *qup)
+{
+ if (qup->btx.dma)
+ dma_release_channel(qup->btx.dma);
+ if (qup->brx.dma)
+ dma_release_channel(qup->brx.dma);
+ qup->btx.dma = NULL;
+ qup->brx.dma = NULL;
+}
+
+static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
+{
+ int err;
+
+ if (!qup->btx.dma) {
+ qup->btx.dma = dma_request_slave_channel_reason(qup->dev, "tx");
+ if (IS_ERR(qup->btx.dma)) {
+ err = PTR_ERR(qup->btx.dma);
+ qup->btx.dma = NULL;
+ dev_err(qup->dev, "\n tx channel not available");
+ return err;
+ }
+ }
+
+ if (!qup->brx.dma) {
+ qup->brx.dma = dma_request_slave_channel_reason(qup->dev, "rx");
+ if (IS_ERR(qup->brx.dma)) {
+ dev_err(qup->dev, "\n rx channel not available");
+ err = PTR_ERR(qup->brx.dma);
+ qup->brx.dma = NULL;
+ qup_i2c_rel_dma(qup);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
+ int num)
+{
+ struct dma_async_tx_descriptor *txd, *rxd = NULL;
+ int ret = 0, idx = 0, limit = QUP_READ_LIMIT;
+ dma_cookie_t cookie_rx, cookie_tx;
+ u32 rx_nents = 0, tx_nents = 0, len, blocks, rem;
+ u32 i, tlen, tx_len, tx_buf = 0, rx_buf = 0, off = 0;
+ u8 *tags;
+
+ while (idx < num) {
+ blocks = (msg->len + limit) / limit;
+ rem = msg->len % limit;
+ tx_len = 0, len = 0, i = 0;
+
+ qup->is_last = (idx == (num - 1));
+
+ qup_i2c_set_blk_data(qup, msg);
+
+ if (msg->flags & I2C_M_RD) {
+ rx_nents += (blocks * 2) + 1;
+ tx_nents += 1;
+
+ while (qup->blk.pos < blocks) {
+ /* length set to '0' implies 256 bytes */
+ tlen = (i == (blocks - 1)) ? rem : 0;
+ tags = &qup->start_tag.start[off + len];
+ len += qup_i2c_set_tags(tags, qup, msg, 1);
+
+ /* scratch buf to read the start and len tags */
+ ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
+ &qup->brx.tag.start[0],
+ &qup->brx.tag,
+ 2, qup, 0, 0);
+
+ if (ret)
+ return ret;
+
+ ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
+ &msg->buf[limit * i],
+ NULL, tlen, qup,
+ 1, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ i++;
+ qup->blk.pos = i;
+ }
+ ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
+ &qup->start_tag.start[off],
+ &qup->start_tag, len, qup, 0, 0);
+ if (ret)
+ return ret;
+
+ off += len;
+ /* scratch buf to read the BAM EOT and FLUSH tags */
+ ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
+ &qup->brx.tag.start[0],
+ &qup->brx.tag, 2,
+ qup, 0, 0);
+ if (ret)
+ return ret;
+ } else {
+ tx_nents += (blocks * 2);
+
+ while (qup->blk.pos < blocks) {
+ tlen = (i == (blocks - 1)) ? rem : 0;
+ tags = &qup->start_tag.start[off + tx_len];
+ len = qup_i2c_set_tags(tags, qup, msg, 1);
+
+ ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
+ tags,
+ &qup->start_tag, len,
+ qup, 0, 0);
+ if (ret)
+ return ret;
+
+ tx_len += len;
+ ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
+ &msg->buf[limit * i],
+ NULL, tlen, qup, 1,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+ i++;
+ qup->blk.pos = i;
+ }
+ off += tx_len;
+
+ if (idx == (num - 1)) {
+ len = 1;
+ if (rx_nents) {
+ qup->btx.tag.start[0] =
+ QUP_BAM_INPUT_EOT;
+ len++;
+ }
+ qup->btx.tag.start[len - 1] =
+ QUP_BAM_FLUSH_STOP;
+ ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
+ &qup->btx.tag.start[0],
+ &qup->btx.tag, len,
+ qup, 0, 0);
+ if (ret)
+ return ret;
+ tx_nents += 1;
+ }
+ }
+ idx++;
+ msg++;
+ }
+
+ txd = dmaengine_prep_slave_sg(qup->btx.dma, qup->btx.sg, tx_nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
+ if (!txd) {
+ dev_err(qup->dev, "failed to get tx desc\n");
+ ret = -EINVAL;
+ goto desc_err;
+ }
+
+ if (!rx_nents) {
+ txd->callback = qup_i2c_bam_cb;
+ txd->callback_param = qup;
+ }
+
+ cookie_tx = dmaengine_submit(txd);
+ if (dma_submit_error(cookie_tx)) {
+ ret = -EINVAL;
+ goto desc_err;
+ }
+
+ dma_async_issue_pending(qup->btx.dma);
+
+ if (rx_nents) {
+ rxd = dmaengine_prep_slave_sg(qup->brx.dma, qup->brx.sg,
+ rx_nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!rxd) {
+ dev_err(qup->dev, "failed to get rx desc\n");
+ ret = -EINVAL;
+
+ /* abort TX descriptors */
+ dmaengine_terminate_all(qup->btx.dma);
+ goto desc_err;
+ }
+
+ rxd->callback = qup_i2c_bam_cb;
+ rxd->callback_param = qup;
+ cookie_rx = dmaengine_submit(rxd);
+ if (dma_submit_error(cookie_rx)) {
+ ret = -EINVAL;
+ goto desc_err;
+ }
+
+ dma_async_issue_pending(qup->brx.dma);
+ }
+
+ if (!wait_for_completion_timeout(&qup->xfer, TOUT_MAX * HZ)) {
+ dev_err(qup->dev, "normal trans timed out\n");
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret || qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG) {
+ msg--;
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
+ ret = -EIO;
+
+ if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+ dev_err(qup->dev, "change to run state timed out");
+ return ret;
+ }
+
+ if (rx_nents)
+ writel(QUP_BAM_INPUT_EOT,
+ qup->base + QUP_OUT_FIFO_BASE);
+
+ writel(QUP_BAM_FLUSH_STOP,
+ qup->base + QUP_OUT_FIFO_BASE);
+
+ qup_i2c_flush(qup);
+
+ /* wait for remaining interrupts to occur */
+ if (!wait_for_completion_timeout(&qup->xfer, HZ))
+ dev_err(qup->dev, "flush timed out\n");
+
+ qup_i2c_rel_dma(qup);
+ }
+ }
+
+ dma_unmap_sg(qup->dev, qup->btx.sg, tx_nents, DMA_TO_DEVICE);
+
+ if (rx_nents)
+ dma_unmap_sg(qup->dev, qup->brx.sg, rx_nents,
+ DMA_FROM_DEVICE);
+desc_err:
+ return ret;
+}
+
+static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int num)
+{
+ struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
+ int ret = 0;
+
+ enable_irq(qup->irq);
+ ret = qup_i2c_req_dma(qup);
+
+ if (ret)
+ goto out;
+
+ qup->bus_err = 0;
+ qup->qup_err = 0;
+
+ writel(0, qup->base + QUP_MX_INPUT_CNT);
+ writel(0, qup->base + QUP_MX_OUTPUT_CNT);
+
+ /* set BAM mode */
+ writel(QUP_REPACK_EN | QUP_BAM_MODE, qup->base + QUP_IO_MODE);
+
+ /* mask fifo irqs */
+ writel((0x3 << 8), qup->base + QUP_OPERATIONAL_MASK);
+
+ /* set RUN STATE */
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto out;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ qup->msg = msg;
+ ret = qup_i2c_bam_do_xfer(qup, qup->msg, num);
+out:
+ disable_irq(qup->irq);
+
+ qup->msg = NULL;
+ return ret;
+}
+
+static int qup_i2c_wait_for_complete(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
{
unsigned long left;
+ int ret = 0;
+
+ left = wait_for_completion_timeout(&qup->xfer, HZ);
+ if (!left) {
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = -ETIMEDOUT;
+ }
+
+ if (qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG) {
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
+ ret = -EIO;
+ }
+ }
+
+ return ret;
+}
+
+static int qup_i2c_write_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ qup->msg = msg;
+ qup->pos = 0;
+ enable_irq(qup->irq);
+ qup_i2c_set_blk_data(qup, msg);
+ qup_i2c_set_write_mode_v2(qup, msg);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ do {
+ ret = qup_i2c_issue_xfer_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ goto err;
+
+ qup->blk.pos++;
+ } while (qup->blk.pos < qup->blk.count);
+
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY, RESET_BIT, ONE_BYTE);
+
+err:
+ disable_irq(qup->irq);
+ qup->msg = NULL;
+
+ return ret;
+}
+
+static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
int ret;
qup->msg = msg;
@@ -325,30 +958,21 @@ static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
if (ret)
goto err;
- qup_i2c_issue_write(qup, msg);
-
- ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ ret = qup_i2c_issue_write(qup, msg);
if (ret)
goto err;
- left = wait_for_completion_timeout(&qup->xfer, HZ);
- if (!left) {
- writel(1, qup->base + QUP_SW_RESET);
- ret = -ETIMEDOUT;
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
goto err;
- }
- if (qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG)
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
- ret = -EIO;
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
goto err;
- }
} while (qup->pos < msg->len);
/* Wait for the outstanding data in the fifo to drain */
- ret = qup_i2c_wait_writeready(qup);
-
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY, RESET_BIT, ONE_BYTE);
err:
disable_irq(qup->irq);
qup->msg = NULL;
@@ -370,6 +994,28 @@ static void qup_i2c_set_read_mode(struct qup_i2c_dev *qup, int len)
}
}
+static void qup_i2c_set_read_mode_v2(struct qup_i2c_dev *qup, int len)
+{
+ int tx_len = qup->blk.tx_tag_len;
+
+ len += qup->blk.rx_tag_len;
+ len |= qup->config_run;
+ tx_len |= qup->config_run;
+
+ if (len < qup->in_fifo_sz) {
+ /* FIFO mode */
+ writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
+ writel(tx_len, qup->base + QUP_MX_WRITE_CNT);
+ writel(len, qup->base + QUP_MX_READ_CNT);
+ } else {
+ /* BLOCK mode (transfer data on chunks) */
+ writel(QUP_INPUT_BLK_MODE | QUP_REPACK_EN,
+ qup->base + QUP_IO_MODE);
+ writel(tx_len, qup->base + QUP_MX_OUTPUT_CNT);
+ writel(len, qup->base + QUP_MX_INPUT_CNT);
+ }
+}
+
static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
u32 addr, len, val;
@@ -384,18 +1030,19 @@ static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
}
-static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
- u32 opflags;
u32 val = 0;
int idx;
+ int ret = 0;
for (idx = 0; qup->pos < msg->len; idx++) {
if ((idx & 1) == 0) {
/* Check that FIFO have data */
- opflags = readl(qup->base + QUP_OPERATIONAL);
- if (!(opflags & QUP_IN_NOT_EMPTY))
- break;
+ ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY,
+ SET_BIT, 4 * ONE_BYTE);
+ if (ret)
+ return ret;
/* Reading 2 words at time */
val = readl(qup->base + QUP_IN_FIFO_BASE);
@@ -405,18 +1052,94 @@ static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT;
}
}
+
+ return ret;
+}
+
+static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ u32 val;
+ int idx, pos = 0, ret = 0, total;
+
+ total = qup_i2c_get_data_len(qup);
+
+ /* 2 extra bytes for read tags */
+ while (pos < (total + 2)) {
+ /* Check that FIFO have data */
+ ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY,
+ SET_BIT, 4 * ONE_BYTE);
+ if (ret) {
+ dev_err(qup->dev, "timeout for fifo not empty");
+ return ret;
+ }
+ val = readl(qup->base + QUP_IN_FIFO_BASE);
+
+ for (idx = 0; idx < 4; idx++, val >>= 8, pos++) {
+ /* first 2 bytes are tag bytes */
+ if (pos < 2)
+ continue;
+
+ if (pos >= (total + 2))
+ goto out;
+
+ msg->buf[qup->pos++] = val & 0xff;
+ }
+ }
+
+out:
+ qup->blk.data_len -= total;
+
+ return ret;
+}
+
+static int qup_i2c_read_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ qup->msg = msg;
+ qup->pos = 0;
+ enable_irq(qup->irq);
+ qup_i2c_set_blk_data(qup, msg);
+ qup_i2c_set_read_mode_v2(qup, msg->len);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ do {
+ ret = qup_i2c_issue_xfer_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_read_fifo_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ qup->blk.pos++;
+ } while (qup->blk.pos < qup->blk.count);
+
+err:
+ disable_irq(qup->irq);
+ qup->msg = NULL;
+
+ return ret;
}
static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
- unsigned long left;
int ret;
qup->msg = msg;
qup->pos = 0;
enable_irq(qup->irq);
-
qup_i2c_set_read_mode(qup, msg->len);
ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
@@ -436,21 +1159,13 @@ static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
goto err;
do {
- left = wait_for_completion_timeout(&qup->xfer, HZ);
- if (!left) {
- writel(1, qup->base + QUP_SW_RESET);
- ret = -ETIMEDOUT;
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
goto err;
- }
- if (qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG)
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
- ret = -EIO;
+ ret = qup_i2c_read_fifo(qup, msg);
+ if (ret)
goto err;
- }
-
- qup_i2c_read_fifo(qup, msg);
} while (qup->pos < msg->len);
err:
@@ -513,6 +1228,87 @@ out:
return ret;
}
+static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
+ int ret, len, idx = 0, use_dma = 0;
+
+ ret = pm_runtime_get_sync(qup->dev);
+ if (ret < 0)
+ goto out;
+
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
+ if (ret)
+ goto out;
+
+ /* Configure QUP as I2C mini core */
+ writel(I2C_MINI_CORE | I2C_N_VAL_V2, qup->base + QUP_CONFIG);
+ writel(QUP_V2_TAGS_EN, qup->base + QUP_I2C_MASTER_GEN);
+
+ if ((qup->is_dma)) {
+ /* All i2c_msgs should be transferred using either dma or cpu */
+ for (idx = 0; idx < num; idx++) {
+ if (msgs[idx].len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ len = (msgs[idx].len > qup->out_fifo_sz) ||
+ (msgs[idx].len > qup->in_fifo_sz);
+
+ if ((!is_vmalloc_addr(msgs[idx].buf)) && len) {
+ use_dma = 1;
+ } else {
+ use_dma = 0;
+ break;
+ }
+ }
+ }
+
+ do {
+ if (msgs[idx].len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (qup_i2c_poll_state_i2c_master(qup)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ qup->is_last = (idx == (num - 1));
+ if (idx)
+ qup->config_run = QUP_I2C_MX_CONFIG_DURING_RUN;
+ else
+ qup->config_run = 0;
+
+ reinit_completion(&qup->xfer);
+
+ if (use_dma) {
+ ret = qup_i2c_bam_xfer(adap, &msgs[idx], num);
+ } else {
+ if (msgs[idx].flags & I2C_M_RD)
+ ret = qup_i2c_read_one_v2(qup, &msgs[idx]);
+ else
+ ret = qup_i2c_write_one_v2(qup, &msgs[idx]);
+ }
+ } while ((idx++ < (num - 1)) && !use_dma && !ret);
+
+ if (!ret)
+ ret = qup_i2c_change_state(qup, QUP_RESET_STATE);
+
+ if (ret == 0)
+ ret = num;
+out:
+ pm_runtime_mark_last_busy(qup->dev);
+ pm_runtime_put_autosuspend(qup->dev);
+
+ return ret;
+}
+
static u32 qup_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
@@ -523,6 +1319,11 @@ static const struct i2c_algorithm qup_i2c_algo = {
.functionality = qup_i2c_func,
};
+static const struct i2c_algorithm qup_i2c_algo_v2 = {
+ .master_xfer = qup_i2c_xfer_v2,
+ .functionality = qup_i2c_func,
+};
+
/*
* The QUP block will issue a NACK and STOP on the bus when reaching
* the end of the read, the length of the read is specified as one byte
@@ -561,6 +1362,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
int ret, fs_div, hs_div;
int src_clk_freq;
u32 clk_freq = 100000;
+ int blocks;
qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL);
if (!qup)
@@ -572,6 +1374,68 @@ static int qup_i2c_probe(struct platform_device *pdev)
of_property_read_u32(node, "clock-frequency", &clk_freq);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) {
+ qup->adap.algo = &qup_i2c_algo;
+ qup->adap.quirks = &qup_i2c_quirks;
+ } else {
+ qup->adap.algo = &qup_i2c_algo_v2;
+ ret = qup_i2c_req_dma(qup);
+
+ if (ret == -EPROBE_DEFER)
+ goto fail_dma;
+ else if (ret != 0)
+ goto nodma;
+
+ blocks = (MX_BLOCKS << 1) + 1;
+ qup->btx.sg = devm_kzalloc(&pdev->dev,
+ sizeof(*qup->btx.sg) * blocks,
+ GFP_KERNEL);
+ if (!qup->btx.sg) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+ sg_init_table(qup->btx.sg, blocks);
+
+ qup->brx.sg = devm_kzalloc(&pdev->dev,
+ sizeof(*qup->brx.sg) * blocks,
+ GFP_KERNEL);
+ if (!qup->brx.sg) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+ sg_init_table(qup->brx.sg, blocks);
+
+ /* 2 tag bytes for each block + 5 for start, stop tags */
+ size = blocks * 2 + 5;
+ qup->dpool = dma_pool_create("qup_i2c-dma-pool", &pdev->dev,
+ size, 4, 0);
+
+ qup->start_tag.start = dma_pool_alloc(qup->dpool, GFP_KERNEL,
+ &qup->start_tag.addr);
+ if (!qup->start_tag.start) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+
+ qup->brx.tag.start = dma_pool_alloc(qup->dpool,
+ GFP_KERNEL,
+ &qup->brx.tag.addr);
+ if (!qup->brx.tag.start) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+
+ qup->btx.tag.start = dma_pool_alloc(qup->dpool,
+ GFP_KERNEL,
+ &qup->btx.tag.addr);
+ if (!qup->btx.tag.start) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+ qup->is_dma = true;
+ }
+
+nodma:
/* We support frequencies up to FAST Mode (400KHz) */
if (!clk_freq || clk_freq > 400000) {
dev_err(qup->dev, "clock frequency not supported %d\n",
@@ -667,10 +1531,10 @@ static int qup_i2c_probe(struct platform_device *pdev)
qup->out_blk_sz, qup->out_fifo_sz);
i2c_set_adapdata(&qup->adap, qup);
- qup->adap.algo = &qup_i2c_algo;
- qup->adap.quirks = &qup_i2c_quirks;
qup->adap.dev.parent = qup->dev;
qup->adap.dev.of_node = pdev->dev.of_node;
+ qup->is_last = true;
+
strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
@@ -689,6 +1553,11 @@ fail_runtime:
pm_runtime_set_suspended(qup->dev);
fail:
qup_i2c_disable_clocks(qup);
+fail_dma:
+ if (qup->btx.dma)
+ dma_release_channel(qup->btx.dma);
+ if (qup->brx.dma)
+ dma_release_channel(qup->brx.dma);
return ret;
}
@@ -696,6 +1565,18 @@ static int qup_i2c_remove(struct platform_device *pdev)
{
struct qup_i2c_dev *qup = platform_get_drvdata(pdev);
+ if (qup->is_dma) {
+ dma_pool_free(qup->dpool, qup->start_tag.start,
+ qup->start_tag.addr);
+ dma_pool_free(qup->dpool, qup->brx.tag.start,
+ qup->brx.tag.addr);
+ dma_pool_free(qup->dpool, qup->btx.tag.start,
+ qup->btx.tag.addr);
+ dma_pool_destroy(qup->dpool);
+ dma_release_channel(qup->btx.dma);
+ dma_release_channel(qup->brx.dma);
+ }
+
disable_irq(qup->irq);
qup_i2c_disable_clocks(qup);
i2c_del_adapter(&qup->adap);
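The tag-buffer management added above follows the standard dma_pool lifecycle: create once in probe, alloc per buffer, then free each buffer and destroy the pool in remove. A minimal self-contained sketch of that pattern, with illustrative size/alignment values rather than the driver's:

#include <linux/device.h>
#include <linux/dmapool.h>

/* Sketch of the dma_pool create/alloc/free/destroy lifecycle used above.
 * The 32-byte size and 4-byte alignment are illustrative assumptions. */
static int tag_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t handle;
	void *vaddr;

	pool = dma_pool_create("demo-pool", dev, 32, 4, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* hardware sees 'handle' (bus address), the CPU writes via 'vaddr' */

	dma_pool_free(pool, vaddr, handle);
	dma_pool_destroy(pool);
	return 0;
}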
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 1abeadc8ab79..68ecb5630ad5 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -611,7 +611,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (IS_ERR(priv->io))
return PTR_ERR(priv->io);
- priv->devtype = (enum rcar_i2c_type)of_match_device(rcar_i2c_dt_ids, dev)->data;
+ priv->devtype = (enum rcar_i2c_type)of_device_get_match_data(dev);
init_waitqueue_head(&priv->wait);
adap = &priv->adap;
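This conversion (repeated in the tegra hunk below) relies on of_device_get_match_data() returning the matched table entry's ->data, or NULL when nothing matches. A rough equivalent in terms of the older API, as a sketch only; the real helper takes the match table from dev->driver rather than as a parameter:

#include <linux/of_device.h>

/* Illustrative stand-in: the real of_device_get_match_data(dev) looks up
 * the table via dev->driver->of_match_table instead of taking it here. */
static const void *get_match_data_sketch(const struct of_device_id *table,
					 struct device *dev)
{
	const struct of_device_id *match = of_match_device(table, dev);

	return match ? match->data : NULL;
}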
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9096d17beb5b..3dcc5f3f26cb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = {
static const struct of_device_id rk3x_i2c_match[] = {
{ .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
{ .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+ { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
{ .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
{},
};
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index a0522fcc4ff8..929185a7296c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -805,9 +805,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->hw = &tegra20_i2c_hw;
if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_device(tegra_i2c_of_match, &pdev->dev);
- i2c_dev->hw = match->data;
+ i2c_dev->hw = of_device_get_match_data(&pdev->dev);
i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
"nvidia,tegra20-i2c-dvc");
} else if (pdev->id == 3) {
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 6efd20095d5d..74f54f2f471f 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -37,6 +37,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#define DRIVER_NAME "xiic-i2c"
@@ -66,6 +68,7 @@ enum xiic_endian {
* @endianness: big/little-endian byte order
*/
struct xiic_i2c {
+ struct device *dev;
void __iomem *base;
wait_queue_head_t wait;
struct i2c_adapter adap;
@@ -77,6 +80,7 @@ struct xiic_i2c {
struct i2c_msg *rx_msg;
int rx_pos;
enum xiic_endian endianness;
+ struct clk *clk;
};
@@ -164,6 +168,7 @@ struct xiic_i2c {
#define XIIC_RESET_MASK 0xAUL
+#define XIIC_PM_TIMEOUT 1000 /* ms */
/*
* The following constant is used for the device global interrupt enable
* register, to enable all interrupts for the device, this is the only bit
@@ -676,9 +681,13 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
+ err = pm_runtime_get_sync(i2c->dev);
+ if (err < 0)
+ return err;
+
err = xiic_busy(i2c);
if (err)
- return err;
+ goto out;
i2c->tx_msg = msgs;
i2c->nmsgs = num;
@@ -686,14 +695,20 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
xiic_start_xfer(i2c);
if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
- (i2c->state == STATE_DONE), HZ))
- return (i2c->state == STATE_DONE) ? num : -EIO;
- else {
+ (i2c->state == STATE_DONE), HZ)) {
+ err = (i2c->state == STATE_DONE) ? num : -EIO;
+ goto out;
+ } else {
i2c->tx_msg = NULL;
i2c->rx_msg = NULL;
i2c->nmsgs = 0;
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto out;
}
+out:
+ pm_runtime_mark_last_busy(i2c->dev);
+ pm_runtime_put_autosuspend(i2c->dev);
+ return err;
}
static u32 xiic_func(struct i2c_adapter *adap)
@@ -748,13 +763,28 @@ static int xiic_i2c_probe(struct platform_device *pdev)
mutex_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
+ i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c->clk)) {
+ dev_err(&pdev->dev, "input clock not found.\n");
+ return PTR_ERR(i2c->clk);
+ }
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ i2c->dev = &pdev->dev;
+ pm_runtime_enable(i2c->dev);
+ pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(i2c->dev);
+ pm_runtime_set_active(i2c->dev);
ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
xiic_process, IRQF_ONESHOT,
pdev->name, i2c);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- return ret;
+ goto err_clk_dis;
}
/*
@@ -776,7 +806,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to add adapter\n");
xiic_deinit(i2c);
- return ret;
+ goto err_clk_dis;
}
if (pdata) {
@@ -786,16 +816,30 @@ static int xiic_i2c_probe(struct platform_device *pdev)
}
return 0;
+
+err_clk_dis:
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(i2c->clk);
+ return ret;
}
static int xiic_i2c_remove(struct platform_device *pdev)
{
struct xiic_i2c *i2c = platform_get_drvdata(pdev);
+ int ret;
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
xiic_deinit(i2c);
+ clk_disable_unprepare(i2c->clk);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -808,12 +852,42 @@ static const struct of_device_id xiic_of_match[] = {
MODULE_DEVICE_TABLE(of, xiic_of_match);
#endif
+static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xiic_i2c *i2c = platform_get_drvdata(pdev);
+
+ clk_disable(i2c->clk);
+
+ return 0;
+}
+
+static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xiic_i2c *i2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_enable(i2c->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops xiic_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(cdns_i2c_runtime_suspend,
+ cdns_i2c_runtime_resume, NULL)
+};
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = xiic_i2c_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(xiic_of_match),
+ .pm = &xiic_dev_pm_ops,
},
};
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 90e322959303..e33022e2d459 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -12,11 +12,11 @@
* GNU General Public License for more details.
*/
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
#include <linux/rwsem.h>
+#include <linux/slab.h>
#include "i2c-core.h"
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index ffe715d346d8..e584d88ee337 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -28,32 +28,32 @@
*/
#include <dt-bindings/i2c/i2c.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/acpi.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio.h>
-#include <linux/slab.h>
+#include <linux/hardirq.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/completion.h>
-#include <linux/hardirq.h>
-#include <linux/irqflags.h>
-#include <linux/rwsem.h>
-#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
-#include <linux/acpi.h>
-#include <linux/jump_label.h>
-#include <asm/uaccess.h>
-#include <linux/err.h>
#include <linux/property.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
#include "i2c-core.h"
@@ -73,6 +73,7 @@ static struct device_type i2c_client_type;
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
static struct static_key i2c_trace_msg = STATIC_KEY_INIT_FALSE;
+static bool is_registered;
void i2c_transfer_trace_reg(void)
{
@@ -524,22 +525,16 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
-
-/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = to_i2c_client(dev);
int rc;
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
- if (add_uevent_var(env, "MODALIAS=%s%s",
- I2C_MODULE_PREFIX, client->name))
- return -ENOMEM;
- dev_dbg(dev, "uevent\n");
- return 0;
+ return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name);
}
/* i2c bus recovery routines */
@@ -1529,7 +1524,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
int res = 0;
/* Can't register until after driver model init */
- if (unlikely(WARN_ON(!i2c_bus_type.p))) {
+ if (WARN_ON(!is_registered)) {
res = -EAGAIN;
goto out_list;
}
@@ -1926,7 +1921,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
int res;
/* Can't register until after driver model init */
- if (unlikely(WARN_ON(!i2c_bus_type.p)))
+ if (WARN_ON(!is_registered))
return -EAGAIN;
/* add the driver to the list of i2c drivers in the driver core */
@@ -2104,6 +2099,9 @@ static int __init i2c_init(void)
retval = bus_register(&i2c_bus_type);
if (retval)
return retval;
+
+ is_registered = true;
+
#ifdef CONFIG_I2C_COMPAT
i2c_adapter_compat_class = class_compat_register("i2c-adapter");
if (!i2c_adapter_compat_class) {
@@ -2125,6 +2123,7 @@ class_err:
class_compat_unregister(i2c_adapter_compat_class);
bus_err:
#endif
+ is_registered = false;
bus_unregister(&i2c_bus_type);
return retval;
}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 2413ec9f8207..0b1108d3c2f3 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -22,17 +22,17 @@
/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/device.h>
-#include <linux/notifier.h>
#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/i2c.h>
#include <linux/i2c-dev.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
/*
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 00fc5b1c7b66..d4022878b2f0 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -19,13 +19,13 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/acpi.h>
+#include <linux/slab.h>
/* multiplexer per channel data */
struct i2c_mux_priv {
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 94765a81970d..abb55d3e76f3 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -15,14 +15,14 @@
* GNU General Public License for more details.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
#include <linux/i2c.h>
#include <linux/i2c-smbus.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
struct i2c_smbus_alert {
unsigned int alert_edge_triggered:1;
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index af2a94e1140b..06af583d5101 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -17,13 +17,13 @@
#define DEBUG 1
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#define MAX_CHIPS 10
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index f06b0e24673b..e280c8ecc0b5 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -72,4 +72,13 @@ config I2C_MUX_REG
This driver can also be built as a module. If so, the module
will be called i2c-mux-reg.
+config I2C_DEMUX_PINCTRL
+ tristate "pinctrl-based I2C demultiplexer"
+ depends on PINCTRL && OF
+ select OF_DYNAMIC
+ help
+ If you say yes to this option, support will be included for an I2C
+ demultiplexer that uses the pinctrl subsystem. This is useful if you
+ want to switch to a different I2C master at run-time, depending on
+ the features needed.
+
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index e89799b76a92..7c267c29b191 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -3,6 +3,8 @@
obj-$(CONFIG_I2C_ARB_GPIO_CHALLENGE) += i2c-arb-gpio-challenge.o
+obj-$(CONFIG_I2C_DEMUX_PINCTRL) += i2c-demux-pinctrl.o
+
obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
new file mode 100644
index 000000000000..8de073aed001
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -0,0 +1,291 @@
+/*
+ * Pinctrl based I2C DeMultiplexer
+ *
+ * Copyright (C) 2015-16 by Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
+ * Copyright (C) 2015-16 by Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ *
+ * See the bindings doc for DTS setup and the sysfs doc for usage information.
+ * (look for filenames containing 'i2c-demux-pinctrl' in Documentation/)
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+struct i2c_demux_pinctrl_chan {
+ struct device_node *parent_np;
+ struct i2c_adapter *parent_adap;
+ struct of_changeset chgset;
+};
+
+struct i2c_demux_pinctrl_priv {
+ int cur_chan;
+ int num_chan;
+ struct device *dev;
+ const char *bus_name;
+ struct i2c_adapter cur_adap;
+ struct i2c_algorithm algo;
+ struct i2c_demux_pinctrl_chan chan[];
+};
+
+static struct property status_okay = { .name = "status", .length = 3, .value = "ok" };
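+/*
+ * Each channel carries an of_changeset that flips the selected parent
+ * adapter's "status" property to "ok" (the length of 3 above includes the
+ * NUL terminator) while that channel is active; reverting the changeset
+ * on deactivation restores the previous status, so only one parent node
+ * is enabled at a time.
+ */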
+
+static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
+ struct i2c_adapter *parent = priv->chan[priv->cur_chan].parent_adap;
+
+ return __i2c_transfer(parent, msgs, num);
+}
+
+static u32 i2c_demux_functionality(struct i2c_adapter *adap)
+{
+ struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
+ struct i2c_adapter *parent = priv->chan[priv->cur_chan].parent_adap;
+
+ return parent->algo->functionality(parent);
+}
+
+static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 new_chan)
+{
+ struct i2c_adapter *adap;
+ struct pinctrl *p;
+ int ret;
+
+ ret = of_changeset_apply(&priv->chan[new_chan].chgset);
+ if (ret)
+ goto err;
+
+ adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
+ if (!adap) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
+ goto err_with_put;
+ }
+
+ priv->chan[new_chan].parent_adap = adap;
+ priv->cur_chan = new_chan;
+
+ /* Now fill out current adapter structure. cur_chan must be up to date */
+ priv->algo.master_xfer = i2c_demux_master_xfer;
+ priv->algo.functionality = i2c_demux_functionality;
+
+ snprintf(priv->cur_adap.name, sizeof(priv->cur_adap.name),
+ "i2c-demux (master i2c-%d)", i2c_adapter_id(adap));
+ priv->cur_adap.owner = THIS_MODULE;
+ priv->cur_adap.algo = &priv->algo;
+ priv->cur_adap.algo_data = priv;
+ priv->cur_adap.dev.parent = priv->dev;
+ priv->cur_adap.class = adap->class;
+ priv->cur_adap.retries = adap->retries;
+ priv->cur_adap.timeout = adap->timeout;
+ priv->cur_adap.quirks = adap->quirks;
+ priv->cur_adap.dev.of_node = priv->dev->of_node;
+ ret = i2c_add_adapter(&priv->cur_adap);
+ if (ret < 0)
+ goto err_with_put;
+
+ return 0;
+
+ err_with_put:
+ i2c_put_adapter(adap);
+ err:
+ dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
+ return ret;
+}
+
+static int i2c_demux_deactivate_master(struct i2c_demux_pinctrl_priv *priv)
+{
+ int ret, cur = priv->cur_chan;
+
+ if (cur < 0)
+ return 0;
+
+ i2c_del_adapter(&priv->cur_adap);
+ i2c_put_adapter(priv->chan[cur].parent_adap);
+
+ ret = of_changeset_revert(&priv->chan[cur].chgset);
+
+ priv->chan[cur].parent_adap = NULL;
+ priv->cur_chan = -EINVAL;
+
+ return ret;
+}
+
+static int i2c_demux_change_master(struct i2c_demux_pinctrl_priv *priv, u32 new_chan)
+{
+ int ret;
+
+ if (new_chan == priv->cur_chan)
+ return 0;
+
+ ret = i2c_demux_deactivate_master(priv);
+ if (ret)
+ return ret;
+
+ return i2c_demux_activate_master(priv, new_chan);
+}
+
+static ssize_t available_masters_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
+ int count = 0, i;
+
+ for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
+ count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c",
+ i, priv->chan[i].parent_np->full_name,
+ i == priv->num_chan - 1 ? '\n' : ' ');
+
+ return count;
+}
+static DEVICE_ATTR_RO(available_masters);
+
+static ssize_t current_master_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", priv->cur_chan);
+}
+
+static ssize_t current_master_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val >= priv->num_chan)
+ return -EINVAL;
+
+ ret = i2c_demux_change_master(priv, val);
+
+ return ret < 0 ? ret : count;
+}
+static DEVICE_ATTR_RW(current_master);
+
+static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct i2c_demux_pinctrl_priv *priv;
+ int num_chan, i, j, err;
+
+ num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL);
+ if (num_chan < 2) {
+ dev_err(&pdev->dev, "Need at least two I2C masters to switch\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
+ + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
+ if (err)
+ return err;
+
+ for (i = 0; i < num_chan; i++) {
+ struct device_node *adap_np;
+
+ adap_np = of_parse_phandle(np, "i2c-parent", i);
+ if (!adap_np) {
+ dev_err(&pdev->dev, "can't get phandle for parent %d\n", i);
+ err = -ENOENT;
+ goto err_rollback;
+ }
+ priv->chan[i].parent_np = adap_np;
+
+ of_changeset_init(&priv->chan[i].chgset);
+ of_changeset_update_property(&priv->chan[i].chgset, adap_np, &status_okay);
+ }
+
+ priv->num_chan = num_chan;
+ priv->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, priv);
+
+ /* switch to first parent as active master */
+ i2c_demux_activate_master(priv, 0);
+
+ err = device_create_file(&pdev->dev, &dev_attr_available_masters);
+ if (err)
+ goto err_rollback;
+
+ err = device_create_file(&pdev->dev, &dev_attr_current_master);
+ if (err)
+ goto err_rollback_available;
+
+ return 0;
+
+err_rollback_available:
+ device_remove_file(&pdev->dev, &dev_attr_available_masters);
+err_rollback:
+ for (j = 0; j < i; j++) {
+ of_node_put(priv->chan[j].parent_np);
+ of_changeset_destroy(&priv->chan[j].chgset);
+ }
+
+ return err;
+}
+
+static int i2c_demux_pinctrl_remove(struct platform_device *pdev)
+{
+ struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ device_remove_file(&pdev->dev, &dev_attr_current_master);
+ device_remove_file(&pdev->dev, &dev_attr_available_masters);
+
+ i2c_demux_deactivate_master(priv);
+
+ for (i = 0; i < priv->num_chan; i++) {
+ of_node_put(priv->chan[i].parent_np);
+ of_changeset_destroy(&priv->chan[i].chgset);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id i2c_demux_pinctrl_of_match[] = {
+ { .compatible = "i2c-demux-pinctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_demux_pinctrl_of_match);
+
+static struct platform_driver i2c_demux_pinctrl_driver = {
+ .driver = {
+ .name = "i2c-demux-pinctrl",
+ .of_match_table = i2c_demux_pinctrl_of_match,
+ },
+ .probe = i2c_demux_pinctrl_probe,
+ .remove = i2c_demux_pinctrl_remove,
+};
+module_platform_driver(i2c_demux_pinctrl_driver);
+
+MODULE_DESCRIPTION("pinctrl-based I2C demux driver");
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-demux-pinctrl");
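For illustration only, driving the current_master attribute above from userspace might look like the following; the sysfs path is an assumption based on the platform device name, not something this patch defines:

#include <stdio.h>

/* Hypothetical usage sketch: pick parent master 1. The sysfs location
 * depends on how the platform device is instantiated on a given board. */
int main(void)
{
	const char *path =
		"/sys/devices/platform/i2c-demux-pinctrl/current_master";
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	fprintf(f, "1\n");	/* validated against num_chan by the store op */
	return fclose(f) ? 1 : 0;
}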
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 696b6c1ec940..f94baadbf424 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -531,14 +531,9 @@ static const struct hpt_info hpt371n = {
.timings = &hpt37x_timings
};
-static int check_in_drive_list(ide_drive_t *drive, const char **list)
+static bool check_in_drive_list(ide_drive_t *drive, const char **list)
{
- char *m = (char *)&drive->id[ATA_ID_PROD];
-
- while (*list)
- if (!strcmp(*list++, m))
- return 1;
- return 0;
+ return match_string(list, -1, (char *)&drive->id[ATA_ID_PROD]) >= 0;
}
static struct hpt_info *hpt3xx_get_info(struct device *dev)
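The rewrite above assumes the match_string() contract: with a length of -1 it scans a NULL-terminated array and returns the matching index, or -EINVAL when nothing matches. A userspace sketch of that contract, for reference only:

#include <string.h>

#define EINVAL_SKETCH 22	/* stand-in for the kernel's EINVAL */

/* Sketch of match_string(): n == -1 means 'stop at the NULL entry'. */
static int match_string_sketch(const char * const *array, int n,
			       const char *string)
{
	int index;

	for (index = 0; n == -1 || index < n; index++) {
		const char *item = array[index];

		if (!item)
			break;
		if (!strcmp(item, string))
			return index;
	}
	return -EINVAL_SKETCH;
}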
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 9f0a48e39b8a..80e933b296f6 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -451,7 +451,7 @@ err_free:
return ret;
}
-static const struct ide_port_info icside_v6_port_info __initconst = {
+static const struct ide_port_info icside_v6_port_info = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 8012e43bf8f6..46427ea01753 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -325,6 +325,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
clk_enable(clk);
rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
/* NOTE: round *down* to meet minimum timings; we count in clocks */
ideclk_period = 1000000000UL / rate;
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 9ad014a7afc7..b33646be699c 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -28,7 +28,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#endif
#define DRV_NAME "pdc202xx_new"
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 96a345248224..7f0434f7e486 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -40,7 +40,6 @@
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/ide.h>
-#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/sections.h>
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cd4510a63375..c6935de425fa 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -65,7 +65,7 @@
#include <asm/mwait.h>
#include <asm/msr.h>
-#define INTEL_IDLE_VERSION "0.4"
+#define INTEL_IDLE_VERSION "0.4.1"
#define PREFIX "intel_idle: "
static struct cpuidle_driver intel_idle_driver = {
@@ -660,6 +660,35 @@ static struct cpuidle_state skl_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state skx_cstates[] = {
+ {
+ .name = "C1-SKX",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C1E-SKX",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-SKX",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 600,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state atom_cstates[] = {
{
.name = "C1E-ATM",
@@ -716,6 +745,26 @@ static struct cpuidle_state avn_cstates[] = {
{
.enter = NULL }
};
+static struct cpuidle_state knl_cstates[] = {
+ {
+ .name = "C1-KNL",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .name = "C6-KNL",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 120,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .enter = NULL }
+};
/**
* intel_idle
@@ -798,8 +847,11 @@ static int cpu_hotplug_notify(struct notifier_block *n,
* driver in this case
*/
dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
- if (!dev->registered)
- intel_idle_cpu_init(hotcpu);
+ if (dev->registered)
+ break;
+
+ if (intel_idle_cpu_init(hotcpu))
+ return NOTIFY_BAD;
break;
}
@@ -884,12 +936,20 @@ static const struct idle_cpu idle_cpu_skl = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_skx = {
+ .state_table = skx_cstates,
+ .disable_promotion_to_c1e = true,
+};
static const struct idle_cpu idle_cpu_avn = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_knl = {
+ .state_table = knl_cstates,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -921,6 +981,10 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x56, idle_cpu_bdw),
ICPU(0x4e, idle_cpu_skl),
ICPU(0x5e, idle_cpu_skl),
+ ICPU(0x8e, idle_cpu_skl),
+ ICPU(0x9e, idle_cpu_skl),
+ ICPU(0x55, idle_cpu_skx),
+ ICPU(0x57, idle_cpu_knl),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -962,22 +1026,15 @@ static int __init intel_idle_probe(void)
icpu = (const struct idle_cpu *)id->driver_data;
cpuidle_state_table = icpu->state_table;
- if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
- lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
- else
- on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
-
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
- pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
- lapic_timer_reliable_states);
return 0;
}
/*
* intel_idle_cpuidle_devices_uninit()
- * unregister, free cpuidle_devices
+ * Unregisters the cpuidle devices.
*/
static void intel_idle_cpuidle_devices_uninit(void)
{
@@ -988,49 +1045,102 @@ static void intel_idle_cpuidle_devices_uninit(void)
dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
cpuidle_unregister_device(dev);
}
-
- free_percpu(intel_idle_cpuidle_devices);
- return;
}
/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
+ * ivt_idle_state_table_update(void)
*
- * Currently used to access tuned IVT multi-socket targets
+ * Tune IVT multi-socket targets
* Assumption: num_sockets == (max_package_num + 1)
*/
-void intel_idle_state_table_update(void)
+static void ivt_idle_state_table_update(void)
{
/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
- if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
- int cpu, package_num, num_sockets = 1;
-
- for_each_online_cpu(cpu) {
- package_num = topology_physical_package_id(cpu);
- if (package_num + 1 > num_sockets) {
- num_sockets = package_num + 1;
-
- if (num_sockets > 4) {
- cpuidle_state_table = ivt_cstates_8s;
- return;
- }
+ int cpu, package_num, num_sockets = 1;
+
+ for_each_online_cpu(cpu) {
+ package_num = topology_physical_package_id(cpu);
+ if (package_num + 1 > num_sockets) {
+ num_sockets = package_num + 1;
+
+ if (num_sockets > 4) {
+ cpuidle_state_table = ivt_cstates_8s;
+ return;
}
}
+ }
+
+ if (num_sockets > 2)
+ cpuidle_state_table = ivt_cstates_4s;
- if (num_sockets > 2)
- cpuidle_state_table = ivt_cstates_4s;
- /* else, 1 and 2 socket systems use default ivt_cstates */
+ /* else, 1 and 2 socket systems use default ivt_cstates */
+}
+
+/*
+ * sklh_idle_state_table_update(void)
+ *
+ * On SKL-H (model 0x5e) disable C8 and C9 if:
+ * C10 is enabled and SGX disabled
+ */
+static void sklh_idle_state_table_update(void)
+{
+ unsigned long long msr;
+ unsigned int eax, ebx, ecx, edx;
+
+ /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
+ if (max_cstate <= 7)
+ return;
+
+ /* if PC10 not present in CPUID.MWAIT.EDX */
+ if ((mwait_substates & (0xF << 28)) == 0)
+ return;
+
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
+
+ /* PC10 is not enabled in PKG C-state limit */
+ if ((msr & 0xF) != 8)
+ return;
+
+ ecx = 0;
+ cpuid(7, &eax, &ebx, &ecx, &edx);
+
+ /* if SGX is present */
+ if (ebx & (1 << 2)) {
+
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+
+ /* if SGX is enabled */
+ if (msr & (1 << 18))
+ return;
+ }
+
+ skl_cstates[5].disabled = 1; /* C8-SKL */
+ skl_cstates[6].disabled = 1; /* C9-SKL */
+}
+
+/*
+ * intel_idle_state_table_update()
+ *
+ * Update the default state_table for this CPU-id
+ */
+
+static void intel_idle_state_table_update(void)
+{
+ switch (boot_cpu_data.x86_model) {
+
+ case 0x3e: /* IVT */
+ ivt_idle_state_table_update();
+ break;
+ case 0x5e: /* SKL-H */
+ sklh_idle_state_table_update();
+ break;
}
- return;
}
/*
* intel_idle_cpuidle_driver_init()
* allocate, initialize cpuidle_states
*/
-static int __init intel_idle_cpuidle_driver_init(void)
+static void __init intel_idle_cpuidle_driver_init(void)
{
int cstate;
struct cpuidle_driver *drv = &intel_idle_driver;
@@ -1063,6 +1173,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
if (num_substates == 0)
continue;
+ /* if state marked as disabled, skip it */
+ if (cpuidle_state_table[cstate].disabled != 0) {
+ pr_debug(PREFIX "state %s is disabled\n",
+ cpuidle_state_table[cstate].name);
+ continue;
+ }
+
if (((mwait_cstate + 1) > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
@@ -1074,18 +1192,10 @@ static int __init intel_idle_cpuidle_driver_init(void)
drv->state_count += 1;
}
- if (icpu->auto_demotion_disable_flags)
- on_each_cpu(auto_demotion_disable, NULL, 1);
-
if (icpu->byt_auto_demotion_disable_flag) {
wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}
-
- if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
- on_each_cpu(c1e_promotion_disable, NULL, 1);
-
- return 0;
}
@@ -1104,7 +1214,6 @@ static int intel_idle_cpu_init(int cpu)
if (cpuidle_register_device(dev)) {
pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
- intel_idle_cpuidle_devices_uninit();
return -EIO;
}
@@ -1129,40 +1238,51 @@ static int __init intel_idle_init(void)
if (retval)
return retval;
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
+
intel_idle_cpuidle_driver_init();
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
drv ? drv->name : "none");
+ free_percpu(intel_idle_cpuidle_devices);
return retval;
}
- intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
- return -ENOMEM;
-
cpu_notifier_register_begin();
for_each_online_cpu(i) {
retval = intel_idle_cpu_init(i);
if (retval) {
+ intel_idle_cpuidle_devices_uninit();
cpu_notifier_register_done();
cpuidle_unregister_driver(&intel_idle_driver);
+ free_percpu(intel_idle_cpuidle_devices);
return retval;
}
}
__register_cpu_notifier(&cpu_hotplug_notifier);
+ if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
+ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ else
+ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+
cpu_notifier_register_done();
+ pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
+ lapic_timer_reliable_states);
+
return 0;
}
static void __exit intel_idle_exit(void)
{
- intel_idle_cpuidle_devices_uninit();
- cpuidle_unregister_driver(&intel_idle_driver);
+ struct cpuidle_device *dev;
+ int i;
cpu_notifier_register_begin();
@@ -1170,9 +1290,15 @@ static void __exit intel_idle_exit(void)
on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
__unregister_cpu_notifier(&cpu_hotplug_notifier);
+ for_each_possible_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+ cpuidle_unregister_device(dev);
+ }
+
cpu_notifier_register_done();
- return;
+ cpuidle_unregister_driver(&intel_idle_driver);
+ free_percpu(intel_idle_cpuidle_devices);
}
module_init(intel_idle_init);
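The reshuffled init above (and its exit counterpart) follows the mirror rule: per-CPU storage is allocated first, the driver registered second, the devices registered last, and every failure path unwinds only what was already set up, in reverse order. A compact self-contained sketch of that shape, with all names hypothetical:

#include <stdlib.h>

/* Hypothetical stand-ins for the three setup stages used above. */
static void *state;
static int register_driver(void)     { return 0; }
static int register_devices(void)    { return 0; }
static void unregister_driver(void)  { }
static void unregister_devices(void) { }

static int demo_init(void)
{
	int err;

	state = malloc(64);		/* 1. per-CPU storage */
	if (!state)
		return -1;

	err = register_driver();	/* 2. cpuidle driver */
	if (err)
		goto free_state;

	err = register_devices();	/* 3. per-CPU devices */
	if (err)
		goto unreg_driver;

	return 0;

unreg_driver:
	unregister_driver();
free_state:
	free(state);
	return err;
}

static void demo_exit(void)
{
	unregister_devices();		/* 3 */
	unregister_driver();		/* 2 */
	free(state);			/* 1 */
}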
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 833ea9dd4464..b0d3ecf3318b 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -143,7 +143,7 @@ config MMA8452
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the following Freescale 3-axis
- accelerometers: MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC.
+ accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC.
To compile this driver as a module, choose M here: the module
will be called mma8452.
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index c73331f7782b..2072a31e813b 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
{
int ret;
int axis = chan->scan_index;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmc150_accel_set_power_state(data, true);
@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
}
ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
- &raw_val, 2);
+ &raw_val, sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
}
- *val = sign_extend32(raw_val >> chan->scan_type.shift,
+ *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
chan->scan_type.realbits - 1);
ret = bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
.realbits = (bits), \
.storagebits = 16, \
.shift = 16 - (bits), \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmc150_accel_event, \
.num_event_specs = 1 \
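The __le16 fix above matters on big-endian hosts: the sensor delivers little-endian samples, so the raw value must be converted to CPU order before the shift and sign extension. A self-contained worked example with an invented 12-bit reading:

#include <stdint.h>
#include <stdio.h>

/* Open-coded equivalent of the kernel's sign_extend32(value, index). */
static int32_t sign_extend32_sketch(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint8_t buf[2] = { 0x40, 0x80 };	  /* little-endian 0x8040 */
	uint16_t raw = buf[0] | (buf[1] << 8);	  /* le16_to_cpu equivalent */
	int32_t val = sign_extend32_sketch(raw >> 4, 11); /* 12-bit, shift 4 */

	printf("%d\n", val);			  /* prints -2044 */
	return 0;
}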
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index ccc632a7cf01..7f4994f32a90 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1,6 +1,7 @@
/*
* mma8452.c - Support for following Freescale 3-axis accelerometers:
*
+ * MMA8451Q (14 bit)
* MMA8452Q (12 bit)
* MMA8453Q (10 bit)
* MMA8652FC (12 bit)
@@ -15,7 +16,7 @@
*
* 7-bit I2C slave address 0x1c/0x1d (pin selectable)
*
- * TODO: orientation / freefall events, autosleep
+ * TODO: orientation events, autosleep
*/
#include <linux/module.h>
@@ -85,8 +86,9 @@
#define MMA8452_INT_FF_MT BIT(2)
#define MMA8452_INT_TRANS BIT(5)
-#define MMA8452_DEVICE_ID 0x2a
-#define MMA8453_DEVICE_ID 0x3a
+#define MMA8451_DEVICE_ID 0x1a
+#define MMA8452_DEVICE_ID 0x2a
+#define MMA8453_DEVICE_ID 0x3a
#define MMA8652_DEVICE_ID 0x4a
#define MMA8653_DEVICE_ID 0x5a
@@ -416,6 +418,51 @@ fail:
return ret;
}
+/* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */
+static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
+{
+ int val;
+ const struct mma_chip_info *chip = data->chip_info;
+
+ val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ if (val < 0)
+ return val;
+
+ return !(val & MMA8452_FF_MT_CFG_OAE);
+}
+
+static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state)
+{
+ int val;
+ const struct mma_chip_info *chip = data->chip_info;
+
+ if ((state && mma8452_freefall_mode_enabled(data)) ||
+ (!state && !(mma8452_freefall_mode_enabled(data))))
+ return 0;
+
+ val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ if (val < 0)
+ return val;
+
+ if (state) {
+ val |= BIT(idx_x + chip->ev_cfg_chan_shift);
+ val |= BIT(idx_y + chip->ev_cfg_chan_shift);
+ val |= BIT(idx_z + chip->ev_cfg_chan_shift);
+ val &= ~MMA8452_FF_MT_CFG_OAE;
+ } else {
+ val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+ val |= MMA8452_FF_MT_CFG_OAE;
+ }
+
+ val = mma8452_change_config(data, chip->ev_cfg, val);
+ if (val)
+ return val;
+
+ return 0;
+}
+
static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
int val, int val2)
{
@@ -609,12 +656,22 @@ static int mma8452_read_event_config(struct iio_dev *indio_dev,
const struct mma_chip_info *chip = data->chip_info;
int ret;
- ret = i2c_smbus_read_byte_data(data->client,
- data->chip_info->ev_cfg);
- if (ret < 0)
- return ret;
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ return mma8452_freefall_mode_enabled(data);
+ case IIO_EV_DIR_RISING:
+ if (mma8452_freefall_mode_enabled(data))
+ return 0;
+
+ ret = i2c_smbus_read_byte_data(data->client,
+ data->chip_info->ev_cfg);
+ if (ret < 0)
+ return ret;
- return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift));
+ return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift));
+ default:
+ return -EINVAL;
+ }
}
static int mma8452_write_event_config(struct iio_dev *indio_dev,
@@ -627,19 +684,35 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
const struct mma_chip_info *chip = data->chip_info;
int val;
- val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
- if (val < 0)
- return val;
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ return mma8452_set_freefall_mode(data, state);
+ case IIO_EV_DIR_RISING:
+ val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ if (val < 0)
+ return val;
+
+ if (state) {
+ if (mma8452_freefall_mode_enabled(data)) {
+ val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+ val |= MMA8452_FF_MT_CFG_OAE;
+ }
+ val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ } else {
+ if (mma8452_freefall_mode_enabled(data))
+ return 0;
- if (state)
- val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift);
- else
- val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ }
- val |= chip->ev_cfg_ele;
- val |= MMA8452_FF_MT_CFG_OAE;
+ val |= chip->ev_cfg_ele;
- return mma8452_change_config(data, chip->ev_cfg, val);
+ return mma8452_change_config(data, chip->ev_cfg, val);
+ default:
+ return -EINVAL;
+ }
}
static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
@@ -652,6 +725,16 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
if (src < 0)
return;
+ if (mma8452_freefall_mode_enabled(data)) {
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ ts);
+ return;
+ }
+
if (src & data->chip_info->ev_src_xe)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
@@ -745,6 +828,27 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
return 0;
}
+static const struct iio_event_spec mma8452_freefall_event[] = {
+ {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_HIGH_PASS_FILTER_3DB)
+ },
+};
+
+static const struct iio_event_spec mma8652_freefall_event[] = {
+ {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD)
+ },
+};
+
static const struct iio_event_spec mma8452_transient_event[] = {
{
.type = IIO_EV_TYPE_MAG,
@@ -781,6 +885,24 @@ static struct attribute_group mma8452_event_attribute_group = {
.attrs = mma8452_event_attributes,
};
+#define MMA8452_FREEFALL_CHANNEL(modifier) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = modifier, \
+ .scan_index = -1, \
+ .event_spec = mma8452_freefall_event, \
+ .num_event_specs = ARRAY_SIZE(mma8452_freefall_event), \
+}
+
+#define MMA8652_FREEFALL_CHANNEL(modifier) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = modifier, \
+ .scan_index = -1, \
+ .event_spec = mma8652_freefall_event, \
+ .num_event_specs = ARRAY_SIZE(mma8652_freefall_event), \
+}
+
#define MMA8452_CHANNEL(axis, idx, bits) { \
.type = IIO_ACCEL, \
.modified = 1, \
@@ -822,11 +944,20 @@ static struct attribute_group mma8452_event_attribute_group = {
.num_event_specs = ARRAY_SIZE(mma8452_motion_event), \
}
+static const struct iio_chan_spec mma8451_channels[] = {
+ MMA8452_CHANNEL(X, idx_x, 14),
+ MMA8452_CHANNEL(Y, idx_y, 14),
+ MMA8452_CHANNEL(Z, idx_z, 14),
+ IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
+};
+
static const struct iio_chan_spec mma8452_channels[] = {
MMA8452_CHANNEL(X, idx_x, 12),
MMA8452_CHANNEL(Y, idx_y, 12),
MMA8452_CHANNEL(Z, idx_z, 12),
IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
};
static const struct iio_chan_spec mma8453_channels[] = {
@@ -834,6 +965,7 @@ static const struct iio_chan_spec mma8453_channels[] = {
MMA8452_CHANNEL(Y, idx_y, 10),
MMA8452_CHANNEL(Z, idx_z, 10),
IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
};
static const struct iio_chan_spec mma8652_channels[] = {
@@ -841,6 +973,7 @@ static const struct iio_chan_spec mma8652_channels[] = {
MMA8652_CHANNEL(Y, idx_y, 12),
MMA8652_CHANNEL(Z, idx_z, 12),
IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8652_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
};
static const struct iio_chan_spec mma8653_channels[] = {
@@ -848,9 +981,11 @@ static const struct iio_chan_spec mma8653_channels[] = {
MMA8652_CHANNEL(Y, idx_y, 10),
MMA8652_CHANNEL(Z, idx_z, 10),
IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8652_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
};
enum {
+ mma8451,
mma8452,
mma8453,
mma8652,
@@ -858,17 +993,34 @@ enum {
};
static const struct mma_chip_info mma_chip_info_table[] = {
- [mma8452] = {
- .chip_id = MMA8452_DEVICE_ID,
- .channels = mma8452_channels,
- .num_channels = ARRAY_SIZE(mma8452_channels),
+ [mma8451] = {
+ .chip_id = MMA8451_DEVICE_ID,
+ .channels = mma8451_channels,
+ .num_channels = ARRAY_SIZE(mma8451_channels),
/*
* Hardware has fullscale of -2G, -4G, -8G corresponding to
- * raw value -2048 for 12 bit or -512 for 10 bit.
+ * raw value -8192 for 14 bit, -2048 for 12 bit or -512 for 10
+ * bit.
* The userspace interface uses m/s^2 and we declare micro units
- * So scale factor for 12 bit here is given by:
- * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
+ * So the scale factor for 14 bit here is given by:
+ * g * N * 1000000 / 8192 for N = 2, 4, 8 and g = 9.80665
*/
+ .mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
+ .ev_cfg = MMA8452_TRANSIENT_CFG,
+ .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
+ .ev_cfg_chan_shift = 1,
+ .ev_src = MMA8452_TRANSIENT_SRC,
+ .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
+ .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
+ .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
+ .ev_ths = MMA8452_TRANSIENT_THS,
+ .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
+ .ev_count = MMA8452_TRANSIENT_COUNT,
+ },
+ [mma8452] = {
+ .chip_id = MMA8452_DEVICE_ID,
+ .channels = mma8452_channels,
+ .num_channels = ARRAY_SIZE(mma8452_channels),
.mma_scales = { {0, 9577}, {0, 19154}, {0, 38307} },
.ev_cfg = MMA8452_TRANSIENT_CFG,
.ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
@@ -1049,6 +1201,7 @@ static int mma8452_reset(struct i2c_client *client)
}
static const struct of_device_id mma8452_dt_ids[] = {
+ { .compatible = "fsl,mma8451", .data = &mma_chip_info_table[mma8451] },
{ .compatible = "fsl,mma8452", .data = &mma_chip_info_table[mma8452] },
{ .compatible = "fsl,mma8453", .data = &mma_chip_info_table[mma8453] },
{ .compatible = "fsl,mma8652", .data = &mma_chip_info_table[mma8652] },
@@ -1085,6 +1238,7 @@ static int mma8452_probe(struct i2c_client *client,
return ret;
switch (ret) {
+ case MMA8451_DEVICE_ID:
case MMA8452_DEVICE_ID:
case MMA8453_DEVICE_ID:
case MMA8652_DEVICE_ID:
@@ -1190,6 +1344,10 @@ static int mma8452_probe(struct i2c_client *client,
if (ret < 0)
goto buffer_cleanup;
+ ret = mma8452_set_freefall_mode(data, false);
+ if (ret)
+ goto buffer_cleanup;
+
return 0;
buffer_cleanup:
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 70f042797f15..a03a1417dd63 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -67,6 +67,8 @@
#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
#define ST_ACCEL_1_DRDY_IRQ_INT1_MASK 0x10
#define ST_ACCEL_1_DRDY_IRQ_INT2_MASK 0x08
+#define ST_ACCEL_1_IHL_IRQ_ADDR 0x25
+#define ST_ACCEL_1_IHL_IRQ_MASK 0x02
#define ST_ACCEL_1_MULTIREAD_BIT true
/* CUSTOM VALUES FOR SENSOR 2 */
@@ -92,6 +94,8 @@
#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
#define ST_ACCEL_2_DRDY_IRQ_INT1_MASK 0x02
#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
+#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22
+#define ST_ACCEL_2_IHL_IRQ_MASK 0x80
#define ST_ACCEL_2_MULTIREAD_BIT true
/* CUSTOM VALUES FOR SENSOR 3 */
@@ -125,6 +129,8 @@
#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
#define ST_ACCEL_3_DRDY_IRQ_INT1_MASK 0x80
#define ST_ACCEL_3_DRDY_IRQ_INT2_MASK 0x00
+#define ST_ACCEL_3_IHL_IRQ_ADDR 0x23
+#define ST_ACCEL_3_IHL_IRQ_MASK 0x40
#define ST_ACCEL_3_IG1_EN_ADDR 0x23
#define ST_ACCEL_3_IG1_EN_MASK 0x08
#define ST_ACCEL_3_MULTIREAD_BIT false
@@ -169,6 +175,8 @@
#define ST_ACCEL_5_DRDY_IRQ_ADDR 0x22
#define ST_ACCEL_5_DRDY_IRQ_INT1_MASK 0x04
#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20
+#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22
+#define ST_ACCEL_5_IHL_IRQ_MASK 0x80
#define ST_ACCEL_5_IG1_EN_ADDR 0x21
#define ST_ACCEL_5_IG1_EN_MASK 0x08
#define ST_ACCEL_5_MULTIREAD_BIT false
@@ -292,6 +300,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
},
.multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
.bootime = 2,
@@ -355,6 +365,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
},
.multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
.bootime = 2,
@@ -430,6 +442,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
.ig1 = {
.en_addr = ST_ACCEL_3_IG1_EN_ADDR,
.en_mask = ST_ACCEL_3_IG1_EN_MASK,
@@ -537,6 +551,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = ST_ACCEL_5_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_5_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
},
.multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
.bootime = 2, /* guess */
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 283ded7747a9..82c718c515a0 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -131,6 +131,17 @@ config AT91_ADC
To compile this driver as a module, choose M here: the module will be
called at91_adc.
+config AT91_SAMA5D2_ADC
+ tristate "Atmel AT91 SAMA5D2 ADC"
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to build support for the Atmel SAMA5D2 ADC, which is
+ available on the SAMA5D2 SoC family.
+
+ To compile this driver as a module, choose M here: the module will be
+ called at91-sama5d2_adc.
+
config AXP288_ADC
tristate "X-Powers AXP288 ADC driver"
depends on MFD_AXP20X
@@ -184,6 +195,13 @@ config EXYNOS_ADC
To compile this driver as a module, choose M here: the module will be
called exynos_adc.
+config FSL_MX25_ADC
+ tristate "Freescale MX25 ADC driver"
+ depends on MFD_MX25_TSADC
+ help
+ Generic Conversion Queue driver used for general purpose ADC in the
+ MX25. This driver supports single measurements using the MX25 ADC.
+
config HI8435
tristate "Holt Integrated Circuits HI-8435 threshold detector"
select IIO_TRIGGERED_EVENT
@@ -267,11 +285,11 @@ config MCP320X
called mcp320x.
config MCP3422
- tristate "Microchip Technology MCP3422/3/4/6/7/8 driver"
+ tristate "Microchip Technology MCP3421/2/3/4/5/6/7/8 driver"
depends on I2C
help
- Say yes here to build support for Microchip Technology's
- MCP3422, MCP3423, MCP3424, MCP3426, MCP3427 or MCP3428
+ Say yes here to build support for Microchip Technology's MCP3421,
+ MCP3422, MCP3423, MCP3424, MCP3425, MCP3426, MCP3427 or MCP3428
analog to digital converters.
This driver can also be built as a module. If so, the module will be
@@ -287,6 +305,20 @@ config MEN_Z188_ADC
This driver can also be built as a module. If so, the module will be
called men_z188_adc.
+config MXS_LRADC
+ tristate "Freescale i.MX23/i.MX28 LRADC"
+ depends on (ARCH_MXS || COMPILE_TEST) && HAS_IOMEM
+ depends on INPUT
+ select STMP_DEVICE
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the i.MX23/i.MX28 LRADC converter
+ built into these chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mxs-lradc.
+
config NAU7802
tristate "Nuvoton NAU7802 ADC driver"
depends on I2C
@@ -352,6 +384,16 @@ config TI_ADC081C
This driver can also be built as a module. If so, the module will be
called ti-adc081c.
+config TI_ADC0832
+ tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838"
+ depends on SPI
+ help
+ If you say yes here you get support for Texas Instruments ADC0831,
+ ADC0832, ADC0834, ADC0838 ADC chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-adc0832.
+
config TI_ADC128S052
tristate "Texas Instruments ADC128S052/ADC122S021/ADC124S021"
depends on SPI
@@ -362,6 +404,19 @@ config TI_ADC128S052
This driver can also be built as a module. If so, the module will be
called ti-adc128s052.
+config TI_ADS1015
+ tristate "Texas Instruments ADS1015 ADC"
+ depends on I2C && !SENSORS_ADS1015
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADS1015
+ ADC chip.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads1015.
+
config TI_ADS8688
tristate "Texas Instruments ADS8688"
depends on SPI && OF
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 6435780e9b71..0cb79210a4b0 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -14,11 +14,13 @@ obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
obj-$(CONFIG_AD799X) += ad799x.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
+obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
+obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o
obj-$(CONFIG_HI8435) += hi8435.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
@@ -28,13 +30,16 @@ obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
+obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
+obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
+obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
new file mode 100644
index 000000000000..2e154cb51685
--- /dev/null
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -0,0 +1,510 @@
+/*
+ * Atmel ADC driver for SAMA5D2 devices and compatible.
+ *
+ * Copyright (C) 2015 Atmel,
+ * 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+/* Control Register */
+#define AT91_SAMA5D2_CR 0x00
+/* Software Reset */
+#define AT91_SAMA5D2_CR_SWRST BIT(0)
+/* Start Conversion */
+#define AT91_SAMA5D2_CR_START BIT(1)
+/* Touchscreen Calibration */
+#define AT91_SAMA5D2_CR_TSCALIB BIT(2)
+/* Comparison Restart */
+#define AT91_SAMA5D2_CR_CMPRST BIT(4)
+
+/* Mode Register */
+#define AT91_SAMA5D2_MR 0x04
+/* Trigger Selection */
+#define AT91_SAMA5D2_MR_TRGSEL(v) ((v) << 1)
+/* ADTRG */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG0 0
+/* TIOA0 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG1 1
+/* TIOA1 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG2 2
+/* TIOA2 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG3 3
+/* PWM event line 0 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG4 4
+/* PWM event line 1 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG5 5
+/* TIOA3 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG6 6
+/* RTCOUT0 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG7 7
+/* Sleep Mode */
+#define AT91_SAMA5D2_MR_SLEEP BIT(5)
+/* Fast Wake Up */
+#define AT91_SAMA5D2_MR_FWUP BIT(6)
+/* Prescaler Rate Selection */
+#define AT91_SAMA5D2_MR_PRESCAL(v) ((v) << AT91_SAMA5D2_MR_PRESCAL_OFFSET)
+#define AT91_SAMA5D2_MR_PRESCAL_OFFSET 8
+#define AT91_SAMA5D2_MR_PRESCAL_MAX 0xff
+/* Startup Time */
+#define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16)
+/* Analog Change */
+#define AT91_SAMA5D2_MR_ANACH BIT(23)
+/* Tracking Time */
+#define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24)
+#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff
+/* Transfer Time */
+#define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28)
+#define AT91_SAMA5D2_MR_TRANSFER_MAX 0x3
+/* Use Sequence Enable */
+#define AT91_SAMA5D2_MR_USEQ BIT(31)
+
+/* Channel Sequence Register 1 */
+#define AT91_SAMA5D2_SEQR1 0x08
+/* Channel Sequence Register 2 */
+#define AT91_SAMA5D2_SEQR2 0x0c
+/* Channel Enable Register */
+#define AT91_SAMA5D2_CHER 0x10
+/* Channel Disable Register */
+#define AT91_SAMA5D2_CHDR 0x14
+/* Channel Status Register */
+#define AT91_SAMA5D2_CHSR 0x18
+/* Last Converted Data Register */
+#define AT91_SAMA5D2_LCDR 0x20
+/* Interrupt Enable Register */
+#define AT91_SAMA5D2_IER 0x24
+/* Interrupt Disable Register */
+#define AT91_SAMA5D2_IDR 0x28
+/* Interrupt Mask Register */
+#define AT91_SAMA5D2_IMR 0x2c
+/* Interrupt Status Register */
+#define AT91_SAMA5D2_ISR 0x30
+/* Last Channel Trigger Mode Register */
+#define AT91_SAMA5D2_LCTMR 0x34
+/* Last Channel Compare Window Register */
+#define AT91_SAMA5D2_LCCWR 0x38
+/* Overrun Status Register */
+#define AT91_SAMA5D2_OVER 0x3c
+/* Extended Mode Register */
+#define AT91_SAMA5D2_EMR 0x40
+/* Compare Window Register */
+#define AT91_SAMA5D2_CWR 0x44
+/* Channel Gain Register */
+#define AT91_SAMA5D2_CGR 0x48
+/* Channel Offset Register */
+#define AT91_SAMA5D2_COR 0x4c
+/* Channel Data Register 0 */
+#define AT91_SAMA5D2_CDR0 0x50
+/* Analog Control Register */
+#define AT91_SAMA5D2_ACR 0x94
+/* Touchscreen Mode Register */
+#define AT91_SAMA5D2_TSMR 0xb0
+/* Touchscreen X Position Register */
+#define AT91_SAMA5D2_XPOSR 0xb4
+/* Touchscreen Y Position Register */
+#define AT91_SAMA5D2_YPOSR 0xb8
+/* Touchscreen Pressure Register */
+#define AT91_SAMA5D2_PRESSR 0xbc
+/* Trigger Register */
+#define AT91_SAMA5D2_TRGR 0xc0
+/* Correction Select Register */
+#define AT91_SAMA5D2_COSR 0xd0
+/* Correction Value Register */
+#define AT91_SAMA5D2_CVR 0xd4
+/* Channel Error Correction Register */
+#define AT91_SAMA5D2_CECR 0xd8
+/* Write Protection Mode Register */
+#define AT91_SAMA5D2_WPMR 0xe4
+/* Write Protection Status Register */
+#define AT91_SAMA5D2_WPSR 0xe8
+/* Version Register */
+#define AT91_SAMA5D2_VERSION 0xfc
+
+#define AT91_AT91_SAMA5D2_CHAN(num, addr) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .channel = num, \
+ .address = addr, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .datasheet_name = "CH"#num, \
+ .indexed = 1, \
+ }
+
+#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg)
+#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg)
+
+struct at91_adc_soc_info {
+ unsigned startup_time;
+ unsigned min_sample_rate;
+ unsigned max_sample_rate;
+};
+
+struct at91_adc_state {
+ void __iomem *base;
+ int irq;
+ struct clk *per_clk;
+ struct regulator *reg;
+ struct regulator *vref;
+ int vref_uv;
+ const struct iio_chan_spec *chan;
+ bool conversion_done;
+ u32 conversion_value;
+ struct at91_adc_soc_info soc_info;
+ wait_queue_head_t wq_data_available;
+ /*
+ * lock to prevent concurrent 'single conversion' requests through
+ * sysfs.
+ */
+ struct mutex lock;
+};
+
+static const struct iio_chan_spec at91_adc_channels[] = {
+ AT91_AT91_SAMA5D2_CHAN(0, 0x50),
+ AT91_AT91_SAMA5D2_CHAN(1, 0x54),
+ AT91_AT91_SAMA5D2_CHAN(2, 0x58),
+ AT91_AT91_SAMA5D2_CHAN(3, 0x5c),
+ AT91_AT91_SAMA5D2_CHAN(4, 0x60),
+ AT91_AT91_SAMA5D2_CHAN(5, 0x64),
+ AT91_AT91_SAMA5D2_CHAN(6, 0x68),
+ AT91_AT91_SAMA5D2_CHAN(7, 0x6c),
+ AT91_AT91_SAMA5D2_CHAN(8, 0x70),
+ AT91_AT91_SAMA5D2_CHAN(9, 0x74),
+ AT91_AT91_SAMA5D2_CHAN(10, 0x78),
+ AT91_AT91_SAMA5D2_CHAN(11, 0x7c),
+};
+
+static unsigned at91_adc_startup_time(unsigned startup_time_min,
+ unsigned adc_clk_khz)
+{
+ const unsigned startup_lookup[] = {
+ 0, 8, 16, 24,
+ 64, 80, 96, 112,
+ 512, 576, 640, 704,
+ 768, 832, 896, 960
+ };
+ unsigned ticks_min, i;
+
+ /*
+	 * Since the ADC frequency is checked beforehand, there is no
+	 * reason not to meet the startup time constraint.
+ */
+
+ ticks_min = startup_time_min * adc_clk_khz / 1000;
+ for (i = 0; i < ARRAY_SIZE(startup_lookup); i++)
+ if (startup_lookup[i] > ticks_min)
+ break;
+
+ return i;
+}
+
+static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ unsigned f_per, prescal, startup;
+
+ f_per = clk_get_rate(st->per_clk);
+ prescal = (f_per / (2 * freq)) - 1;
+
+ startup = at91_adc_startup_time(st->soc_info.startup_time,
+ freq / 1000);
+
+ at91_adc_writel(st, AT91_SAMA5D2_MR,
+ AT91_SAMA5D2_MR_TRANSFER(2)
+ | AT91_SAMA5D2_MR_STARTUP(startup)
+ | AT91_SAMA5D2_MR_PRESCAL(prescal));
+
+ dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
+ freq, startup, prescal);
+}
+
+static unsigned at91_adc_get_sample_freq(struct at91_adc_state *st)
+{
+ unsigned f_adc, f_per = clk_get_rate(st->per_clk);
+ unsigned mr, prescal;
+
+ mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
+ prescal = (mr >> AT91_SAMA5D2_MR_PRESCAL_OFFSET)
+ & AT91_SAMA5D2_MR_PRESCAL_MAX;
+ f_adc = f_per / (2 * (prescal + 1));
+
+ return f_adc;
+}
+
+static irqreturn_t at91_adc_interrupt(int irq, void *private)
+{
+ struct iio_dev *indio = private;
+ struct at91_adc_state *st = iio_priv(indio);
+ u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
+ u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR);
+
+ if (status & imr) {
+ st->conversion_value = at91_adc_readl(st, st->chan->address);
+ st->conversion_done = true;
+ wake_up_interruptible(&st->wq_data_available);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int at91_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+
+ st->chan = chan;
+
+ at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+
+ ret = wait_event_interruptible_timeout(st->wq_data_available,
+ st->conversion_done,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ if (ret > 0) {
+ *val = st->conversion_value;
+ ret = IIO_VAL_INT;
+ st->conversion_done = false;
+ }
+
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+
+ mutex_unlock(&st->lock);
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = at91_adc_get_sample_freq(st);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int at91_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ if (val < st->soc_info.min_sample_rate ||
+ val > st->soc_info.max_sample_rate)
+ return -EINVAL;
+
+ at91_adc_setup_samp_freq(st, val);
+
+ return 0;
+}
+
+static const struct iio_info at91_adc_info = {
+ .read_raw = &at91_adc_read_raw,
+ .write_raw = &at91_adc_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int at91_adc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct at91_adc_state *st;
+ struct resource *res;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &at91_adc_info;
+ indio_dev->channels = at91_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(at91_adc_channels);
+
+ st = iio_priv(indio_dev);
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "atmel,min-sample-rate-hz",
+ &st->soc_info.min_sample_rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "invalid or missing value for atmel,min-sample-rate-hz\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "atmel,max-sample-rate-hz",
+ &st->soc_info.max_sample_rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "invalid or missing value for atmel,max-sample-rate-hz\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "atmel,startup-time-ms",
+ &st->soc_info.startup_time);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "invalid or missing value for atmel,startup-time-ms\n");
+ return ret;
+ }
+
+ init_waitqueue_head(&st->wq_data_available);
+ mutex_init(&st->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ st->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(st->base))
+ return PTR_ERR(st->base);
+
+ st->irq = platform_get_irq(pdev, 0);
+ if (st->irq <= 0) {
+ if (!st->irq)
+ st->irq = -ENXIO;
+
+ return st->irq;
+ }
+
+ st->per_clk = devm_clk_get(&pdev->dev, "adc_clk");
+ if (IS_ERR(st->per_clk))
+ return PTR_ERR(st->per_clk);
+
+ st->reg = devm_regulator_get(&pdev->dev, "vddana");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ st->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(st->vref))
+ return PTR_ERR(st->vref);
+
+ ret = devm_request_irq(&pdev->dev, st->irq, at91_adc_interrupt, 0,
+ pdev->dev.driver->name, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->vref);
+ if (ret)
+ goto reg_disable;
+
+ st->vref_uv = regulator_get_voltage(st->vref);
+ if (st->vref_uv <= 0) {
+ ret = -EINVAL;
+ goto vref_disable;
+ }
+
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
+
+ at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
+
+ ret = clk_prepare_enable(st->per_clk);
+ if (ret)
+ goto vref_disable;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto per_clk_disable_unprepare;
+
+ dev_info(&pdev->dev, "version: %x\n",
+ readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
+
+ return 0;
+
+per_clk_disable_unprepare:
+ clk_disable_unprepare(st->per_clk);
+vref_disable:
+ regulator_disable(st->vref);
+reg_disable:
+ regulator_disable(st->reg);
+ return ret;
+}
+
+static int at91_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ clk_disable_unprepare(st->per_clk);
+
+ regulator_disable(st->vref);
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static const struct of_device_id at91_adc_dt_match[] = {
+ {
+ .compatible = "atmel,sama5d2-adc",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, at91_adc_dt_match);
+
+static struct platform_driver at91_adc_driver = {
+ .probe = at91_adc_probe,
+ .remove = at91_adc_remove,
+ .driver = {
+ .name = "at91-sama5d2_adc",
+ .of_match_table = at91_adc_dt_match,
+ },
+};
+module_platform_driver(at91_adc_driver);
+
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_DESCRIPTION("Atmel AT91 SAMA5D2 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 0c904edd6c00..7fd24949c0c1 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -46,7 +46,7 @@ struct axp288_adc_info {
struct regmap *regmap;
};
-static const struct iio_chan_spec const axp288_adc_channels[] = {
+static const struct iio_chan_spec axp288_adc_channels[] = {
{
.indexed = 1,
.type = IIO_TEMP,
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
new file mode 100644
index 000000000000..72b32c1ab257
--- /dev/null
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * This is the driver for the imx25 GCQ (Generic Conversion Queue)
+ * connected to the imx25 ADC.
+ */
+
+#include <dt-bindings/iio/adc/fsl-imx25-gcq.h>
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/imx25-tsadc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define MX25_GCQ_TIMEOUT (msecs_to_jiffies(2000))
+
+static const char * const driver_name = "mx25-gcq";
+
+enum mx25_gcq_cfgs {
+ MX25_CFG_XP = 0,
+ MX25_CFG_YP,
+ MX25_CFG_XN,
+ MX25_CFG_YN,
+ MX25_CFG_WIPER,
+ MX25_CFG_INAUX0,
+ MX25_CFG_INAUX1,
+ MX25_CFG_INAUX2,
+ MX25_NUM_CFGS,
+};
+
+struct mx25_gcq_priv {
+ struct regmap *regs;
+ struct completion completed;
+ struct clk *clk;
+ int irq;
+ struct regulator *vref[4];
+ u32 channel_vref_mv[MX25_NUM_CFGS];
+};
+
+#define MX25_CQG_CHAN(chan, id) {\
+ .type = IIO_VOLTAGE,\
+ .indexed = 1,\
+ .channel = chan,\
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE),\
+ .datasheet_name = id,\
+}
+
+static const struct iio_chan_spec mx25_gcq_channels[MX25_NUM_CFGS] = {
+ MX25_CQG_CHAN(MX25_CFG_XP, "xp"),
+ MX25_CQG_CHAN(MX25_CFG_YP, "yp"),
+ MX25_CQG_CHAN(MX25_CFG_XN, "xn"),
+ MX25_CQG_CHAN(MX25_CFG_YN, "yn"),
+ MX25_CQG_CHAN(MX25_CFG_WIPER, "wiper"),
+ MX25_CQG_CHAN(MX25_CFG_INAUX0, "inaux0"),
+ MX25_CQG_CHAN(MX25_CFG_INAUX1, "inaux1"),
+ MX25_CQG_CHAN(MX25_CFG_INAUX2, "inaux2"),
+};
+
+static const char * const mx25_gcq_refp_names[] = {
+ [MX25_ADC_REFP_YP] = "yp",
+ [MX25_ADC_REFP_XP] = "xp",
+ [MX25_ADC_REFP_INT] = "int",
+ [MX25_ADC_REFP_EXT] = "ext",
+};
+
+static irqreturn_t mx25_gcq_irq(int irq, void *data)
+{
+ struct mx25_gcq_priv *priv = data;
+ u32 stats;
+
+ regmap_read(priv->regs, MX25_ADCQ_SR, &stats);
+
+ if (stats & MX25_ADCQ_SR_EOQ) {
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR,
+ MX25_ADCQ_MR_EOQ_IRQ, MX25_ADCQ_MR_EOQ_IRQ);
+ complete(&priv->completed);
+ }
+
+ /* Disable conversion queue run */
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FQS, 0);
+
+ /* Acknowledge all possible irqs */
+ regmap_write(priv->regs, MX25_ADCQ_SR, MX25_ADCQ_SR_FRR |
+ MX25_ADCQ_SR_FUR | MX25_ADCQ_SR_FOR |
+ MX25_ADCQ_SR_EOQ | MX25_ADCQ_SR_PD);
+
+ return IRQ_HANDLED;
+}
+
+static int mx25_gcq_get_raw_value(struct device *dev,
+ struct iio_chan_spec const *chan,
+ struct mx25_gcq_priv *priv,
+ int *val)
+{
+ long timeout;
+ u32 data;
+
+ /* Setup the configuration we want to use */
+ regmap_write(priv->regs, MX25_ADCQ_ITEM_7_0,
+ MX25_ADCQ_ITEM(0, chan->channel));
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_EOQ_IRQ, 0);
+
+ /* Trigger queue for one run */
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FQS,
+ MX25_ADCQ_CR_FQS);
+
+ timeout = wait_for_completion_interruptible_timeout(
+ &priv->completed, MX25_GCQ_TIMEOUT);
+ if (timeout < 0) {
+ dev_err(dev, "ADC wait for measurement failed\n");
+ return timeout;
+ } else if (timeout == 0) {
+ dev_err(dev, "ADC timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ regmap_read(priv->regs, MX25_ADCQ_FIFO, &data);
+
+ *val = MX25_ADCQ_FIFO_DATA(data);
+
+ return IIO_VAL_INT;
+}
+
+static int mx25_gcq_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct mx25_gcq_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ ret = mx25_gcq_get_raw_value(&indio_dev->dev, chan, priv, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = priv->channel_vref_mv[chan->channel];
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mx25_gcq_iio_info = {
+ .read_raw = mx25_gcq_read_raw,
+};
+
+static const struct regmap_config mx25_gcq_regconfig = {
+ .max_register = 0x5c,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
+ struct mx25_gcq_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ struct device *dev = &pdev->dev;
+ unsigned int refp_used[4] = {};
+ int ret, i;
+
+ /*
+ * Setup all configurations registers with a default conversion
+ * configuration for each input
+ */
+ for (i = 0; i < MX25_NUM_CFGS; ++i)
+ regmap_write(priv->regs, MX25_ADCQ_CFG(i),
+ MX25_ADCQ_CFG_YPLL_OFF |
+ MX25_ADCQ_CFG_XNUR_OFF |
+ MX25_ADCQ_CFG_XPUL_OFF |
+ MX25_ADCQ_CFG_REFP_INT |
+ MX25_ADCQ_CFG_IN(i) |
+ MX25_ADCQ_CFG_REFN_NGND2);
+
+ /*
+ * First get all regulators to store them in channel_vref_mv if
+ * necessary. Later we use that information for proper IIO scale
+ * information.
+ */
+ priv->vref[MX25_ADC_REFP_INT] = NULL;
+ priv->vref[MX25_ADC_REFP_EXT] =
+ devm_regulator_get_optional(&pdev->dev, "vref-ext");
+ priv->vref[MX25_ADC_REFP_XP] =
+ devm_regulator_get_optional(&pdev->dev, "vref-xp");
+ priv->vref[MX25_ADC_REFP_YP] =
+ devm_regulator_get_optional(&pdev->dev, "vref-yp");
+
+ for_each_child_of_node(np, child) {
+ u32 reg;
+ u32 refp = MX25_ADCQ_CFG_REFP_INT;
+ u32 refn = MX25_ADCQ_CFG_REFN_NGND2;
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret) {
+ dev_err(dev, "Failed to get reg property\n");
+ return ret;
+ }
+
+ if (reg >= MX25_NUM_CFGS) {
+ dev_err(dev,
+ "reg value is greater than the number of available configuration registers\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32(child, "fsl,adc-refp", &refp);
+ of_property_read_u32(child, "fsl,adc-refn", &refn);
+
+ switch (refp) {
+ case MX25_ADC_REFP_EXT:
+ case MX25_ADC_REFP_XP:
+ case MX25_ADC_REFP_YP:
+ if (IS_ERR(priv->vref[refp])) {
+ dev_err(dev, "Error, trying to use external voltage reference without a vref-%s regulator.",
+ mx25_gcq_refp_names[refp]);
+ return PTR_ERR(priv->vref[refp]);
+ }
+ priv->channel_vref_mv[reg] =
+ regulator_get_voltage(priv->vref[refp]);
+ /* Conversion from uV to mV */
+ priv->channel_vref_mv[reg] /= 1000;
+ break;
+ case MX25_ADC_REFP_INT:
+ priv->channel_vref_mv[reg] = 2500;
+ break;
+ default:
+ dev_err(dev, "Invalid positive reference %d\n", refp);
+ return -EINVAL;
+ }
+
+ ++refp_used[refp];
+
+ /*
+ * Shift the read values to the correct positions within the
+ * register.
+ */
+ refp = MX25_ADCQ_CFG_REFP(refp);
+ refn = MX25_ADCQ_CFG_REFN(refn);
+
+ if ((refp & MX25_ADCQ_CFG_REFP_MASK) != refp) {
+ dev_err(dev, "Invalid fsl,adc-refp property value\n");
+ return -EINVAL;
+ }
+ if ((refn & MX25_ADCQ_CFG_REFN_MASK) != refn) {
+ dev_err(dev, "Invalid fsl,adc-refn property value\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_CFG(reg),
+ MX25_ADCQ_CFG_REFP_MASK |
+ MX25_ADCQ_CFG_REFN_MASK,
+ refp | refn);
+ }
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_FRST | MX25_ADCQ_CR_QRST,
+ MX25_ADCQ_CR_FRST | MX25_ADCQ_CR_QRST);
+
+ regmap_write(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_PDMSK | MX25_ADCQ_CR_QSM_FQS);
+
+ /* Remove unused regulators */
+ for (i = 0; i != 4; ++i) {
+ if (!refp_used[i]) {
+ if (!IS_ERR_OR_NULL(priv->vref[i]))
+ devm_regulator_put(priv->vref[i]);
+ priv->vref[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int mx25_gcq_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct mx25_gcq_priv *priv;
+ struct mx25_tsadc *tsadc = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *mem;
+ int ret;
+ int i;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ priv->regs = devm_regmap_init_mmio(dev, mem, &mx25_gcq_regconfig);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ return PTR_ERR(priv->regs);
+ }
+
+ init_completion(&priv->completed);
+
+ ret = mx25_gcq_setup_cfgs(pdev, priv);
+ if (ret)
+ return ret;
+
+ for (i = 0; i != 4; ++i) {
+ if (!priv->vref[i])
+ continue;
+
+ ret = regulator_enable(priv->vref[i]);
+ if (ret)
+ goto err_regulator_disable;
+ }
+
+ priv->clk = tsadc->clk;
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ goto err_vref_disable;
+ }
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq <= 0) {
+ dev_err(dev, "Failed to get IRQ\n");
+ ret = priv->irq;
+ if (!ret)
+ ret = -ENXIO;
+ goto err_clk_unprepare;
+ }
+
+ ret = request_irq(priv->irq, mx25_gcq_irq, 0, pdev->name, priv);
+ if (ret) {
+ dev_err(dev, "Failed requesting IRQ\n");
+ goto err_clk_unprepare;
+ }
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->channels = mx25_gcq_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mx25_gcq_channels);
+ indio_dev->info = &mx25_gcq_iio_info;
+ indio_dev->name = driver_name;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register iio device\n");
+ goto err_irq_free;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_irq_free:
+ free_irq(priv->irq, priv);
+err_clk_unprepare:
+ clk_disable_unprepare(priv->clk);
+err_vref_disable:
+ i = 4;
+err_regulator_disable:
+ for (; i-- > 0;) {
+ if (priv->vref[i])
+ regulator_disable(priv->vref[i]);
+ }
+ return ret;
+}
+
+static int mx25_gcq_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct mx25_gcq_priv *priv = iio_priv(indio_dev);
+ int i;
+
+ iio_device_unregister(indio_dev);
+ free_irq(priv->irq, priv);
+ clk_disable_unprepare(priv->clk);
+ for (i = 4; i-- > 0;) {
+ if (priv->vref[i])
+ regulator_disable(priv->vref[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mx25_gcq_ids[] = {
+ { .compatible = "fsl,imx25-gcq", },
+ { /* Sentinel */ }
+};
+
+static struct platform_driver mx25_gcq_driver = {
+ .driver = {
+ .name = "mx25-gcq",
+ .of_match_table = mx25_gcq_ids,
+ },
+ .probe = mx25_gcq_probe,
+ .remove = mx25_gcq_remove,
+};
+module_platform_driver(mx25_gcq_driver);
+
+MODULE_DESCRIPTION("ADC driver for Freescale mx25");
+MODULE_AUTHOR("Markus Pargmann <mpa@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index d803e5018a42..65909d5858b1 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -19,17 +19,18 @@
*
* Configurable 7-bit I2C slave address from 0x40 to 0x4F
*/
-#include <linux/module.h>
-#include <linux/kthread.h>
+
#include <linux/delay.h>
+#include <linux/i2c.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/sysfs.h>
-#include <linux/i2c.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/platform_data/ina2xx.h>
-
#include <linux/util_macros.h>
+#include <linux/platform_data/ina2xx.h>
+
/* INA2XX registers definition */
#define INA2XX_CONFIG 0x00
#define INA2XX_SHUNT_VOLTAGE 0x01 /* readonly */
@@ -38,7 +39,7 @@
#define INA2XX_CURRENT 0x04 /* readonly */
#define INA2XX_CALIBRATION 0x05
-#define INA226_ALERT_MASK 0x06
+#define INA226_ALERT_MASK GENMASK(2, 1)
#define INA226_CVRF BIT(3)
#define INA2XX_MAX_REGISTERS 8
@@ -113,7 +114,7 @@ struct ina2xx_chip_info {
struct mutex state_lock;
unsigned int shunt_resistor;
int avg;
- s64 prev_ns; /* track buffer capture time, check for underruns*/
+ s64 prev_ns; /* track buffer capture time, check for underruns */
int int_time_vbus; /* Bus voltage integration time uS */
int int_time_vshunt; /* Shunt voltage integration time uS */
bool allow_async_readout;
@@ -121,21 +122,21 @@ struct ina2xx_chip_info {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
- .config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
- .shunt_div = 100,
- .bus_voltage_shift = 3,
- .bus_voltage_lsb = 4000,
- .power_lsb = 20000,
- },
+ .config_default = INA219_CONFIG_DEFAULT,
+ .calibration_factor = 40960000,
+ .shunt_div = 100,
+ .bus_voltage_shift = 3,
+ .bus_voltage_lsb = 4000,
+ .power_lsb = 20000,
+ },
[ina226] = {
- .config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
- .shunt_div = 400,
- .bus_voltage_shift = 0,
- .bus_voltage_lsb = 1250,
- .power_lsb = 25000,
- },
+ .config_default = INA226_CONFIG_DEFAULT,
+ .calibration_factor = 5120000,
+ .shunt_div = 400,
+ .bus_voltage_shift = 0,
+ .bus_voltage_lsb = 1250,
+ .power_lsb = 25000,
+ },
};
static int ina2xx_read_raw(struct iio_dev *indio_dev,
@@ -149,7 +150,7 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = regmap_read(chip->regmap, chan->address, &regval);
- if (ret < 0)
+ if (ret)
return ret;
if (is_signed_reg(chan->address))
@@ -251,7 +252,7 @@ static int ina226_set_int_time_vbus(struct ina2xx_chip_info *chip,
return -EINVAL;
bits = find_closest(val_us, ina226_conv_time_tab,
- ARRAY_SIZE(ina226_conv_time_tab));
+ ARRAY_SIZE(ina226_conv_time_tab));
chip->int_time_vbus = ina226_conv_time_tab[bits];
@@ -270,7 +271,7 @@ static int ina226_set_int_time_vshunt(struct ina2xx_chip_info *chip,
return -EINVAL;
bits = find_closest(val_us, ina226_conv_time_tab,
- ARRAY_SIZE(ina226_conv_time_tab));
+ ARRAY_SIZE(ina226_conv_time_tab));
chip->int_time_vshunt = ina226_conv_time_tab[bits];
@@ -285,8 +286,8 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
- int ret;
unsigned int config, tmp;
+ int ret;
if (iio_buffer_enabled(indio_dev))
return -EBUSY;
@@ -294,8 +295,8 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev,
mutex_lock(&chip->state_lock);
ret = regmap_read(chip->regmap, INA2XX_CONFIG, &config);
- if (ret < 0)
- goto _err;
+ if (ret)
+ goto err;
tmp = config;
@@ -310,19 +311,19 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev,
else
ret = ina226_set_int_time_vbus(chip, val2, &tmp);
break;
+
default:
ret = -EINVAL;
}
if (!ret && (tmp != config))
ret = regmap_write(chip->regmap, INA2XX_CONFIG, tmp);
-_err:
+err:
mutex_unlock(&chip->state_lock);
return ret;
}
-
static ssize_t ina2xx_allow_async_readout_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -355,6 +356,7 @@ static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
return -EINVAL;
chip->shunt_resistor = val;
+
return 0;
}
@@ -438,7 +440,6 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned short data[8];
int bit, ret, i = 0;
- unsigned long buffer_us, elapsed_us;
s64 time_a, time_b;
unsigned int alert;
@@ -462,8 +463,6 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
return ret;
alert &= INA266_CVRF;
- trace_printk("Conversion ready: %d\n", !!alert);
-
} while (!alert);
/*
@@ -488,19 +487,14 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
iio_push_to_buffers_with_timestamp(indio_dev,
(unsigned int *)data, time_a);
- buffer_us = (unsigned long)(time_b - time_a) / 1000;
- elapsed_us = (unsigned long)(time_a - chip->prev_ns) / 1000;
-
- trace_printk("uS: elapsed: %lu, buf: %lu\n", elapsed_us, buffer_us);
-
chip->prev_ns = time_a;
- return buffer_us;
+ return (unsigned long)(time_b - time_a) / 1000;
};
static int ina2xx_capture_thread(void *data)
{
- struct iio_dev *indio_dev = (struct iio_dev *)data;
+ struct iio_dev *indio_dev = data;
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned int sampling_us = SAMPLING_PERIOD(chip);
int buffer_us;
@@ -530,12 +524,13 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned int sampling_us = SAMPLING_PERIOD(chip);
- trace_printk("Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n",
- (unsigned int)(*indio_dev->active_scan_mask),
- 1000000/sampling_us, chip->avg);
+ dev_dbg(&indio_dev->dev, "Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n",
+ (unsigned int)(*indio_dev->active_scan_mask),
+ 1000000 / sampling_us, chip->avg);
- trace_printk("Expected work period: %u us\n", sampling_us);
- trace_printk("Async readout mode: %d\n", chip->allow_async_readout);
+ dev_dbg(&indio_dev->dev, "Expected work period: %u us\n", sampling_us);
+ dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
+ chip->allow_async_readout);
chip->prev_ns = iio_get_time_ns();
@@ -575,8 +570,7 @@ static int ina2xx_debug_reg(struct iio_dev *indio_dev,
}
/* Possible integration times for vshunt and vbus */
-static IIO_CONST_ATTR_INT_TIME_AVAIL \
- ("0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244");
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244");
static IIO_DEVICE_ATTR(in_allow_async_readout, S_IRUGO | S_IWUSR,
ina2xx_allow_async_readout_show,
@@ -598,21 +592,23 @@ static const struct attribute_group ina2xx_attribute_group = {
};
static const struct iio_info ina2xx_info = {
- .debugfs_reg_access = &ina2xx_debug_reg,
- .read_raw = &ina2xx_read_raw,
- .write_raw = &ina2xx_write_raw,
- .attrs = &ina2xx_attribute_group,
.driver_module = THIS_MODULE,
+ .attrs = &ina2xx_attribute_group,
+ .read_raw = ina2xx_read_raw,
+ .write_raw = ina2xx_write_raw,
+ .debugfs_reg_access = ina2xx_debug_reg,
};
/* Initialize the configuration and calibration registers. */
static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config)
{
u16 regval;
- int ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
+ int ret;
- if (ret < 0)
+ ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
+ if (ret)
return ret;
+
/*
* Set current LSB to 1mA, shunt is in uOhms
* (equation 13 in datasheet). We hardcode a Current_LSB
@@ -621,7 +617,7 @@ static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config)
* to the user for now.
*/
regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
- chip->shunt_resistor);
+ chip->shunt_resistor);
return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
}
@@ -632,8 +628,8 @@ static int ina2xx_probe(struct i2c_client *client,
struct ina2xx_chip_info *chip;
struct iio_dev *indio_dev;
struct iio_buffer *buffer;
- int ret;
unsigned int val;
+ int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
@@ -641,8 +637,19 @@ static int ina2xx_probe(struct i2c_client *client,
chip = iio_priv(indio_dev);
+ /* This is only used for device removal purposes. */
+ i2c_set_clientdata(client, indio_dev);
+
+ chip->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(chip->regmap);
+ }
+
chip->config = &ina2xx_config[id->driver_data];
+ mutex_init(&chip->state_lock);
+
if (of_property_read_u32(client->dev.of_node,
"shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata =
@@ -658,25 +665,6 @@ static int ina2xx_probe(struct i2c_client *client,
if (ret)
return ret;
- mutex_init(&chip->state_lock);
-
- /* This is only used for device removal purposes. */
- i2c_set_clientdata(client, indio_dev);
-
- indio_dev->name = id->name;
- indio_dev->channels = ina2xx_channels;
- indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels);
-
- indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ina2xx_info;
- indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
-
- chip->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config);
- if (IS_ERR(chip->regmap)) {
- dev_err(&client->dev, "failed to allocate register map\n");
- return PTR_ERR(chip->regmap);
- }
-
/* Patch the current config register with default. */
val = chip->config->config_default;
@@ -687,24 +675,28 @@ static int ina2xx_probe(struct i2c_client *client,
}
ret = ina2xx_init(chip, val);
- if (ret < 0) {
- dev_err(&client->dev, "error configuring the device: %d\n",
- ret);
- return -ENODEV;
+ if (ret) {
+ dev_err(&client->dev, "error configuring the device\n");
+ return ret;
}
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = ina2xx_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels);
+ indio_dev->name = id->name;
+ indio_dev->info = &ina2xx_info;
+ indio_dev->setup_ops = &ina2xx_setup_ops;
+
buffer = devm_iio_kfifo_allocate(&indio_dev->dev);
if (!buffer)
return -ENOMEM;
- indio_dev->setup_ops = &ina2xx_setup_ops;
-
iio_device_attach_buffer(indio_dev, buffer);
return iio_device_register(indio_dev);
}
-
static int ina2xx_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -717,7 +709,6 @@ static int ina2xx_remove(struct i2c_client *client)
INA2XX_MODE_MASK, 0);
}
-
static const struct i2c_device_id ina2xx_id[] = {
{"ina219", ina219},
{"ina220", ina219},
@@ -726,7 +717,6 @@ static const struct i2c_device_id ina2xx_id[] = {
{"ina231", ina226},
{}
};
-
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
static struct i2c_driver ina2xx_driver = {
@@ -737,7 +727,6 @@ static struct i2c_driver ina2xx_driver = {
.remove = ina2xx_remove,
.id_table = ina2xx_id,
};
-
module_i2c_driver(ina2xx_driver);
MODULE_AUTHOR("Marc Titinger <marc.titinger@baylibre.com>");
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 929508e5266c..998dc3caad4c 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1386,7 +1386,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11644] = {
.bits = 12,
- .int_vref_mv = 2048,
+ .int_vref_mv = 4096,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1396,7 +1396,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11645] = {
.bits = 12,
- .int_vref_mv = 4096,
+ .int_vref_mv = 2048,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1406,7 +1406,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11646] = {
.bits = 10,
- .int_vref_mv = 2048,
+ .int_vref_mv = 4096,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1416,7 +1416,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11647] = {
.bits = 10,
- .int_vref_mv = 4096,
+ .int_vref_mv = 2048,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1680,6 +1680,10 @@ static const struct i2c_device_id max1363_id[] = {
{ "max11615", max11615 },
{ "max11616", max11616 },
{ "max11617", max11617 },
+ { "max11644", max11644 },
+ { "max11645", max11645 },
+ { "max11646", max11646 },
+ { "max11647", max11647 },
{}
};
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index d1c05f6eed18..a850ca7d1eda 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -187,26 +187,27 @@ out:
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
}
-#define MCP320X_VOLTAGE_CHANNEL_DIFF(num) \
+#define MCP320X_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
- .channel = (num * 2), \
- .channel2 = (num * 2 + 1), \
- .address = (num * 2), \
+ .channel = (chan1), \
+ .channel2 = (chan2), \
+ .address = (chan1), \
.differential = 1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
}
static const struct iio_chan_spec mcp3201_channels[] = {
- MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
};
static const struct iio_chan_spec mcp3202_channels[] = {
MCP320X_VOLTAGE_CHANNEL(0),
MCP320X_VOLTAGE_CHANNEL(1),
- MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0),
};
static const struct iio_chan_spec mcp3204_channels[] = {
@@ -214,8 +215,10 @@ static const struct iio_chan_spec mcp3204_channels[] = {
MCP320X_VOLTAGE_CHANNEL(1),
MCP320X_VOLTAGE_CHANNEL(2),
MCP320X_VOLTAGE_CHANNEL(3),
- MCP320X_VOLTAGE_CHANNEL_DIFF(0),
- MCP320X_VOLTAGE_CHANNEL_DIFF(1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(2, 3),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(3, 2),
};
static const struct iio_chan_spec mcp3208_channels[] = {
@@ -227,10 +230,14 @@ static const struct iio_chan_spec mcp3208_channels[] = {
MCP320X_VOLTAGE_CHANNEL(5),
MCP320X_VOLTAGE_CHANNEL(6),
MCP320X_VOLTAGE_CHANNEL(7),
- MCP320X_VOLTAGE_CHANNEL_DIFF(0),
- MCP320X_VOLTAGE_CHANNEL_DIFF(1),
- MCP320X_VOLTAGE_CHANNEL_DIFF(2),
- MCP320X_VOLTAGE_CHANNEL_DIFF(3),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(2, 3),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(3, 2),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(4, 5),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(5, 4),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(6, 7),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(7, 6),
};
static const struct iio_info mcp320x_info = {
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 6eca7aea8a37..d7b36efd2f3c 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -1,11 +1,12 @@
/*
- * mcp3422.c - driver for the Microchip mcp3422/3/4/6/7/8 chip family
+ * mcp3422.c - driver for the Microchip mcp3421/2/3/4/5/6/7/8 chip family
*
* Copyright (C) 2013, Angelo Compagnucci
* Author: Angelo Compagnucci <angelo.compagnucci@gmail.com>
*
* Datasheet: http://ww1.microchip.com/downloads/en/devicedoc/22088b.pdf
* http://ww1.microchip.com/downloads/en/DeviceDoc/22226a.pdf
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/22072b.pdf
*
* This driver exports the value of analog input voltage to sysfs, the
* voltage unit is nV.
@@ -338,7 +339,7 @@ static int mcp3422_probe(struct i2c_client *client,
u8 config;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!indio_dev)
@@ -357,6 +358,7 @@ static int mcp3422_probe(struct i2c_client *client,
switch (adc->id) {
case 1:
+ case 5:
indio_dev->channels = mcp3421_channels;
indio_dev->num_channels = ARRAY_SIZE(mcp3421_channels);
break;
@@ -395,6 +397,7 @@ static const struct i2c_device_id mcp3422_id[] = {
{ "mcp3422", 2 },
{ "mcp3423", 3 },
{ "mcp3424", 4 },
+ { "mcp3425", 5 },
{ "mcp3426", 6 },
{ "mcp3427", 7 },
{ "mcp3428", 8 },
@@ -421,5 +424,5 @@ static struct i2c_driver mcp3422_driver = {
module_i2c_driver(mcp3422_driver);
MODULE_AUTHOR("Angelo Compagnucci <angelo.compagnucci@gmail.com>");
-MODULE_DESCRIPTION("Microchip mcp3422/3/4/6/7/8 driver");
+MODULE_DESCRIPTION("Microchip mcp3421/2/3/4/5/6/7/8 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/iio/adc/mxs-lradc.c
index bb1f15224ac8..33051b87aac2 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/iio/adc/mxs-lradc.c
@@ -443,7 +443,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1),
LRADC_CH(ch));
- /* from the datasheet:
+ /*
+ * from the datasheet:
* "Software must clear this register in preparation for a
* multi-cycle accumulation."
*/
@@ -504,7 +505,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch1));
mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch2));
- /* from the datasheet:
+ /*
+ * from the datasheet:
* "Software must clear this register in preparation for a
* multi-cycle accumulation."
*/
@@ -914,7 +916,8 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_TEMP) {
- /* From the datasheet, we have to multiply by 1.012 and
+ /*
+ * From the datasheet, we have to multiply by 1.012 and
* divide by 4
*/
*val = 0;
@@ -929,7 +932,8 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_OFFSET:
if (chan->type == IIO_TEMP) {
- /* The calculated value from the ADC is in Kelvin, we
+ /*
+ * The calculated value from the ADC is in Kelvin, we
* want Celsius for hwmon so the offset is -273.15.
* The offset is applied before scaling so it is
* actually -273.15 * 4 / 1.012 = -1079.644268
@@ -1750,6 +1754,7 @@ static int mxs_lradc_remove(struct platform_device *pdev)
iio_triggered_buffer_cleanup(iio);
clk_disable_unprepare(lradc->clk);
+
return 0;
}
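
As a check on the temperature comment corrected above: the Kelvin-to-Celsius
offset is applied before the 1.012 / 4 scale, so it has to be
-273.15 * 4 / 1.012. Stand-alone arithmetic only, no kernel API involved:

#include <stdio.h>

int main(void)
{
	/* offset applied before the 1.012 / 4 temperature scale */
	double offset = -273.15 * 4 / 1.012;

	/* prints -1079.644269; the driver comment truncates to -1079.644268 */
	printf("offset = %.6f\n", offset);
	return 0;
}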
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index f42eb8a7d21f..2bbf0c521beb 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -534,7 +534,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
}
ret = request_threaded_irq(adc->irq, NULL,
palmas_gpadc_irq,
- IRQF_ONESHOT | IRQF_EARLY_RESUME, dev_name(adc->dev),
+ IRQF_ONESHOT, dev_name(adc->dev),
adc);
if (ret < 0) {
dev_err(adc->dev,
@@ -549,7 +549,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
adc->irq_auto_0 = platform_get_irq(pdev, 1);
ret = request_threaded_irq(adc->irq_auto_0, NULL,
palmas_gpadc_irq_auto,
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas-adc-auto-0", adc);
if (ret < 0) {
dev_err(adc->dev, "request auto0 irq %d failed: %d\n",
@@ -565,7 +565,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
adc->irq_auto_1 = platform_get_irq(pdev, 2);
ret = request_threaded_irq(adc->irq_auto_1, NULL,
palmas_gpadc_irq_auto,
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas-adc-auto-1", adc);
if (ret < 0) {
dev_err(adc->dev, "request auto1 irq %d failed: %d\n",
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 2c8374f86252..ecbc12138d58 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -73,7 +73,7 @@ static int adc081c_probe(struct i2c_client *client,
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
new file mode 100644
index 000000000000..0afeac0c9bad
--- /dev/null
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -0,0 +1,288 @@
+/*
+ * ADC0831/ADC0832/ADC0834/ADC0838 8-bit ADC driver
+ *
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: http://www.ti.com/lit/ds/symlink/adc0832-n.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+
+enum {
+ adc0831,
+ adc0832,
+ adc0834,
+ adc0838,
+};
+
+struct adc0832 {
+ struct spi_device *spi;
+ struct regulator *reg;
+ struct mutex lock;
+ u8 mux_bits;
+
+ u8 tx_buf[2] ____cacheline_aligned;
+ u8 rx_buf[2];
+};
+
+#define ADC0832_VOLTAGE_CHANNEL(chan) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ }
+
+#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (chan1), \
+ .channel2 = (chan2), \
+ .differential = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ }
+
+static const struct iio_chan_spec adc0831_channels[] = {
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+};
+
+static const struct iio_chan_spec adc0832_channels[] = {
+ ADC0832_VOLTAGE_CHANNEL(0),
+ ADC0832_VOLTAGE_CHANNEL(1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+};
+
+static const struct iio_chan_spec adc0834_channels[] = {
+ ADC0832_VOLTAGE_CHANNEL(0),
+ ADC0832_VOLTAGE_CHANNEL(1),
+ ADC0832_VOLTAGE_CHANNEL(2),
+ ADC0832_VOLTAGE_CHANNEL(3),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+};
+
+static const struct iio_chan_spec adc0838_channels[] = {
+ ADC0832_VOLTAGE_CHANNEL(0),
+ ADC0832_VOLTAGE_CHANNEL(1),
+ ADC0832_VOLTAGE_CHANNEL(2),
+ ADC0832_VOLTAGE_CHANNEL(3),
+ ADC0832_VOLTAGE_CHANNEL(4),
+ ADC0832_VOLTAGE_CHANNEL(5),
+ ADC0832_VOLTAGE_CHANNEL(6),
+ ADC0832_VOLTAGE_CHANNEL(7),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6),
+};
+
+static int adc0831_adc_conversion(struct adc0832 *adc)
+{
+ struct spi_device *spi = adc->spi;
+ int ret;
+
+ ret = spi_read(spi, &adc->rx_buf, 2);
+ if (ret)
+ return ret;
+
+ /*
+ * Skip TRI-STATE and a leading zero
+ */
+ return (adc->rx_buf[0] << 2 & 0xff) | (adc->rx_buf[1] >> 6);
+}
+
+static int adc0832_adc_conversion(struct adc0832 *adc, int channel,
+ bool differential)
+{
+ struct spi_device *spi = adc->spi;
+ struct spi_transfer xfer = {
+ .tx_buf = adc->tx_buf,
+ .rx_buf = adc->rx_buf,
+ .len = 2,
+ };
+ int ret;
+
+ if (!adc->mux_bits)
+ return adc0831_adc_conversion(adc);
+
+ /* start bit */
+ adc->tx_buf[0] = 1 << (adc->mux_bits + 1);
+ /* single-ended or differential */
+ adc->tx_buf[0] |= differential ? 0 : (1 << adc->mux_bits);
+ /* odd / sign */
+ adc->tx_buf[0] |= (channel % 2) << (adc->mux_bits - 1);
+ /* select */
+ if (adc->mux_bits > 1)
+ adc->tx_buf[0] |= channel / 2;
+
+ /* align Data output BIT7 (MSB) to 8-bit boundary */
+ adc->tx_buf[0] <<= 1;
+
+ ret = spi_sync_transfer(spi, &xfer, 1);
+ if (ret)
+ return ret;
+
+ return adc->rx_buf[1];
+}
+
+static int adc0832_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *channel, int *value,
+ int *shift, long mask)
+{
+ struct adc0832 *adc = iio_priv(iio);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ *value = adc0832_adc_conversion(adc, channel->channel,
+ channel->differential);
+ mutex_unlock(&adc->lock);
+ if (*value < 0)
+ return *value;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *value = regulator_get_voltage(adc->reg);
+ if (*value < 0)
+ return *value;
+
+ /* convert regulator output voltage to mV */
+ *value /= 1000;
+ *shift = 8;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info adc0832_info = {
+ .read_raw = adc0832_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int adc0832_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adc0832 *adc;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->spi = spi;
+ mutex_init(&adc->lock);
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adc0832_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ switch (spi_get_device_id(spi)->driver_data) {
+ case adc0831:
+ adc->mux_bits = 0;
+ indio_dev->channels = adc0831_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc0831_channels);
+ break;
+ case adc0832:
+ adc->mux_bits = 1;
+ indio_dev->channels = adc0832_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc0832_channels);
+ break;
+ case adc0834:
+ adc->mux_bits = 2;
+ indio_dev->channels = adc0834_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc0834_channels);
+ break;
+ case adc0838:
+ adc->mux_bits = 3;
+ indio_dev->channels = adc0838_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc0838_channels);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adc->reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(adc->reg))
+ return PTR_ERR(adc->reg);
+
+ ret = regulator_enable(adc->reg);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ regulator_disable(adc->reg);
+
+ return ret;
+}
+
+static int adc0832_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adc0832 *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(adc->reg);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id adc0832_dt_ids[] = {
+ { .compatible = "ti,adc0831", },
+ { .compatible = "ti,adc0832", },
+ { .compatible = "ti,adc0834", },
+ { .compatible = "ti,adc0838", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adc0832_dt_ids);
+
+#endif
+
+static const struct spi_device_id adc0832_id[] = {
+ { "adc0831", adc0831 },
+ { "adc0832", adc0832 },
+ { "adc0834", adc0834 },
+ { "adc0838", adc0838 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adc0832_id);
+
+static struct spi_driver adc0832_driver = {
+ .driver = {
+ .name = "adc0832",
+ .of_match_table = of_match_ptr(adc0832_dt_ids),
+ },
+ .probe = adc0832_probe,
+ .remove = adc0832_remove,
+ .id_table = adc0832_id,
+};
+module_spi_driver(adc0832_driver);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("ADC0831/ADC0832/ADC0834/ADC0838 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
new file mode 100644
index 000000000000..73cbf0b54e54
--- /dev/null
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -0,0 +1,612 @@
+/*
+ * ADS1015 - Texas Instruments Analog-to-Digital Converter
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for the ADS1015 ADC; 7-bit I2C slave addresses:
+ * * 0x48 - ADDR connected to Ground
+ * * 0x49 - ADDR connected to Vdd
+ * * 0x4A - ADDR connected to SDA
+ * * 0x4B - ADDR connected to SCL
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#include <linux/i2c/ads1015.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define ADS1015_DRV_NAME "ads1015"
+
+#define ADS1015_CONV_REG 0x00
+#define ADS1015_CFG_REG 0x01
+
+#define ADS1015_CFG_DR_SHIFT 5
+#define ADS1015_CFG_MOD_SHIFT 8
+#define ADS1015_CFG_PGA_SHIFT 9
+#define ADS1015_CFG_MUX_SHIFT 12
+
+#define ADS1015_CFG_DR_MASK GENMASK(7, 5)
+#define ADS1015_CFG_MOD_MASK BIT(8)
+#define ADS1015_CFG_PGA_MASK GENMASK(11, 9)
+#define ADS1015_CFG_MUX_MASK GENMASK(14, 12)
+
+/* device operating modes */
+#define ADS1015_CONTINUOUS 0
+#define ADS1015_SINGLESHOT 1
+
+#define ADS1015_SLEEP_DELAY_MS 2000
+#define ADS1015_DEFAULT_PGA 2
+#define ADS1015_DEFAULT_DATA_RATE 4
+#define ADS1015_DEFAULT_CHAN 0
+
+enum ads1015_channels {
+ ADS1015_AIN0_AIN1 = 0,
+ ADS1015_AIN0_AIN3,
+ ADS1015_AIN1_AIN3,
+ ADS1015_AIN2_AIN3,
+ ADS1015_AIN0,
+ ADS1015_AIN1,
+ ADS1015_AIN2,
+ ADS1015_AIN3,
+ ADS1015_TIMESTAMP,
+};
+
+static const unsigned int ads1015_data_rate[] = {
+ 128, 250, 490, 920, 1600, 2400, 3300, 3300
+};
+
+static const struct {
+ int scale;
+ int uscale;
+} ads1015_scale[] = {
+ {3, 0},
+ {2, 0},
+ {1, 0},
+ {0, 500000},
+ {0, 250000},
+ {0, 125000},
+ {0, 125000},
+ {0, 125000},
+};
+
+#define ADS1015_V_CHAN(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .differential = 1, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+struct ads1015_data {
+ struct regmap *regmap;
+ /*
+ * Protects ADC ops, e.g: concurrent sysfs/buffered
+ * data reads, configuration updates
+ */
+ struct mutex lock;
+ struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+};
+
+static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return (reg == ADS1015_CFG_REG);
+}
+
+static const struct regmap_config ads1015_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = ADS1015_CFG_REG,
+ .writeable_reg = ads1015_is_writeable_reg,
+};
+
+static const struct iio_chan_spec ads1015_channels[] = {
+ ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
+ ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
+ ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
+ ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
+ ADS1015_V_CHAN(0, ADS1015_AIN0),
+ ADS1015_V_CHAN(1, ADS1015_AIN1),
+ ADS1015_V_CHAN(2, ADS1015_AIN2),
+ ADS1015_V_CHAN(3, ADS1015_AIN3),
+ IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
+};
+
+static int ads1015_set_power_state(struct ads1015_data *data, bool on)
+{
+ int ret;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ if (on) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ pm_runtime_put_noidle(dev);
+ } else {
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
+}
+
+static
+int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
+{
+ int ret, pga, dr, conv_time;
+ bool change;
+
+ if (chan < 0 || chan >= ADS1015_CHANNELS)
+ return -EINVAL;
+
+ pga = data->channel_data[chan].pga;
+ dr = data->channel_data[chan].data_rate;
+
+ ret = regmap_update_bits_check(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MUX_MASK |
+ ADS1015_CFG_PGA_MASK,
+ chan << ADS1015_CFG_MUX_SHIFT |
+ pga << ADS1015_CFG_PGA_SHIFT,
+ &change);
+ if (ret < 0)
+ return ret;
+
+ if (change) {
+ conv_time = DIV_ROUND_UP(USEC_PER_SEC, ads1015_data_rate[dr]);
+ usleep_range(conv_time, conv_time + 1);
+ }
+
+ return regmap_read(data->regmap, ADS1015_CONV_REG, val);
+}
+
+static irqreturn_t ads1015_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ads1015_data *data = iio_priv(indio_dev);
+ s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding + 4x s16 timestamp */
+ int chan, ret, res;
+
+ memset(buf, 0, sizeof(buf));
+
+ mutex_lock(&data->lock);
+ chan = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ ret = ads1015_get_adc_result(data, chan, &res);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto err;
+ }
+
+ buf[0] = res;
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ads1015_set_scale(struct ads1015_data *data, int chan,
+ int scale, int uscale)
+{
+ int i, ret, rindex = -1;
+
+ for (i = 0; i < ARRAY_SIZE(ads1015_scale); i++)
+ if (ads1015_scale[i].scale == scale &&
+ ads1015_scale[i].uscale == uscale) {
+ rindex = i;
+ break;
+ }
+ if (rindex < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_PGA_MASK,
+ rindex << ADS1015_CFG_PGA_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ data->channel_data[chan].pga = rindex;
+
+ return 0;
+}
+
+static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
+{
+ int i, ret, rindex = -1;
+
+ for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
+ if (ads1015_data_rate[i] == rate) {
+ rindex = i;
+ break;
+ }
+ if (rindex < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_DR_MASK,
+ rindex << ADS1015_CFG_DR_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ data->channel_data[chan].data_rate = rindex;
+
+ return 0;
+}
+
+static int ads1015_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret, idx;
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ mutex_lock(&data->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ ret = ads1015_set_power_state(data, true);
+ if (ret < 0)
+ break;
+
+ ret = ads1015_get_adc_result(data, chan->address, val);
+ if (ret < 0) {
+ ads1015_set_power_state(data, false);
+ break;
+ }
+
+ /* 12 bit res, D0 is bit 4 in conversion register */
+ *val = sign_extend32(*val >> 4, 11);
+
+ ret = ads1015_set_power_state(data, false);
+ if (ret < 0)
+ break;
+
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ idx = data->channel_data[chan->address].pga;
+ *val = ads1015_scale[idx].scale;
+ *val2 = ads1015_scale[idx].uscale;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ idx = data->channel_data[chan->address].data_rate;
+ *val = ads1015_data_rate[idx];
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->lock);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ads1015_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ads1015_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ ret = ads1015_set_scale(data, chan->address, val, val2);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ads1015_set_data_rate(data, chan->address, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int ads1015_buffer_preenable(struct iio_dev *indio_dev)
+{
+ return ads1015_set_power_state(iio_priv(indio_dev), true);
+}
+
+static int ads1015_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ return ads1015_set_power_state(iio_priv(indio_dev), false);
+}
+
+static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
+ .preenable = ads1015_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = ads1015_buffer_postdisable,
+ .validate_scan_mask = &iio_validate_scan_mask_onehot,
+};
+
+static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
+static IIO_CONST_ATTR(sampling_frequency_available,
+ "128 250 490 920 1600 2400 3300");
+
+static struct attribute *ads1015_attributes[] = {
+ &iio_const_attr_scale_available.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ads1015_attribute_group = {
+ .attrs = ads1015_attributes,
+};
+
+static const struct iio_info ads1015_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = ads1015_read_raw,
+ .write_raw = ads1015_write_raw,
+ .attrs = &ads1015_attribute_group,
+};
+
+#ifdef CONFIG_OF
+static int ads1015_get_channels_config_of(struct i2c_client *client)
+{
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ struct device_node *node;
+
+ if (!client->dev.of_node ||
+ !of_get_next_child(client->dev.of_node, NULL))
+ return -EINVAL;
+
+ for_each_child_of_node(client->dev.of_node, node) {
+ u32 pval;
+ unsigned int channel;
+ unsigned int pga = ADS1015_DEFAULT_PGA;
+ unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
+
+ if (of_property_read_u32(node, "reg", &pval)) {
+ dev_err(&client->dev, "invalid reg on %s\n",
+ node->full_name);
+ continue;
+ }
+
+ channel = pval;
+ if (channel >= ADS1015_CHANNELS) {
+ dev_err(&client->dev,
+ "invalid channel index %d on %s\n",
+ channel, node->full_name);
+ continue;
+ }
+
+ if (!of_property_read_u32(node, "ti,gain", &pval)) {
+ pga = pval;
+ if (pga > 6) {
+ dev_err(&client->dev, "invalid gain on %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(node, "ti,datarate", &pval)) {
+ data_rate = pval;
+ if (data_rate > 7) {
+ dev_err(&client->dev,
+ "invalid data_rate on %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+ }
+
+ data->channel_data[channel].pga = pga;
+ data->channel_data[channel].data_rate = data_rate;
+ }
+
+ return 0;
+}
+#endif
+
+static void ads1015_get_channels_config(struct i2c_client *client)
+{
+ unsigned int k;
+
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ads1015_data *data = iio_priv(indio_dev);
+ struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
+
+ /* prefer platform data */
+ if (pdata) {
+ memcpy(data->channel_data, pdata->channel_data,
+ sizeof(data->channel_data));
+ return;
+ }
+
+#ifdef CONFIG_OF
+ if (!ads1015_get_channels_config_of(client))
+ return;
+#endif
+ /* fall back to the default configuration */
+ for (k = 0; k < ADS1015_CHANNELS; ++k) {
+ data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
+ data->channel_data[k].data_rate = ADS1015_DEFAULT_DATA_RATE;
+ }
+}
+
+static int ads1015_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct ads1015_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ads1015_info;
+ indio_dev->name = ADS1015_DRV_NAME;
+ indio_dev->channels = ads1015_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* keep this ABI the same as that of the hwmon ADS1015 driver */
+ ads1015_get_channels_config(client);
+
+ data->regmap = devm_regmap_init_i2c(client, &ads1015_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "Failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ads1015_trigger_handler,
+ &ads1015_buffer_setup_ops);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret)
+ goto err_buffer_cleanup;
+ pm_runtime_set_autosuspend_delay(&client->dev, ADS1015_SLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to register IIO device\n");
+ goto err_buffer_cleanup;
+ }
+
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int ads1015_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ /* power down single shot mode */
+ return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MOD_MASK,
+ ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT);
+}
+
+#ifdef CONFIG_PM
+static int ads1015_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MOD_MASK,
+ ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT);
+}
+
+static int ads1015_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MOD_MASK,
+ ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
+}
+#endif
+
+static const struct dev_pm_ops ads1015_pm_ops = {
+ SET_RUNTIME_PM_OPS(ads1015_runtime_suspend,
+ ads1015_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id ads1015_id[] = {
+ {"ads1015", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ads1015_id);
+
+static struct i2c_driver ads1015_driver = {
+ .driver = {
+ .name = ADS1015_DRV_NAME,
+ .pm = &ads1015_pm_ops,
+ },
+ .probe = ads1015_probe,
+ .remove = ads1015_remove,
+ .id_table = ads1015_id,
+};
+
+module_i2c_driver(ads1015_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("Texas Instruments ADS1015 ADC driver");
+MODULE_LICENSE("GPL v2");
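For reference (not part of this patch): per IIO convention the value in
millivolts is raw * scale, so an ADS1015 reading can be converted in
userspace as in the sketch below. The iio:device0 path and channel index
are illustrative assumptions.

#include <stdio.h>

int main(void)
{
	FILE *f;
	long raw;
	double scale;

	/* driver-reported raw sample (already sign-extended to 12 bits) */
	f = fopen("/sys/bus/iio/devices/iio:device0/in_voltage0_raw", "r");
	if (!f || fscanf(f, "%ld", &raw) != 1)
		return 1;
	fclose(f);

	/* one of the scale_available values above, in mV per LSB */
	f = fopen("/sys/bus/iio/devices/iio:device0/in_voltage0_scale", "r");
	if (!f || fscanf(f, "%lf", &scale) != 1)
		return 1;
	fclose(f);

	printf("voltage: %.3f mV\n", raw * scale);
	return 0;
}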
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index ebdb838d3a1c..9fabed47053d 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -93,12 +93,7 @@ static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(&queue->buffer);
- dmaengine_terminate_all(dmaengine_buffer->chan);
- /* FIXME: There is a slight chance of a race condition here.
- * dmaengine_terminate_all() does not guarantee that all transfer
- * callbacks have finished running. Need to introduce a
- * dmaengine_terminate_all_sync().
- */
+ dmaengine_terminate_sync(dmaengine_buffer->chan);
iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index f16de61be46d..f73290f84c90 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -4,6 +4,20 @@
menu "Chemical Sensors"
+config ATLAS_PH_SENSOR
+ tristate "Atlas Scientific OEM pH-SM sensor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select IRQ_WORK
+ help
+ Say Y here to build I2C interface support for the Atlas
+ Scientific OEM pH-SM sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atlas-ph-sensor.
+
config IAQCORE
tristate "AMS iAQ-Core VOC sensors"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index 167861fadfab..b02202b41289 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -3,5 +3,6 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-ph-sensor.o
obj-$(CONFIG_IAQCORE) += ams-iaq-core.o
obj-$(CONFIG_VZ89X) += vz89x.o
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
new file mode 100644
index 000000000000..62b37cd8fb56
--- /dev/null
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -0,0 +1,509 @@
+/*
+ * atlas-ph-sensor.c - Support for Atlas Scientific OEM pH-SM sensor
+ *
+ * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/irq_work.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/pm_runtime.h>
+
+#define ATLAS_REGMAP_NAME "atlas_ph_regmap"
+#define ATLAS_DRV_NAME "atlas_ph"
+
+#define ATLAS_REG_DEV_TYPE 0x00
+#define ATLAS_REG_DEV_VERSION 0x01
+
+#define ATLAS_REG_INT_CONTROL 0x04
+#define ATLAS_REG_INT_CONTROL_EN BIT(3)
+
+#define ATLAS_REG_PWR_CONTROL 0x06
+
+#define ATLAS_REG_CALIB_STATUS 0x0d
+#define ATLAS_REG_CALIB_STATUS_MASK 0x07
+#define ATLAS_REG_CALIB_STATUS_LOW BIT(0)
+#define ATLAS_REG_CALIB_STATUS_MID BIT(1)
+#define ATLAS_REG_CALIB_STATUS_HIGH BIT(2)
+
+#define ATLAS_REG_TEMP_DATA 0x0e
+#define ATLAS_REG_PH_DATA 0x16
+
+#define ATLAS_PH_INT_TIME_IN_US 450000
+
+struct atlas_data {
+ struct i2c_client *client;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ struct irq_work work;
+
+ __be32 buffer[4]; /* 32-bit pH data + 32-bit pad + 64-bit timestamp */
+};
+
+static const struct regmap_range atlas_volatile_ranges[] = {
+ regmap_reg_range(ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL),
+ regmap_reg_range(ATLAS_REG_PH_DATA, ATLAS_REG_PH_DATA + 4),
+};
+
+static const struct regmap_access_table atlas_volatile_table = {
+ .yes_ranges = atlas_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(atlas_volatile_ranges),
+};
+
+static const struct regmap_config atlas_regmap_config = {
+ .name = ATLAS_REGMAP_NAME,
+
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .volatile_table = &atlas_volatile_table,
+ .max_register = ATLAS_REG_PH_DATA + 4,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct iio_chan_spec atlas_channels[] = {
+ {
+ .type = IIO_PH,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+ {
+ .type = IIO_TEMP,
+ .address = ATLAS_REG_TEMP_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .output = 1,
+ .scan_index = -1
+ },
+};
+
+static int atlas_set_powermode(struct atlas_data *data, int on)
+{
+ return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on);
+}
+
+static int atlas_set_interrupt(struct atlas_data *data, bool state)
+{
+ return regmap_update_bits(data->regmap, ATLAS_REG_INT_CONTROL,
+ ATLAS_REG_INT_CONTROL_EN,
+ state ? ATLAS_REG_INT_CONTROL_EN : 0);
+}
+
+static int atlas_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct atlas_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(&data->client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&data->client->dev);
+ return ret;
+ }
+
+ return atlas_set_interrupt(data, true);
+}
+
+static int atlas_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct atlas_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_triggered_buffer_predisable(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = atlas_set_interrupt(data, false);
+ if (ret)
+ return ret;
+
+ pm_runtime_mark_last_busy(&data->client->dev);
+ return pm_runtime_put_autosuspend(&data->client->dev);
+}
+
+static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = {
+ .postenable = atlas_buffer_postenable,
+ .predisable = atlas_buffer_predisable,
+};
+
+static void atlas_work_handler(struct irq_work *work)
+{
+ struct atlas_data *data = container_of(work, struct atlas_data, work);
+
+ iio_trigger_poll(data->trig);
+}
+
+static irqreturn_t atlas_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct atlas_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
+ (u8 *) &data->buffer, sizeof(data->buffer[0]));
+
+ if (!ret)
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t atlas_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct atlas_data *data = iio_priv(indio_dev);
+
+ irq_work_queue(&data->work);
+
+ return IRQ_HANDLED;
+}
+
+static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val)
+{
+ struct device *dev = &data->client->dev;
+ int suspended = pm_runtime_suspended(dev);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ if (suspended)
+ usleep_range(ATLAS_PH_INT_TIME_IN_US,
+ ATLAS_PH_INT_TIME_IN_US + 100000);
+
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
+ (u8 *) val, sizeof(*val));
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int atlas_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct atlas_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ int ret;
+ __be32 reg;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ (u8 *) &reg, sizeof(reg));
+ break;
+ case IIO_PH:
+ mutex_lock(&indio_dev->mlock);
+
+ if (iio_buffer_enabled(indio_dev))
+ ret = -EBUSY;
+ else
+ ret = atlas_read_ph_measurement(data, &reg);
+
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ *val = be32_to_cpu(reg);
+ ret = IIO_VAL_INT;
+ }
+ return ret;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = 1; /* 0.01 */
+ *val2 = 100;
+ break;
+ case IIO_PH:
+ *val = 1; /* 0.001 */
+ *val2 = 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+}
+
+static int atlas_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct atlas_data *data = iio_priv(indio_dev);
+ __be32 reg = cpu_to_be32(val);
+
+ if (val2 != 0 || val < 0 || val > 20000)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_TEMP)
+ return -EINVAL;
+
+ return regmap_bulk_write(data->regmap, chan->address,
+ &reg, sizeof(reg));
+}
+
+static const struct iio_info atlas_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = atlas_read_raw,
+ .write_raw = atlas_write_raw,
+};
+
+static int atlas_check_calibration(struct atlas_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, ATLAS_REG_CALIB_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & ATLAS_REG_CALIB_STATUS_MASK)) {
+ dev_warn(dev, "device has not been calibrated\n");
+ return 0;
+ }
+
+ if (!(val & ATLAS_REG_CALIB_STATUS_LOW))
+ dev_warn(dev, "device missing low point calibration\n");
+
+ if (!(val & ATLAS_REG_CALIB_STATUS_MID))
+ dev_warn(dev, "device missing mid point calibration\n");
+
+ if (!(val & ATLAS_REG_CALIB_STATUS_HIGH))
+ dev_warn(dev, "device missing high point calibration\n");
+
+ return 0;
+}
+
+static int atlas_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atlas_data *data;
+ struct iio_trigger *trig;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &atlas_info;
+ indio_dev->name = ATLAS_DRV_NAME;
+ indio_dev->channels = atlas_channels;
+ indio_dev->num_channels = ARRAY_SIZE(atlas_channels);
+ indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &client->dev;
+
+ trig = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+
+ if (!trig)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->trig = trig;
+ trig->dev.parent = indio_dev->dev.parent;
+ trig->ops = &atlas_interrupt_trigger_ops;
+ iio_trigger_set_drvdata(trig, indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &atlas_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "regmap initialization failed\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret)
+ return ret;
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "no valid irq defined\n");
+ return -EINVAL;
+ }
+
+ ret = atlas_check_calibration(data);
+ if (ret)
+ return ret;
+
+ ret = iio_trigger_register(trig);
+ if (ret) {
+ dev_err(&client->dev, "failed to register trigger\n");
+ return ret;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &atlas_trigger_handler, &atlas_buffer_setup_ops);
+ if (ret) {
+ dev_err(&client->dev, "cannot setup iio trigger\n");
+ goto unregister_trigger;
+ }
+
+ init_irq_work(&data->work, atlas_work_handler);
+
+ /* interrupt pin toggles on new conversion */
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, atlas_interrupt_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "atlas_irq",
+ indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "request irq (%d) failed\n", client->irq);
+ goto unregister_buffer;
+ }
+
+ ret = atlas_set_powermode(data, 1);
+ if (ret) {
+ dev_err(&client->dev, "cannot power device on");
+ goto unregister_buffer;
+ }
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev, 2500);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "unable to register device\n");
+ goto unregister_pm;
+ }
+
+ return 0;
+
+unregister_pm:
+ pm_runtime_disable(&client->dev);
+ atlas_set_powermode(data, 0);
+
+unregister_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+unregister_trigger:
+ iio_trigger_unregister(data->trig);
+
+ return ret;
+}
+
+static int atlas_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct atlas_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_trigger_unregister(data->trig);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ return atlas_set_powermode(data, 0);
+}
+
+#ifdef CONFIG_PM
+static int atlas_runtime_suspend(struct device *dev)
+{
+ struct atlas_data *data =
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return atlas_set_powermode(data, 0);
+}
+
+static int atlas_runtime_resume(struct device *dev)
+{
+ struct atlas_data *data =
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return atlas_set_powermode(data, 1);
+}
+#endif
+
+static const struct dev_pm_ops atlas_pm_ops = {
+ SET_RUNTIME_PM_OPS(atlas_runtime_suspend,
+ atlas_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id atlas_id[] = {
+ { "atlas-ph-sm", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_id);
+
+static const struct of_device_id atlas_dt_ids[] = {
+ { .compatible = "atlas,ph-sm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, atlas_dt_ids);
+
+static struct i2c_driver atlas_driver = {
+ .driver = {
+ .name = ATLAS_DRV_NAME,
+ .of_match_table = of_match_ptr(atlas_dt_ids),
+ .pm = &atlas_pm_ops,
+ },
+ .probe = atlas_probe,
+ .remove = atlas_remove,
+ .id_table = atlas_id,
+};
+module_i2c_driver(atlas_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("Atlas Scientific pH-SM sensor");
+MODULE_LICENSE("GPL");
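Note (illustrative, not driver code): the pH channel above reports a raw
big-endian 32-bit sample with a 1/1000 scale, i.e. the raw value is in
milli-pH. A userspace conversion, with ntohl() standing in for
be32_to_cpu():

#include <stdint.h>
#include <arpa/inet.h>

/* A raw reading of 7012 corresponds to pH 7.012. */
static inline double atlas_ph_from_raw(uint32_t be_reg)
{
	return ntohl(be_reg) / 1000.0;	/* scale = 1/1000 */
}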
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index b8b804923230..652649da500f 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -249,7 +249,7 @@ static int vz89x_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
data->xfer = vz89x_smbus_xfer;
else
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
i2c_set_clientdata(client, indio_dev);
data->client = client;
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 8447c31e27f2..f5a2d445d0c0 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -18,13 +18,15 @@
#include <asm/unaligned.h>
#include <linux/iio/common/st_sensors.h>
+#include "st_sensors_core.h"
+
static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
{
return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
}
-static int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
- u8 reg_addr, u8 mask, u8 data)
+int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
+ u8 reg_addr, u8 mask, u8 data)
{
int err;
u8 new_data;
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.h b/drivers/iio/common/st_sensors/st_sensors_core.h
new file mode 100644
index 000000000000..cd88098ff6f1
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_core.h
@@ -0,0 +1,8 @@
+/*
+ * Local functions in the ST Sensors core
+ */
+#ifndef __ST_SENSORS_CORE_H
+#define __ST_SENSORS_CORE_H
+int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
+ u8 reg_addr, u8 mask, u8 data);
+#endif
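The helper exported here performs a masked read-modify-write on a device
register. A standalone restatement of that update (illustrative only, not
the core implementation; assumes a non-zero mask):

#include <stdint.h>

static uint8_t masked_update(uint8_t old, uint8_t mask, uint8_t data)
{
	/* shift the value up to the mask's lowest set bit, then merge */
	uint8_t shifted = (uint8_t)(data << __builtin_ctz(mask)) & mask;

	return (uint8_t)((old & ~mask) | shifted);
}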
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 3e907040c2c7..6a8c98327945 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -14,32 +14,65 @@
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/interrupt.h>
-
#include <linux/iio/common/st_sensors.h>
-
+#include "st_sensors_core.h"
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
- int err;
+ int err, irq;
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ unsigned long irq_trig;
sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name);
if (sdata->trig == NULL) {
- err = -ENOMEM;
dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
- goto iio_trigger_alloc_error;
+ return -ENOMEM;
}
- err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ irq = sdata->get_irq_data_ready(indio_dev);
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ /*
+ * If the IRQ is triggered on the falling edge, we need to mark
+ * the interrupt as active low if the hardware supports this.
+ */
+ if (irq_trig == IRQF_TRIGGER_FALLING) {
+ if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
+ dev_err(&indio_dev->dev,
+ "falling edge specified for IRQ but hardware "
+ "only support rising edge, will request "
+ "rising edge\n");
+ irq_trig = IRQF_TRIGGER_RISING;
+ } else {
+ /* Set up INT active low i.e. falling edge */
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->drdy_irq.addr_ihl,
+ sdata->sensor_settings->drdy_irq.mask_ihl, 1);
+ if (err < 0)
+ goto iio_trigger_free;
+ dev_info(&indio_dev->dev,
+ "interrupts on the falling edge\n");
+ }
+ } else if (irq_trig == IRQF_TRIGGER_RISING) {
+ dev_info(&indio_dev->dev,
+ "interrupts on the rising edge\n");
+
+ } else {
+ dev_err(&indio_dev->dev,
+ "unsupported IRQ trigger specified (%lx), only "
+ "rising and falling edges supported, enforce "
+ "rising edge\n", irq_trig);
+ irq_trig = IRQF_TRIGGER_RISING;
+ }
+ err = request_threaded_irq(irq,
iio_trigger_generic_data_rdy_poll,
NULL,
- IRQF_TRIGGER_RISING,
+ irq_trig,
sdata->trig->name,
sdata->trig);
if (err) {
dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
- goto request_irq_error;
+ goto iio_trigger_free;
}
iio_trigger_set_drvdata(sdata->trig, indio_dev);
@@ -57,9 +90,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
iio_trigger_register_error:
free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
-request_irq_error:
+iio_trigger_free:
iio_trigger_free(sdata->trig);
-iio_trigger_alloc_error:
return err;
}
EXPORT_SYMBOL(st_sensors_allocate_trigger);
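The edge-selection policy added above reduces to the following standalone
sketch; TRIG_RISING/TRIG_FALLING are stand-ins for the IRQF_TRIGGER_*
flags and the helper is illustrative, not part of the ST core API.

#include <stdbool.h>

#define TRIG_RISING  0x1
#define TRIG_FALLING 0x2

static unsigned long pick_irq_edge(unsigned long requested, bool has_ihl)
{
	if (requested == TRIG_FALLING && !has_ihl)
		return TRIG_RISING;	/* hardware cannot invert the line */
	if (requested != TRIG_RISING && requested != TRIG_FALLING)
		return TRIG_RISING;	/* unsupported type: enforce rising */
	return requested;		/* rising, or falling with IHL set */
}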
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index e701e28fb1cd..a995139f907c 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -10,8 +10,10 @@ config AD5064
depends on (SPI_MASTER && I2C!=m) || I2C
help
Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
- AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668,
- AD5669R Digital to Analog Converter.
+ AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R, AD5627, AD5627R,
+ AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R, AD5666,
+ AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616,
+ LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to Analog Converter.
To compile this driver as a module, choose M here: the
module will be called ad5064.
@@ -111,6 +113,16 @@ config AD5755
To compile this driver as a module, choose M here: the
module will be called ad5755.
+config AD5761
+ tristate "Analog Devices AD5761/61R/21/21R DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5761, AD5761R, AD5721,
+ AD5721R Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5761.
+
config AD5764
tristate "Analog Devices AD5764/64R/44/44R DAC driver"
depends on SPI_MASTER
@@ -176,11 +188,11 @@ config MAX5821
10 bits DAC.
config MCP4725
- tristate "MCP4725 DAC driver"
+ tristate "MCP4725/6 DAC driver"
depends on I2C
---help---
Say Y here if you want to build a driver for the Microchip
- MCP 4725 12-bit digital-to-analog converter (DAC) with I2C
+ MCP 4725/6 12-bit digital-to-analog converter (DAC) with I2C
interface.
To compile this driver as a module, choose M here: the module
@@ -196,4 +208,23 @@ config MCP4922
To compile this driver as a module, choose M here: the module
will be called mcp4922.
+config STX104
+ tristate "Apex Embedded Systems STX104 DAC driver"
+ depends on ISA
+ help
+ Say yes here to build support for the 2-channel DAC on the Apex
+ Embedded Systems STX104 integrated analog PC/104 card. The base port
+ addresses for the devices may be configured via the "base" module
+ parameter array.
+
+config VF610_DAC
+ tristate "Vybrid vf610 DAC driver"
+ depends on OF
+ depends on HAS_IOMEM
+ help
+ Say yes here to support Vybrid board digital-to-analog converter.
+
+ This driver can also be built as a module. If so, the module will
+ be called vf610_dac.
+
endmenu
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 63ae05633e0c..67b48429686d 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
obj-$(CONFIG_AD5449) += ad5449.o
obj-$(CONFIG_AD5755) += ad5755.o
+obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
@@ -21,3 +22,5 @@ obj-$(CONFIG_MAX517) += max517.o
obj-$(CONFIG_MAX5821) += max5821.o
obj-$(CONFIG_MCP4725) += mcp4725.o
obj-$(CONFIG_MCP4922) += mcp4922.o
+obj-$(CONFIG_STX104) += stx104.o
+obj-$(CONFIG_VF610_DAC) += vf610_dac.o
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 81ca0081a019..6803e4a137cd 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -1,6 +1,9 @@
/*
- * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R,
- * AD5648, AD5666, AD5668, AD5669R Digital to analog converters driver
+ * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R,
+ * AD5627, AD5627R, AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R,
+ * AD5666, AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616,
+ * LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to analog converters
+ * driver
*
* Copyright 2011 Analog Devices Inc.
*
@@ -39,6 +42,9 @@
#define AD5064_CMD_RESET 0x7
#define AD5064_CMD_CONFIG 0x8
+#define AD5064_CMD_RESET_V2 0x5
+#define AD5064_CMD_CONFIG_V2 0x7
+
#define AD5064_CONFIG_DAISY_CHAIN_ENABLE BIT(1)
#define AD5064_CONFIG_INT_VREF_ENABLE BIT(0)
@@ -48,12 +54,25 @@
#define AD5064_LDAC_PWRDN_3STATE 0x3
/**
+ * enum ad5064_regmap_type - Register layout variant
+ * @AD5064_REGMAP_ADI: Old Analog Devices register map layout
+ * @AD5064_REGMAP_ADI2: New Analog Devices register map layout
+ * @AD5064_REGMAP_LTC: LTC register map layout
+ */
+enum ad5064_regmap_type {
+ AD5064_REGMAP_ADI,
+ AD5064_REGMAP_ADI2,
+ AD5064_REGMAP_LTC,
+};
+
+/**
* struct ad5064_chip_info - chip specific information
* @shared_vref: whether the vref supply is shared between channels
- * @internal_vref: internal reference voltage. 0 if the chip has no internal
- * vref.
+ * @internal_vref: internal reference voltage. 0 if the chip has no
+ * internal vref.
* @channel: channel specification
* @num_channels: number of channels
+ * @regmap_type: register map layout variant
*/
struct ad5064_chip_info {
@@ -61,6 +80,7 @@ struct ad5064_chip_info {
unsigned long internal_vref;
const struct iio_chan_spec *channels;
unsigned int num_channels;
+ enum ad5064_regmap_type regmap_type;
};
struct ad5064_state;
@@ -111,18 +131,43 @@ enum ad5064_type {
ID_AD5064,
ID_AD5064_1,
ID_AD5065,
+ ID_AD5625,
+ ID_AD5625R_1V25,
+ ID_AD5625R_2V5,
+ ID_AD5627,
+ ID_AD5627R_1V25,
+ ID_AD5627R_2V5,
ID_AD5628_1,
ID_AD5628_2,
ID_AD5629_1,
ID_AD5629_2,
+ ID_AD5645R_1V25,
+ ID_AD5645R_2V5,
+ ID_AD5647R_1V25,
+ ID_AD5647R_2V5,
ID_AD5648_1,
ID_AD5648_2,
+ ID_AD5665,
+ ID_AD5665R_1V25,
+ ID_AD5665R_2V5,
ID_AD5666_1,
ID_AD5666_2,
+ ID_AD5667,
+ ID_AD5667R_1V25,
+ ID_AD5667R_2V5,
ID_AD5668_1,
ID_AD5668_2,
ID_AD5669_1,
ID_AD5669_2,
+ ID_LTC2606,
+ ID_LTC2607,
+ ID_LTC2609,
+ ID_LTC2616,
+ ID_LTC2617,
+ ID_LTC2619,
+ ID_LTC2626,
+ ID_LTC2627,
+ ID_LTC2629,
};
static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
@@ -136,15 +181,27 @@ static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
static int ad5064_sync_powerdown_mode(struct ad5064_state *st,
const struct iio_chan_spec *chan)
{
- unsigned int val;
+ unsigned int val, address;
+ unsigned int shift;
int ret;
- val = (0x1 << chan->address);
+ if (st->chip_info->regmap_type == AD5064_REGMAP_LTC) {
+ val = 0;
+ address = chan->address;
+ } else {
+ if (st->chip_info->regmap_type == AD5064_REGMAP_ADI2)
+ shift = 4;
+ else
+ shift = 8;
+
+ val = (0x1 << chan->address);
+ address = 0;
- if (st->pwr_down[chan->channel])
- val |= st->pwr_down_mode[chan->channel] << 8;
+ if (st->pwr_down[chan->channel])
+ val |= st->pwr_down_mode[chan->channel] << shift;
+ }
- ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0);
+ ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, address, val, 0);
return ret;
}
@@ -155,6 +212,10 @@ static const char * const ad5064_powerdown_modes[] = {
"three_state",
};
+static const char * const ltc2617_powerdown_modes[] = {
+ "90kohm_to_gnd",
+};
+
static int ad5064_get_powerdown_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
@@ -185,6 +246,13 @@ static const struct iio_enum ad5064_powerdown_mode_enum = {
.set = ad5064_set_powerdown_mode,
};
+static const struct iio_enum ltc2617_powerdown_mode_enum = {
+ .items = ltc2617_powerdown_modes,
+ .num_items = ARRAY_SIZE(ltc2617_powerdown_modes),
+ .get = ad5064_get_powerdown_mode,
+ .set = ad5064_set_powerdown_mode,
+};
+
static ssize_t ad5064_read_dac_powerdown(struct iio_dev *indio_dev,
uintptr_t private, const struct iio_chan_spec *chan, char *buf)
{
@@ -295,7 +363,19 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
{ },
};
-#define AD5064_CHANNEL(chan, addr, bits, _shift) { \
+static const struct iio_chan_spec_ext_info ltc2617_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5064_read_dac_powerdown,
+ .write = ad5064_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ltc2617_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ltc2617_powerdown_mode_enum),
+ { },
+};
+
+#define AD5064_CHANNEL(chan, addr, bits, _shift, _ext_info) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
@@ -309,145 +389,340 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
.storagebits = 16, \
.shift = (_shift), \
}, \
- .ext_info = ad5064_ext_info, \
+ .ext_info = (_ext_info), \
}
-#define DECLARE_AD5064_CHANNELS(name, bits, shift) \
+#define DECLARE_AD5064_CHANNELS(name, bits, shift, ext_info) \
const struct iio_chan_spec name[] = { \
- AD5064_CHANNEL(0, 0, bits, shift), \
- AD5064_CHANNEL(1, 1, bits, shift), \
- AD5064_CHANNEL(2, 2, bits, shift), \
- AD5064_CHANNEL(3, 3, bits, shift), \
- AD5064_CHANNEL(4, 4, bits, shift), \
- AD5064_CHANNEL(5, 5, bits, shift), \
- AD5064_CHANNEL(6, 6, bits, shift), \
- AD5064_CHANNEL(7, 7, bits, shift), \
+ AD5064_CHANNEL(0, 0, bits, shift, ext_info), \
+ AD5064_CHANNEL(1, 1, bits, shift, ext_info), \
+ AD5064_CHANNEL(2, 2, bits, shift, ext_info), \
+ AD5064_CHANNEL(3, 3, bits, shift, ext_info), \
+ AD5064_CHANNEL(4, 4, bits, shift, ext_info), \
+ AD5064_CHANNEL(5, 5, bits, shift, ext_info), \
+ AD5064_CHANNEL(6, 6, bits, shift, ext_info), \
+ AD5064_CHANNEL(7, 7, bits, shift, ext_info), \
}
-#define DECLARE_AD5065_CHANNELS(name, bits, shift) \
+#define DECLARE_AD5065_CHANNELS(name, bits, shift, ext_info) \
const struct iio_chan_spec name[] = { \
- AD5064_CHANNEL(0, 0, bits, shift), \
- AD5064_CHANNEL(1, 3, bits, shift), \
+ AD5064_CHANNEL(0, 0, bits, shift, ext_info), \
+ AD5064_CHANNEL(1, 3, bits, shift, ext_info), \
}
-static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8);
-static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6);
-static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4);
+static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4, ad5064_ext_info);
+
+static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8, ad5064_ext_info);
+static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6, ad5064_ext_info);
+static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4, ad5064_ext_info);
-static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8);
-static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6);
-static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4);
+static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5645_channels, 14, 2, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0, ad5064_ext_info);
-static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4);
-static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0);
+static DECLARE_AD5064_CHANNELS(ltc2607_channels, 16, 0, ltc2617_ext_info);
+static DECLARE_AD5064_CHANNELS(ltc2617_channels, 14, 2, ltc2617_ext_info);
+static DECLARE_AD5064_CHANNELS(ltc2627_channels, 12, 4, ltc2617_ext_info);
static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
[ID_AD5024] = {
.shared_vref = false,
.channels = ad5024_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5025] = {
.shared_vref = false,
.channels = ad5025_channels,
.num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5044] = {
.shared_vref = false,
.channels = ad5044_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5045] = {
.shared_vref = false,
.channels = ad5045_channels,
.num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5064] = {
.shared_vref = false,
.channels = ad5064_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5064_1] = {
.shared_vref = true,
.channels = ad5064_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5065] = {
.shared_vref = false,
.channels = ad5065_channels,
.num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_AD5625] = {
+ .shared_vref = true,
+ .channels = ad5629_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5625R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5629_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5625R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5629_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5627] = {
+ .shared_vref = true,
+ .channels = ad5629_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5627R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5629_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5627R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5629_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
},
[ID_AD5628_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5024_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5628_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5024_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5629_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5629_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5629_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5629_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_AD5645R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5645_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5645R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5645_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5647R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5645_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5647R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5645_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
},
[ID_AD5648_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5044_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5648_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5044_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_AD5665] = {
+ .shared_vref = true,
+ .channels = ad5669_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5665R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5669_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5665R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5669_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
},
[ID_AD5666_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5064_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5666_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5064_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_AD5667] = {
+ .shared_vref = true,
+ .channels = ad5669_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5667R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5669_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5667R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5669_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
},
[ID_AD5668_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5064_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5668_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5064_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5669_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5669_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5669_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5669_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_LTC2606] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2607_channels,
+ .num_channels = 1,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2607] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2607_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2609] = {
+ .shared_vref = false,
+ .internal_vref = 0,
+ .channels = ltc2607_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2616] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2617_channels,
+ .num_channels = 1,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2617] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2617_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2619] = {
+ .shared_vref = false,
+ .internal_vref = 0,
+ .channels = ltc2617_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2626] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2627_channels,
+ .num_channels = 1,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2627] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2627_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2629] = {
+ .shared_vref = false,
+ .internal_vref = 0,
+ .channels = ltc2627_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_LTC,
},
};
@@ -469,6 +744,22 @@ static const char * const ad5064_vref_name(struct ad5064_state *st,
return st->chip_info->shared_vref ? "vref" : ad5064_vref_names[vref];
}
+static int ad5064_set_config(struct ad5064_state *st, unsigned int val)
+{
+ unsigned int cmd;
+
+ switch (st->chip_info->regmap_type) {
+ case AD5064_REGMAP_ADI2:
+ cmd = AD5064_CMD_CONFIG_V2;
+ break;
+ default:
+ cmd = AD5064_CMD_CONFIG;
+ break;
+ }
+
+ return ad5064_write(st, cmd, 0, val, 0);
+}
+
static int ad5064_probe(struct device *dev, enum ad5064_type type,
const char *name, ad5064_write_func write)
{
@@ -498,8 +789,7 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
if (!st->chip_info->internal_vref)
return ret;
st->use_internal_vref = true;
- ret = ad5064_write(st, AD5064_CMD_CONFIG, 0,
- AD5064_CONFIG_INT_VREF_ENABLE, 0);
+ ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE);
if (ret) {
dev_err(dev, "Failed to enable internal vref: %d\n",
ret);
@@ -628,9 +918,19 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
unsigned int addr, unsigned int val)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
+ unsigned int cmd_shift;
int ret;
- st->data.i2c[0] = (cmd << 4) | addr;
+ switch (st->chip_info->regmap_type) {
+ case AD5064_REGMAP_ADI2:
+ cmd_shift = 3;
+ break;
+ default:
+ cmd_shift = 4;
+ break;
+ }
+
+ st->data.i2c[0] = (cmd << cmd_shift) | addr;
put_unaligned_be16(val, &st->data.i2c[1]);
ret = i2c_master_send(i2c, st->data.i2c, 3);
@@ -653,12 +953,35 @@ static int ad5064_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id ad5064_i2c_ids[] = {
+ {"ad5625", ID_AD5625 },
+ {"ad5625r-1v25", ID_AD5625R_1V25 },
+ {"ad5625r-2v5", ID_AD5625R_2V5 },
+ {"ad5627", ID_AD5627 },
+ {"ad5627r-1v25", ID_AD5627R_1V25 },
+ {"ad5627r-2v5", ID_AD5627R_2V5 },
{"ad5629-1", ID_AD5629_1},
{"ad5629-2", ID_AD5629_2},
{"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */
+ {"ad5645r-1v25", ID_AD5645R_1V25 },
+ {"ad5645r-2v5", ID_AD5645R_2V5 },
+ {"ad5665", ID_AD5665 },
+ {"ad5665r-1v25", ID_AD5665R_1V25 },
+ {"ad5665r-2v5", ID_AD5665R_2V5 },
+ {"ad5667", ID_AD5667 },
+ {"ad5667r-1v25", ID_AD5667R_1V25 },
+ {"ad5667r-2v5", ID_AD5667R_2V5 },
{"ad5669-1", ID_AD5669_1},
{"ad5669-2", ID_AD5669_2},
{"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */
+ {"ltc2606", ID_LTC2606},
+ {"ltc2607", ID_LTC2607},
+ {"ltc2609", ID_LTC2609},
+ {"ltc2616", ID_LTC2616},
+ {"ltc2617", ID_LTC2617},
+ {"ltc2619", ID_LTC2619},
+ {"ltc2626", ID_LTC2626},
+ {"ltc2627", ID_LTC2627},
+ {"ltc2629", ID_LTC2629},
{}
};
MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
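The cmd_shift logic above packs the first I2C byte differently for the two
Analog Devices layouts: a 4-bit command field on the classic parts versus a
3-bit one on the AD5064_REGMAP_ADI2 parts. Illustrative packing (not
driver code):

#include <stdbool.h>
#include <stdint.h>

static uint8_t ad5064_first_i2c_byte(uint8_t cmd, uint8_t addr, bool adi2)
{
	return (uint8_t)((cmd << (adi2 ? 3 : 4)) | addr);
}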
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
new file mode 100644
index 000000000000..d6510d6928b3
--- /dev/null
+++ b/drivers/iio/dac/ad5761.c
@@ -0,0 +1,430 @@
+/*
+ * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
+ *
+ * Copyright 2016 Qtechnology A/S
+ * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/bitops.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_data/ad5761.h>
+
+#define AD5761_ADDR(addr) ((addr & 0xf) << 16)
+#define AD5761_ADDR_NOOP 0x0
+#define AD5761_ADDR_DAC_WRITE 0x3
+#define AD5761_ADDR_CTRL_WRITE_REG 0x4
+#define AD5761_ADDR_SW_DATA_RESET 0x7
+#define AD5761_ADDR_DAC_READ 0xb
+#define AD5761_ADDR_CTRL_READ_REG 0xc
+#define AD5761_ADDR_SW_FULL_RESET 0xf
+
+#define AD5761_CTRL_USE_INTVREF BIT(5)
+#define AD5761_CTRL_ETS BIT(6)
+
+/**
+ * struct ad5761_chip_info - chip specific information
+ * @int_vref: Value of the internal reference voltage in mV - 0 if external
+ * reference voltage is used
+ * @channel: channel specification
+ */
+
+struct ad5761_chip_info {
+ unsigned long int_vref;
+ const struct iio_chan_spec channel;
+};
+
+struct ad5761_range_params {
+ int m;
+ int c;
+};
+
+enum ad5761_supported_device_ids {
+ ID_AD5721,
+ ID_AD5721R,
+ ID_AD5761,
+ ID_AD5761R,
+};
+
+/**
+ * struct ad5761_state - driver instance specific data
+ * @spi: spi_device
+ * @vref_reg: reference voltage regulator
+ * @use_intref: true when the internal voltage reference is used
+ * @vref: actual voltage reference in mVolts
+ * @range: output range mode used
+ * @data: cache aligned spi buffer
+ */
+struct ad5761_state {
+ struct spi_device *spi;
+ struct regulator *vref_reg;
+
+ bool use_intref;
+ int vref;
+ enum ad5761_voltage_range range;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[3] ____cacheline_aligned;
+};
+
+static const struct ad5761_range_params ad5761_range_params[] = {
+ [AD5761_VOLTAGE_RANGE_M10V_10V] = {
+ .m = 80,
+ .c = 40,
+ },
+ [AD5761_VOLTAGE_RANGE_0V_10V] = {
+ .m = 40,
+ .c = 0,
+ },
+ [AD5761_VOLTAGE_RANGE_M5V_5V] = {
+ .m = 40,
+ .c = 20,
+ },
+ [AD5761_VOLTAGE_RANGE_0V_5V] = {
+ .m = 20,
+ .c = 0,
+ },
+ [AD5761_VOLTAGE_RANGE_M2V5_7V5] = {
+ .m = 40,
+ .c = 10,
+ },
+ [AD5761_VOLTAGE_RANGE_M3V_3V] = {
+ .m = 24,
+ .c = 12,
+ },
+ [AD5761_VOLTAGE_RANGE_0V_16V] = {
+ .m = 64,
+ .c = 0,
+ },
+ [AD5761_VOLTAGE_RANGE_0V_20V] = {
+ .m = 80,
+ .c = 0,
+ },
+};
+
+static int _ad5761_spi_write(struct ad5761_state *st, u8 addr, u16 val)
+{
+ st->data[0].d32 = cpu_to_be32(AD5761_ADDR(addr) | val);
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+static int ad5761_spi_write(struct iio_dev *indio_dev, u8 addr, u16 val)
+{
+ struct ad5761_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = _ad5761_spi_write(st, addr, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int _ad5761_spi_read(struct ad5761_state *st, u8 addr, u16 *val)
+{
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .bits_per_word = 8,
+ .len = 3,
+ .cs_change = true,
+ }, {
+ .tx_buf = &st->data[1].d8[1],
+ .rx_buf = &st->data[2].d8[1],
+ .bits_per_word = 8,
+ .len = 3,
+ },
+ };
+
+ st->data[0].d32 = cpu_to_be32(AD5761_ADDR(addr));
+ st->data[1].d32 = cpu_to_be32(AD5761_ADDR(AD5761_ADDR_NOOP));
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+
+ *val = be32_to_cpu(st->data[2].d32);
+
+ return ret;
+}
+
+static int ad5761_spi_read(struct iio_dev *indio_dev, u8 addr, u16 *val)
+{
+ struct ad5761_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = _ad5761_spi_read(st, addr, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5761_spi_set_range(struct ad5761_state *st,
+ enum ad5761_voltage_range range)
+{
+ u16 aux;
+ int ret;
+
+ aux = (range & 0x7) | AD5761_CTRL_ETS;
+
+ if (st->use_intref)
+ aux |= AD5761_CTRL_USE_INTVREF;
+
+ ret = _ad5761_spi_write(st, AD5761_ADDR_SW_FULL_RESET, 0);
+ if (ret)
+ return ret;
+
+ ret = _ad5761_spi_write(st, AD5761_ADDR_CTRL_WRITE_REG, aux);
+ if (ret)
+ return ret;
+
+ st->range = range;
+
+ return 0;
+}
+
+static int ad5761_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct ad5761_state *st;
+ int ret;
+ u16 aux;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ad5761_spi_read(indio_dev, AD5761_ADDR_DAC_READ, &aux);
+ if (ret)
+ return ret;
+ *val = aux >> chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ st = iio_priv(indio_dev);
+ *val = st->vref * ad5761_range_params[st->range].m;
+ *val /= 10;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ st = iio_priv(indio_dev);
+ *val = -(1 << chan->scan_type.realbits);
+ *val *= ad5761_range_params[st->range].c;
+ *val /= ad5761_range_params[st->range].m;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad5761_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ u16 aux;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ if (val2 || (val << chan->scan_type.shift) > 0xffff || val < 0)
+ return -EINVAL;
+
+ aux = val << chan->scan_type.shift;
+
+ return ad5761_spi_write(indio_dev, AD5761_ADDR_DAC_WRITE, aux);
+}
+
+static const struct iio_info ad5761_info = {
+ .read_raw = &ad5761_read_raw,
+ .write_raw = &ad5761_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define AD5761_CHAN(_bits) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
+}
+
+static const struct ad5761_chip_info ad5761_chip_infos[] = {
+ [ID_AD5721] = {
+ .int_vref = 0,
+ .channel = AD5761_CHAN(12),
+ },
+ [ID_AD5721R] = {
+ .int_vref = 2500,
+ .channel = AD5761_CHAN(12),
+ },
+ [ID_AD5761] = {
+ .int_vref = 0,
+ .channel = AD5761_CHAN(16),
+ },
+ [ID_AD5761R] = {
+ .int_vref = 2500,
+ .channel = AD5761_CHAN(16),
+ },
+};
+
+static int ad5761_get_vref(struct ad5761_state *st,
+ const struct ad5761_chip_info *chip_info)
+{
+ int ret;
+
+ st->vref_reg = devm_regulator_get_optional(&st->spi->dev, "vref");
+ if (PTR_ERR(st->vref_reg) == -ENODEV) {
+ /* Use Internal regulator */
+ if (!chip_info->int_vref) {
+ dev_err(&st->spi->dev,
+ "Voltage reference not found\n");
+ return -EIO;
+ }
+
+ st->use_intref = true;
+ st->vref = chip_info->int_vref;
+ return 0;
+ }
+
+ if (IS_ERR(st->vref_reg)) {
+ dev_err(&st->spi->dev,
+ "Error getting voltage reference regulator\n");
+ return PTR_ERR(st->vref_reg);
+ }
+
+ ret = regulator_enable(st->vref_reg);
+ if (ret) {
+ dev_err(&st->spi->dev,
+ "Failed to enable voltage reference\n");
+ return ret;
+ }
+
+ ret = regulator_get_voltage(st->vref_reg);
+ if (ret < 0) {
+ dev_err(&st->spi->dev,
+ "Failed to get voltage reference value\n");
+ goto disable_regulator_vref;
+ }
+
+ if (ret < 2000000 || ret > 3000000) {
+ dev_warn(&st->spi->dev,
+ "Invalid external voltage ref. value %d uV\n", ret);
+ ret = -EIO;
+ goto disable_regulator_vref;
+ }
+
+ st->vref = ret / 1000;
+ st->use_intref = false;
+
+ return 0;
+
+disable_regulator_vref:
+ regulator_disable(st->vref_reg);
+ st->vref_reg = NULL;
+ return ret;
+}
+
+static int ad5761_probe(struct spi_device *spi)
+{
+ struct iio_dev *iio_dev;
+ struct ad5761_state *st;
+ int ret;
+ const struct ad5761_chip_info *chip_info =
+ &ad5761_chip_infos[spi_get_device_id(spi)->driver_data];
+ enum ad5761_voltage_range voltage_range = AD5761_VOLTAGE_RANGE_0V_5V;
+ struct ad5761_platform_data *pdata = dev_get_platdata(&spi->dev);
+
+ iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(iio_dev);
+
+ st->spi = spi;
+ spi_set_drvdata(spi, iio_dev);
+
+ ret = ad5761_get_vref(st, chip_info);
+ if (ret)
+ return ret;
+
+ if (pdata)
+ voltage_range = pdata->voltage_range;
+
+ ret = ad5761_spi_set_range(st, voltage_range);
+ if (ret)
+ goto disable_regulator_err;
+
+ iio_dev->dev.parent = &spi->dev;
+ iio_dev->info = &ad5761_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->channels = &chip_info->channel;
+ iio_dev->num_channels = 1;
+ iio_dev->name = spi_get_device_id(st->spi)->name;
+ ret = iio_device_register(iio_dev);
+ if (ret)
+ goto disable_regulator_err;
+
+ return 0;
+
+disable_regulator_err:
+ if (!IS_ERR_OR_NULL(st->vref_reg))
+ regulator_disable(st->vref_reg);
+
+ return ret;
+}
+
+static int ad5761_remove(struct spi_device *spi)
+{
+ struct iio_dev *iio_dev = spi_get_drvdata(spi);
+ struct ad5761_state *st = iio_priv(iio_dev);
+
+ iio_device_unregister(iio_dev);
+
+ if (!IS_ERR_OR_NULL(st->vref_reg))
+ regulator_disable(st->vref_reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5761_id[] = {
+ {"ad5721", ID_AD5721},
+ {"ad5721r", ID_AD5721R},
+ {"ad5761", ID_AD5761},
+ {"ad5761r", ID_AD5761R},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5761_id);
+
+static struct spi_driver ad5761_driver = {
+ .driver = {
+ .name = "ad5761",
+ },
+ .probe = ad5761_probe,
+ .remove = ad5761_remove,
+ .id_table = ad5761_id,
+};
+module_spi_driver(ad5761_driver);
+
+MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD5721, AD5721R, AD5761, AD5761R driver");
+MODULE_LICENSE("GPL v2");
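Worked example (illustrative, not driver code) of the read_raw() scale and
offset math for an AD5761R on the -10V..+10V range, i.e. vref = 2500 mV,
m = 80, c = 40, realbits = 16. Scale comes out to 20000/65536 mV per LSB
and offset to -32768 codes, so code 0 maps to -10 V:

#include <stdio.h>

int main(void)
{
	int vref = 2500, m = 80, c = 40, realbits = 16;
	long span = 1L << realbits;
	double scale = (double)vref * m / 10 / span;	/* mV per LSB */
	long offset = -span * c / m;			/* in raw codes */

	printf("scale = %.5f mV/LSB, offset = %ld codes\n", scale, offset);
	printf("V(code 0) = %.1f mV\n", (0 + offset) * scale);
	return 0;
}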
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index b4dde8315210..cca935c06f2b 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -1,5 +1,5 @@
/*
- * mcp4725.c - Support for Microchip MCP4725
+ * mcp4725.c - Support for Microchip MCP4725/6
*
* Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
*
@@ -134,6 +134,12 @@ static const char * const mcp4725_powerdown_modes[] = {
"500kohm_to_gnd"
};
+static const char * const mcp4726_powerdown_modes[] = {
+ "1kohm_to_gnd",
+ "125kohm_to_gnd",
+ "640kohm_to_gnd"
+};
+
static int mcp4725_get_powerdown_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
@@ -182,11 +188,24 @@ static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev,
return len;
}
-static const struct iio_enum mcp4725_powerdown_mode_enum = {
- .items = mcp4725_powerdown_modes,
- .num_items = ARRAY_SIZE(mcp4725_powerdown_modes),
- .get = mcp4725_get_powerdown_mode,
- .set = mcp4725_set_powerdown_mode,
+enum {
+ MCP4725,
+ MCP4726,
+};
+
+static const struct iio_enum mcp472x_powerdown_mode_enum[] = {
+ [MCP4725] = {
+ .items = mcp4725_powerdown_modes,
+ .num_items = ARRAY_SIZE(mcp4725_powerdown_modes),
+ .get = mcp4725_get_powerdown_mode,
+ .set = mcp4725_set_powerdown_mode,
+ },
+ [MCP4726] = {
+ .items = mcp4726_powerdown_modes,
+ .num_items = ARRAY_SIZE(mcp4726_powerdown_modes),
+ .get = mcp4725_get_powerdown_mode,
+ .set = mcp4725_set_powerdown_mode,
+ },
};
static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
@@ -196,19 +215,46 @@ static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
.write = mcp4725_write_powerdown,
.shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp4725_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &mcp4725_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE,
+ &mcp472x_powerdown_mode_enum[MCP4725]),
+ IIO_ENUM_AVAILABLE("powerdown_mode",
+ &mcp472x_powerdown_mode_enum[MCP4725]),
+ { },
+};
+
+static const struct iio_chan_spec_ext_info mcp4726_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = mcp4725_read_powerdown,
+ .write = mcp4725_write_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE,
+ &mcp472x_powerdown_mode_enum[MCP4726]),
+ IIO_ENUM_AVAILABLE("powerdown_mode",
+ &mcp472x_powerdown_mode_enum[MCP4726]),
{ },
};
-static const struct iio_chan_spec mcp4725_channel = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .output = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .ext_info = mcp4725_ext_info,
+static const struct iio_chan_spec mcp472x_channel[] = {
+ [MCP4725] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .output = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = mcp4725_ext_info,
+ },
+ [MCP4726] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .output = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = mcp4726_ext_info,
+ },
};
static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
@@ -302,7 +348,7 @@ static int mcp4725_probe(struct i2c_client *client,
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
indio_dev->info = &mcp4725_info;
- indio_dev->channels = &mcp4725_channel;
+ indio_dev->channels = &mcp472x_channel[id->driver_data];
indio_dev->num_channels = 1;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -316,7 +362,7 @@ static int mcp4725_probe(struct i2c_client *client,
}
pd = (inbuf[0] >> 1) & 0x3;
data->powerdown = pd > 0 ? true : false;
- data->powerdown_mode = pd ? pd-1 : 2; /* 500kohm_to_gnd */
+	data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
return iio_device_register(indio_dev);
@@ -329,7 +375,8 @@ static int mcp4725_remove(struct i2c_client *client)
}
static const struct i2c_device_id mcp4725_id[] = {
- { "mcp4725", 0 },
+ { "mcp4725", MCP4725 },
+ { "mcp4726", MCP4726 },
{ }
};
MODULE_DEVICE_TABLE(i2c, mcp4725_id);
@@ -346,5 +393,5 @@ static struct i2c_driver mcp4725_driver = {
module_i2c_driver(mcp4725_driver);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
-MODULE_DESCRIPTION("MCP4725 12-bit DAC");
+MODULE_DESCRIPTION("MCP4725/6 12-bit DAC");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c
new file mode 100644
index 000000000000..174f4b75ceed
--- /dev/null
+++ b/drivers/iio/dac/stx104.c
@@ -0,0 +1,152 @@
+/*
+ * DAC driver for the Apex Embedded Systems STX104
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#define STX104_NUM_CHAN 2
+
+#define STX104_CHAN(chan) { \
+ .type = IIO_VOLTAGE, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .indexed = 1, \
+ .output = 1 \
+}
+
+#define STX104_EXTENT 16
+/*
+ * The highest base address possible for an ISA device is 0x3FF; this results in
+ * 1024 possible base addresses. Dividing the number of possible base addresses
+ * by the address extent taken by each device results in the maximum number of
+ * devices on a system.
+ */
+#define MAX_NUM_STX104 (1024 / STX104_EXTENT)
+
+static unsigned base[MAX_NUM_STX104];
+static unsigned num_stx104;
+module_param_array(base, uint, &num_stx104, 0);
+MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
+
+/**
+ * struct stx104_iio - IIO device private data structure
+ * @chan_out_states: channels' output states
+ * @base: base port address of the IIO device
+ */
+struct stx104_iio {
+ unsigned chan_out_states[STX104_NUM_CHAN];
+ unsigned base;
+};
+
+static int stx104_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct stx104_iio *const priv = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ *val = priv->chan_out_states[chan->channel];
+
+ return IIO_VAL_INT;
+}
+
+static int stx104_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct stx104_iio *const priv = iio_priv(indio_dev);
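+	/* 16-bit output registers: channel 0 at base + 4, channel 1 at base + 6 */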
+ const unsigned chan_addr_offset = 2 * chan->channel;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ priv->chan_out_states[chan->channel] = val;
+ outw(val, priv->base + 4 + chan_addr_offset);
+
+ return 0;
+}
+
+static const struct iio_info stx104_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = stx104_read_raw,
+ .write_raw = stx104_write_raw
+};
+
+static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
+ STX104_CHAN(0),
+ STX104_CHAN(1)
+};
+
+static int stx104_probe(struct device *dev, unsigned int id)
+{
+ struct iio_dev *indio_dev;
+ struct stx104_iio *priv;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base[id], STX104_EXTENT,
+ dev_name(dev))) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+			base[id], base[id] + STX104_EXTENT - 1);
+ return -EBUSY;
+ }
+
+ indio_dev->info = &stx104_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = stx104_channels;
+ indio_dev->num_channels = STX104_NUM_CHAN;
+ indio_dev->name = dev_name(dev);
+
+ priv = iio_priv(indio_dev);
+ priv->base = base[id];
+
+ /* initialize DAC output to 0V */
+ outw(0, base[id] + 4);
+ outw(0, base[id] + 6);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct isa_driver stx104_driver = {
+ .probe = stx104_probe,
+ .driver = {
+ .name = "stx104"
+ }
+};
+
+static void __exit stx104_exit(void)
+{
+ isa_unregister_driver(&stx104_driver);
+}
+
+static int __init stx104_init(void)
+{
+ return isa_register_driver(&stx104_driver, num_stx104);
+}
+
+module_init(stx104_init);
+module_exit(stx104_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Apex Embedded Systems STX104 DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
new file mode 100644
index 000000000000..c4ec7779b394
--- /dev/null
+++ b/drivers/iio/dac/vf610_dac.c
@@ -0,0 +1,298 @@
+/*
+ * Freescale Vybrid vf610 DAC driver
+ *
+ * Copyright 2016 Toradex AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define VF610_DACx_STATCTRL 0x20
+
+#define VF610_DAC_DACEN BIT(15)
+#define VF610_DAC_DACRFS BIT(14)
+#define VF610_DAC_LPEN BIT(11)
+
+#define VF610_DAC_DAT0(x) ((x) & 0xFFF)
+
+enum vf610_conversion_mode_sel {
+ VF610_DAC_CONV_HIGH_POWER,
+ VF610_DAC_CONV_LOW_POWER,
+};
+
+struct vf610_dac {
+ struct clk *clk;
+ struct device *dev;
+ enum vf610_conversion_mode_sel conv_mode;
+ void __iomem *regs;
+};
+
+static void vf610_dac_init(struct vf610_dac *info)
+{
+ int val;
+
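+	/* enable the DAC with DACRFS set (reference input) and low-power mode */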
+ info->conv_mode = VF610_DAC_CONV_LOW_POWER;
+ val = VF610_DAC_DACEN | VF610_DAC_DACRFS |
+ VF610_DAC_LPEN;
+ writel(val, info->regs + VF610_DACx_STATCTRL);
+}
+
+static void vf610_dac_exit(struct vf610_dac *info)
+{
+ int val;
+
+ val = readl(info->regs + VF610_DACx_STATCTRL);
+ val &= ~VF610_DAC_DACEN;
+ writel(val, info->regs + VF610_DACx_STATCTRL);
+}
+
+static int vf610_set_conversion_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct vf610_dac *info = iio_priv(indio_dev);
+ int val;
+
+ mutex_lock(&indio_dev->mlock);
+ info->conv_mode = mode;
+ val = readl(info->regs + VF610_DACx_STATCTRL);
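+	/* any non-zero mode selects low power via the LPEN bit */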
+ if (mode)
+ val |= VF610_DAC_LPEN;
+ else
+ val &= ~VF610_DAC_LPEN;
+ writel(val, info->regs + VF610_DACx_STATCTRL);
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
+static int vf610_get_conversion_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ return info->conv_mode;
+}
+
+static const char * const vf610_conv_modes[] = { "high-power", "low-power" };
+
+static const struct iio_enum vf610_conversion_mode = {
+ .items = vf610_conv_modes,
+ .num_items = ARRAY_SIZE(vf610_conv_modes),
+ .get = vf610_get_conversion_mode,
+ .set = vf610_set_conversion_mode,
+};
+
+static const struct iio_chan_spec_ext_info vf610_ext_info[] = {
+ IIO_ENUM("conversion_mode", IIO_SHARED_BY_DIR,
+ &vf610_conversion_mode),
+ {},
+};
+
+#define VF610_DAC_CHAN(_chan_type) { \
+ .type = (_chan_type), \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = vf610_ext_info, \
+}
+
+static const struct iio_chan_spec vf610_dac_iio_channels[] = {
+ VF610_DAC_CHAN(IIO_VOLTAGE),
+};
+
+static int vf610_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
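+		/* the 12-bit DAT0 data register sits at offset 0 */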
+ *val = VF610_DAC_DAT0(readl(info->regs));
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+		/*
+		 * DACRFS is always set, so the full-scale output tracks
+		 * the reference input; the typical reference is 3.3V per
+		 * section 9.1.2.1 of the Vybrid datasheet, giving
+		 * 3300 mV / 2^12 per LSB.
+		 */
+ *val = 3300 /* mV */;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vf610_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2,
+ long mask)
+{
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ writel(VF610_DAC_DAT0(val), info->regs);
+ mutex_unlock(&indio_dev->mlock);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info vf610_dac_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &vf610_read_raw,
+ .write_raw = &vf610_write_raw,
+};
+
+static const struct of_device_id vf610_dac_match[] = {
+ { .compatible = "fsl,vf610-dac", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vf610_dac_match);
+
+static int vf610_dac_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct vf610_dac *info;
+ struct resource *mem;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct vf610_dac));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "Failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ info = iio_priv(indio_dev);
+ info->dev = &pdev->dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(info->regs))
+ return PTR_ERR(info->regs);
+
+ info->clk = devm_clk_get(&pdev->dev, "dac");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "Failed getting clock, err = %ld\n",
+ PTR_ERR(info->clk));
+ return PTR_ERR(info->clk);
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &vf610_dac_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = vf610_dac_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vf610_dac_iio_channels);
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not prepare or enable the clock\n");
+ return ret;
+ }
+
+ vf610_dac_init(info);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register the device\n");
+ goto error_iio_device_register;
+ }
+
+ return 0;
+
+error_iio_device_register:
+ clk_disable_unprepare(info->clk);
+
+ return ret;
+}
+
+static int vf610_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ vf610_dac_exit(info);
+ clk_disable_unprepare(info->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vf610_dac_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ vf610_dac_exit(info);
+ clk_disable_unprepare(info->clk);
+
+ return 0;
+}
+
+static int vf610_dac_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_dac *info = iio_priv(indio_dev);
+ int ret;
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret)
+ return ret;
+
+ vf610_dac_init(info);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vf610_dac_pm_ops, vf610_dac_suspend, vf610_dac_resume);
+
+static struct platform_driver vf610_dac_driver = {
+ .probe = vf610_dac_probe,
+ .remove = vf610_dac_remove,
+ .driver = {
+ .name = "vf610-dac",
+ .of_match_table = vf610_dac_match,
+ .pm = &vf610_dac_pm_ops,
+ },
+};
+module_platform_driver(vf610_dac_driver);
+
+MODULE_AUTHOR("Sanchayan Maity <sanchayan.maity@toradex.com>");
+MODULE_DESCRIPTION("Freescale VF610 DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index bbce3b09ac45..4dac567e75b4 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
{
int ret;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmg160_set_power_state(data, true);
@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
}
ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
- 2);
+ sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmg160_set_power_state(data, false);
@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
return ret;
}
- *val = sign_extend32(raw_val, 15);
+ *val = sign_extend32(le16_to_cpu(raw_val), 15);
ret = bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
if (ret < 0)
@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
.sign = 's', \
.realbits = 16, \
.storagebits = 16, \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmg160_event, \
.num_event_specs = 1 \
@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
goto err;
}
- data->buffer[i++] = ret;
+ data->buffer[i++] = val;
}
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 02eddcebeea3..110f95b6e52f 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -185,6 +185,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.drdy_irq = {
.addr = ST_GYRO_1_DRDY_IRQ_ADDR,
.mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
+ /*
+ * The sensor has IHL (active low) and open
+ * drain settings, but only for INT1 and not
+ * for the DRDY line on INT2.
+ */
},
.multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
.bootime = 2,
@@ -248,6 +253,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.drdy_irq = {
.addr = ST_GYRO_2_DRDY_IRQ_ADDR,
.mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
+ /*
+ * The sensor has IHL (active low) and open
+ * drain settings, but only for INT1 and not
+ * for the DRDY line on INT2.
+ */
},
.multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
.bootime = 2,
@@ -307,6 +317,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.drdy_irq = {
.addr = ST_GYRO_3_DRDY_IRQ_ADDR,
.mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK,
+ /*
+ * The sensor has IHL (active low) and open
+ * drain settings, but only for INT1 and not
+ * for the DRDY line on INT2.
+ */
},
.multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
.bootime = 2,
diff --git a/drivers/iio/health/Kconfig b/drivers/iio/health/Kconfig
index a647679da805..c5f004a8e447 100644
--- a/drivers/iio/health/Kconfig
+++ b/drivers/iio/health/Kconfig
@@ -3,7 +3,35 @@
#
# When adding new entries keep the list in alphabetical order
-menu "Health sensors"
+menu "Health Sensors"
+
+menu "Heart Rate Monitors"
+
+config AFE4403
+ tristate "TI AFE4403 Heart Rate Monitor"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes to choose the Texas Instruments AFE4403
+ heart rate monitor and low-cost pulse oximeter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called afe4403.
+
+config AFE4404
+	tristate "TI AFE4404 Heart Rate Monitor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes to choose the Texas Instruments AFE4404
+ heart rate monitor and low-cost pulse oximeter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called afe4404.
config MAX30100
tristate "MAX30100 heart rate and pulse oximeter sensor"
@@ -19,3 +47,5 @@ config MAX30100
module will be called max30100.
endmenu
+
+endmenu
diff --git a/drivers/iio/health/Makefile b/drivers/iio/health/Makefile
index 7c475d7faad8..9955a2ae8df1 100644
--- a/drivers/iio/health/Makefile
+++ b/drivers/iio/health/Makefile
@@ -4,4 +4,6 @@
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AFE4403) += afe4403.o
+obj-$(CONFIG_AFE4404) += afe4404.o
obj-$(CONFIG_MAX30100) += max30100.o
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
new file mode 100644
index 000000000000..88e43f87b926
--- /dev/null
+++ b/drivers/iio/health/afe4403.c
@@ -0,0 +1,708 @@
+/*
+ * AFE4403 Heart Rate Monitors and Low-Cost Pulse Oximeters
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "afe440x.h"
+
+#define AFE4403_DRIVER_NAME "afe4403"
+
+/* AFE4403 Registers */
+#define AFE4403_TIAGAIN 0x20
+#define AFE4403_TIA_AMB_GAIN 0x21
+
+/* AFE4403 GAIN register fields */
+#define AFE4403_TIAGAIN_RES_MASK GENMASK(2, 0)
+#define AFE4403_TIAGAIN_RES_SHIFT 0
+#define AFE4403_TIAGAIN_CAP_MASK GENMASK(7, 3)
+#define AFE4403_TIAGAIN_CAP_SHIFT 3
+
+/* AFE4403 LEDCNTRL register fields */
+#define AFE440X_LEDCNTRL_LED1_MASK GENMASK(15, 8)
+#define AFE440X_LEDCNTRL_LED1_SHIFT 8
+#define AFE440X_LEDCNTRL_LED2_MASK GENMASK(7, 0)
+#define AFE440X_LEDCNTRL_LED2_SHIFT 0
+#define AFE440X_LEDCNTRL_LED_RANGE_MASK GENMASK(17, 16)
+#define AFE440X_LEDCNTRL_LED_RANGE_SHIFT 16
+
+/* AFE4403 CONTROL2 register fields */
+#define AFE440X_CONTROL2_PWR_DWN_TX BIT(2)
+#define AFE440X_CONTROL2_EN_SLOW_DIAG BIT(8)
+#define AFE440X_CONTROL2_DIAG_OUT_TRI BIT(10)
+#define AFE440X_CONTROL2_TX_BRDG_MOD BIT(11)
+#define AFE440X_CONTROL2_TX_REF_MASK GENMASK(18, 17)
+#define AFE440X_CONTROL2_TX_REF_SHIFT 17
+
+/* AFE4403 NULL fields */
+#define NULL_MASK 0
+#define NULL_SHIFT 0
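+/* used by AFE440X_REG_INFO() for channels without an associated field */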
+
+/* AFE4403 LEDCNTRL values */
+#define AFE440X_LEDCNTRL_RANGE_TX_HALF 0x1
+#define AFE440X_LEDCNTRL_RANGE_TX_FULL 0x2
+#define AFE440X_LEDCNTRL_RANGE_TX_OFF 0x3
+
+/* AFE4403 CONTROL2 values */
+#define AFE440X_CONTROL2_TX_REF_025 0x0
+#define AFE440X_CONTROL2_TX_REF_050 0x1
+#define AFE440X_CONTROL2_TX_REF_100 0x2
+#define AFE440X_CONTROL2_TX_REF_075 0x3
+
+/* AFE4403 CONTROL3 values */
+#define AFE440X_CONTROL3_CLK_DIV_2 0x0
+#define AFE440X_CONTROL3_CLK_DIV_4 0x2
+#define AFE440X_CONTROL3_CLK_DIV_6 0x3
+#define AFE440X_CONTROL3_CLK_DIV_8 0x4
+#define AFE440X_CONTROL3_CLK_DIV_12 0x5
+#define AFE440X_CONTROL3_CLK_DIV_1 0x7
+
+/* AFE4403 TIAGAIN_CAP values */
+#define AFE4403_TIAGAIN_CAP_5_P 0x0
+#define AFE4403_TIAGAIN_CAP_10_P 0x1
+#define AFE4403_TIAGAIN_CAP_20_P 0x2
+#define AFE4403_TIAGAIN_CAP_30_P 0x3
+#define AFE4403_TIAGAIN_CAP_55_P 0x8
+#define AFE4403_TIAGAIN_CAP_155_P 0x10
+
+/* AFE4403 TIAGAIN_RES values */
+#define AFE4403_TIAGAIN_RES_500_K 0x0
+#define AFE4403_TIAGAIN_RES_250_K 0x1
+#define AFE4403_TIAGAIN_RES_100_K 0x2
+#define AFE4403_TIAGAIN_RES_50_K 0x3
+#define AFE4403_TIAGAIN_RES_25_K 0x4
+#define AFE4403_TIAGAIN_RES_10_K 0x5
+#define AFE4403_TIAGAIN_RES_1_M 0x6
+#define AFE4403_TIAGAIN_RES_NONE 0x7
+
+/**
+ * struct afe4403_data - AFE4403 device instance data
+ * @dev: Device structure
+ * @spi: SPI device handle
+ * @regmap: Register map of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
+ */
+struct afe4403_data {
+ struct device *dev;
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct regulator *regulator;
+ struct iio_trigger *trig;
+ int irq;
+};
+
+enum afe4403_chan_id {
+ LED1,
+ ALED1,
+ LED2,
+ ALED2,
+ LED1_ALED1,
+ LED2_ALED2,
+ ILED1,
+ ILED2,
+};
+
+static const struct afe440x_reg_info afe4403_reg_info[] = {
+ [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, 0, NULL),
+ [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, 0, NULL),
+ [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, 0, NULL),
+ [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
+ [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
+ [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
+ [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED1),
+ [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED2),
+};
+
+static const struct iio_chan_spec afe4403_channels[] = {
+ /* ADC values */
+ AFE440X_INTENSITY_CHAN(LED1, "led1", 0),
+ AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2, "led2", 0),
+ AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+ /* LED current */
+ AFE440X_CURRENT_CHAN(ILED1, "led1"),
+ AFE440X_CURRENT_CHAN(ILED2, "led2"),
+};
+
+static const struct afe440x_val_table afe4403_res_table[] = {
+ { 500000 }, { 250000 }, { 100000 }, { 50000 },
+ { 25000 }, { 10000 }, { 1000000 }, { 0 },
+};
+AFE440X_TABLE_ATTR(tia_resistance_available, afe4403_res_table);
+
+static const struct afe440x_val_table afe4403_cap_table[] = {
+ { 0, 5000 }, { 0, 10000 }, { 0, 20000 }, { 0, 25000 },
+ { 0, 30000 }, { 0, 35000 }, { 0, 45000 }, { 0, 50000 },
+ { 0, 55000 }, { 0, 60000 }, { 0, 70000 }, { 0, 75000 },
+ { 0, 80000 }, { 0, 85000 }, { 0, 95000 }, { 0, 100000 },
+ { 0, 155000 }, { 0, 160000 }, { 0, 170000 }, { 0, 175000 },
+ { 0, 180000 }, { 0, 185000 }, { 0, 195000 }, { 0, 200000 },
+ { 0, 205000 }, { 0, 210000 }, { 0, 220000 }, { 0, 225000 },
+ { 0, 230000 }, { 0, 235000 }, { 0, 245000 }, { 0, 250000 },
+};
+AFE440X_TABLE_ATTR(tia_capacitance_available, afe4403_cap_table);
+
+static ssize_t afe440x_show_register(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+ unsigned int reg_val, type;
+ int vals[2];
+ int ret, val_len;
+
+ ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+ if (ret)
+ return ret;
+
+ reg_val &= afe440x_attr->mask;
+ reg_val >>= afe440x_attr->shift;
+
+ switch (afe440x_attr->type) {
+ case SIMPLE:
+ type = IIO_VAL_INT;
+ val_len = 1;
+ vals[0] = reg_val;
+ break;
+ case RESISTANCE:
+ case CAPACITANCE:
+ type = IIO_VAL_INT_PLUS_MICRO;
+ val_len = 2;
+ if (reg_val < afe440x_attr->table_size) {
+ vals[0] = afe440x_attr->val_table[reg_val].integer;
+ vals[1] = afe440x_attr->val_table[reg_val].fract;
+ break;
+ }
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+
+ return iio_format_value(buf, type, val_len, vals);
+}
+
+static ssize_t afe440x_store_register(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+ int val, integer, fract, ret;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
+ if (ret)
+ return ret;
+
+ switch (afe440x_attr->type) {
+ case SIMPLE:
+ val = integer;
+ break;
+ case RESISTANCE:
+ case CAPACITANCE:
+ for (val = 0; val < afe440x_attr->table_size; val++)
+ if (afe440x_attr->val_table[val].integer == integer &&
+ afe440x_attr->val_table[val].fract == fract)
+ break;
+ if (val == afe440x_attr->table_size)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
+ afe440x_attr->mask,
+ (val << afe440x_attr->shift));
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static AFE440X_ATTR(tia_separate_en, AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+
+static AFE440X_ATTR(tia_resistance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
+static AFE440X_ATTR(tia_capacitance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_CAP, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+
+static AFE440X_ATTR(tia_resistance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
+static AFE440X_ATTR(tia_capacitance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+
+static struct attribute *afe440x_attributes[] = {
+ &afe440x_attr_tia_separate_en.dev_attr.attr,
+ &afe440x_attr_tia_resistance1.dev_attr.attr,
+ &afe440x_attr_tia_capacitance1.dev_attr.attr,
+ &afe440x_attr_tia_resistance2.dev_attr.attr,
+ &afe440x_attr_tia_capacitance2.dev_attr.attr,
+ &dev_attr_tia_resistance_available.attr,
+ &dev_attr_tia_capacitance_available.attr,
+ NULL
+};
+
+static const struct attribute_group afe440x_attribute_group = {
+ .attrs = afe440x_attributes
+};
+
+static int afe4403_read(struct afe4403_data *afe, unsigned int reg, u32 *val)
+{
+ u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ};
+ u8 rx[3];
+ int ret;
+
+ /* Enable reading from the device */
+ ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+ if (ret)
+ return ret;
+
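+	/* send the 8-bit register address, then clock out the 24-bit value */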
+ ret = spi_write_then_read(afe->spi, &reg, 1, rx, 3);
+ if (ret)
+ return ret;
+
+ *val = (rx[0] << 16) |
+ (rx[1] << 8) |
+ (rx[2]);
+
+ /* Disable reading from the device */
+ tx[3] = AFE440X_CONTROL0_WRITE;
+ ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int afe4403_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+ int ret;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = afe4403_read(afe, reg_info.reg, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ ret = regmap_read(afe->regmap, reg_info.offreg,
+ val);
+ if (ret)
+ return ret;
+ *val &= reg_info.mask;
+ *val >>= reg_info.shift;
+ return IIO_VAL_INT;
+ }
+ break;
+ case IIO_CURRENT:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(afe->regmap, reg_info.reg, val);
+ if (ret)
+ return ret;
+ *val &= reg_info.mask;
+ *val >>= reg_info.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int afe4403_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ return regmap_update_bits(afe->regmap,
+ reg_info.offreg,
+ reg_info.mask,
+ (val << reg_info.shift));
+ }
+ break;
+ case IIO_CURRENT:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return regmap_update_bits(afe->regmap,
+ reg_info.reg,
+ reg_info.mask,
+ (val << reg_info.shift));
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info afe4403_iio_info = {
+ .attrs = &afe440x_attribute_group,
+ .read_raw = afe4403_read_raw,
+ .write_raw = afe4403_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t afe4403_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret, bit, i = 0;
+ s32 buffer[8];
+ u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ};
+ u8 rx[3];
+
+ /* Enable reading from the device */
+ ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+ if (ret)
+ goto err;
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = spi_write_then_read(afe->spi,
+ &afe4403_reg_info[bit].reg, 1,
+ rx, 3);
+ if (ret)
+ goto err;
+
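+		/* assemble the 24-bit register value, MSB first */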
+ buffer[i++] = (rx[0] << 16) |
+ (rx[1] << 8) |
+ (rx[2]);
+ }
+
+ /* Disable reading from the device */
+ tx[3] = AFE440X_CONTROL0_WRITE;
+ ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+ if (ret)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_trigger_ops afe4403_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+#define AFE4403_TIMING_PAIRS \
+ { AFE440X_LED2STC, 0x000050 }, \
+ { AFE440X_LED2ENDC, 0x0003e7 }, \
+ { AFE440X_LED1LEDSTC, 0x0007d0 }, \
+ { AFE440X_LED1LEDENDC, 0x000bb7 }, \
+ { AFE440X_ALED2STC, 0x000438 }, \
+ { AFE440X_ALED2ENDC, 0x0007cf }, \
+ { AFE440X_LED1STC, 0x000820 }, \
+ { AFE440X_LED1ENDC, 0x000bb7 }, \
+ { AFE440X_LED2LEDSTC, 0x000000 }, \
+ { AFE440X_LED2LEDENDC, 0x0003e7 }, \
+ { AFE440X_ALED1STC, 0x000c08 }, \
+ { AFE440X_ALED1ENDC, 0x000f9f }, \
+ { AFE440X_LED2CONVST, 0x0003ef }, \
+ { AFE440X_LED2CONVEND, 0x0007cf }, \
+ { AFE440X_ALED2CONVST, 0x0007d7 }, \
+ { AFE440X_ALED2CONVEND, 0x000bb7 }, \
+ { AFE440X_LED1CONVST, 0x000bbf }, \
+ { AFE440X_LED1CONVEND, 0x009c3f }, \
+ { AFE440X_ALED1CONVST, 0x000fa7 }, \
+ { AFE440X_ALED1CONVEND, 0x001387 }, \
+ { AFE440X_ADCRSTSTCT0, 0x0003e8 }, \
+ { AFE440X_ADCRSTENDCT0, 0x0003eb }, \
+ { AFE440X_ADCRSTSTCT1, 0x0007d0 }, \
+ { AFE440X_ADCRSTENDCT1, 0x0007d3 }, \
+ { AFE440X_ADCRSTSTCT2, 0x000bb8 }, \
+ { AFE440X_ADCRSTENDCT2, 0x000bbb }, \
+ { AFE440X_ADCRSTSTCT3, 0x000fa0 }, \
+ { AFE440X_ADCRSTENDCT3, 0x000fa3 }, \
+ { AFE440X_PRPCOUNT, 0x009c3f }, \
+ { AFE440X_PDNCYCLESTC, 0x001518 }, \
+ { AFE440X_PDNCYCLEENDC, 0x00991f }
+
+static const struct reg_sequence afe4403_reg_sequences[] = {
+ AFE4403_TIMING_PAIRS,
+ { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN | 0x000007},
+ { AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES_1_M },
+ { AFE440X_LEDCNTRL, (0x14 << AFE440X_LEDCNTRL_LED1_SHIFT) |
+ (0x14 << AFE440X_LEDCNTRL_LED2_SHIFT) },
+ { AFE440X_CONTROL2, AFE440X_CONTROL2_TX_REF_050 <<
+ AFE440X_CONTROL2_TX_REF_SHIFT },
+};
+
+static const struct regmap_range afe4403_yes_ranges[] = {
+ regmap_reg_range(AFE440X_LED2VAL, AFE440X_LED1_ALED1VAL),
+};
+
+static const struct regmap_access_table afe4403_volatile_table = {
+ .yes_ranges = afe4403_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(afe4403_yes_ranges),
+};
+
+static const struct regmap_config afe4403_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 24,
+
+ .max_register = AFE440X_PDNCYCLEENDC,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &afe4403_volatile_table,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id afe4403_of_match[] = {
+ { .compatible = "ti,afe4403", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, afe4403_of_match);
+#endif
+
+static int __maybe_unused afe4403_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE,
+ AFE440X_CONTROL2_PDN_AFE);
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(afe->regulator);
+ if (ret) {
+ dev_err(dev, "Unable to disable regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused afe4403_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(afe->regulator);
+ if (ret) {
+ dev_err(dev, "Unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(afe4403_pm_ops, afe4403_suspend, afe4403_resume);
+
+static int afe4403_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct afe4403_data *afe;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ afe = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ afe->dev = &spi->dev;
+ afe->spi = spi;
+ afe->irq = spi->irq;
+
+ afe->regmap = devm_regmap_init_spi(spi, &afe4403_regmap_config);
+ if (IS_ERR(afe->regmap)) {
+ dev_err(afe->dev, "Unable to allocate register map\n");
+ return PTR_ERR(afe->regmap);
+ }
+
+ afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+ if (IS_ERR(afe->regulator)) {
+ dev_err(afe->dev, "Unable to get regulator\n");
+ return PTR_ERR(afe->regulator);
+ }
+ ret = regulator_enable(afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
+ AFE440X_CONTROL0_SW_RESET);
+ if (ret) {
+ dev_err(afe->dev, "Unable to reset device\n");
+ goto err_disable_reg;
+ }
+
+ ret = regmap_multi_reg_write(afe->regmap, afe4403_reg_sequences,
+ ARRAY_SIZE(afe4403_reg_sequences));
+ if (ret) {
+ dev_err(afe->dev, "Unable to set register defaults\n");
+ goto err_disable_reg;
+ }
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = afe->dev;
+ indio_dev->channels = afe4403_channels;
+ indio_dev->num_channels = ARRAY_SIZE(afe4403_channels);
+ indio_dev->name = AFE4403_DRIVER_NAME;
+ indio_dev->info = &afe4403_iio_info;
+
+ if (afe->irq > 0) {
+ afe->trig = devm_iio_trigger_alloc(afe->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!afe->trig) {
+ dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+ ret = -ENOMEM;
+ goto err_disable_reg;
+ }
+
+ iio_trigger_set_drvdata(afe->trig, indio_dev);
+
+ afe->trig->ops = &afe4403_trigger_ops;
+ afe->trig->dev.parent = afe->dev;
+
+ ret = iio_trigger_register(afe->trig);
+ if (ret) {
+ dev_err(afe->dev, "Unable to register IIO trigger\n");
+ goto err_disable_reg;
+ }
+
+ ret = devm_request_threaded_irq(afe->dev, afe->irq,
+ iio_trigger_generic_data_rdy_poll,
+ NULL, IRQF_ONESHOT,
+ AFE4403_DRIVER_NAME,
+ afe->trig);
+ if (ret) {
+ dev_err(afe->dev, "Unable to request IRQ\n");
+ goto err_trig;
+ }
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ afe4403_trigger_handler, NULL);
+ if (ret) {
+ dev_err(afe->dev, "Unable to setup buffer\n");
+ goto err_trig;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(afe->dev, "Unable to register IIO device\n");
+ goto err_buff;
+ }
+
+ return 0;
+
+err_buff:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_trig:
+ if (afe->irq > 0)
+ iio_trigger_unregister(afe->trig);
+err_disable_reg:
+ regulator_disable(afe->regulator);
+
+ return ret;
+}
+
+static int afe4403_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ iio_device_unregister(indio_dev);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ if (afe->irq > 0)
+ iio_trigger_unregister(afe->trig);
+
+ ret = regulator_disable(afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to disable regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct spi_device_id afe4403_ids[] = {
+ { "afe4403", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, afe4403_ids);
+
+static struct spi_driver afe4403_spi_driver = {
+ .driver = {
+ .name = AFE4403_DRIVER_NAME,
+ .of_match_table = of_match_ptr(afe4403_of_match),
+ .pm = &afe4403_pm_ops,
+ },
+ .probe = afe4403_probe,
+ .remove = afe4403_remove,
+ .id_table = afe4403_ids,
+};
+module_spi_driver(afe4403_spi_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TI AFE4403 Heart Rate and Pulse Oximeter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
new file mode 100644
index 000000000000..5096a4643784
--- /dev/null
+++ b/drivers/iio/health/afe4404.c
@@ -0,0 +1,679 @@
+/*
+ * AFE4404 Heart Rate Monitors and Low-Cost Pulse Oximeters
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "afe440x.h"
+
+#define AFE4404_DRIVER_NAME "afe4404"
+
+/* AFE4404 registers */
+#define AFE4404_TIA_GAIN_SEP 0x20
+#define AFE4404_TIA_GAIN 0x21
+#define AFE4404_PROG_TG_STC 0x34
+#define AFE4404_PROG_TG_ENDC 0x35
+#define AFE4404_LED3LEDSTC 0x36
+#define AFE4404_LED3LEDENDC 0x37
+#define AFE4404_CLKDIV_PRF 0x39
+#define AFE4404_OFFDAC 0x3a
+#define AFE4404_DEC 0x3d
+#define AFE4404_AVG_LED2_ALED2VAL 0x3f
+#define AFE4404_AVG_LED1_ALED1VAL 0x40
+
+/* AFE4404 GAIN register fields */
+#define AFE4404_TIA_GAIN_RES_MASK GENMASK(2, 0)
+#define AFE4404_TIA_GAIN_RES_SHIFT 0
+#define AFE4404_TIA_GAIN_CAP_MASK GENMASK(5, 3)
+#define AFE4404_TIA_GAIN_CAP_SHIFT 3
+
+/* AFE4404 LEDCNTRL register fields */
+#define AFE4404_LEDCNTRL_ILED1_MASK GENMASK(5, 0)
+#define AFE4404_LEDCNTRL_ILED1_SHIFT 0
+#define AFE4404_LEDCNTRL_ILED2_MASK GENMASK(11, 6)
+#define AFE4404_LEDCNTRL_ILED2_SHIFT 6
+#define AFE4404_LEDCNTRL_ILED3_MASK GENMASK(17, 12)
+#define AFE4404_LEDCNTRL_ILED3_SHIFT 12
+
+/* AFE4404 CONTROL2 register fields */
+#define AFE440X_CONTROL2_ILED_2X_MASK BIT(17)
+#define AFE440X_CONTROL2_ILED_2X_SHIFT 17
+
+/* AFE4404 CONTROL3 register fields */
+#define AFE440X_CONTROL3_OSC_ENABLE BIT(9)
+
+/* AFE4404 OFFDAC register current fields */
+#define AFE4404_OFFDAC_CURR_LED1_MASK GENMASK(9, 5)
+#define AFE4404_OFFDAC_CURR_LED1_SHIFT 5
+#define AFE4404_OFFDAC_CURR_LED2_MASK GENMASK(19, 15)
+#define AFE4404_OFFDAC_CURR_LED2_SHIFT 15
+#define AFE4404_OFFDAC_CURR_LED3_MASK GENMASK(4, 0)
+#define AFE4404_OFFDAC_CURR_LED3_SHIFT 0
+#define AFE4404_OFFDAC_CURR_ALED1_MASK GENMASK(14, 10)
+#define AFE4404_OFFDAC_CURR_ALED1_SHIFT 10
+#define AFE4404_OFFDAC_CURR_ALED2_MASK GENMASK(4, 0)
+#define AFE4404_OFFDAC_CURR_ALED2_SHIFT 0
+
+/* AFE4404 NULL fields */
+#define NULL_MASK 0
+#define NULL_SHIFT 0
+
+/* AFE4404 TIA_GAIN_CAP values */
+#define AFE4404_TIA_GAIN_CAP_5_P 0x0
+#define AFE4404_TIA_GAIN_CAP_2_5_P 0x1
+#define AFE4404_TIA_GAIN_CAP_10_P 0x2
+#define AFE4404_TIA_GAIN_CAP_7_5_P 0x3
+#define AFE4404_TIA_GAIN_CAP_20_P 0x4
+#define AFE4404_TIA_GAIN_CAP_17_5_P 0x5
+#define AFE4404_TIA_GAIN_CAP_25_P 0x6
+#define AFE4404_TIA_GAIN_CAP_22_5_P 0x7
+
+/* AFE4404 TIA_GAIN_RES values */
+#define AFE4404_TIA_GAIN_RES_500_K 0x0
+#define AFE4404_TIA_GAIN_RES_250_K 0x1
+#define AFE4404_TIA_GAIN_RES_100_K 0x2
+#define AFE4404_TIA_GAIN_RES_50_K 0x3
+#define AFE4404_TIA_GAIN_RES_25_K 0x4
+#define AFE4404_TIA_GAIN_RES_10_K 0x5
+#define AFE4404_TIA_GAIN_RES_1_M 0x6
+#define AFE4404_TIA_GAIN_RES_2_M 0x7
+
+/**
+ * struct afe4404_data - AFE4404 device instance data
+ * @dev: Device structure
+ * @regmap: Register map of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
+ */
+struct afe4404_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator *regulator;
+ struct iio_trigger *trig;
+ int irq;
+};
+
+enum afe4404_chan_id {
+ LED1,
+ ALED1,
+ LED2,
+ ALED2,
+ LED3,
+ LED1_ALED1,
+ LED2_ALED2,
+ ILED1,
+ ILED2,
+ ILED3,
+};
+
+static const struct afe440x_reg_info afe4404_reg_info[] = {
+ [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED1),
+ [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED1),
+ [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED2),
+ [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED2),
+ [LED3] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
+ [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
+ [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
+ [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED1),
+ [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED2),
+ [ILED3] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED3),
+};
+
+static const struct iio_chan_spec afe4404_channels[] = {
+ /* ADC values */
+ AFE440X_INTENSITY_CHAN(LED1, "led1", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED2, "led2", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED3, "led3", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+ /* LED current */
+ AFE440X_CURRENT_CHAN(ILED1, "led1"),
+ AFE440X_CURRENT_CHAN(ILED2, "led2"),
+ AFE440X_CURRENT_CHAN(ILED3, "led3"),
+};
+
+static const struct afe440x_val_table afe4404_res_table[] = {
+ { .integer = 500000, .fract = 0 },
+ { .integer = 250000, .fract = 0 },
+ { .integer = 100000, .fract = 0 },
+ { .integer = 50000, .fract = 0 },
+ { .integer = 25000, .fract = 0 },
+ { .integer = 10000, .fract = 0 },
+ { .integer = 1000000, .fract = 0 },
+ { .integer = 2000000, .fract = 0 },
+};
+AFE440X_TABLE_ATTR(tia_resistance_available, afe4404_res_table);
+
+static const struct afe440x_val_table afe4404_cap_table[] = {
+ { .integer = 0, .fract = 5000 },
+ { .integer = 0, .fract = 2500 },
+ { .integer = 0, .fract = 10000 },
+ { .integer = 0, .fract = 7500 },
+ { .integer = 0, .fract = 20000 },
+ { .integer = 0, .fract = 17500 },
+ { .integer = 0, .fract = 25000 },
+ { .integer = 0, .fract = 22500 },
+};
+AFE440X_TABLE_ATTR(tia_capacitance_available, afe4404_cap_table);
+
+static ssize_t afe440x_show_register(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+ unsigned int reg_val, type;
+ int vals[2];
+ int ret, val_len;
+
+ ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+ if (ret)
+ return ret;
+
+ reg_val &= afe440x_attr->mask;
+ reg_val >>= afe440x_attr->shift;
+
+ switch (afe440x_attr->type) {
+ case SIMPLE:
+ type = IIO_VAL_INT;
+ val_len = 1;
+ vals[0] = reg_val;
+ break;
+ case RESISTANCE:
+ case CAPACITANCE:
+ type = IIO_VAL_INT_PLUS_MICRO;
+ val_len = 2;
+ if (reg_val < afe440x_attr->table_size) {
+ vals[0] = afe440x_attr->val_table[reg_val].integer;
+ vals[1] = afe440x_attr->val_table[reg_val].fract;
+ break;
+ }
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+
+ return iio_format_value(buf, type, val_len, vals);
+}
+
+static ssize_t afe440x_store_register(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+ int val, integer, fract, ret;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
+ if (ret)
+ return ret;
+
+ switch (afe440x_attr->type) {
+ case SIMPLE:
+ val = integer;
+ break;
+ case RESISTANCE:
+ case CAPACITANCE:
+ for (val = 0; val < afe440x_attr->table_size; val++)
+ if (afe440x_attr->val_table[val].integer == integer &&
+ afe440x_attr->val_table[val].fract == fract)
+ break;
+ if (val == afe440x_attr->table_size)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
+ afe440x_attr->mask,
+ (val << afe440x_attr->shift));
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static AFE440X_ATTR(tia_separate_en, AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+
+static AFE440X_ATTR(tia_resistance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
+static AFE440X_ATTR(tia_capacitance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+
+static AFE440X_ATTR(tia_resistance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
+static AFE440X_ATTR(tia_capacitance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+
+static struct attribute *afe440x_attributes[] = {
+ &afe440x_attr_tia_separate_en.dev_attr.attr,
+ &afe440x_attr_tia_resistance1.dev_attr.attr,
+ &afe440x_attr_tia_capacitance1.dev_attr.attr,
+ &afe440x_attr_tia_resistance2.dev_attr.attr,
+ &afe440x_attr_tia_capacitance2.dev_attr.attr,
+ &dev_attr_tia_resistance_available.attr,
+ &dev_attr_tia_capacitance_available.attr,
+ NULL
+};
+
+static const struct attribute_group afe440x_attribute_group = {
+ .attrs = afe440x_attributes
+};
+
+static int afe4404_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+ int ret;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(afe->regmap, reg_info.reg, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ ret = regmap_read(afe->regmap, reg_info.offreg,
+ val);
+ if (ret)
+ return ret;
+ *val &= reg_info.mask;
+ *val >>= reg_info.shift;
+ return IIO_VAL_INT;
+ }
+ break;
+ case IIO_CURRENT:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(afe->regmap, reg_info.reg, val);
+ if (ret)
+ return ret;
+ *val &= reg_info.mask;
+ *val >>= reg_info.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int afe4404_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ return regmap_update_bits(afe->regmap,
+ reg_info.offreg,
+ reg_info.mask,
+ (val << reg_info.shift));
+ }
+ break;
+ case IIO_CURRENT:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return regmap_update_bits(afe->regmap,
+ reg_info.reg,
+ reg_info.mask,
+ (val << reg_info.shift));
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info afe4404_iio_info = {
+ .attrs = &afe440x_attribute_group,
+ .read_raw = afe4404_read_raw,
+ .write_raw = afe4404_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t afe4404_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret, bit, i = 0;
+ s32 buffer[10];
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = regmap_read(afe->regmap, afe4404_reg_info[bit].reg,
+ &buffer[i++]);
+ if (ret)
+ goto err;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_trigger_ops afe4404_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+/* Default timings from datasheet */
+#define AFE4404_TIMING_PAIRS \
+ { AFE440X_PRPCOUNT, 39999 }, \
+ { AFE440X_LED2LEDSTC, 0 }, \
+ { AFE440X_LED2LEDENDC, 398 }, \
+ { AFE440X_LED2STC, 80 }, \
+ { AFE440X_LED2ENDC, 398 }, \
+ { AFE440X_ADCRSTSTCT0, 5600 }, \
+ { AFE440X_ADCRSTENDCT0, 5606 }, \
+ { AFE440X_LED2CONVST, 5607 }, \
+ { AFE440X_LED2CONVEND, 6066 }, \
+ { AFE4404_LED3LEDSTC, 400 }, \
+ { AFE4404_LED3LEDENDC, 798 }, \
+ { AFE440X_ALED2STC, 480 }, \
+ { AFE440X_ALED2ENDC, 798 }, \
+ { AFE440X_ADCRSTSTCT1, 6068 }, \
+ { AFE440X_ADCRSTENDCT1, 6074 }, \
+ { AFE440X_ALED2CONVST, 6075 }, \
+ { AFE440X_ALED2CONVEND, 6534 }, \
+ { AFE440X_LED1LEDSTC, 800 }, \
+ { AFE440X_LED1LEDENDC, 1198 }, \
+ { AFE440X_LED1STC, 880 }, \
+ { AFE440X_LED1ENDC, 1198 }, \
+ { AFE440X_ADCRSTSTCT2, 6536 }, \
+ { AFE440X_ADCRSTENDCT2, 6542 }, \
+ { AFE440X_LED1CONVST, 6543 }, \
+ { AFE440X_LED1CONVEND, 7003 }, \
+ { AFE440X_ALED1STC, 1280 }, \
+ { AFE440X_ALED1ENDC, 1598 }, \
+ { AFE440X_ADCRSTSTCT3, 7005 }, \
+ { AFE440X_ADCRSTENDCT3, 7011 }, \
+ { AFE440X_ALED1CONVST, 7012 }, \
+ { AFE440X_ALED1CONVEND, 7471 }, \
+ { AFE440X_PDNCYCLESTC, 7671 }, \
+ { AFE440X_PDNCYCLEENDC, 39199 }
+
+static const struct reg_sequence afe4404_reg_sequences[] = {
+ AFE4404_TIMING_PAIRS,
+ { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN },
+ { AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES_50_K },
+ { AFE440X_LEDCNTRL, (0xf << AFE4404_LEDCNTRL_ILED1_SHIFT) |
+ (0x3 << AFE4404_LEDCNTRL_ILED2_SHIFT) |
+ (0x3 << AFE4404_LEDCNTRL_ILED3_SHIFT) },
+ { AFE440X_CONTROL2, AFE440X_CONTROL3_OSC_ENABLE },
+};
+
+static const struct regmap_range afe4404_yes_ranges[] = {
+ regmap_reg_range(AFE440X_LED2VAL, AFE440X_LED1_ALED1VAL),
+ regmap_reg_range(AFE4404_AVG_LED2_ALED2VAL, AFE4404_AVG_LED1_ALED1VAL),
+};
+
+static const struct regmap_access_table afe4404_volatile_table = {
+ .yes_ranges = afe4404_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(afe4404_yes_ranges),
+};
+
+static const struct regmap_config afe4404_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 24,
+
+ .max_register = AFE4404_AVG_LED1_ALED1VAL,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &afe4404_volatile_table,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id afe4404_of_match[] = {
+ { .compatible = "ti,afe4404", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, afe4404_of_match);
+#endif
+
+static int __maybe_unused afe4404_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE,
+ AFE440X_CONTROL2_PDN_AFE);
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(afe->regulator);
+ if (ret) {
+ dev_err(dev, "Unable to disable regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused afe4404_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(afe->regulator);
+ if (ret) {
+ dev_err(dev, "Unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(afe4404_pm_ops, afe4404_suspend, afe4404_resume);
+
+static int afe4404_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct afe4404_data *afe;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ afe = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ afe->dev = &client->dev;
+ afe->irq = client->irq;
+
+ afe->regmap = devm_regmap_init_i2c(client, &afe4404_regmap_config);
+ if (IS_ERR(afe->regmap)) {
+ dev_err(afe->dev, "Unable to allocate register map\n");
+ return PTR_ERR(afe->regmap);
+ }
+
+ afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+ if (IS_ERR(afe->regulator)) {
+ dev_err(afe->dev, "Unable to get regulator\n");
+ return PTR_ERR(afe->regulator);
+ }
+ ret = regulator_enable(afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
+ AFE440X_CONTROL0_SW_RESET);
+ if (ret) {
+ dev_err(afe->dev, "Unable to reset device\n");
+ goto disable_reg;
+ }
+
+ ret = regmap_multi_reg_write(afe->regmap, afe4404_reg_sequences,
+ ARRAY_SIZE(afe4404_reg_sequences));
+ if (ret) {
+ dev_err(afe->dev, "Unable to set register defaults\n");
+ goto disable_reg;
+ }
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = afe->dev;
+ indio_dev->channels = afe4404_channels;
+ indio_dev->num_channels = ARRAY_SIZE(afe4404_channels);
+ indio_dev->name = AFE4404_DRIVER_NAME;
+ indio_dev->info = &afe4404_iio_info;
+
+ if (afe->irq > 0) {
+ afe->trig = devm_iio_trigger_alloc(afe->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!afe->trig) {
+ dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+ ret = -ENOMEM;
+ goto disable_reg;
+ }
+
+ iio_trigger_set_drvdata(afe->trig, indio_dev);
+
+ afe->trig->ops = &afe4404_trigger_ops;
+ afe->trig->dev.parent = afe->dev;
+
+ ret = iio_trigger_register(afe->trig);
+ if (ret) {
+ dev_err(afe->dev, "Unable to register IIO trigger\n");
+ goto disable_reg;
+ }
+
+ ret = devm_request_threaded_irq(afe->dev, afe->irq,
+ iio_trigger_generic_data_rdy_poll,
+ NULL, IRQF_ONESHOT,
+ AFE4404_DRIVER_NAME,
+ afe->trig);
+ if (ret) {
+ dev_err(afe->dev, "Unable to request IRQ\n");
+ goto disable_reg;
+ }
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ afe4404_trigger_handler, NULL);
+ if (ret) {
+ dev_err(afe->dev, "Unable to setup buffer\n");
+ goto unregister_trigger;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(afe->dev, "Unable to register IIO device\n");
+ goto unregister_triggered_buffer;
+ }
+
+ return 0;
+
+unregister_triggered_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+unregister_trigger:
+ if (afe->irq > 0)
+ iio_trigger_unregister(afe->trig);
+disable_reg:
+ regulator_disable(afe->regulator);
+
+ return ret;
+}
+
+static int afe4404_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ iio_device_unregister(indio_dev);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ if (afe->irq > 0)
+ iio_trigger_unregister(afe->trig);
+
+ ret = regulator_disable(afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to disable regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id afe4404_ids[] = {
+ { "afe4404", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, afe4404_ids);
+
+static struct i2c_driver afe4404_i2c_driver = {
+ .driver = {
+ .name = AFE4404_DRIVER_NAME,
+ .of_match_table = of_match_ptr(afe4404_of_match),
+ .pm = &afe4404_pm_ops,
+ },
+ .probe = afe4404_probe,
+ .remove = afe4404_remove,
+ .id_table = afe4404_ids,
+};
+module_i2c_driver(afe4404_i2c_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TI AFE4404 Heart Rate and Pulse Oximeter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/health/afe440x.h b/drivers/iio/health/afe440x.h
new file mode 100644
index 000000000000..c671ab78a23a
--- /dev/null
+++ b/drivers/iio/health/afe440x.h
@@ -0,0 +1,191 @@
+/*
+ * AFE440X Heart Rate Monitors and Low-Cost Pulse Oximeters
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _AFE440X_H
+#define _AFE440X_H
+
+/* AFE440X registers */
+#define AFE440X_CONTROL0 0x00
+#define AFE440X_LED2STC 0x01
+#define AFE440X_LED2ENDC 0x02
+#define AFE440X_LED1LEDSTC 0x03
+#define AFE440X_LED1LEDENDC 0x04
+#define AFE440X_ALED2STC 0x05
+#define AFE440X_ALED2ENDC 0x06
+#define AFE440X_LED1STC 0x07
+#define AFE440X_LED1ENDC 0x08
+#define AFE440X_LED2LEDSTC 0x09
+#define AFE440X_LED2LEDENDC 0x0a
+#define AFE440X_ALED1STC 0x0b
+#define AFE440X_ALED1ENDC 0x0c
+#define AFE440X_LED2CONVST 0x0d
+#define AFE440X_LED2CONVEND 0x0e
+#define AFE440X_ALED2CONVST 0x0f
+#define AFE440X_ALED2CONVEND 0x10
+#define AFE440X_LED1CONVST 0x11
+#define AFE440X_LED1CONVEND 0x12
+#define AFE440X_ALED1CONVST 0x13
+#define AFE440X_ALED1CONVEND 0x14
+#define AFE440X_ADCRSTSTCT0 0x15
+#define AFE440X_ADCRSTENDCT0 0x16
+#define AFE440X_ADCRSTSTCT1 0x17
+#define AFE440X_ADCRSTENDCT1 0x18
+#define AFE440X_ADCRSTSTCT2 0x19
+#define AFE440X_ADCRSTENDCT2 0x1a
+#define AFE440X_ADCRSTSTCT3 0x1b
+#define AFE440X_ADCRSTENDCT3 0x1c
+#define AFE440X_PRPCOUNT 0x1d
+#define AFE440X_CONTROL1 0x1e
+#define AFE440X_LEDCNTRL 0x22
+#define AFE440X_CONTROL2 0x23
+#define AFE440X_ALARM 0x29
+#define AFE440X_LED2VAL 0x2a
+#define AFE440X_ALED2VAL 0x2b
+#define AFE440X_LED1VAL 0x2c
+#define AFE440X_ALED1VAL 0x2d
+#define AFE440X_LED2_ALED2VAL 0x2e
+#define AFE440X_LED1_ALED1VAL 0x2f
+#define AFE440X_CONTROL3 0x31
+#define AFE440X_PDNCYCLESTC 0x32
+#define AFE440X_PDNCYCLEENDC 0x33
+
+/* CONTROL0 register fields */
+#define AFE440X_CONTROL0_REG_READ BIT(0)
+#define AFE440X_CONTROL0_TM_COUNT_RST BIT(1)
+#define AFE440X_CONTROL0_SW_RESET BIT(3)
+
+/* CONTROL1 register fields */
+#define AFE440X_CONTROL1_TIMEREN BIT(8)
+
+/* TIAGAIN register fields */
+#define AFE440X_TIAGAIN_ENSEPGAIN_MASK BIT(15)
+#define AFE440X_TIAGAIN_ENSEPGAIN_SHIFT 15
+
+/* CONTROL2 register fields */
+#define AFE440X_CONTROL2_PDN_AFE BIT(0)
+#define AFE440X_CONTROL2_PDN_RX BIT(1)
+#define AFE440X_CONTROL2_DYNAMIC4 BIT(3)
+#define AFE440X_CONTROL2_DYNAMIC3 BIT(4)
+#define AFE440X_CONTROL2_DYNAMIC2 BIT(14)
+#define AFE440X_CONTROL2_DYNAMIC1 BIT(20)
+
+/* CONTROL3 register fields */
+#define AFE440X_CONTROL3_CLKDIV GENMASK(2, 0)
+
+/* CONTROL0 values */
+#define AFE440X_CONTROL0_WRITE 0x0
+#define AFE440X_CONTROL0_READ 0x1
+
+struct afe440x_reg_info {
+ unsigned int reg;
+ unsigned int offreg;
+ unsigned int shift;
+ unsigned int mask;
+};
+
+#define AFE440X_REG_INFO(_reg, _offreg, _sm) \
+ { \
+ .reg = _reg, \
+ .offreg = _offreg, \
+ .shift = _sm ## _SHIFT, \
+ .mask = _sm ## _MASK, \
+ }
+
+#define AFE440X_INTENSITY_CHAN(_index, _name, _mask) \
+ { \
+ .type = IIO_INTENSITY, \
+ .channel = _index, \
+ .address = _index, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 24, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ _mask, \
+ }
+
+#define AFE440X_CURRENT_CHAN(_index, _name) \
+ { \
+ .type = IIO_CURRENT, \
+ .channel = _index, \
+ .address = _index, \
+ .scan_index = _index, \
+ .extend_name = _name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .output = true, \
+ }
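
As a rough illustration of how these channel macros are meant to be instantiated (the indices and names below are hypothetical, not the driver's actual channel table):

    static const struct iio_chan_spec example_channels[] = {
            /* 24-bit signed ADC reading, with an extra offset control */
            AFE440X_INTENSITY_CHAN(0, "led2", BIT(IIO_CHAN_INFO_OFFSET)),
            /* output channel driving an LED current DAC */
            AFE440X_CURRENT_CHAN(1, "led2"),
    };
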
+
+enum afe440x_reg_type {
+ SIMPLE,
+ RESISTANCE,
+ CAPACITANCE,
+};
+
+struct afe440x_val_table {
+ int integer;
+ int fract;
+};
+
+#define AFE440X_TABLE_ATTR(_name, _table) \
+static ssize_t _name ## _show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ ssize_t len = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(_table); i++) \
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ", \
+ _table[i].integer, \
+ _table[i].fract); \
+ \
+ buf[len - 1] = '\n'; \
+ \
+ return len; \
+} \
+static DEVICE_ATTR_RO(_name)
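
For example, a hypothetical instantiation (table name and values invented for illustration) produces a read-only sysfs attribute whose show routine prints each entry as integer.fract:

    static const struct afe440x_val_table example_gains[] = {
            { .integer = 0, .fract = 500000 },      /* printed as "0.500000" */
            { .integer = 1, .fract = 0 },           /* printed as "1.000000" */
    };
    AFE440X_TABLE_ATTR(example_gains_available, example_gains);
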
+
+struct afe440x_attr {
+ struct device_attribute dev_attr;
+ unsigned int reg;
+ unsigned int shift;
+ unsigned int mask;
+ enum afe440x_reg_type type;
+ const struct afe440x_val_table *val_table;
+ unsigned int table_size;
+};
+
+#define to_afe440x_attr(_dev_attr) \
+ container_of(_dev_attr, struct afe440x_attr, dev_attr)
+
+#define AFE440X_ATTR(_name, _reg, _field, _type, _table, _size) \
+ struct afe440x_attr afe440x_attr_##_name = { \
+ .dev_attr = __ATTR(_name, (S_IRUGO | S_IWUSR), \
+ afe440x_show_register, \
+ afe440x_store_register), \
+ .reg = _reg, \
+ .shift = _field ## _SHIFT, \
+ .mask = _field ## _MASK, \
+ .type = _type, \
+ .val_table = _table, \
+ .table_size = _size, \
+ }
+
+#endif /* _AFE440X_H */
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 9d1c81f91dd7..90ab8a2d2846 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -13,7 +13,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * TODO: allow LED current and pulse length controls via device tree properties
+ * TODO: enable pulse length controls via device tree properties
*/
#include <linux/module.h>
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -65,6 +66,7 @@
#define MAX30100_REG_SPO2_CONFIG_1600US 0x3
#define MAX30100_REG_LED_CONFIG 0x09
+#define MAX30100_REG_LED_CONFIG_LED_MASK 0x0f
#define MAX30100_REG_LED_CONFIG_RED_LED_SHIFT 4
#define MAX30100_REG_LED_CONFIG_24MA 0x07
@@ -111,6 +113,12 @@ static const struct regmap_config max30100_regmap_config = {
.volatile_reg = max30100_is_volatile_reg,
};
+static const unsigned int max30100_led_current_mapping[] = {
+ 4400, 7600, 11000, 14200, 17400,
+ 20800, 24000, 27100, 30600, 33800,
+ 37000, 40200, 43600, 46800, 50000
+};
+
static const unsigned long max30100_scan_masks[] = {0x3, 0};
static const struct iio_chan_spec max30100_channels[] = {
@@ -230,12 +238,13 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
mutex_lock(&data->lock);
- while (cnt-- || (cnt = max30100_fifo_count(data) > 0)) {
+ while (cnt || ((cnt = max30100_fifo_count(data)) > 0)) {
ret = max30100_read_measurement(data);
if (ret)
break;
iio_push_to_buffers(data->indio_dev, data->buffer);
+ cnt--;
}
mutex_unlock(&data->lock);
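
The reworked loop drains the FIFO in bursts: cnt is decremented only after a sample has actually been read and pushed, and the hardware FIFO counter is consulted again only once cnt reaches zero, so a single interrupt empties everything the chip has buffered.
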
@@ -243,15 +252,76 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
return IRQ_HANDLED;
}
+static int max30100_get_current_idx(unsigned int val, int *reg)
+{
+ int idx;
+
+ /* LED turned off */
+ if (val == 0) {
+ *reg = 0;
+ return 0;
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(max30100_led_current_mapping); idx++) {
+ if (max30100_led_current_mapping[idx] == val) {
+ *reg = idx + 1;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
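
A worked example of the lookup: 24000 µA sits at index 6 of the mapping table, so the register field becomes 7 (MAX30100_REG_LED_CONFIG_24MA); zero switches the LED off, and any value not in the table is rejected:

    int reg;

    max30100_get_current_idx(24000, &reg);  /* reg == 0x07 */
    max30100_get_current_idx(0, &reg);      /* reg == 0x00, LED off */
    max30100_get_current_idx(25000, &reg);  /* returns -EINVAL */
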
+
+static int max30100_led_init(struct max30100_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int val[2];
+ int reg, ret;
+
+ ret = of_property_read_u32_array(np, "maxim,led-current-microamp",
+ val, 2);
+ if (ret) {
+ /* Default to 24 mA RED LED, 50 mA IR LED */
+ reg = (MAX30100_REG_LED_CONFIG_24MA <<
+ MAX30100_REG_LED_CONFIG_RED_LED_SHIFT) |
+ MAX30100_REG_LED_CONFIG_50MA;
+ dev_warn(dev, "no led-current-microamp set");
+
+ return regmap_write(data->regmap, MAX30100_REG_LED_CONFIG, reg);
+ }
+
+ /* RED LED current */
+ ret = max30100_get_current_idx(val[0], &reg);
+ if (ret) {
+ dev_err(dev, "invalid RED current setting %d", val[0]);
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, MAX30100_REG_LED_CONFIG,
+ MAX30100_REG_LED_CONFIG_LED_MASK <<
+ MAX30100_REG_LED_CONFIG_RED_LED_SHIFT,
+ reg << MAX30100_REG_LED_CONFIG_RED_LED_SHIFT);
+ if (ret)
+ return ret;
+
+ /* IR LED current */
+ ret = max30100_get_current_idx(val[1], &reg);
+ if (ret) {
+ dev_err(dev, "invalid IR current setting %d", val[1]);
+ return ret;
+ }
+
+ return regmap_update_bits(data->regmap, MAX30100_REG_LED_CONFIG,
+ MAX30100_REG_LED_CONFIG_LED_MASK, reg);
+}
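
The property is read as a pair of microamp values, RED first and then IR, so a board's device tree would carry something like `maxim,led-current-microamp = <24000 50000>;`; when it is absent the driver warns and falls back to the previous fixed 24 mA / 50 mA configuration.
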
+
static int max30100_chip_init(struct max30100_data *data)
{
int ret;
- /* RED IR LED = 24mA, IR LED = 50mA */
- ret = regmap_write(data->regmap, MAX30100_REG_LED_CONFIG,
- (MAX30100_REG_LED_CONFIG_24MA <<
- MAX30100_REG_LED_CONFIG_RED_LED_SHIFT) |
- MAX30100_REG_LED_CONFIG_50MA);
+ /* setup LED current settings */
+ ret = max30100_led_init(data);
if (ret)
return ret;
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 6a23698d347c..866dda133336 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -43,14 +43,16 @@ config SI7005
humidity and temperature sensor.
To compile this driver as a module, choose M here: the module
- will be called si7005.
+ will be called si7005. This driver also supports
+ the Hoperf TH02 humidity and temperature sensor.
config SI7020
tristate "Si7013/20/21 Relative Humidity and Temperature Sensors"
depends on I2C
help
Say yes here to build support for the Silicon Labs Si7013/20/21
- Relative Humidity and Temperature Sensors.
+ Relative Humidity and Temperature Sensors. This driver also
+ supports the Hoperf TH06 humidity and temperature sensor.
To compile this driver as a module, choose M here: the module
will be called si7020.
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index cfc5a051ab9f..20b500da94db 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -50,12 +50,32 @@
#define DHT11_EDGES_PER_READ (2 * DHT11_BITS_PER_READ + \
DHT11_EDGES_PREAMBLE + 1)
-/* Data transmission timing (nano seconds) */
+/*
+ * Data transmission timing:
+ * Data bits are encoded as pulse length (high time) on the data line.
+ * 0-bit: 22-30uS -- typically 26uS (AM2302)
+ * 1-bit: 68-75uS -- typically 70uS (AM2302)
+ * The actual timings also depend on the properties of the cable, with
+ * longer cables typically making pulses shorter.
+ *
+ * Our decoding depends on the time resolution of the system:
+ * timeres > 34uS ... don't know what a 1-tick pulse is
+ * 34uS > timeres > 30uS ... no problem (30kHz and 32kHz clocks)
+ * 30uS > timeres > 23uS ... don't know what a 2-tick pulse is
+ * timeres < 23uS ... no problem
+ *
+ * Luckily clocks in the 33-44kHz range are quite uncommon, so we can
+ * support most systems if the threshold for decoding a pulse as 1-bit
+ * is chosen carefully. If somebody really wants to support clocks around
+ * 40kHz, where this driver is most unreliable, there are two options.
+ * a) select an implementation using busy loop polling on those systems
+ * b) use the checksum to do some probabilistic decoding
+ */
#define DHT11_START_TRANSMISSION 18 /* ms */
-#define DHT11_SENSOR_RESPONSE 80000
-#define DHT11_START_BIT 50000
-#define DHT11_DATA_BIT_LOW 27000
-#define DHT11_DATA_BIT_HIGH 70000
+#define DHT11_MIN_TIMERES 34000 /* ns */
+#define DHT11_THRESHOLD 49000 /* ns */
+#define DHT11_AMBIG_LOW 23000 /* ns */
+#define DHT11_AMBIG_HIGH 30000 /* ns */
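
To make the threshold concrete: with a 32.768 kHz clock (timeres ≈ 30.5 µs), a typical 26 µs 0-bit spans at most one tick, so its measured high time is at most about 30.5 µs, well under DHT11_THRESHOLD; a 70 µs 1-bit spans two or three ticks, at least about 61 µs, well over it. The 49 µs threshold therefore cleanly separates the two for the clock rates singled out above.
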
struct dht11 {
struct device *dev;
@@ -76,43 +96,39 @@ struct dht11 {
struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ];
};
-static unsigned char dht11_decode_byte(int *timing, int threshold)
+static unsigned char dht11_decode_byte(char *bits)
{
unsigned char ret = 0;
int i;
for (i = 0; i < 8; ++i) {
ret <<= 1;
- if (timing[i] >= threshold)
+ if (bits[i])
++ret;
}
return ret;
}
-static int dht11_decode(struct dht11 *dht11, int offset, int timeres)
+static int dht11_decode(struct dht11 *dht11, int offset)
{
- int i, t, timing[DHT11_BITS_PER_READ], threshold;
+ int i, t;
+ char bits[DHT11_BITS_PER_READ];
unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
- threshold = DHT11_DATA_BIT_HIGH / timeres;
- if (DHT11_DATA_BIT_LOW / timeres + 1 >= threshold)
- pr_err("dht11: WARNING: decoding ambiguous\n");
-
- /* scale down with timeres and check validity */
for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
t = dht11->edges[offset + 2 * i + 2].ts -
dht11->edges[offset + 2 * i + 1].ts;
if (!dht11->edges[offset + 2 * i + 1].value)
return -EIO; /* lost synchronisation */
- timing[i] = t / timeres;
+ bits[i] = t > DHT11_THRESHOLD;
}
- hum_int = dht11_decode_byte(timing, threshold);
- hum_dec = dht11_decode_byte(&timing[8], threshold);
- temp_int = dht11_decode_byte(&timing[16], threshold);
- temp_dec = dht11_decode_byte(&timing[24], threshold);
- checksum = dht11_decode_byte(&timing[32], threshold);
+ hum_int = dht11_decode_byte(bits);
+ hum_dec = dht11_decode_byte(&bits[8]);
+ temp_int = dht11_decode_byte(&bits[16]);
+ temp_dec = dht11_decode_byte(&bits[24]);
+ checksum = dht11_decode_byte(&bits[32]);
if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
return -EIO;
@@ -161,12 +177,12 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
int *val, int *val2, long m)
{
struct dht11 *dht11 = iio_priv(iio_dev);
- int ret, timeres;
+ int ret, timeres, offset;
mutex_lock(&dht11->lock);
if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
timeres = ktime_get_resolution_ns();
- if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
+ if (timeres > DHT11_MIN_TIMERES) {
dev_err(dht11->dev, "timeresolution %dns too low\n",
timeres);
/* In theory a better clock could become available
@@ -176,6 +192,10 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
ret = -EAGAIN;
goto err;
}
+ if (timeres > DHT11_AMBIG_LOW && timeres < DHT11_AMBIG_HIGH)
+ dev_warn(dht11->dev,
+ "timeresolution: %dns - decoding ambiguous\n",
+ timeres);
reinit_completion(&dht11->completion);
@@ -208,11 +228,14 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
if (ret < 0)
goto err;
- ret = dht11_decode(dht11,
- dht11->num_edges == DHT11_EDGES_PER_READ ?
- DHT11_EDGES_PREAMBLE :
- DHT11_EDGES_PREAMBLE - 2,
- timeres);
+ offset = DHT11_EDGES_PREAMBLE +
+ dht11->num_edges - DHT11_EDGES_PER_READ;
+ for (; offset >= 0; --offset) {
+ ret = dht11_decode(dht11, offset);
+ if (!ret)
+ break;
+ }
+
if (ret)
goto err;
}
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index a7f61e881a49..fa4767613173 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -274,7 +274,7 @@ static int hdc100x_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index d1636a74980e..11cbc38b450f 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -192,7 +192,7 @@ static int htu21_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
dev_err(&client->dev,
"Adapter does not support some i2c transaction\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index 91972ccd8aaf..6297766e93d0 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -135,7 +135,7 @@ static int si7005_probe(struct i2c_client *client,
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -170,6 +170,7 @@ static int si7005_probe(struct i2c_client *client,
static const struct i2c_device_id si7005_id[] = {
{ "si7005", 0 },
+ { "th02", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, si7005_id);
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 71991b5c0658..ffc2ccf6374e 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -121,7 +121,7 @@ static int si7020_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WRITE_BYTE |
I2C_FUNC_SMBUS_READ_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
/* Reset device, loads default settings. */
ret = i2c_smbus_write_byte(client, SI7020CMD_RESET);
@@ -149,6 +149,7 @@ static int si7020_probe(struct i2c_client *client,
static const struct i2c_device_id si7020_id[] = {
{ "si7020", 0 },
+ { "th06", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, si7020_id);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 8f8d1370ed8b..847455a2d6bb 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -3,15 +3,30 @@
#
config INV_MPU6050_IIO
- tristate "Invensense MPU6050 devices"
- depends on I2C && SYSFS
- depends on I2C_MUX
+ tristate
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+
+config INV_MPU6050_I2C
+ tristate "Invensense MPU6050 devices (I2C)"
+ depends on I2C_MUX
+ select INV_MPU6050_IIO
+ select REGMAP_I2C
help
This driver supports the Invensense MPU6050 devices.
This driver can also support MPU6500 in MPU6050 compatibility mode
and also in MPU6500 mode with some limitations.
It is a gyroscope/accelerometer combo device.
This driver can be built as a module. The module will be called
- inv-mpu6050.
+ inv-mpu6050-i2c.
+
+config INV_MPU6050_SPI
+ tristate "Invensense MPU6050 devices (SPI)"
+ depends on SPI_MASTER
+ select INV_MPU6050_IIO
+ select REGMAP_SPI
+ help
+ This driver supports the Invensense MPU6050 devices.
+ It is a gyroscope/accelerometer combo device.
+ This driver can be built as a module. The module will be called
+ inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
index f566f6a7b3a9..734af5e6cef9 100644
--- a/drivers/iio/imu/inv_mpu6050/Makefile
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -3,4 +3,10 @@
#
obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o
-inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o inv_mpu_acpi.o
+inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
+
+obj-$(CONFIG_INV_MPU6050_I2C) += inv-mpu6050-i2c.o
+inv-mpu6050-i2c-objs := inv_mpu_i2c.o inv_mpu_acpi.o
+
+obj-$(CONFIG_INV_MPU6050_SPI) += inv-mpu6050-spi.o
+inv-mpu6050-spi-objs := inv_mpu_spi.o
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 1c982a56acd5..2771106fd650 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -66,11 +66,11 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
union acpi_object *elem;
int j;
- elem = &(cpm->package.elements[i]);
+ elem = &cpm->package.elements[i];
for (j = 0; j < elem->package.count; ++j) {
union acpi_object *sub_elem;
- sub_elem = &(elem->package.elements[j]);
+ sub_elem = &elem->package.elements[j];
if (sub_elem->type == ACPI_TYPE_STRING)
strlcpy(info->type, sub_elem->string.pointer,
sizeof(info->type));
@@ -139,22 +139,23 @@ static int inv_mpu_process_acpi_config(struct i2c_client *client,
return 0;
}
-int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
+int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
{
+ struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev));
st->mux_client = NULL;
- if (ACPI_HANDLE(&st->client->dev)) {
+ if (ACPI_HANDLE(&client->dev)) {
struct i2c_board_info info;
struct acpi_device *adev;
int ret = -1;
- adev = ACPI_COMPANION(&st->client->dev);
+ adev = ACPI_COMPANION(&client->dev);
memset(&info, 0, sizeof(info));
dmi_check_system(inv_mpu_dev_list);
switch (matched_product_name) {
case INV_MPU_ASUS_T100TA:
- ret = asus_acpi_get_sensor_info(adev, st->client,
+ ret = asus_acpi_get_sensor_info(adev, client,
&info);
break;
/* Add more matched product processing here */
@@ -166,7 +167,7 @@ int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
/* No matching DMI, so create device on INV6XX type */
unsigned short primary, secondary;
- ret = inv_mpu_process_acpi_config(st->client, &primary,
+ ret = inv_mpu_process_acpi_config(client, &primary,
&secondary);
if (!ret && secondary) {
char *name;
@@ -185,14 +186,15 @@ int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
st->mux_client = i2c_new_device(st->mux_adapter, &info);
if (!st->mux_client)
return -ENODEV;
-
}
return 0;
}
-void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st)
+void inv_mpu_acpi_delete_mux_client(struct i2c_client *client)
{
+ struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev));
+
if (st->mux_client)
i2c_unregister_device(st->mux_client);
}
@@ -200,12 +202,12 @@ void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st)
#include "inv_mpu_iio.h"
-int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
+int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
{
return 0;
}
-void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st)
+void inv_mpu_acpi_delete_mux_client(struct i2c_client *client)
{
}
#endif
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index f0e06093b5e8..d192953e9a38 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -39,6 +39,26 @@ static const int gyro_scale_6050[] = {133090, 266181, 532362, 1064724};
*/
static const int accel_scale[] = {598, 1196, 2392, 4785};
+static const struct inv_mpu6050_reg_map reg_set_6500 = {
+ .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
+ .lpf = INV_MPU6050_REG_CONFIG,
+ .user_ctrl = INV_MPU6050_REG_USER_CTRL,
+ .fifo_en = INV_MPU6050_REG_FIFO_EN,
+ .gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
+ .accl_config = INV_MPU6050_REG_ACCEL_CONFIG,
+ .fifo_count_h = INV_MPU6050_REG_FIFO_COUNT_H,
+ .fifo_r_w = INV_MPU6050_REG_FIFO_R_W,
+ .raw_gyro = INV_MPU6050_REG_RAW_GYRO,
+ .raw_accl = INV_MPU6050_REG_RAW_ACCEL,
+ .temperature = INV_MPU6050_REG_TEMPERATURE,
+ .int_enable = INV_MPU6050_REG_INT_ENABLE,
+ .pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
+ .pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
+ .int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG,
+ .accl_offset = INV_MPU6500_REG_ACCEL_OFFSET,
+ .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET,
+};
+
static const struct inv_mpu6050_reg_map reg_set_6050 = {
.sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
.lpf = INV_MPU6050_REG_CONFIG,
@@ -55,6 +75,8 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = {
.pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
.pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
.int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG,
+ .accl_offset = INV_MPU6050_REG_ACCEL_OFFSET,
+ .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET,
};
static const struct inv_mpu6050_chip_config chip_config_6050 = {
@@ -66,7 +88,13 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.accl_fs = INV_MPU6050_FS_02G,
};
-static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = {
+static const struct inv_mpu6050_hw hw_info[] = {
+ {
+ .num_reg = 117,
+ .name = "MPU6500",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6050,
+ },
{
.num_reg = 117,
.name = "MPU6050",
@@ -75,134 +103,53 @@ static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = {
},
};
-int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 d)
-{
- return i2c_smbus_write_i2c_block_data(st->client, reg, 1, &d);
-}
-
-/*
- * The i2c read/write needs to happen in unlocked mode. As the parent
- * adapter is common. If we use locked versions, it will fail as
- * the mux adapter will lock the parent i2c adapter, while calling
- * select/deselect functions.
- */
-static int inv_mpu6050_write_reg_unlocked(struct inv_mpu6050_state *st,
- u8 reg, u8 d)
-{
- int ret;
- u8 buf[2];
- struct i2c_msg msg[1] = {
- {
- .addr = st->client->addr,
- .flags = 0,
- .len = sizeof(buf),
- .buf = buf,
- }
- };
-
- buf[0] = reg;
- buf[1] = d;
- ret = __i2c_transfer(st->client->adapter, msg, 1);
- if (ret != 1)
- return ret;
-
- return 0;
-}
-
-static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
- u32 chan_id)
-{
- struct iio_dev *indio_dev = mux_priv;
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
- int ret = 0;
-
- /* Use the same mutex which was used everywhere to protect power-op */
- mutex_lock(&indio_dev->mlock);
- if (!st->powerup_count) {
- ret = inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1,
- 0);
- if (ret)
- goto write_error;
-
- msleep(INV_MPU6050_REG_UP_TIME);
- }
- if (!ret) {
- st->powerup_count++;
- ret = inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg,
- st->client->irq |
- INV_MPU6050_BIT_BYPASS_EN);
- }
-write_error:
- mutex_unlock(&indio_dev->mlock);
-
- return ret;
-}
-
-static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
- void *mux_priv, u32 chan_id)
-{
- struct iio_dev *indio_dev = mux_priv;
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
-
- mutex_lock(&indio_dev->mlock);
- /* It doesn't really mattter, if any of the calls fails */
- inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg,
- st->client->irq);
- st->powerup_count--;
- if (!st->powerup_count)
- inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_SLEEP);
- mutex_unlock(&indio_dev->mlock);
-
- return 0;
-}
-
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
{
- u8 d, mgmt_1;
+ unsigned int d, mgmt_1;
int result;
-
- /* switch clock needs to be careful. Only when gyro is on, can
- clock source be switched to gyro. Otherwise, it must be set to
- internal clock */
- if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
- result = i2c_smbus_read_i2c_block_data(st->client,
- st->reg->pwr_mgmt_1, 1, &mgmt_1);
- if (result != 1)
+ /*
+ * switch clock needs to be careful. Only when gyro is on, can
+ * clock source be switched to gyro. Otherwise, it must be set to
+ * internal clock
+ */
+ if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) {
+ result = regmap_read(st->map, st->reg->pwr_mgmt_1, &mgmt_1);
+ if (result)
return result;
mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK;
}
- if ((INV_MPU6050_BIT_PWR_GYRO_STBY == mask) && (!en)) {
- /* turning off gyro requires switch to internal clock first.
- Then turn off gyro engine */
+ if ((mask == INV_MPU6050_BIT_PWR_GYRO_STBY) && (!en)) {
+ /*
+ * turning off gyro requires switch to internal clock first.
+ * Then turn off gyro engine
+ */
mgmt_1 |= INV_CLK_INTERNAL;
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, mgmt_1);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_1, mgmt_1);
if (result)
return result;
}
- result = i2c_smbus_read_i2c_block_data(st->client,
- st->reg->pwr_mgmt_2, 1, &d);
- if (result != 1)
+ result = regmap_read(st->map, st->reg->pwr_mgmt_2, &d);
+ if (result)
return result;
if (en)
d &= ~mask;
else
d |= mask;
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_2, d);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_2, d);
if (result)
return result;
if (en) {
/* Wait for output stabilize */
msleep(INV_MPU6050_TEMP_UP_TIME);
- if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+ if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) {
/* switch internal clock to PLL */
mgmt_1 |= INV_CLK_PLL;
- result = inv_mpu6050_write_reg(st,
- st->reg->pwr_mgmt_1, mgmt_1);
+ result = regmap_write(st->map,
+ st->reg->pwr_mgmt_1, mgmt_1);
if (result)
return result;
}
@@ -218,25 +165,26 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
if (power_on) {
/* Already under indio-dev->mlock mutex */
if (!st->powerup_count)
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
- 0);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
if (!result)
st->powerup_count++;
} else {
st->powerup_count--;
if (!st->powerup_count)
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_SLEEP);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_SLEEP);
}
if (result)
return result;
if (power_on)
- msleep(INV_MPU6050_REG_UP_TIME);
+ usleep_range(INV_MPU6050_REG_UP_TIME_MIN,
+ INV_MPU6050_REG_UP_TIME_MAX);
return 0;
}
+EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
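
The power state is reference counted: only the transition of powerup_count between zero and one touches pwr_mgmt_1, so nested users (buffered capture, raw reads, the bypass mux) can request power independently, and the 5-10 ms usleep_range gives the part time to come out of sleep after the first power-up.
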
/**
* inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
@@ -257,59 +205,73 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
if (result)
return result;
d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
+ result = regmap_write(st->map, st->reg->gyro_config, d);
if (result)
return result;
d = INV_MPU6050_FILTER_20HZ;
- result = inv_mpu6050_write_reg(st, st->reg->lpf, d);
+ result = regmap_write(st->map, st->reg->lpf, d);
if (result)
return result;
d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1;
- result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+ result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
return result;
d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
+ result = regmap_write(st->map, st->reg->accl_config, d);
if (result)
return result;
memcpy(&st->chip_config, hw_info[st->chip_type].config,
- sizeof(struct inv_mpu6050_chip_config));
+ sizeof(struct inv_mpu6050_chip_config));
result = inv_mpu6050_set_power_itg(st, false);
return result;
}
+static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg,
+ int axis, int val)
+{
+ int ind, result;
+ __be16 d = cpu_to_be16(val);
+
+ ind = (axis - IIO_MOD_X) * 2;
+ result = regmap_bulk_write(st->map, reg + ind, (u8 *)&d, 2);
+ if (result)
+ return -EINVAL;
+
+ return 0;
+}
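
An illustrative call (the offset value is made up): programming the gyroscope X-axis offset writes the 16-bit value big-endian into the register pair starting at st->reg->gyro_offset:

    /* Illustrative only: set the gyro X-axis calibration offset to -42 LSB. */
    int result = inv_mpu6050_sensor_set(st, st->reg->gyro_offset,
                                        IIO_MOD_X, -42);
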
+
static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
- int axis, int *val)
+ int axis, int *val)
{
int ind, result;
__be16 d;
ind = (axis - IIO_MOD_X) * 2;
- result = i2c_smbus_read_i2c_block_data(st->client, reg + ind, 2,
- (u8 *)&d);
- if (result != 2)
+ result = regmap_bulk_read(st->map, reg + ind, (u8 *)&d, 2);
+ if (result)
return -EINVAL;
*val = (short)be16_to_cpup(&d);
return IIO_VAL_INT;
}
-static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask) {
+static int
+inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
{
- int ret, result;
+ int result;
ret = IIO_VAL_INT;
result = 0;
@@ -323,16 +285,16 @@ static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ANGL_VEL:
if (!st->chip_config.gyro_fifo_enable ||
- !st->chip_config.enable) {
+ !st->chip_config.enable) {
result = inv_mpu6050_switch_engine(st, true,
INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
goto error_read_raw;
}
- ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
- chan->channel2, val);
+ ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
+ chan->channel2, val);
if (!st->chip_config.gyro_fifo_enable ||
- !st->chip_config.enable) {
+ !st->chip_config.enable) {
result = inv_mpu6050_switch_engine(st, false,
INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
@@ -341,16 +303,16 @@ static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
break;
case IIO_ACCEL:
if (!st->chip_config.accl_fifo_enable ||
- !st->chip_config.enable) {
+ !st->chip_config.enable) {
result = inv_mpu6050_switch_engine(st, true,
INV_MPU6050_BIT_PWR_ACCL_STBY);
if (result)
goto error_read_raw;
}
ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
- chan->channel2, val);
+ chan->channel2, val);
if (!st->chip_config.accl_fifo_enable ||
- !st->chip_config.enable) {
+ !st->chip_config.enable) {
result = inv_mpu6050_switch_engine(st, false,
INV_MPU6050_BIT_PWR_ACCL_STBY);
if (result)
@@ -360,8 +322,8 @@ static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
case IIO_TEMP:
/* wait for stablization */
msleep(INV_MPU6050_SENSOR_UP_TIME);
- inv_mpu6050_sensor_show(st, st->reg->temperature,
- IIO_MOD_X, val);
+ ret = inv_mpu6050_sensor_show(st, st->reg->temperature,
+ IIO_MOD_X, val);
break;
default:
ret = -EINVAL;
@@ -405,6 +367,20 @@ error_read_raw:
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ return inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
+ chan->channel2, val);
+ case IIO_ACCEL:
+ return inv_mpu6050_sensor_show(st, st->reg->accl_offset,
+ chan->channel2, val);
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -418,8 +394,7 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
if (gyro_scale_6050[i] == val) {
d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st,
- st->reg->gyro_config, d);
+ result = regmap_write(st->map, st->reg->gyro_config, d);
if (result)
return result;
@@ -448,6 +423,7 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
return -EINVAL;
}
+
static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
{
int result, i;
@@ -456,8 +432,7 @@ static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
if (accel_scale[i] == val) {
d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st,
- st->reg->accl_config, d);
+ result = regmap_write(st->map, st->reg->accl_config, d);
if (result)
return result;
@@ -470,16 +445,17 @@ static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
}
static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask) {
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
int result;
mutex_lock(&indio_dev->mlock);
- /* we should only update scale when the chip is disabled, i.e.,
- not running */
+ /*
+ * we should only update scale when the chip is disabled, i.e.
+ * not running
+ */
if (st->chip_config.enable) {
result = -EBUSY;
goto error_write_raw;
@@ -502,6 +478,21 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
break;
}
break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ result = inv_mpu6050_sensor_set(st,
+ st->reg->gyro_offset,
+ chan->channel2, val);
+ break;
+ case IIO_ACCEL:
+ result = inv_mpu6050_sensor_set(st,
+ st->reg->accl_offset,
+ chan->channel2, val);
+ break;
+ default:
+ result = -EINVAL;
+ }
+ break;
default:
result = -EINVAL;
break;
@@ -537,7 +528,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
i++;
data = d[i];
- result = inv_mpu6050_write_reg(st, st->reg->lpf, data);
+ result = regmap_write(st->map, st->reg->lpf, data);
if (result)
return result;
st->chip_config.lpf = data;
@@ -548,8 +539,9 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
/**
* inv_mpu6050_fifo_rate_store() - Set fifo rate.
*/
-static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t
+inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
s32 fifo_rate;
u8 d;
@@ -560,7 +552,7 @@ static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
if (kstrtoint(buf, 10, &fifo_rate))
return -EINVAL;
if (fifo_rate < INV_MPU6050_MIN_FIFO_RATE ||
- fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
+ fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
return -EINVAL;
if (fifo_rate == st->chip_config.fifo_rate)
return count;
@@ -575,7 +567,7 @@ static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
goto fifo_rate_fail;
d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
- result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+ result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
goto fifo_rate_fail;
st->chip_config.fifo_rate = fifo_rate;
@@ -596,8 +588,9 @@ fifo_rate_fail:
/**
* inv_fifo_rate_show() - Get the current sampling rate.
*/
-static ssize_t inv_fifo_rate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t
+inv_fifo_rate_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
@@ -608,16 +601,18 @@ static ssize_t inv_fifo_rate_show(struct device *dev,
* inv_attr_show() - calling this function will show current
* parameters.
*/
-static ssize_t inv_attr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t inv_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
s8 *m;
switch (this_attr->address) {
- /* In MPU6050, the two matrix are the same because gyro and accel
- are integrated in one chip */
+ /*
+ * In MPU6050, the two matrix are the same because gyro and accel
+ * are integrated in one chip
+ */
case ATTR_GYRO_MATRIX:
case ATTR_ACCL_MATRIX:
m = st->plat_data.orientation;
@@ -654,14 +649,15 @@ static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
.type = _type, \
.modified = 1, \
.channel2 = _channel2, \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
.scan_index = _index, \
.scan_type = { \
.sign = 's', \
.realbits = 16, \
.storagebits = 16, \
- .shift = 0 , \
+ .shift = 0, \
.endianness = IIO_BE, \
}, \
}
@@ -674,7 +670,7 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
*/
{
.type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
| BIT(IIO_CHAN_INFO_OFFSET)
| BIT(IIO_CHAN_INFO_SCALE),
.scan_index = -1,
@@ -727,25 +723,25 @@ static const struct iio_info mpu_info = {
/**
* inv_check_and_setup_chip() - check and setup chip.
*/
-static int inv_check_and_setup_chip(struct inv_mpu6050_state *st,
- const struct i2c_device_id *id)
+static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
{
int result;
- st->chip_type = INV_MPU6050;
st->hw = &hw_info[st->chip_type];
st->reg = hw_info[st->chip_type].reg;
/* reset to make sure previous state are not there */
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_H_RESET);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_H_RESET);
if (result)
return result;
msleep(INV_MPU6050_POWER_UP_TIME);
- /* toggle power state. After reset, the sleep bit could be on
- or off depending on the OTP settings. Toggling power would
- make it in a definite state as well as making the hardware
- state align with the software state */
+ /*
+ * toggle power state. After reset, the sleep bit could be on
+ * or off depending on the OTP settings. Toggling power would
+ * make it in a definite state as well as making the hardware
+ * state align with the software state
+ */
result = inv_mpu6050_set_power_itg(st, false);
if (result)
return result;
@@ -754,65 +750,59 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st,
return result;
result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
if (result)
return result;
result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
return result;
return 0;
}
-/**
- * inv_mpu_probe() - probe function.
- * @client: i2c client.
- * @id: i2c device id.
- *
- * Returns 0 on success, a negative error code otherwise.
- */
-static int inv_mpu_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
+ int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type)
{
struct inv_mpu6050_state *st;
struct iio_dev *indio_dev;
struct inv_mpu6050_platform_data *pdata;
+ struct device *dev = regmap_get_device(regmap);
int result;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_I2C_BLOCK))
- return -ENOSYS;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->client = client;
+ st->chip_type = chip_type;
st->powerup_count = 0;
- pdata = dev_get_platdata(&client->dev);
+ st->irq = irq;
+ st->map = regmap;
+ pdata = dev_get_platdata(dev);
if (pdata)
st->plat_data = *pdata;
/* power is turned on inside check chip type*/
- result = inv_check_and_setup_chip(st, id);
+ result = inv_check_and_setup_chip(st);
if (result)
return result;
+ if (inv_mpu_bus_setup)
+ inv_mpu_bus_setup(indio_dev);
+
result = inv_mpu6050_init_config(indio_dev);
if (result) {
- dev_err(&client->dev,
- "Could not initialize device.\n");
+ dev_err(dev, "Could not initialize device.\n");
return result;
}
- i2c_set_clientdata(client, indio_dev);
- indio_dev->dev.parent = &client->dev;
- /* id will be NULL when enumerated via ACPI */
- if (id)
- indio_dev->name = (char *)id->name;
+ dev_set_drvdata(dev, indio_dev);
+ indio_dev->dev.parent = dev;
+ /* name will be NULL when enumerated via ACPI */
+ if (name)
+ indio_dev->name = name;
else
- indio_dev->name = (char *)dev_name(&client->dev);
+ indio_dev->name = dev_name(dev);
indio_dev->channels = inv_mpu_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
@@ -824,13 +814,12 @@ static int inv_mpu_probe(struct i2c_client *client,
inv_mpu6050_read_fifo,
NULL);
if (result) {
- dev_err(&st->client->dev, "configure buffer fail %d\n",
- result);
+ dev_err(dev, "configure buffer fail %d\n", result);
return result;
}
result = inv_mpu6050_probe_trigger(indio_dev);
if (result) {
- dev_err(&st->client->dev, "trigger probe fail %d\n", result);
+ dev_err(dev, "trigger probe fail %d\n", result);
goto out_unreg_ring;
}
@@ -838,102 +827,47 @@ static int inv_mpu_probe(struct i2c_client *client,
spin_lock_init(&st->time_stamp_lock);
result = iio_device_register(indio_dev);
if (result) {
- dev_err(&st->client->dev, "IIO register fail %d\n", result);
+ dev_err(dev, "IIO register fail %d\n", result);
goto out_remove_trigger;
}
- st->mux_adapter = i2c_add_mux_adapter(client->adapter,
- &client->dev,
- indio_dev,
- 0, 0, 0,
- inv_mpu6050_select_bypass,
- inv_mpu6050_deselect_bypass);
- if (!st->mux_adapter) {
- result = -ENODEV;
- goto out_unreg_device;
- }
-
- result = inv_mpu_acpi_create_mux_client(st);
- if (result)
- goto out_del_mux;
-
return 0;
-out_del_mux:
- i2c_del_mux_adapter(st->mux_adapter);
-out_unreg_device:
- iio_device_unregister(indio_dev);
out_remove_trigger:
inv_mpu6050_remove_trigger(st);
out_unreg_ring:
iio_triggered_buffer_cleanup(indio_dev);
return result;
}
+EXPORT_SYMBOL_GPL(inv_mpu_core_probe);
-static int inv_mpu_remove(struct i2c_client *client)
+int inv_mpu_core_remove(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
- inv_mpu_acpi_delete_mux_client(st);
- i2c_del_mux_adapter(st->mux_adapter);
iio_device_unregister(indio_dev);
- inv_mpu6050_remove_trigger(st);
+ inv_mpu6050_remove_trigger(iio_priv(indio_dev));
iio_triggered_buffer_cleanup(indio_dev);
return 0;
}
+EXPORT_SYMBOL_GPL(inv_mpu_core_remove);
+
#ifdef CONFIG_PM_SLEEP
static int inv_mpu_resume(struct device *dev)
{
- return inv_mpu6050_set_power_itg(
- iio_priv(i2c_get_clientdata(to_i2c_client(dev))), true);
+ return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), true);
}
static int inv_mpu_suspend(struct device *dev)
{
- return inv_mpu6050_set_power_itg(
- iio_priv(i2c_get_clientdata(to_i2c_client(dev))), false);
+ return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), false);
}
-static SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
-
-#define INV_MPU6050_PMOPS (&inv_mpu_pmops)
-#else
-#define INV_MPU6050_PMOPS NULL
#endif /* CONFIG_PM_SLEEP */
-/*
- * device id table is used to identify what device can be
- * supported by this driver
- */
-static const struct i2c_device_id inv_mpu_id[] = {
- {"mpu6050", INV_MPU6050},
- {"mpu6500", INV_MPU6500},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
-
-static const struct acpi_device_id inv_acpi_match[] = {
- {"INVN6500", 0},
- { },
-};
-
-MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
-
-static struct i2c_driver inv_mpu_driver = {
- .probe = inv_mpu_probe,
- .remove = inv_mpu_remove,
- .id_table = inv_mpu_id,
- .driver = {
- .name = "inv-mpu6050",
- .pm = INV_MPU6050_PMOPS,
- .acpi_match_table = ACPI_PTR(inv_acpi_match),
- },
-};
-
-module_i2c_driver(inv_mpu_driver);
+SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
+EXPORT_SYMBOL_GPL(inv_mpu_pmops);
MODULE_AUTHOR("Invensense Corporation");
MODULE_DESCRIPTION("Invensense device MPU6050 driver");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
new file mode 100644
index 000000000000..5ee4e0dc093e
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -0,0 +1,232 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include "inv_mpu_iio.h"
+
+static const struct regmap_config inv_mpu_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+/*
+ * The i2c read/write needs to happen in unlocked mode. As the parent
+ * adapter is common. If we use locked versions, it will fail as
+ * the mux adapter will lock the parent i2c adapter, while calling
+ * select/deselect functions.
+ */
+static int inv_mpu6050_write_reg_unlocked(struct i2c_client *client,
+ u8 reg, u8 d)
+{
+ int ret;
+ u8 buf[2] = {reg, d};
+ struct i2c_msg msg[1] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ ret = __i2c_transfer(client->adapter, msg, 1);
+ if (ret != 1)
+ return ret;
+
+ return 0;
+}
+
+static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
+ u32 chan_id)
+{
+ struct i2c_client *client = mux_priv;
+ struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret = 0;
+
+ /* Use the same mutex which was used everywhere to protect power-op */
+ mutex_lock(&indio_dev->mlock);
+ if (!st->powerup_count) {
+ ret = inv_mpu6050_write_reg_unlocked(client,
+ st->reg->pwr_mgmt_1, 0);
+ if (ret)
+ goto write_error;
+
+ usleep_range(INV_MPU6050_REG_UP_TIME_MIN,
+ INV_MPU6050_REG_UP_TIME_MAX);
+ }
+ if (!ret) {
+ st->powerup_count++;
+ ret = inv_mpu6050_write_reg_unlocked(client,
+ st->reg->int_pin_cfg,
+ INV_MPU6050_INT_PIN_CFG |
+ INV_MPU6050_BIT_BYPASS_EN);
+ }
+write_error:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
+ void *mux_priv, u32 chan_id)
+{
+ struct i2c_client *client = mux_priv;
+ struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ /* It doesn't really matter if any of these calls fail */
+ inv_mpu6050_write_reg_unlocked(client, st->reg->int_pin_cfg,
+ INV_MPU6050_INT_PIN_CFG);
+ st->powerup_count--;
+ if (!st->powerup_count)
+ inv_mpu6050_write_reg_unlocked(client, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_SLEEP);
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
+static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+
+ *chip_id = (int)id->driver_data;
+
+ return dev_name(dev);
+}
+
+/**
+ * inv_mpu_probe() - probe function.
+ * @client: i2c client.
+ * @id: i2c device id.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int inv_mpu_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct inv_mpu6050_state *st;
+ int result, chip_type;
+ struct regmap *regmap;
+ const char *name;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EOPNOTSUPP;
+
+ if (id) {
+ chip_type = (int)id->driver_data;
+ name = id->name;
+ } else if (ACPI_HANDLE(&client->dev)) {
+ name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
+ if (!name)
+ return -ENODEV;
+ } else {
+ return -ENOSYS;
+ }
+
+ regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ result = inv_mpu_core_probe(regmap, client->irq, name,
+ NULL, chip_type);
+ if (result < 0)
+ return result;
+
+ st = iio_priv(dev_get_drvdata(&client->dev));
+ st->mux_adapter = i2c_add_mux_adapter(client->adapter,
+ &client->dev,
+ client,
+ 0, 0, 0,
+ inv_mpu6050_select_bypass,
+ inv_mpu6050_deselect_bypass);
+ if (!st->mux_adapter) {
+ result = -ENODEV;
+ goto out_unreg_device;
+ }
+
+ result = inv_mpu_acpi_create_mux_client(client);
+ if (result)
+ goto out_del_mux;
+
+ return 0;
+
+out_del_mux:
+ i2c_del_mux_adapter(st->mux_adapter);
+out_unreg_device:
+ inv_mpu_core_remove(&client->dev);
+ return result;
+}
+
+static int inv_mpu_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ inv_mpu_acpi_delete_mux_client(client);
+ i2c_del_mux_adapter(st->mux_adapter);
+
+ return inv_mpu_core_remove(&client->dev);
+}
+
+/*
+ * device id table is used to identify what device can be
+ * supported by this driver
+ */
+static const struct i2c_device_id inv_mpu_id[] = {
+ {"mpu6050", INV_MPU6050},
+ {"mpu6500", INV_MPU6500},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+
+static const struct acpi_device_id inv_acpi_match[] = {
+ {"INVN6500", 0},
+ { },
+};
+
+MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
+
+static struct i2c_driver inv_mpu_driver = {
+ .probe = inv_mpu_probe,
+ .remove = inv_mpu_remove,
+ .id_table = inv_mpu_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(inv_acpi_match),
+ .name = "inv-mpu6050-i2c",
+ .pm = &inv_mpu_pmops,
+ },
+};
+
+module_i2c_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense device MPU6050 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index db0a4a2758ab..e302a49703bf 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/regmap.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
@@ -38,6 +39,9 @@
* @int_enable: Interrupt enable register.
* @pwr_mgmt_1: Controls chip's power state and clock source.
* @pwr_mgmt_2: Controls power state of individual sensors.
+ * @int_pin_cfg: Controls interrupt pin configuration.
+ * @accl_offset: Controls the accelerometer calibration offset.
+ * @gyro_offset: Controls the gyroscope calibration offset.
*/
struct inv_mpu6050_reg_map {
u8 sample_rate_div;
@@ -55,12 +59,15 @@ struct inv_mpu6050_reg_map {
u8 pwr_mgmt_1;
u8 pwr_mgmt_2;
u8 int_pin_cfg;
+ u8 accl_offset;
+ u8 gyro_offset;
};
/*device enum */
enum inv_devices {
INV_MPU6050,
INV_MPU6500,
+ INV_MPU6000,
INV_NUM_PARTS
};
@@ -107,9 +114,10 @@ struct inv_mpu6050_hw {
* @hw: Other hardware-specific information.
* @chip_type: chip type.
* @time_stamp_lock: spin lock to time stamp.
- * @client: i2c client handle.
* @plat_data: platform data.
* @timestamps: kfifo queue to store time stamp.
+ * @map: regmap pointer.
+ * @irq: interrupt number.
*/
struct inv_mpu6050_state {
#define TIMESTAMP_FIFO_SIZE 16
@@ -119,15 +127,19 @@ struct inv_mpu6050_state {
const struct inv_mpu6050_hw *hw;
enum inv_devices chip_type;
spinlock_t time_stamp_lock;
- struct i2c_client *client;
struct i2c_adapter *mux_adapter;
struct i2c_client *mux_client;
unsigned int powerup_count;
struct inv_mpu6050_platform_data plat_data;
DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
+ struct regmap *map;
+ int irq;
};
/*register and associated bit definition*/
+#define INV_MPU6050_REG_ACCEL_OFFSET 0x06
+#define INV_MPU6050_REG_GYRO_OFFSET 0x13
+
#define INV_MPU6050_REG_SAMPLE_RATE_DIV 0x19
#define INV_MPU6050_REG_CONFIG 0x1A
#define INV_MPU6050_REG_GYRO_CONFIG 0x1B
@@ -151,6 +163,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BIT_I2C_MST_EN 0x20
#define INV_MPU6050_BIT_FIFO_EN 0x40
#define INV_MPU6050_BIT_DMP_EN 0x80
+#define INV_MPU6050_BIT_I2C_IF_DIS 0x10
#define INV_MPU6050_REG_PWR_MGMT_1 0x6B
#define INV_MPU6050_BIT_H_RESET 0x80
@@ -167,10 +180,18 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
#define INV_MPU6050_FIFO_COUNT_BYTE 2
#define INV_MPU6050_FIFO_THRESHOLD 500
+
+/* mpu6500 registers */
+#define INV_MPU6500_REG_ACCEL_OFFSET 0x77
+
+/* delay time in milliseconds */
#define INV_MPU6050_POWER_UP_TIME 100
#define INV_MPU6050_TEMP_UP_TIME 100
#define INV_MPU6050_SENSOR_UP_TIME 30
-#define INV_MPU6050_REG_UP_TIME 5
+
+/* delay time in microseconds */
+#define INV_MPU6050_REG_UP_TIME_MIN 5000
+#define INV_MPU6050_REG_UP_TIME_MAX 10000
#define INV_MPU6050_TEMP_OFFSET 12421
#define INV_MPU6050_TEMP_SCALE 2941
@@ -185,6 +206,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_INT_PIN_CFG 0x37
#define INV_MPU6050_BIT_BYPASS_EN 0x2
+#define INV_MPU6050_INT_PIN_CFG 0
/* init parameters */
#define INV_MPU6050_INIT_FIFO_RATE 50
@@ -252,5 +274,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev);
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask);
int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
-int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st);
-void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st);
+int inv_mpu_acpi_create_mux_client(struct i2c_client *client);
+void inv_mpu_acpi_delete_mux_client(struct i2c_client *client);
+int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
+ int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type);
+int inv_mpu_core_remove(struct device *dev);
+extern const struct dev_pm_ops inv_mpu_pmops;
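The header now declares a bus-independent core: inv_mpu_core_probe() takes a prepared regmap, the interrupt line, a name, an optional bus_setup callback, and the chip type, so the I2C and SPI front ends reduce to thin glue. A minimal sketch of the I2C side, assuming only the prototypes declared above (the real inv_mpu_i2c.c additionally handles ACPI matching and the auxiliary-sensor i2c mux):

	#include <linux/i2c.h>
	#include <linux/regmap.h>
	#include "inv_mpu_iio.h"

	static const struct regmap_config inv_mpu_i2c_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
	};

	static int inv_mpu_i2c_probe_sketch(struct i2c_client *client,
					    const struct i2c_device_id *id)
	{
		struct regmap *regmap;

		regmap = devm_regmap_init_i2c(client, &inv_mpu_i2c_regmap_config);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		/* no bus_setup step is needed on I2C, hence the NULL callback */
		return inv_mpu_core_probe(regmap, client->irq, id->name,
					  NULL, id->driver_data);
	}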
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index ba27e277511f..d0700628ee6d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
@@ -41,23 +40,24 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
/* disable interrupt */
- result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+ result = regmap_write(st->map, st->reg->int_enable, 0);
if (result) {
- dev_err(&st->client->dev, "int_enable failed %d\n", result);
+ dev_err(regmap_get_device(st->map), "int_enable failed %d\n",
+ result);
return result;
}
/* disable the sensor output to FIFO */
- result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+ result = regmap_write(st->map, st->reg->fifo_en, 0);
if (result)
goto reset_fifo_fail;
/* disable fifo reading */
- result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+ result = regmap_write(st->map, st->reg->user_ctrl, 0);
if (result)
goto reset_fifo_fail;
/* reset FIFO */
- result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
- INV_MPU6050_BIT_FIFO_RST);
+ result = regmap_write(st->map, st->reg->user_ctrl,
+ INV_MPU6050_BIT_FIFO_RST);
if (result)
goto reset_fifo_fail;
@@ -67,14 +67,14 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
st->chip_config.gyro_fifo_enable) {
- result = inv_mpu6050_write_reg(st, st->reg->int_enable,
- INV_MPU6050_BIT_DATA_RDY_EN);
+ result = regmap_write(st->map, st->reg->int_enable,
+ INV_MPU6050_BIT_DATA_RDY_EN);
if (result)
return result;
}
/* enable FIFO reading and I2C master interface */
- result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
- INV_MPU6050_BIT_FIFO_EN);
+ result = regmap_write(st->map, st->reg->user_ctrl,
+ INV_MPU6050_BIT_FIFO_EN);
if (result)
goto reset_fifo_fail;
/* enable sensor output to FIFO */
@@ -83,16 +83,16 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
d |= INV_MPU6050_BITS_GYRO_OUT;
if (st->chip_config.accl_fifo_enable)
d |= INV_MPU6050_BIT_ACCEL_OUT;
- result = inv_mpu6050_write_reg(st, st->reg->fifo_en, d);
+ result = regmap_write(st->map, st->reg->fifo_en, d);
if (result)
goto reset_fifo_fail;
return 0;
reset_fifo_fail:
- dev_err(&st->client->dev, "reset fifo failed %d\n", result);
- result = inv_mpu6050_write_reg(st, st->reg->int_enable,
- INV_MPU6050_BIT_DATA_RDY_EN);
+ dev_err(regmap_get_device(st->map), "reset fifo failed %d\n", result);
+ result = regmap_write(st->map, st->reg->int_enable,
+ INV_MPU6050_BIT_DATA_RDY_EN);
return result;
}
@@ -109,7 +109,7 @@ irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
timestamp = iio_get_time_ns();
kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
- &st->time_stamp_lock);
+ &st->time_stamp_lock);
return IRQ_WAKE_THREAD;
}
@@ -143,10 +143,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
* read fifo_count register to know how many bytes inside FIFO
* right now
*/
- result = i2c_smbus_read_i2c_block_data(st->client,
- st->reg->fifo_count_h,
- INV_MPU6050_FIFO_COUNT_BYTE, data);
- if (result != INV_MPU6050_FIFO_COUNT_BYTE)
+ result = regmap_bulk_read(st->map, st->reg->fifo_count_h, data,
+ INV_MPU6050_FIFO_COUNT_BYTE);
+ if (result)
goto end_session;
fifo_count = be16_to_cpup((__be16 *)(&data[0]));
if (fifo_count < bytes_per_datum)
@@ -158,22 +157,21 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
goto flush_fifo;
/* Timestamp mismatch. */
if (kfifo_len(&st->timestamps) >
- fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
- goto flush_fifo;
+ fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
+ goto flush_fifo;
while (fifo_count >= bytes_per_datum) {
- result = i2c_smbus_read_i2c_block_data(st->client,
- st->reg->fifo_r_w,
- bytes_per_datum, data);
- if (result != bytes_per_datum)
+ result = regmap_bulk_read(st->map, st->reg->fifo_r_w,
+ data, bytes_per_datum);
+ if (result)
goto flush_fifo;
result = kfifo_out(&st->timestamps, &timestamp, 1);
/* when there is no timestamp, put timestamp as 0 */
- if (0 == result)
+ if (result == 0)
timestamp = 0;
result = iio_push_to_buffers_with_timestamp(indio_dev, data,
- timestamp);
+ timestamp);
if (result)
goto flush_fifo;
fifo_count -= bytes_per_datum;
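Note the changed error-handling convention in the conversions above: i2c_smbus_read_i2c_block_data() reports success as a positive byte count, which is why the old code compared the result against the expected length, while regmap_bulk_read() and regmap_write() return 0 on success and a negative errno on failure, so every check collapses to "if (result)". A side-by-side sketch of the two idioms:

	#include <linux/i2c.h>
	#include <linux/regmap.h>

	/* SMBus convention: success is a positive byte count */
	static int read_block_smbus(struct i2c_client *client, u8 reg,
				    u8 *buf, u8 len)
	{
		int ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf);

		return ret == len ? 0 : (ret < 0 ? ret : -EIO);
	}

	/* regmap convention: zero on success, negative errno on failure */
	static int read_block_regmap(struct regmap *map, unsigned int reg,
				     u8 *buf, size_t len)
	{
		return regmap_bulk_read(map, reg, buf, len);
	}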
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
new file mode 100644
index 000000000000..7bcb8d839f05
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -0,0 +1,99 @@
+/*
+* Copyright (C) 2015 Intel Corporation Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include "inv_mpu_iio.h"
+
+static const struct regmap_config inv_mpu_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret = 0;
+
+ ret = inv_mpu6050_set_power_itg(st, true);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_USER_CTRL,
+ INV_MPU6050_BIT_I2C_IF_DIS);
+ if (ret) {
+ inv_mpu6050_set_power_itg(st, false);
+ return ret;
+ }
+
+ return inv_mpu6050_set_power_itg(st, false);
+}
+
+static int inv_mpu_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ const char *name = id ? id->name : NULL;
+ const int chip_type = id ? id->driver_data : 0;
+
+ regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return inv_mpu_core_probe(regmap, spi->irq, name,
+ inv_mpu_i2c_disable, chip_type);
+}
+
+static int inv_mpu_remove(struct spi_device *spi)
+{
+ return inv_mpu_core_remove(&spi->dev);
+}
+
+/*
+ * The device id table identifies which devices this driver
+ * supports.
+ */
+static const struct spi_device_id inv_mpu_id[] = {
+ {"mpu6000", INV_MPU6000},
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, inv_mpu_id);
+
+static const struct acpi_device_id inv_acpi_match[] = {
+ {"INVN6000", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
+
+static struct spi_driver inv_mpu_driver = {
+ .probe = inv_mpu_probe,
+ .remove = inv_mpu_remove,
+ .id_table = inv_mpu_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(inv_acpi_match),
+ .name = "inv-mpu6000-spi",
+ .pm = &inv_mpu_pmops,
+ },
+};
+
+module_spi_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Adriana Reus <adriana.reus@intel.com>");
+MODULE_DESCRIPTION("Invensense device MPU6000 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 844610c3a3a9..e8818d4dd4b8 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -19,19 +19,19 @@ static void inv_scan_query(struct iio_dev *indio_dev)
st->chip_config.gyro_fifo_enable =
test_bit(INV_MPU6050_SCAN_GYRO_X,
- indio_dev->active_scan_mask) ||
- test_bit(INV_MPU6050_SCAN_GYRO_Y,
- indio_dev->active_scan_mask) ||
- test_bit(INV_MPU6050_SCAN_GYRO_Z,
- indio_dev->active_scan_mask);
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_GYRO_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_GYRO_Z,
+ indio_dev->active_scan_mask);
st->chip_config.accl_fifo_enable =
test_bit(INV_MPU6050_SCAN_ACCL_X,
- indio_dev->active_scan_mask) ||
- test_bit(INV_MPU6050_SCAN_ACCL_Y,
- indio_dev->active_scan_mask) ||
- test_bit(INV_MPU6050_SCAN_ACCL_Z,
- indio_dev->active_scan_mask);
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_ACCL_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_ACCL_Z,
+ indio_dev->active_scan_mask);
}
/**
@@ -65,15 +65,15 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
return result;
} else {
- result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+ result = regmap_write(st->map, st->reg->fifo_en, 0);
if (result)
return result;
- result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+ result = regmap_write(st->map, st->reg->int_enable, 0);
if (result)
return result;
- result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+ result = regmap_write(st->map, st->reg->user_ctrl, 0);
if (result)
return result;
@@ -101,7 +101,7 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
* @state: Desired trigger state
*/
static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
- bool state)
+ bool state)
{
return inv_mpu6050_set_enable(iio_trigger_get_drvdata(trig), state);
}
@@ -123,7 +123,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
if (!st->trig)
return -ENOMEM;
- ret = devm_request_irq(&indio_dev->dev, st->client->irq,
+ ret = devm_request_irq(&indio_dev->dev, st->irq,
&iio_trigger_generic_data_rdy_poll,
IRQF_TRIGGER_RISING,
"inv_mpu",
@@ -131,7 +131,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
if (ret)
return ret;
- st->trig->dev.parent = &st->client->dev;
+ st->trig->dev.parent = regmap_get_device(st->map);
st->trig->ops = &inv_mpu_trigger_ops;
iio_trigger_set_drvdata(st->trig, indio_dev);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 139ae916225f..90462fcf5436 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -512,33 +512,41 @@ static ssize_t iio_buffer_show_enable(struct device *dev,
return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
+static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
+ unsigned int scan_index)
+{
+ const struct iio_chan_spec *ch;
+ unsigned int bytes;
+
+ ch = iio_find_channel_from_si(indio_dev, scan_index);
+ bytes = ch->scan_type.storagebits / 8;
+ if (ch->scan_type.repeat > 1)
+ bytes *= ch->scan_type.repeat;
+ return bytes;
+}
+
+static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
+{
+ return iio_storage_bytes_for_si(indio_dev,
+ indio_dev->scan_index_timestamp);
+}
+
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
{
- const struct iio_chan_spec *ch;
unsigned bytes = 0;
int length, i;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask,
indio_dev->masklength) {
- ch = iio_find_channel_from_si(indio_dev, i);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_si(indio_dev, i);
bytes = ALIGN(bytes, length);
bytes += length;
}
+
if (timestamp) {
- ch = iio_find_channel_from_si(indio_dev,
- indio_dev->scan_index_timestamp);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_timestamp(indio_dev);
bytes = ALIGN(bytes, length);
bytes += length;
}
@@ -645,6 +653,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
unsigned int modes;
memset(config, 0, sizeof(*config));
+ config->watermark = ~0;
/*
* If there is just one buffer and we are removing it there is nothing
@@ -1288,7 +1297,6 @@ static int iio_buffer_add_demux(struct iio_buffer *buffer,
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
- const struct iio_chan_spec *ch;
int ret, in_ind = -1, out_ind, length;
unsigned in_loc = 0, out_loc = 0;
struct iio_demux_table *p = NULL;
@@ -1315,21 +1323,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
in_ind = find_next_bit(indio_dev->active_scan_mask,
indio_dev->masklength,
in_ind + 1);
- ch = iio_find_channel_from_si(indio_dev, in_ind);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_si(indio_dev, in_ind);
/* Make sure we are aligned */
in_loc = roundup(in_loc, length) + length;
}
- ch = iio_find_channel_from_si(indio_dev, in_ind);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_si(indio_dev, in_ind);
out_loc = roundup(out_loc, length);
in_loc = roundup(in_loc, length);
ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
@@ -1340,13 +1338,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
}
/* Relies on scan_timestamp being last */
if (buffer->scan_timestamp) {
- ch = iio_find_channel_from_si(indio_dev,
- indio_dev->scan_index_timestamp);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_timestamp(indio_dev);
out_loc = roundup(out_loc, length);
in_loc = roundup(in_loc, length);
ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
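iio_storage_bytes_for_si() factors out the storagebits/repeat arithmetic that was previously open-coded in iio_compute_scan_bytes(), iio_buffer_update_demux() and the timestamp path. The sizing rule is unchanged: each scan element contributes its storage size, aligned to that same size. A standalone worked example for two 16-bit channels followed by a 64-bit timestamp (userspace illustration only):

	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int lengths[] = { 2, 2, 8 };	/* u16, u16, s64 */
		unsigned int bytes = 0;
		unsigned int i;

		for (i = 0; i < 3; i++) {
			bytes = ALIGN(bytes, lengths[i]);	/* natural alignment */
			bytes += lengths[i];
		}
		/* prints 16: offsets 0 and 2, then 4 pad bytes, stamp at 8 */
		printf("scan bytes = %u\n", bytes);
		return 0;
	}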
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index af7cc1e65656..70cb7eb0a75c 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -77,6 +77,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_VELOCITY] = "velocity",
[IIO_CONCENTRATION] = "concentration",
[IIO_RESISTANCE] = "resistance",
+ [IIO_PH] = "ph",
};
static const char * const iio_modifier_names[] = {
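The new IIO_PH entry makes pH a first-class channel type, surfacing as in_ph* attributes in sysfs. A hypothetical channel table using it (illustration only; no driver in this hunk declares one, and the masks are assumptions):

	#include <linux/iio/iio.h>

	/* hypothetical pH sensor channel */
	static const struct iio_chan_spec ph_channels[] = {
		{
			.type = IIO_PH,
			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
					      BIT(IIO_CHAN_INFO_SCALE),
		},
	};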
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index f6a07dc32ae4..a6af56ad10e1 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -769,7 +769,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
mutex_lock(&data->lock);
data->gesture_mode_running = 1;
- while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) {
+ while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) {
ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE,
&data->buffer, 4);
@@ -777,6 +777,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
goto err_read;
iio_push_to_buffers(data->indio_dev, data->buffer);
+ cnt--;
}
err_read:
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 8b4164343f20..b05946604f80 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -241,7 +241,7 @@ static int bh1750_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_WRITE_BYTE))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index c4e8c6b6c3c3..99a62816c3b4 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -326,7 +326,7 @@ static int jsa1212_probe(struct i2c_client *client,
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 01e111e72d4b..b776c8ed4387 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -65,19 +65,25 @@
#define OPT3001_REG_EXPONENT(n) ((n) >> 12)
#define OPT3001_REG_MANTISSA(n) ((n) & 0xfff)
+#define OPT3001_INT_TIME_LONG 800000
+#define OPT3001_INT_TIME_SHORT 100000
+
/*
* Time to wait for conversion result to be ready. The device datasheet
- * worst-case max value is 880ms. Add some slack to be on the safe side.
+ * sect. 6.5 states results are ready after total integration time plus 3ms.
+ * This results in worst-case max values of 113ms or 883ms, respectively.
+ * Add some slack to be on the safe side.
*/
-#define OPT3001_RESULT_READY_TIMEOUT msecs_to_jiffies(1000)
+#define OPT3001_RESULT_READY_SHORT 150
+#define OPT3001_RESULT_READY_LONG 1000
struct opt3001 {
struct i2c_client *client;
struct device *dev;
struct mutex lock;
- u16 ok_to_ignore_lock:1;
- u16 result_ready:1;
+ bool ok_to_ignore_lock;
+ bool result_ready;
wait_queue_head_t result_ready_queue;
u16 result;
@@ -89,6 +95,8 @@ struct opt3001 {
u8 high_thresh_exp;
u8 low_thresh_exp;
+
+ bool use_irq;
};
struct opt3001_scale {
@@ -227,26 +235,30 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
u16 reg;
u8 exponent;
u16 value;
+ long timeout;
- /*
- * Enable the end-of-conversion interrupt mechanism. Note that doing
- * so will overwrite the low-level limit value however we will restore
- * this value later on.
- */
- ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
- OPT3001_LOW_LIMIT_EOC_ENABLE);
- if (ret < 0) {
- dev_err(opt->dev, "failed to write register %02x\n",
- OPT3001_LOW_LIMIT);
- return ret;
+ if (opt->use_irq) {
+ /*
+ * Enable the end-of-conversion interrupt mechanism. Note that
+ * doing so will overwrite the low-level limit value however we
+ * will restore this value later on.
+ */
+ ret = i2c_smbus_write_word_swapped(opt->client,
+ OPT3001_LOW_LIMIT,
+ OPT3001_LOW_LIMIT_EOC_ENABLE);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_LOW_LIMIT);
+ return ret;
+ }
+
+ /* Allow IRQ to access the device despite lock being set */
+ opt->ok_to_ignore_lock = true;
}
- /* Reset data-ready indicator flag (will be set in the IRQ routine) */
+ /* Reset data-ready indicator flag */
opt->result_ready = false;
- /* Allow IRQ to access the device despite lock being set */
- opt->ok_to_ignore_lock = true;
-
/* Configure for single-conversion mode and start a new conversion */
ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
if (ret < 0) {
@@ -266,32 +278,69 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
goto err;
}
- /* Wait for the IRQ to indicate the conversion is complete */
- ret = wait_event_timeout(opt->result_ready_queue, opt->result_ready,
- OPT3001_RESULT_READY_TIMEOUT);
+ if (opt->use_irq) {
+ /* Wait for the IRQ to indicate the conversion is complete */
+ ret = wait_event_timeout(opt->result_ready_queue,
+ opt->result_ready,
+ msecs_to_jiffies(OPT3001_RESULT_READY_LONG));
+ } else {
+ /* Sleep for result ready time */
+ timeout = (opt->int_time == OPT3001_INT_TIME_SHORT) ?
+ OPT3001_RESULT_READY_SHORT : OPT3001_RESULT_READY_LONG;
+ msleep(timeout);
+
+ /* Check result ready flag */
+ ret = i2c_smbus_read_word_swapped(opt->client,
+ OPT3001_CONFIGURATION);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_CONFIGURATION);
+ goto err;
+ }
+
+ if (!(ret & OPT3001_CONFIGURATION_CRF)) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ /* Obtain value */
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_RESULT);
+ goto err;
+ }
+ opt->result = ret;
+ opt->result_ready = true;
+ }
err:
- /* Disallow IRQ to access the device while lock is active */
- opt->ok_to_ignore_lock = false;
+ if (opt->use_irq)
+ /* Disallow IRQ to access the device while lock is active */
+ opt->ok_to_ignore_lock = false;
if (ret == 0)
return -ETIMEDOUT;
else if (ret < 0)
return ret;
- /*
- * Disable the end-of-conversion interrupt mechanism by restoring the
- * low-level limit value (clearing OPT3001_LOW_LIMIT_EOC_ENABLE). Note
- * that selectively clearing those enable bits would affect the actual
- * limit value due to bit-overlap and therefore can't be done.
- */
- value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa;
- ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
- value);
- if (ret < 0) {
- dev_err(opt->dev, "failed to write register %02x\n",
- OPT3001_LOW_LIMIT);
- return ret;
+ if (opt->use_irq) {
+ /*
+ * Disable the end-of-conversion interrupt mechanism by
+ * restoring the low-level limit value (clearing
+ * OPT3001_LOW_LIMIT_EOC_ENABLE). Note that selectively clearing
+ * those enable bits would affect the actual limit value due to
+ * bit-overlap and therefore can't be done.
+ */
+ value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa;
+ ret = i2c_smbus_write_word_swapped(opt->client,
+ OPT3001_LOW_LIMIT,
+ value);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_LOW_LIMIT);
+ return ret;
+ }
}
exponent = OPT3001_REG_EXPONENT(opt->result);
@@ -325,13 +374,13 @@ static int opt3001_set_int_time(struct opt3001 *opt, int time)
reg = ret;
switch (time) {
- case 100000:
+ case OPT3001_INT_TIME_SHORT:
reg &= ~OPT3001_CONFIGURATION_CT;
- opt->int_time = 100000;
+ opt->int_time = OPT3001_INT_TIME_SHORT;
break;
- case 800000:
+ case OPT3001_INT_TIME_LONG:
reg |= OPT3001_CONFIGURATION_CT;
- opt->int_time = 800000;
+ opt->int_time = OPT3001_INT_TIME_LONG;
break;
default:
return -EINVAL;
@@ -597,9 +646,9 @@ static int opt3001_configure(struct opt3001 *opt)
/* Reflect status of the device's integration time setting */
if (reg & OPT3001_CONFIGURATION_CT)
- opt->int_time = 800000;
+ opt->int_time = OPT3001_INT_TIME_LONG;
else
- opt->int_time = 100000;
+ opt->int_time = OPT3001_INT_TIME_SHORT;
/* Ensure device is in shutdown initially */
opt3001_set_mode(opt, &reg, OPT3001_CONFIGURATION_M_SHUTDOWN);
@@ -733,12 +782,18 @@ static int opt3001_probe(struct i2c_client *client,
return ret;
}
- ret = request_threaded_irq(irq, NULL, opt3001_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "opt3001", iio);
- if (ret) {
- dev_err(dev, "failed to request IRQ #%d\n", irq);
- return ret;
+ /* Make use of INT pin only if valid IRQ no. is given */
+ if (irq > 0) {
+ ret = request_threaded_irq(irq, NULL, opt3001_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "opt3001", iio);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ #%d\n", irq);
+ return ret;
+ }
+ opt->use_irq = true;
+ } else {
+ dev_dbg(opt->dev, "enabling interrupt-less operation\n");
}
return 0;
@@ -751,7 +806,8 @@ static int opt3001_remove(struct i2c_client *client)
int ret;
u16 reg;
- free_irq(client->irq, iio);
+ if (opt->use_irq)
+ free_irq(client->irq, iio);
ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
if (ret < 0) {
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 868abada3409..021dc5361f53 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -105,4 +105,37 @@ config IIO_ST_MAGN_SPI_3AXIS
depends on IIO_ST_MAGN_3AXIS
depends on IIO_ST_SENSORS_SPI
+config SENSORS_HMC5843
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config SENSORS_HMC5843_I2C
+ tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer (I2C)"
+ depends on I2C
+ select SENSORS_HMC5843
+ select REGMAP_I2C
+ help
+ Say Y here to add support for the Honeywell HMC5843, HMC5883 and
+ HMC5883L 3-Axis Magnetometer (digital compass).
+
+ This driver can also be compiled as a set of modules.
+ If so, these modules will be created:
+ - hmc5843_core (core functions)
+ - hmc5843_i2c (support for HMC5843, HMC5883, HMC5883L and HMC5983)
+
+config SENSORS_HMC5843_SPI
+ tristate "Honeywell HMC5983 3-Axis Magnetometer (SPI)"
+ depends on SPI_MASTER
+ select SENSORS_HMC5843
+ select REGMAP_SPI
+ help
+ Say Y here to add support for the Honeywell HMC5983 3-Axis Magnetometer
+ (digital compass).
+
+ This driver can also be compiled as a set of modules.
+ If so, these modules will be created:
+ - hmc5843_core (core functions)
+ - hmc5843_spi (support for HMC5983)
+
endmenu
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 2c72df458ec2..dd03fe524481 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -15,3 +15,7 @@ st_magn-$(CONFIG_IIO_BUFFER) += st_magn_buffer.o
obj-$(CONFIG_IIO_ST_MAGN_I2C_3AXIS) += st_magn_i2c.o
obj-$(CONFIG_IIO_ST_MAGN_SPI_3AXIS) += st_magn_spi.o
+
+obj-$(CONFIG_SENSORS_HMC5843) += hmc5843_core.o
+obj-$(CONFIG_SENSORS_HMC5843_I2C) += hmc5843_i2c.o
+obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index b13936dacc78..0e931a9a1669 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -252,7 +252,7 @@ struct ak_def {
u8 data_regs[3];
};
-static struct ak_def ak_def_array[AK_MAX_TYPE] = {
+static const struct ak_def ak_def_array[AK_MAX_TYPE] = {
{
.type = AK8975,
.raw_to_gauss = ak8975_raw_to_gauss,
@@ -360,7 +360,7 @@ static struct ak_def ak_def_array[AK_MAX_TYPE] = {
*/
struct ak8975_data {
struct i2c_client *client;
- struct ak_def *def;
+ const struct ak_def *def;
struct attribute_group attrs;
struct mutex lock;
u8 asa[3];
@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
int rc;
int irq;
+ init_waitqueue_head(&data->data_ready_queue);
+ clear_bit(0, &data->flags);
if (client->irq)
irq = client->irq;
else
@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
return rc;
}
- init_waitqueue_head(&data->data_ready_queue);
- clear_bit(0, &data->flags);
data->eoc_irq = irq;
return rc;
@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
int eoc_gpio;
int err;
const char *name = NULL;
- enum asahi_compass_chipset chipset;
+ enum asahi_compass_chipset chipset = AK_MAX_TYPE;
/* Grab and set up the supplied GPIO. */
if (client->dev.platform_data)
diff --git a/drivers/staging/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index 06f35d3828e4..76a5d7484d8d 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -7,8 +7,7 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * */
+ */
#ifndef HMC5843_CORE_H
#define HMC5843_CORE_H
@@ -38,7 +37,7 @@ enum hmc5843_ids {
* @regmap: hardware access register maps
* @variant: describe chip variants
* @buffer: 3x 16-bit channels + padding + 64-bit timestamp
- **/
+ */
struct hmc5843_data {
struct device *dev;
struct mutex lock;
diff --git a/drivers/staging/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 394bc141a1b0..77882b466e0f 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -18,7 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
#include <linux/module.h>
@@ -66,6 +65,33 @@
#define HMC5843_MEAS_CONF_NEGATIVE_BIAS 0x02
#define HMC5843_MEAS_CONF_MASK 0x03
+/*
+ * API for setting the measurement configuration to
+ * Normal, Positive bias and Negative bias
+ *
+ * From the datasheet:
+ * 0 - Normal measurement configuration (default): In normal measurement
+ * configuration the device follows normal measurement flow. Pins BP
+ * and BN are left floating and high impedance.
+ *
+ * 1 - Positive bias configuration: In positive bias configuration, a
+ * positive current is forced across the resistive load on pins BP
+ * and BN.
+ *
+ * 2 - Negative bias configuration. In negative bias configuration, a
+ * negative current is forced across the resistive load on pins BP
+ * and BN.
+ *
+ * 3 - Only available on HMC5983. Magnetic sensor is disabled.
+ * Temperature sensor is enabled.
+ */
+
+static const char *const hmc5843_meas_conf_modes[] = {"normal", "positivebias",
+ "negativebias"};
+
+static const char *const hmc5983_meas_conf_modes[] = {"normal", "positivebias",
+ "negativebias",
+ "disabled"};
/* Scaling factors: 10000000/Gain */
static const int hmc5843_regval_to_nanoscale[] = {
6173, 7692, 10309, 12821, 18868, 21739, 25641, 35714
@@ -174,24 +200,6 @@ static int hmc5843_read_measurement(struct hmc5843_data *data,
return IIO_VAL_INT;
}
-/*
- * API for setting the measurement configuration to
- * Normal, Positive bias and Negative bias
- *
- * From the datasheet:
- * 0 - Normal measurement configuration (default): In normal measurement
- * configuration the device follows normal measurement flow. Pins BP
- * and BN are left floating and high impedance.
- *
- * 1 - Positive bias configuration: In positive bias configuration, a
- * positive current is forced across the resistive load on pins BP
- * and BN.
- *
- * 2 - Negative bias configuration. In negative bias configuration, a
- * negative current is forced across the resistive load on pins BP
- * and BN.
- *
- */
static int hmc5843_set_meas_conf(struct hmc5843_data *data, u8 meas_conf)
{
int ret;
@@ -205,48 +213,55 @@ static int hmc5843_set_meas_conf(struct hmc5843_data *data, u8 meas_conf)
}
static
-ssize_t hmc5843_show_measurement_configuration(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+int hmc5843_show_measurement_configuration(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
{
- struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
+ struct hmc5843_data *data = iio_priv(indio_dev);
unsigned int val;
int ret;
ret = regmap_read(data->regmap, HMC5843_CONFIG_REG_A, &val);
if (ret)
return ret;
- val &= HMC5843_MEAS_CONF_MASK;
- return sprintf(buf, "%d\n", val);
+ return val & HMC5843_MEAS_CONF_MASK;
}
static
-ssize_t hmc5843_set_measurement_configuration(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+int hmc5843_set_measurement_configuration(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int meas_conf)
{
- struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
- unsigned long meas_conf = 0;
- int ret;
+ struct hmc5843_data *data = iio_priv(indio_dev);
- ret = kstrtoul(buf, 10, &meas_conf);
- if (ret)
- return ret;
- if (meas_conf >= HMC5843_MEAS_CONF_MASK)
- return -EINVAL;
+ return hmc5843_set_meas_conf(data, meas_conf);
+}
- ret = hmc5843_set_meas_conf(data, meas_conf);
+static const struct iio_enum hmc5843_meas_conf_enum = {
+ .items = hmc5843_meas_conf_modes,
+ .num_items = ARRAY_SIZE(hmc5843_meas_conf_modes),
+ .get = hmc5843_show_measurement_configuration,
+ .set = hmc5843_set_measurement_configuration,
+};
- return (ret < 0) ? ret : count;
-}
+static const struct iio_chan_spec_ext_info hmc5843_ext_info[] = {
+ IIO_ENUM("meas_conf", true, &hmc5843_meas_conf_enum),
+ IIO_ENUM_AVAILABLE("meas_conf", &hmc5843_meas_conf_enum),
+ { },
+};
-static IIO_DEVICE_ATTR(meas_conf,
- S_IWUSR | S_IRUGO,
- hmc5843_show_measurement_configuration,
- hmc5843_set_measurement_configuration,
- 0);
+static const struct iio_enum hmc5983_meas_conf_enum = {
+ .items = hmc5983_meas_conf_modes,
+ .num_items = ARRAY_SIZE(hmc5983_meas_conf_modes),
+ .get = hmc5843_show_measurement_configuration,
+ .set = hmc5843_set_measurement_configuration,
+};
+
+static const struct iio_chan_spec_ext_info hmc5983_ext_info[] = {
+ IIO_ENUM("meas_conf", true, &hmc5983_meas_conf_enum),
+ IIO_ENUM_AVAILABLE("meas_conf", &hmc5983_meas_conf_enum),
+ { },
+};
static
ssize_t hmc5843_show_samp_freq_avail(struct device *dev,
@@ -459,6 +474,25 @@ done:
.storagebits = 16, \
.endianness = IIO_BE, \
}, \
+ .ext_info = hmc5843_ext_info, \
+ }
+
+#define HMC5983_CHANNEL(axis, idx) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ .ext_info = hmc5983_ext_info, \
}
static const struct iio_chan_spec hmc5843_channels[] = {
@@ -476,8 +510,14 @@ static const struct iio_chan_spec hmc5883_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3),
};
+static const struct iio_chan_spec hmc5983_channels[] = {
+ HMC5983_CHANNEL(X, 0),
+ HMC5983_CHANNEL(Z, 1),
+ HMC5983_CHANNEL(Y, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
static struct attribute *hmc5843_attributes[] = {
- &iio_dev_attr_meas_conf.dev_attr.attr,
&iio_dev_attr_scale_available.dev_attr.attr,
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
NULL
@@ -516,7 +556,7 @@ static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
ARRAY_SIZE(hmc5883l_regval_to_nanoscale),
},
[HMC5983_ID] = {
- .channels = hmc5883_channels,
+ .channels = hmc5983_channels,
.regval_to_samp_freq = hmc5983_regval_to_samp_freq,
.n_regval_to_samp_freq =
ARRAY_SIZE(hmc5983_regval_to_samp_freq),
@@ -565,14 +605,14 @@ static const unsigned long hmc5843_scan_masks[] = {0x7, 0};
int hmc5843_common_suspend(struct device *dev)
{
return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
- HMC5843_MODE_CONVERSION_CONTINUOUS);
+ HMC5843_MODE_SLEEP);
}
EXPORT_SYMBOL(hmc5843_common_suspend);
int hmc5843_common_resume(struct device *dev)
{
return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
- HMC5843_MODE_SLEEP);
+ HMC5843_MODE_CONVERSION_CONTINUOUS);
}
EXPORT_SYMBOL(hmc5843_common_resume);
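Beyond the move out of staging, the last hunk fixes an inverted power-management pair: suspend previously switched the chip into continuous-conversion mode and resume put it to sleep. The exported helpers are meant to be wired into the bus glue's dev_pm_ops; a minimal sketch under that assumption (the name hmc5843_pm_ops is illustrative, the actual hmc5843_i2c.c/hmc5843_spi.c wiring may differ):

	#include <linux/pm.h>
	#include "hmc5843.h"

	static SIMPLE_DEV_PM_OPS(hmc5843_pm_ops, hmc5843_common_suspend,
				 hmc5843_common_resume);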
diff --git a/drivers/staging/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 3e06ceb32059..3de7f4426ac4 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -7,8 +7,7 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * */
+ */
#include <linux/module.h>
#include <linux/i2c.h>
diff --git a/drivers/staging/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index 8be198058ea2..535f03a70d63 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -6,8 +6,7 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * */
+ */
#include <linux/module.h>
#include <linux/spi/spi.h>
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 06a4d9c35581..9daca4681922 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
{
}
+#define ST_MAGN_TRIGGER_SET_STATE NULL
#endif /* CONFIG_IIO_BUFFER */
#endif /* ST_MAGN_H */
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index b27f0146647b..501f858df413 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -175,6 +175,8 @@
#define ST_MAGN_3_BDU_MASK 0x10
#define ST_MAGN_3_DRDY_IRQ_ADDR 0x62
#define ST_MAGN_3_DRDY_INT_MASK 0x01
+#define ST_MAGN_3_IHL_IRQ_ADDR 0x63
+#define ST_MAGN_3_IHL_IRQ_MASK 0x04
#define ST_MAGN_3_FS_AVL_15000_GAIN 1500
#define ST_MAGN_3_MULTIREAD_BIT false
#define ST_MAGN_3_OUT_X_L_ADDR 0x68
@@ -480,6 +482,8 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.drdy_irq = {
.addr = ST_MAGN_3_DRDY_IRQ_ADDR,
.mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
+ .addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
+ .mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
},
.multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
.bootime = 2,
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index fd75db73e582..ffc735c168fb 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -17,4 +17,16 @@ config MCP4531
To compile this driver as a module, choose M here: the
module will be called mcp4531.
+config TPL0102
+ tristate "Texas Instruments digital potentiometer driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the Texas Instruments
+	  TPL0102 and TPL0402 digital potentiometer chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tpl0102.
+
endmenu
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index 8afe49227012..b563b492b486 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -4,3 +4,4 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_MCP4531) += mcp4531.o
+obj-$(CONFIG_TPL0102) += tpl0102.o
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index a3f66874ee2e..0db67fe14766 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -159,7 +159,7 @@ static int mcp4531_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
dev_err(dev, "SMBUS Word Data not supported\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
new file mode 100644
index 000000000000..313124b6fd59
--- /dev/null
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -0,0 +1,166 @@
+/*
+ * tpl0102.c - Support for Texas Instruments digital potentiometers
+ *
+ * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * TODO: enable/disable hi-z output control
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+
+struct tpl0102_cfg {
+ int wipers;
+ int max_pos;
+ int kohms;
+};
+
+enum tpl0102_type {
+ CAT5140_503,
+ CAT5140_104,
+ TPL0102_104,
+ TPL0401_103,
+};
+
+static const struct tpl0102_cfg tpl0102_cfg[] = {
+ /* on-semiconductor parts */
+ [CAT5140_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
+ [CAT5140_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
+ /* ti parts */
+ [TPL0102_104] = { .wipers = 2, .max_pos = 256, .kohms = 100 },
+ [TPL0401_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
+};
+
+struct tpl0102_data {
+ struct regmap *regmap;
+ unsigned long devid;
+};
+
+static const struct regmap_config tpl0102_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+#define TPL0102_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec tpl0102_channels[] = {
+ TPL0102_CHANNEL(0),
+ TPL0102_CHANNEL(1),
+};
+
+static int tpl0102_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tpl0102_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ int ret = regmap_read(data->regmap, chan->channel, val);
+
+ return ret ? ret : IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1000 * tpl0102_cfg[data->devid].kohms;
+ *val2 = tpl0102_cfg[data->devid].max_pos;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+}
+
+static int tpl0102_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tpl0102_data *data = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ if (val >= tpl0102_cfg[data->devid].max_pos || val < 0)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, chan->channel, val);
+}
+
+static const struct iio_info tpl0102_info = {
+ .read_raw = tpl0102_read_raw,
+ .write_raw = tpl0102_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int tpl0102_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tpl0102_data *data;
+ struct iio_dev *indio_dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA))
+		return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ data->devid = id->driver_data;
+ data->regmap = devm_regmap_init_i2c(client, &tpl0102_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "regmap initialization failed\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &tpl0102_info;
+ indio_dev->channels = tpl0102_channels;
+ indio_dev->num_channels = tpl0102_cfg[data->devid].wipers;
+ indio_dev->name = client->name;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id tpl0102_id[] = {
+ { "cat5140-503", CAT5140_503 },
+ { "cat5140-104", CAT5140_104 },
+ { "tpl0102-104", TPL0102_104 },
+ { "tpl0401-103", TPL0401_103 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpl0102_id);
+
+static struct i2c_driver tpl0102_driver = {
+ .driver = {
+ .name = "tpl0102",
+ },
+ .probe = tpl0102_probe,
+ .id_table = tpl0102_id,
+};
+
+module_i2c_driver(tpl0102_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("TPL0102 digital potentiometer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 6f2e7c9ac23e..31c0e1fd2202 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -10,11 +10,11 @@ config BMP280
depends on I2C
select REGMAP_I2C
help
- Say yes here to build support for Bosch Sensortec BMP280
- pressure and temperature sensor.
+ Say yes here to build support for Bosch Sensortec BMP280
+ pressure and temperature sensor.
- To compile this driver as a module, choose M here: the module
- will be called bmp280.
+ To compile this driver as a module, choose M here: the module
+ will be called bmp280.
config HID_SENSOR_PRESS
depends on HID_SENSOR_HUB
@@ -27,18 +27,33 @@ config HID_SENSOR_PRESS
Say yes here to build support for the HID SENSOR
Pressure driver
- To compile this driver as a module, choose M here: the module
- will be called hid-sensor-press.
+ To compile this driver as a module, choose M here: the module
+ will be called hid-sensor-press.
config MPL115
+ tristate
+
+config MPL115_I2C
tristate "Freescale MPL115A2 pressure sensor driver"
depends on I2C
+ select MPL115
help
Say yes here to build support for the Freescale MPL115A2
pressure sensor connected via I2C.
- To compile this driver as a module, choose M here: the module
- will be called mpl115.
+ To compile this driver as a module, choose M here: the module
+ will be called mpl115_i2c.
+
+config MPL115_SPI
+ tristate "Freescale MPL115A1 pressure sensor driver"
+ depends on SPI_MASTER
+ select MPL115
+ help
+ Say yes here to build support for the Freescale MPL115A1
+ pressure sensor connected via SPI.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mpl115_spi.
config MPL3115
tristate "Freescale MPL3115A2 pressure sensor driver"
@@ -49,11 +64,13 @@ config MPL3115
Say yes here to build support for the Freescale MPL3115A2
pressure sensor / altimeter.
- To compile this driver as a module, choose M here: the module
- will be called mpl3115.
+ To compile this driver as a module, choose M here: the module
+ will be called mpl3115.
config MS5611
tristate "Measurement Specialties MS5611 pressure sensor driver"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Measurement Specialties
MS5611, MS5607 pressure and temperature sensors.
@@ -82,7 +99,7 @@ config MS5611_SPI
config MS5637
tristate "Measurement Specialties MS5637 pressure & temperature sensor"
depends on I2C
- select IIO_MS_SENSORS_I2C
+ select IIO_MS_SENSORS_I2C
help
If you say yes here you get support for the Measurement Specialties
MS5637 pressure and temperature sensor.
@@ -128,7 +145,7 @@ config T5403
Say yes here to build support for the EPCOS T5403 pressure sensor
connected via I2C.
- To compile this driver as a module, choose M here: the module
- will be called t5403.
+ To compile this driver as a module, choose M here: the module
+ will be called t5403.
endmenu
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 46571c96823f..d336af14f3fe 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_BMP280) += bmp280.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_MPL115) += mpl115.o
+obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
+obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
obj-$(CONFIG_MPL3115) += mpl3115.o
obj-$(CONFIG_MS5611) += ms5611_core.o
obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index a0d7deeac62f..73f2f0c46e62 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -1,5 +1,5 @@
/*
- * mpl115.c - Support for Freescale MPL115A2 pressure/temperature sensor
+ * mpl115.c - Support for Freescale MPL115A pressure/temperature sensor
*
* Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
*
@@ -7,17 +7,16 @@
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
- * (7-bit I2C slave address 0x60)
- *
* TODO: shutdown pin
*
*/
#include <linux/module.h>
-#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/delay.h>
+#include "mpl115.h"
+
#define MPL115_PADC 0x00 /* pressure ADC output value, MSB first, 10 bit */
#define MPL115_TADC 0x02 /* temperature ADC output value, MSB first, 10 bit */
#define MPL115_A0 0x04 /* 12 bit integer, 3 bit fraction */
@@ -27,16 +26,18 @@
#define MPL115_CONVERT 0x12 /* convert temperature and pressure */
struct mpl115_data {
- struct i2c_client *client;
+ struct device *dev;
struct mutex lock;
s16 a0;
s16 b1, b2;
s16 c12;
+ const struct mpl115_ops *ops;
};
static int mpl115_request(struct mpl115_data *data)
{
- int ret = i2c_smbus_write_byte_data(data->client, MPL115_CONVERT, 0);
+ int ret = data->ops->write(data->dev, MPL115_CONVERT, 0);
+
if (ret < 0)
return ret;
@@ -57,12 +58,12 @@ static int mpl115_comp_pressure(struct mpl115_data *data, int *val, int *val2)
if (ret < 0)
goto done;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_PADC);
+ ret = data->ops->read(data->dev, MPL115_PADC);
if (ret < 0)
goto done;
padc = ret >> 6;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC);
+ ret = data->ops->read(data->dev, MPL115_TADC);
if (ret < 0)
goto done;
tadc = ret >> 6;
@@ -90,7 +91,7 @@ static int mpl115_read_temp(struct mpl115_data *data)
ret = mpl115_request(data);
if (ret < 0)
goto done;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC);
+ ret = data->ops->read(data->dev, MPL115_TADC);
done:
mutex_unlock(&data->lock);
return ret;
@@ -145,66 +146,53 @@ static const struct iio_info mpl115_info = {
.driver_module = THIS_MODULE,
};
-static int mpl115_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+int mpl115_probe(struct device *dev, const char *name,
+ const struct mpl115_ops *ops)
{
struct mpl115_data *data;
struct iio_dev *indio_dev;
int ret;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
- data->client = client;
+ data->dev = dev;
+ data->ops = ops;
mutex_init(&data->lock);
- i2c_set_clientdata(client, indio_dev);
indio_dev->info = &mpl115_info;
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
+ indio_dev->name = name;
+ indio_dev->dev.parent = dev;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mpl115_channels;
indio_dev->num_channels = ARRAY_SIZE(mpl115_channels);
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_A0);
+ ret = data->ops->init(data->dev);
+ if (ret)
+ return ret;
+
+ ret = data->ops->read(data->dev, MPL115_A0);
if (ret < 0)
return ret;
data->a0 = ret;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_B1);
+ ret = data->ops->read(data->dev, MPL115_B1);
if (ret < 0)
return ret;
data->b1 = ret;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_B2);
+ ret = data->ops->read(data->dev, MPL115_B2);
if (ret < 0)
return ret;
data->b2 = ret;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_C12);
+ ret = data->ops->read(data->dev, MPL115_C12);
if (ret < 0)
return ret;
data->c12 = ret;
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
-
-static const struct i2c_device_id mpl115_id[] = {
- { "mpl115", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mpl115_id);
-
-static struct i2c_driver mpl115_driver = {
- .driver = {
- .name = "mpl115",
- },
- .probe = mpl115_probe,
- .id_table = mpl115_id,
-};
-module_i2c_driver(mpl115_driver);
+EXPORT_SYMBOL_GPL(mpl115_probe);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
MODULE_DESCRIPTION("Freescale MPL115 pressure/temperature driver");
diff --git a/drivers/iio/pressure/mpl115.h b/drivers/iio/pressure/mpl115.h
new file mode 100644
index 000000000000..01b652774dc3
--- /dev/null
+++ b/drivers/iio/pressure/mpl115.h
@@ -0,0 +1,24 @@
+/*
+ * Freescale MPL115A pressure/temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#ifndef _MPL115_H_
+#define _MPL115_H_
+
+struct mpl115_ops {
+ int (*init)(struct device *);
+ int (*read)(struct device *, u8);
+ int (*write)(struct device *, u8, u8);
+};
+
+int mpl115_probe(struct device *dev, const char *name,
+ const struct mpl115_ops *ops);
+
+#endif
diff --git a/drivers/iio/pressure/mpl115_i2c.c b/drivers/iio/pressure/mpl115_i2c.c
new file mode 100644
index 000000000000..1a29be462f6e
--- /dev/null
+++ b/drivers/iio/pressure/mpl115_i2c.c
@@ -0,0 +1,67 @@
+/*
+ * Freescale MPL115A2 pressure/temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x60)
+ *
+ * Datasheet: http://www.nxp.com/files/sensors/doc/data_sheet/MPL115A2.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+
+#include "mpl115.h"
+
+static int mpl115_i2c_init(struct device *dev)
+{
+ return 0;
+}
+
+static int mpl115_i2c_read(struct device *dev, u8 address)
+{
+ return i2c_smbus_read_word_swapped(to_i2c_client(dev), address);
+}
+
+static int mpl115_i2c_write(struct device *dev, u8 address, u8 value)
+{
+ return i2c_smbus_write_byte_data(to_i2c_client(dev), address, value);
+}
+
+static const struct mpl115_ops mpl115_i2c_ops = {
+ .init = mpl115_i2c_init,
+ .read = mpl115_i2c_read,
+ .write = mpl115_i2c_write,
+};
+
+static int mpl115_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
+ return mpl115_probe(&client->dev, id->name, &mpl115_i2c_ops);
+}
+
+static const struct i2c_device_id mpl115_i2c_id[] = {
+ { "mpl115", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mpl115_i2c_id);
+
+static struct i2c_driver mpl115_i2c_driver = {
+ .driver = {
+ .name = "mpl115",
+ },
+ .probe = mpl115_i2c_probe,
+ .id_table = mpl115_i2c_id,
+};
+module_i2c_driver(mpl115_i2c_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MPL115A2 pressure/temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mpl115_spi.c b/drivers/iio/pressure/mpl115_spi.c
new file mode 100644
index 000000000000..9ebf55f5b3aa
--- /dev/null
+++ b/drivers/iio/pressure/mpl115_spi.c
@@ -0,0 +1,106 @@
+/*
+ * Freescale MPL115A1 pressure/temperature sensor
+ *
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: http://www.nxp.com/files/sensors/doc/data_sheet/MPL115A1.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "mpl115.h"
+
+#define MPL115_SPI_WRITE(address) ((address) << 1)
+#define MPL115_SPI_READ(address) (0x80 | (address) << 1)
+
+struct mpl115_spi_buf {
+ u8 tx[4];
+ u8 rx[4];
+};
+
+static int mpl115_spi_init(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct mpl115_spi_buf *buf;
+
+ buf = devm_kzalloc(dev, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, buf);
+
+ return 0;
+}
+
+static int mpl115_spi_read(struct device *dev, u8 address)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct mpl115_spi_buf *buf = spi_get_drvdata(spi);
+ struct spi_transfer xfer = {
+ .tx_buf = buf->tx,
+ .rx_buf = buf->rx,
+ .len = 4,
+ };
+ int ret;
+
+ buf->tx[0] = MPL115_SPI_READ(address);
+ buf->tx[2] = MPL115_SPI_READ(address + 1);
+
+ ret = spi_sync_transfer(spi, &xfer, 1);
+ if (ret)
+ return ret;
+
+ return (buf->rx[1] << 8) | buf->rx[3];
+}
+
+static int mpl115_spi_write(struct device *dev, u8 address, u8 value)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct mpl115_spi_buf *buf = spi_get_drvdata(spi);
+ struct spi_transfer xfer = {
+ .tx_buf = buf->tx,
+ .len = 2,
+ };
+
+ buf->tx[0] = MPL115_SPI_WRITE(address);
+ buf->tx[1] = value;
+
+ return spi_sync_transfer(spi, &xfer, 1);
+}
+
+static const struct mpl115_ops mpl115_spi_ops = {
+ .init = mpl115_spi_init,
+ .read = mpl115_spi_read,
+ .write = mpl115_spi_write,
+};
+
+static int mpl115_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ return mpl115_probe(&spi->dev, id->name, &mpl115_spi_ops);
+}
+
+static const struct spi_device_id mpl115_spi_ids[] = {
+ { "mpl115", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mpl115_spi_ids);
+
+static struct spi_driver mpl115_spi_driver = {
+ .driver = {
+ .name = "mpl115",
+ },
+ .probe = mpl115_spi_probe,
+ .id_table = mpl115_spi_ids,
+};
+module_spi_driver(mpl115_spi_driver);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("Freescale MPL115A1 pressure/temperature driver");
+MODULE_LICENSE("GPL");
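mpl115_spi_read() fetches a 16-bit big-endian register pair in a single 4-byte full-duplex transfer: each command byte is 0x80 | (address << 1), and the chip clocks the corresponding data out during the following dummy byte, so the MSB lands in rx[1] and the LSB in rx[3]. A standalone illustration of the frame built for MPL115_PADC (register 0x00):

	#include <stdio.h>

	#define MPL115_SPI_READ(address) (0x80 | (address) << 1)

	int main(void)
	{
		/* command bytes interleaved with dummies for the replies */
		unsigned char tx[4] = {
			MPL115_SPI_READ(0x00), 0,	/* MSB arrives in rx[1] */
			MPL115_SPI_READ(0x01), 0,	/* LSB arrives in rx[3] */
		};

		/* prints: tx: 80 00 82 00 */
		printf("tx: %02x %02x %02x %02x\n", tx[0], tx[1], tx[2], tx[3]);
		return 0;
	}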
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 23b93c797dba..8b08e4b7e3a9 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -51,6 +51,8 @@ struct ms5611_state {
struct ms5611_chip_info *chip_info;
};
-int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type);
+int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
+		 const char *name, int type);
+int ms5611_remove(struct iio_dev *indio_dev);
#endif /* _MS5611_H */
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 2f3d9b4aca4e..992ad8d3b67a 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -16,7 +16,11 @@
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#include "ms5611.h"
static bool ms5611_prom_is_valid(u16 *prom, size_t len)
@@ -133,17 +137,17 @@ static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_inf
t = 2000 + ((chip_info->prom[6] * dt) >> 23);
if (t < 2000) {
- s64 off2, sens2, t2;
+ s64 off2, sens2, t2, tmp;
t2 = (dt * dt) >> 31;
- off2 = (61 * (t - 2000) * (t - 2000)) >> 4;
- sens2 = off2 << 1;
+ tmp = (t - 2000) * (t - 2000);
+ off2 = (61 * tmp) >> 4;
+ sens2 = tmp << 1;
if (t < -1500) {
- s64 tmp = (t + 1500) * (t + 1500);
-
+ tmp = (t + 1500) * (t + 1500);
off2 += 15 * tmp;
- sens2 += (8 * tmp);
+ sens2 += 8 * tmp;
}
t -= t2;
@@ -173,6 +177,28 @@ static int ms5611_reset(struct iio_dev *indio_dev)
return 0;
}
+static irqreturn_t ms5611_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ms5611_state *st = iio_priv(indio_dev);
+ s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]);
+ mutex_unlock(&st->lock);
+ if (ret < 0)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static int ms5611_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -201,11 +227,25 @@ static int ms5611_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = 10;
+ return IIO_VAL_INT;
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
}
return -EINVAL;
}
+static const unsigned long ms5611_scan_masks[] = {0x3, 0};
+
static struct ms5611_chip_info chip_info_tbl[] = {
[MS5611] = {
.temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
@@ -218,12 +258,29 @@ static struct ms5611_chip_info chip_info_tbl[] = {
static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_PRESSURE,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- }
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
static const struct iio_info ms5611_info = {
@@ -234,6 +291,18 @@ static const struct iio_info ms5611_info = {
static int ms5611_init(struct iio_dev *indio_dev)
{
int ret;
+ struct regulator *vdd = devm_regulator_get(indio_dev->dev.parent,
+ "vdd");
+
+ /* Enable attached regulator if any (a dummy is returned if none). */
+ if (IS_ERR(vdd))
+ return PTR_ERR(vdd);
+
+ ret = regulator_enable(vdd);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "failed to enable Vdd supply: %d\n", ret);
+ return ret;
+ }
ret = ms5611_reset(indio_dev);
if (ret < 0)
@@ -242,7 +311,8 @@ static int ms5611_init(struct iio_dev *indio_dev)
return ms5611_read_prom(indio_dev);
}
-int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type)
+int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
+ const char *name, int type)
{
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
@@ -250,20 +320,48 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type)
mutex_init(&st->lock);
st->chip_info = &chip_info_tbl[type];
indio_dev->dev.parent = dev;
- indio_dev->name = dev->driver->name;
+ indio_dev->name = name;
indio_dev->info = &ms5611_info;
indio_dev->channels = ms5611_channels;
indio_dev->num_channels = ARRAY_SIZE(ms5611_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = ms5611_scan_masks;
ret = ms5611_init(indio_dev);
if (ret < 0)
return ret;
- return devm_iio_device_register(dev, indio_dev);
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ms5611_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(dev, "unable to register iio device\n");
+ goto err_buffer_cleanup;
+ }
+
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
}
EXPORT_SYMBOL(ms5611_probe);
+int ms5611_remove(struct iio_dev *indio_dev)
+{
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(ms5611_remove);
+
MODULE_AUTHOR("Tomasz Duszynski <tduszyns@gmail.com>");
MODULE_DESCRIPTION("MS5611 core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 245797d1ecf0..7f6fc8eee922 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -92,19 +92,25 @@ static int ms5611_i2c_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_WRITE_BYTE |
I2C_FUNC_SMBUS_READ_WORD_DATA |
I2C_FUNC_SMBUS_READ_I2C_BLOCK))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
st->reset = ms5611_i2c_reset;
st->read_prom_word = ms5611_i2c_read_prom_word;
st->read_adc_temp_and_pressure = ms5611_i2c_read_adc_temp_and_pressure;
st->client = client;
- return ms5611_probe(indio_dev, &client->dev, id->driver_data);
+ return ms5611_probe(indio_dev, &client->dev, id->name, id->driver_data);
+}
+
+static int ms5611_i2c_remove(struct i2c_client *client)
+{
+ return ms5611_remove(i2c_get_clientdata(client));
}
static const struct i2c_device_id ms5611_id[] = {
@@ -120,6 +126,7 @@ static struct i2c_driver ms5611_driver = {
},
.id_table = ms5611_id,
.probe = ms5611_i2c_probe,
+ .remove = ms5611_i2c_remove,
};
module_i2c_driver(ms5611_driver);
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index aaa0c4ba91a7..5cc009e85f0e 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -90,6 +90,8 @@ static int ms5611_spi_probe(struct spi_device *spi)
if (!indio_dev)
return -ENOMEM;
+ spi_set_drvdata(spi, indio_dev);
+
spi->mode = SPI_MODE_0;
spi->max_speed_hz = 20000000;
spi->bits_per_word = 8;
@@ -103,8 +105,13 @@ static int ms5611_spi_probe(struct spi_device *spi)
st->read_adc_temp_and_pressure = ms5611_spi_read_adc_temp_and_pressure;
st->client = spi;
- return ms5611_probe(indio_dev, &spi->dev,
- spi_get_device_id(spi)->driver_data);
+ return ms5611_probe(indio_dev, &spi->dev, spi_get_device_id(spi)->name,
+ spi_get_device_id(spi)->driver_data);
+}
+
+static int ms5611_spi_remove(struct spi_device *spi)
+{
+ return ms5611_remove(spi_get_drvdata(spi));
}
static const struct spi_device_id ms5611_id[] = {
@@ -120,6 +127,7 @@ static struct spi_driver ms5611_driver = {
},
.id_table = ms5611_id,
.probe = ms5611_spi_probe,
+ .remove = ms5611_spi_remove,
};
module_spi_driver(ms5611_driver);
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index e8d0e0da938d..e68052c118e6 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -136,7 +136,7 @@ static int ms5637_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
dev_err(&client->dev,
"Adapter does not support some i2c transaction\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index b39a2fb0671c..172393ad34af 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -62,6 +62,8 @@
#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
#define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
+#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
+#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
@@ -100,6 +102,8 @@
#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
+#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
+#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
#define ST_PRESS_LPS25H_MULTIREAD_BIT true
#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
@@ -220,6 +224,8 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR,
.mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
+ .mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
},
.multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
.bootime = 2,
@@ -304,6 +310,8 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
.mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
+ .mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
},
.multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
.bootime = 2,
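The new addr_ihl/mask_ihl pair tells the ST sensors common core which register bit makes the data-ready interrupt active low. A hypothetical sketch of the resulting read-modify-write; struct my_dev, my_read_reg() and my_write_reg() are stand-ins for the core's real accessors:

	static int lps25h_set_irq_active_low(struct my_dev *dev, bool low)
	{
		u8 val;
		int ret;

		ret = my_read_reg(dev, ST_PRESS_LPS25H_IHL_IRQ_ADDR, &val);
		if (ret)
			return ret;

		if (low)
			val |= ST_PRESS_LPS25H_IHL_IRQ_MASK;
		else
			val &= ~ST_PRESS_LPS25H_IHL_IRQ_MASK;

		return my_write_reg(dev, ST_PRESS_LPS25H_IHL_IRQ_ADDR, val);
	}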
diff --git a/drivers/iio/pressure/t5403.c b/drivers/iio/pressure/t5403.c
index e11cd3938d67..2667e71721f5 100644
--- a/drivers/iio/pressure/t5403.c
+++ b/drivers/iio/pressure/t5403.c
@@ -221,7 +221,7 @@ static int t5403_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK))
- return -ENODEV;
+ return -EOPNOTSUPP;
ret = i2c_smbus_read_byte_data(client, T5403_SLAVE_ADDR);
if (ret < 0)
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index db35e04a0637..4f502386aa86 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -278,7 +278,7 @@ static int lidar_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
data->xfer = lidar_smbus_xfer;
else
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
indio_dev->info = &lidar_info;
indio_dev->name = LIDAR_DRV_NAME;
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index a570c2e2aac3..4b645fc672aa 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -516,7 +516,7 @@ static int mlx90614_probe(struct i2c_client *client,
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index e78c1069a6a9..18c9b43c02cb 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -205,7 +205,7 @@ static int tmp006_probe(struct i2c_client *client,
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
if (!tmp006_check_identification(client)) {
dev_err(&client->dev, "no TMP006 sensor\n");
diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c
index 05c12060ce8d..3e60c6189d98 100644
--- a/drivers/iio/temperature/tsys01.c
+++ b/drivers/iio/temperature/tsys01.c
@@ -190,7 +190,7 @@ static int tsys01_i2c_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
dev_err(&client->dev,
"Adapter does not support some i2c transaction\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c
index 4c1fbd52ea08..ab6fe8f6f2d1 100644
--- a/drivers/iio/temperature/tsys02d.c
+++ b/drivers/iio/temperature/tsys02d.c
@@ -137,7 +137,7 @@ static int tsys02d_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
dev_err(&client->dev,
"Adapter does not support some i2c transaction\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 8a8440c0eed1..6425c0e5d18a 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -68,6 +68,7 @@ source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
+source "drivers/infiniband/hw/i40iw/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
@@ -82,4 +83,6 @@ source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
+source "drivers/infiniband/sw/rdmavt/Kconfig"
+
endif # INFINIBAND
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index dc21836b5a8d..fad0b44c356f 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND) += hw/
obj-$(CONFIG_INFINIBAND) += ulp/
+obj-$(CONFIG_INFINIBAND) += sw/
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 53343ffbff7a..c2e257d97eff 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
NULL);
/* Couldn't find default GID location */
- WARN_ON(ix < 0);
+ if (WARN_ON(ix < 0))
+ goto release;
zattr_type.gid_type = gid_type;
@@ -1043,8 +1044,8 @@ static void ib_cache_update(struct ib_device *device,
ret = ib_query_port(device, port, tprops);
if (ret) {
- printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
- ret, device->name);
+ pr_warn("ib_query_port failed (%d) for %s\n",
+ ret, device->name);
goto err;
}
@@ -1067,8 +1068,8 @@ static void ib_cache_update(struct ib_device *device,
for (i = 0; i < pkey_cache->table_len; ++i) {
ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
if (ret) {
- printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
+ ret, device->name, i);
goto err;
}
}
@@ -1078,8 +1079,8 @@ static void ib_cache_update(struct ib_device *device,
ret = ib_query_gid(device, port, i,
gid_cache->table + i, NULL);
if (ret) {
- printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
+ ret, device->name, i);
goto err;
}
}
@@ -1161,8 +1162,7 @@ int ib_cache_setup_one(struct ib_device *device)
GFP_KERNEL);
if (!device->cache.pkey_cache ||
!device->cache.lmc_cache) {
- printk(KERN_WARNING "Couldn't allocate cache "
- "for %s\n", device->name);
+ pr_warn("Couldn't allocate cache for %s\n", device->name);
return -ENOMEM;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9729639df407..93ab0ae97208 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1206,6 +1206,10 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
req->has_gid = true;
req->service_id = req_param->primary_path->service_id;
req->pkey = be16_to_cpu(req_param->primary_path->pkey);
+ if (req->pkey != req_param->bth_pkey)
+ pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
+ "RDMA CMA: in the future this may cause the request to be dropped\n",
+ req_param->bth_pkey, req->pkey);
break;
case IB_CM_SIDR_REQ_RECEIVED:
req->device = sidr_param->listen_id->device;
@@ -1213,6 +1217,10 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
req->has_gid = false;
req->service_id = sidr_param->service_id;
req->pkey = sidr_param->pkey;
+ if (req->pkey != sidr_param->bth_pkey)
+ pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
+ "RDMA CMA: in the future this may cause the request to be dropped\n",
+ sidr_param->bth_pkey, req->pkey);
break;
default:
return -EINVAL;
@@ -1713,7 +1721,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
break;
default:
- printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+ pr_err("RDMA CMA: unexpected IB CM event: %d\n",
ib_event->event);
goto out;
}
@@ -2186,8 +2194,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
ret = rdma_listen(id, id_priv->backlog);
if (ret)
- printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
- "listening on device %s\n", ret, cma_dev->device->name);
+ pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
+ ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -3239,7 +3247,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
event.status = 0;
break;
default:
- printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+ pr_err("RDMA CMA: unexpected IB CM event: %d\n",
ib_event->event);
goto out;
}
@@ -4003,8 +4011,8 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
if ((dev_addr->bound_dev_if == ndev->ifindex) &&
(net_eq(dev_net(ndev), dev_addr->net)) &&
memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
- printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
- ndev->name, &id_priv->id);
+ pr_info("RDMA CM addr change for ndev %s used by id %p\n",
+ ndev->name, &id_priv->id);
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
return -ENOMEM;
@@ -4287,7 +4295,7 @@ static int __init cma_init(void)
goto err;
if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
- printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+ pr_warn("RDMA CMA: failed to add netlink callback\n");
cma_configfs_init();
return 0;
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 18b112aa577e..41573df1d9fc 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -49,8 +49,6 @@ struct cma_dev_group {
char name[IB_DEVICE_NAME_MAX];
struct config_group device_group;
struct config_group ports_group;
- struct config_group *default_dev_group[2];
- struct config_group **default_ports_group;
struct cma_dev_port_group *ports;
};
@@ -158,7 +156,6 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
unsigned int i;
unsigned int ports_num;
struct cma_dev_port_group *ports;
- struct config_group **ports_group;
int err;
ibdev = cma_get_ib_dev(cma_dev);
@@ -169,9 +166,8 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
ports_num = ibdev->phys_port_cnt;
ports = kcalloc(ports_num, sizeof(*cma_dev_group->ports),
GFP_KERNEL);
- ports_group = kcalloc(ports_num + 1, sizeof(*ports_group), GFP_KERNEL);
- if (!ports || !ports_group) {
+ if (!ports) {
err = -ENOMEM;
goto free;
}
@@ -185,18 +181,16 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
config_group_init_type_name(&ports[i].group,
port_str,
&cma_port_group_type);
- ports_group[i] = &ports[i].group;
+ configfs_add_default_group(&ports[i].group,
+ &cma_dev_group->ports_group);
+
}
- ports_group[i] = NULL;
- cma_dev_group->default_ports_group = ports_group;
cma_dev_group->ports = ports;
return 0;
free:
kfree(ports);
- kfree(ports_group);
cma_dev_group->ports = NULL;
- cma_dev_group->default_ports_group = NULL;
return err;
}
@@ -220,9 +214,7 @@ static void release_cma_ports_group(struct config_item *item)
ports_group);
kfree(cma_dev_group->ports);
- kfree(cma_dev_group->default_ports_group);
cma_dev_group->ports = NULL;
- cma_dev_group->default_ports_group = NULL;
};
static struct configfs_item_operations cma_ports_item_ops = {
@@ -263,22 +255,17 @@ static struct config_group *make_cma_dev(struct config_group *group,
strncpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
- err = make_cma_ports(cma_dev_group, cma_dev);
- if (err)
- goto fail;
-
- cma_dev_group->ports_group.default_groups =
- cma_dev_group->default_ports_group;
config_group_init_type_name(&cma_dev_group->ports_group, "ports",
&cma_ports_group_type);
- cma_dev_group->device_group.default_groups
- = cma_dev_group->default_dev_group;
- cma_dev_group->default_dev_group[0] = &cma_dev_group->ports_group;
- cma_dev_group->default_dev_group[1] = NULL;
+ err = make_cma_ports(cma_dev_group, cma_dev);
+ if (err)
+ goto fail;
config_group_init_type_name(&cma_dev_group->device_group, name,
&cma_device_group_type);
+ configfs_add_default_group(&cma_dev_group->ports_group,
+ &cma_dev_group->device_group);
cma_deref_dev(cma_dev);
return &cma_dev_group->device_group;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 94b80a51ab68..10979844026a 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -115,8 +115,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
- printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
- device->name, mandatory_table[i].name);
+ pr_warn("Device %s is missing mandatory function %s\n",
+ device->name, mandatory_table[i].name);
return -EINVAL;
}
}
@@ -255,8 +255,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
context = kmalloc(sizeof *context, GFP_KERNEL);
if (!context) {
- printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
- device->name, client->name);
+ pr_warn("Couldn't allocate client context for %s/%s\n",
+ device->name, client->name);
return -ENOMEM;
}
@@ -343,29 +343,29 @@ int ib_register_device(struct ib_device *device,
ret = read_port_immutable(device);
if (ret) {
- printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
- device->name);
+ pr_warn("Couldn't create per port immutable data %s\n",
+ device->name);
goto out;
}
ret = ib_cache_setup_one(device);
if (ret) {
- printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
+ pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
goto out;
}
memset(&device->attrs, 0, sizeof(device->attrs));
ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
- printk(KERN_WARNING "Couldn't query the device attributes\n");
+ pr_warn("Couldn't query the device attributes\n");
ib_cache_cleanup_one(device);
goto out;
}
ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
- printk(KERN_WARNING "Couldn't register device %s with driver model\n",
- device->name);
+ pr_warn("Couldn't register device %s with driver model\n",
+ device->name);
ib_cache_cleanup_one(device);
goto out;
}
@@ -566,8 +566,8 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
goto out;
}
- printk(KERN_WARNING "No client context found for %s/%s\n",
- device->name, client->name);
+ pr_warn("No client context found for %s/%s\n",
+ device->name, client->name);
out:
spin_unlock_irqrestore(&device->client_data_lock, flags);
@@ -650,10 +650,23 @@ int ib_query_port(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr)
{
+ union ib_gid gid;
+ int err;
+
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;
- return device->query_port(device, port_num, port_attr);
+ memset(port_attr, 0, sizeof(*port_attr));
+ err = device->query_port(device, port_num, port_attr);
+ if (err || port_attr->subnet_prefix)
+ return err;
+
+ err = ib_query_gid(device, port_num, 0, &gid, NULL);
+ if (err)
+ return err;
+
+ port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
+ return 0;
}
EXPORT_SYMBOL(ib_query_port);
@@ -960,13 +973,13 @@ static int __init ib_core_init(void)
ret = class_register(&ib_class);
if (ret) {
- printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+ pr_warn("Couldn't create InfiniBand device class\n");
goto err_comp;
}
ret = ibnl_init();
if (ret) {
- printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+ pr_warn("Couldn't init IB netlink interface\n");
goto err_sysfs;
}
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 6ac3683c144b..cdbb1f1a6d97 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -150,8 +150,8 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
#ifdef DEBUG
if (fmr->ref_count !=0) {
- printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d\n",
- fmr, fmr->ref_count);
+ pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
+ fmr, fmr->ref_count);
}
#endif
}
@@ -167,7 +167,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
ret = ib_unmap_fmr(&fmr_list);
if (ret)
- printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);
+ pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);
spin_lock_irq(&pool->pool_lock);
list_splice(&unmap_list, &pool->free_list);
@@ -222,8 +222,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
device = pd->device;
if (!device->alloc_fmr || !device->dealloc_fmr ||
!device->map_phys_fmr || !device->unmap_fmr) {
- printk(KERN_INFO PFX "Device %s does not support FMRs\n",
- device->name);
+ pr_info(PFX "Device %s does not support FMRs\n", device->name);
return ERR_PTR(-ENOSYS);
}
@@ -233,13 +232,10 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
max_remaps = device->attrs.max_map_per_fmr;
pool = kmalloc(sizeof *pool, GFP_KERNEL);
- if (!pool) {
- printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
+ if (!pool)
return ERR_PTR(-ENOMEM);
- }
pool->cache_bucket = NULL;
-
pool->flush_function = params->flush_function;
pool->flush_arg = params->flush_arg;
@@ -251,7 +247,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
GFP_KERNEL);
if (!pool->cache_bucket) {
- printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
+ pr_warn(PFX "Failed to allocate cache in pool\n");
ret = -ENOMEM;
goto out_free_pool;
}
@@ -275,7 +271,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
"ib_fmr(%s)",
device->name);
if (IS_ERR(pool->thread)) {
- printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
+ pr_warn(PFX "couldn't start cleanup thread\n");
ret = PTR_ERR(pool->thread);
goto out_free_pool;
}
@@ -294,11 +290,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
for (i = 0; i < params->pool_size; ++i) {
fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
- if (!fmr) {
- printk(KERN_WARNING PFX "failed to allocate fmr "
- "struct for FMR %d\n", i);
+ if (!fmr)
goto out_fail;
- }
fmr->pool = pool;
fmr->remap_count = 0;
@@ -307,8 +300,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
if (IS_ERR(fmr->fmr)) {
- printk(KERN_WARNING PFX "fmr_create failed "
- "for FMR %d\n", i);
+ pr_warn(PFX "fmr_create failed for FMR %d\n",
+ i);
kfree(fmr);
goto out_fail;
}
@@ -363,8 +356,8 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
}
if (i < pool->pool_size)
- printk(KERN_WARNING PFX "pool still has %d regions registered\n",
- pool->pool_size - i);
+ pr_warn(PFX "pool still has %d regions registered\n",
+ pool->pool_size - i);
kfree(pool->cache_bucket);
kfree(pool);
@@ -463,7 +456,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
list_add(&fmr->list, &pool->free_list);
spin_unlock_irqrestore(&pool->pool_lock, flags);
- printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
+ pr_warn(PFX "fmr_map returns %d\n", result);
return ERR_PTR(result);
}
@@ -517,8 +510,8 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
#ifdef DEBUG
if (fmr->ref_count < 0)
- printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
- fmr, fmr->ref_count);
+ pr_warn(PFX "FMR %p has ref count %d < 0\n",
+ fmr, fmr->ref_count);
#endif
spin_unlock_irqrestore(&pool->pool_lock, flags);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index ff9163dc1596..e28a160cdab0 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -50,6 +50,8 @@
#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
+#include <rdma/iw_portmap.h>
+#include <rdma/rdma_netlink.h>
#include "iwcm.h"
@@ -57,6 +59,16 @@ MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");
+static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
+ [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
+ [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
+ [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+ [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
+ [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+ [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
+ [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
+};
+
static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
struct work_struct work;
@@ -402,6 +414,11 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (cm_id->mapped) {
+ iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
+ iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
+ }
+
(void)iwcm_deref_id(cm_id_priv);
}
@@ -426,6 +443,97 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
}
EXPORT_SYMBOL(iw_destroy_cm_id);
+/**
+ * iw_cm_check_wildcard - replace a wildcard IP address with the actual one
+ * @pm_addr: sockaddr containing the IP address to check for a wildcard
+ * @cm_addr: sockaddr containing the actual IP address
+ * @cm_outaddr: sockaddr whose IP address is rewritten; its port is kept
+ *
+ * Checks pm_addr for a wildcard and, if one is found, sets cm_outaddr's
+ * IP to the actual address taken from cm_addr.
+ */
+static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
+ struct sockaddr_storage *cm_addr,
+ struct sockaddr_storage *cm_outaddr)
+{
+ if (pm_addr->ss_family == AF_INET) {
+ struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
+
+ if (pm4_addr->sin_addr.s_addr == INADDR_ANY) {
+ struct sockaddr_in *cm4_addr =
+ (struct sockaddr_in *)cm_addr;
+ struct sockaddr_in *cm4_outaddr =
+ (struct sockaddr_in *)cm_outaddr;
+
+ cm4_outaddr->sin_addr = cm4_addr->sin_addr;
+ }
+ } else {
+ struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;
+
+ if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
+ struct sockaddr_in6 *cm6_addr =
+ (struct sockaddr_in6 *)cm_addr;
+ struct sockaddr_in6 *cm6_outaddr =
+ (struct sockaddr_in6 *)cm_outaddr;
+
+ cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
+ }
+ }
+}
+
+/**
+ * iw_cm_map - use the port mapper to map the connection's ports
+ * @cm_id: connection manager pointer
+ * @active: indicates the active side when true
+ * Returns: nonzero on error, and only when iwpm_create_mapinfo() fails
+ *
+ * Tries to add a mapping for a port using the port mapper. If the
+ * IP/port mapping succeeds, it checks the remote mapped IP address
+ * for a wildcard and replaces a zero IP address with remote_addr.
+ */
+static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
+{
+ struct iwpm_dev_data pm_reg_msg;
+ struct iwpm_sa_data pm_msg;
+ int status;
+
+ cm_id->m_local_addr = cm_id->local_addr;
+ cm_id->m_remote_addr = cm_id->remote_addr;
+
+ memcpy(pm_reg_msg.dev_name, cm_id->device->name,
+ sizeof(pm_reg_msg.dev_name));
+ memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
+ sizeof(pm_reg_msg.if_name));
+
+ if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
+ !iwpm_valid_pid())
+ return 0;
+
+ cm_id->mapped = true;
+ pm_msg.loc_addr = cm_id->local_addr;
+ pm_msg.rem_addr = cm_id->remote_addr;
+ if (active)
+ status = iwpm_add_and_query_mapping(&pm_msg,
+ RDMA_NL_IWCM);
+ else
+ status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);
+
+ if (!status) {
+ cm_id->m_local_addr = pm_msg.mapped_loc_addr;
+ if (active) {
+ cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
+ iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
+ &cm_id->remote_addr,
+ &cm_id->m_remote_addr);
+ }
+ }
+
+ return iwpm_create_mapinfo(&cm_id->local_addr,
+ &cm_id->m_local_addr,
+ RDMA_NL_IWCM);
+}
+
/*
* CM_ID <-- LISTEN
*
@@ -452,7 +560,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
case IW_CM_STATE_IDLE:
cm_id_priv->state = IW_CM_STATE_LISTEN;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
+ ret = iw_cm_map(cm_id, false);
+ if (!ret)
+ ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
if (ret)
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -582,39 +692,37 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->state != IW_CM_STATE_IDLE) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
- wake_up_all(&cm_id_priv->connect_wait);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
/* Get the ib_qp given the QPN */
qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
- wake_up_all(&cm_id_priv->connect_wait);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
cm_id->device->iwcm->add_ref(qp);
cm_id_priv->qp = qp;
cm_id_priv->state = IW_CM_STATE_CONN_SENT;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->connect(cm_id, iw_param);
- if (ret) {
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id_priv->qp) {
- cm_id->device->iwcm->rem_ref(qp);
- cm_id_priv->qp = NULL;
- }
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
- cm_id_priv->state = IW_CM_STATE_IDLE;
- clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
- wake_up_all(&cm_id_priv->connect_wait);
- }
+ ret = iw_cm_map(cm_id, true);
+ if (!ret)
+ ret = cm_id->device->iwcm->connect(cm_id, iw_param);
+ if (!ret)
+ return 0; /* success */
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->qp) {
+ cm_id->device->iwcm->rem_ref(qp);
+ cm_id_priv->qp = NULL;
+ }
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+err:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
@@ -656,8 +764,23 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
goto out;
cm_id->provider_data = iw_event->provider_data;
- cm_id->local_addr = iw_event->local_addr;
- cm_id->remote_addr = iw_event->remote_addr;
+ cm_id->m_local_addr = iw_event->local_addr;
+ cm_id->m_remote_addr = iw_event->remote_addr;
+ cm_id->local_addr = listen_id_priv->id.local_addr;
+
+ ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
+ &iw_event->remote_addr,
+ &cm_id->remote_addr,
+ RDMA_NL_IWCM);
+ if (ret) {
+ cm_id->remote_addr = iw_event->remote_addr;
+ } else {
+ iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
+ &iw_event->local_addr,
+ &cm_id->local_addr);
+ iw_event->local_addr = cm_id->local_addr;
+ iw_event->remote_addr = cm_id->remote_addr;
+ }
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
cm_id_priv->state = IW_CM_STATE_CONN_RECV;
@@ -753,8 +876,10 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
if (iw_event->status == 0) {
- cm_id_priv->id.local_addr = iw_event->local_addr;
- cm_id_priv->id.remote_addr = iw_event->remote_addr;
+ cm_id_priv->id.m_local_addr = iw_event->local_addr;
+ cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
+ iw_event->local_addr = cm_id_priv->id.local_addr;
+ iw_event->remote_addr = cm_id_priv->id.remote_addr;
cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
} else {
/* REJECTED or RESET */
@@ -1044,6 +1169,17 @@ EXPORT_SYMBOL(iw_cm_init_qp_attr);
static int __init iw_cm_init(void)
{
+ int ret;
+
+ ret = iwpm_init(RDMA_NL_IWCM);
+ if (ret)
+ pr_err("iw_cm: couldn't init iwpm\n");
+
+ ret = ibnl_add_client(RDMA_NL_IWCM, RDMA_NL_IWPM_NUM_OPS,
+ iwcm_nl_cb_table);
+ if (ret)
+ pr_err("iw_cm: couldn't register netlink callbacks\n");
+
iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
if (!iwcm_wq)
return -ENOMEM;
@@ -1063,6 +1199,8 @@ static void __exit iw_cm_cleanup(void)
{
unregister_net_sysctl_table(iwcm_ctl_table_hdr);
destroy_workqueue(iwcm_wq);
+ ibnl_remove_client(RDMA_NL_IWCM);
+ iwpm_exit(RDMA_NL_IWCM);
}
module_init(iw_cm_init);
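To make the wildcard handling concrete, a usage sketch (conceptual only: iw_cm_check_wildcard() is static and takes struct sockaddr_storage pointers; the addresses are made up): if the port mapper hands back 0.0.0.0:4321 while the real address is 10.0.0.5, the output keeps the mapped port and takes the real IP.

	struct sockaddr_storage pm, cm, out;
	struct sockaddr_in *pm4 = (struct sockaddr_in *)&pm;
	struct sockaddr_in *cm4 = (struct sockaddr_in *)&cm;

	pm4->sin_family = AF_INET;
	pm4->sin_addr.s_addr = htonl(INADDR_ANY);	/* wildcard from the mapper */
	pm4->sin_port = htons(4321);
	cm4->sin_family = AF_INET;
	cm4->sin_addr.s_addr = htonl(0x0a000005);	/* 10.0.0.5 */

	out = pm;				/* port 4321 is preserved */
	iw_cm_check_wildcard(&pm, &cm, &out);	/* out now holds 10.0.0.5:4321 */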
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 22a3abee2a54..43e3fa27102b 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -88,8 +88,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ);
if (ret)
goto pid_query_error;
- ret = ibnl_put_attr(skb, nlh, IWPM_IFNAME_SIZE,
- pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
+ ret = ibnl_put_attr(skb, nlh, IFNAMSIZ,
+ pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
if (ret)
goto pid_query_error;
ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE,
@@ -394,7 +394,7 @@ register_pid_response_exit:
/* always for found nlmsg_request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
EXPORT_SYMBOL(iwpm_register_pid_cb);
@@ -463,7 +463,7 @@ add_mapping_response_exit:
/* always for found request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
EXPORT_SYMBOL(iwpm_add_mapping_cb);
@@ -555,7 +555,7 @@ query_mapping_response_exit:
/* always for found request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
@@ -749,7 +749,7 @@ int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
/* always for found request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
EXPORT_SYMBOL(iwpm_mapping_error_cb);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 5fb089e91353..9b2bf2fb2b00 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -254,9 +254,9 @@ void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
}
int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
- struct sockaddr_storage *mapped_rem_addr,
- struct sockaddr_storage *remote_addr,
- u8 nl_client)
+ struct sockaddr_storage *mapped_rem_addr,
+ struct sockaddr_storage *remote_addr,
+ u8 nl_client)
{
struct hlist_node *tmp_hlist_node;
struct hlist_head *hash_bucket_head;
@@ -322,6 +322,8 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
nlmsg_request->nl_client = nl_client;
nlmsg_request->request_done = 0;
nlmsg_request->err_code = 0;
+ sema_init(&nlmsg_request->sem, 1);
+ down(&nlmsg_request->sem);
return nlmsg_request;
}
@@ -364,11 +366,9 @@ struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)
int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)
{
int ret;
- init_waitqueue_head(&nlmsg_request->waitq);
- ret = wait_event_timeout(nlmsg_request->waitq,
- (nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);
- if (!ret) {
+ ret = down_timeout(&nlmsg_request->sem, IWPM_NL_TIMEOUT);
+ if (ret) {
ret = -EINVAL;
pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",
__func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index b7b9e194ce81..af1fc14a0d3d 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -69,7 +69,7 @@ struct iwpm_nlmsg_request {
u8 nl_client;
u8 request_done;
u16 err_code;
- wait_queue_head_t waitq;
+ struct semaphore sem;
struct kref kref;
};
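A condensed restatement (sketch, no new API) of the waitqueue-to-semaphore conversion above: the request is created with the semaphore already held, the waiter blocks in down_timeout(), and the netlink callback that finishes the request releases it with up().

	/* creation: take the semaphore so the waiter blocks until answered */
	sema_init(&nlmsg_request->sem, 1);
	down(&nlmsg_request->sem);

	/* waiter */
	if (down_timeout(&nlmsg_request->sem, IWPM_NL_TIMEOUT))
		ret = -EINVAL;		/* timed out before any up() */

	/* netlink callback, once the reply is parsed */
	nlmsg_request->request_done = 1;
	up(&nlmsg_request->sem);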
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index 1b65986c0be3..19b1ee3279b4 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -44,7 +44,7 @@ static u64 value_read(int offset, int size, void *structure)
case 4: return be32_to_cpup((__be32 *) (structure + offset));
case 8: return be64_to_cpup((__be64 *) (structure + offset));
default:
- printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+ pr_warn("Field size %d bits not handled\n", size * 8);
return 0;
}
}
@@ -104,9 +104,8 @@ void ib_pack(const struct ib_field *desc,
} else {
if (desc[i].offset_bits % 8 ||
desc[i].size_bits % 8) {
- printk(KERN_WARNING "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits);
+ pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
}
if (desc[i].struct_size_bytes)
@@ -132,7 +131,7 @@ static void value_write(int offset, int size, u64 val, void *structure)
case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
default:
- printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+ pr_warn("Field size %d bits not handled\n", size * 8);
}
}
@@ -188,9 +187,8 @@ void ib_unpack(const struct ib_field *desc,
} else {
if (desc[i].offset_bits % 8 ||
desc[i].size_bits % 8) {
- printk(KERN_WARNING "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits);
+ pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
}
memcpy(structure + desc[i].struct_offset_bytes,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1e37f3515d98..8a09c0fb268d 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -864,13 +864,12 @@ static void update_sm_ah(struct work_struct *work)
struct ib_ah_attr ah_attr;
if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
- printk(KERN_WARNING "Couldn't query port\n");
+ pr_warn("Couldn't query port\n");
return;
}
new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
if (!new_ah) {
- printk(KERN_WARNING "Couldn't allocate new SM AH\n");
return;
}
@@ -880,16 +879,21 @@ static void update_sm_ah(struct work_struct *work)
new_ah->pkey_index = 0;
if (ib_find_pkey(port->agent->device, port->port_num,
IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
- printk(KERN_ERR "Couldn't find index for default PKey\n");
+ pr_err("Couldn't find index for default PKey\n");
memset(&ah_attr, 0, sizeof ah_attr);
ah_attr.dlid = port_attr.sm_lid;
ah_attr.sl = port_attr.sm_sl;
ah_attr.port_num = port->port_num;
+ if (port_attr.grh_required) {
+ ah_attr.ah_flags = IB_AH_GRH;
+ ah_attr.grh.dgid.global.subnet_prefix = cpu_to_be64(port_attr.subnet_prefix);
+ ah_attr.grh.dgid.global.interface_id = cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
+ }
new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
if (IS_ERR(new_ah->ah)) {
- printk(KERN_WARNING "Couldn't create new SM AH\n");
+ pr_warn("Couldn't create new SM AH\n");
kfree(new_ah);
return;
}
@@ -1221,7 +1225,7 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
rec.net = NULL;
rec.ifindex = 0;
rec.gid_type = IB_GID_TYPE_IB;
- memset(rec.dmac, 0, ETH_ALEN);
+ eth_zero_addr(rec.dmac);
query->callback(status, &rec, query->context);
} else
query->callback(status, NULL, query->context);
@@ -1800,13 +1804,13 @@ static int __init ib_sa_init(void)
ret = ib_register_client(&sa_client);
if (ret) {
- printk(KERN_ERR "Couldn't register ib_sa client\n");
+ pr_err("Couldn't register ib_sa client\n");
goto err1;
}
ret = mcast_init();
if (ret) {
- printk(KERN_ERR "Couldn't initialize multicast handling\n");
+ pr_err("Couldn't initialize multicast handling\n");
goto err2;
}
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 6b4e8a008bc0..7713ef089c3c 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
@@ -1234,7 +1238,7 @@ static int find_overflow_devnum(void)
ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
"infiniband_cm");
if (ret) {
- printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
+ pr_err("ucm: couldn't register dynamic device number\n");
return ret;
}
}
@@ -1329,19 +1333,19 @@ static int __init ib_ucm_init(void)
ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
"infiniband_cm");
if (ret) {
- printk(KERN_ERR "ucm: couldn't register device number\n");
+ pr_err("ucm: couldn't register device number\n");
goto error1;
}
ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
if (ret) {
- printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
+ pr_err("ucm: couldn't create abi_version attribute\n");
goto error2;
}
ret = ib_register_client(&ucm_client);
if (ret) {
- printk(KERN_ERR "ucm: couldn't register client\n");
+ pr_err("ucm: couldn't register client\n");
goto error3;
}
return 0;
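The new ib_safe_file_access() guard rejects command writes issued with credentials or an address-space limit different from the opener's, closing the /proc/<pid>/mem-style attack on these write() interfaces. Paraphrased from <rdma/ib.h> as the author understands it (not quoted from this diff):

	static inline bool ib_safe_file_access(struct file *filp)
	{
		return filp->f_cred == current_cred() &&
		       segment_eq(get_fs(), USER_DS);
	}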
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 8b5a934e1133..c0f3826abb30 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -314,7 +314,7 @@ static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
}
}
if (!event_found)
- printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
+ pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}
static int ucma_event_handler(struct rdma_cm_id *cm_id,
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
@@ -1716,13 +1719,13 @@ static int __init ucma_init(void)
ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
if (ret) {
- printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+ pr_err("rdma_ucm: couldn't create abi_version attr\n");
goto err1;
}
ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
if (!ucma_ctl_table_hdr) {
- printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+ pr_err("rdma_ucm: couldn't register sysctl paths\n");
ret = -ENOMEM;
goto err2;
}
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 2116132568e7..29a45d2f8898 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -479,8 +479,8 @@ int ib_ud_header_unpack(void *buf,
buf += IB_LRH_BYTES;
if (header->lrh.link_version != 0) {
- printk(KERN_WARNING "Invalid LRH.link_version %d\n",
- header->lrh.link_version);
+ pr_warn("Invalid LRH.link_version %d\n",
+ header->lrh.link_version);
return -EINVAL;
}
@@ -496,20 +496,20 @@ int ib_ud_header_unpack(void *buf,
buf += IB_GRH_BYTES;
if (header->grh.ip_version != 6) {
- printk(KERN_WARNING "Invalid GRH.ip_version %d\n",
- header->grh.ip_version);
+ pr_warn("Invalid GRH.ip_version %d\n",
+ header->grh.ip_version);
return -EINVAL;
}
if (header->grh.next_header != 0x1b) {
- printk(KERN_WARNING "Invalid GRH.next_header 0x%02x\n",
- header->grh.next_header);
+ pr_warn("Invalid GRH.next_header 0x%02x\n",
+ header->grh.next_header);
return -EINVAL;
}
break;
default:
- printk(KERN_WARNING "Invalid LRH.link_next_header %d\n",
- header->lrh.link_next_header);
+ pr_warn("Invalid LRH.link_next_header %d\n",
+ header->lrh.link_next_header);
return -EINVAL;
}
@@ -525,14 +525,13 @@ int ib_ud_header_unpack(void *buf,
header->immediate_present = 1;
break;
default:
- printk(KERN_WARNING "Invalid BTH.opcode 0x%02x\n",
- header->bth.opcode);
+ pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
return -EINVAL;
}
if (header->bth.transport_header_version != 0) {
- printk(KERN_WARNING "Invalid BTH.transport_header_version %d\n",
- header->bth.transport_header_version);
+ pr_warn("Invalid BTH.transport_header_version %d\n",
+ header->bth.transport_header_version);
return -EINVAL;
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 38acb3cfc545..fe4d2e1a8b58 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -188,7 +188,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
sg_list_start = umem->sg_head.sgl;
while (npages) {
- ret = get_user_pages(current, current->mm, cur_base,
+ ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
1, !umem->writable, page_list, vma_list);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e69bf266049d..75077a018675 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -572,10 +572,10 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
* complex (and doesn't gain us much performance in most use
* cases).
*/
- npages = get_user_pages(owning_process, owning_mm, user_virt,
- gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT, 0,
- local_page_list, NULL);
+ npages = get_user_pages_remote(owning_process, owning_mm,
+ user_virt, gup_num_pages,
+ access_mask & ODP_WRITE_ALLOWED_BIT,
+ 0, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6c6fbff19752..6fdc7ecdaca0 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -402,7 +402,7 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
resp->hw_ver = attr->hw_ver;
resp->max_qp = attr->max_qp;
resp->max_qp_wr = attr->max_qp_wr;
- resp->device_cap_flags = attr->device_cap_flags;
+ resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
resp->max_sge = attr->max_sge;
resp->max_sge_rd = attr->max_sge_rd;
resp->max_cq = attr->max_cq;
@@ -1174,6 +1174,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_mw *mw;
+ struct ib_udata udata;
int ret;
if (out_len < sizeof(resp))
@@ -1195,7 +1196,12 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
goto err_free;
}
- mw = pd->device->alloc_mw(pd, cmd.mw_type);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long)cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
+
+ mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
if (IS_ERR(mw)) {
ret = PTR_ERR(mw);
goto err_put;
@@ -3086,6 +3092,14 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
!capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
return -EPERM;
+ if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
+ return -EINVAL;
+
+ if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+ ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
+ (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
+ return -EINVAL;
+
if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
return -EINVAL;
@@ -3586,9 +3600,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
- struct ib_uverbs_ex_query_device_resp resp;
+ struct ib_uverbs_ex_query_device_resp resp = { {0} };
struct ib_uverbs_ex_query_device cmd;
- struct ib_device_attr attr;
+ struct ib_device_attr attr = {0};
int err;
if (ucore->inlen < sizeof(cmd))
@@ -3609,14 +3623,11 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
if (ucore->outlen < resp.response_length)
return -ENOSPC;
- memset(&attr, 0, sizeof(attr));
-
err = ib_dev->query_device(ib_dev, &attr, uhw);
if (err)
return err;
copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
- resp.comp_mask = 0;
if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
goto end;
@@ -3629,9 +3640,6 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
attr.odp_caps.per_transport_caps.uc_odp_caps;
resp.odp_caps.per_transport_caps.ud_odp_caps =
attr.odp_caps.per_transport_caps.ud_odp_caps;
- resp.odp_caps.reserved = 0;
-#else
- memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
#endif
resp.response_length += sizeof(resp.odp_caps);
@@ -3649,8 +3657,5 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
- if (err)
- return err;
-
- return 0;
+ return err;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 39680aed99dd..31f422a70623 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
+
#include "uverbs.h"
MODULE_AUTHOR("Roland Dreier");
@@ -683,16 +685,35 @@ out:
return ev_file;
}
+static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
+{
+ u64 mask;
+
+ if (command <= IB_USER_VERBS_CMD_OPEN_QP)
+ mask = ib_dev->uverbs_cmd_mask;
+ else
+ mask = ib_dev->uverbs_ex_cmd_mask;
+
+ if (mask & ((u64)1 << command))
+ return 0;
+
+ return -1;
+}
+
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_device *ib_dev;
struct ib_uverbs_cmd_hdr hdr;
+ __u32 command;
__u32 flags;
int srcu_key;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (count < sizeof hdr)
return -EINVAL;
@@ -707,37 +728,34 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out;
}
- flags = (hdr.command &
- IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+ if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
+ IB_USER_VERBS_CMD_COMMAND_MASK)) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (!flags) {
- __u32 command;
+ command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+ if (verify_command_mask(ib_dev, command)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
- if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!file->ucontext &&
+ command != IB_USER_VERBS_CMD_GET_CONTEXT) {
+ ret = -EINVAL;
+ goto out;
+ }
- command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+ flags = (hdr.command &
+ IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+ if (!flags) {
if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
!uverbs_cmd_table[command]) {
ret = -EINVAL;
goto out;
}
- if (!file->ucontext &&
- command != IB_USER_VERBS_CMD_GET_CONTEXT) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!(ib_dev->uverbs_cmd_mask & (1ull << command))) {
- ret = -ENOSYS;
- goto out;
- }
-
if (hdr.in_words * 4 != count) {
ret = -EINVAL;
goto out;
@@ -749,21 +767,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
hdr.out_words * 4);
} else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
- __u32 command;
-
struct ib_uverbs_ex_cmd_hdr ex_hdr;
struct ib_udata ucore;
struct ib_udata uhw;
size_t written_count = count;
- if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK)) {
- ret = -EINVAL;
- goto out;
- }
-
- command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
-
if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
!uverbs_ex_cmd_table[command]) {
ret = -ENOSYS;
@@ -775,11 +783,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out;
}
- if (!(ib_dev->uverbs_ex_cmd_mask & (1ull << command))) {
- ret = -ENOSYS;
- goto out;
- }
-
if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
ret = -EINVAL;
goto out;
@@ -1058,7 +1061,7 @@ static int find_overflow_devnum(void)
ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
"infiniband_verbs");
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
+ pr_err("user_verbs: couldn't register dynamic device number\n");
return ret;
}
}
@@ -1279,14 +1282,14 @@ static int __init ib_uverbs_init(void)
ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
"infiniband_verbs");
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register device number\n");
+ pr_err("user_verbs: couldn't register device number\n");
goto out;
}
uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
if (IS_ERR(uverbs_class)) {
ret = PTR_ERR(uverbs_class);
- printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n");
+ pr_err("user_verbs: couldn't create class infiniband_verbs\n");
goto out_chrdev;
}
@@ -1294,13 +1297,13 @@ static int __init ib_uverbs_init(void)
ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
+ pr_err("user_verbs: couldn't create abi_version attribute\n");
goto out_class;
}
ret = ib_register_client(&uverbs_client);
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register client\n");
+ pr_err("user_verbs: couldn't register client\n");
goto out_class;
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 5af6d024e053..b65b3541e732 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1551,6 +1551,46 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
}
EXPORT_SYMBOL(ib_check_mr_status);
+int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+ int state)
+{
+ if (!device->set_vf_link_state)
+ return -ENOSYS;
+
+ return device->set_vf_link_state(device, vf, port, state);
+}
+EXPORT_SYMBOL(ib_set_vf_link_state);
+
+int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_info *info)
+{
+ if (!device->get_vf_config)
+ return -ENOSYS;
+
+ return device->get_vf_config(device, vf, port, info);
+}
+EXPORT_SYMBOL(ib_get_vf_config);
+
+int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_stats *stats)
+{
+ if (!device->get_vf_stats)
+ return -ENOSYS;
+
+ return device->get_vf_stats(device, vf, port, stats);
+}
+EXPORT_SYMBOL(ib_get_vf_stats);
+
+int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+ int type)
+{
+ if (!device->set_vf_guid)
+ return -ENOSYS;
+
+ return device->set_vf_guid(device, vf, port, guid, type);
+}
+EXPORT_SYMBOL(ib_set_vf_guid);
+
/**
* ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
* and set it on the memory region.
@@ -1567,6 +1607,8 @@ EXPORT_SYMBOL(ib_check_mr_status);
* - The last sg element is allowed to have length less than page_size.
* - If sg_nents total byte length exceeds the mr max_num_sge * page_size
* then only max_num_sg entries will be mapped.
+ * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, none of these
+ * constraints hold and the page_size argument is ignored.
*
* Returns the number of sg elements that were mapped to the memory region.
*
@@ -1657,3 +1699,168 @@ next_page:
return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct ib_drain_cqe sdrain;
+ struct ib_send_wr swr = {}, *bad_swr;
+ int ret;
+
+ if (WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
+ "IB_POLL_DIRECT poll_ctx not supported for drain\n"))
+ return;
+
+ swr.wr_cqe = &sdrain.cqe;
+ sdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ ret = ib_post_send(qp, &swr, &bad_swr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ wait_for_completion(&sdrain.done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {}, *bad_rwr;
+ int ret;
+
+ if (WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
+ "IB_POLL_DIRECT poll_ctx not supported for drain\n"))
+ return;
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ ret = ib_post_recv(qp, &rwr, &bad_rwr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ * application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+ if (qp->device->drain_sq)
+ qp->device->drain_sq(qp);
+ else
+ __ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ * application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+ if (qp->device->drain_rq)
+ qp->device->drain_rq(qp);
+ else
+ __ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ * application on both the RQ and SQ.
+ * @qp: queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+ ib_drain_sq(qp);
+ if (!qp->srq)
+ ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
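
The generic drain works by moving the QP to the error state, posting one
marker work request per queue, and blocking until that marker's completion
is reaped; because completions are delivered in order, everything posted
before the marker has been flushed by then. A typical caller, sketched under
the documented constraints (CQs from ib_alloc_cq(), poll context not
IB_POLL_DIRECT, no concurrent posters):

/* Illustrative teardown path; error handling elided. */
static void example_teardown(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* flushes the SQ and, absent an SRQ, the RQ */
	ib_destroy_qp(qp);
}
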
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index aded2a5cc2d5..c7ad0a4c8b15 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
+obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_NES) += nes/
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index f504ba73e5dc..d403231a4aff 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1877,7 +1877,7 @@ err:
static int is_loopback_dst(struct iw_cm_id *cm_id)
{
struct net_device *dev;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
if (!dev)
@@ -1892,10 +1892,10 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct iwch_ep *ep;
struct rtable *rt;
int err = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
- if (cm_id->remote_addr.ss_family != PF_INET) {
+ if (cm_id->m_remote_addr.ss_family != PF_INET) {
err = -ENOSYS;
goto out;
}
@@ -1961,9 +1961,9 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
state_set(&ep->com, CONNECTING);
ep->tos = IPTOS_LOWDELAY;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+ memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
sizeof(ep->com.remote_addr));
/* send connect request to rnic */
@@ -1992,7 +1992,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
might_sleep();
- if (cm_id->local_addr.ss_family != PF_INET) {
+ if (cm_id->m_local_addr.ss_family != PF_INET) {
err = -ENOSYS;
goto fail1;
}
@@ -2008,7 +2008,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
/*
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 2734820d291b..3234a8be16f6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -657,7 +657,8 @@ err:
return ERR_PTR(err);
}
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
@@ -1389,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
dev->ibdev.iwcm->get_qp = iwch_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index cd2ff5f9518a..651711370d55 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -302,7 +302,7 @@ void _c4iw_free_ep(struct kref *kref)
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
cxgb4_clip_release(
ep->com.dev->rdev.lldi.ports[0],
@@ -314,12 +314,6 @@ void _c4iw_free_ep(struct kref *kref)
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
}
- if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
- print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
- iwpm_remove_mapinfo(&ep->com.local_addr,
- &ep->com.mapped_local_addr);
- iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
- }
kfree(ep);
}
@@ -455,7 +449,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
- (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
@@ -485,12 +479,19 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
unsigned int flowclen = 80;
struct fw_flowc_wr *flowc;
int i;
+ u16 vlan = ep->l2t->vlan;
+ int nparams;
+
+ if (vlan == CPL_L2T_VLAN_NONE)
+ nparams = 8;
+ else
+ nparams = 9;
skb = get_skb(skb, flowclen, GFP_KERNEL);
flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
- FW_FLOWC_WR_NPARAMS_V(8));
+ FW_FLOWC_WR_NPARAMS_V(nparams));
flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
16)) | FW_WR_FLOWID_V(ep->hwtid));
@@ -511,9 +512,17 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = cpu_to_be32(ep->emss);
- /* Pad WR to 16 byte boundary */
- flowc->mnemval[8].mnemonic = 0;
- flowc->mnemval[8].val = 0;
+ if (nparams == 9) {
+ u16 pri;
+
+ pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[8].val = cpu_to_be32(pri);
+ } else {
+ /* Pad WR to 16 byte boundary */
+ flowc->mnemval[8].mnemonic = 0;
+ flowc->mnemval[8].val = 0;
+ }
for (i = 0; i < 9; i++) {
flowc->mnemval[i].r4[0] = 0;
flowc->mnemval[i].r4[1] = 0;
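
When the L2T entry carries a VLAN tag, the FLOWC work request now gains a
ninth parameter conveying the 802.1p priority recovered from the tag.
Restated as a hypothetical helper, assuming the <linux/if_vlan.h> constants
(VLAN_PRIO_MASK is 0xe000, VLAN_PRIO_SHIFT is 13):

#include <linux/if_vlan.h>

/* 802.1Q TCI layout: PCP in bits 15..13, DEI in bit 12, VID in bits 11..0. */
static inline u16 vlan_tci_to_pcp(u16 tci)
{
	return (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
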
@@ -568,54 +577,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-/*
- * c4iw_form_pm_msg - Form a port mapper message with mapping info
- */
-static void c4iw_form_pm_msg(struct c4iw_ep *ep,
- struct iwpm_sa_data *pm_msg)
-{
- memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
-}
-
-/*
- * c4iw_form_reg_msg - Form a port mapper message with dev info
- */
-static void c4iw_form_reg_msg(struct c4iw_dev *dev,
- struct iwpm_dev_data *pm_msg)
-{
- memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
- memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
- IWPM_IFNAME_SIZE);
-}
-
-static void c4iw_record_pm_msg(struct c4iw_ep *ep,
- struct iwpm_sa_data *pm_msg)
-{
- memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
- sizeof(ep->com.mapped_local_addr));
- memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
- sizeof(ep->com.mapped_remote_addr));
-}
-
-static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
-{
- int ret;
-
- print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
- print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");
-
- ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
- &child_ep->com.mapped_remote_addr,
- &child_ep->com.remote_addr, RDMA_NL_C4IW);
- if (ret)
- PDBG("Unable to find remote peer addr info - err %d\n", ret);
-
- return ret;
-}
-
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx, int use_ts, int ipv6)
{
@@ -645,13 +606,13 @@ static int send_connect(struct c4iw_ep *ep)
int wscale;
int win, sizev4, sizev6, wrlen;
struct sockaddr_in *la = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
struct sockaddr_in *ra = (struct sockaddr_in *)
- &ep->com.mapped_remote_addr;
+ &ep->com.remote_addr;
struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
- &ep->com.mapped_remote_addr;
+ &ep->com.remote_addr;
int ret;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -710,7 +671,7 @@ static int send_connect(struct c4iw_ep *ep)
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP_V(ep->tos) |
+ DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
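
The DSCP change reflects the IPv4 ToS byte layout: the six DSCP bits sit
above the two ECN bits, so the value DSCP_V() expects is tos >> 2. As a
hypothetical helper:

/* IPv4 ToS byte: | DSCP (6 bits) | ECN (2 bits) | */
static inline u8 tos_to_dscp(u8 tos)
{
	return tos >> 2;
}
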
@@ -1829,10 +1790,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
- sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
+ sin = (struct sockaddr_in *)&ep->com.local_addr;
req->le.lport = sin->sin_port;
req->le.u.ipv4.lip = sin->sin_addr.s_addr;
- sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
+ sin = (struct sockaddr_in *)&ep->com.remote_addr;
req->le.pport = sin->sin_port;
req->le.u.ipv4.pip = sin->sin_addr.s_addr;
req->tcb.t_state_to_astid =
@@ -1864,7 +1825,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP_V(ep->tos) |
+ DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win));
req->tcb.opt2 = (__force __be32) (PACE_V(1) |
@@ -1928,7 +1889,7 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
struct dst_entry *dst, struct c4iw_dev *cdev,
- bool clear_mpa_v1, enum chip_type adapter_type)
+ bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
struct neighbour *n;
int err, step;
@@ -1958,7 +1919,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
goto out;
}
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, pdev, 0);
+ n, pdev, rt_tos2priority(tos));
if (!ep->l2t)
goto out;
ep->mtu = pdev->mtu;
@@ -2013,13 +1974,13 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
{
int err = 0;
struct sockaddr_in *laddr = (struct sockaddr_in *)
- &ep->com.cm_id->local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)
- &ep->com.cm_id->remote_addr;
+ &ep->com.cm_id->m_remote_addr;
struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
- &ep->com.cm_id->local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
- &ep->com.cm_id->remote_addr;
+ &ep->com.cm_id->m_remote_addr;
int iptype;
__u8 *ra;
@@ -2038,10 +1999,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
/* find a route */
- if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
+ if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, 0);
+ raddr->sin_port, ep->com.cm_id->tos);
iptype = 4;
ra = (__u8 *)&raddr->sin_addr;
} else {
@@ -2058,7 +2019,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
goto fail3;
}
err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
- ep->com.dev->rdev.lldi.adapter_type);
+ ep->com.dev->rdev.lldi.adapter_type,
+ ep->com.cm_id->tos);
if (err) {
pr_err("%s - cannot alloc l2e.\n", __func__);
goto fail4;
@@ -2069,7 +2031,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
- ep->tos = 0;
+ ep->tos = ep->com.cm_id->tos;
/* send connect request to rnic */
err = send_connect(ep);
@@ -2109,10 +2071,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct sockaddr_in6 *ra6;
ep = lookup_atid(t, atid);
- la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
- ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
- la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
- ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;
+ la = (struct sockaddr_in *)&ep->com.local_addr;
+ ra = (struct sockaddr_in *)&ep->com.remote_addr;
+ la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
status, status2errno(status));
@@ -2154,7 +2116,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
cxgb4_clip_release(
ep->com.dev->rdev.lldi.ports[0],
(const u32 *)
@@ -2189,7 +2151,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
- (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
@@ -2391,6 +2353,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
u16 peer_mss = ntohs(req->tcpopt.mss);
int iptype;
unsigned short hdrs;
+ u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
parent_ep = lookup_stid(t, stid);
if (!parent_ep) {
@@ -2399,8 +2362,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
}
if (state_read(&parent_ep->com) != LISTEN) {
- printk(KERN_ERR "%s - listening ep not in LISTEN\n",
- __func__);
+ PDBG("%s - listening ep not in LISTEN\n", __func__);
goto reject;
}
@@ -2415,7 +2377,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(peer_port), peer_mss);
dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+ tos);
} else {
PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
, __func__, parent_ep, hwtid,
@@ -2441,7 +2403,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
}
err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
- parent_ep->com.dev->rdev.lldi.adapter_type);
+ parent_ep->com.dev->rdev.lldi.adapter_type, tos);
if (err) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -2459,18 +2421,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
- /*
- * The mapped_local and mapped_remote addresses get setup with
- * the actual 4-tuple. The local address will be based on the
- * actual local address of the connection, but on the port number
- * of the parent listening endpoint. The remote address is
- * setup based on a query to the IWPM since we don't know what it
- * originally was before mapping. If no mapping was done, then
- * mapped_remote == remote, and mapped_local == local.
- */
if (iptype == 4) {
struct sockaddr_in *sin = (struct sockaddr_in *)
- &child_ep->com.mapped_local_addr;
+ &child_ep->com.local_addr;
sin->sin_family = PF_INET;
sin->sin_port = local_port;
@@ -2482,12 +2435,12 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
&parent_ep->com.local_addr)->sin_port;
sin->sin_addr.s_addr = *(__be32 *)local_ip;
- sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
+ sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
sin->sin_family = PF_INET;
sin->sin_port = peer_port;
sin->sin_addr.s_addr = *(__be32 *)peer_ip;
} else {
- sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = local_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
@@ -2498,18 +2451,15 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
&parent_ep->com.local_addr)->sin6_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
- sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = peer_port;
memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
}
- memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
- sizeof(child_ep->com.remote_addr));
- get_remote_addr(parent_ep, child_ep);
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
- child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ child_ep->tos = tos;
child_ep->dst = dst;
child_ep->hwtid = hwtid;
@@ -2522,7 +2472,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
accept_cr(child_ep, skb, req);
set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
if (iptype == 6) {
- sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
@@ -2765,7 +2715,7 @@ out:
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
cxgb4_clip_release(
ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr,
@@ -3026,8 +2976,8 @@ static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
struct in_device *ind;
int found = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
ind = in_dev_get(dev->rdev.lldi.ports[0]);
if (!ind)
@@ -3072,8 +3022,8 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
struct in6_addr uninitialized_var(addr);
- struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
- struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+ struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
memcpy(la6->sin6_addr.s6_addr, &addr, 16);
@@ -3092,11 +3042,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in *raddr;
struct sockaddr_in6 *laddr6;
struct sockaddr_in6 *raddr6;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
__u8 *ra;
int iptype;
- int iwpm_err = 0;
if ((conn_param->ord > cur_max_read_depth(dev)) ||
(conn_param->ird > cur_max_read_depth(dev))) {
@@ -3144,47 +3091,17 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
insert_handle(dev, &dev->atid_idr, ep, ep->atid);
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+ memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
sizeof(ep->com.remote_addr));
- /* No port mapper available, go with the specified peer information */
- memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
- sizeof(ep->com.mapped_local_addr));
- memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
- sizeof(ep->com.mapped_remote_addr));
-
- c4iw_form_reg_msg(dev, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
- if (iwpm_err) {
- PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
- __func__, iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- c4iw_form_pm_msg(ep, &pm_msg);
- iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
- if (iwpm_err)
- PDBG("%s: Port Mapper query fail (err = %d).\n",
- __func__, iwpm_err);
- else
- c4iw_record_pm_msg(ep, &pm_msg);
- }
- if (iwpm_create_mapinfo(&ep->com.local_addr,
- &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
- iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
- err = -ENOMEM;
- goto fail1;
- }
- print_addr(&ep->com, __func__, "add_query/create_mapinfo");
- set_bit(RELEASE_MAPINFO, &ep->com.flags);
-
- laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
- raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
- laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
- raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr;
+ laddr = (struct sockaddr_in *)&ep->com.local_addr;
+ raddr = (struct sockaddr_in *)&ep->com.remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
- if (cm_id->remote_addr.ss_family == AF_INET) {
+ if (cm_id->m_remote_addr.ss_family == AF_INET) {
iptype = 4;
ra = (__u8 *)&raddr->sin_addr;
@@ -3203,7 +3120,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ra, ntohs(raddr->sin_port));
ep->dst = find_route(dev, laddr->sin_addr.s_addr,
raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, 0);
+ raddr->sin_port, cm_id->tos);
} else {
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
@@ -3234,7 +3151,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
- ep->com.dev->rdev.lldi.adapter_type);
+ ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
goto fail3;
@@ -3245,7 +3162,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
- ep->tos = 0;
+ ep->tos = cm_id->tos;
/* send connect request to rnic */
err = send_connect(ep);
@@ -3269,7 +3186,7 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
int err;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
@@ -3302,7 +3219,7 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
int err;
struct sockaddr_in *sin = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
if (dev->rdev.lldi.enable_fw_ofld_conn) {
do {
@@ -3343,9 +3260,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
int err = 0;
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_listen_ep *ep;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
- int iwpm_err = 0;
might_sleep();
@@ -3360,7 +3274,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
ep->com.cm_id = cm_id;
ep->com.dev = dev;
ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
/*
@@ -3369,10 +3283,10 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
if (dev->rdev.lldi.enable_fw_ofld_conn &&
ep->com.local_addr.ss_family == AF_INET)
ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
- cm_id->local_addr.ss_family, ep);
+ cm_id->m_local_addr.ss_family, ep);
else
ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
- cm_id->local_addr.ss_family, ep);
+ cm_id->m_local_addr.ss_family, ep);
if (ep->stid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
@@ -3381,36 +3295,9 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
}
insert_handle(dev, &dev->stid_idr, ep, ep->stid);
- /* No port mapper available, go with the specified info */
- memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
- sizeof(ep->com.mapped_local_addr));
-
- c4iw_form_reg_msg(dev, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
- if (iwpm_err) {
- PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
- __func__, iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
- if (iwpm_err)
- PDBG("%s: Port Mapper query fail (err = %d).\n",
- __func__, iwpm_err);
- else
- memcpy(&ep->com.mapped_local_addr,
- &pm_msg.mapped_loc_addr,
- sizeof(ep->com.mapped_local_addr));
- }
- if (iwpm_create_mapinfo(&ep->com.local_addr,
- &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
- err = -ENOMEM;
- goto fail3;
- }
- print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
+ sizeof(ep->com.local_addr));
- set_bit(RELEASE_MAPINFO, &ep->com.flags);
state_set(&ep->com, LISTEN);
if (ep->com.local_addr.ss_family == AF_INET)
err = create_server4(dev, ep);
@@ -3421,7 +3308,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto out;
}
-fail3:
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
@@ -3456,7 +3342,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
goto done;
err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
0, 0, __func__);
- sin6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
@@ -3580,7 +3466,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
- (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index cf21df4a8bf5..b0b955724458 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
- if (user && !cq->bar2_va) {
+ if (user && !cq->bar2_pa) {
pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL;
@@ -815,8 +815,15 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
}
}
out:
- if (wq)
+ if (wq) {
+ if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
+ if (t4_sq_empty(wq))
+ complete(&qhp->sq_drained);
+ if (t4_rq_empty(wq))
+ complete(&qhp->rq_drained);
+ }
spin_unlock(&qhp->lock);
+ }
return ret;
}
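
The hunk above is the signalling half of the cxgb4 drain logic: once the QP
has left RTS, any poll that finds the SQ or RQ empty fires the matching
completion, waking a drainer blocked in c4iw_drain_sq()/c4iw_drain_rq()
(added in qp.c below). A reduced model of the handshake, assuming a single
drainer per queue:

#include <linux/completion.h>

struct drain_model {
	struct completion drained;
};

/* Poll side: called when the queue is observed empty after leaving RTS. */
static void model_signal(struct drain_model *m)
{
	complete(&m->drained);
}

/* Drain side: blocks until the poll side has signalled. */
static void model_wait(struct drain_model *m)
{
	wait_for_completion(&m->drained);
}
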
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 8024ea4417b8..ae2e8b23d2dd 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -87,17 +87,6 @@ struct c4iw_debugfs_data {
int pos;
};
-/* registered cxgb4 netlink callbacks */
-static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
- [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
- [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
- [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
- [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
- [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
- [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
- [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
-};
-
static int count_idrs(int id, void *p, void *data)
{
int *countp = data;
@@ -242,13 +231,13 @@ static int dump_qp(int id, void *p, void *data)
if (qp->ep) {
if (qp->ep->com.local_addr.ss_family == AF_INET) {
struct sockaddr_in *lsin = (struct sockaddr_in *)
- &qp->ep->com.local_addr;
+ &qp->ep->com.cm_id->local_addr;
struct sockaddr_in *rsin = (struct sockaddr_in *)
- &qp->ep->com.remote_addr;
+ &qp->ep->com.cm_id->remote_addr;
struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
- &qp->ep->com.mapped_local_addr;
+ &qp->ep->com.cm_id->m_local_addr;
struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
- &qp->ep->com.mapped_remote_addr;
+ &qp->ep->com.cm_id->m_remote_addr;
cc = snprintf(qpd->buf + qpd->pos, space,
"rc qp sq id %u rq id %u state %u "
@@ -264,15 +253,15 @@ static int dump_qp(int id, void *p, void *data)
ntohs(mapped_rsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
- &qp->ep->com.local_addr;
+ &qp->ep->com.cm_id->local_addr;
struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
- &qp->ep->com.remote_addr;
+ &qp->ep->com.cm_id->remote_addr;
struct sockaddr_in6 *mapped_lsin6 =
(struct sockaddr_in6 *)
- &qp->ep->com.mapped_local_addr;
+ &qp->ep->com.cm_id->m_local_addr;
struct sockaddr_in6 *mapped_rsin6 =
(struct sockaddr_in6 *)
- &qp->ep->com.mapped_remote_addr;
+ &qp->ep->com.cm_id->m_remote_addr;
cc = snprintf(qpd->buf + qpd->pos, space,
"rc qp sq id %u rq id %u state %u "
@@ -545,13 +534,13 @@ static int dump_ep(int id, void *p, void *data)
if (ep->com.local_addr.ss_family == AF_INET) {
struct sockaddr_in *lsin = (struct sockaddr_in *)
- &ep->com.local_addr;
+ &ep->com.cm_id->local_addr;
struct sockaddr_in *rsin = (struct sockaddr_in *)
- &ep->com.remote_addr;
+ &ep->com.cm_id->remote_addr;
struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
- &ep->com.mapped_remote_addr;
+ &ep->com.cm_id->m_remote_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
@@ -569,13 +558,13 @@ static int dump_ep(int id, void *p, void *data)
ntohs(mapped_rsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
- &ep->com.local_addr;
+ &ep->com.cm_id->local_addr;
struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
- &ep->com.remote_addr;
+ &ep->com.cm_id->remote_addr;
struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_remote_addr;
+ &ep->com.cm_id->m_remote_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
@@ -610,9 +599,9 @@ static int dump_listen_ep(int id, void *p, void *data)
if (ep->com.local_addr.ss_family == AF_INET) {
struct sockaddr_in *lsin = (struct sockaddr_in *)
- &ep->com.local_addr;
+ &ep->com.cm_id->local_addr;
struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->m_local_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p state %d flags 0x%lx stid %d "
@@ -623,9 +612,9 @@ static int dump_listen_ep(int id, void *p, void *data)
ntohs(mapped_lsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
- &ep->com.local_addr;
+ &ep->com.cm_id->local_addr;
struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->m_local_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p state %d flags 0x%lx stid %d "
@@ -801,10 +790,9 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->lldi.vr->qp.size,
rdev->lldi.vr->cq.start,
rdev->lldi.vr->cq.size);
- PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p "
+ PDBG("udb %pR db_reg %p gts_reg %p "
"qpmask 0x%x cqmask 0x%x\n",
- (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
- (void *)pci_resource_start(rdev->lldi.pdev, 2),
+ &rdev->lldi.pdev->resource[2],
rdev->lldi.db_reg, rdev->lldi.gts_reg,
rdev->qpmask, rdev->cqmask);
@@ -1506,20 +1494,6 @@ static int __init c4iw_init_module(void)
printk(KERN_WARNING MOD
"could not create debugfs entry, continuing\n");
- if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
- c4iw_nl_cb_table))
- pr_err("%s[%u]: Failed to add netlink callback\n"
- , __func__, __LINE__);
-
- err = iwpm_init(RDMA_NL_C4IW);
- if (err) {
- pr_err("port mapper initialization failed with %d\n", err);
- ibnl_remove_client(RDMA_NL_C4IW);
- c4iw_cm_term();
- debugfs_remove_recursive(c4iw_debugfs_root);
- return err;
- }
-
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
return 0;
@@ -1537,8 +1511,6 @@ static void __exit c4iw_exit_module(void)
}
mutex_unlock(&dev_mutex);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
- iwpm_exit(RDMA_NL_C4IW);
- ibnl_remove_client(RDMA_NL_C4IW);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index fb2de75a0392..df43f871ab61 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -476,6 +476,8 @@ struct c4iw_qp {
wait_queue_head_t wait;
struct timer_list timer;
int sq_sig_all;
+ struct completion rq_drained;
+ struct completion sq_drained;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -753,7 +755,6 @@ enum c4iw_ep_flags {
CLOSE_SENT = 3,
TIMEOUT = 4,
QP_REFERENCED = 5,
- RELEASE_MAPINFO = 6,
};
enum c4iw_ep_history {
@@ -790,8 +791,6 @@ struct c4iw_ep_common {
struct mutex mutex;
struct sockaddr_storage local_addr;
struct sockaddr_storage remote_addr;
- struct sockaddr_storage mapped_local_addr;
- struct sockaddr_storage mapped_remote_addr;
struct c4iw_wr_wait wr_wait;
unsigned long flags;
unsigned long history;
@@ -843,45 +842,6 @@ struct c4iw_ep {
struct c4iw_ep_stats stats;
};
-static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
- const char *msg)
-{
-
-#define SINA(a) (&(((struct sockaddr_in *)(a))->sin_addr.s_addr))
-#define SINP(a) ntohs(((struct sockaddr_in *)(a))->sin_port)
-#define SIN6A(a) (&(((struct sockaddr_in6 *)(a))->sin6_addr))
-#define SIN6P(a) ntohs(((struct sockaddr_in6 *)(a))->sin6_port)
-
- if (c4iw_debug) {
- switch (epc->local_addr.ss_family) {
- case AF_INET:
- PDBG("%s %s %pI4:%u/%u <-> %pI4:%u/%u\n",
- func, msg, SINA(&epc->local_addr),
- SINP(&epc->local_addr),
- SINP(&epc->mapped_local_addr),
- SINA(&epc->remote_addr),
- SINP(&epc->remote_addr),
- SINP(&epc->mapped_remote_addr));
- break;
- case AF_INET6:
- PDBG("%s %s %pI6:%u/%u <-> %pI6:%u/%u\n",
- func, msg, SIN6A(&epc->local_addr),
- SIN6P(&epc->local_addr),
- SIN6P(&epc->mapped_local_addr),
- SIN6A(&epc->remote_addr),
- SIN6P(&epc->remote_addr),
- SIN6P(&epc->mapped_remote_addr));
- break;
- default:
- break;
- }
- }
-#undef SINA
-#undef SINP
-#undef SIN6A
-#undef SIN6P
-}
-
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
return cm_id->provider_data;
@@ -961,7 +921,8 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr,
struct scatterlist *sg,
int sg_nents);
int c4iw_dealloc_mw(struct ib_mw *mw);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
struct ib_udata *udata);
@@ -1016,6 +977,8 @@ extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
+void c4iw_drain_rq(struct ib_qp *qp);
+void c4iw_drain_sq(struct ib_qp *qp);
#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7849890c4781..008be07d5604 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -34,6 +34,7 @@
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
+#include <rdma/ib_user_verbs.h>
#include "iw_cxgb4.h"
@@ -552,7 +553,8 @@ err:
return ERR_PTR(err);
}
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
@@ -617,12 +619,14 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
int ret = 0;
int length = roundup(max_num_sg * sizeof(u64), 32);
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > t4_max_fr_depth(use_dsgl))
+ max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+ use_dsgl))
return ERR_PTR(-EINVAL);
- php = to_c4iw_pd(pd);
- rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index ec04272fbdc2..7574f394fdac 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -339,7 +339,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
props->max_mr = c4iw_num_stags(&dev->rdev);
props->max_pd = T4_MAX_NUM_PD;
props->local_ca_ack_delay = 0;
- props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl);
+ props->max_fast_reg_page_list_len =
+ t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl);
return 0;
}
@@ -564,6 +565,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.get_protocol_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
+ dev->ibdev.drain_sq = c4iw_drain_sq;
+ dev->ibdev.drain_rq = c4iw_drain_rq;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
@@ -577,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e99345eb875a..e8993e49b8b3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
if (pbar2_pa)
*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
+
+ if (is_t4(rdev->lldi.adapter_type))
+ return NULL;
+
return rdev->bar2_kva + bar2_qoffset;
}
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
+ if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
@@ -606,7 +610,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
}
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_reg_wr *wr, u8 *len16, u8 t5dev)
+ struct ib_reg_wr *wr, u8 *len16, bool dsgl_supported)
{
struct c4iw_mr *mhp = to_c4iw_mr(wr->mr);
struct fw_ri_immd *imdp;
@@ -615,7 +619,7 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
int rem;
- if (mhp->mpl_len > t4_max_fr_depth(use_dsgl))
+ if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
return -EINVAL;
wqe->fr.qpbinde_to_dcacpu = 0;
@@ -629,7 +633,7 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
0xffffffff);
- if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+ if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
struct fw_ri_dsgl *sglp;
for (i = 0; i < mhp->mpl_len; i++)
@@ -808,9 +812,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_FR_NSMR_WR;
swsqe->opcode = FW_RI_FAST_REGISTER;
err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
- is_t5(
- qhp->rhp->rdev.lldi.adapter_type) ?
- 1 : 0);
+ qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
break;
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
@@ -1621,7 +1623,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
unsigned int sqsize, rqsize;
struct c4iw_ucontext *ucontext;
int ret;
- struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
+ struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
+ struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
PDBG("%s ib_pd %p\n", __func__, pd);
@@ -1697,6 +1700,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.max_ird = 0;
qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
spin_lock_init(&qhp->lock);
+ init_completion(&qhp->sq_drained);
+ init_completion(&qhp->rq_drained);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
@@ -1706,29 +1711,30 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
goto err2;
if (udata) {
- mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
- if (!mm1) {
+ sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
+ if (!sq_key_mm) {
ret = -ENOMEM;
goto err3;
}
- mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
- if (!mm2) {
+ rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
+ if (!rq_key_mm) {
ret = -ENOMEM;
goto err4;
}
- mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
- if (!mm3) {
+ sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
+ if (!sq_db_key_mm) {
ret = -ENOMEM;
goto err5;
}
- mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
- if (!mm4) {
+ rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
+ if (!rq_db_key_mm) {
ret = -ENOMEM;
goto err6;
}
if (t4_sq_onchip(&qhp->wq.sq)) {
- mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
- if (!mm5) {
+ ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
+ GFP_KERNEL);
+ if (!ma_sync_key_mm) {
ret = -ENOMEM;
goto err7;
}
@@ -1743,7 +1749,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
uresp.rq_size = qhp->wq.rq.size;
uresp.rq_memsize = qhp->wq.rq.memsize;
spin_lock(&ucontext->mmap_lock);
- if (mm5) {
+ if (ma_sync_key_mm) {
uresp.ma_sync_key = ucontext->key;
ucontext->key += PAGE_SIZE;
} else {
@@ -1761,28 +1767,29 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
if (ret)
goto err8;
- mm1->key = uresp.sq_key;
- mm1->addr = qhp->wq.sq.phys_addr;
- mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
- insert_mmap(ucontext, mm1);
- mm2->key = uresp.rq_key;
- mm2->addr = virt_to_phys(qhp->wq.rq.queue);
- mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
- insert_mmap(ucontext, mm2);
- mm3->key = uresp.sq_db_gts_key;
- mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa;
- mm3->len = PAGE_SIZE;
- insert_mmap(ucontext, mm3);
- mm4->key = uresp.rq_db_gts_key;
- mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa;
- mm4->len = PAGE_SIZE;
- insert_mmap(ucontext, mm4);
- if (mm5) {
- mm5->key = uresp.ma_sync_key;
- mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
- + PCIE_MA_SYNC_A) & PAGE_MASK;
- mm5->len = PAGE_SIZE;
- insert_mmap(ucontext, mm5);
+ sq_key_mm->key = uresp.sq_key;
+ sq_key_mm->addr = qhp->wq.sq.phys_addr;
+ sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+ insert_mmap(ucontext, sq_key_mm);
+ rq_key_mm->key = uresp.rq_key;
+ rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
+ rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_mmap(ucontext, rq_key_mm);
+ sq_db_key_mm->key = uresp.sq_db_gts_key;
+ sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
+ sq_db_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, sq_db_key_mm);
+ rq_db_key_mm->key = uresp.rq_db_gts_key;
+ rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa;
+ rq_db_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, rq_db_key_mm);
+ if (ma_sync_key_mm) {
+ ma_sync_key_mm->key = uresp.ma_sync_key;
+ ma_sync_key_mm->addr =
+ (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
+ PCIE_MA_SYNC_A) & PAGE_MASK;
+ ma_sync_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, ma_sync_key_mm);
}
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
@@ -1795,15 +1802,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
return &qhp->ibqp;
err8:
- kfree(mm5);
+ kfree(ma_sync_key_mm);
err7:
- kfree(mm4);
+ kfree(rq_db_key_mm);
err6:
- kfree(mm3);
+ kfree(sq_db_key_mm);
err5:
- kfree(mm2);
+ kfree(rq_key_mm);
err4:
- kfree(mm1);
+ kfree(sq_key_mm);
err3:
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
@@ -1888,3 +1895,31 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
}
+
+void c4iw_drain_sq(struct ib_qp *ibqp)
+{
+ struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
+
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_sq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
+
+ if (need_to_wait)
+ wait_for_completion(&qp->sq_drained);
+}
+
+void c4iw_drain_rq(struct ib_qp *ibqp)
+{
+ struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
+
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_rq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
+
+ if (need_to_wait)
+ wait_for_completion(&qp->rq_drained);
+}
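
Both drain hooks follow the same shape: sample the queue's emptiness under
qp->lock so the check cannot race with the CQ poll path, then sleep outside
the lock only if work remains. Generalized (illustrative pattern, not a
kernel API):

/* Sample state under a lock, then wait for a completion outside it. */
static void drain_wait(spinlock_t *lock, bool (*empty)(void *), void *queue,
		       struct completion *done)
{
	unsigned long flags;
	bool need_to_wait;

	spin_lock_irqsave(lock, flags);
	need_to_wait = !empty(queue);
	spin_unlock_irqrestore(lock, flags);

	if (need_to_wait)
		wait_for_completion(done);
}
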
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 343e8daf2270..1e26669793c3 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -753,103 +753,4 @@ struct fw_ri_wr {
#define FW_RI_WR_P2PTYPE_G(x) \
(((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
-struct tcp_options {
- __be16 mss;
- __u8 wsf;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8:4;
- __u8 unknown:1;
- __u8:1;
- __u8 sack:1;
- __u8 tstamp:1;
-#else
- __u8 tstamp:1;
- __u8 sack:1;
- __u8:1;
- __u8 unknown:1;
- __u8:4;
-#endif
-};
-
-struct cpl_pass_accept_req {
- union opcode_tid ot;
- __be16 rsvd;
- __be16 len;
- __be32 hdr_len;
- __be16 vlan;
- __be16 l2info;
- __be32 tos_stid;
- struct tcp_options tcpopt;
-};
-
-/* cpl_pass_accept_req.hdr_len fields */
-#define SYN_RX_CHAN_S 0
-#define SYN_RX_CHAN_M 0xF
-#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
-#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
-
-#define TCP_HDR_LEN_S 10
-#define TCP_HDR_LEN_M 0x3F
-#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
-#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
-
-#define IP_HDR_LEN_S 16
-#define IP_HDR_LEN_M 0x3FF
-#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
-#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
-
-#define ETH_HDR_LEN_S 26
-#define ETH_HDR_LEN_M 0x1F
-#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
-#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
-
-/* cpl_pass_accept_req.l2info fields */
-#define SYN_MAC_IDX_S 0
-#define SYN_MAC_IDX_M 0x1FF
-#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
-#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
-
-#define SYN_XACT_MATCH_S 9
-#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
-#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
-
-#define SYN_INTF_S 12
-#define SYN_INTF_M 0xF
-#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
-#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
-
-struct ulptx_idata {
- __be32 cmd_more;
- __be32 len;
-};
-
-#define ULPTX_NSGE_S 0
-#define ULPTX_NSGE_M 0xFFFF
-#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
-
-#define RX_DACK_MODE_S 29
-#define RX_DACK_MODE_M 0x3
-#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
-#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
-
-#define RX_DACK_CHANGE_S 31
-#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
-#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
-
-enum { /* TCP congestion control algorithms */
- CONG_ALG_RENO,
- CONG_ALG_TAHOE,
- CONG_ALG_NEWRENO,
- CONG_ALG_HIGHSPEED
-};
-
-#define CONG_CNTRL_S 14
-#define CONG_CNTRL_M 0x3
-#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
-#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
-
-#define T5_ISS_S 18
-#define T5_ISS_V(x) ((x) << T5_ISS_S)
-#define T5_ISS_F T5_ISS_V(1U)
-
#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
new file mode 100644
index 000000000000..6e7d27a14061
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_I40IW
+ tristate "Intel(R) Ethernet X722 iWARP Driver"
+ depends on INET && I40E
+ select GENERIC_ALLOCATOR
+ ---help---
+ Intel(R) Ethernet X722 iWARP Driver
+ (requires INET, INFINIBAND and the I40E Ethernet driver)
diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile
new file mode 100644
index 000000000000..90068c03d217
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/Makefile
@@ -0,0 +1,9 @@
+ccflags-y := -Idrivers/net/ethernet/intel/i40e
+
+obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
+
+i40iw-objs :=\
+ i40iw_cm.o i40iw_ctrl.o \
+ i40iw_hmc.o i40iw_hw.o i40iw_main.o \
+ i40iw_pble.o i40iw_puda.o i40iw_uk.o i40iw_utils.o \
+ i40iw_verbs.o i40iw_virtchnl.o i40iw_vf.o
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
new file mode 100644
index 000000000000..819767681445
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -0,0 +1,570 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_IW_H
+#define I40IW_IW_H
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/crc32c.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/iw_cm.h>
+#include <rdma/iw_portmap.h>
+#include <rdma/rdma_netlink.h>
+#include <crypto/hash.h>
+
+#include "i40iw_status.h"
+#include "i40iw_osdep.h"
+#include "i40iw_d.h"
+#include "i40iw_hmc.h"
+
+#include <i40e_client.h>
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_ucontext.h"
+#include "i40iw_pble.h"
+#include "i40iw_verbs.h"
+#include "i40iw_cm.h"
+#include "i40iw_user.h"
+#include "i40iw_puda.h"
+
+#define I40IW_FW_VERSION 2
+#define I40IW_HW_VERSION 2
+
+#define I40IW_ARP_ADD 1
+#define I40IW_ARP_DELETE 2
+#define I40IW_ARP_RESOLVE 3
+
+#define I40IW_MACIP_ADD 1
+#define I40IW_MACIP_DELETE 2
+
+#define IW_CCQ_SIZE (I40IW_CQP_SW_SQSIZE_2048 + 1)
+#define IW_CEQ_SIZE 2048
+#define IW_AEQ_SIZE 2048
+
+#define RX_BUF_SIZE (1536 + 8)
+#define IW_REG0_SIZE (4 * 1024)
+#define IW_TX_TIMEOUT (6 * HZ)
+#define IW_FIRST_QPN 1
+#define IW_SW_CONTEXT_ALIGN 1024
+
+#define MAX_DPC_ITERATIONS 128
+
+#define I40IW_EVENT_TIMEOUT 100000
+#define I40IW_VCHNL_EVENT_TIMEOUT 100000
+
+#define I40IW_NO_VLAN 0xffff
+#define I40IW_NO_QSET 0xffff
+
+/* access to mcast filter list */
+#define IW_ADD_MCAST false
+#define IW_DEL_MCAST true
+
+#define I40IW_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
+#define I40IW_DRV_OPT_DISABLE_MPA_CRC 0x00000002
+#define I40IW_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
+#define I40IW_DRV_OPT_DISABLE_INTF 0x00000008
+#define I40IW_DRV_OPT_ENABLE_MSI 0x00000010
+#define I40IW_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
+#define I40IW_DRV_OPT_NO_INLINE_DATA 0x00000080
+#define I40IW_DRV_OPT_DISABLE_INT_MOD 0x00000100
+#define I40IW_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
+#define I40IW_DRV_OPT_ENABLE_PAU 0x00000400
+#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
+
+#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
+#define IW_CFG_FPM_QP_COUNT 32768
+
+#define I40IW_MTU_TO_MSS 40
+#define I40IW_DEFAULT_MSS 1460
+
+struct i40iw_cqp_compl_info {
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ bool error;
+ u8 op_code;
+};
+
+#define i40iw_pr_err(fmt, args ...) pr_err("%s: " fmt, __func__, ## args)
+
+#define i40iw_pr_info(fmt, args ...) pr_info("%s: " fmt, __func__, ## args)
+
+#define i40iw_pr_warn(fmt, args ...) pr_warn("%s: " fmt, __func__, ## args)
+
+struct i40iw_cqp_request {
+ struct cqp_commands_info info;
+ wait_queue_head_t waitq;
+ struct list_head list;
+ atomic_t refcount;
+ void (*callback_fcn)(struct i40iw_cqp_request*, u32);
+ void *param;
+ struct i40iw_cqp_compl_info compl_info;
+ bool waiting;
+ bool request_done;
+ bool dynamic;
+};
+
+struct i40iw_cqp {
+ struct i40iw_sc_cqp sc_cqp;
+ spinlock_t req_lock; /* cqp request list */
+ wait_queue_head_t waitq;
+ struct i40iw_dma_mem sq;
+ struct i40iw_dma_mem host_ctx;
+ u64 *scratch_array;
+ struct i40iw_cqp_request *cqp_requests;
+ struct list_head cqp_avail_reqs;
+ struct list_head cqp_pending_reqs;
+};
+
+struct i40iw_device;
+
+struct i40iw_ccq {
+ struct i40iw_sc_cq sc_cq;
+ spinlock_t lock; /* ccq control */
+ wait_queue_head_t waitq;
+ struct i40iw_dma_mem mem_cq;
+ struct i40iw_dma_mem shadow_area;
+};
+
+struct i40iw_ceq {
+ struct i40iw_sc_ceq sc_ceq;
+ struct i40iw_dma_mem mem;
+ u32 irq;
+ u32 msix_idx;
+ struct i40iw_device *iwdev;
+ struct tasklet_struct dpc_tasklet;
+};
+
+struct i40iw_aeq {
+ struct i40iw_sc_aeq sc_aeq;
+ struct i40iw_dma_mem mem;
+};
+
+struct i40iw_arp_entry {
+ u32 ip_addr[4];
+ u8 mac_addr[ETH_ALEN];
+};
+
+enum init_completion_state {
+ INVALID_STATE = 0,
+ INITIAL_STATE,
+ CQP_CREATED,
+ HMC_OBJS_CREATED,
+ PBLE_CHUNK_MEM,
+ CCQ_CREATED,
+ AEQ_CREATED,
+ CEQ_CREATED,
+ ILQ_CREATED,
+ IEQ_CREATED,
+ INET_NOTIFIER,
+ IP_ADDR_REGISTERED,
+ RDMA_DEV_REGISTERED
+};
+
+struct i40iw_msix_vector {
+ u32 idx;
+ u32 irq;
+ u32 cpu_affinity;
+ u32 ceq_id;
+};
+
+#define I40IW_MSIX_TABLE_SIZE 65
+
+struct virtchnl_work {
+ struct work_struct work;
+ union {
+ struct i40iw_cqp_request *cqp_request;
+ struct i40iw_virtchnl_work_info work_info;
+ };
+};
+
+struct i40e_qvlist_info;
+
+struct i40iw_device {
+ struct i40iw_ib_device *iwibdev;
+ struct net_device *netdev;
+ wait_queue_head_t vchnl_waitq;
+ struct i40iw_sc_dev sc_dev;
+ struct i40iw_handler *hdl;
+ struct i40e_info *ldev;
+ struct i40e_client *client;
+ struct i40iw_hw hw;
+ struct i40iw_cm_core cm_core;
+ unsigned long *mem_resources;
+ unsigned long *allocated_qps;
+ unsigned long *allocated_cqs;
+ unsigned long *allocated_mrs;
+ unsigned long *allocated_pds;
+ unsigned long *allocated_arps;
+ struct i40iw_qp **qp_table;
+ bool msix_shared;
+ u32 msix_count;
+ struct i40iw_msix_vector *iw_msixtbl;
+ struct i40e_qvlist_info *iw_qvlist;
+
+ struct i40iw_hmc_pble_rsrc *pble_rsrc;
+ struct i40iw_arp_entry *arp_table;
+ struct i40iw_cqp cqp;
+ struct i40iw_ccq ccq;
+ u32 ceqs_count;
+ struct i40iw_ceq *ceqlist;
+ struct i40iw_aeq aeq;
+ u32 arp_table_size;
+ u32 next_arp_index;
+ spinlock_t resource_lock; /* hw resource access */
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 of_device_registered;
+
+ u32 device_cap_flags;
+ unsigned long db_start;
+ u8 resource_profile;
+ u8 max_rdma_vfs;
+ u8 max_enabled_vfs;
+ u8 max_sge;
+ u8 iw_status;
+ u8 send_term_ok;
+ bool push_mode; /* Initialized from parameter passed to driver */
+
+ /* x710 specific */
+ struct mutex pbl_mutex;
+ struct tasklet_struct dpc_tasklet;
+ struct workqueue_struct *virtchnl_wq;
+ struct virtchnl_work virtchnl_w[I40IW_MAX_PE_ENABLED_VF_COUNT];
+ struct i40iw_dma_mem obj_mem;
+ struct i40iw_dma_mem obj_next;
+ u8 *hmc_info_mem;
+ u32 sd_type;
+ struct workqueue_struct *param_wq;
+ atomic_t params_busy;
+ u32 mss;
+ enum init_completion_state init_state;
+ u16 mac_ip_table_idx;
+ atomic_t vchnl_msgs;
+ u32 max_mr;
+ u32 max_qp;
+ u32 max_cq;
+ u32 max_pd;
+ u32 next_qp;
+ u32 next_cq;
+ u32 next_pd;
+ u32 max_mr_size;
+ u32 max_qp_wr;
+ u32 max_cqe;
+ u32 mr_stagmask;
+ u32 mpa_version;
+ bool dcb;
+};
+
+struct i40iw_ib_device {
+ struct ib_device ibdev;
+ struct i40iw_device *iwdev;
+};
+
+struct i40iw_handler {
+ struct list_head list;
+ struct i40e_client *client;
+ struct i40iw_device device;
+ struct i40e_info ldev;
+};
+
+/**
+ * to_iwdev - get device
+ * @ibdev: ib device
+ **/
+static inline struct i40iw_device *to_iwdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct i40iw_ib_device, ibdev)->iwdev;
+}
+
+/**
+ * to_ucontext - get user context
+ * @ibucontext: ib user context
+ **/
+static inline struct i40iw_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct i40iw_ucontext, ibucontext);
+}
+
+/**
+ * to_iwpd - get protection domain
+ * @ibpd: ib pd
+ **/
+static inline struct i40iw_pd *to_iwpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct i40iw_pd, ibpd);
+}
+
+/**
+ * to_iwmr - get device memory region
+ * @ibmr: ib memory region
+ **/
+static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct i40iw_mr, ibmr);
+}
+
+/**
+ * to_iwmr_from_ibfmr - get device memory region
+ * @ibfmr: ib fmr
+ **/
+static inline struct i40iw_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct i40iw_mr, ibfmr);
+}
+
+/**
+ * to_iwmw - get device memory window
+ * @ibmw: ib memory window
+ **/
+static inline struct i40iw_mr *to_iwmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct i40iw_mr, ibmw);
+}
+
+/**
+ * to_iwcq - get completion queue
+ * @ibcq: ib cq
+ **/
+static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct i40iw_cq, ibcq);
+}
+
+/**
+ * to_iwqp - get device qp
+ * @ibqp: ib qp
+ **/
+static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct i40iw_qp, ibqp);
+}
+
+/* i40iw.c */
+void i40iw_add_ref(struct ib_qp *);
+void i40iw_rem_ref(struct ib_qp *);
+struct ib_qp *i40iw_get_qp(struct ib_device *, int);
+
+void i40iw_flush_wqes(struct i40iw_device *iwdev,
+ struct i40iw_qp *qp);
+
+void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
+ unsigned char *mac_addr,
+ __be32 *ip_addr,
+ bool ipv4,
+ u32 action);
+
+int i40iw_manage_apbvt(struct i40iw_device *iwdev,
+ u16 accel_local_port,
+ bool add_port);
+
+struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);
+void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
+void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
+
+/**
+ * i40iw_alloc_resource - allocate a resource
+ * @iwdev: device pointer
+ * @resource_array: resource bit array
+ * @max_resources: maximum resource number
+ * @req_resource_num: allocated resource number
+ * @next: next free id
+ **/
+static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
+ unsigned long *resource_array,
+ u32 max_resources,
+ u32 *req_resource_num,
+ u32 *next)
+{
+ u32 resource_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->resource_lock, flags);
+ resource_num = find_next_zero_bit(resource_array, max_resources, *next);
+ if (resource_num >= max_resources) {
+ resource_num = find_first_zero_bit(resource_array, max_resources);
+ if (resource_num >= max_resources) {
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+ return -EOVERFLOW;
+ }
+ }
+ set_bit(resource_num, resource_array);
+ *next = resource_num + 1;
+ if (*next == max_resources)
+ *next = 0;
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+ *req_resource_num = resource_num;
+
+ return 0;
+}
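+
+/*
+ * Illustrative usage sketch (not part of the patch itself): allocating a
+ * QP number from the qp bitmap with the helper above and releasing it via
+ * i40iw_free_resource() further below; names mirror i40iw_device fields.
+ *
+ *	u32 qp_num;
+ *
+ *	if (i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
+ *				 &qp_num, &iwdev->next_qp))
+ *		return -EOVERFLOW;
+ *	...
+ *	i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+ */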
+
+/**
+ * i40iw_is_resource_allocated - determine if resource is
+ * allocated
+ * @iwdev: device pointer
+ * @resource_array: resource array for the resource_num
+ * @resource_num: resource number to check
+ **/
+static inline bool i40iw_is_resource_allocated(struct i40iw_device *iwdev,
+ unsigned long *resource_array,
+ u32 resource_num)
+{
+ bool bit_is_set;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->resource_lock, flags);
+
+ bit_is_set = test_bit(resource_num, resource_array);
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+
+ return bit_is_set;
+}
+
+/**
+ * i40iw_free_resource - free a resource
+ * @iwdev: device pointer
+ * @resource_array: resource array for the resource_num
+ * @resource_num: resource number to free
+ **/
+static inline void i40iw_free_resource(struct i40iw_device *iwdev,
+ unsigned long *resource_array,
+ u32 resource_num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->resource_lock, flags);
+ clear_bit(resource_num, resource_array);
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+}
+
+/**
+ * to_iwhdl - Get the handler from the device pointer
+ * @iw_dev: device pointer
+ **/
+static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev)
+{
+ return container_of(iw_dev, struct i40iw_handler, device);
+}
+
+struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);
+
+/**
+ * i40iw_initialize_hw_resources - initialize hw resource tracking
+ */
+u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
+
+int i40iw_register_rdma_device(struct i40iw_device *iwdev);
+void i40iw_port_ibevent(struct i40iw_device *iwdev);
+int i40iw_cm_disconn(struct i40iw_qp *);
+void i40iw_cm_disconn_worker(void *);
+int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
+ struct sk_buff *);
+
+enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
+ struct i40iw_cqp_request *cqp_request);
+enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
+ u8 *mac_addr, u8 *mac_index);
+int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
+
+void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
+void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
+void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
+ struct i40iw_modify_qp_info *info, bool wait);
+
+enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cminfo,
+ enum i40iw_quad_entry_type etype,
+ enum i40iw_quad_hash_manage_type mtype,
+ void *cmnode,
+ bool wait);
+void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf);
+void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp);
+void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ u32 qp_num);
+enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
+ struct i40iw_dma_mem *memptr,
+ u32 size, u32 mask);
+
+void i40iw_request_reset(struct i40iw_device *iwdev);
+void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
+void i40iw_setup_cm_core(struct i40iw_device *iwdev);
+void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
+void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
+void i40iw_process_aeq(struct i40iw_device *);
+void i40iw_next_iw_state(struct i40iw_qp *iwqp,
+ u8 state, u8 del_hash,
+ u8 term, u8 term_len);
+int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack);
+struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
+ u16 rem_port,
+ u32 *rem_addr,
+ u16 loc_port,
+ u32 *loc_addr,
+ bool add_refcnt);
+
+enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
+ struct i40iw_sc_qp *qp,
+ struct i40iw_qp_flush_info *info,
+ bool wait);
+
+void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src);
+struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd,
+ u64 addr,
+ u64 size,
+ int acc,
+ u64 *iova_start);
+
+int i40iw_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
+int i40iw_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
+int i40iw_net_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
new file mode 100644
index 000000000000..38f917a6c778
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -0,0 +1,4137 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/atomic.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/notifier.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/highmem.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
+#include <net/neighbour.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+#include <net/ip_fib.h>
+#include <net/tcp.h>
+#include <asm/checksum.h>
+
+#include "i40iw.h"
+
+static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
+static void i40iw_cm_post_event(struct i40iw_cm_event *event);
+static void i40iw_disconnect_worker(struct work_struct *work);
+
+/**
+ * i40iw_free_sqbuf - put back puda buffer if refcount = 0
+ * @dev: FPK device
+ * @bufp: puda buffer to free
+ */
+void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp)
+{
+ struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
+ struct i40iw_puda_rsrc *ilq = dev->ilq;
+
+ if (!atomic_dec_return(&buf->refcount))
+ i40iw_puda_ret_bufpool(ilq, buf);
+}
+
+/**
+ * i40iw_derive_hw_ird_setting - Calculate IRD
+ *
+ * @cm_ird: IRD of connection's node
+ *
+ * The ird from the connection is rounded to a supported HW
+ * setting (2,8,32,64) and then encoded for ird_size field of
+ * qp_ctx
+ */
+static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
+{
+ u8 encoded_ird_size;
+ u8 pof2_cm_ird = 1;
+
+ /* round up to the next power of 2 */
+ while (pof2_cm_ird < cm_ird)
+ pof2_cm_ird *= 2;
+
+ /* ird_size field is encoded in qp_ctx */
+ switch (pof2_cm_ird) {
+ case I40IW_HW_IRD_SETTING_64:
+ encoded_ird_size = 3;
+ break;
+ case I40IW_HW_IRD_SETTING_32:
+ case I40IW_HW_IRD_SETTING_16:
+ encoded_ird_size = 2;
+ break;
+ case I40IW_HW_IRD_SETTING_8:
+ case I40IW_HW_IRD_SETTING_4:
+ encoded_ird_size = 1;
+ break;
+ case I40IW_HW_IRD_SETTING_2:
+ default:
+ encoded_ird_size = 0;
+ break;
+ }
+ return encoded_ird_size;
+}
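+
+/*
+ * Worked example (illustrative only): a requested cm_ird of 10 rounds up
+ * to 16 and encodes as ird_size = 2; a cm_ird of 1 or 2 lands in the
+ * I40IW_HW_IRD_SETTING_2/default arm and encodes as 0.
+ */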
+
+/**
+ * i40iw_record_ird_ord - Record IRD/ORD passed in
+ * @cm_node: connection's node
+ * @conn_ird: connection IRD
+ * @conn_ord: connection ORD
+ */
+static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
+{
+ if (conn_ird > I40IW_MAX_IRD_SIZE)
+ conn_ird = I40IW_MAX_IRD_SIZE;
+
+ if (conn_ord > I40IW_MAX_ORD_SIZE)
+ conn_ord = I40IW_MAX_ORD_SIZE;
+
+ cm_node->ird_size = conn_ird;
+ cm_node->ord_size = conn_ord;
+}
+
+/**
+ * i40iw_copy_ip_ntohl - change network to host ip
+ * @dst: host ip
+ * @src: network order (big endian) ip
+ */
+void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
+{
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst = ntohl(*src);
+}
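+
+/*
+ * Example (illustrative): the IPv6 loopback address ::1, held as four
+ * __be32 words, converts to the host-order words { 0, 0, 0, 1 };
+ * i40iw_copy_ip_htonl() below is the exact inverse.
+ */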
+
+/**
+ * i40iw_copy_ip_htonl - change host addr to network ip
+ * @dst: network order (big endian) ip
+ * @src: host order ip
+ */
+static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
+{
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst = htonl(*src);
+}
+
+/**
+ * i40iw_fill_sockaddr4 - get addr info for passive connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+ laddr->sin_family = AF_INET;
+ raddr->sin_family = AF_INET;
+
+ laddr->sin_port = htons(cm_node->loc_port);
+ raddr->sin_port = htons(cm_node->rem_port);
+
+ laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
+ raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
+}
+
+/**
+ * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+ struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
+
+ laddr6->sin6_family = AF_INET6;
+ raddr6->sin6_family = AF_INET6;
+
+ laddr6->sin6_port = htons(cm_node->loc_port);
+ raddr6->sin6_port = htons(cm_node->rem_port);
+
+ i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+}
+
+/**
+ * i40iw_get_addr_info
+ * @cm_node: contains ip/tcp info
+ * @cm_info: to get a copy of the cm_node ip/tcp info
+*/
+static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
+ struct i40iw_cm_info *cm_info)
+{
+ cm_info->ipv4 = cm_node->ipv4;
+ cm_info->vlan_id = cm_node->vlan_id;
+ memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
+ memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
+ cm_info->loc_port = cm_node->loc_port;
+ cm_info->rem_port = cm_node->rem_port;
+}
+
+/**
+ * i40iw_get_cmevent_info - for cm event upcall
+ * @cm_node: connection's node
+ * @cm_id: upper layers cm struct for the event
+ * @event: upper layer's cm event
+ */
+static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ struct iw_cm_event *event)
+{
+ memcpy(&event->local_addr, &cm_id->m_local_addr,
+ sizeof(event->local_addr));
+ memcpy(&event->remote_addr, &cm_id->m_remote_addr,
+ sizeof(event->remote_addr));
+ if (cm_node) {
+ event->private_data = (void *)cm_node->pdata_buf;
+ event->private_data_len = (u8)cm_node->pdata.size;
+ event->ird = cm_node->ird_size;
+ event->ord = cm_node->ord_size;
+ }
+}
+
+/**
+ * i40iw_send_cm_event - upcall cm's event handler
+ * @cm_node: connection's node
+ * @cm_id: upper layer's cm info struct
+ * @type: Event type to indicate
+ * @status: status for the event type
+ */
+static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ enum iw_cm_event_type type,
+ int status)
+{
+ struct iw_cm_event event;
+
+ memset(&event, 0, sizeof(event));
+ event.event = type;
+ event.status = status;
+ switch (type) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ if (cm_node->ipv4)
+ i40iw_fill_sockaddr4(cm_node, &event);
+ else
+ i40iw_fill_sockaddr6(cm_node, &event);
+ event.provider_data = (void *)cm_node;
+ event.private_data = (void *)cm_node->pdata_buf;
+ event.private_data_len = (u8)cm_node->pdata.size;
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ i40iw_get_cmevent_info(cm_node, cm_id, &event);
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ event.ird = cm_node->ird_size;
+ event.ord = cm_node->ord_size;
+ break;
+ case IW_CM_EVENT_DISCONNECT:
+ break;
+ case IW_CM_EVENT_CLOSE:
+ break;
+ default:
+ i40iw_pr_err("event type received type = %d\n", type);
+ return -1;
+ }
+ return cm_id->event_handler(cm_id, &event);
+}
+
+/**
+ * i40iw_create_event - create cm event
+ * @cm_node: connection's node
+ * @type: Event type to generate
+ */
+static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
+ enum i40iw_cm_event_type type)
+{
+ struct i40iw_cm_event *event;
+
+ if (!cm_node->cm_id)
+ return NULL;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+
+ if (!event)
+ return NULL;
+
+ event->type = type;
+ event->cm_node = cm_node;
+ memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
+ memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
+ event->cm_info.rem_port = cm_node->rem_port;
+ event->cm_info.loc_port = cm_node->loc_port;
+ event->cm_info.cm_id = cm_node->cm_id;
+
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
+ cm_node,
+ event,
+ type,
+ event->cm_info.loc_addr,
+ event->cm_info.rem_addr);
+
+ i40iw_cm_post_event(event);
+ return event;
+}
+
+/**
+ * i40iw_free_retrans_entry - free send entry
+ * @cm_node: connection's node
+ */
+static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_timer_entry *send_entry;
+
+ send_entry = cm_node->send_entry;
+ if (send_entry) {
+ cm_node->send_entry = NULL;
+ i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf);
+ kfree(send_entry);
+ atomic_dec(&cm_node->ref_count);
+ }
+}
+
+/**
+ * i40iw_cleanup_retrans_entry - free send entry with lock
+ * @cm_node: connection's node
+ */
+static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ i40iw_free_retrans_entry(cm_node);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+}
+
+static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node)
+{
+ if ((cm_node->rem_mac[0] == 0x0) &&
+ (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) ||
+ ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43)))))
+ return true;
+ return false;
+}
+
+/**
+ * i40iw_form_cm_frame - get a free packet and build frame
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+ struct i40iw_kmem_info *options,
+ struct i40iw_kmem_info *hdr,
+ struct i40iw_kmem_info *pdata,
+ u8 flags)
+{
+ struct i40iw_puda_buf *sqbuf;
+ struct i40iw_sc_dev *dev = cm_node->dev;
+ u8 *buf;
+
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct ethhdr *ethh;
+ u16 packetsize;
+ u16 eth_hlen = ETH_HLEN;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+
+ sqbuf = i40iw_puda_get_bufpool(dev->ilq);
+ if (!sqbuf)
+ return NULL;
+ buf = sqbuf->mem.va;
+
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata) {
+ pd_len = pdata->size;
+ if (!is_remote_ne020_or_chelsio(cm_node))
+ pd_len += MPA_ZERO_PAD_LEN;
+ }
+
+ if (cm_node->vlan_id < VLAN_TAG_PRESENT)
+ eth_hlen += 4;
+
+ if (cm_node->ipv4)
+ packetsize = sizeof(*iph) + sizeof(*tcph);
+ else
+ packetsize = sizeof(*ip6h) + sizeof(*tcph);
+ packetsize += opts_len + hdr_len + pd_len;
+
+ memset(buf, 0x00, eth_hlen + packetsize);
+
+ sqbuf->totallen = packetsize + eth_hlen;
+ sqbuf->maclen = eth_hlen;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = (void *)cm_node;
+
+ ethh = (struct ethhdr *)buf;
+ buf += eth_hlen;
+
+ if (cm_node->ipv4) {
+ sqbuf->ipv4 = true;
+
+ iph = (struct iphdr *)buf;
+ buf += sizeof(*iph);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
+ } else {
+ ethh->h_proto = htons(ETH_P_IP);
+ }
+
+ iph->version = IPVERSION;
+ iph->ihl = 5; /* 5 * 4-byte words, IP header len */
+ iph->tos = 0;
+ iph->tot_len = htons(packetsize);
+ iph->id = htons(++cm_node->tcp_cntxt.loc_id);
+
+ iph->frag_off = htons(0x4000);
+ iph->ttl = 0x40;
+ iph->protocol = IPPROTO_TCP;
+ iph->saddr = htonl(cm_node->loc_addr[0]);
+ iph->daddr = htonl(cm_node->rem_addr[0]);
+ } else {
+ sqbuf->ipv4 = false;
+ ip6h = (struct ipv6hdr *)buf;
+ buf += sizeof(*ip6h);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
+ } else {
+ ethh->h_proto = htons(ETH_P_IPV6);
+ }
+ ip6h->version = 6;
+ ip6h->flow_lbl[0] = 0;
+ ip6h->flow_lbl[1] = 0;
+ ip6h->flow_lbl[2] = 0;
+ ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
+ ip6h->nexthdr = 6;
+ ip6h->hop_limit = 128;
+ i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+ }
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else {
+ tcph->ack_seq = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->fin = 1;
+ }
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
+ tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->doff << 2;
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pd_len)
+ memcpy(buf, pdata->addr, pd_len);
+
+ atomic_set(&sqbuf->refcount, 1);
+
+ return sqbuf;
+}
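+
+/*
+ * Illustrative call (hypothetical): a bare SYN-ACK segment with no TCP
+ * options, MPA header or private data would be built as
+ *
+ *	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL,
+ *				    SET_SYN | SET_ACK);
+ *
+ * which is the shape the send helpers below rely on.
+ */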
+
+/**
+ * i40iw_send_reset - Send RST packet
+ * @cm_node: connection's node
+ */
+static int i40iw_send_reset(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_puda_buf *sqbuf;
+ int flags = SET_RST | SET_ACK;
+
+ sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
+ if (!sqbuf) {
+ i40iw_pr_err("no sqbuf\n");
+ return -1;
+ }
+
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
+}
+
+/**
+ * i40iw_active_open_err - send event for active side cm error
+ * @cm_node: connection's node
+ * @reset: Flag to send reset or not
+ */
+static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
+{
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_connect_errs++;
+ if (reset) {
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "%s cm_node=%p state=%d\n",
+ __func__,
+ cm_node,
+ cm_node->state);
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ }
+
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+}
+
+/**
+ * i40iw_passive_open_err - handle passive side cm error
+ * @cm_node: connection's node
+ * @reset: send reset or just free cm_node
+ */
+static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
+{
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_passive_errs++;
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "%s cm_node=%p state =%d\n",
+ __func__,
+ cm_node,
+ cm_node->state);
+ if (reset)
+ i40iw_send_reset(cm_node);
+ else
+ i40iw_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * i40iw_event_connect_error - to create connect error event
+ * @event: cm information for connect event
+ */
+static void i40iw_event_connect_error(struct i40iw_cm_event *event)
+{
+ struct i40iw_qp *iwqp;
+ struct iw_cm_id *cm_id;
+
+ cm_id = event->cm_node->cm_id;
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+
+ if (!iwqp || !iwqp->iwdev)
+ return;
+
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ i40iw_send_cm_event(event->cm_node, cm_id,
+ IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ cm_id->rem_ref(cm_id);
+ i40iw_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * i40iw_process_options
+ * @cm_node: connection's node
+ * @optionsloc: point to start of options
+ * @optionsize: size of all options
+ * @syn_packet: flag if syn packet
+ */
+static int i40iw_process_options(struct i40iw_cm_node *cm_node,
+ u8 *optionsloc,
+ u32 optionsize,
+ u32 syn_packet)
+{
+ u32 tmp;
+ u32 offset = 0;
+ union all_known_options *all_options;
+ char got_mss_option = 0;
+
+ while (offset < optionsize) {
+ all_options = (union all_known_options *)(optionsloc + offset);
+ switch (all_options->as_base.optionnum) {
+ case OPTION_NUMBER_END:
+ offset = optionsize;
+ break;
+ case OPTION_NUMBER_NONE:
+ offset += 1;
+ continue;
+ case OPTION_NUMBER_MSS:
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "%s: MSS Length: %d Offset: %d Size: %d\n",
+ __func__,
+ all_options->as_mss.length,
+ offset,
+ optionsize);
+ got_mss_option = 1;
+ if (all_options->as_mss.length != 4)
+ return -1;
+ tmp = ntohs(all_options->as_mss.mss);
+ if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss = tmp;
+ break;
+ case OPTION_NUMBER_WINDOW_SCALE:
+ cm_node->tcp_cntxt.snd_wscale =
+ all_options->as_windowscale.shiftcount;
+ break;
+ default:
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "TCP Option not understood: %x\n",
+ all_options->as_base.optionnum);
+ break;
+ }
+ offset += all_options->as_base.length;
+ }
+ if (!got_mss_option && syn_packet)
+ cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
+ return 0;
+}
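+
+/*
+ * Example (illustrative): a SYN carrying the MSS option bytes
+ * { 0x02, 0x04, 0x05, 0xb4 } advertises an MSS of 1460; the loop above
+ * lowers cm_node->tcp_cntxt.mss to the advertised value when that value
+ * is smaller than the current one.
+ */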
+
+/**
+ * i40iw_handle_tcp_options -
+ * @cm_node: connection's node
+ * @tcph: pointer tcp header
+ * @optionsize: size of options rcvd
+ * @passive: active or passive flag
+ */
+static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
+ struct tcphdr *tcph,
+ int optionsize,
+ int passive)
+{
+ u8 *optionsloc = (u8 *)&tcph[1];
+
+ if (optionsize) {
+ if (i40iw_process_options(cm_node,
+ optionsloc,
+ optionsize,
+ (u32)tcph->syn)) {
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "%s: Node %p, Sending RESET\n",
+ __func__,
+ cm_node);
+ if (passive)
+ i40iw_passive_open_err(cm_node, true);
+ else
+ i40iw_active_open_err(cm_node, true);
+ return -1;
+ }
+ }
+
+ cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
+ cm_node->tcp_cntxt.snd_wscale;
+
+ if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
+ cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+ return 0;
+}
+
+/**
+ * i40iw_build_mpa_v1 - build a MPA V1 frame
+ * @cm_node: connection's node
+ * @mpa_key: to do read0 or write0
+ */
+static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
+ void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
+ break;
+ case MPA_KEY_REPLY:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+ break;
+ default:
+ break;
+ }
+ mpa_frame->flags = IETF_MPA_FLAGS_CRC;
+ mpa_frame->rev = cm_node->mpa_frame_rev;
+ mpa_frame->priv_data_len = htons(cm_node->pdata.size);
+}
+
+/**
+ * i40iw_build_mpa_v2 - build a MPA V2 frame
+ * @cm_node: connection's node
+ * @start_addr: buffer start address
+ * @mpa_key: indicates an MPA request or reply key (read0 or write0)
+ */
+static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
+ void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
+ struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+
+ /* initialize the upper 5 bytes of the frame */
+ i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
+ mpa_frame->flags |= IETF_MPA_V2_FLAG;
+ mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
+
+ /* initialize RTR msg */
+ if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+ rtr_msg->ctrl_ird = IETF_NO_IRD_ORD;
+ rtr_msg->ctrl_ord = IETF_NO_IRD_ORD;
+ } else {
+ rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD : cm_node->ird_size;
+ rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD : cm_node->ord_size;
+ }
+
+ rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
+ rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+ rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ case MPA_KEY_REPLY:
+ switch (cm_node->send_rdma0_op) {
+ case SEND_RDMA_WRITE_ZERO:
+ rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+ break;
+ case SEND_RDMA_READ_ZERO:
+ rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
+ rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+}
+
+/**
+ * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
+ * @cm_node: connection's node
+ * @mpa: mpa data buffer
+ * @mpa_key: indicates an MPA request or reply key (read0 or write0)
+ */
+static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
+ struct i40iw_kmem_info *mpa,
+ u8 mpa_key)
+{
+ int hdr_len = 0;
+
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V1:
+ hdr_len = sizeof(struct ietf_mpa_v1);
+ i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
+ break;
+ case IETF_MPA_V2:
+ hdr_len = sizeof(struct ietf_mpa_v2);
+ i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
+ break;
+ default:
+ break;
+ }
+
+ return hdr_len;
+}
+
+/**
+ * i40iw_send_mpa_request - active node send mpa request to passive node
+ * @cm_node: connection's node
+ */
+static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_puda_buf *sqbuf;
+
+ if (!cm_node) {
+ i40iw_pr_err("cm_node == NULL\n");
+ return -1;
+ }
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
+ cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REQUEST);
+ if (!cm_node->mpa_hdr.size) {
+ i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
+ return -1;
+ }
+
+ sqbuf = i40iw_form_cm_frame(cm_node,
+ NULL,
+ &cm_node->mpa_hdr,
+ &cm_node->pdata,
+ SET_ACK);
+ if (!sqbuf) {
+ i40iw_pr_err("sq_buf == NULL\n");
+ return -1;
+ }
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_send_mpa_reject - send an mpa reject frame
+ * @cm_node: connection's node
+ * @pdata: reject data for connection
+ * @plen: length of reject data
+ */
+static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
+ const void *pdata,
+ u8 plen)
+{
+ struct i40iw_puda_buf *sqbuf;
+ struct i40iw_kmem_info priv_info;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
+ cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REPLY);
+
+ cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
+ priv_info.addr = (void *)pdata;
+ priv_info.size = plen;
+
+ sqbuf = i40iw_form_cm_frame(cm_node,
+ NULL,
+ &cm_node->mpa_hdr,
+ &priv_info,
+ SET_ACK | SET_FIN);
+ if (!sqbuf) {
+ i40iw_pr_err("no sqbuf\n");
+ return -ENOMEM;
+ }
+ cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_parse_mpa - process an IETF MPA frame
+ * @cm_node: connection's node
+ * @buffer: Data pointer
+ * @type: to return accept or reject
+ * @len: Len of mpa buffer
+ */
+static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
+{
+ struct ietf_mpa_v1 *mpa_frame;
+ struct ietf_mpa_v2 *mpa_v2_frame;
+ struct ietf_rtr_msg *rtr_msg;
+ int mpa_hdr_len;
+ int priv_data_len;
+
+ *type = I40IW_MPA_REQUEST_ACCEPT;
+
+ if (len < sizeof(struct ietf_mpa_v1)) {
+ i40iw_pr_err("ietf buffer small (%x)\n", len);
+ return -1;
+ }
+
+ mpa_frame = (struct ietf_mpa_v1 *)buffer;
+ mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+ priv_data_len = ntohs(mpa_frame->priv_data_len);
+
+ if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
+ i40iw_pr_err("large pri_data %d\n", priv_data_len);
+ return -1;
+ }
+ if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
+ i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
+ return -1;
+ }
+ if (mpa_frame->rev > cm_node->mpa_frame_rev) {
+ i40iw_pr_err("rev %d\n", mpa_frame->rev);
+ return -1;
+ }
+ cm_node->mpa_frame_rev = mpa_frame->rev;
+
+ if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
+ i40iw_pr_err("Unexpected MPA Key received\n");
+ return -1;
+ }
+ } else {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
+ i40iw_pr_err("Unexpected MPA Key received\n");
+ return -1;
+ }
+ }
+
+ if (priv_data_len + mpa_hdr_len > len) {
+ i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
+ priv_data_len, mpa_hdr_len, len);
+ return -1;
+ }
+ if (len > MAX_CM_BUFFER) {
+ i40iw_pr_err("ietf buffer large len = %d\n", len);
+ return -1;
+ }
+
+ switch (mpa_frame->rev) {
+ case IETF_MPA_V2:{
+ u16 ird_size;
+ u16 ord_size;
+ u16 ctrl_ord;
+ u16 ctrl_ird;
+
+ mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
+ mpa_hdr_len += IETF_RTR_MSG_SIZE;
+ rtr_msg = &mpa_v2_frame->rtr_msg;
+
+ /* parse rtr message */
+ ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+ ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+ ird_size = ctrl_ird & IETF_NO_IRD_ORD;
+ ord_size = ctrl_ord & IETF_NO_IRD_ORD;
+
+ if (!(ctrl_ird & IETF_PEER_TO_PEER))
+ return -1;
+
+ if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
+ cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
+ goto negotiate_done;
+ }
+
+ if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
+ /* responder */
+ if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
+ cm_node->ird_size = 1;
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+ } else {
+ /* initiator */
+ if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
+ return -1;
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+
+ if (cm_node->ird_size < ord_size)
+ /* no resources available */
+ return -1;
+ }
+
+negotiate_done:
+ if (ctrl_ord & IETF_RDMA0_READ)
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ else if (ctrl_ord & IETF_RDMA0_WRITE)
+ cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
+ else /* Not supported RDMA0 operation */
+ return -1;
+ i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
+ "MPAV2: Negotiated ORD: %d, IRD: %d\n",
+ cm_node->ord_size, cm_node->ird_size);
+ }
+ break;
+ case IETF_MPA_V1:
+ default:
+ break;
+ }
+
+ memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
+ cm_node->pdata.size = priv_data_len;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
+ *type = I40IW_MPA_REQUEST_REJECT;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
+ cm_node->snd_mark_en = true;
+
+ return 0;
+}
+
+/**
+ * i40iw_schedule_cm_timer - schedule send/close timer for a cm packet
+ * @cm_node: connection's node
+ * @sqbuf: buffer to send
+ * @type: send or close timer type
+ * @send_retrans: whether retransmits are to be done
+ * @close_when_complete: whether cm_node is to be removed on completion
+ *
+ * note - cm_node needs to be protected before calling this, i.e. take a
+ * reference with atomic_inc(&cm_node->ref_count) before the call and
+ * release it with i40iw_rem_ref_cm_node(cm_node) afterwards.
+ */
+int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *sqbuf,
+ enum i40iw_timer_type type,
+ int send_retrans,
+ int close_when_complete)
+{
+ struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_cm_core *cm_core = cm_node->cm_core;
+ struct i40iw_timer_entry *new_send;
+ int ret = 0;
+ u32 was_timer_set;
+ unsigned long flags;
+
+ new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+ if (!new_send) {
+ i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+ return -ENOMEM;
+ }
+ new_send->retrycount = I40IW_DEFAULT_RETRYS;
+ new_send->retranscount = I40IW_DEFAULT_RETRANS;
+ new_send->sqbuf = sqbuf;
+ new_send->timetosend = jiffies;
+ new_send->type = type;
+ new_send->send_retrans = send_retrans;
+ new_send->close_when_complete = close_when_complete;
+
+ if (type == I40IW_TIMER_TYPE_CLOSE) {
+ new_send->timetosend += (HZ / 10);
+ if (cm_node->close_entry) {
+ kfree(new_send);
+ i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+ i40iw_pr_err("already close entry\n");
+ return -EINVAL;
+ }
+ cm_node->close_entry = new_send;
+ }
+
+ if (type == I40IW_TIMER_TYPE_SEND) {
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ cm_node->send_entry = new_send;
+ atomic_inc(&cm_node->ref_count);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
+
+ atomic_inc(&sqbuf->refcount);
+ i40iw_puda_send_buf(dev->ilq, sqbuf);
+ if (!send_retrans) {
+ i40iw_cleanup_retrans_entry(cm_node);
+ if (close_when_complete)
+ i40iw_rem_ref_cm_node(cm_node);
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = new_send->timetosend;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ return ret;
+}
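+
+/*
+ * Illustrative calling pattern (hypothetical), mirroring the note above:
+ * the caller pins the node across the call so it cannot be freed while a
+ * retransmit entry is pending.
+ *
+ *	atomic_inc(&cm_node->ref_count);
+ *	ret = i40iw_schedule_cm_timer(cm_node, sqbuf,
+ *				      I40IW_TIMER_TYPE_SEND, 1, 0);
+ *	i40iw_rem_ref_cm_node(cm_node);
+ */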
+
+/**
+ * i40iw_retrans_expired - Could not rexmit the packet
+ * @cm_node: connection's node
+ */
+static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
+{
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ enum i40iw_cm_node_state state = cm_node->state;
+
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ switch (state) {
+ case I40IW_CM_STATE_SYN_RCVD:
+ case I40IW_CM_STATE_CLOSING:
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_LAST_ACK:
+ if (cm_node->cm_id)
+ cm_id->rem_ref(cm_id);
+ i40iw_send_reset(cm_node);
+ break;
+ default:
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_close_entry - for handling retry/timeouts
+ * @cm_node: connection's node
+ * @rem_node: flag for remove cm_node
+ */
+static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
+{
+ struct i40iw_timer_entry *close_entry = cm_node->close_entry;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct i40iw_qp *iwqp;
+ unsigned long flags;
+
+ if (!close_entry)
+ return;
+ iwqp = (struct i40iw_qp *)close_entry->sqbuf;
+ if (iwqp) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->cm_id) {
+ iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
+ iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
+ iwqp->last_aeq = I40IW_AE_RESET_SENT;
+ iwqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ i40iw_cm_disconn(iwqp);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ } else if (rem_node) {
+ /* TIME_WAIT state */
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+ if (cm_id)
+ cm_id->rem_ref(cm_id);
+ kfree(close_entry);
+ cm_node->close_entry = NULL;
+}
+
+/**
+ * i40iw_cm_timer_tick - system's timer expired callback
+ * @pass: pointer to cm_core
+ */
+static void i40iw_cm_timer_tick(unsigned long pass)
+{
+ unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_timer_entry *send_entry, *close_entry;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
+ u32 settimer = 0;
+ unsigned long timetosend;
+ struct i40iw_sc_dev *dev;
+ unsigned long flags;
+
+ struct list_head timer_list;
+
+ INIT_LIST_HEAD(&timer_list);
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+ cm_node = container_of(list_node, struct i40iw_cm_node, list);
+ if (cm_node->close_entry || cm_node->send_entry) {
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->timer_entry, &timer_list);
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ list_for_each_safe(list_node, list_core_temp, &timer_list) {
+ cm_node = container_of(list_node,
+ struct i40iw_cm_node,
+ timer_entry);
+ close_entry = cm_node->close_entry;
+
+ if (close_entry) {
+ if (time_after(close_entry->timetosend, jiffies)) {
+ if (nexttimeout > close_entry->timetosend ||
+ !settimer) {
+ nexttimeout = close_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ i40iw_handle_close_entry(cm_node, 1);
+ }
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ goto done;
+ if (time_after(send_entry->timetosend, jiffies)) {
+ if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
+ if ((nexttimeout > send_entry->timetosend) ||
+ !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ i40iw_free_retrans_entry(cm_node);
+ }
+ goto done;
+ }
+
+ if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
+ (cm_node->state == I40IW_CM_STATE_CLOSED)) {
+ i40iw_free_retrans_entry(cm_node);
+ goto done;
+ }
+
+ if (!send_entry->retranscount || !send_entry->retrycount) {
+ i40iw_free_retrans_entry(cm_node);
+
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ i40iw_retrans_expired(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ goto done;
+ }
+ cm_node->cm_core->stats_pkt_retrans++;
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ dev = cm_node->dev;
+ atomic_inc(&send_entry->sqbuf->refcount);
+ i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ if (send_entry->send_retrans) {
+ send_entry->retranscount--;
+ timetosend = (I40IW_RETRY_TIMEOUT <<
+ (I40IW_DEFAULT_RETRANS -
+ send_entry->retranscount));
+
+ send_entry->timetosend = jiffies +
+ min(timetosend, I40IW_MAX_TIMEOUT);
+ if (nexttimeout > send_entry->timetosend || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ int close_when_complete;
+
+ close_when_complete = send_entry->close_when_complete;
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "cm_node=%p state=%d\n",
+ cm_node,
+ cm_node->state);
+ i40iw_free_retrans_entry(cm_node);
+ if (close_when_complete)
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+done:
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+
+ if (settimer) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (!timer_pending(&cm_core->tcp_timer)) {
+ cm_core->tcp_timer.expires = nexttimeout;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
+}
+
+/**
+ * i40iw_send_syn - send SYN packet
+ * @cm_node: connection's node
+ * @sendack: flag to set ACK bit or not
+ */
+int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
+{
+ struct i40iw_puda_buf *sqbuf;
+ int flags = SET_SYN;
+ char optionsbuffer[sizeof(struct option_mss) +
+ sizeof(struct option_windowscale) +
+ sizeof(struct option_base) + TCP_OPTIONS_PADDING];
+ struct i40iw_kmem_info opts;
+
+ int optionssize = 0;
+ /* Sending MSS option */
+ union all_known_options *options;
+
+ opts.addr = optionsbuffer;
+ if (!cm_node) {
+ i40iw_pr_err("no cm_node\n");
+ return -EINVAL;
+ }
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_mss.optionnum = OPTION_NUMBER_MSS;
+ options->as_mss.length = sizeof(struct option_mss);
+ options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
+ optionssize += sizeof(struct option_mss);
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
+ options->as_windowscale.length = sizeof(struct option_windowscale);
+ options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
+ optionssize += sizeof(struct option_windowscale);
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_end = OPTION_NUMBER_END;
+ optionssize += 1;
+
+ if (sendack)
+ flags |= SET_ACK;
+
+ opts.size = optionssize;
+
+ sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
+ if (!sqbuf) {
+ i40iw_pr_err("no sqbuf\n");
+ return -1;
+ }
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_send_ack - Send ACK packet
+ * @cm_node: connection's node
+ */
+static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_puda_buf *sqbuf;
+
+ sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
+ if (sqbuf)
+ i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf);
+ else
+ i40iw_pr_err("no sqbuf\n");
+}
+
+/**
+ * i40iw_send_fin - Send FIN pkt
+ * @cm_node: connection's node
+ */
+static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_puda_buf *sqbuf;
+
+ sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
+ if (!sqbuf) {
+ i40iw_pr_err("no sqbuf\n");
+ return -1;
+ }
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_find_node - find a cm node that matches the reference cm node
+ * @cm_core: cm's core
+ * @rem_port: remote tcp port num
+ * @rem_addr: remote ip addr
+ * @loc_port: local tcp port num
+ * @loc_addr: loc ip addr
+ * @add_refcnt: flag to increment refcount of cm_node
+ */
+struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
+ u16 rem_port,
+ u32 *rem_addr,
+ u16 loc_port,
+ u32 *loc_addr,
+ bool add_refcnt)
+{
+ struct list_head *hte;
+ struct i40iw_cm_node *cm_node;
+ unsigned long flags;
+
+ hte = &cm_core->connected_nodes;
+
+ /* walk list and find cm_node associated with this session ID */
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_entry(cm_node, hte, list) {
+ if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
+ (cm_node->loc_port == loc_port) &&
+ !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
+ (cm_node->rem_port == rem_port)) {
+ if (add_refcnt)
+ atomic_inc(&cm_node->ref_count);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ return cm_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ /* no owner node */
+ return NULL;
+}
+
+/**
+ * i40iw_find_listener - find a cm node listening on this addr-port pair
+ * @cm_core: cm's core
+ * @dst_addr: listener ip addr
+ * @dst_port: listener tcp port num
+ * @vlan_id: vlan id of the listener
+ * @listener_state: state to match with listen node's
+ */
+static struct i40iw_cm_listener *i40iw_find_listener(
+ struct i40iw_cm_core *cm_core,
+ u32 *dst_addr,
+ u16 dst_port,
+ u16 vlan_id,
+ enum i40iw_cm_listener_state
+ listener_state)
+{
+ struct i40iw_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ u32 listen_addr[4];
+ u16 listen_port;
+ unsigned long flags;
+
+ /* walk list and find cm_node associated with this session ID */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
+ listen_port = listen_node->loc_port;
+ /* compare node pair, return node handle if a match */
+ if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
+ !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
+ (listen_port == dst_port) &&
+ (listener_state & listen_node->listener_state)) {
+ atomic_inc(&listen_node->ref_count);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return listen_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return NULL;
+}
+
+/**
+ * i40iw_add_hte_node - add a cm node to the hash table
+ * @cm_core: cm's core
+ * @cm_node: connection's node
+ */
+static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
+ struct i40iw_cm_node *cm_node)
+{
+ struct list_head *hte;
+ unsigned long flags;
+
+ if (!cm_node || !cm_core) {
+ i40iw_pr_err("cm_node or cm_core == NULL\n");
+ return;
+ }
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ /* get a handle on the hash table element (list head for this slot) */
+ hte = &cm_core->connected_nodes;
+ list_add_tail(&cm_node->list, hte);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+}
+
+/**
+ * i40iw_listen_port_in_use - determine if port is in use
+ * @cm_core: cm's core
+ * @port: Listen port number
+ */
+static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
+{
+ struct i40iw_cm_listener *listen_node;
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ if (listen_node->loc_port == port) {
+ ret = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return ret;
+}
+
+/**
+ * i40iw_del_multiple_qhash - Remove qhash and child listens
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ */
+static enum i40iw_status_code i40iw_del_multiple_qhash(
+ struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info,
+ struct i40iw_cm_listener *cm_parent_listen_node)
+{
+ struct i40iw_cm_listener *child_listen_node;
+ enum i40iw_status_code ret = I40IW_ERR_CONFIG;
+ struct list_head *pos, *tpos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
+ child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
+ if (child_listen_node->ipv4)
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ else
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+ "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ list_del(pos);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ cm_info->vlan_id = child_listen_node->vlan_id;
+ ret = i40iw_manage_qhash(iwdev, cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "freed pointer = %p\n",
+ child_listen_node);
+ }
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+
+ return ret;
+}
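+
+/*
+ * Note on the return value above: ret reflects only the last qhash delete
+ * issued; when the child list is already empty it stays at the
+ * I40IW_ERR_CONFIG it was initialized with.
+ */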
+
+/**
+ * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
+ * @addr: local IPv6 address
+ * @vlan_id: vlan id for the given IPv6 address
+ * @mac: mac address for the given IPv6 address
+ *
+ * Returns the net_device of the IPv6 address and also sets the
+ * vlan id and mac for that address.
+ */
+static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+{
+ struct net_device *ip_dev = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr laddr6;
+
+ i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
+ if (vlan_id)
+ *vlan_id = I40IW_NO_VLAN;
+ if (mac)
+ eth_zero_addr(mac);
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ if (ip_dev->dev_addr && mac)
+ ether_addr_copy(mac, ip_dev->dev_addr);
+ break;
+ }
+ }
+ rcu_read_unlock();
+#endif
+ return ip_dev;
+}
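+
+/*
+ * Caution (describing the lookup above): the returned net_device is found
+ * under RCU and no reference is taken on it, so after rcu_read_unlock() the
+ * pointer is only safe to use as an existence check.
+ */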
+
+/**
+ * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
+ * @addr: local IPv4 address
+ */
+static u16 i40iw_get_vlan_ipv4(u32 *addr)
+{
+ struct net_device *netdev;
+ u16 vlan_id = I40IW_NO_VLAN;
+
+ netdev = ip_dev_find(&init_net, htonl(addr[0]));
+ if (netdev) {
+ vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ dev_put(netdev);
+ }
+ return vlan_id;
+}
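+
+/*
+ * Illustrative call (taken from i40iw_accept() later in this file): CM
+ * addresses are kept in host byte order, so the stored address is passed
+ * through unchanged and this routine applies the htonl() itself:
+ *
+ * cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
+ */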
+
+/**
+ * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a child listen node for every IPv6 address on the
+ * adapter and installs the associated qhash filter
+ */
+static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info,
+ struct i40iw_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp;
+ enum i40iw_status_code ret = 0;
+ struct i40iw_cm_listener *child_listen_node;
+ unsigned long flags;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, ip_dev) {
+ if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
+ (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
+ (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ /* no ipv6 configured on this interface, try the next one */
+ continue;
+ }
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "IP=%pI6, vlan_id=%d, MAC=%pM\n",
+ &ifp->addr,
+ rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+ child_listen_node =
+ kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ i40iw_pr_err("listener memory allocation\n");
+ ret = I40IW_ERR_NO_MEMORY;
+ goto exit;
+ }
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+
+ i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
+ ifp->addr.in6_u.u6_addr32);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+
+ ret = i40iw_manage_qhash(iwdev, cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (!ret) {
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ } else {
+ kfree(child_listen_node);
+ }
+ }
+ }
+ }
+exit:
+ rtnl_unlock();
+ return ret;
+}
+
+/**
+ * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a child listen node for every IPv4 address on the
+ * adapter and installs the associated qhash filter
+ */
+static enum i40iw_status_code i40iw_add_mqh_4(
+ struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info,
+ struct i40iw_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *dev;
+ struct in_device *idev;
+ struct i40iw_cm_listener *child_listen_node;
+ enum i40iw_status_code ret = 0;
+ unsigned long flags;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
+ (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
+ (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+ idev = in_dev_get(dev);
+ if (!idev)
+ continue;
+ for_ifa(idev) {
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_address,
+ rdma_vlan_dev_vlan_id(dev),
+ dev->dev_addr);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ i40iw_pr_err("listener memory allocation\n");
+ in_dev_put(idev);
+ ret = I40IW_ERR_NO_MEMORY;
+ goto exit;
+ }
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+ memcpy(child_listen_node,
+ cm_parent_listen_node,
+ sizeof(*child_listen_node));
+
+ child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+
+ ret = i40iw_manage_qhash(iwdev,
+ cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ NULL,
+ true);
+ if (!ret) {
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ } else {
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
+ }
+ }
+ endfor_ifa(idev);
+ in_dev_put(idev);
+ }
+ }
+exit:
+ rtnl_unlock();
+ return ret;
+}
+
+/**
+ * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @free_hanging_nodes: to free associated cm_nodes
+ * @apbvt_del: flag to delete the apbvt
+ */
+static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
+ struct i40iw_cm_listener *listener,
+ int free_hanging_nodes, bool apbvt_del)
+{
+ int ret = -EINVAL;
+ int err = 0;
+ struct list_head *list_pos;
+ struct list_head *list_temp;
+ struct i40iw_cm_node *cm_node;
+ struct list_head reset_list;
+ struct i40iw_cm_info nfo;
+ struct i40iw_cm_node *loopback;
+ enum i40iw_cm_node_state old_state;
+ unsigned long flags;
+
+ /* free non-accelerated child nodes for this listener */
+ INIT_LIST_HEAD(&reset_list);
+ if (free_hanging_nodes) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_safe(list_pos, list_temp, &cm_core->connected_nodes) {
+ cm_node = container_of(list_pos, struct i40iw_cm_node, list);
+ if ((cm_node->listener == listener) && !cm_node->accelerated) {
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->reset_entry, &reset_list);
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
+
+ list_for_each_safe(list_pos, list_temp, &reset_list) {
+ cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
+ loopback = cm_node->loopbackpartner;
+ if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
+ i40iw_rem_ref_cm_node(cm_node);
+ } else {
+ if (!loopback) {
+ i40iw_cleanup_retrans_entry(cm_node);
+ err = i40iw_send_reset(cm_node);
+ if (err) {
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_pr_err("send reset\n");
+ } else {
+ old_state = cm_node->state;
+ cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
+ if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+ } else {
+ struct i40iw_cm_event event;
+
+ event.cm_node = loopback;
+ memcpy(event.cm_info.rem_addr,
+ loopback->rem_addr, sizeof(event.cm_info.rem_addr));
+ memcpy(event.cm_info.loc_addr,
+ loopback->loc_addr, sizeof(event.cm_info.loc_addr));
+ event.cm_info.rem_port = loopback->rem_port;
+ event.cm_info.loc_port = loopback->loc_port;
+ event.cm_info.cm_id = loopback->cm_id;
+ event.cm_info.ipv4 = loopback->ipv4;
+ atomic_inc(&loopback->ref_count);
+ loopback->state = I40IW_CM_STATE_CLOSED;
+ i40iw_event_connect_error(&event);
+ cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+ }
+ }
+
+ if (!atomic_dec_return(&listener->ref_count)) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_del(&listener->list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ if (listener->iwdev) {
+ if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
+ i40iw_manage_apbvt(listener->iwdev,
+ listener->loc_port,
+ I40IW_MANAGE_APBVT_DEL);
+
+ memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
+ nfo.loc_port = listener->loc_port;
+ nfo.ipv4 = listener->ipv4;
+ nfo.vlan_id = listener->vlan_id;
+
+ if (!list_empty(&listener->child_listen_list)) {
+ i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
+ } else {
+ if (listener->qhash_set)
+ i40iw_manage_qhash(listener->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ }
+ }
+
+ cm_core->stats_listen_destroyed++;
+ kfree(listener);
+ cm_core->stats_listen_nodes_destroyed++;
+ listener = NULL;
+ ret = 0;
+ }
+
+ if (listener) {
+ if (atomic_read(&listener->pend_accepts_cnt) > 0)
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "%s: listener (%p) pending accepts=%u\n",
+ __func__,
+ listener,
+ atomic_read(&listener->pend_accepts_cnt));
+ }
+
+ return ret;
+}
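+
+/*
+ * Return convention above: 0 only when the final reference was dropped and
+ * the listener actually freed; otherwise -EINVAL, telling the caller the
+ * listener is still referenced.
+ */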
+
+/**
+ * i40iw_cm_del_listen - delete a listener
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @apbvt_del: flag to delete apbvt
+ */
+static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
+ struct i40iw_cm_listener *listener,
+ bool apbvt_del)
+{
+ listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
+ listener->cm_id = NULL; /* going to be destroyed pretty soon */
+ return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
+}
+
+/**
+ * i40iw_addr_resolve_neigh - resolve neighbor address
+ * @iwdev: iwarp device structure
+ * @src_ip: local ip address
+ * @dst_ip: remote ip address
+ * @arpindex: index of the existing arp entry, if any
+ */
+static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
+ u32 src_ip,
+ u32 dst_ip,
+ int arpindex)
+{
+ struct rtable *rt;
+ struct neighbour *neigh;
+ int rc = arpindex;
+ struct net_device *netdev = iwdev->netdev;
+ __be32 dst_ipaddr = htonl(dst_ip);
+ __be32 src_ipaddr = htonl(src_ip);
+
+ rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
+ if (IS_ERR(rt)) {
+ i40iw_pr_err("ip_route_output\n");
+ return rc;
+ }
+
+ if (netif_is_bond_slave(netdev))
+ netdev = netdev_master_upper_dev_get(netdev);
+
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
+
+ rcu_read_lock();
+ if (neigh) {
+ if (neigh->nud_state & NUD_VALID) {
+ if (arpindex >= 0) {
+ if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
+ neigh->ha))
+ /* Mac address same as arp table */
+ goto resolve_neigh_exit;
+ i40iw_manage_arp_cache(iwdev,
+ iwdev->arp_table[arpindex].mac_addr,
+ &dst_ip,
+ true,
+ I40IW_ARP_DELETE);
+ }
+
+ i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
+ rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ }
+ resolve_neigh_exit:
+
+ rcu_read_unlock();
+ if (neigh)
+ neigh_release(neigh);
+
+ ip_rt_put(rt);
+ return rc;
+}
+
+/**
+ * i40iw_get_dst_ipv6 - look up the ipv6 route for a destination
+ * @src_addr: local ipv6 source address
+ * @dst_addr: remote ipv6 destination address
+ */
+static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr)
+{
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.daddr = dst_addr->sin6_addr;
+ fl6.saddr = src_addr->sin6_addr;
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = dst_addr->sin6_scope_id;
+
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ return dst;
+}
+
+/**
+ * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
+ * @iwdev: iwarp device structure
+ * @src: local ipv6 address
+ * @dest: remote ipv6 address
+ * @arpindex: index of the existing arp entry, if any
+ */
+static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
+ u32 *src,
+ u32 *dest,
+ int arpindex)
+{
+ struct neighbour *neigh;
+ int rc = arpindex;
+ struct net_device *netdev = iwdev->netdev;
+ struct dst_entry *dst;
+ struct sockaddr_in6 dst_addr;
+ struct sockaddr_in6 src_addr;
+
+ memset(&dst_addr, 0, sizeof(dst_addr));
+ dst_addr.sin6_family = AF_INET6;
+ i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
+ memset(&src_addr, 0, sizeof(src_addr));
+ src_addr.sin6_family = AF_INET6;
+ i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
+ dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
+ if (!dst || dst->error) {
+ if (dst) {
+ i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
+ dst->error);
+ dst_release(dst);
+ }
+ return rc;
+ }
+
+ if (netif_is_bond_slave(netdev))
+ netdev = netdev_master_upper_dev_get(netdev);
+
+ neigh = dst_neigh_lookup(dst, &dst_addr);
+
+ rcu_read_lock();
+ if (neigh) {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
+ if (neigh->nud_state & NUD_VALID) {
+ if (arpindex >= 0) {
+ if (ether_addr_equal
+ (iwdev->arp_table[arpindex].mac_addr,
+ neigh->ha)) {
+ /* Mac address same as in arp table */
+ goto resolve_neigh_exit6;
+ }
+ i40iw_manage_arp_cache(iwdev,
+ iwdev->arp_table[arpindex].mac_addr,
+ dest,
+ false,
+ I40IW_ARP_DELETE);
+ }
+ i40iw_manage_arp_cache(iwdev,
+ neigh->ha,
+ dest,
+ false,
+ I40IW_ARP_ADD);
+ rc = i40iw_arp_table(iwdev,
+ dest,
+ false,
+ NULL,
+ I40IW_ARP_RESOLVE);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ }
+
+ resolve_neigh_exit6:
+ rcu_read_unlock();
+ if (neigh)
+ neigh_release(neigh);
+ dst_release(dst);
+ return rc;
+}
+
+/**
+ * i40iw_ipv4_is_loopback - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
+{
+ return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
+}
+
+/**
+ * i40iw_ipv6_is_loopback - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
+{
+ struct in6_addr raddr6;
+
+ i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
+ return (!memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6));
+}
+
+/**
+ * i40iw_make_cm_node - create a new instance of a cm node
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ * @listener: passive connection's listener
+ */
+static struct i40iw_cm_node *i40iw_make_cm_node(
+ struct i40iw_cm_core *cm_core,
+ struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info,
+ struct i40iw_cm_listener *listener)
+{
+ struct i40iw_cm_node *cm_node;
+ struct timespec ts;
+ int oldarpindex;
+ int arpindex;
+ struct net_device *netdev = iwdev->netdev;
+
+ /* create an hte and cm_node for this instance */
+ cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node specific transport info */
+ cm_node->ipv4 = cm_info->ipv4;
+ cm_node->vlan_id = cm_info->vlan_id;
+ memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
+ memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
+ cm_node->loc_port = cm_info->loc_port;
+ cm_node->rem_port = cm_info->rem_port;
+
+ cm_node->mpa_frame_rev = iwdev->mpa_version;
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ cm_node->ird_size = I40IW_MAX_IRD_SIZE;
+ cm_node->ord_size = I40IW_MAX_ORD_SIZE;
+
+ cm_node->listener = listener;
+ cm_node->cm_id = cm_info->cm_id;
+ ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
+ spin_lock_init(&cm_node->retrans_list_lock);
+
+ atomic_set(&cm_node->ref_count, 1);
+ /* associate our parent CM core */
+ cm_node->cm_core = cm_core;
+ cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
+ cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ cm_node->tcp_cntxt.rcv_wnd =
+ I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ /* seed the initial local sequence number from the current time */
+ ts = current_kernel_time();
+ cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
+ cm_node->tcp_cntxt.mss = iwdev->mss;
+
+ cm_node->iwdev = iwdev;
+ cm_node->dev = &iwdev->sc_dev;
+
+ if ((cm_node->ipv4 &&
+ i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+ (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
+ cm_node->rem_addr))) {
+ arpindex = i40iw_arp_table(iwdev,
+ cm_node->rem_addr,
+ false,
+ NULL,
+ I40IW_ARP_RESOLVE);
+ } else {
+ oldarpindex = i40iw_arp_table(iwdev,
+ cm_node->rem_addr,
+ false,
+ NULL,
+ I40IW_ARP_RESOLVE);
+ if (cm_node->ipv4)
+ arpindex = i40iw_addr_resolve_neigh(iwdev,
+ cm_info->loc_addr[0],
+ cm_info->rem_addr[0],
+ oldarpindex);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
+ cm_info->loc_addr,
+ cm_info->rem_addr,
+ oldarpindex);
+ else
+ arpindex = -EINVAL;
+ }
+ if (arpindex < 0) {
+ i40iw_pr_err("cm_node arpindex\n");
+ kfree(cm_node);
+ return NULL;
+ }
+ ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
+ i40iw_add_hte_node(cm_core, cm_node);
+ cm_core->stats_nodes_created++;
+ return cm_node;
+}
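+
+/*
+ * Lifetime note (inferred from this file): a node returned by
+ * i40iw_make_cm_node() starts with ref_count = 1 and is already on the
+ * connected-nodes list; the matching i40iw_rem_ref_cm_node() unlinks and
+ * frees it once the count reaches zero.
+ */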
+
+/**
+ * i40iw_rem_ref_cm_node - destroy an instance of a cm node
+ * @cm_node: connection's node
+ */
+static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_cm_core *cm_core = cm_node->cm_core;
+ struct i40iw_qp *iwqp;
+ struct i40iw_cm_info nfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
+ if (atomic_dec_return(&cm_node->ref_count)) {
+ spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+ return;
+ }
+ list_del(&cm_node->list);
+ spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+
+ /* if the node is destroyed before connection was accelerated */
+ if (!cm_node->accelerated && cm_node->accept_pend) {
+ pr_err("node destroyed before established\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ }
+ if (cm_node->close_entry)
+ i40iw_handle_close_entry(cm_node, 0);
+ if (cm_node->listener) {
+ i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
+ } else {
+ if (!i40iw_listen_port_in_use(cm_core, htons(cm_node->loc_port)) &&
+ cm_node->apbvt_set && cm_node->iwdev) {
+ i40iw_manage_apbvt(cm_node->iwdev,
+ cm_node->loc_port,
+ I40IW_MANAGE_APBVT_DEL);
+ i40iw_get_addr_info(cm_node, &nfo);
+ if (cm_node->qhash_set) {
+ i40iw_manage_qhash(cm_node->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ cm_node->qhash_set = 0;
+ }
+ }
+ }
+
+ iwqp = cm_node->iwqp;
+ if (iwqp) {
+ iwqp->cm_node = NULL;
+ i40iw_rem_ref(&iwqp->ibqp);
+ cm_node->iwqp = NULL;
+ } else if (cm_node->qhash_set) {
+ i40iw_get_addr_info(cm_node, &nfo);
+ i40iw_manage_qhash(cm_node->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ cm_node->qhash_set = 0;
+ }
+
+ cm_node->cm_core->stats_nodes_destroyed++;
+ kfree(cm_node);
+}
+
+/**
+ * i40iw_handle_fin_pkt - FIN packet received
+ * @cm_node: connection's node
+ */
+static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
+{
+ u32 ret;
+
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_RCVD:
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_MPAREJ_RCVD:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_LAST_ACK;
+ i40iw_send_fin(cm_node);
+ break;
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_FIN_WAIT1:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSING;
+ i40iw_send_ack(cm_node);
+ /*
+ * Wait for ACK as this is simultaneous close.
+ * After we receive ACK, do not send anything.
+ * Just rm the node.
+ */
+ break;
+ case I40IW_CM_STATE_FIN_WAIT2:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_TIME_WAIT;
+ i40iw_send_ack(cm_node);
+ ret = i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);
+ if (ret)
+ i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
+ break;
+ case I40IW_CM_STATE_TIME_WAIT:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ default:
+ i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_rst_pkt - process received RST packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ i40iw_cleanup_retrans_entry(cm_node);
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V2:
+ cm_node->mpa_frame_rev = IETF_MPA_V1;
+ /* send a syn and goto syn sent state */
+ cm_node->state = I40IW_CM_STATE_SYN_SENT;
+ if (i40iw_send_syn(cm_node, 0))
+ i40iw_active_open_err(cm_node, false);
+ break;
+ case IETF_MPA_V1:
+ default:
+ i40iw_active_open_err(cm_node, false);
+ break;
+ }
+ break;
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ atomic_add_return(1, &cm_node->passive_state);
+ break;
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_SYN_RCVD:
+ case I40IW_CM_STATE_LISTENING:
+ i40iw_pr_err("Bad state state = %d\n", cm_node->state);
+ i40iw_passive_open_err(cm_node, false);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ i40iw_active_open_err(cm_node, false);
+ break;
+ case I40IW_CM_STATE_CLOSED:
+ break;
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_LAST_ACK:
+ cm_node->cm_id->rem_ref(cm_node->cm_id);
+ /* fall through */
+ case I40IW_CM_STATE_TIME_WAIT:
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ int ret;
+ int datasize = rbuf->datalen;
+ u8 *dataloc = rbuf->data;
+
+ enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
+ u32 res_type;
+
+ ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
+ if (ret) {
+ if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
+ i40iw_active_open_err(cm_node, true);
+ else
+ i40iw_passive_open_err(cm_node, true);
+ return;
+ }
+
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_ESTABLISHED:
+ if (res_type == I40IW_MPA_REQUEST_REJECT)
+ i40iw_pr_err("state for reject\n");
+ cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
+ type = I40IW_CM_EVENT_MPA_REQ;
+ i40iw_send_ack(cm_node); /* ACK received MPA request */
+ atomic_set(&cm_node->passive_state,
+ I40IW_PASSIVE_STATE_INDICATED);
+ break;
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ i40iw_cleanup_retrans_entry(cm_node);
+ if (res_type == I40IW_MPA_REQUEST_REJECT) {
+ type = I40IW_CM_EVENT_MPA_REJECT;
+ cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
+ } else {
+ type = I40IW_CM_EVENT_CONNECTED;
+ cm_node->state = I40IW_CM_STATE_OFFLOADED;
+ i40iw_send_ack(cm_node);
+ }
+ break;
+ default:
+ pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
+ break;
+ }
+ i40iw_create_event(cm_node, type);
+}
+
+/**
+ * i40iw_indicate_pkt_err - Send up err event to cm
+ * @cm_node: connection's node
+ */
+static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ i40iw_active_open_err(cm_node, true);
+ break;
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_SYN_RCVD:
+ i40iw_passive_open_err(cm_node, true);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_check_syn - Check for error on received syn ack
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ */
+static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
+{
+ int err = 0;
+
+ if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
+ err = 1;
+ i40iw_active_open_err(cm_node, true);
+ }
+ return err;
+}
+
+/**
+ * i40iw_check_seq - check whether the segment's seq numbers are acceptable
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ */
+static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
+{
+ int err = 0;
+ u32 seq;
+ u32 ack_seq;
+ u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
+ u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ u32 rcv_wnd;
+
+ seq = ntohl(tcph->seq);
+ ack_seq = ntohl(tcph->ack_seq);
+ rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ if (ack_seq != loc_seq_num)
+ err = -1;
+ else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
+ err = -1;
+ if (err) {
+ i40iw_pr_err("seq number\n");
+ i40iw_indicate_pkt_err(cm_node);
+ }
+ return err;
+}
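+
+/*
+ * Acceptance window used above: a segment is in sequence when
+ * rcv_nxt <= seq < rcv_nxt + rcv_wnd (modulo 2^32, handled by between()),
+ * and its ACK must match the current local sequence number exactly.
+ */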
+
+/**
+ * i40iw_handle_syn_pkt - handle a SYN packet (passive side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int ret;
+ u32 inc_sequence;
+ int optionsize;
+ struct i40iw_cm_info nfo;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ /* Rcvd syn on active open connection */
+ i40iw_active_open_err(cm_node, 1);
+ break;
+ case I40IW_CM_STATE_LISTENING:
+ /* Passive OPEN */
+ if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ cm_node->listener->backlog) {
+ cm_node->cm_core->stats_backlog_drops++;
+ i40iw_passive_open_err(cm_node, false);
+ break;
+ }
+ ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (ret) {
+ i40iw_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ cm_node->accept_pend = 1;
+ atomic_inc(&cm_node->listener->pend_accepts_cnt);
+
+ cm_node->state = I40IW_CM_STATE_SYN_RCVD;
+ i40iw_get_addr_info(cm_node, &nfo);
+ ret = i40iw_manage_qhash(cm_node->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ (void *)cm_node,
+ false);
+ cm_node->qhash_set = true;
+ break;
+ case I40IW_CM_STATE_CLOSED:
+ i40iw_cleanup_retrans_entry(cm_node);
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ case I40IW_CM_STATE_LAST_ACK:
+ case I40IW_CM_STATE_CLOSING:
+ case I40IW_CM_STATE_UNKNOWN:
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int ret;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_SENT:
+ i40iw_cleanup_retrans_entry(cm_node);
+ /* active open */
+ if (i40iw_check_syn(cm_node, tcph)) {
+ i40iw_pr_err("check syn fail\n");
+ return;
+ }
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ /* setup options */
+ ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
+ if (ret) {
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "cm_node=%p tcp_options failed\n",
+ cm_node);
+ break;
+ }
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ i40iw_send_ack(cm_node); /* ACK for the syn_ack */
+ ret = i40iw_send_mpa_request(cm_node);
+ if (ret) {
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "cm_node=%p i40iw_send_mpa_request failed\n",
+ cm_node);
+ break;
+ }
+ cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
+ break;
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ i40iw_passive_open_err(cm_node, true);
+ break;
+ case I40IW_CM_STATE_LISTENING:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_CLOSED:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ i40iw_cleanup_retrans_entry(cm_node);
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_LAST_ACK:
+ case I40IW_CM_STATE_OFFLOADED:
+ case I40IW_CM_STATE_CLOSING:
+ case I40IW_CM_STATE_UNKNOWN:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_ack_pkt - process packet with ACK
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 inc_sequence;
+ int ret = 0;
+ int optionsize;
+ u32 datasize = rbuf->datalen;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+
+ if (i40iw_check_seq(cm_node, tcph))
+ return -EINVAL;
+
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_RCVD:
+ i40iw_cleanup_retrans_entry(cm_node);
+ ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (ret)
+ break;
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ cm_node->state = I40IW_CM_STATE_ESTABLISHED;
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ i40iw_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case I40IW_CM_STATE_ESTABLISHED:
+ i40iw_cleanup_retrans_entry(cm_node);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ i40iw_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ i40iw_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case I40IW_CM_STATE_LISTENING:
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_CLOSED:
+ i40iw_cleanup_retrans_entry(cm_node);
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_LAST_ACK:
+ case I40IW_CM_STATE_CLOSING:
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ if (!cm_node->accept_pend)
+ cm_node->cm_id->rem_ref(cm_node->cm_id);
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ case I40IW_CM_STATE_FIN_WAIT1:
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
+ break;
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_OFFLOADED:
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ case I40IW_CM_STATE_UNKNOWN:
+ default:
+ i40iw_cleanup_retrans_entry(cm_node);
+ break;
+ }
+ return ret;
+}
+
+/**
+ * i40iw_process_packet - process cm packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 fin_set = 0;
+ int ret;
+
+ if (tcph->rst) {
+ pkt_type = I40IW_PKT_TYPE_RST;
+ } else if (tcph->syn) {
+ pkt_type = I40IW_PKT_TYPE_SYN;
+ if (tcph->ack)
+ pkt_type = I40IW_PKT_TYPE_SYNACK;
+ } else if (tcph->ack) {
+ pkt_type = I40IW_PKT_TYPE_ACK;
+ }
+ if (tcph->fin)
+ fin_set = 1;
+
+ switch (pkt_type) {
+ case I40IW_PKT_TYPE_SYN:
+ i40iw_handle_syn_pkt(cm_node, rbuf);
+ break;
+ case I40IW_PKT_TYPE_SYNACK:
+ i40iw_handle_synack_pkt(cm_node, rbuf);
+ break;
+ case I40IW_PKT_TYPE_ACK:
+ ret = i40iw_handle_ack_pkt(cm_node, rbuf);
+ if (fin_set && !ret)
+ i40iw_handle_fin_pkt(cm_node);
+ break;
+ case I40IW_PKT_TYPE_RST:
+ i40iw_handle_rst_pkt(cm_node, rbuf);
+ break;
+ default:
+ if (fin_set &&
+ (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
+ i40iw_handle_fin_pkt(cm_node);
+ break;
+ }
+}
+
+/**
+ * i40iw_make_listen_node - create a listen node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ */
+static struct i40iw_cm_listener *i40iw_make_listen_node(
+ struct i40iw_cm_core *cm_core,
+ struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info)
+{
+ struct i40iw_cm_listener *listener;
+ unsigned long flags;
+
+ /* cannot have multiple matching listeners */
+ listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
+ cm_info->loc_port,
+ cm_info->vlan_id,
+ I40IW_CM_LISTENER_EITHER_STATE);
+ if (listener &&
+ (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
+ atomic_dec(&listener->ref_count);
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "Not creating listener since it already exists\n");
+ return NULL;
+ }
+
+ if (!listener) {
+ /* create a CM listen node (1/2 node to compare incoming traffic to) */
+ listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
+ if (!listener)
+ return NULL;
+ cm_core->stats_listen_nodes_created++;
+ memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
+ listener->loc_port = cm_info->loc_port;
+
+ INIT_LIST_HEAD(&listener->child_listen_list);
+
+ atomic_set(&listener->ref_count, 1);
+ } else {
+ listener->reused_node = 1;
+ }
+
+ listener->cm_id = cm_info->cm_id;
+ listener->ipv4 = cm_info->ipv4;
+ listener->vlan_id = cm_info->vlan_id;
+ atomic_set(&listener->pend_accepts_cnt, 0);
+ listener->cm_core = cm_core;
+ listener->iwdev = iwdev;
+
+ listener->backlog = cm_info->backlog;
+ listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;
+
+ if (!listener->reused_node) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_add(&listener->list, &cm_core->listen_nodes);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+
+ return listener;
+}
+
+/**
+ * i40iw_create_cm_node - make a connection node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @private_data_len: length of private data for mpa request
+ * @private_data: pointer to private data for connection
+ * @cm_info: quad info for connection
+ */
+static struct i40iw_cm_node *i40iw_create_cm_node(
+ struct i40iw_cm_core *cm_core,
+ struct i40iw_device *iwdev,
+ u16 private_data_len,
+ void *private_data,
+ struct i40iw_cm_info *cm_info)
+{
+ int ret;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_cm_listener *loopback_remotelistener;
+ struct i40iw_cm_node *loopback_remotenode;
+ struct i40iw_cm_info loopback_cm_info;
+
+ /* create a CM connection node */
+ cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
+ if (!cm_node)
+ return NULL;
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+
+ if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
+ loopback_remotelistener = i40iw_find_listener(
+ cm_core,
+ cm_info->rem_addr,
+ cm_node->rem_port,
+ cm_node->vlan_id,
+ I40IW_CM_LISTENER_ACTIVE_STATE);
+ if (!loopback_remotelistener) {
+ i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+ } else {
+ loopback_cm_info = *cm_info;
+ loopback_cm_info.loc_port = cm_info->rem_port;
+ loopback_cm_info.rem_port = cm_info->loc_port;
+ loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
+ loopback_cm_info.ipv4 = cm_info->ipv4;
+ loopback_remotenode = i40iw_make_cm_node(cm_core,
+ iwdev,
+ &loopback_cm_info,
+ loopback_remotelistener);
+ if (!loopback_remotenode) {
+ i40iw_rem_ref_cm_node(cm_node);
+ return NULL;
+ }
+ cm_core->stats_loopbacks++;
+ loopback_remotenode->loopbackpartner = cm_node;
+ loopback_remotenode->tcp_cntxt.rcv_wscale =
+ I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ cm_node->loopbackpartner = loopback_remotenode;
+ memcpy(loopback_remotenode->pdata_buf, private_data,
+ private_data_len);
+ loopback_remotenode->pdata.size = private_data_len;
+
+ cm_node->state = I40IW_CM_STATE_OFFLOADED;
+ cm_node->tcp_cntxt.rcv_nxt =
+ loopback_remotenode->tcp_cntxt.loc_seq_num;
+ loopback_remotenode->tcp_cntxt.rcv_nxt =
+ cm_node->tcp_cntxt.loc_seq_num;
+ cm_node->tcp_cntxt.max_snd_wnd =
+ loopback_remotenode->tcp_cntxt.rcv_wnd;
+ loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
+ loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
+ loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
+ loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
+ i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
+ }
+ return cm_node;
+ }
+
+ cm_node->pdata.size = private_data_len;
+ cm_node->pdata.addr = cm_node->pdata_buf;
+
+ memcpy(cm_node->pdata_buf, private_data, private_data_len);
+
+ cm_node->state = I40IW_CM_STATE_SYN_SENT;
+ ret = i40iw_send_syn(cm_node, 0);
+
+ if (ret) {
+ if (cm_node->ipv4)
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI4",
+ cm_node->rem_addr);
+ else
+ i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI6",
+ cm_node->rem_addr);
+ i40iw_rem_ref_cm_node(cm_node);
+ cm_node = NULL;
+ }
+
+ if (cm_node)
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
+ cm_node->rem_port,
+ cm_node,
+ cm_node->cm_id);
+
+ return cm_node;
+}
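+
+/*
+ * Loopback note: when the local and remote addresses match, no SYN is ever
+ * put on the wire; the two nodes created above are cross-linked and moved
+ * directly to OFFLOADED / MPAREQ_RCVD, so the MPA exchange happens entirely
+ * in software.
+ */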
+
+/**
+ * i40iw_cm_reject - reject and teardown a connection
+ * @cm_node: connection's node
+ * @pdata: ptr to private data for reject
+ * @plen: size of private data
+ */
+static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
+{
+ int ret = 0;
+ int err;
+ int passive_state;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct i40iw_cm_node *loopback = cm_node->loopbackpartner;
+
+ if (cm_node->tcp_cntxt.client)
+ return ret;
+ i40iw_cleanup_retrans_entry(cm_node);
+
+ if (!loopback) {
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == I40IW_SEND_RESET_EVENT) {
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_rem_ref_cm_node(cm_node);
+ } else {
+ if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
+ i40iw_rem_ref_cm_node(cm_node);
+ } else {
+ ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
+ if (ret) {
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ err = i40iw_send_reset(cm_node);
+ if (err)
+ i40iw_pr_err("send reset failed\n");
+ } else {
+ cm_id->add_ref(cm_id);
+ }
+ }
+ }
+ } else {
+ cm_node->cm_id = NULL;
+ if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
+ i40iw_rem_ref_cm_node(cm_node);
+ i40iw_rem_ref_cm_node(loopback);
+ } else {
+ ret = i40iw_send_cm_event(loopback,
+ loopback->cm_id,
+ IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNREFUSED);
+ i40iw_rem_ref_cm_node(cm_node);
+ loopback->state = I40IW_CM_STATE_CLOSING;
+
+ cm_id = loopback->cm_id;
+ i40iw_rem_ref_cm_node(loopback);
+ cm_id->rem_ref(cm_id);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * i40iw_cm_close - close a cm connection
+ * @cm_node: connection's node
+ */
+static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
+{
+ int ret = 0;
+
+ if (!cm_node)
+ return -EINVAL;
+
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_RCVD:
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_ACCEPTING:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ i40iw_cleanup_retrans_entry(cm_node);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_CLOSE_WAIT:
+ cm_node->state = I40IW_CM_STATE_LAST_ACK;
+ i40iw_send_fin(cm_node);
+ break;
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_LAST_ACK:
+ case I40IW_CM_STATE_TIME_WAIT:
+ case I40IW_CM_STATE_CLOSING:
+ ret = -1;
+ break;
+ case I40IW_CM_STATE_LISTENING:
+ i40iw_cleanup_retrans_entry(cm_node);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_MPAREJ_RCVD:
+ case I40IW_CM_STATE_UNKNOWN:
+ case I40IW_CM_STATE_INITED:
+ case I40IW_CM_STATE_CLOSED:
+ case I40IW_CM_STATE_LISTENER_DESTROYED:
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ if (cm_node->send_entry)
+ i40iw_pr_err("send_entry\n");
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ }
+ return ret;
+}
+
+/**
+ * i40iw_receive_ilq - recv an Ethernet packet and process it
+ * through CM
+ * @dev: FPK dev struct
+ * @rbuf: receive buffer
+ */
+void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+{
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_cm_listener *listener;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ struct i40iw_cm_info cm_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ struct vlan_ethhdr *ethh;
+
+ /* if vlan, then maclen = 18 else 14 */
+ iph = (struct iphdr *)rbuf->iph;
+ memset(&cm_info, 0, sizeof(cm_info));
+
+ i40iw_debug_buf(dev,
+ I40IW_DEBUG_ILQ,
+ "RECEIVE ILQ BUFFER",
+ rbuf->mem.va,
+ rbuf->totallen);
+ ethh = (struct vlan_ethhdr *)rbuf->mem.va;
+
+ if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
+ cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK;
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "%s vlan_id=%d\n",
+ __func__,
+ cm_info.vlan_id);
+ } else {
+ cm_info.vlan_id = I40IW_NO_VLAN;
+ }
+ tcph = (struct tcphdr *)rbuf->tcph;
+
+ if (rbuf->ipv4) {
+ cm_info.loc_addr[0] = ntohl(iph->daddr);
+ cm_info.rem_addr[0] = ntohl(iph->saddr);
+ cm_info.ipv4 = true;
+ } else {
+ ip6h = (struct ipv6hdr *)rbuf->iph;
+ i40iw_copy_ip_ntohl(cm_info.loc_addr,
+ ip6h->daddr.in6_u.u6_addr32);
+ i40iw_copy_ip_ntohl(cm_info.rem_addr,
+ ip6h->saddr.in6_u.u6_addr32);
+ cm_info.ipv4 = false;
+ }
+ cm_info.loc_port = ntohs(tcph->dest);
+ cm_info.rem_port = ntohs(tcph->source);
+ cm_node = i40iw_find_node(cm_core,
+ cm_info.rem_port,
+ cm_info.rem_addr,
+ cm_info.loc_port,
+ cm_info.loc_addr,
+ true);
+
+ if (!cm_node) {
+ /* the only packet accepted here is a SYN for a passive open */
+ if (!tcph->syn || tcph->ack)
+ return;
+ listener =
+ i40iw_find_listener(cm_core,
+ cm_info.loc_addr,
+ cm_info.loc_port,
+ cm_info.vlan_id,
+ I40IW_CM_LISTENER_ACTIVE_STATE);
+ if (!listener) {
+ cm_info.cm_id = NULL;
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "%s no listener found\n",
+ __func__);
+ return;
+ }
+ cm_info.cm_id = listener->cm_id;
+ cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
+ if (!cm_node) {
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "%s allocate node failed\n",
+ __func__);
+ atomic_dec(&listener->ref_count);
+ return;
+ }
+ if (!tcph->rst && !tcph->fin) {
+ cm_node->state = I40IW_CM_STATE_LISTENING;
+ } else {
+ i40iw_rem_ref_cm_node(cm_node);
+ return;
+ }
+ atomic_inc(&cm_node->ref_count);
+ } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
+ i40iw_rem_ref_cm_node(cm_node);
+ return;
+ }
+ i40iw_process_packet(cm_node, rbuf);
+ i40iw_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * i40iw_setup_cm_core - allocate a top level instance of a cm
+ * core
+ * @iwdev: iwarp device structure
+ */
+void i40iw_setup_cm_core(struct i40iw_device *iwdev)
+{
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+
+ cm_core->iwdev = iwdev;
+ cm_core->dev = &iwdev->sc_dev;
+
+ INIT_LIST_HEAD(&cm_core->connected_nodes);
+ INIT_LIST_HEAD(&cm_core->listen_nodes);
+
+ init_timer(&cm_core->tcp_timer);
+ cm_core->tcp_timer.function = i40iw_cm_timer_tick;
+ cm_core->tcp_timer.data = (unsigned long)cm_core;
+
+ spin_lock_init(&cm_core->ht_lock);
+ spin_lock_init(&cm_core->listen_list_lock);
+
+ cm_core->event_wq = create_singlethread_workqueue("iwewq");
+ cm_core->disconn_wq = create_singlethread_workqueue("iwdwq");
+}
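+
+/*
+ * Note: the two create_singlethread_workqueue() calls above can return NULL
+ * on allocation failure; this setup path does not check for that, so the
+ * rest of the driver assumes both workqueues exist.
+ */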
+
+/**
+ * i40iw_cleanup_cm_core - deallocate a top level instance of a
+ * cm core
+ * @cm_core: cm's core
+ */
+void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
+{
+ unsigned long flags;
+
+ if (!cm_core)
+ return;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (timer_pending(&cm_core->tcp_timer))
+ del_timer_sync(&cm_core->tcp_timer);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ destroy_workqueue(cm_core->event_wq);
+ destroy_workqueue(cm_core->disconn_wq);
+}
+
+/**
+ * i40iw_init_tcp_ctx - setup qp context
+ * @cm_node: connection's node
+ * @tcp_info: offload info for tcp
+ * @iwqp: associate qp for the connection
+ */
+static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
+ struct i40iw_tcp_offload_info *tcp_info,
+ struct i40iw_qp *iwqp)
+{
+ tcp_info->ipv4 = cm_node->ipv4;
+ tcp_info->drop_ooo_seg = true;
+ tcp_info->wscale = true;
+ tcp_info->ignore_tcp_opt = true;
+ tcp_info->ignore_tcp_uns_opt = true;
+ tcp_info->no_nagle = false;
+
+ tcp_info->ttl = I40IW_DEFAULT_TTL;
+ tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
+ tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
+ tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;
+
+ tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+ tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
+ tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
+ tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+ tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+
+ tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
+ tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+ tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
+ tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
+ cm_node->tcp_cntxt.rcv_wscale);
+
+ tcp_info->flow_label = 0;
+ tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
+ if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ tcp_info->insert_vlan_tag = true;
+ tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
+ }
+ if (cm_node->ipv4) {
+ tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
+ tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
+
+ tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
+ tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
+ tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table(iwqp->iwdev,
+ &tcp_info->dest_ip_addr3,
+ true,
+ NULL,
+ I40IW_ARP_RESOLVE));
+ } else {
+ tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
+ tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
+ tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
+ tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
+ tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
+ tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
+ tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
+ tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
+ tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
+ tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
+ tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table(
+ iwqp->iwdev,
+ &tcp_info->dest_ip_addr0,
+ false,
+ NULL,
+ I40IW_ARP_RESOLVE));
+ }
+}
+
+/**
+ * i40iw_cm_init_tsa_conn - setup qp for RTS
+ * @iwqp: associate qp for the connection
+ * @cm_node: connection's node
+ */
+static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
+ struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_tcp_offload_info tcp_info;
+ struct i40iwarp_offload_info *iwarp_info;
+ struct i40iw_qp_host_ctx_info *ctx_info;
+ struct i40iw_device *iwdev = iwqp->iwdev;
+ struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
+
+ memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
+ iwarp_info = &iwqp->iwarp_info;
+ ctx_info = &iwqp->ctx_info;
+
+ ctx_info->tcp_info = &tcp_info;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+ iwarp_info->ord_size = cm_node->ord_size;
+ iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);
+
+ if (iwarp_info->ord_size == 1)
+ iwarp_info->ord_size = 2;
+
+ iwarp_info->rd_enable = true;
+ iwarp_info->rdmap_ver = 1;
+ iwarp_info->ddp_ver = 1;
+
+ iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
+
+ ctx_info->tcp_info_valid = true;
+ ctx_info->iwarp_info_valid = true;
+
+ i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
+ if (cm_node->snd_mark_en) {
+ iwarp_info->snd_mark_en = true;
+ iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
+ SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
+ }
+
+ cm_node->state = I40IW_CM_STATE_OFFLOADED;
+ tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+ tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
+
+ dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
+
+ /* once tcp_info is set, no need to do it again */
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+}
+
+/**
+ * i40iw_cm_disconn - when a connection is being closed
+ * @iwqp: associate qp for the connection
+ */
+int i40iw_cm_disconn(struct i40iw_qp *iwqp)
+{
+ struct disconn_work *work;
+ struct i40iw_device *iwdev = iwqp->iwdev;
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM; /* Timer will clean up */
+
+ i40iw_add_ref(&iwqp->ibqp);
+ work->iwqp = iwqp;
+ INIT_WORK(&work->work, i40iw_disconnect_worker);
+ queue_work(cm_core->disconn_wq, &work->work);
+ return 0;
+}
+
+/**
+ * i40iw_loopback_nop - build a nop wqe on the sq
+ * @qp: associated hw qp
+ */
+static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = qp->qp_uk.sq_base->elem;
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+ LS_64(0, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+ set_64bit_val(wqe, 24, header);
+}
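+
+/*
+ * The nop above fills the four 64-bit words of one SQ WQE: the first three
+ * words are cleared and the control word at byte offset 24 carries the
+ * opcode, the (unset) signaled-completion bit and the current valid/polarity
+ * bit.
+ */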
+
+/**
+ * i40iw_qp_disconnect - free qp and close cm
+ * @iwqp: associate qp for the connection
+ */
+static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_ib_device *iwibdev;
+
+ iwdev = to_iwdev(iwqp->ibqp.device);
+ if (!iwdev) {
+ i40iw_pr_err("iwdev == NULL\n");
+ return;
+ }
+
+ iwibdev = iwdev->iwibdev;
+
+ if (iwqp->active_conn) {
+ /* indicate this connection is NOT active */
+ iwqp->active_conn = 0;
+ } else {
+ /* Need to free the Last Streaming Mode Message */
+ if (iwqp->ietf_mem.va) {
+ if (iwqp->lsmm_mr)
+ iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
+ }
+ }
+
+ /* close the CM node down if it is still active */
+ if (iwqp->cm_node) {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
+ i40iw_cm_close(iwqp->cm_node);
+ }
+}
+
+/**
+ * i40iw_cm_disconn_true - called by worker thread to disconnect qp
+ * @iwqp: associate qp for the connection
+ */
+static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
+{
+ struct iw_cm_id *cm_id;
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+ u16 last_ae;
+ u8 original_hw_tcp_state;
+ u8 original_ibqp_state;
+ int disconn_status = 0;
+ int issue_disconn = 0;
+ int issue_close = 0;
+ int issue_flush = 0;
+ struct ib_event ibevent;
+ unsigned long flags;
+ int ret;
+
+ if (!iwqp) {
+ i40iw_pr_err("iwqp == NULL\n");
+ return;
+ }
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ cm_id = iwqp->cm_id;
+ /* make sure we haven't already closed this connection */
+ if (!cm_id) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return;
+ }
+
+ iwdev = to_iwdev(iwqp->ibqp.device);
+
+ original_hw_tcp_state = iwqp->hw_tcp_state;
+ original_ibqp_state = iwqp->ibqp_state;
+ last_ae = iwqp->last_aeq;
+
+ if (qp->term_flags) {
+ issue_disconn = 1;
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ /*
+ * When the term timer expires after cm_timer, we don't want the
+ * terminate-handler to issue cm_disconn, which can re-free a QP
+ * even after its refcnt has reached zero.
+ */
+ del_timer(&iwqp->terminate_timer);
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+ issue_disconn = 1;
+ if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
+ disconn_status = -ECONNRESET;
+ }
+
+ if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
+ (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
+ (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
+ (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (issue_flush && !iwqp->destroyed) {
+ /* Flush the queues */
+ i40iw_flush_wqes(iwdev, iwqp);
+
+ if (qp->term_flags) {
+ ibevent.device = iwqp->ibqp.device;
+ ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
+ IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
+ ibevent.element.qp = &iwqp->ibqp;
+ iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
+ }
+ }
+
+ if (cm_id && cm_id->event_handler) {
+ if (issue_disconn) {
+ ret = i40iw_send_cm_event(NULL,
+ cm_id,
+ IW_CM_EVENT_DISCONNECT,
+ disconn_status);
+
+ if (ret)
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "disconnect event failed %s: - cm_id = %p\n",
+ __func__, cm_id);
+ }
+ if (issue_close) {
+ i40iw_qp_disconnect(iwqp);
+ cm_id->provider_data = iwqp;
+ ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
+ if (ret)
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "close event failed %s: - cm_id = %p\n",
+ __func__, cm_id);
+ cm_id->rem_ref(cm_id);
+ }
+ }
+}
+
+/**
+ * i40iw_disconnect_worker - worker for connection close
+ * @work: points to disconn structure
+ */
+static void i40iw_disconnect_worker(struct work_struct *work)
+{
+ struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+ struct i40iw_qp *iwqp = dwork->iwqp;
+
+ kfree(dwork);
+ i40iw_cm_disconn_true(iwqp);
+ i40iw_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * i40iw_accept - registered call for connection to be accepted
+ * @cm_id: cm information for passive connection
+ * @conn_param: accept parameters
+ */
+int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct i40iw_qp *iwqp;
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_cm_node *cm_node;
+ struct ib_qp_attr attr;
+ int passive_state;
+ struct i40iw_ib_device *iwibdev;
+ struct ib_mr *ibmr;
+ struct i40iw_pd *iwpd;
+ u16 buf_len = 0;
+ struct i40iw_kmem_info accept;
+ enum i40iw_status_code status;
+ u64 tagged_offset;
+
+ memset(&attr, 0, sizeof(attr));
+ ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+
+ iwqp = to_iwqp(ibqp);
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->sc_dev;
+ cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
+
+ if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
+ cm_node->ipv4 = true;
+ cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
+ } else {
+ cm_node->ipv4 = false;
+ i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
+ }
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "Accept vlan_id=%d\n",
+ cm_node->vlan_id);
+ if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
+ if (cm_node->loopbackpartner)
+ i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
+ i40iw_rem_ref_cm_node(cm_node);
+ return -EINVAL;
+ }
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == I40IW_SEND_RESET_EVENT) {
+ i40iw_rem_ref_cm_node(cm_node);
+ return -ECONNRESET;
+ }
+
+ cm_node->cm_core->stats_accepts++;
+ iwqp->cm_node = (void *)cm_node;
+ cm_node->iwqp = iwqp;
+
+ buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN;
+
+ status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
+
+ if (status)
+ return -ENOMEM;
+ cm_node->pdata.size = conn_param->private_data_len;
+ accept.addr = iwqp->ietf_mem.va;
+ accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
+ memcpy(accept.addr + accept.size, conn_param->private_data,
+ conn_param->private_data_len);
+
+ /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+ if ((cm_node->ipv4 &&
+ !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+ (!cm_node->ipv4 &&
+ !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
+ iwibdev = iwdev->iwibdev;
+ iwpd = iwqp->iwpd;
+ tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
+ ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
+ iwqp->ietf_mem.pa,
+ buf_len,
+ IB_ACCESS_LOCAL_WRITE,
+ &tagged_offset);
+ if (IS_ERR(ibmr)) {
+ i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
+ return -ENOMEM;
+ }
+
+ ibmr->pd = &iwpd->ibpd;
+ ibmr->device = iwpd->ibpd.device;
+ iwqp->lsmm_mr = ibmr;
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+ if (is_remote_ne020_or_chelsio(cm_node))
+ dev->iw_priv_qp_ops->qp_send_lsmm(
+ &iwqp->sc_qp,
+ iwqp->ietf_mem.va,
+ (accept.size + conn_param->private_data_len),
+ ibmr->lkey);
+ else
+ dev->iw_priv_qp_ops->qp_send_lsmm(
+ &iwqp->sc_qp,
+ iwqp->ietf_mem.va,
+ (accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN),
+ ibmr->lkey);
+
+ } else {
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+ i40iw_loopback_nop(&iwqp->sc_qp);
+ }
+
+ if (iwqp->page)
+ kunmap(iwqp->page);
+
+ iwqp->cm_id = cm_id;
+ cm_node->cm_id = cm_id;
+
+ cm_id->provider_data = (void *)iwqp;
+ iwqp->active_conn = 0;
+
+ cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+ i40iw_cm_init_tsa_conn(iwqp, cm_node);
+ cm_id->add_ref(cm_id);
+ i40iw_add_ref(&iwqp->ibqp);
+
+ i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (cm_node->loopbackpartner) {
+ cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
+
+ /* copy entire MPA frame to our cm_node's frame */
+ memcpy(cm_node->loopbackpartner->pdata_buf,
+ conn_param->private_data,
+ conn_param->private_data_len);
+ i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
+ }
+
+ cm_node->accelerated = 1;
+ if (cm_node->accept_pend) {
+ if (!cm_node->listener)
+ i40iw_pr_err("cm_node->listener NULL for passive node\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+ return 0;
+}
+
+/**
+ * i40iw_reject - registered call for connection to be rejected
+ * @cm_id: cm information for passive connection
+ * @pdata: private data to be sent
+ * @pdata_len: private data length
+ */
+int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_cm_node *loopback;
+
+ cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
+ loopback = cm_node->loopbackpartner;
+ cm_node->cm_id = cm_id;
+ cm_node->pdata.size = pdata_len;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+ cm_node->cm_core->stats_rejects++;
+
+ if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
+ return -EINVAL;
+
+ if (loopback) {
+ memcpy(&loopback->pdata_buf, pdata, pdata_len);
+ loopback->pdata.size = pdata_len;
+ }
+
+ return i40iw_cm_reject(cm_node, pdata, pdata_len);
+}
+
+/**
+ * i40iw_connect - registered call for connection to be established
+ * @cm_id: cm information for active connection
+ * @conn_param: Information about the connection
+ */
+int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct i40iw_qp *iwqp;
+ struct i40iw_device *iwdev;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_cm_info cm_info;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ int apbvt_set = 0;
+ enum i40iw_status_code status;
+
+ ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ iwqp = to_iwqp(ibqp);
+ if (!iwqp)
+ return -EINVAL;
+ iwdev = to_iwdev(iwqp->ibqp.device);
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+ if (!(laddr->sin_port) || !(raddr->sin_port))
+ return -EINVAL;
+
+ iwqp->active_conn = 1;
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = iwqp;
+
+ /* set up the connection params for the node */
+ if (cm_id->remote_addr.ss_family == AF_INET) {
+ cm_info.ipv4 = true;
+ memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
+ memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+ cm_info.rem_port = ntohs(raddr->sin_port);
+ cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ cm_info.ipv4 = false;
+ i40iw_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ i40iw_copy_ip_ntohl(cm_info.rem_addr,
+ raddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ cm_info.rem_port = ntohs(raddr6->sin6_port);
+ i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
+ }
+ cm_info.cm_id = cm_id;
+ if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
+ (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
+ raddr6->sin6_addr.in6_u.u6_addr32,
+ sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
+ status = i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ NULL,
+ true);
+ if (status)
+ return -EINVAL;
+ }
+ status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
+ if (status) {
+ i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ return -EINVAL;
+ }
+
+ apbvt_set = 1;
+ cm_id->add_ref(cm_id);
+ cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
+ conn_param->private_data_len,
+ (void *)conn_param->private_data,
+ &cm_info);
+ if (!cm_node) {
+ i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+
+ if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
+ cm_info.loc_port))
+ i40iw_manage_apbvt(iwdev,
+ cm_info.loc_port,
+ I40IW_MANAGE_APBVT_DEL);
+ cm_id->rem_ref(cm_id);
+ iwdev->cm_core.stats_connect_errs++;
+ return -ENOMEM;
+ }
+
+ i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
+ if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
+ !cm_node->ord_size)
+ cm_node->ord_size = 1;
+
+ cm_node->apbvt_set = apbvt_set;
+ cm_node->qhash_set = true;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ iwqp->cm_id = cm_id;
+ i40iw_add_ref(&iwqp->ibqp);
+ return 0;
+}
+
+/**
+ * i40iw_create_listen - registered call creating listener
+ * @cm_id: cm information for passive connection
+ * @backlog: maximum number of pending accepts
+ */
+int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_cm_listener *cm_listen_node;
+ struct i40iw_cm_info cm_info;
+ enum i40iw_status_code ret;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in6 *laddr6;
+ bool wildcard = false;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ memset(&cm_info, 0, sizeof(cm_info));
+ if (laddr->sin_family == AF_INET) {
+ cm_info.ipv4 = true;
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+
+ if (laddr->sin_addr.s_addr != INADDR_ANY)
+ cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
+ else
+ wildcard = true;
+
+ } else {
+ cm_info.ipv4 = false;
+ i40iw_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
+ i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
+ &cm_info.vlan_id,
+ NULL);
+ else
+ wildcard = true;
+ }
+ cm_info.backlog = backlog;
+ cm_info.cm_id = cm_id;
+
+ cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
+ if (!cm_listen_node) {
+ i40iw_pr_err("cm_listen_node == NULL\n");
+ return -ENOMEM;
+ }
+
+ cm_id->provider_data = cm_listen_node;
+
+ if (!cm_listen_node->reused_node) {
+ if (wildcard) {
+ if (cm_info.ipv4)
+ ret = i40iw_add_mqh_4(iwdev,
+ &cm_info,
+ cm_listen_node);
+ else
+ ret = i40iw_add_mqh_6(iwdev,
+ &cm_info,
+ cm_listen_node);
+ if (ret)
+ goto error;
+
+ ret = i40iw_manage_apbvt(iwdev,
+ cm_info.loc_port,
+ I40IW_MANAGE_APBVT_ADD);
+
+ if (ret)
+ goto error;
+ } else {
+ ret = i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ NULL,
+ true);
+ if (ret)
+ goto error;
+ cm_listen_node->qhash_set = true;
+ ret = i40iw_manage_apbvt(iwdev,
+ cm_info.loc_port,
+ I40IW_MANAGE_APBVT_ADD);
+ if (ret)
+ goto error;
+ }
+ }
+ cm_id->add_ref(cm_id);
+ cm_listen_node->cm_core->stats_listen_created++;
+ return 0;
+ error:
+ i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
+ return -EINVAL;
+}
+
+/**
+ * i40iw_destroy_listen - registered call to destroy listener
+ * @cm_id: cm information for passive connection
+ */
+int i40iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+ struct i40iw_device *iwdev;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (cm_id->provider_data)
+ i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
+ else
+ i40iw_pr_err("cm_id->provider_data was NULL\n");
+
+ cm_id->rem_ref(cm_id);
+
+ return 0;
+}
+
+/**
+ * i40iw_cm_event_connected - handle connected active node
+ * @event: the info for cm_node of connection
+ */
+static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
+{
+ struct i40iw_qp *iwqp;
+ struct i40iw_device *iwdev;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_sc_dev *dev;
+ struct ib_qp_attr attr;
+ struct iw_cm_id *cm_id;
+ int status;
+ bool read0;
+
+ cm_node = event->cm_node;
+ cm_id = cm_node->cm_id;
+ iwqp = (struct i40iw_qp *)cm_id->provider_data;
+ iwdev = to_iwdev(iwqp->ibqp.device);
+ dev = &iwdev->sc_dev;
+
+ if (iwqp->destroyed) {
+ status = -ETIMEDOUT;
+ goto error;
+ }
+ i40iw_cm_init_tsa_conn(iwqp, cm_node);
+ read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+ dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
+ if (iwqp->page)
+ kunmap(iwqp->page);
+ status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
+ if (status)
+ i40iw_pr_err("send cm event\n");
+
+ memset(&attr, 0, sizeof(attr));
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+ cm_node->accelerated = 1;
+ if (cm_node->accept_pend) {
+ if (!cm_node->listener)
+ i40iw_pr_err("listener is null for passive node\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+ return;
+
+error:
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ i40iw_send_cm_event(event->cm_node,
+ cm_id,
+ IW_CM_EVENT_CONNECT_REPLY,
+ status);
+ cm_id->rem_ref(cm_id);
+ i40iw_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * i40iw_cm_event_reset - handle reset
+ * @event: the info for cm_node of connection
+ */
+static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
+{
+ struct i40iw_cm_node *cm_node = event->cm_node;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct i40iw_qp *iwqp;
+
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+ if (!iwqp)
+ return;
+
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "reset event %p - cm_id = %p\n",
+ event->cm_node, cm_id);
+ iwqp->cm_id = NULL;
+
+ i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
+ i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
+}
+
+/**
+ * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
+ * @work: pointer to cm event info
+ */
+static void i40iw_cm_event_handler(struct work_struct *work)
+{
+ struct i40iw_cm_event *event = container_of(work,
+ struct i40iw_cm_event,
+ event_work);
+ struct i40iw_cm_node *cm_node;
+
+ if (!event || !event->cm_node || !event->cm_node->cm_core)
+ return;
+
+ cm_node = event->cm_node;
+
+ switch (event->type) {
+ case I40IW_CM_EVENT_MPA_REQ:
+ i40iw_send_cm_event(cm_node,
+ cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REQUEST,
+ 0);
+ break;
+ case I40IW_CM_EVENT_RESET:
+ i40iw_cm_event_reset(event);
+ break;
+ case I40IW_CM_EVENT_CONNECTED:
+ if (!event->cm_node->cm_id ||
+ (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))
+ break;
+ i40iw_cm_event_connected(event);
+ break;
+ case I40IW_CM_EVENT_MPA_REJECT:
+ if (!event->cm_node->cm_id ||
+ (cm_node->state == I40IW_CM_STATE_OFFLOADED))
+ break;
+ i40iw_send_cm_event(cm_node,
+ cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNREFUSED);
+ break;
+ case I40IW_CM_EVENT_ABORTED:
+ if (!event->cm_node->cm_id ||
+ (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))
+ break;
+ i40iw_event_connect_error(event);
+ break;
+ default:
+ i40iw_pr_err("event type = %d\n", event->type);
+ break;
+ }
+
+ event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
+ i40iw_rem_ref_cm_node(event->cm_node);
+ kfree(event);
+}
+
+/**
+ * i40iw_cm_post_event - queue event request for worker thread
+ * @event: cm node's info for up event call
+ */
+static void i40iw_cm_post_event(struct i40iw_cm_event *event)
+{
+ atomic_inc(&event->cm_node->ref_count);
+ event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
+ INIT_WORK(&event->event_work, i40iw_cm_event_handler);
+
+ queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
new file mode 100644
index 000000000000..5f8ceb4a8e84
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -0,0 +1,456 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_CM_H
+#define I40IW_CM_H
+
+#define QUEUE_EVENTS
+
+#define I40IW_MANAGE_APBVT_DEL 0
+#define I40IW_MANAGE_APBVT_ADD 1
+
+#define I40IW_MPA_REQUEST_ACCEPT 1
+#define I40IW_MPA_REQUEST_REJECT 2
+
+/* IETF MPA -- defines, enums, structs */
+#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
+#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
+#define IETF_MPA_KEY_SIZE 16
+#define IETF_MPA_VERSION 1
+#define IETF_MAX_PRIV_DATA_LEN 512
+#define IETF_MPA_FRAME_SIZE 20
+#define IETF_RTR_MSG_SIZE 4
+#define IETF_MPA_V2_FLAG 0x10
+#define SNDMARKER_SEQNMASK 0x000001FF
+
+#define I40IW_MAX_IETF_SIZE 32
+
+#define MPA_ZERO_PAD_LEN 4
+
+/* IETF RTR MSG Fields */
+#define IETF_PEER_TO_PEER 0x8000
+#define IETF_FLPDU_ZERO_LEN 0x4000
+#define IETF_RDMA0_WRITE 0x8000
+#define IETF_RDMA0_READ 0x4000
+#define IETF_NO_IRD_ORD 0x3FFF
+
+/* HW-supported IRD sizes */
+#define I40IW_HW_IRD_SETTING_2 2
+#define I40IW_HW_IRD_SETTING_4 4
+#define I40IW_HW_IRD_SETTING_8 8
+#define I40IW_HW_IRD_SETTING_16 16
+#define I40IW_HW_IRD_SETTING_32 32
+#define I40IW_HW_IRD_SETTING_64 64
+
+enum ietf_mpa_flags {
+ IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
+ IETF_MPA_FLAGS_CRC = 0x40, /* CRC enabled */
+ IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
+};
+
+struct ietf_mpa_v1 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ u8 priv_data[0];
+};
+
+#define ietf_mpa_req_resp_frame ietf_mpa_frame
+
+struct ietf_rtr_msg {
+ __be16 ctrl_ird;
+ __be16 ctrl_ord;
+};
+
+struct ietf_mpa_v2 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ struct ietf_rtr_msg rtr_msg;
+ u8 priv_data[0];
+};
+
+struct i40iw_cm_node;
+enum i40iw_timer_type {
+ I40IW_TIMER_TYPE_SEND,
+ I40IW_TIMER_TYPE_RECV,
+ I40IW_TIMER_NODE_CLEANUP,
+ I40IW_TIMER_TYPE_CLOSE,
+};
+
+#define I40IW_PASSIVE_STATE_INDICATED 0
+#define I40IW_DO_NOT_SEND_RESET_EVENT 1
+#define I40IW_SEND_RESET_EVENT 2
+
+#define MAX_I40IW_IFS 4
+
+#define SET_ACK 0x1
+#define SET_SYN 0x2
+#define SET_FIN 0x4
+#define SET_RST 0x8
+
+#define TCP_OPTIONS_PADDING 3
+
+struct option_base {
+ u8 optionnum;
+ u8 length;
+};
+
+enum option_numbers {
+ OPTION_NUMBER_END,
+ OPTION_NUMBER_NONE,
+ OPTION_NUMBER_MSS,
+ OPTION_NUMBER_WINDOW_SCALE,
+ OPTION_NUMBER_SACK_PERM,
+ OPTION_NUMBER_SACK,
+ OPTION_NUMBER_WRITE0 = 0xbc
+};
+
+struct option_mss {
+ u8 optionnum;
+ u8 length;
+ __be16 mss;
+};
+
+struct option_windowscale {
+ u8 optionnum;
+ u8 length;
+ u8 shiftcount;
+};
+
+union all_known_options {
+ char as_end;
+ struct option_base as_base;
+ struct option_mss as_mss;
+ struct option_windowscale as_windowscale;
+};
+
+struct i40iw_timer_entry {
+ struct list_head list;
+ unsigned long timetosend; /* jiffies */
+ struct i40iw_puda_buf *sqbuf;
+ u32 type;
+ u32 retrycount;
+ u32 retranscount;
+ u32 context;
+ u32 send_retrans;
+ int close_when_complete;
+};
+
+#define I40IW_DEFAULT_RETRYS 64
+#define I40IW_DEFAULT_RETRANS 8
+#define I40IW_DEFAULT_TTL 0x40
+#define I40IW_DEFAULT_RTT_VAR 0x6
+#define I40IW_DEFAULT_SS_THRESH 0x3FFFFFFF
+#define I40IW_DEFAULT_REXMIT_THRESH 8
+
+#define I40IW_RETRY_TIMEOUT HZ
+#define I40IW_SHORT_TIME 10
+#define I40IW_LONG_TIME (2 * HZ)
+#define I40IW_MAX_TIMEOUT ((unsigned long)(12 * HZ))
+
+#define I40IW_CM_HASHTABLE_SIZE 1024
+#define I40IW_CM_TCP_TIMER_INTERVAL 3000
+#define I40IW_CM_DEFAULT_MTU 1540
+#define I40IW_CM_DEFAULT_FRAME_CNT 10
+#define I40IW_CM_THREAD_STACK_SIZE 256
+#define I40IW_CM_DEFAULT_RCV_WND 64240
+#define I40IW_CM_DEFAULT_RCV_WND_SCALED 0x3fffc
+#define I40IW_CM_DEFAULT_RCV_WND_SCALE 2
+#define I40IW_CM_DEFAULT_FREE_PKTS 0x000A
+#define I40IW_CM_FREE_PKT_LO_WATERMARK 2
+
+#define I40IW_CM_DEFAULT_MSS 536
+
+#define I40IW_CM_DEF_SEQ 0x159bf75f
+#define I40IW_CM_DEF_LOCAL_ID 0x3b47
+
+#define I40IW_CM_DEF_SEQ2 0x18ed5740
+#define I40IW_CM_DEF_LOCAL_ID2 0xb807
+#define MAX_CM_BUFFER (I40IW_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
+
+typedef u32 i40iw_addr_t;
+
+#define i40iw_cm_tsa_context i40iw_qp_context
+
+struct i40iw_qp;
+
+/* cm node transition states */
+enum i40iw_cm_node_state {
+ I40IW_CM_STATE_UNKNOWN,
+ I40IW_CM_STATE_INITED,
+ I40IW_CM_STATE_LISTENING,
+ I40IW_CM_STATE_SYN_RCVD,
+ I40IW_CM_STATE_SYN_SENT,
+ I40IW_CM_STATE_ONE_SIDE_ESTABLISHED,
+ I40IW_CM_STATE_ESTABLISHED,
+ I40IW_CM_STATE_ACCEPTING,
+ I40IW_CM_STATE_MPAREQ_SENT,
+ I40IW_CM_STATE_MPAREQ_RCVD,
+ I40IW_CM_STATE_MPAREJ_RCVD,
+ I40IW_CM_STATE_OFFLOADED,
+ I40IW_CM_STATE_FIN_WAIT1,
+ I40IW_CM_STATE_FIN_WAIT2,
+ I40IW_CM_STATE_CLOSE_WAIT,
+ I40IW_CM_STATE_TIME_WAIT,
+ I40IW_CM_STATE_LAST_ACK,
+ I40IW_CM_STATE_CLOSING,
+ I40IW_CM_STATE_LISTENER_DESTROYED,
+ I40IW_CM_STATE_CLOSED
+};
+
+enum mpa_frame_version {
+ IETF_MPA_V1 = 1,
+ IETF_MPA_V2 = 2
+};
+
+enum mpa_frame_key {
+ MPA_KEY_REQUEST,
+ MPA_KEY_REPLY
+};
+
+enum send_rdma0 {
+ SEND_RDMA_READ_ZERO = 1,
+ SEND_RDMA_WRITE_ZERO = 2
+};
+
+enum i40iw_tcpip_pkt_type {
+ I40IW_PKT_TYPE_UNKNOWN,
+ I40IW_PKT_TYPE_SYN,
+ I40IW_PKT_TYPE_SYNACK,
+ I40IW_PKT_TYPE_ACK,
+ I40IW_PKT_TYPE_FIN,
+ I40IW_PKT_TYPE_RST
+};
+
+/* CM context params */
+struct i40iw_cm_tcp_context {
+ u8 client;
+
+ u32 loc_seq_num;
+ u32 loc_ack_num;
+ u32 rem_ack_num;
+ u32 rcv_nxt;
+
+ u32 loc_id;
+ u32 rem_id;
+
+ u32 snd_wnd;
+ u32 max_snd_wnd;
+
+ u32 rcv_wnd;
+ u32 mss;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+
+ struct timeval sent_ts;
+};
+
+enum i40iw_cm_listener_state {
+ I40IW_CM_LISTENER_PASSIVE_STATE = 1,
+ I40IW_CM_LISTENER_ACTIVE_STATE = 2,
+ I40IW_CM_LISTENER_EITHER_STATE = 3
+};
+
+struct i40iw_cm_listener {
+ struct list_head list;
+ struct i40iw_cm_core *cm_core;
+ u8 loc_mac[ETH_ALEN];
+ u32 loc_addr[4];
+ u16 loc_port;
+ u32 map_loc_addr[4];
+ u16 map_loc_port;
+ struct iw_cm_id *cm_id;
+ atomic_t ref_count;
+ struct i40iw_device *iwdev;
+ atomic_t pend_accepts_cnt;
+ int backlog;
+ enum i40iw_cm_listener_state listener_state;
+ u32 reused_node;
+ u8 user_pri;
+ u16 vlan_id;
+ bool qhash_set;
+ bool ipv4;
+ struct list_head child_listen_list;
+
+};
+
+struct i40iw_kmem_info {
+ void *addr;
+ u32 size;
+};
+
+/* per connection node and node state information */
+struct i40iw_cm_node {
+ u32 loc_addr[4], rem_addr[4];
+ u16 loc_port, rem_port;
+ u32 map_loc_addr[4], map_rem_addr[4];
+ u16 map_loc_port, map_rem_port;
+ u16 vlan_id;
+ enum i40iw_cm_node_state state;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ atomic_t ref_count;
+ struct i40iw_qp *iwqp;
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_cm_tcp_context tcp_cntxt;
+ struct i40iw_cm_core *cm_core;
+ struct i40iw_cm_node *loopbackpartner;
+ struct i40iw_timer_entry *send_entry;
+ struct i40iw_timer_entry *close_entry;
+ spinlock_t retrans_list_lock; /* cm transmit packet */
+ enum send_rdma0 send_rdma0_op;
+ u16 ird_size;
+ u16 ord_size;
+ u16 mpav2_ird_ord;
+ struct iw_cm_id *cm_id;
+ struct list_head list;
+ int accelerated;
+ struct i40iw_cm_listener *listener;
+ int apbvt_set;
+ int accept_pend;
+ struct list_head timer_entry;
+ struct list_head reset_entry;
+ atomic_t passive_state;
+ bool qhash_set;
+ u8 user_pri;
+ bool ipv4;
+ bool snd_mark_en;
+ u16 lsmm_size;
+ enum mpa_frame_version mpa_frame_rev;
+ struct i40iw_kmem_info pdata;
+ union {
+ struct ietf_mpa_v1 mpa_frame;
+ struct ietf_mpa_v2 mpa_v2_frame;
+ };
+
+ u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
+ struct i40iw_kmem_info mpa_hdr;
+};
+
+/* structure for client or CM to fill when making CM api calls. */
+/* - only need to set relevant data, based on op. */
+struct i40iw_cm_info {
+ struct iw_cm_id *cm_id;
+ u16 loc_port;
+ u16 rem_port;
+ u32 loc_addr[4];
+ u32 rem_addr[4];
+ u16 map_loc_port;
+ u16 map_rem_port;
+ u32 map_loc_addr[4];
+ u32 map_rem_addr[4];
+ u16 vlan_id;
+ int backlog;
+ u16 user_pri;
+ bool ipv4;
+};
+
+/* CM event codes */
+enum i40iw_cm_event_type {
+ I40IW_CM_EVENT_UNKNOWN,
+ I40IW_CM_EVENT_ESTABLISHED,
+ I40IW_CM_EVENT_MPA_REQ,
+ I40IW_CM_EVENT_MPA_CONNECT,
+ I40IW_CM_EVENT_MPA_ACCEPT,
+ I40IW_CM_EVENT_MPA_REJECT,
+ I40IW_CM_EVENT_MPA_ESTABLISHED,
+ I40IW_CM_EVENT_CONNECTED,
+ I40IW_CM_EVENT_RESET,
+ I40IW_CM_EVENT_ABORTED
+};
+
+/* event to post to CM event handler */
+struct i40iw_cm_event {
+ enum i40iw_cm_event_type type;
+ struct i40iw_cm_info cm_info;
+ struct work_struct event_work;
+ struct i40iw_cm_node *cm_node;
+};
+
+struct i40iw_cm_core {
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_dev *dev;
+
+ struct list_head listen_nodes;
+ struct list_head connected_nodes;
+
+ struct timer_list tcp_timer;
+
+ struct workqueue_struct *event_wq;
+ struct workqueue_struct *disconn_wq;
+
+ spinlock_t ht_lock; /* manage hash table */
+ spinlock_t listen_list_lock; /* listen list */
+
+ u64 stats_nodes_created;
+ u64 stats_nodes_destroyed;
+ u64 stats_listen_created;
+ u64 stats_listen_destroyed;
+ u64 stats_listen_nodes_created;
+ u64 stats_listen_nodes_destroyed;
+ u64 stats_loopbacks;
+ u64 stats_accepts;
+ u64 stats_rejects;
+ u64 stats_connect_errs;
+ u64 stats_passive_errs;
+ u64 stats_pkt_retrans;
+ u64 stats_backlog_drops;
+};
+
+int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *sqbuf,
+ enum i40iw_timer_type type,
+ int send_retrans,
+ int close_when_complete);
+
+int i40iw_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
+int i40iw_reject(struct iw_cm_id *, const void *, u8);
+int i40iw_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
+int i40iw_create_listen(struct iw_cm_id *, int);
+int i40iw_destroy_listen(struct iw_cm_id *);
+
+int i40iw_cm_start(struct i40iw_device *);
+int i40iw_cm_stop(struct i40iw_device *);
+
+int i40iw_arp_table(struct i40iw_device *iwdev,
+ u32 *ip_addr,
+ bool ipv4,
+ u8 *mac_addr,
+ u32 action);
+
+#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
new file mode 100644
index 000000000000..f05802bf6ca0
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -0,0 +1,4743 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_vf.h"
+#include "i40iw_virtchnl.h"
+
+/**
+ * i40iw_insert_wqe_hdr - write wqe header
+ * @wqe: cqp wqe for header
+ * @header: header for the cqp wqe
+ */
+static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+{
+ wmb(); /* make sure WQE is populated before polarity is set */
+ set_64bit_val(wqe, 24, header);
+}
+
+/**
+ * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
+ * @cqp: struct for cqp hw
+ * @val: cqp tail register value
+ * @tail: wqtail register value
+ * @error: cqp processing err
+ */
+static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
+ u32 *val,
+ u32 *tail,
+ u32 *error)
+{
+ if (cqp->dev->is_pf) {
+ *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
+ *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
+ *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
+ } else {
+ *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
+ *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
+ *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
+ }
+}
+
+/**
+ * i40iw_cqp_poll_registers - poll cqp registers
+ * @cqp: struct for cqp hw
+ * @tail: wqtail register value
+ * @count: how many times to try for completion
+ */
+static enum i40iw_status_code i40iw_cqp_poll_registers(
+ struct i40iw_sc_cqp *cqp,
+ u32 tail,
+ u32 count)
+{
+ u32 i = 0;
+ u32 newtail, error, val;
+
+ while (i < count) {
+ i++;
+ i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
+ if (error) {
+ error = (cqp->dev->is_pf) ?
+ i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
+ i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
+ return I40IW_ERR_CQP_COMPL_ERROR;
+ }
+ if (newtail != tail) {
+ /* SUCCESS */
+ I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+ return 0;
+ }
+ udelay(I40IW_SLEEP_COUNT);
+ }
+ return I40IW_ERR_TIMEOUT;
+}
+
+/**
+ * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
+ * @buf: ptr to fpm commit buffer
+ * @info: ptr to i40iw_hmc_obj_info struct
+ *
+ * parses fpm commit info and copies the base value
+ * of each hmc object into info
+ */
+static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
+ u64 *buf,
+ struct i40iw_hmc_obj_info *info)
+{
+ u64 temp;
+ u32 i, j;
+ u32 low;
+
+ /* copy base values in obj_info */
+ for (i = I40IW_HMC_IW_QP, j = 0;
+ i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ get_64bit_val(buf, j, &temp);
+ info[i].base = RS_64_1(temp, 32) * 512;
+ low = (u32)(temp);
+ if (low)
+ info[i].cnt = low;
+ }
+ return 0;
+}
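+
+/*
+ * Layout note (derived from the loop above): each hmc object occupies one
+ * 64-bit slot; the low 32 bits carry the object count (applied only when
+ * non-zero) and the high 32 bits carry the base in 512-byte units, so
+ * base = upper32 * 512.
+ */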
+
+/**
+ * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
+ * @buf: ptr to fpm query buffer
+ * @hmc_info: ptr to i40iw_hmc_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ *
+ * parses fpm query buffer and copies the max_cnt and
+ * size values of hmc objects into hmc_info
+ */
+static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
+ u64 *buf,
+ struct i40iw_hmc_info *hmc_info,
+ struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
+{
+ u64 temp;
+ struct i40iw_hmc_obj_info *obj_info;
+ u32 i, j, size;
+ u16 max_pe_sds;
+
+ obj_info = hmc_info->hmc_obj;
+
+ get_64bit_val(buf, 0, &temp);
+ hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
+ max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);
+
+ /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
+ if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
+ max_pe_sds--;
+ hmc_fpm_misc->max_sds = max_pe_sds;
+ hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
+
+ for (i = I40IW_HMC_IW_QP, j = 8;
+ i <= I40IW_HMC_IW_ARP; i++, j += 8) {
+ get_64bit_val(buf, j, &temp);
+ if (i == I40IW_HMC_IW_QP)
+ obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+ else if (i == I40IW_HMC_IW_CQ)
+ obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+ else
+ obj_info[i].max_cnt = (u32)temp;
+
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[i].size = ((u64)1 << size);
+ }
+ for (i = I40IW_HMC_IW_MR, j = 48;
+ i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ get_64bit_val(buf, j, &temp);
+ obj_info[i].max_cnt = (u32)temp;
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[i].size = LS_64_1(1, size);
+ }
+
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+ get_64bit_val(buf, 64, &temp);
+ hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
+ if (!hmc_fpm_misc->xf_block_size)
+ return I40IW_ERR_INVALID_SIZE;
+ get_64bit_val(buf, 80, &temp);
+ hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
+ if (!hmc_fpm_misc->q1_block_size)
+ return I40IW_ERR_INVALID_SIZE;
+ return 0;
+}
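+
+/*
+ * Offset map used above (derived from the get_64bit_val calls): byte 0
+ * holds the first sd index and max sd count, one 64-bit slot per object
+ * follows from byte 8 (QP through ARP) and from byte 48 (MR through
+ * PBLE), with the xf block size at byte 64, the q1 block size at byte 80
+ * and the ceq/ht-multiplier/timer-bucket fields packed at byte 120.
+ */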
+
+/**
+ * i40iw_sc_pd_init - initialize sc pd struct
+ * @dev: sc device struct
+ * @pd: sc pd ptr
+ * @pd_id: pd_id for allocated pd
+ */
+static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_pd *pd,
+ u16 pd_id)
+{
+ pd->size = sizeof(*pd);
+ pd->pd_id = pd_id;
+ pd->dev = dev;
+}
+
+/**
+ * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
+ * @wqsize: size of the wq (sq, rq, srq) to be encoded
+ * @cqpsq: set for the cqp sq, whose encoded size starts at 1 rather than 0
+ */
+u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
+{
+ u8 encoded_size = 0;
+
+ /* cqp sq's hw coded value starts from 1 for size of 4
+ * while it starts from 0 for qp' wq's.
+ */
+ if (cqpsq)
+ encoded_size = 1;
+ wqsize >>= 2;
+ while (wqsize >>= 1)
+ encoded_size++;
+ return encoded_size;
+}
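+
+/*
+ * Worked example of the encoding above: a cqp sq of 2048 entries gives
+ * 2048 >> 2 = 512, which halves nine more times, so the encoded size is
+ * 1 + 9 = 10; the minimum cqp sq of 4 encodes to 1, while a qp wq of 4
+ * (cqpsq == false) encodes to 0.
+ */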
+
+/**
+ * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
+ * @cqp: IWARP control queue pair pointer
+ * @info: IWARP control queue pair init info pointer
+ *
+ * Initializes the object and context buffers for a control Queue Pair.
+ */
+static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
+ struct i40iw_cqp_init_info *info)
+{
+ u8 hw_sq_size;
+
+ if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
+ (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
+ ((info->sq_size & (info->sq_size - 1))))
+ return I40IW_ERR_INVALID_SIZE;
+
+ hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
+ cqp->size = sizeof(*cqp);
+ cqp->sq_size = info->sq_size;
+ cqp->hw_sq_size = hw_sq_size;
+ cqp->sq_base = info->sq;
+ cqp->host_ctx = info->host_ctx;
+ cqp->sq_pa = info->sq_pa;
+ cqp->host_ctx_pa = info->host_ctx_pa;
+ cqp->dev = info->dev;
+ cqp->struct_ver = info->struct_ver;
+ cqp->scratch_array = info->scratch_array;
+ cqp->polarity = 0;
+ cqp->en_datacenter_tcp = info->en_datacenter_tcp;
+ cqp->enabled_vf_count = info->enabled_vf_count;
+ cqp->hmc_profile = info->hmc_profile;
+ info->dev->cqp = cqp;
+
+ I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
+ "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
+ __func__, cqp->sq_size, cqp->hw_sq_size,
+ cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cqp_create - create cqp during bringup
+ * @cqp: struct for cqp hw
+ * @disable_pfpdus: flag to disable pfpdus
+ * @maj_err: If error, major err number
+ * @min_err: If error, minor err number
+ */
+static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
+ bool disable_pfpdus,
+ u16 *maj_err,
+ u16 *min_err)
+{
+ u64 temp;
+ u32 cnt = 0, p1, p2, val = 0, err_code;
+ enum i40iw_status_code ret_code;
+
+ ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
+ &cqp->sdbuf,
+ 128,
+ I40IW_SD_BUF_ALIGNMENT);
+
+ if (ret_code)
+ goto exit;
+
+ temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
+ LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
+
+ if (disable_pfpdus)
+ temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);
+
+ set_64bit_val(cqp->host_ctx, 0, temp);
+ set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
+ temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
+ LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
+ set_64bit_val(cqp->host_ctx, 16, temp);
+ set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
+ set_64bit_val(cqp->host_ctx, 32, 0);
+ set_64bit_val(cqp->host_ctx, 40, 0);
+ set_64bit_val(cqp->host_ctx, 48, 0);
+ set_64bit_val(cqp->host_ctx, 56, 0);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
+ cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);
+
+ p1 = RS_32_1(cqp->host_ctx_pa, 32);
+ p2 = (u32)cqp->host_ctx_pa;
+
+ if (cqp->dev->is_pf) {
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
+ } else {
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
+ }
+ do {
+ if (cnt++ > I40IW_DONE_COUNT) {
+ i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
+ ret_code = I40IW_ERR_TIMEOUT;
+ /*
+ * read the PFPE_CQPERRCODES register to get the minor
+ * and major error code
+ */
+ if (cqp->dev->is_pf)
+ err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
+ else
+ err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
+ *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
+ *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
+ goto exit;
+ }
+ udelay(I40IW_SLEEP_COUNT);
+ if (cqp->dev->is_pf)
+ val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
+ else
+ val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
+ } while (!val);
+
+exit:
+ if (!ret_code)
+ cqp->process_cqp_sds = i40iw_update_sds_noccq;
+ return ret_code;
+}
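+
+/*
+ * Bringup sketch (hypothetical caller, illustrative only): the init info,
+ * including dma-able sq and host_ctx buffers, is assumed to have been
+ * prepared by the caller; _init validates sizes and _create hands the
+ * context to the hardware.
+ */
+#if 0
+static enum i40iw_status_code example_cqp_bringup(struct i40iw_sc_cqp *cqp,
+ struct i40iw_cqp_init_info *info)
+{
+ u16 maj_err = 0, min_err = 0;
+ enum i40iw_status_code ret;
+
+ ret = i40iw_sc_cqp_init(cqp, info);
+ if (ret)
+ return ret;
+ return i40iw_sc_cqp_create(cqp, false, &maj_err, &min_err);
+}
+#endif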
+
+/**
+ * i40iw_sc_cqp_post_sq - post of cqp's sq
+ * @cqp: struct for cqp hw
+ */
+void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
+{
+ if (cqp->dev->is_pf)
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
+ else
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
+
+ i40iw_debug(cqp->dev,
+ I40IW_DEBUG_WQE,
+ "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
+ __func__,
+ cqp->sq_ring.head,
+ cqp->sq_ring.tail,
+ cqp->sq_ring.size);
+}
+
+/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @wqe_idx: we index of cqp ring
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+ u64 *wqe = NULL;
+ u32 wqe_idx;
+ enum i40iw_status_code ret_code;
+
+ if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
+ i40iw_debug(cqp->dev,
+ I40IW_DEBUG_WQE,
+ "%s: ring is full head %x tail %x size %x\n",
+ __func__,
+ cqp->sq_ring.head,
+ cqp->sq_ring.tail,
+ cqp->sq_ring.size);
+ return NULL;
+ }
+ I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+ if (!wqe_idx)
+ cqp->polarity = !cqp->polarity;
+
+ wqe = cqp->sq_base[wqe_idx].elem;
+ cqp->scratch_array[wqe_idx] = scratch;
+ I40IW_CQP_INIT_WQE(wqe);
+
+ return wqe;
+}
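+
+/*
+ * Note on the polarity toggle above: wqe_idx returns to 0 once per pass
+ * through the ring, so cqp->polarity flips each lap; callers stamp it
+ * into the WQEVALID bit of the wqe header so the hardware can tell fresh
+ * wqes from stale ones.
+ */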
+
+/**
+ * i40iw_sc_cqp_destroy - destroy cqp during close
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
+{
+ u32 cnt = 0, val = 1;
+ enum i40iw_status_code ret_code = 0;
+ u32 cqpstat_addr;
+
+ if (cqp->dev->is_pf) {
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
+ cqpstat_addr = I40E_PFPE_CCQPSTATUS;
+ } else {
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
+ cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
+ }
+ do {
+ if (cnt++ > I40IW_DONE_COUNT) {
+ ret_code = I40IW_ERR_TIMEOUT;
+ break;
+ }
+ udelay(I40IW_SLEEP_COUNT);
+ val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
+ } while (val);
+
+ i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_ccq_arm - enable intr for control cq
+ * @ccq: ccq sc struct
+ */
+static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_seq_num;
+
+ /* write to cq doorbell shadow area */
+ /* arm next se should always be zero */
+ get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
+
+ sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
+ arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
+
+ arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
+ arm_seq_num++;
+
+ temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
+ LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
+ LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
+ LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);
+
+ set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
+
+ wmb(); /* make sure shadow area is updated before arming */
+
+ if (ccq->dev->is_pf)
+ i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
+ else
+ i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
+}
+
+/**
+ * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
+ * @ccq: ccq sc struct
+ * @info: completion q entry to return
+ */
+static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
+ struct i40iw_sc_cq *ccq,
+ struct i40iw_ccq_cqe_info *info)
+{
+ u64 qp_ctx, temp, temp1;
+ u64 *cqe;
+ struct i40iw_sc_cqp *cqp;
+ u32 wqe_idx;
+ u8 polarity;
+ enum i40iw_status_code ret_code = 0;
+
+ if (ccq->cq_uk.avoid_mem_cflct)
+ cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
+ else
+ cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);
+
+ get_64bit_val(cqe, 24, &temp);
+ polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
+ if (polarity != ccq->cq_uk.polarity)
+ return I40IW_ERR_QUEUE_EMPTY;
+
+ get_64bit_val(cqe, 8, &qp_ctx);
+ cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
+ info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
+ info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
+ if (info->error) {
+ info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
+ info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
+ }
+ wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
+ info->scratch = cqp->scratch_array[wqe_idx];
+
+ get_64bit_val(cqe, 16, &temp1);
+ info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
+ get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
+ info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
+ info->cqp = cqp;
+
+ /* move the head for cq */
+ I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
+ if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
+ ccq->cq_uk.polarity ^= 1;
+
+ /* update cq tail in cq shadow memory also */
+ I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
+ set_64bit_val(ccq->cq_uk.shadow_area,
+ 0,
+ I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
+ wmb(); /* write shadow area before tail */
+ I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
+ * @cqp: struct for cqp hw
+ * @op_code: cqp opcode for completion
+ * @compl_info: completion q entry to return
+ */
+static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
+ struct i40iw_sc_cqp *cqp,
+ u8 op_code,
+ struct i40iw_ccq_cqe_info *compl_info)
+{
+ struct i40iw_ccq_cqe_info info;
+ struct i40iw_sc_cq *ccq;
+ enum i40iw_status_code ret_code = 0;
+ u32 cnt = 0;
+
+ memset(&info, 0, sizeof(info));
+ ccq = cqp->dev->ccq;
+ while (1) {
+ if (cnt++ > I40IW_DONE_COUNT)
+ return I40IW_ERR_TIMEOUT;
+
+ if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
+ udelay(I40IW_SLEEP_COUNT);
+ continue;
+ }
+
+ if (info.error) {
+ ret_code = I40IW_ERR_CQP_COMPL_ERROR;
+ break;
+ }
+ /* warn if the returned opcode does not match the one requested */
+ if (op_code != info.op_code) {
+ i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
+ "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
+ __func__, op_code, info.op_code);
+ }
+ /* success, exit out of the loop */
+ if (op_code == info.op_code)
+ break;
+ }
+
+ if (compl_info)
+ memcpy(compl_info, &info, sizeof(*compl_info));
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_manage_push_page - Handle push page
+ * @cqp: struct for cqp hw
+ * @info: push page info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_manage_push_page(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_cqp_manage_push_page_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
+ return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, info->qs_handle);
+
+ header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
+ LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
+ LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_manage_hmc_pm_func_table - manage hmc pm function table
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @vf_index: vf index for cqp
+ * @free_pm_fcn: flag to free the pm function
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 vf_index,
+ bool free_pm_fcn,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ if (vf_index >= I40IW_MAX_VF_PER_PF)
+ return I40IW_ERR_INVALID_VF_ID;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
+ LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
+ LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_profile_type: type of profile to set
+ * @vf_num: vf number for profile
+ * @post_sq: flag for cqp db to ring
+ * @poll_registers: flag to poll register for cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 hmc_profile_type,
+ u8 vf_num, bool post_sq,
+ bool poll_registers)
+{
+ u64 *wqe;
+ u64 header;
+ u32 val, tail, error;
+ enum i40iw_status_code ret_code = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16,
+ (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
+ LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));
+
+ header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ if (poll_registers)
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
+ else
+ ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
+ I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
+ NULL);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
+{
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
+}
+
+/**
+ * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
+{
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
+}
+
+/**
+ * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @commit_fpm_mem: memory for fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_commit_fpm_values(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 hmc_fn_id,
+ struct i40iw_dma_mem *commit_fpm_mem,
+ bool post_sq,
+ u8 wait_type)
+{
+ u64 *wqe;
+ u64 header;
+ u32 tail, val, error;
+ enum i40iw_status_code ret_code = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, commit_fpm_mem->pa);
+
+ header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+
+ if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
+ else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
+ ret_code = i40iw_sc_commit_fpm_values_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
+{
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
+}
+
+/**
+ * i40iw_sc_query_fpm_values - cqp wqe query fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @query_fpm_mem: memory for return fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_query_fpm_values(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 hmc_fn_id,
+ struct i40iw_dma_mem *query_fpm_mem,
+ bool post_sq,
+ u8 wait_type)
+{
+ u64 *wqe;
+ u64 header;
+ u32 tail, val, error;
+ enum i40iw_status_code ret_code = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, query_fpm_mem->pa);
+
+ header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ /* read the tail from CQP_TAIL register */
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
+ else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
+ ret_code = i40iw_sc_query_fpm_values_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
+ * @cqp: struct for cqp hw
+ * @info: arp entry information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_add_arp_cache_entry_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 temp, header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 8, info->reach_max);
+
+ temp = info->mac_addr[5] |
+ LS_64_1(info->mac_addr[4], 8) |
+ LS_64_1(info->mac_addr[3], 16) |
+ LS_64_1(info->mac_addr[2], 24) |
+ LS_64_1(info->mac_addr[1], 32) |
+ LS_64_1(info->mac_addr[0], 40);
+
+ set_64bit_val(wqe, 16, temp);
+
+ header = info->arp_index |
+ LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
+ LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
+ LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
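+
+/*
+ * Worked example of the mac packing above: 00:11:22:33:44:55 becomes the
+ * quadword 0x0000001122334455, i.e. mac_addr[0] lands in bits 47:40 and
+ * mac_addr[5] in bits 7:0.
+ */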
+
+/**
+ * i40iw_sc_del_arp_cache_entry - delete arp cache entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index to delete arp entry
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u16 arp_index,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ header = arp_index |
+ LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index of the entry to query
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u16 arp_index,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ header = arp_index |
+ LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
+ * @cqp: struct for cqp hw
+ * @info: info for apbvt entry to add or delete
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_apbvt_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, info->port);
+
+ header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
+ * @cqp: struct for cqp hw
+ * @info: info for quad hash to manage
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ *
+ * This is called before connection establishment is started. For passive connections, when a
+ * listener is created, it is called with an entry type of I40IW_QHASH_TYPE_TCP_SYN and the local
+ * ip address and tcp port. When a SYN is received (passive connections) or
+ * sent (active connections), this routine is called with an entry type of
+ * I40IW_QHASH_TYPE_TCP_ESTABLISHED and the quad is passed in info.
+ *
+ * Once the iwarp connection is established and its state moves to RTS, the quad hash entry in
+ * the hardware points to the iwarp qp number and requires no further calls from the driver.
+ */
+static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_qhash_table_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 qw1 = 0;
+ u64 qw2 = 0;
+ u64 temp;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ temp = info->mac_addr[5] |
+ LS_64_1(info->mac_addr[4], 8) |
+ LS_64_1(info->mac_addr[3], 16) |
+ LS_64_1(info->mac_addr[2], 24) |
+ LS_64_1(info->mac_addr[1], 32) |
+ LS_64_1(info->mac_addr[0], 40);
+
+ set_64bit_val(wqe, 0, temp);
+
+ qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
+ LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
+ if (info->ipv4_valid) {
+ set_64bit_val(wqe,
+ 48,
+ LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
+ } else {
+ set_64bit_val(wqe,
+ 56,
+ LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
+ LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
+
+ set_64bit_val(wqe,
+ 48,
+ LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
+ LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
+ }
+ qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
+ if (info->vlan_valid)
+ qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
+ set_64bit_val(wqe, 16, qw2);
+ if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
+ qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe,
+ 40,
+ LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
+ LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
+ set_64bit_val(wqe,
+ 32,
+ LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
+ LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
+ } else {
+ set_64bit_val(wqe,
+ 32,
+ LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
+ }
+ }
+
+ set_64bit_val(wqe, 8, qw1);
+ temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
+ LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
+ LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
+ LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
+ LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
+ LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);
+
+ i40iw_insert_wqe_hdr(wqe, temp);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
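+
+/*
+ * Editor's sketch (standalone, illustrative): the qhash lifecycle described
+ * in the comment above, as the two calls a connection manager would make.
+ * The struct mirrors only the i40iw_qhash_table_info fields consumed here;
+ * the demo_* names, types, and the add flag are invented. Note that
+ * dest_ip/dest_port carry the local (receive-side) tuple, matching how the
+ * WQE above consumes them.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+
+enum demo_qhash_type { DEMO_QHASH_TCP_SYN, DEMO_QHASH_TCP_ESTABLISHED };
+
+struct demo_qhash_info {
+ enum demo_qhash_type entry_type;
+ bool add; /* invented stand-in for info->manage */
+ bool ipv4_valid;
+ uint16_t src_port, dest_port;
+ uint32_t src_ip[4], dest_ip[4];
+};
+
+static void demo_manage_qhash(const struct demo_qhash_info *info) { (void)info; }
+
+/* 1) listen(): steer incoming SYNs by local ip/port only */
+static void demo_on_listen(uint32_t local_ip, uint16_t local_port)
+{
+ struct demo_qhash_info info = {
+ .entry_type = DEMO_QHASH_TCP_SYN,
+ .add = true, .ipv4_valid = true,
+ .dest_ip = { local_ip }, .dest_port = local_port,
+ };
+ demo_manage_qhash(&info);
+}
+
+/* 2) SYN received (passive) or sent (active): install the full quad */
+static void demo_on_syn(uint32_t local_ip, uint16_t local_port,
+ uint32_t remote_ip, uint16_t remote_port)
+{
+ struct demo_qhash_info info = {
+ .entry_type = DEMO_QHASH_TCP_ESTABLISHED,
+ .add = true, .ipv4_valid = true,
+ .dest_ip = { local_ip }, .dest_port = local_port,
+ .src_ip = { remote_ip }, .src_port = remote_port,
+ };
+ demo_manage_qhash(&info);
+}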
+
+/**
+ * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
+ * @cqp: struct for cqp hw
+ * @info: mac addr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_local_mac_ipaddr_entry_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 temp, header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ temp = info->mac_addr[5] |
+ LS_64_1(info->mac_addr[4], 8) |
+ LS_64_1(info->mac_addr[3], 16) |
+ LS_64_1(info->mac_addr[2], 24) |
+ LS_64_1(info->mac_addr[1], 32) |
+ LS_64_1(info->mac_addr[0], 40);
+
+ set_64bit_val(wqe, 32, temp);
+
+ header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
+ LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
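+
+/*
+ * Editor's sketch: the LS_64_1() chain used above (and in the qhash builder)
+ * packs the six MAC bytes big-endian into one quad-word, with mac_addr[0]
+ * landing in bits 47:40. A loop-based equivalent, with an invented name:
+ */
+#include <stdint.h>
+
+static uint64_t demo_pack_mac(const uint8_t mac[6])
+{
+ uint64_t v = 0;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ v = (v << 8) | mac[i]; /* mac[0] ends up most significant */
+ return v;
+}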
+
+/**
+ * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @entry_idx: index of mac entry
+ * @ignore_ref_count: flag to force mac entry delete
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 entry_idx,
+ u8 ignore_ref_count,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
+ LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
+ LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cqp_nop - send a nop wqe
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_ceq_init - initialize ceq
+ * @ceq: ceq sc structure
+ * @info: ceq initialization info
+ */
+static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
+ struct i40iw_ceq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
+ (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
+ return I40IW_ERR_INVALID_SIZE;
+
+ if (info->ceq_id >= I40IW_MAX_CEQID)
+ return I40IW_ERR_INVALID_CEQ_ID;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ ceq->size = sizeof(*ceq);
+ ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
+ ceq->ceq_id = info->ceq_id;
+ ceq->dev = info->dev;
+ ceq->elem_cnt = info->elem_cnt;
+ ceq->ceq_elem_pa = info->ceqe_pa;
+ ceq->virtual_map = info->virtual_map;
+
+ ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
+ ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
+ ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
+
+ ceq->tph_en = info->tph_en;
+ ceq->tph_val = info->tph_val;
+ ceq->polarity = 1;
+ I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
+ ceq->dev->ceq[info->ceq_id] = ceq;
+
+ return 0;
+}
+
+/**
+ * i40iw_sc_ceq_create - create ceq wqe
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+
+ cqp = ceq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
+ set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+ header = ceq->ceq_id |
+ LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
+ LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
+ LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
+ * @ceq: ceq sc structure
+ */
+static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = ceq->dev->cqp;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
+}
+
+/**
+ * i40iw_sc_cceq_destroy_done - poll for cceq destroy to complete
+ * @ceq: ceq sc structure
+ */
+static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = ceq->dev->cqp;
+ cqp->process_cqp_sds = i40iw_update_sds_noccq;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
+}
+
+/**
+ * i40iw_sc_cceq_create - create cceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
+{
+ enum i40iw_status_code ret_code;
+
+ ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
+ if (!ret_code)
+ ret_code = i40iw_sc_cceq_create_done(ceq);
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_ceq_destroy - destroy ceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+
+ cqp = ceq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
+ header = ceq->ceq_id |
+ LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
+ LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
+ LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_process_ceq - process ceq
+ * @dev: sc device struct
+ * @ceq: ceq sc structure
+ */
+static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
+{
+ u64 temp;
+ u64 *ceqe;
+ struct i40iw_sc_cq *cq = NULL;
+ u8 polarity;
+
+ ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
+ get_64bit_val(ceqe, 0, &temp);
+ polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
+ if (polarity != ceq->polarity)
+ return cq;
+
+ cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);
+
+ I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
+ if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
+ ceq->polarity ^= 1;
+
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
+ else
+ i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);
+
+ return cq;
+}
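+
+/*
+ * Editor's sketch (standalone): the valid-bit/polarity scheme the CEQ (and
+ * AEQ) consumers above rely on. Hardware writes entries tagged with the
+ * current pass's polarity; software owns an entry only while the tag matches
+ * its expected polarity, and flips that expectation each time the tail
+ * wraps. Names are invented for illustration.
+ */
+#include <stdint.h>
+
+struct demo_ring {
+ uint64_t *elem;
+ uint32_t size, tail;
+ int polarity; /* starts at 1, as in i40iw_sc_ceq_init() */
+};
+
+static int demo_ring_poll(struct demo_ring *r, uint64_t *out)
+{
+ uint64_t e = r->elem[r->tail];
+
+ if ((int)(e >> 63) != r->polarity)
+ return 0; /* tag mismatch: queue empty */
+ *out = e;
+ if (++r->tail == r->size) { /* wrap: flip expected tag */
+ r->tail = 0;
+ r->polarity ^= 1;
+ }
+ return 1;
+}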
+
+/**
+ * i40iw_sc_aeq_init - initialize aeq
+ * @aeq: aeq structure ptr
+ * @info: aeq initialization info
+ */
+static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
+ struct i40iw_aeq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
+ (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
+ return I40IW_ERR_INVALID_SIZE;
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ aeq->size = sizeof(*aeq);
+ aeq->polarity = 1;
+ aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
+ aeq->dev = info->dev;
+ aeq->elem_cnt = info->elem_cnt;
+
+ aeq->aeq_elem_pa = info->aeq_elem_pa;
+ I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
+ info->dev->aeq = aeq;
+
+ aeq->virtual_map = info->virtual_map;
+ aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
+ aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
+ aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
+ return 0;
+}
+
+/**
+ * i40iw_sc_aeq_create - create aeq
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = aeq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 32,
+ (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
+ set_64bit_val(wqe, 48,
+ (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+
+ header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
+ LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_aeq_destroy - destroy aeq during close
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = aeq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
+ header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
+ LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_get_next_aeqe - get next aeq entry
+ * @aeq: aeq structure ptr
+ * @info: aeqe info to be returned
+ */
+static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
+ struct i40iw_aeqe_info *info)
+{
+ u64 temp, compl_ctx;
+ u64 *aeqe;
+ u16 wqe_idx;
+ u8 ae_src;
+ u8 polarity;
+
+ aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
+ get_64bit_val(aeqe, 0, &compl_ctx);
+ get_64bit_val(aeqe, 8, &temp);
+ polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);
+
+ if (aeq->polarity != polarity)
+ return I40IW_ERR_QUEUE_EMPTY;
+
+ i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);
+
+ ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
+ wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
+ info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
+ info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
+ info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
+ info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
+ info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
+ info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
+ switch (ae_src) {
+ case I40IW_AE_SOURCE_RQ:
+ case I40IW_AE_SOURCE_RQ_0011:
+ info->qp = true;
+ info->wqe_idx = wqe_idx;
+ info->compl_ctx = compl_ctx;
+ break;
+ case I40IW_AE_SOURCE_CQ:
+ case I40IW_AE_SOURCE_CQ_0110:
+ case I40IW_AE_SOURCE_CQ_1010:
+ case I40IW_AE_SOURCE_CQ_1110:
+ info->cq = true;
+ info->compl_ctx = LS_64_1(compl_ctx, 1);
+ break;
+ case I40IW_AE_SOURCE_SQ:
+ case I40IW_AE_SOURCE_SQ_0111:
+ info->qp = true;
+ info->sq = true;
+ info->wqe_idx = wqe_idx;
+ info->compl_ctx = compl_ctx;
+ break;
+ case I40IW_AE_SOURCE_IN_RR_WR:
+ case I40IW_AE_SOURCE_IN_RR_WR_1011:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
+ case I40IW_AE_SOURCE_OUT_RR:
+ case I40IW_AE_SOURCE_OUT_RR_1111:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->out_rdrsp = true;
+ break;
+ default:
+ break;
+ }
+ I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
+ if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
+ aeq->polarity ^= 1;
+ return 0;
+}
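+
+/*
+ * Editor's sketch: the RS_64()/LS_64() macros used throughout this file
+ * reduce to shift-and-mask field accessors. A generic equivalent (shift and
+ * width spelled out rather than encoded in per-field mask constants; width
+ * must be less than 64 here):
+ */
+#include <stdint.h>
+
+static inline uint64_t demo_field_get(uint64_t val, unsigned shift, unsigned width)
+{
+ return (val >> shift) & ((1ULL << width) - 1); /* RS_64 analogue */
+}
+
+static inline uint64_t demo_field_prep(uint64_t val, unsigned shift, unsigned width)
+{
+ return (val & ((1ULL << width) - 1)) << shift; /* LS_64 analogue */
+}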
+
+/**
+ * i40iw_sc_repost_aeq_entries - repost completed aeq entries
+ * @dev: sc device struct
+ * @count: allocate count
+ */
+static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
+ u32 count)
+{
+ if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
+ return I40IW_ERR_INVALID_SIZE;
+
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
+ else
+ i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
+
+ return 0;
+}
+
+/**
+ * i40iw_sc_aeq_create_done - poll for aeq create to complete
+ * @aeq: aeq structure ptr
+ */
+static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = aeq->dev->cqp;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
+}
+
+/**
+ * i40iw_sc_aeq_destroy_done - poll for aeq destroy to complete during close
+ * @aeq: aeq structure ptr
+ */
+static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = aeq->dev->cqp;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
+}
+
+/**
+ * i40iw_sc_ccq_init - initialize control cq
+ * @cq: sc's cq struct
+ * @info: info for control cq initialization
+ */
+static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
+ struct i40iw_ccq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
+ return I40IW_ERR_INVALID_SIZE;
+
+ if (info->ceq_id > I40IW_MAX_CEQID)
+ return I40IW_ERR_INVALID_CEQ_ID;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ cq->cq_pa = info->cq_pa;
+ cq->cq_uk.cq_base = info->cq_base;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->cq_uk.shadow_area = info->shadow_area;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ cq->cq_uk.cq_size = info->num_elem;
+ cq->cq_type = I40IW_CQ_TYPE_CQP;
+ cq->ceqe_mask = info->ceqe_mask;
+ I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
+
+ cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
+
+ cq->pbl_list = info->pbl_list;
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->cq_uk.polarity = true;
+
+ /* following are only for iw cqs so initialize them to zero */
+ cq->cq_uk.cqe_alloc_reg = NULL;
+ info->dev->ccq = cq;
+ return 0;
+}
+
+/**
+ * i40iw_sc_ccq_create_done - poll cqp for ccq create
+ * @ccq: ccq sc struct
+ */
+static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = ccq->dev->cqp;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
+}
+
+/**
+ * i40iw_sc_ccq_create - create control cq
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: overflow flag for ccq
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
+ u64 scratch,
+ bool check_overflow,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ enum i40iw_status_code ret_code;
+
+ cqp = ccq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
+ set_64bit_val(wqe, 16,
+ LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
+ set_64bit_val(wqe, 40, ccq->shadow_area_pa);
+ set_64bit_val(wqe, 48,
+ (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56,
+ LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+ header = ccq->cq_uk.cq_id |
+ LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+ LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ ret_code = i40iw_sc_ccq_create_done(ccq);
+ if (ret_code)
+ return ret_code;
+ }
+ cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
+
+ return 0;
+}
+
+/**
+ * i40iw_sc_ccq_destroy - destroy ccq during close
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+ enum i40iw_status_code ret_code = 0;
+ u32 tail, val, error;
+
+ cqp = ccq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
+ set_64bit_val(wqe, 40, ccq->shadow_area_pa);
+
+ header = ccq->cq_uk.cq_id |
+ LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_cq_init - initialize completion q
+ * @cq: cq struct
+ * @info: cq initialization info
+ */
+static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
+ struct i40iw_cq_init_info *info)
+{
+ u32 __iomem *cqe_alloc_reg = NULL;
+ enum i40iw_status_code ret_code;
+ u32 pble_obj_cnt;
+ u32 arm_offset;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ cq->cq_pa = info->cq_base_pa;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
+ if (i40iw_get_hw_addr(cq->dev))
+ cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
+ arm_offset);
+ info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
+ ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
+ if (ret_code)
+ return ret_code;
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->ceqe_mask = info->ceqe_mask;
+ cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
+
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+
+ return 0;
+}
+
+/**
+ * i40iw_sc_cq_create - create completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: flag for overflow check
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
+ u64 scratch,
+ bool check_overflow,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
+ return I40IW_ERR_INVALID_CQ_ID;
+
+ if (cq->ceq_id > I40IW_MAX_CEQID)
+ return I40IW_ERR_INVALID_CEQ_ID;
+
+ cqp = cq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe,
+ 16,
+ LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+
+ set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
+
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+ header = cq->cq_uk.cq_id |
+ LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+ LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cq_destroy - destroy completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+
+ cqp = cq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+
+ header = cq->cq_uk.cq_id |
+ LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+ LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cq_modify - modify a completion queue
+ * @cq: cq struct
+ * @info: modification info struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag to post to sq
+ */
+static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
+ struct i40iw_modify_cq_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+ u32 cq_size, ceq_id, first_pm_pbl_idx;
+ u8 pbl_chunk_size;
+ bool virtual_map, ceq_id_valid, check_overflow;
+ u32 pble_obj_cnt;
+
+ if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
+ return I40IW_ERR_INVALID_CEQ_ID;
+
+ pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->cq_resize && info->virtual_map &&
+ (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ cqp = cq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ cq->pbl_list = info->pbl_list;
+ cq->cq_pa = info->cq_pa;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+
+ cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
+ if (info->ceq_change) {
+ ceq_id_valid = true;
+ ceq_id = info->ceq_id;
+ } else {
+ ceq_id_valid = cq->ceq_id_valid;
+ ceq_id = ceq_id_valid ? cq->ceq_id : 0;
+ }
+ virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
+ first_pm_pbl_idx = (info->cq_resize ?
+ (info->virtual_map ? info->first_pm_pbl_idx : 0) :
+ (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+ pbl_chunk_size = (info->cq_resize ?
+ (info->virtual_map ? info->pbl_chunk_size : 0) :
+ (cq->virtual_map ? cq->pbl_chunk_size : 0));
+ check_overflow = info->check_overflow_change ? info->check_overflow :
+ cq->check_overflow;
+ cq->cq_uk.cq_size = cq_size;
+ cq->ceq_id_valid = ceq_id_valid;
+ cq->ceq_id = ceq_id;
+ cq->virtual_map = virtual_map;
+ cq->first_pm_pbl_idx = first_pm_pbl_idx;
+ cq->pbl_chunk_size = pbl_chunk_size;
+ cq->check_overflow = check_overflow;
+
+ set_64bit_val(wqe, 0, cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, 16,
+ LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+ header = cq->cq_uk.cq_id |
+ LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
+ LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+ LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_init - initialize qp
+ * @qp: sc qp
+ * @info: initialization qp info
+ */
+static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
+ struct i40iw_qp_init_info *info)
+{
+ u32 __iomem *wqe_alloc_reg = NULL;
+ enum i40iw_status_code ret_code;
+ u32 pble_obj_cnt;
+ u8 wqe_size;
+ u32 offset;
+
+ qp->dev = info->pd->dev;
+ qp->sq_pa = info->sq_pa;
+ qp->rq_pa = info->rq_pa;
+ qp->hw_host_ctx_pa = info->host_ctx_pa;
+ qp->q2_pa = info->q2_pa;
+ qp->shadow_area_pa = info->shadow_area_pa;
+
+ qp->q2_buf = info->q2;
+ qp->pd = info->pd;
+ qp->hw_host_ctx = info->host_ctx;
+ offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
+ if (i40iw_get_hw_addr(qp->pd->dev))
+ wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+ offset);
+
+ info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
+ ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
+ if (ret_code)
+ return ret_code;
+ qp->virtual_map = info->virtual_map;
+
+ pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
+ (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ qp->llp_stream_handle = (void *)(-1);
+ qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
+
+ qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
+ false);
+ i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
+ __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
+ ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+ &wqe_size);
+ if (ret_code)
+ return ret_code;
+ qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
+ (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
+ i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
+ "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
+ __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
+ qp->sq_tph_val = info->sq_tph_val;
+ qp->rq_tph_val = info->rq_tph_val;
+ qp->sq_tph_en = info->sq_tph_en;
+ qp->rq_tph_en = info->rq_tph_en;
+ qp->rcv_tph_en = info->rcv_tph_en;
+ qp->xmit_tph_en = info->xmit_tph_en;
+ qp->qs_handle = qp->pd->dev->qs_handle;
+ qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
+
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_create - create qp
+ * @qp: sc qp
+ * @info: qp create info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_create(
+ struct i40iw_sc_qp *qp,
+ struct i40iw_create_qp_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+
+ if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
+ (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
+ return I40IW_ERR_INVALID_QP_ID;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
+ LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
+ LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+ LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
+ LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
+ LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
+ LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
+ LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_modify - modify qp cqp wqe
+ * @qp: sc qp
+ * @info: modify qp info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_modify(
+ struct i40iw_sc_qp *qp,
+ struct i40iw_modify_qp_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ u8 term_actions = 0;
+ u8 term_len = 0;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
+ if (info->dont_send_fin)
+ term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
+ if (info->dont_send_term)
+ term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
+ if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
+ (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
+ term_len = info->termlen;
+ }
+
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
+ LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
+ LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
+ LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
+ LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
+ LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
+ LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
+ LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+ LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
+ LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
+ LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
+ LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
+ LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
+ LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
+ LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
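+
+/*
+ * Editor's sketch of the term_actions composition above: the two
+ * suppression flags add single-bit action codes, so all four combinations
+ * stay distinct (0 meaning neither message is suppressed). The constants
+ * here are invented; the real I40IWQP_TERM_* encodings live in the driver
+ * headers.
+ */
+enum { DEMO_SEND_TERM_ONLY = 1, DEMO_SEND_FIN_ONLY = 2 };
+
+static int demo_term_actions(int dont_send_fin, int dont_send_term)
+{
+ int actions = 0;
+
+ if (dont_send_fin)
+ actions += DEMO_SEND_TERM_ONLY; /* FIN suppressed */
+ if (dont_send_term)
+ actions += DEMO_SEND_FIN_ONLY; /* TERM suppressed */
+ return actions;
+}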
+
+/**
+ * i40iw_sc_qp_destroy - cqp destroy qp
+ * @qp: sc qp
+ * @scratch: u64 saved to be used during cqp completion
+ * @remove_hash_idx: flag to remove hash idx
+ * @ignore_mw_bnd: memory window bind flag
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_destroy(
+ struct i40iw_sc_qp *qp,
+ u64 scratch,
+ bool remove_hash_idx,
+ bool ignore_mw_bnd,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+ LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
+ LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_flush_wqes - flush qp's wqe
+ * @qp: sc qp
+ * @info: flush information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
+ struct i40iw_sc_qp *qp,
+ struct i40iw_qp_flush_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 temp = 0;
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ bool flush_sq = false, flush_rq = false;
+
+ if (info->rq && !qp->flush_rq)
+ flush_rq = true;
+
+ if (info->sq && !qp->flush_sq)
+ flush_sq = true;
+
+ qp->flush_sq |= flush_sq;
+ qp->flush_rq |= flush_rq;
+ if (!flush_sq && !flush_rq) {
+ if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
+ return 0;
+ }
+
+ cqp = qp->pd->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ if (info->userflushcode) {
+ if (flush_rq) {
+ temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
+ LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
+ }
+ if (flush_sq) {
+ temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
+ LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
+ }
+ }
+ set_64bit_val(wqe, 16, temp);
+
+ temp = (info->generate_ae) ?
+ info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
+
+ set_64bit_val(wqe, 8, temp);
+
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
+ LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
+ LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
+ LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
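+
+/*
+ * Editor's sketch: the flush latch above in isolation. Each work queue is
+ * flushed at most once per QP; repeat requests degrade to no-ops unless the
+ * MPA-CRC special case applies. Names invented for illustration.
+ */
+#include <stdbool.h>
+
+struct demo_flush_state { bool sq_done, rq_done; };
+
+static void demo_request_flush(struct demo_flush_state *s,
+ bool want_sq, bool want_rq,
+ bool *do_sq, bool *do_rq)
+{
+ *do_sq = want_sq && !s->sq_done;
+ *do_rq = want_rq && !s->rq_done;
+ s->sq_done |= *do_sq;
+ s->rq_done |= *do_rq;
+}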
+
+/**
+ * i40iw_sc_qp_upload_context - upload qp's context
+ * @dev: sc device struct
+ * @info: upload context info ptr for return
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_upload_context(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_upload_context_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, info->buf_pa);
+
+ header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
+ LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
+ LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
+ LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_setctx - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+static enum i40iw_status_code i40iw_sc_qp_setctx(
+ struct i40iw_sc_qp *qp,
+ u64 *qp_ctx,
+ struct i40iw_qp_host_ctx_info *info)
+{
+ struct i40iwarp_offload_info *iw;
+ struct i40iw_tcp_offload_info *tcp;
+ u64 qw0, qw3, qw7 = 0;
+
+ iw = info->iwarp_info;
+ tcp = info->tcp_info;
+ qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
+ LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
+ LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
+ LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
+ LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
+ LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
+ LS_64(info->push_idx, I40IWQPC_PPIDX) |
+ LS_64(info->push_mode_en, I40IWQPC_PMENA);
+
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+
+ qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
+ LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
+ LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
+
+ set_64bit_val(qp_ctx,
+ 128,
+ LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
+
+ set_64bit_val(qp_ctx,
+ 136,
+ LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
+ LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
+
+ set_64bit_val(qp_ctx,
+ 168,
+ LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
+ set_64bit_val(qp_ctx,
+ 176,
+ LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
+ LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
+ LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
+ LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
+
+ if (info->iwarp_info_valid) {
+ qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
+ LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
+
+ qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
+ set_64bit_val(qp_ctx, 144, qp->q2_pa);
+ set_64bit_val(qp_ctx,
+ 152,
+ LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
+
+ /*
+ * Hard-code IRD_SIZE to the hw limit, 128, in qpctx, i.e. matching an
+ * advertisable IRD of 64
+ */
+ iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
+ set_64bit_val(qp_ctx,
+ 160,
+ LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
+ LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
+ LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
+ LS_64(iw->rd_enable, I40IWQPC_RDOK) |
+ LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
+ LS_64(iw->bind_en, I40IWQPC_BINDEN) |
+ LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
+ LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
+ LS_64(1, I40IWQPC_IWARPMODE) |
+ LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
+ LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
+ LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
+ LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
+ LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
+ }
+ if (info->tcp_info_valid) {
+ qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
+ LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
+ LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
+ LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
+ LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
+ LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
+ LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
+
+ qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
+ LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
+ LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
+ LS_64(tcp->tos, I40IWQPC_TOS) |
+ LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
+ LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
+
+ qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
+ set_64bit_val(qp_ctx,
+ 32,
+ LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
+ LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
+
+ set_64bit_val(qp_ctx,
+ 40,
+ LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
+ LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
+
+ set_64bit_val(qp_ctx,
+ 48,
+ LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
+ LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
+ LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
+
+ qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
+ LS_64(tcp->wscale, I40IWQPC_WSCALE) |
+ LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
+ LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
+ LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
+ LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
+ LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
+
+ set_64bit_val(qp_ctx,
+ 72,
+ LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
+ LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
+ set_64bit_val(qp_ctx,
+ 80,
+ LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
+ LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
+
+ set_64bit_val(qp_ctx,
+ 88,
+ LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
+ LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
+ set_64bit_val(qp_ctx,
+ 96,
+ LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
+ LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
+ set_64bit_val(qp_ctx,
+ 104,
+ LS_64(tcp->srtt, I40IWQPC_SRTT) |
+ LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
+ set_64bit_val(qp_ctx,
+ 112,
+ LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
+ LS_64(tcp->cwnd, I40IWQPC_CWND));
+ set_64bit_val(qp_ctx,
+ 120,
+ LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
+ LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
+ set_64bit_val(qp_ctx,
+ 128,
+ LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
+ LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
+ set_64bit_val(qp_ctx,
+ 184,
+ LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
+ LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
+ set_64bit_val(qp_ctx,
+ 192,
+ LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
+ LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
+ }
+
+ set_64bit_val(qp_ctx, 0, qw0);
+ set_64bit_val(qp_ctx, 24, qw3);
+ set_64bit_val(qp_ctx, 56, qw7);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST_CTX WQE",
+ qp_ctx, I40IW_QP_CTX_SIZE);
+ return 0;
+}
+
+/**
+ * i40iw_sc_alloc_stag - mr stag alloc
+ * @dev: sc device struct
+ * @info: stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_alloc_stag(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_allocate_stag_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
+ LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
+ set_64bit_val(wqe,
+ 16,
+ LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
+ set_64bit_val(wqe,
+ 40,
+ LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
+
+ header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_STAG_MR) |
+ LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+ LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
+ LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+ LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
+ LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_mr_reg_non_shared - non-shared mr registration
+ * @dev: sc device struct
+ * @info: mr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_reg_ns_stag_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 temp;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ u32 pble_obj_cnt;
+ bool remote_access;
+ u8 addr_type;
+
+ if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
+ I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+ remote_access = true;
+ else
+ remote_access = false;
+
+ pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
+ set_64bit_val(wqe, 0, temp);
+
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
+ LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+
+ set_64bit_val(wqe,
+ 16,
+ LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
+ LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
+ if (!info->chunk_size) {
+ set_64bit_val(wqe, 32, info->reg_addr_pa);
+ set_64bit_val(wqe, 48, 0);
+ } else {
+ set_64bit_val(wqe, 32, 0);
+ set_64bit_val(wqe, 48, info->first_pm_pbl_index);
+ }
+ set_64bit_val(wqe, 40, info->hmc_fcn_index);
+ set_64bit_val(wqe, 56, 0);
+
+ addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
+ header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_STAG_MR) |
+ LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
+ LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+ LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+ LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
+ LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
+ LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_mr_reg_shared - register shared memory region
+ * @dev: sc device struct
+ * @info: info for shared memory registration
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mr_reg_shared(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_register_shared_stag *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 temp, va64, fbo, header;
+ u32 va32;
+ bool remote_access;
+ u8 addr_type;
+
+ if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
+ I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+ remote_access = true;
+ else
+ remote_access = false;
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ va64 = (uintptr_t)(info->va);
+ va32 = (u32)(va64 & 0x00000000FFFFFFFF);
+ fbo = (u64)(va32 & (4096 - 1));
+
+ set_64bit_val(wqe,
+ 0,
+ (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
+
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+ temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
+ LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
+ LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
+ set_64bit_val(wqe, 16, temp);
+
+ addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
+ header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_STAG_MR) |
+ LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+ LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+ LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
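+
+/*
+ * Editor's sketch: the fbo (first byte offset) computed above is simply the
+ * VA's offset within a 4K page, used when the stag is zero-based rather
+ * than VA-based. Function name invented:
+ */
+#include <stdint.h>
+
+static uint64_t demo_first_byte_offset(const void *va)
+{
+ return (uint64_t)(uintptr_t)va & (4096 - 1);
+}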
+
+/**
+ * i40iw_sc_dealloc_stag - deallocate stag
+ * @dev: sc device struct
+ * @info: dealloc stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_dealloc_stag(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_dealloc_stag_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 header;
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe,
+ 16,
+ LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
+
+ header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_query_stag - query hardware for stag
+ * @dev: sc device struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @stag_index: stag index for query
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
+ u64 scratch,
+ u32 stag_index,
+ bool post_sq)
+{
+ u64 header;
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 16,
+ LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
+
+ header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_mw_alloc - mw allocate
+ * @dev: sc device struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @mw_stag_index: stag index
+ * @pd_id: pd id for this mw
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mw_alloc(
+ struct i40iw_sc_dev *dev,
+ u64 scratch,
+ u32 mw_stag_index,
+ u16 pd_id,
+ bool post_sq)
+{
+ u64 header;
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe,
+ 16,
+ LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
+
+ header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_send_lsmm - send last streaming mode message
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ * @stag: stag of lsmm buffer
+ */
+static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
+ void *lsmm_buf,
+ u32 size,
+ i40iw_stag stag)
+{
+ u64 *wqe;
+ u64 header;
+ struct i40iw_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+
+ set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
+
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
+ LS_64(1, I40IWQPSQ_STREAMMODE) |
+ LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
+ wqe, I40IW_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * i40iw_sc_send_lsmm_nostag - send lsmm without stag for privileged qp
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ */
+static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
+ void *lsmm_buf,
+ u32 size)
+{
+ u64 *wqe;
+ u64 header;
+ struct i40iw_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+
+ set_64bit_val(wqe, 8, size);
+
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
+ LS_64(1, I40IWQPSQ_STREAMMODE) |
+ LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
+ wqe, I40IW_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * i40iw_sc_send_rtt - send last read0 or write0
+ * @qp: sc qp struct
+ * @read: Do read0 or write0
+ */
+static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
+{
+ u64 *wqe;
+ u64 header;
+ struct i40iw_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+ if (read) {
+ header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
+ LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+ set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
+ } else {
+ header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+ }
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
+ wqe, I40IW_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * i40iw_sc_post_wqe0 - send wqe with opcode
+ * @qp: sc qp struct
+ * @opcode: opcode to use for wqe0
+ */
+static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
+{
+ u64 *wqe;
+ u64 header;
+ struct i40iw_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+ switch (opcode) {
+ case I40IWQP_OP_NOP:
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+ header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ break;
+ case I40IWQP_OP_RDMA_SEND:
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+ header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
+ LS_64(1, I40IWQPSQ_STREAMMODE) |
+ LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ break;
+ default:
+ i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
+ __func__);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
+ * @dev: ptr to i40iw_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
+{
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_dma_mem query_fpm_mem;
+ struct i40iw_virt_mem virt_mem;
+ struct i40iw_vfdev *vf_dev = NULL;
+ u32 mem_size;
+ enum i40iw_status_code ret_code = 0;
+ bool poll_registers = true;
+ u16 iw_vf_idx;
+ u8 wait_type;
+
+ if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
+ (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
+ return I40IW_ERR_INVALID_HMCFN_ID;
+
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
+ dev->hmc_fn_id);
+ if (hmc_fn_id == dev->hmc_fn_id) {
+ hmc_info = dev->hmc_info;
+ query_fpm_mem.pa = dev->fpm_query_buf_pa;
+ query_fpm_mem.va = dev->fpm_query_buf;
+ } else {
+ vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
+ if (!vf_dev)
+ return I40IW_ERR_INVALID_VF_ID;
+
+ hmc_info = &vf_dev->hmc_info;
+ iw_vf_idx = vf_dev->iw_vf_idx;
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
+ hmc_info, hmc_info->hmc_obj);
+ if (!vf_dev->fpm_query_buf) {
+ if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
+ ret_code = i40iw_alloc_query_fpm_buf(dev,
+ &dev->vf_fpm_query_buf[iw_vf_idx]);
+ if (ret_code)
+ return ret_code;
+ }
+ vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
+ vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
+ }
+ query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
+ query_fpm_mem.va = vf_dev->fpm_query_buf;
+ /**
+ * It is HARDWARE specific:
+ * this call is done by PF for VF and
+ * i40iw_sc_query_fpm_values needs ccq poll
+ * because PF ccq is already created.
+ */
+ poll_registers = false;
+ }
+
+ hmc_info->hmc_fn_id = hmc_fn_id;
+
+ if (hmc_fn_id != dev->hmc_fn_id) {
+ ret_code =
+ i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
+ } else {
+ wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
+ (u8)I40IW_CQP_WAIT_POLL_CQ;
+
+ ret_code = i40iw_sc_query_fpm_values(
+ dev->cqp,
+ 0,
+ hmc_info->hmc_fn_id,
+ &query_fpm_mem,
+ true,
+ wait_type);
+ }
+ if (ret_code)
+ return ret_code;
+
+ /* parse the fpm_query_buf and fill hmc obj info */
+ ret_code =
+ i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
+ hmc_info,
+ &dev->hmc_fpm_misc);
+ if (ret_code)
+ return ret_code;
+ i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
+ query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);
+
+ if (hmc_fn_id != dev->hmc_fn_id) {
+ i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
+
+ /* parse the fpm_commit_buf and fill hmc obj info */
+ i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj);
+ mem_size = sizeof(struct i40iw_hmc_sd_entry) *
+ (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
+ ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
+ if (ret_code)
+ return ret_code;
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+ }
+
+ /* fill size of objects which are fixed */
+ hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
+ hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+ hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
+ * populates fpm base address in hmc_info
+ * @dev: ptr to i40iw_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id)
+{
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_hmc_obj_info *obj_info;
+ u64 *buf;
+ struct i40iw_dma_mem commit_fpm_mem;
+ u32 i, j;
+ enum i40iw_status_code ret_code = 0;
+ bool poll_registers = true;
+ u8 wait_type;
+
+ if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
+ (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
+ return I40IW_ERR_INVALID_HMCFN_ID;
+
+ if (hmc_fn_id == dev->hmc_fn_id) {
+ hmc_info = dev->hmc_info;
+ } else {
+ hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
+ poll_registers = false;
+ }
+ if (!hmc_info)
+ return I40IW_ERR_BAD_PTR;
+
+ obj_info = hmc_info->hmc_obj;
+ buf = dev->fpm_commit_buf;
+
+ /* copy cnt values in commit buf */
+ for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
+ i++, j += 8)
+ set_64bit_val(buf, j, (u64)obj_info[i].cnt);
+
+ set_64bit_val(buf, 40, 0); /* APBVT rsvd */
+
+ commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
+ commit_fpm_mem.va = dev->fpm_commit_buf;
+ wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
+ (u8)I40IW_CQP_WAIT_POLL_CQ;
+ ret_code = i40iw_sc_commit_fpm_values(
+ dev->cqp,
+ 0,
+ hmc_info->hmc_fn_id,
+ &commit_fpm_mem,
+ true,
+ wait_type);
+
+ /* parse the fpm_commit_buf and fill hmc obj info */
+ if (!ret_code)
+ ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf, hmc_info->hmc_obj);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
+ commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
+
+ return ret_code;
+}
+
+/**
+ * cqp_sds_wqe_fill - fill cqp wqe for sd
+ * @cqp: struct for cqp hw
+ * @info: sd info for wqe
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
+ struct i40iw_update_sds_info *info,
+ u64 scratch)
+{
+ u64 data;
+ u64 header;
+ u64 *wqe;
+ int mem_entries, wqe_entries;
+ struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ I40IW_CQP_INIT_WQE(wqe);
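+ /*
+ * up to three SD entries fit directly in the WQE; any remaining
+ * entries are passed to hardware through the sdbuf DMA buffer
+ */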
+ wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
+ mem_entries = info->cnt - wqe_entries;
+
+ header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
+ LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
+
+ if (mem_entries) {
+ memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
+ data = sdbuf->pa;
+ } else {
+ data = 0;
+ }
+ data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);
+
+ set_64bit_val(wqe, 16, data);
+
+ switch (wqe_entries) {
+ case 3:
+ set_64bit_val(wqe, 48,
+ (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
+ LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
+
+ set_64bit_val(wqe, 56, info->entry[2].data);
+ /* fallthrough */
+ case 2:
+ set_64bit_val(wqe, 32,
+ (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
+ LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
+
+ set_64bit_val(wqe, 40, info->entry[1].data);
+ /* fallthrough */
+ case 1:
+ set_64bit_val(wqe, 0,
+ LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
+
+ set_64bit_val(wqe, 8, info->entry[0].data);
+ break;
+ default:
+ break;
+ }
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ return 0;
+}
+
+/**
+ * i40iw_update_pe_sds - post cqp wqe to update sd entries
+ * @dev: ptr to i40iw_dev struct
+ * @info: sd info for the sd entries
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *info,
+ u64 scratch)
+{
+ struct i40iw_sc_cqp *cqp = dev->cqp;
+ enum i40iw_status_code ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
+ if (!ret_code)
+ i40iw_sc_cqp_post_sq(cqp);
+
+ return ret_code;
+}
+
+/**
+ * i40iw_update_sds_noccq - update sd before ccq created
+ * @dev: sc device struct
+ * @info: sd info for sd's
+ */
+enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *info)
+{
+ u32 error, val, tail;
+ struct i40iw_sc_cqp *cqp = dev->cqp;
+ enum i40iw_status_code ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, 0);
+ if (ret_code)
+ return ret_code;
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ i40iw_sc_cqp_post_sq(cqp);
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_suspend_qp - suspend qp for param change
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_sc_qp *qp,
+ u64 scratch)
+{
+ u64 header;
+ u64 *wqe;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
+ LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_resume_qp - resume qp after suspend
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_sc_qp *qp,
+ u64 scratch)
+{
+ u64 header;
+ u64 *wqe;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 16,
+ LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
+
+ header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
+ LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @post_sq: flag for cqp db to ring
+ * @poll_registers: flag to poll register for cqp completion
+ */
+enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 hmc_fn_id,
+ bool post_sq,
+ bool poll_registers)
+{
+ u64 header;
+ u64 *wqe;
+ u32 tail, val, error;
+ enum i40iw_status_code ret_code = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 16,
+ LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
+
+ header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error) {
+ ret_code = I40IW_ERR_CQP_COMPL_ERROR;
+ return ret_code;
+ }
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ if (poll_registers)
+ /* check for cqp sq tail update */
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
+ else
+ ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
+ I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
+ NULL);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_ring_full - check if cqp ring is full
+ * @cqp: struct for cqp hw
+ */
+static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
+{
+ return I40IW_RING_FULL_ERR(cqp->sq_ring);
+}
+
+/**
+ * i40iw_config_fpm_values - configure HMC objects
+ * @dev: sc device struct
+ * @qp_count: desired qp count
+ */
+enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
+{
+ struct i40iw_virt_mem virt_mem;
+ u32 i, mem_size;
+ u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
+ u32 powerof2;
+ u64 sd_needed, bytes_needed;
+ u32 loop_count = 0;
+
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
+ enum i40iw_status_code ret_code = 0;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "i40iw_sc_init_iw_hmc returned error_code = %d\n",
+ ret_code);
+ return ret_code;
+ }
+
+ bytes_needed = 0;
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+ bytes_needed +=
+ (hmc_info->hmc_obj[i].max_cnt) * (hmc_info->hmc_obj[i].size);
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s i[%04d] max_cnt[0x%04X] size[0x%04llx]\n",
+ __func__, i, hmc_info->hmc_obj[i].max_cnt,
+ hmc_info->hmc_obj[i].size);
+ }
+ sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up */
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
+ __func__, sd_needed, hmc_info->first_sd_index);
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: bytes_needed=0x%llx sd count %d where max sd is %d\n",
+ __func__, bytes_needed, hmc_info->sd_table.sd_cnt,
+ hmc_fpm_misc->max_sds);
+
+ qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
+ qpwantedoriginal = qpwanted;
+ mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
+ pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;
+
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
+ qp_count, hmc_fpm_misc->max_sds,
+ hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);
+
+ do {
+ ++loop_count;
+ hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
+ min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
+ hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
+ hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
+ qpwanted * hmc_fpm_misc->ht_multiplier;
+ hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
+ hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
+ hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
+ hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
+
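+ /*
+ * XF and Q1 scale with the QP count; their free lists are sized
+ * from the per-object block sizes reported in the FPM query buffer
+ */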
+ hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
+ hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+ hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
+ ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
+ hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
+ hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
+
+ /* How much memory is needed for all the objects. */
+ bytes_needed = 0;
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
+ bytes_needed +=
+ (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
+ sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;
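+ /*
+ * if the objects still do not fit, periodically shrink the
+ * requested QP count and trim MRs and PBLEs each pass, then
+ * recompute until everything fits in the available SDs
+ */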
+ if ((loop_count > 1000) ||
+ ((!(loop_count % 10)) &&
+ (qpwanted > qpwantedoriginal * 2 / 3))) {
+ if (qpwanted > FPM_MULTIPLIER) {
+ qpwanted -= FPM_MULTIPLIER;
+ powerof2 = 1;
+ while (powerof2 < qpwanted)
+ powerof2 *= 2;
+ powerof2 /= 2;
+ qpwanted = powerof2;
+ } else {
+ qpwanted /= 2;
+ }
+ }
+ if (mrwanted > FPM_MULTIPLIER * 10)
+ mrwanted -= FPM_MULTIPLIER * 10;
+ if (pblewanted > FPM_MULTIPLIER * 1000)
+ pblewanted -= FPM_MULTIPLIER * 1000;
+ } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
+
+ bytes_needed = 0;
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
+ bytes_needed += (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s i[%04d] cnt[0x%04x] size[0x%04llx]\n",
+ __func__, i, hmc_info->hmc_obj[i].cnt,
+ hmc_info->hmc_obj[i].size);
+ }
+ sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up not truncate. */
+
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
+ loop_count, sd_needed,
+ hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);
+
+ ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "configure_iw_fpm returned error_code[x%08X]\n",
+ i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
+ return ret_code;
+ }
+
+ hmc_info->sd_table.sd_cnt = (u32)sd_needed;
+
+ mem_size = sizeof(struct i40iw_hmc_sd_entry) *
+ (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
+ ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: failed to allocate memory for sd_entry buffer\n",
+ __func__);
+ return ret_code;
+ }
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+
+ return ret_code;
+}
+
+/**
+ * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
+ * @dev: rdma device
+ * @pcmdinfo: cqp command info
+ */
+static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
+ struct cqp_commands_info *pcmdinfo)
+{
+ enum i40iw_status_code status;
+ struct i40iw_dma_mem values_mem;
+
+ dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
+ switch (pcmdinfo->cqp_cmd) {
+ case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
+ status = i40iw_sc_del_local_mac_ipaddr_entry(
+ pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
+ pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
+ pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
+ pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
+ pcmdinfo->post_sq);
+ break;
+ case OP_CEQ_DESTROY:
+ status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
+ pcmdinfo->in.u.ceq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_AEQ_DESTROY:
+ status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
+ pcmdinfo->in.u.aeq_destroy.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_DELETE_ARP_CACHE_ENTRY:
+ status = i40iw_sc_del_arp_cache_entry(
+ pcmdinfo->in.u.del_arp_cache_entry.cqp,
+ pcmdinfo->in.u.del_arp_cache_entry.scratch,
+ pcmdinfo->in.u.del_arp_cache_entry.arp_index,
+ pcmdinfo->post_sq);
+ break;
+ case OP_MANAGE_APBVT_ENTRY:
+ status = i40iw_sc_manage_apbvt_entry(
+ pcmdinfo->in.u.manage_apbvt_entry.cqp,
+ &pcmdinfo->in.u.manage_apbvt_entry.info,
+ pcmdinfo->in.u.manage_apbvt_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_CEQ_CREATE:
+ status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
+ pcmdinfo->in.u.ceq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_AEQ_CREATE:
+ status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
+ pcmdinfo->in.u.aeq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
+ status = i40iw_sc_alloc_local_mac_ipaddr_entry(
+ pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
+ pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
+ status = i40iw_sc_add_local_mac_ipaddr_entry(
+ pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
+ &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
+ pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_MANAGE_QHASH_TABLE_ENTRY:
+ status = i40iw_sc_manage_qhash_table_entry(
+ pcmdinfo->in.u.manage_qhash_table_entry.cqp,
+ &pcmdinfo->in.u.manage_qhash_table_entry.info,
+ pcmdinfo->in.u.manage_qhash_table_entry.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_QP_MODIFY:
+ status = i40iw_sc_qp_modify(
+ pcmdinfo->in.u.qp_modify.qp,
+ &pcmdinfo->in.u.qp_modify.info,
+ pcmdinfo->in.u.qp_modify.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_QP_UPLOAD_CONTEXT:
+ status = i40iw_sc_qp_upload_context(
+ pcmdinfo->in.u.qp_upload_context.dev,
+ &pcmdinfo->in.u.qp_upload_context.info,
+ pcmdinfo->in.u.qp_upload_context.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_CQ_CREATE:
+ status = i40iw_sc_cq_create(
+ pcmdinfo->in.u.cq_create.cq,
+ pcmdinfo->in.u.cq_create.scratch,
+ pcmdinfo->in.u.cq_create.check_overflow,
+ pcmdinfo->post_sq);
+ break;
+ case OP_CQ_DESTROY:
+ status = i40iw_sc_cq_destroy(
+ pcmdinfo->in.u.cq_destroy.cq,
+ pcmdinfo->in.u.cq_destroy.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_QP_CREATE:
+ status = i40iw_sc_qp_create(
+ pcmdinfo->in.u.qp_create.qp,
+ &pcmdinfo->in.u.qp_create.info,
+ pcmdinfo->in.u.qp_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_QP_DESTROY:
+ status = i40iw_sc_qp_destroy(
+ pcmdinfo->in.u.qp_destroy.qp,
+ pcmdinfo->in.u.qp_destroy.scratch,
+ pcmdinfo->in.u.qp_destroy.remove_hash_idx,
+ pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_ALLOC_STAG:
+ status = i40iw_sc_alloc_stag(
+ pcmdinfo->in.u.alloc_stag.dev,
+ &pcmdinfo->in.u.alloc_stag.info,
+ pcmdinfo->in.u.alloc_stag.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_MR_REG_NON_SHARED:
+ status = i40iw_sc_mr_reg_non_shared(
+ pcmdinfo->in.u.mr_reg_non_shared.dev,
+ &pcmdinfo->in.u.mr_reg_non_shared.info,
+ pcmdinfo->in.u.mr_reg_non_shared.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_DEALLOC_STAG:
+ status = i40iw_sc_dealloc_stag(
+ pcmdinfo->in.u.dealloc_stag.dev,
+ &pcmdinfo->in.u.dealloc_stag.info,
+ pcmdinfo->in.u.dealloc_stag.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_MW_ALLOC:
+ status = i40iw_sc_mw_alloc(
+ pcmdinfo->in.u.mw_alloc.dev,
+ pcmdinfo->in.u.mw_alloc.scratch,
+ pcmdinfo->in.u.mw_alloc.mw_stag_index,
+ pcmdinfo->in.u.mw_alloc.pd_id,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_QP_FLUSH_WQES:
+ status = i40iw_sc_qp_flush_wqes(
+ pcmdinfo->in.u.qp_flush_wqes.qp,
+ &pcmdinfo->in.u.qp_flush_wqes.info,
+ pcmdinfo->in.u.qp_flush_wqes.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_ADD_ARP_CACHE_ENTRY:
+ status = i40iw_sc_add_arp_cache_entry(
+ pcmdinfo->in.u.add_arp_cache_entry.cqp,
+ &pcmdinfo->in.u.add_arp_cache_entry.info,
+ pcmdinfo->in.u.add_arp_cache_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_MANAGE_PUSH_PAGE:
+ status = i40iw_sc_manage_push_page(
+ pcmdinfo->in.u.manage_push_page.cqp,
+ &pcmdinfo->in.u.manage_push_page.info,
+ pcmdinfo->in.u.manage_push_page.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_UPDATE_PE_SDS:
+ /* case I40IW_CQP_OP_UPDATE_PE_SDS */
+ status = i40iw_update_pe_sds(
+ pcmdinfo->in.u.update_pe_sds.dev,
+ &pcmdinfo->in.u.update_pe_sds.info,
+ pcmdinfo->in.u.update_pe_sds.scratch);
+
+ break;
+ case OP_MANAGE_HMC_PM_FUNC_TABLE:
+ status = i40iw_sc_manage_hmc_pm_func_table(
+ pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
+ pcmdinfo->in.u.manage_hmc_pm.scratch,
+ (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
+ pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
+ true);
+ break;
+ case OP_SUSPEND:
+ status = i40iw_sc_suspend_qp(
+ pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case OP_RESUME:
+ status = i40iw_sc_resume_qp(
+ pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case OP_MANAGE_VF_PBLE_BP:
+ status = i40iw_manage_vf_pble_bp(
+ pcmdinfo->in.u.manage_vf_pble_bp.cqp,
+ &pcmdinfo->in.u.manage_vf_pble_bp.info,
+ pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
+ break;
+ case OP_QUERY_FPM_VALUES:
+ values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
+ values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
+ status = i40iw_sc_query_fpm_values(
+ pcmdinfo->in.u.query_fpm_values.cqp,
+ pcmdinfo->in.u.query_fpm_values.scratch,
+ pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
+ &values_mem, true, I40IW_CQP_WAIT_EVENT);
+ break;
+ case OP_COMMIT_FPM_VALUES:
+ values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
+ values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
+ status = i40iw_sc_commit_fpm_values(
+ pcmdinfo->in.u.commit_fpm_values.cqp,
+ pcmdinfo->in.u.commit_fpm_values.scratch,
+ pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
+ &values_mem,
+ true,
+ I40IW_CQP_WAIT_EVENT);
+ break;
+ default:
+ status = I40IW_NOT_SUPPORTED;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40iw_process_cqp_cmd - process or queue a cqp command
+ * @dev: sc device struct
+ * @pcmdinfo: cqp command info
+ */
+enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
+ struct cqp_commands_info *pcmdinfo)
+{
+ enum i40iw_status_code status = 0;
+ unsigned long flags;
+
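+ /*
+ * execute immediately only if nothing is already backlogged and
+ * the CQP ring has room; otherwise queue the command for
+ * i40iw_process_bh to replay later
+ */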
+ spin_lock_irqsave(&dev->cqp_lock, flags);
+ if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
+ status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
+ else
+ list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
+
+/**
+ * i40iw_process_bh - called from tasklet to process the cqp command backlog
+ * @dev: sc device struct
+ */
+enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
+{
+ enum i40iw_status_code status = 0;
+ struct cqp_commands_info *pcmdinfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cqp_lock, flags);
+ while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
+ pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+
+ status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
+ if (status)
+ break;
+ }
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
+
+/**
+ * i40iw_iwarp_opcode - extract the iwarp opcode from an incoming packet
+ * @info: aeq info for the packet
+ * @pkt: packet for error
+ */
+static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
+{
+ u16 *mpa;
+ u32 opcode = 0xffffffff;
+
+ if (info->q2_data_written) {
+ mpa = (u16 *)pkt;
+ opcode = ntohs(mpa[1]) & 0xf;
+ }
+ return opcode;
+}
+
+/**
+ * i40iw_locate_mpa - return pointer to mpa in the pkt
+ * @pkt: packet with data
+ */
+static u8 *i40iw_locate_mpa(u8 *pkt)
+{
+ /* skip over ethernet header */
+ pkt += I40IW_MAC_HLEN;
+
+ /* skip over IP and TCP headers; the IHL and TCP data offset
+ * fields are both expressed in 32-bit words
+ */
+ pkt += 4 * (pkt[0] & 0x0f);
+ pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+ return pkt;
+}
+
+/**
+ * i40iw_setup_termhdr - set up the terminate header
+ * @qp: sc qp ptr for pkt
+ * @hdr: term hdr
+ * @opcode: flush opcode for termhdr
+ * @layer_etype: error layer + error type
+ * @err: error code in the header
+ */
+static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
+ struct i40iw_terminate_hdr *hdr,
+ enum i40iw_flush_opcode opcode,
+ u8 layer_etype,
+ u8 err)
+{
+ qp->flush_code = opcode;
+ hdr->layer_etype = layer_etype;
+ hdr->error_code = err;
+}
+
+/**
+ * i40iw_bld_terminate_hdr - build terminate message header
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
+ struct i40iw_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ u16 ddp_seg_len;
+ int copy_len = 0;
+ u8 is_tagged = 0;
+ u32 opcode;
+ struct i40iw_terminate_hdr *termhdr;
+
+ termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
+ memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
+
+ if (info->q2_data_written) {
+ /* Use data from offending packet to fill in ddp & rdma hdrs */
+ pkt = i40iw_locate_mpa(pkt);
+ ddp_seg_len = ntohs(*(u16 *)pkt);
+ if (ddp_seg_len) {
+ copy_len = 2;
+ termhdr->hdrct = DDP_LEN_FLAG;
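+ /* bit 7 of the DDP control byte marks a tagged message */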
+ if (pkt[2] & 0x80) {
+ is_tagged = 1;
+ if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+ copy_len += TERM_DDP_LEN_TAGGED;
+ termhdr->hdrct |= DDP_HDR_FLAG;
+ }
+ } else {
+ if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+ copy_len += TERM_DDP_LEN_UNTAGGED;
+ termhdr->hdrct |= DDP_HDR_FLAG;
+ }
+
+ if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
+ if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
+ copy_len += TERM_RDMA_LEN;
+ termhdr->hdrct |= RDMA_HDR_FLAG;
+ }
+ }
+ }
+ }
+ }
+
+ opcode = i40iw_iwarp_opcode(info, pkt);
+
+ switch (info->ae_id) {
+ case I40IW_AE_AMP_UNALLOCATED_STAG:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
+ else
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
+ break;
+ case I40IW_AE_AMP_BOUNDS_VIOLATION:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ if (info->q2_data_written)
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
+ else
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
+ break;
+ case I40IW_AE_AMP_BAD_PD:
+ switch (opcode) {
+ case I40IW_OP_TYPE_RDMA_WRITE:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
+ break;
+ case I40IW_OP_TYPE_SEND_INV:
+ case I40IW_OP_TYPE_SEND_SOL_INV:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
+ }
+ break;
+ case I40IW_AE_AMP_INVALID_STAG:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
+ break;
+ case I40IW_AE_AMP_BAD_QP:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
+ break;
+ case I40IW_AE_AMP_BAD_STAG_KEY:
+ case I40IW_AE_AMP_BAD_STAG_INDEX:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ switch (opcode) {
+ case I40IW_OP_TYPE_SEND_INV:
+ case I40IW_OP_TYPE_SEND_SOL_INV:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
+ }
+ break;
+ case I40IW_AE_AMP_RIGHTS_VIOLATION:
+ case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case I40IW_AE_PRIV_OPERATION_DENIED:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
+ break;
+ case I40IW_AE_AMP_TO_WRAP:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
+ break;
+ case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
+ break;
+ case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
+ break;
+ case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
+ case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
+ break;
+ case I40IW_AE_LCE_QP_CATASTROPHIC:
+ case I40IW_AE_DDP_NO_L_BIT:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
+ break;
+ case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
+ case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
+ break;
+ case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
+ break;
+ case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+ if (is_tagged)
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
+ else
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
+ break;
+ case I40IW_AE_DDP_UBE_INVALID_MO:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
+ break;
+ case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
+ break;
+ case I40IW_AE_DDP_UBE_INVALID_QN:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
+ break;
+ case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
+ break;
+ case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
+ break;
+ default:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
+ break;
+ }
+
+ if (copy_len)
+ memcpy(termhdr + 1, pkt, copy_len);
+
+ /* qp->flush_code was set by i40iw_setup_termhdr above */
+ if (qp->flush_code && !info->in_rdrsp_wr)
+ qp->sq_flush = (info->sq) ? true : false;
+
+ return sizeof(struct i40iw_terminate_hdr) + copy_len;
+}
+
+/**
+ * i40iw_terminate_send_fin() - Send fin for terminate message
+ * @qp: qp associated with received terminate AE
+ */
+void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
+{
+ /* Send the fin only */
+ i40iw_term_modify_qp(qp,
+ I40IW_QP_STATE_TERMINATE,
+ I40IWQP_TERM_SEND_FIN_ONLY,
+ 0);
+}
+
+/**
+ * i40iw_terminate_connection() - handle a bad AE and send a terminate to the remote QP
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
+{
+ u8 termlen = 0;
+
+ if (qp->term_flags & I40IW_TERM_SENT)
+ return; /* Sanity check */
+
+ /* Eventtype can change from bld_terminate_hdr */
+ qp->eventtype = TERM_EVENT_QP_FATAL;
+ termlen = i40iw_bld_terminate_hdr(qp, info);
+ i40iw_terminate_start_timer(qp);
+ qp->term_flags |= I40IW_TERM_SENT;
+ i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
+ I40IWQP_TERM_SEND_TERM_ONLY, termlen);
+}
+
+/**
+ * i40iw_terminate_received - handle terminate received AE
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ u32 *mpa;
+ u8 ddp_ctl;
+ u8 rdma_ctl;
+ u16 aeq_id = 0;
+ struct i40iw_terminate_hdr *termhdr;
+
+ mpa = (u32 *)i40iw_locate_mpa(pkt);
+ if (info->q2_data_written) {
+ /* did not validate the frame - do it now */
+ ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
+ rdma_ctl = ntohl(mpa[0]) & 0xff;
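+ /*
+ * a valid terminate is an untagged DDP v1 message on queue 2
+ * with MSN 1, MO 0 and RDMAP v1; anything else is treated as a
+ * bad terminate and answered with our own terminate
+ */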
+ if ((ddp_ctl & 0xc0) != 0x40)
+ aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
+ else if ((ddp_ctl & 0x03) != 1)
+ aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
+ else if (ntohl(mpa[2]) != 2)
+ aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
+ else if (ntohl(mpa[3]) != 1)
+ aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
+ else if (ntohl(mpa[4]) != 0)
+ aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
+ else if ((rdma_ctl & 0xc0) != 0x40)
+ aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+ info->ae_id = aeq_id;
+ if (info->ae_id) {
+ /* Bad terminate recvd - send back a terminate */
+ i40iw_terminate_connection(qp, info);
+ return;
+ }
+ }
+
+ qp->term_flags |= I40IW_TERM_RCVD;
+ qp->eventtype = TERM_EVENT_QP_FATAL;
+ termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
+ if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
+ termhdr->layer_etype == RDMAP_REMOTE_OP) {
+ i40iw_terminate_done(qp, 0);
+ } else {
+ i40iw_terminate_start_timer(qp);
+ i40iw_terminate_send_fin(qp);
+ }
+}
+
+/**
+ * i40iw_hw_stat_init - Initialize HW stats table
+ * @devstat: pestat struct
+ * @fcn_idx: PCI fn id
+ * @hw: PF i40iw_hw structure.
+ * @is_pf: Is it a PF?
+ *
+ * Populate the HW stat table with register offset addr for each
+ * stat and snapshot the initial counter values.
+ */
+static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
+ u8 fcn_idx,
+ struct i40iw_hw *hw, bool is_pf)
+{
+ u32 stat_reg_offset;
+ u32 stat_index;
+ struct i40iw_dev_hw_stat_offsets *stat_table =
+ &devstat->hw_stat_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+
+ devstat->hw = hw;
+
+ if (is_pf) {
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+ I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+ I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+ I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+ I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+ I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+ I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+ I40E_GLPES_PFTCPRTXSEG(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+ I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+ I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
+
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+ I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+ I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+ I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+ I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+ I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+ I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+ I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+ I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+ I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+ I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+ I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+ I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+ I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+ I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+ I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+ I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+ I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+ I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+ I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+ I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+ I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+ I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+ I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+ I40E_GLPES_PFRDMAVINVLO(fcn_idx);
+ } else {
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+ I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+ I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+ I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+ I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+ I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+ I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+ I40E_GLPES_VFTCPRTXSEG(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+ I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+ I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
+
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+ I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+ I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+ I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+ I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+ I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+ I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+ I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+ I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+ I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+ I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+ I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+ I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+ I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+ I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+ I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+ I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+ I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+ I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+ I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+ I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+ I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+ I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+ I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+ I40E_GLPES_VFRDMAVINVLO(fcn_idx);
+ }
+
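+ /* snapshot the current counter values so the first delta is correct */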
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stat_index++) {
+ stat_reg_offset = stat_table->stat_offset_64[stat_index];
+ last_rd_stats->stat_value_64[stat_index] =
+ readq(devstat->hw->hw_addr + stat_reg_offset);
+ }
+
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stat_index++) {
+ stat_reg_offset = stat_table->stat_offset_32[stat_index];
+ last_rd_stats->stat_value_32[stat_index] =
+ i40iw_rd32(devstat->hw, stat_reg_offset);
+ }
+}
+
+/**
+ * i40iw_hw_stat_read_32 - read a 32-bit HW stat counter, accounting for roll-over
+ * @devstat: pestat struct
+ * @index: index in HW stat table which contains offset reg-addr
+ * @value: hw stat value
+ */
+static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
+ enum i40iw_hw_stat_index_32b index,
+ u64 *value)
+{
+ struct i40iw_dev_hw_stat_offsets *stat_table =
+ &devstat->hw_stat_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+ struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+ u64 new_stat_value = 0;
+ u32 stat_reg_offset = stat_table->stat_offset_32[index];
+
+ new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
+ /* roll-over case */
+ if (new_stat_value < last_rd_stats->stat_value_32[index])
+ hw_stats->stat_value_32[index] += new_stat_value;
+ else
+ hw_stats->stat_value_32[index] +=
+ new_stat_value - last_rd_stats->stat_value_32[index];
+ last_rd_stats->stat_value_32[index] = new_stat_value;
+ *value = hw_stats->stat_value_32[index];
+}
+
+/**
+ * i40iw_hw_stat_read_64 - read a 64-bit HW stat counter, accounting for roll-over
+ * @devstat: pestat struct
+ * @index: index in HW stat table which contains offset reg-addr
+ * @value: hw stat value
+ */
+static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
+ enum i40iw_hw_stat_index_64b index,
+ u64 *value)
+{
+ struct i40iw_dev_hw_stat_offsets *stat_table =
+ &devstat->hw_stat_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+ struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+ u64 new_stat_value = 0;
+ u32 stat_reg_offset = stat_table->stat_offset_64[index];
+
+ new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
+ /* roll-over case */
+ if (new_stat_value < last_rd_stats->stat_value_64[index])
+ hw_stats->stat_value_64[index] += new_stat_value;
+ else
+ hw_stats->stat_value_64[index] +=
+ new_stat_value - last_rd_stats->stat_value_64[index];
+ last_rd_stats->stat_value_64[index] = new_stat_value;
+ *value = hw_stats->stat_value_64[index];
+}
+
+/**
+ * i40iw_hw_stat_read_all - read all HW stat counters
+ * @devstat: pestat struct
+ * @stat_values: hw stats structure
+ *
+ * Read all the HW stat counters, updating the hw_stats structure of the
+ * passed-in dev's pestat as well as the copy in stat_values.
+ */
+static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
+ struct i40iw_dev_hw_stats *stat_values)
+{
+ u32 stat_index;
+
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stat_index++)
+ i40iw_hw_stat_read_32(devstat, stat_index,
+ &stat_values->stat_value_32[stat_index]);
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stat_index++)
+ i40iw_hw_stat_read_64(devstat, stat_index,
+ &stat_values->stat_value_64[stat_index]);
+}
+
+/**
+ * i40iw_hw_stat_refresh_all - Update all HW stat structs
+ * @devstat: pestat struct
+ *
+ * Read all the HW stat counters to refresh the values in the hw_stats
+ * structure of the passed-in dev's pestat.
+ */
+static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
+{
+ u64 stat_value;
+ u32 stat_index;
+
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stat_index++)
+ i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stat_index++)
+ i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
+}
+
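+/*
+ * the ops tables below use positional initializers, so the entries must
+ * stay in the same order as the function pointers declared in the
+ * corresponding ops structures
+ */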
+static struct i40iw_cqp_ops iw_cqp_ops = {
+ i40iw_sc_cqp_init,
+ i40iw_sc_cqp_create,
+ i40iw_sc_cqp_post_sq,
+ i40iw_sc_cqp_get_next_send_wqe,
+ i40iw_sc_cqp_destroy,
+ i40iw_sc_poll_for_cqp_op_done
+};
+
+static struct i40iw_ccq_ops iw_ccq_ops = {
+ i40iw_sc_ccq_init,
+ i40iw_sc_ccq_create,
+ i40iw_sc_ccq_destroy,
+ i40iw_sc_ccq_create_done,
+ i40iw_sc_ccq_get_cqe_info,
+ i40iw_sc_ccq_arm
+};
+
+static struct i40iw_ceq_ops iw_ceq_ops = {
+ i40iw_sc_ceq_init,
+ i40iw_sc_ceq_create,
+ i40iw_sc_cceq_create_done,
+ i40iw_sc_cceq_destroy_done,
+ i40iw_sc_cceq_create,
+ i40iw_sc_ceq_destroy,
+ i40iw_sc_process_ceq
+};
+
+static struct i40iw_aeq_ops iw_aeq_ops = {
+ i40iw_sc_aeq_init,
+ i40iw_sc_aeq_create,
+ i40iw_sc_aeq_destroy,
+ i40iw_sc_get_next_aeqe,
+ i40iw_sc_repost_aeq_entries,
+ i40iw_sc_aeq_create_done,
+ i40iw_sc_aeq_destroy_done
+};
+
+/* iwarp pd ops */
+static struct i40iw_pd_ops iw_pd_ops = {
+ i40iw_sc_pd_init,
+};
+
+static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
+ i40iw_sc_qp_init,
+ i40iw_sc_qp_create,
+ i40iw_sc_qp_modify,
+ i40iw_sc_qp_destroy,
+ i40iw_sc_qp_flush_wqes,
+ i40iw_sc_qp_upload_context,
+ i40iw_sc_qp_setctx,
+ i40iw_sc_send_lsmm,
+ i40iw_sc_send_lsmm_nostag,
+ i40iw_sc_send_rtt,
+ i40iw_sc_post_wqe0,
+};
+
+static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
+ i40iw_sc_cq_init,
+ i40iw_sc_cq_create,
+ i40iw_sc_cq_destroy,
+ i40iw_sc_cq_modify,
+};
+
+static struct i40iw_mr_ops iw_mr_ops = {
+ i40iw_sc_alloc_stag,
+ i40iw_sc_mr_reg_non_shared,
+ i40iw_sc_mr_reg_shared,
+ i40iw_sc_dealloc_stag,
+ i40iw_sc_query_stag,
+ i40iw_sc_mw_alloc
+};
+
+static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
+ i40iw_sc_manage_push_page,
+ i40iw_sc_manage_hmc_pm_func_table,
+ i40iw_sc_set_hmc_resource_profile,
+ i40iw_sc_commit_fpm_values,
+ i40iw_sc_query_fpm_values,
+ i40iw_sc_static_hmc_pages_allocated,
+ i40iw_sc_add_arp_cache_entry,
+ i40iw_sc_del_arp_cache_entry,
+ i40iw_sc_query_arp_cache_entry,
+ i40iw_sc_manage_apbvt_entry,
+ i40iw_sc_manage_qhash_table_entry,
+ i40iw_sc_alloc_local_mac_ipaddr_entry,
+ i40iw_sc_add_local_mac_ipaddr_entry,
+ i40iw_sc_del_local_mac_ipaddr_entry,
+ i40iw_sc_cqp_nop,
+ i40iw_sc_commit_fpm_values_done,
+ i40iw_sc_query_fpm_values_done,
+ i40iw_sc_manage_hmc_pm_func_table_done,
+ i40iw_sc_suspend_qp,
+ i40iw_sc_resume_qp
+};
+
+static struct i40iw_hmc_ops iw_hmc_ops = {
+ i40iw_sc_init_iw_hmc,
+ i40iw_sc_parse_fpm_query_buf,
+ i40iw_sc_configure_iw_fpm,
+ i40iw_sc_parse_fpm_commit_buf,
+ i40iw_sc_create_hmc_obj,
+ i40iw_sc_del_hmc_obj,
+ NULL,
+ NULL
+};
+
+static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
+ i40iw_hw_stat_init,
+ i40iw_hw_stat_read_32,
+ i40iw_hw_stat_read_64,
+ i40iw_hw_stat_read_all,
+ i40iw_hw_stat_refresh_all
+};
+
+/**
+ * i40iw_device_init_pestat - Initialize the pestat structure
+ * @devstat: pestat struct
+ */
+enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
+{
+ devstat->ops = iw_device_pestat_ops;
+ return 0;
+}
+
+/**
+ * i40iw_device_init - Initialize IWARP device
+ * @dev: IWARP device pointer
+ * @info: IWARP init info
+ */
+enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+ struct i40iw_device_init_info *info)
+{
+ u32 val;
+ u32 vchnl_ver = 0;
+ u16 hmc_fcn = 0;
+ enum i40iw_status_code ret_code = 0;
+ u8 db_size;
+
+ spin_lock_init(&dev->cqp_lock);
+ INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for the cqp commands backlog. */
+
+ i40iw_device_init_uk(&dev->dev_uk);
+
+ dev->debug_mask = info->debug_mask;
+
+ ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_DEV,
+ "%s: i40iw_device_init_pestat failed\n", __func__);
+ return ret_code;
+ }
+ dev->hmc_fn_id = info->hmc_fn_id;
+ dev->qs_handle = info->qs_handle;
+ dev->exception_lan_queue = info->exception_lan_queue;
+ dev->is_pf = info->is_pf;
+
+ dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
+ dev->fpm_query_buf = info->fpm_query_buf;
+
+ dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
+ dev->fpm_commit_buf = info->fpm_commit_buf;
+
+ dev->hw = info->hw;
+ dev->hw->hw_addr = info->bar0;
+
+ val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
+ dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
+
+ if (dev->is_pf) {
+ dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
+ dev->hmc_fn_id, dev->hw, true);
+ spin_lock_init(&dev->dev_pestat.stats_lock);
+ /* start the periodic stats timer */
+ i40iw_hw_stats_start_timer(dev);
+ val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
+ db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
+ if ((db_size != I40IW_PE_DB_SIZE_4M) &&
+ (db_size != I40IW_PE_DB_SIZE_8M)) {
+ i40iw_debug(dev, I40IW_DEBUG_DEV,
+ "%s: PE doorbell is not enabled in CSR val 0x%x\n",
+ __func__, val);
+ ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
+ return ret_code;
+ }
+ dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
+ dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
+ } else {
+ dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
+ }
+
+ dev->cqp_ops = &iw_cqp_ops;
+ dev->ccq_ops = &iw_ccq_ops;
+ dev->ceq_ops = &iw_ceq_ops;
+ dev->aeq_ops = &iw_aeq_ops;
+ dev->cqp_misc_ops = &iw_cqp_misc_ops;
+ dev->iw_pd_ops = &iw_pd_ops;
+ dev->iw_priv_qp_ops = &iw_priv_qp_ops;
+ dev->iw_priv_cq_ops = &iw_priv_cq_ops;
+ dev->mr_ops = &iw_mr_ops;
+ dev->hmc_ops = &iw_hmc_ops;
+ dev->vchnl_if.vchnl_send = info->vchnl_send;
+ if (dev->vchnl_if.vchnl_send)
+ dev->vchnl_up = true;
+ else
+ dev->vchnl_up = false;
+ if (!dev->is_pf) {
+ dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
+ ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
+ if (!ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_DEV,
+ "%s: Get Channel version rc = 0x%0x, version is %u\n",
+ __func__, ret_code, vchnl_ver);
+ ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
+ if (!ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_DEV,
+ "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
+ __func__, ret_code, hmc_fcn);
+ dev->hmc_fn_id = (u8)hmc_fcn;
+ }
+ }
+ }
+ dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
+
+ return ret_code;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
new file mode 100644
index 000000000000..aab88d65f805
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -0,0 +1,1713 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_D_H
+#define I40IW_D_H
+
+#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
+#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
+
+#define I40IW_PUSH_OFFSET (4 * 1024 * 1024)
+#define I40IW_PF_FIRST_PUSH_PAGE_INDEX 16
+#define I40IW_VF_PUSH_OFFSET ((8 + 64) * 1024)
+#define I40IW_VF_FIRST_PUSH_PAGE_INDEX 2
+
+#define I40IW_PE_DB_SIZE_4M 1
+#define I40IW_PE_DB_SIZE_8M 2
+
+#define I40IW_DDP_VER 1
+#define I40IW_RDMAP_VER 1
+
+#define I40IW_RDMA_MODE_RDMAC 0
+#define I40IW_RDMA_MODE_IETF 1
+
+#define I40IW_QP_STATE_INVALID 0
+#define I40IW_QP_STATE_IDLE 1
+#define I40IW_QP_STATE_RTS 2
+#define I40IW_QP_STATE_CLOSING 3
+#define I40IW_QP_STATE_RESERVED 4
+#define I40IW_QP_STATE_TERMINATE 5
+#define I40IW_QP_STATE_ERROR 6
+
+#define I40IW_STAG_STATE_INVALID 0
+#define I40IW_STAG_STATE_VALID 1
+
+#define I40IW_STAG_TYPE_SHARED 0
+#define I40IW_STAG_TYPE_NONSHARED 1
+
+#define I40IW_MAX_USER_PRIORITY 8
+
+#define LS_64_1(val, bits) ((u64)(uintptr_t)(val) << (bits))
+#define RS_64_1(val, bits) ((u64)(uintptr_t)(val) >> (bits))
+#define LS_32_1(val, bits) ((u32)((val) << (bits)))
+#define RS_32_1(val, bits) ((u32)((val) >> (bits)))
+#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+
+#define LS_64(val, field) (((u64)(val) << field ## _SHIFT) & (field ## _MASK))
+
+#define RS_64(val, field) ((u64)((val) & field ## _MASK) >> field ## _SHIFT)
+#define LS_32(val, field) (((val) << field ## _SHIFT) & (field ## _MASK))
+#define RS_32(val, field) (((val) & field ## _MASK) >> field ## _SHIFT)
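+
+/*
+ * Illustrative sketch only, not part of this patch: fields are packed
+ * into a 64-bit WQE word by OR-ing LS_64() results together, and pulled
+ * back out of a completion word with RS_64().  Using hypothetical
+ * variables qp_id and cqe_word with the field defines below:
+ *
+ *	u64 header = LS_64(qp_id, I40IW_CQPSQ_QP_QPID) |
+ *		     LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
+ *		     LS_64(1, I40IW_CQPSQ_WQEVALID);
+ *
+ *	u16 min_err = (u16)RS_64(cqe_word, I40IW_CQ_MINERR);
+ */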
+
+#define TERM_DDP_LEN_TAGGED 14
+#define TERM_DDP_LEN_UNTAGGED 18
+#define TERM_RDMA_LEN 28
+#define RDMA_OPCODE_MASK 0x0f
+#define RDMA_READ_REQ_OPCODE 1
+#define Q2_BAD_FRAME_OFFSET 72
+#define CQE_MAJOR_DRV 0x8000
+
+#define I40IW_TERM_SENT 0x01
+#define I40IW_TERM_RCVD 0x02
+#define I40IW_TERM_DONE 0x04
+#define I40IW_MAC_HLEN 14
+
+#define I40IW_INVALID_WQE_INDEX 0xffffffff
+
+#define I40IW_CQP_WAIT_POLL_REGS 1
+#define I40IW_CQP_WAIT_POLL_CQ 2
+#define I40IW_CQP_WAIT_EVENT 3
+
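+/* A CQP WQE is a fixed 64 bytes; zero the slot before building a request */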
+#define I40IW_CQP_INIT_WQE(wqe) memset((wqe), 0, 64)
+
+#define I40IW_GET_CURRENT_CQ_ELEMENT(_cq) \
+ ( \
+ &((_cq)->cq_base[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \
+ )
+#define I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(_cq) \
+ ( \
+ &(((struct i40iw_extended_cqe *) \
+ ((_cq)->cq_base))[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \
+ )
+
+#define I40IW_GET_CURRENT_AEQ_ELEMENT(_aeq) \
+ ( \
+ &(_aeq)->aeqe_base[I40IW_RING_GETCURRENT_TAIL((_aeq)->aeq_ring)] \
+ )
+
+#define I40IW_GET_CURRENT_CEQ_ELEMENT(_ceq) \
+ ( \
+ &(_ceq)->ceqe_base[I40IW_RING_GETCURRENT_TAIL((_ceq)->ceq_ring)] \
+ )
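+
+/*
+ * Note: the GET_CURRENT_*_ELEMENT macros only compute a pointer to the
+ * current queue entry; they do not advance the ring.  Consumers are
+ * expected to check the entry's valid bit (e.g. I40IW_CQ_VALID_MASK or
+ * I40IW_AEQE_VALID_MASK below) and then advance with the
+ * I40IW_RING_MOVE_* macros defined later in this file.
+ */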
+
+#define I40IW_AE_SOURCE_RQ 0x1
+#define I40IW_AE_SOURCE_RQ_0011 0x3
+
+#define I40IW_AE_SOURCE_CQ 0x2
+#define I40IW_AE_SOURCE_CQ_0110 0x6
+#define I40IW_AE_SOURCE_CQ_1010 0xA
+#define I40IW_AE_SOURCE_CQ_1110 0xE
+
+#define I40IW_AE_SOURCE_SQ 0x5
+#define I40IW_AE_SOURCE_SQ_0111 0x7
+
+#define I40IW_AE_SOURCE_IN_RR_WR 0x9
+#define I40IW_AE_SOURCE_IN_RR_WR_1011 0xB
+#define I40IW_AE_SOURCE_OUT_RR 0xD
+#define I40IW_AE_SOURCE_OUT_RR_1111 0xF
+
+#define I40IW_TCP_STATE_NON_EXISTENT 0
+#define I40IW_TCP_STATE_CLOSED 1
+#define I40IW_TCP_STATE_LISTEN 2
+#define I40IW_TCP_STATE_SYN_SENT 3
+#define I40IW_TCP_STATE_SYN_RECEIVED 4
+#define I40IW_TCP_STATE_ESTABLISHED 5
+#define I40IW_TCP_STATE_CLOSE_WAIT 6
+#define I40IW_TCP_STATE_FIN_WAIT_1 7
+#define I40IW_TCP_STATE_CLOSING 8
+#define I40IW_TCP_STATE_LAST_ACK 9
+#define I40IW_TCP_STATE_FIN_WAIT_2 10
+#define I40IW_TCP_STATE_TIME_WAIT 11
+#define I40IW_TCP_STATE_RESERVED_1 12
+#define I40IW_TCP_STATE_RESERVED_2 13
+#define I40IW_TCP_STATE_RESERVED_3 14
+#define I40IW_TCP_STATE_RESERVED_4 15
+
+/* ILQ CQP hash table fields */
+#define I40IW_CQPSQ_QHASH_VLANID_SHIFT 32
+#define I40IW_CQPSQ_QHASH_VLANID_MASK \
+ ((u64)0xfff << I40IW_CQPSQ_QHASH_VLANID_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_QPN_SHIFT 32
+#define I40IW_CQPSQ_QHASH_QPN_MASK \
+ ((u64)0x3ffff << I40IW_CQPSQ_QHASH_QPN_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT 0
+#define I40IW_CQPSQ_QHASH_QS_HANDLE_MASK ((u64)0x3ff << I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT 16
+#define I40IW_CQPSQ_QHASH_SRC_PORT_MASK \
+ ((u64)0xffff << I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT 0
+#define I40IW_CQPSQ_QHASH_DEST_PORT_MASK \
+ ((u64)0xffff << I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR0_SHIFT 32
+#define I40IW_CQPSQ_QHASH_ADDR0_MASK \
+ ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR0_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR1_SHIFT 0
+#define I40IW_CQPSQ_QHASH_ADDR1_MASK \
+ ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR1_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR2_SHIFT 32
+#define I40IW_CQPSQ_QHASH_ADDR2_MASK \
+ ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR2_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR3_SHIFT 0
+#define I40IW_CQPSQ_QHASH_ADDR3_MASK \
+ ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR3_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_WQEVALID_SHIFT 63
+#define I40IW_CQPSQ_QHASH_WQEVALID_MASK \
+ ((u64)0x1 << I40IW_CQPSQ_QHASH_WQEVALID_SHIFT)
+#define I40IW_CQPSQ_QHASH_OPCODE_SHIFT 32
+#define I40IW_CQPSQ_QHASH_OPCODE_MASK \
+ ((u64)0x3f << I40IW_CQPSQ_QHASH_OPCODE_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_MANAGE_SHIFT 61
+#define I40IW_CQPSQ_QHASH_MANAGE_MASK \
+ ((u64)0x3 << I40IW_CQPSQ_QHASH_MANAGE_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT 60
+#define I40IW_CQPSQ_QHASH_IPV4VALID_MASK \
+ ((u64)0x1 << I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_VLANVALID_SHIFT 59
+#define I40IW_CQPSQ_QHASH_VLANVALID_MASK \
+ ((u64)0x1 << I40IW_CQPSQ_QHASH_VLANVALID_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT 42
+#define I40IW_CQPSQ_QHASH_ENTRYTYPE_MASK \
+ ((u64)0x7 << I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT)
+/* CQP Host Context */
+#define I40IW_CQPHC_EN_DC_TCP_SHIFT 0
+#define I40IW_CQPHC_EN_DC_TCP_MASK (1UL << I40IW_CQPHC_EN_DC_TCP_SHIFT)
+
+#define I40IW_CQPHC_SQSIZE_SHIFT 8
+#define I40IW_CQPHC_SQSIZE_MASK (0xfUL << I40IW_CQPHC_SQSIZE_SHIFT)
+
+#define I40IW_CQPHC_DISABLE_PFPDUS_SHIFT 1
+#define I40IW_CQPHC_DISABLE_PFPDUS_MASK (0x1UL << I40IW_CQPHC_DISABLE_PFPDUS_SHIFT)
+
+#define I40IW_CQPHC_ENABLED_VFS_SHIFT 32
+#define I40IW_CQPHC_ENABLED_VFS_MASK (0x3fULL << I40IW_CQPHC_ENABLED_VFS_SHIFT)
+
+#define I40IW_CQPHC_HMC_PROFILE_SHIFT 0
+#define I40IW_CQPHC_HMC_PROFILE_MASK (0x7ULL << I40IW_CQPHC_HMC_PROFILE_SHIFT)
+
+#define I40IW_CQPHC_SVER_SHIFT 24
+#define I40IW_CQPHC_SVER_MASK (0xffUL << I40IW_CQPHC_SVER_SHIFT)
+
+#define I40IW_CQPHC_SQBASE_SHIFT 9
+#define I40IW_CQPHC_SQBASE_MASK \
+ (0xfffffffffffffeULL << I40IW_CQPHC_SQBASE_SHIFT)
+
+#define I40IW_CQPHC_QPCTX_SHIFT 0
+#define I40IW_CQPHC_QPCTX_MASK \
+ (0xffffffffffffffffULL << I40IW_CQPHC_QPCTX_SHIFT)
+#define I40IW_CQPHC_SVER 1
+
+#define I40IW_CQP_SW_SQSIZE_4 4
+#define I40IW_CQP_SW_SQSIZE_2048 2048
+
+/* iWARP QP Doorbell shadow area */
+#define I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT 0
+#define I40IW_QP_DBSA_HW_SQ_TAIL_MASK \
+ (0x3fffUL << I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT)
+
+/* Completion Queue Doorbell shadow area */
+#define I40IW_CQ_DBSA_CQEIDX_SHIFT 0
+#define I40IW_CQ_DBSA_CQEIDX_MASK (0xfffffUL << I40IW_CQ_DBSA_CQEIDX_SHIFT)
+
+#define I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT 0
+#define I40IW_CQ_DBSA_SW_CQ_SELECT_MASK \
+ (0x3fffUL << I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT)
+
+#define I40IW_CQ_DBSA_ARM_NEXT_SHIFT 14
+#define I40IW_CQ_DBSA_ARM_NEXT_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SHIFT)
+
+#define I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT 15
+#define I40IW_CQ_DBSA_ARM_NEXT_SE_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT)
+
+#define I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT 16
+#define I40IW_CQ_DBSA_ARM_SEQ_NUM_MASK \
+ (0x3UL << I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT)
+
+/* CQP and iWARP Completion Queue */
+#define I40IW_CQ_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQ_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CCQ_OPRETVAL_SHIFT 0
+#define I40IW_CCQ_OPRETVAL_MASK (0xffffffffUL << I40IW_CCQ_OPRETVAL_SHIFT)
+
+#define I40IW_CQ_MINERR_SHIFT 0
+#define I40IW_CQ_MINERR_MASK (0xffffUL << I40IW_CQ_MINERR_SHIFT)
+
+#define I40IW_CQ_MAJERR_SHIFT 16
+#define I40IW_CQ_MAJERR_MASK (0xffffUL << I40IW_CQ_MAJERR_SHIFT)
+
+#define I40IW_CQ_WQEIDX_SHIFT 32
+#define I40IW_CQ_WQEIDX_MASK (0x3fffULL << I40IW_CQ_WQEIDX_SHIFT)
+
+#define I40IW_CQ_ERROR_SHIFT 55
+#define I40IW_CQ_ERROR_MASK (1ULL << I40IW_CQ_ERROR_SHIFT)
+
+#define I40IW_CQ_SQ_SHIFT 62
+#define I40IW_CQ_SQ_MASK (1ULL << I40IW_CQ_SQ_SHIFT)
+
+#define I40IW_CQ_VALID_SHIFT 63
+#define I40IW_CQ_VALID_MASK (1ULL << I40IW_CQ_VALID_SHIFT)
+
+#define I40IWCQ_PAYLDLEN_SHIFT 0
+#define I40IWCQ_PAYLDLEN_MASK (0xffffffffUL << I40IWCQ_PAYLDLEN_SHIFT)
+
+#define I40IWCQ_TCPSEQNUM_SHIFT 32
+#define I40IWCQ_TCPSEQNUM_MASK (0xffffffffULL << I40IWCQ_TCPSEQNUM_SHIFT)
+
+#define I40IWCQ_INVSTAG_SHIFT 0
+#define I40IWCQ_INVSTAG_MASK (0xffffffffUL << I40IWCQ_INVSTAG_SHIFT)
+
+#define I40IWCQ_QPID_SHIFT 32
+#define I40IWCQ_QPID_MASK (0x3ffffULL << I40IWCQ_QPID_SHIFT)
+
+#define I40IWCQ_PSHDROP_SHIFT 51
+#define I40IWCQ_PSHDROP_MASK (1ULL << I40IWCQ_PSHDROP_SHIFT)
+
+#define I40IWCQ_SRQ_SHIFT 52
+#define I40IWCQ_SRQ_MASK (1ULL << I40IWCQ_SRQ_SHIFT)
+
+#define I40IWCQ_STAG_SHIFT 53
+#define I40IWCQ_STAG_MASK (1ULL << I40IWCQ_STAG_SHIFT)
+
+#define I40IWCQ_SOEVENT_SHIFT 54
+#define I40IWCQ_SOEVENT_MASK (1ULL << I40IWCQ_SOEVENT_SHIFT)
+
+#define I40IWCQ_OP_SHIFT 56
+#define I40IWCQ_OP_MASK (0x3fULL << I40IWCQ_OP_SHIFT)
+
+/* CEQE format */
+#define I40IW_CEQE_CQCTX_SHIFT 0
+#define I40IW_CEQE_CQCTX_MASK \
+ (0x7fffffffffffffffULL << I40IW_CEQE_CQCTX_SHIFT)
+
+#define I40IW_CEQE_VALID_SHIFT 63
+#define I40IW_CEQE_VALID_MASK (1ULL << I40IW_CEQE_VALID_SHIFT)
+
+/* AEQE format */
+#define I40IW_AEQE_COMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_AEQE_COMPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_AEQE_QPCQID_SHIFT 0
+#define I40IW_AEQE_QPCQID_MASK (0x3ffffUL << I40IW_AEQE_QPCQID_SHIFT)
+
+#define I40IW_AEQE_WQDESCIDX_SHIFT 18
+#define I40IW_AEQE_WQDESCIDX_MASK (0x3fffULL << I40IW_AEQE_WQDESCIDX_SHIFT)
+
+#define I40IW_AEQE_OVERFLOW_SHIFT 33
+#define I40IW_AEQE_OVERFLOW_MASK (1ULL << I40IW_AEQE_OVERFLOW_SHIFT)
+
+#define I40IW_AEQE_AECODE_SHIFT 34
+#define I40IW_AEQE_AECODE_MASK (0xffffULL << I40IW_AEQE_AECODE_SHIFT)
+
+#define I40IW_AEQE_AESRC_SHIFT 50
+#define I40IW_AEQE_AESRC_MASK (0xfULL << I40IW_AEQE_AESRC_SHIFT)
+
+#define I40IW_AEQE_IWSTATE_SHIFT 54
+#define I40IW_AEQE_IWSTATE_MASK (0x7ULL << I40IW_AEQE_IWSTATE_SHIFT)
+
+#define I40IW_AEQE_TCPSTATE_SHIFT 57
+#define I40IW_AEQE_TCPSTATE_MASK (0xfULL << I40IW_AEQE_TCPSTATE_SHIFT)
+
+#define I40IW_AEQE_Q2DATA_SHIFT 61
+#define I40IW_AEQE_Q2DATA_MASK (0x3ULL << I40IW_AEQE_Q2DATA_SHIFT)
+
+#define I40IW_AEQE_VALID_SHIFT 63
+#define I40IW_AEQE_VALID_MASK (1ULL << I40IW_AEQE_VALID_SHIFT)
+
+/* CQP SQ WQES */
+#define I40IW_QP_TYPE_IWARP 1
+#define I40IW_QP_TYPE_UDA 2
+#define I40IW_QP_TYPE_CQP 4
+
+#define I40IW_CQ_TYPE_IWARP 1
+#define I40IW_CQ_TYPE_ILQ 2
+#define I40IW_CQ_TYPE_IEQ 3
+#define I40IW_CQ_TYPE_CQP 4
+
+#define I40IWQP_TERM_SEND_TERM_AND_FIN 0
+#define I40IWQP_TERM_SEND_TERM_ONLY 1
+#define I40IWQP_TERM_SEND_FIN_ONLY 2
+#define I40IWQP_TERM_DONOT_SEND_TERM_OR_FIN 3
+
+#define I40IW_CQP_OP_CREATE_QP 0
+#define I40IW_CQP_OP_MODIFY_QP 0x1
+#define I40IW_CQP_OP_DESTROY_QP 0x02
+#define I40IW_CQP_OP_CREATE_CQ 0x03
+#define I40IW_CQP_OP_MODIFY_CQ 0x04
+#define I40IW_CQP_OP_DESTROY_CQ 0x05
+#define I40IW_CQP_OP_CREATE_SRQ 0x06
+#define I40IW_CQP_OP_MODIFY_SRQ 0x07
+#define I40IW_CQP_OP_DESTROY_SRQ 0x08
+#define I40IW_CQP_OP_ALLOC_STAG 0x09
+#define I40IW_CQP_OP_REG_MR 0x0a
+#define I40IW_CQP_OP_QUERY_STAG 0x0b
+#define I40IW_CQP_OP_REG_SMR 0x0c
+#define I40IW_CQP_OP_DEALLOC_STAG 0x0d
+#define I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE 0x0e
+#define I40IW_CQP_OP_MANAGE_ARP 0x0f
+#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10
+#define I40IW_CQP_OP_MANAGE_PUSH_PAGES 0x11
+#define I40IW_CQP_OP_MANAGE_PE_TEAM 0x12
+#define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13
+#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14
+#define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15
+#define I40IW_CQP_OP_CREATE_CEQ 0x16
+#define I40IW_CQP_OP_DESTROY_CEQ 0x18
+#define I40IW_CQP_OP_CREATE_AEQ 0x19
+#define I40IW_CQP_OP_DESTROY_AEQ 0x1b
+#define I40IW_CQP_OP_CREATE_ADDR_VECT 0x1c
+#define I40IW_CQP_OP_MODIFY_ADDR_VECT 0x1d
+#define I40IW_CQP_OP_DESTROY_ADDR_VECT 0x1e
+#define I40IW_CQP_OP_UPDATE_PE_SDS 0x1f
+#define I40IW_CQP_OP_QUERY_FPM_VALUES 0x20
+#define I40IW_CQP_OP_COMMIT_FPM_VALUES 0x21
+#define I40IW_CQP_OP_FLUSH_WQES 0x22
+#define I40IW_CQP_OP_MANAGE_APBVT 0x23
+#define I40IW_CQP_OP_NOP 0x24
+#define I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25
+#define I40IW_CQP_OP_CREATE_UDA_MCAST_GROUP 0x26
+#define I40IW_CQP_OP_MODIFY_UDA_MCAST_GROUP 0x27
+#define I40IW_CQP_OP_DESTROY_UDA_MCAST_GROUP 0x28
+#define I40IW_CQP_OP_SUSPEND_QP 0x29
+#define I40IW_CQP_OP_RESUME_QP 0x2a
+#define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b
+#define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE 0x2d
+
+#define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16
+#define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT)
+
+#define I40IW_UDA_QPSQ_OPCODE_SHIFT 32
+#define I40IW_UDA_QPSQ_OPCODE_MASK ((u64)0x3f << I40IW_UDA_QPSQ_OPCODE_SHIFT)
+
+#define I40IW_UDA_QPSQ_MACLEN_SHIFT 56
+#define I40IW_UDA_QPSQ_MACLEN_MASK \
+ ((u64)0x7f << I40IW_UDA_QPSQ_MACLEN_SHIFT)
+
+#define I40IW_UDA_QPSQ_IPLEN_SHIFT 48
+#define I40IW_UDA_QPSQ_IPLEN_MASK \
+ ((u64)0x7f << I40IW_UDA_QPSQ_IPLEN_SHIFT)
+
+#define I40IW_UDA_QPSQ_L4T_SHIFT 30
+#define I40IW_UDA_QPSQ_L4T_MASK \
+ ((u64)0x3 << I40IW_UDA_QPSQ_L4T_SHIFT)
+
+#define I40IW_UDA_QPSQ_IIPT_SHIFT 28
+#define I40IW_UDA_QPSQ_IIPT_MASK \
+ ((u64)0x3 << I40IW_UDA_QPSQ_IIPT_SHIFT)
+
+#define I40IW_UDA_QPSQ_L4LEN_SHIFT 24
+#define I40IW_UDA_QPSQ_L4LEN_MASK ((u64)0xf << I40IW_UDA_QPSQ_L4LEN_SHIFT)
+
+#define I40IW_UDA_QPSQ_AVIDX_SHIFT 0
+#define I40IW_UDA_QPSQ_AVIDX_MASK ((u64)0xffff << I40IW_UDA_QPSQ_AVIDX_SHIFT)
+
+#define I40IW_UDA_QPSQ_VALID_SHIFT 63
+#define I40IW_UDA_QPSQ_VALID_MASK \
+ ((u64)0x1 << I40IW_UDA_QPSQ_VALID_SHIFT)
+
+#define I40IW_UDA_QPSQ_SIGCOMPL_SHIFT 62
+#define I40IW_UDA_QPSQ_SIGCOMPL_MASK ((u64)0x1 << I40IW_UDA_QPSQ_SIGCOMPL_SHIFT)
+
+#define I40IW_UDA_PAYLOADLEN_SHIFT 0
+#define I40IW_UDA_PAYLOADLEN_MASK ((u64)0x3fff << I40IW_UDA_PAYLOADLEN_SHIFT)
+
+#define I40IW_UDA_HDRLEN_SHIFT 16
+#define I40IW_UDA_HDRLEN_MASK ((u64)0x1ff << I40IW_UDA_HDRLEN_SHIFT)
+
+#define I40IW_VLAN_TAG_VALID_SHIFT 50
+#define I40IW_VLAN_TAG_VALID_MASK ((u64)0x1 << I40IW_VLAN_TAG_VALID_SHIFT)
+
+#define I40IW_UDA_L3PROTO_SHIFT 0
+#define I40IW_UDA_L3PROTO_MASK ((u64)0x3 << I40IW_UDA_L3PROTO_SHIFT)
+
+#define I40IW_UDA_L4PROTO_SHIFT 16
+#define I40IW_UDA_L4PROTO_MASK ((u64)0x3 << I40IW_UDA_L4PROTO_SHIFT)
+
+#define I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT 44
+#define I40IW_UDA_QPSQ_DOLOOPBACK_MASK \
+ ((u64)0x1 << I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT)
+
+/* CQP SQ WQE common fields */
+#define I40IW_CQPSQ_OPCODE_SHIFT 32
+#define I40IW_CQPSQ_OPCODE_MASK (0x3fULL << I40IW_CQPSQ_OPCODE_SHIFT)
+
+#define I40IW_CQPSQ_WQEVALID_SHIFT 63
+#define I40IW_CQPSQ_WQEVALID_MASK (1ULL << I40IW_CQPSQ_WQEVALID_SHIFT)
+
+#define I40IW_CQPSQ_TPHVAL_SHIFT 0
+#define I40IW_CQPSQ_TPHVAL_MASK (0xffUL << I40IW_CQPSQ_TPHVAL_SHIFT)
+
+#define I40IW_CQPSQ_TPHEN_SHIFT 60
+#define I40IW_CQPSQ_TPHEN_MASK (1ULL << I40IW_CQPSQ_TPHEN_SHIFT)
+
+#define I40IW_CQPSQ_PBUFADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_PBUFADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* Create/Modify/Destroy QP */
+
+#define I40IW_CQPSQ_QP_NEWMSS_SHIFT 32
+#define I40IW_CQPSQ_QP_NEWMSS_MASK (0x3fffULL << I40IW_CQPSQ_QP_NEWMSS_SHIFT)
+
+#define I40IW_CQPSQ_QP_TERMLEN_SHIFT 48
+#define I40IW_CQPSQ_QP_TERMLEN_MASK (0xfULL << I40IW_CQPSQ_QP_TERMLEN_SHIFT)
+
+#define I40IW_CQPSQ_QP_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_QP_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_QP_QPID_SHIFT 0
+#define I40IW_CQPSQ_QP_QPID_MASK (0x3ffffUL)	/* I40IWCQ_QPID_MASK */
+
+#define I40IW_CQPSQ_QP_OP_SHIFT 32
+#define I40IW_CQPSQ_QP_OP_MASK I40IWCQ_OP_MASK
+
+#define I40IW_CQPSQ_QP_ORDVALID_SHIFT 42
+#define I40IW_CQPSQ_QP_ORDVALID_MASK (1ULL << I40IW_CQPSQ_QP_ORDVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_TOECTXVALID_SHIFT 43
+#define I40IW_CQPSQ_QP_TOECTXVALID_MASK \
+ (1ULL << I40IW_CQPSQ_QP_TOECTXVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT 44
+#define I40IW_CQPSQ_QP_CACHEDVARVALID_MASK \
+ (1ULL << I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_VQ_SHIFT 45
+#define I40IW_CQPSQ_QP_VQ_MASK (1ULL << I40IW_CQPSQ_QP_VQ_SHIFT)
+
+#define I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT 46
+#define I40IW_CQPSQ_QP_FORCELOOPBACK_MASK \
+ (1ULL << I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT)
+
+#define I40IW_CQPSQ_QP_CQNUMVALID_SHIFT 47
+#define I40IW_CQPSQ_QP_CQNUMVALID_MASK \
+ (1ULL << I40IW_CQPSQ_QP_CQNUMVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_QPTYPE_SHIFT 48
+#define I40IW_CQPSQ_QP_QPTYPE_MASK (0x3ULL << I40IW_CQPSQ_QP_QPTYPE_SHIFT)
+
+#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52
+#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT)
+
+#define I40IW_CQPSQ_QP_STATRSRC_SHIFT 53
+#define I40IW_CQPSQ_QP_STATRSRC_MASK (1ULL << I40IW_CQPSQ_QP_STATRSRC_SHIFT)
+
+#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54
+#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK \
+ (1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT)
+
+#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT 55
+#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_MASK \
+ (1ULL << I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT)
+
+#define I40IW_CQPSQ_QP_TERMACT_SHIFT 56
+#define I40IW_CQPSQ_QP_TERMACT_MASK (0x3ULL << I40IW_CQPSQ_QP_TERMACT_SHIFT)
+
+#define I40IW_CQPSQ_QP_RESETCON_SHIFT 58
+#define I40IW_CQPSQ_QP_RESETCON_MASK (1ULL << I40IW_CQPSQ_QP_RESETCON_SHIFT)
+
+#define I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT 59
+#define I40IW_CQPSQ_QP_ARPTABIDXVALID_MASK \
+ (1ULL << I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT 60
+#define I40IW_CQPSQ_QP_NEXTIWSTATE_MASK \
+ (0x7ULL << I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT)
+
+#define I40IW_CQPSQ_QP_DBSHADOWADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_QP_DBSHADOWADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* Create/Modify/Destroy CQ */
+#define I40IW_CQPSQ_CQ_CQSIZE_SHIFT 0
+#define I40IW_CQPSQ_CQ_CQSIZE_MASK (0x3ffffUL << I40IW_CQPSQ_CQ_CQSIZE_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0
+#define I40IW_CQPSQ_CQ_CQCTX_MASK \
+ (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT)
+
+#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT 0
+#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_MASK \
+ (0x3ffffUL << I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CEQID_SHIFT 24
+#define I40IW_CQPSQ_CQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CQ_CEQID_SHIFT)
+
+#define I40IW_CQPSQ_CQ_OP_SHIFT 32
+#define I40IW_CQPSQ_CQ_OP_MASK (0x3fULL << I40IW_CQPSQ_CQ_OP_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CQRESIZE_SHIFT 43
+#define I40IW_CQPSQ_CQ_CQRESIZE_MASK (1ULL << I40IW_CQPSQ_CQ_CQRESIZE_SHIFT)
+
+#define I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT 44
+#define I40IW_CQPSQ_CQ_LPBLSIZE_MASK (3ULL << I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT 46
+#define I40IW_CQPSQ_CQ_CHKOVERFLOW_MASK \
+ (1ULL << I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT)
+
+#define I40IW_CQPSQ_CQ_VIRTMAP_SHIFT 47
+#define I40IW_CQPSQ_CQ_VIRTMAP_MASK (1ULL << I40IW_CQPSQ_CQ_VIRTMAP_SHIFT)
+
+#define I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT 48
+#define I40IW_CQPSQ_CQ_ENCEQEMASK_MASK \
+ (1ULL << I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT 49
+#define I40IW_CQPSQ_CQ_CEQIDVALID_MASK \
+ (1ULL << I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT)
+
+#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT 61
+#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_MASK \
+ (1ULL << I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT)
+
+/* Create/Modify/Destroy Shared Receive Queue */
+
+#define I40IW_CQPSQ_SRQ_RQSIZE_SHIFT 0
+#define I40IW_CQPSQ_SRQ_RQSIZE_MASK (0xfUL << I40IW_CQPSQ_SRQ_RQSIZE_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT 4
+#define I40IW_CQPSQ_SRQ_RQWQESIZE_MASK \
+ (0x7UL << I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT 32
+#define I40IW_CQPSQ_SRQ_SRQLIMIT_MASK \
+ (0xfffULL << I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_SRQCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_SRQ_SRQCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_SRQ_PDID_SHIFT 16
+#define I40IW_CQPSQ_SRQ_PDID_MASK \
+ (0x7fffULL << I40IW_CQPSQ_SRQ_PDID_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_SRQID_SHIFT 0
+#define I40IW_CQPSQ_SRQ_SRQID_MASK (0x7fffUL << I40IW_CQPSQ_SRQ_SRQID_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_SRQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_SRQ_VIRTMAP_SHIFT I40IW_CQPSQ_CQ_VIRTMAP_SHIFT
+#define I40IW_CQPSQ_SRQ_VIRTMAP_MASK I40IW_CQPSQ_CQ_VIRTMAP_MASK
+
+#define I40IW_CQPSQ_SRQ_TPHEN_SHIFT I40IW_CQPSQ_TPHEN_SHIFT
+#define I40IW_CQPSQ_SRQ_TPHEN_MASK I40IW_CQPSQ_TPHEN_MASK
+
+#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT 61
+#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_MASK \
+ (1ULL << I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT 6
+#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_MASK \
+ (0x3ffffffffffffffULL << I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_MASK \
+ (0xfffffffUL << I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT)
+
+/* Allocate/Register/Register Shared/Deallocate Stag */
+#define I40IW_CQPSQ_STAG_VA_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_STAG_VA_FBO_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_STAG_STAGLEN_SHIFT 0
+#define I40IW_CQPSQ_STAG_STAGLEN_MASK \
+ (0x3fffffffffffULL << I40IW_CQPSQ_STAG_STAGLEN_SHIFT)
+
+#define I40IW_CQPSQ_STAG_PDID_SHIFT 48
+#define I40IW_CQPSQ_STAG_PDID_MASK (0x7fffULL << I40IW_CQPSQ_STAG_PDID_SHIFT)
+
+#define I40IW_CQPSQ_STAG_KEY_SHIFT 0
+#define I40IW_CQPSQ_STAG_KEY_MASK (0xffUL << I40IW_CQPSQ_STAG_KEY_SHIFT)
+
+#define I40IW_CQPSQ_STAG_IDX_SHIFT 8
+#define I40IW_CQPSQ_STAG_IDX_MASK (0xffffffUL << I40IW_CQPSQ_STAG_IDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT 32
+#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_MASK \
+ (0xffffffULL << I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_MR_SHIFT 43
+#define I40IW_CQPSQ_STAG_MR_MASK (1ULL << I40IW_CQPSQ_STAG_MR_SHIFT)
+
+#define I40IW_CQPSQ_STAG_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_STAG_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT 46
+#define I40IW_CQPSQ_STAG_HPAGESIZE_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT)
+
+#define I40IW_CQPSQ_STAG_ARIGHTS_SHIFT 48
+#define I40IW_CQPSQ_STAG_ARIGHTS_MASK \
+ (0x1fULL << I40IW_CQPSQ_STAG_ARIGHTS_SHIFT)
+
+#define I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT 53
+#define I40IW_CQPSQ_STAG_REMACCENABLED_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT)
+
+#define I40IW_CQPSQ_STAG_VABASEDTO_SHIFT 59
+#define I40IW_CQPSQ_STAG_VABASEDTO_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_VABASEDTO_SHIFT)
+
+#define I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT 60
+#define I40IW_CQPSQ_STAG_USEHMCFNIDX_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_USEPFRID_SHIFT 61
+#define I40IW_CQPSQ_STAG_USEPFRID_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_USEPFRID_SHIFT)
+
+#define I40IW_CQPSQ_STAG_PBA_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_STAG_PBA_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT 0
+#define I40IW_CQPSQ_STAG_HMCFNIDX_MASK \
+ (0x3fUL << I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_MASK \
+ (0xfffffffUL << I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT)
+
+/* Query stag */
+#define I40IW_CQPSQ_QUERYSTAG_IDX_SHIFT I40IW_CQPSQ_STAG_IDX_SHIFT
+#define I40IW_CQPSQ_QUERYSTAG_IDX_MASK I40IW_CQPSQ_STAG_IDX_MASK
+
+/* Allocate Local IP Address Entry */
+
+/* Manage Local IP Address Table - MLIPA */
+#define I40IW_CQPSQ_MLIPA_IPV6LO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_MLIPA_IPV6LO_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_MLIPA_IPV6HI_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_MLIPA_IPV6HI_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_MLIPA_IPV4_SHIFT 0
+#define I40IW_CQPSQ_MLIPA_IPV4_MASK \
+ (0xffffffffUL << I40IW_CQPSQ_MLIPA_IPV4_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT 0
+#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_MASK \
+ (0x3fUL << I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT 42
+#define I40IW_CQPSQ_MLIPA_IPV4VALID_MASK \
+ (1ULL << I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT 43
+#define I40IW_CQPSQ_MLIPA_IPV6VALID_MASK \
+ (1ULL << I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT 62
+#define I40IW_CQPSQ_MLIPA_FREEENTRY_MASK \
+ (1ULL << I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT 61
+#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_MASK \
+ (1ULL << I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC0_SHIFT 0
+#define I40IW_CQPSQ_MLIPA_MAC0_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC0_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC1_SHIFT 8
+#define I40IW_CQPSQ_MLIPA_MAC1_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC1_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC2_SHIFT 16
+#define I40IW_CQPSQ_MLIPA_MAC2_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC2_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC3_SHIFT 24
+#define I40IW_CQPSQ_MLIPA_MAC3_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC3_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC4_SHIFT 32
+#define I40IW_CQPSQ_MLIPA_MAC4_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC4_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC5_SHIFT 40
+#define I40IW_CQPSQ_MLIPA_MAC5_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC5_SHIFT)
+
+/* Manage ARP Table - MAT */
+#define I40IW_CQPSQ_MAT_REACHMAX_SHIFT 0
+#define I40IW_CQPSQ_MAT_REACHMAX_MASK \
+ (0xffffffffUL << I40IW_CQPSQ_MAT_REACHMAX_SHIFT)
+
+#define I40IW_CQPSQ_MAT_MACADDR_SHIFT 0
+#define I40IW_CQPSQ_MAT_MACADDR_MASK \
+ (0xffffffffffffULL << I40IW_CQPSQ_MAT_MACADDR_SHIFT)
+
+#define I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT 0
+#define I40IW_CQPSQ_MAT_ARPENTRYIDX_MASK \
+ (0xfffUL << I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT)
+
+#define I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT 42
+#define I40IW_CQPSQ_MAT_ENTRYVALID_MASK \
+ (1ULL << I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT)
+
+#define I40IW_CQPSQ_MAT_PERMANENT_SHIFT 43
+#define I40IW_CQPSQ_MAT_PERMANENT_MASK \
+ (1ULL << I40IW_CQPSQ_MAT_PERMANENT_SHIFT)
+
+#define I40IW_CQPSQ_MAT_QUERY_SHIFT 44
+#define I40IW_CQPSQ_MAT_QUERY_MASK (1ULL << I40IW_CQPSQ_MAT_QUERY_SHIFT)
+
+/* Manage VF PBLE Backing Pages - MVPBP */
+#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT 0
+#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_MASK \
+ (0x3ffULL << I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT 16
+#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_MASK \
+ (0x1ffULL << I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_SD_INX_SHIFT 32
+#define I40IW_CQPSQ_MVPBP_SD_INX_MASK \
+ (0xfffULL << I40IW_CQPSQ_MVPBP_SD_INX_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT 62
+#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_MASK \
+ (0x1ULL << I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT 3
+#define I40IW_CQPSQ_MVPBP_PD_PLPBA_MASK \
+ (0x1fffffffffffffffULL << I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT)
+
+/* Manage Push Page - MPP */
+#define I40IW_INVALID_PUSH_PAGE_INDEX 0xffff
+
+#define I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT 0
+#define I40IW_CQPSQ_MPP_QS_HANDLE_MASK (0xffffUL << \
+ I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT)
+
+#define I40IW_CQPSQ_MPP_PPIDX_SHIFT 0
+#define I40IW_CQPSQ_MPP_PPIDX_MASK (0x3ffUL << I40IW_CQPSQ_MPP_PPIDX_SHIFT)
+
+#define I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT 62
+#define I40IW_CQPSQ_MPP_FREE_PAGE_MASK (1ULL << I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT)
+
+/* Upload Context - UCTX */
+#define I40IW_CQPSQ_UCTX_QPCTXADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_UCTX_QPCTXADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_UCTX_QPID_SHIFT 0
+#define I40IW_CQPSQ_UCTX_QPID_MASK (0x3ffffUL << I40IW_CQPSQ_UCTX_QPID_SHIFT)
+
+#define I40IW_CQPSQ_UCTX_QPTYPE_SHIFT 48
+#define I40IW_CQPSQ_UCTX_QPTYPE_MASK (0xfULL << I40IW_CQPSQ_UCTX_QPTYPE_SHIFT)
+
+#define I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT 61
+#define I40IW_CQPSQ_UCTX_RAWFORMAT_MASK \
+ (1ULL << I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT)
+
+#define I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT 62
+#define I40IW_CQPSQ_UCTX_FREEZEQP_MASK \
+ (1ULL << I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT)
+
+/* Manage HMC PM Function Table - MHMC */
+#define I40IW_CQPSQ_MHMC_VFIDX_SHIFT 0
+#define I40IW_CQPSQ_MHMC_VFIDX_MASK (0x7fUL << I40IW_CQPSQ_MHMC_VFIDX_SHIFT)
+
+#define I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT 62
+#define I40IW_CQPSQ_MHMC_FREEPMFN_MASK \
+ (1ULL << I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT)
+
+/* Set HMC Resource Profile - SHMCRP */
+#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT 0
+#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_MASK \
+ (0x7ULL << I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT)
+#define I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT 32
+#define I40IW_CQPSQ_SHMCRP_VFNUM_MASK (0x3fULL << I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT)
+
+/* Create/Destroy CEQ */
+#define I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT 0
+#define I40IW_CQPSQ_CEQ_CEQSIZE_MASK \
+ (0x1ffffUL << I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT)
+
+#define I40IW_CQPSQ_CEQ_CEQID_SHIFT 0
+#define I40IW_CQPSQ_CEQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CEQ_CEQID_SHIFT)
+
+#define I40IW_CQPSQ_CEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_CEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_CEQ_VMAP_SHIFT 47
+#define I40IW_CQPSQ_CEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_CEQ_VMAP_SHIFT)
+
+#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_MASK \
+ (0xfffffffUL << I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT)
+
+/* Create/Destroy AEQ */
+#define I40IW_CQPSQ_AEQ_AEQECNT_SHIFT 0
+#define I40IW_CQPSQ_AEQ_AEQECNT_MASK \
+ (0x7ffffUL << I40IW_CQPSQ_AEQ_AEQECNT_SHIFT)
+
+#define I40IW_CQPSQ_AEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_AEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_AEQ_VMAP_SHIFT 47
+#define I40IW_CQPSQ_AEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_AEQ_VMAP_SHIFT)
+
+#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_MASK \
+ (0xfffffffUL << I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT)
+
+/* Commit FPM Values - CFPM */
+#define I40IW_CQPSQ_CFPM_HMCFNID_SHIFT 0
+#define I40IW_CQPSQ_CFPM_HMCFNID_MASK (0x3fUL << I40IW_CQPSQ_CFPM_HMCFNID_SHIFT)
+
+/* Flush WQEs - FWQE */
+#define I40IW_CQPSQ_FWQE_AECODE_SHIFT 0
+#define I40IW_CQPSQ_FWQE_AECODE_MASK (0xffffUL << I40IW_CQPSQ_FWQE_AECODE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_AESOURCE_SHIFT 16
+#define I40IW_CQPSQ_FWQE_AESOURCE_MASK \
+ (0xfUL << I40IW_CQPSQ_FWQE_AESOURCE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_RQMNERR_SHIFT 0
+#define I40IW_CQPSQ_FWQE_RQMNERR_MASK \
+ (0xffffUL << I40IW_CQPSQ_FWQE_RQMNERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_RQMJERR_SHIFT 16
+#define I40IW_CQPSQ_FWQE_RQMJERR_MASK \
+ (0xffffUL << I40IW_CQPSQ_FWQE_RQMJERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_SQMNERR_SHIFT 32
+#define I40IW_CQPSQ_FWQE_SQMNERR_MASK \
+ (0xffffULL << I40IW_CQPSQ_FWQE_SQMNERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_SQMJERR_SHIFT 48
+#define I40IW_CQPSQ_FWQE_SQMJERR_MASK \
+ (0xffffULL << I40IW_CQPSQ_FWQE_SQMJERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_QPID_SHIFT 0
+#define I40IW_CQPSQ_FWQE_QPID_MASK (0x3ffffULL << I40IW_CQPSQ_FWQE_QPID_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT 59
+#define I40IW_CQPSQ_FWQE_GENERATE_AE_MASK (1ULL << \
+ I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT 60
+#define I40IW_CQPSQ_FWQE_USERFLCODE_MASK \
+ (1ULL << I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT 61
+#define I40IW_CQPSQ_FWQE_FLUSHSQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT 62
+#define I40IW_CQPSQ_FWQE_FLUSHRQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT)
+
+/* Manage Accelerated Port Table - MAPT */
+#define I40IW_CQPSQ_MAPT_PORT_SHIFT 0
+#define I40IW_CQPSQ_MAPT_PORT_MASK (0xffffUL << I40IW_CQPSQ_MAPT_PORT_SHIFT)
+
+#define I40IW_CQPSQ_MAPT_ADDPORT_SHIFT 62
+#define I40IW_CQPSQ_MAPT_ADDPORT_MASK (1ULL << I40IW_CQPSQ_MAPT_ADDPORT_SHIFT)
+
+/* Update Protocol Engine SDs */
+#define I40IW_CQPSQ_UPESD_SDCMD_SHIFT 0
+#define I40IW_CQPSQ_UPESD_SDCMD_MASK (0xffffffffUL << I40IW_CQPSQ_UPESD_SDCMD_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT 0
+#define I40IW_CQPSQ_UPESD_SDDATALOW_MASK \
+ (0xffffffffUL << I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT 32
+#define I40IW_CQPSQ_UPESD_SDDATAHI_MASK \
+ (0xffffffffULL << I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT)
+#define I40IW_CQPSQ_UPESD_HMCFNID_SHIFT 0
+#define I40IW_CQPSQ_UPESD_HMCFNID_MASK \
+ (0x3fUL << I40IW_CQPSQ_UPESD_HMCFNID_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT 63
+#define I40IW_CQPSQ_UPESD_ENTRY_VALID_MASK \
+ ((u64)1 << I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT 0
+#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_MASK \
+ (0xfUL << I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT 7
+#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_MASK \
+ (0x1UL << I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT)
+
+/* Suspend QP */
+#define I40IW_CQPSQ_SUSPENDQP_QPID_SHIFT 0
+#define I40IW_CQPSQ_SUSPENDQP_QPID_MASK (0x3ffffUL)	/* I40IWCQ_QPID_MASK */
+
+/* Resume QP */
+#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT 0
+#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_MASK \
+ (0xffffffffUL << I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT)
+
+#define I40IW_CQPSQ_RESUMEQP_QPID_SHIFT 0
+#define I40IW_CQPSQ_RESUMEQP_QPID_MASK (0x3ffffUL)	/* I40IWCQ_QPID_MASK */
+
+/* IW QP Context */
+#define I40IWQPC_DDP_VER_SHIFT 0
+#define I40IWQPC_DDP_VER_MASK (3UL << I40IWQPC_DDP_VER_SHIFT)
+
+#define I40IWQPC_SNAP_SHIFT 2
+#define I40IWQPC_SNAP_MASK (1UL << I40IWQPC_SNAP_SHIFT)
+
+#define I40IWQPC_IPV4_SHIFT 3
+#define I40IWQPC_IPV4_MASK (1UL << I40IWQPC_IPV4_SHIFT)
+
+#define I40IWQPC_NONAGLE_SHIFT 4
+#define I40IWQPC_NONAGLE_MASK (1UL << I40IWQPC_NONAGLE_SHIFT)
+
+#define I40IWQPC_INSERTVLANTAG_SHIFT 5
+#define I40IWQPC_INSERTVLANTAG_MASK (1UL << I40IWQPC_INSERTVLANTAG_SHIFT)
+
+#define I40IWQPC_USESRQ_SHIFT 6
+#define I40IWQPC_USESRQ_MASK (1UL << I40IWQPC_USESRQ_SHIFT)
+
+#define I40IWQPC_TIMESTAMP_SHIFT 7
+#define I40IWQPC_TIMESTAMP_MASK (1UL << I40IWQPC_TIMESTAMP_SHIFT)
+
+#define I40IWQPC_RQWQESIZE_SHIFT 8
+#define I40IWQPC_RQWQESIZE_MASK (3UL << I40IWQPC_RQWQESIZE_SHIFT)
+
+#define I40IWQPC_INSERTL2TAG2_SHIFT 11
+#define I40IWQPC_INSERTL2TAG2_MASK (1UL << I40IWQPC_INSERTL2TAG2_SHIFT)
+
+#define I40IWQPC_LIMIT_SHIFT 12
+#define I40IWQPC_LIMIT_MASK (3UL << I40IWQPC_LIMIT_SHIFT)
+
+#define I40IWQPC_DROPOOOSEG_SHIFT 15
+#define I40IWQPC_DROPOOOSEG_MASK (1UL << I40IWQPC_DROPOOOSEG_SHIFT)
+
+#define I40IWQPC_DUPACK_THRESH_SHIFT 16
+#define I40IWQPC_DUPACK_THRESH_MASK (7UL << I40IWQPC_DUPACK_THRESH_SHIFT)
+
+#define I40IWQPC_ERR_RQ_IDX_VALID_SHIFT 19
+#define I40IWQPC_ERR_RQ_IDX_VALID_MASK (1UL << I40IWQPC_ERR_RQ_IDX_VALID_SHIFT)
+
+#define I40IWQPC_DIS_VLAN_CHECKS_SHIFT 19
+#define I40IWQPC_DIS_VLAN_CHECKS_MASK (7UL << I40IWQPC_DIS_VLAN_CHECKS_SHIFT)
+
+#define I40IWQPC_RCVTPHEN_SHIFT 28
+#define I40IWQPC_RCVTPHEN_MASK (1UL << I40IWQPC_RCVTPHEN_SHIFT)
+
+#define I40IWQPC_XMITTPHEN_SHIFT 29
+#define I40IWQPC_XMITTPHEN_MASK (1ULL << I40IWQPC_XMITTPHEN_SHIFT)
+
+#define I40IWQPC_RQTPHEN_SHIFT 30
+#define I40IWQPC_RQTPHEN_MASK (1UL << I40IWQPC_RQTPHEN_SHIFT)
+
+#define I40IWQPC_SQTPHEN_SHIFT 31
+#define I40IWQPC_SQTPHEN_MASK (1ULL << I40IWQPC_SQTPHEN_SHIFT)
+
+#define I40IWQPC_PPIDX_SHIFT 32
+#define I40IWQPC_PPIDX_MASK (0x3ffULL << I40IWQPC_PPIDX_SHIFT)
+
+#define I40IWQPC_PMENA_SHIFT 47
+#define I40IWQPC_PMENA_MASK (1ULL << I40IWQPC_PMENA_SHIFT)
+
+#define I40IWQPC_RDMAP_VER_SHIFT 62
+#define I40IWQPC_RDMAP_VER_MASK (3ULL << I40IWQPC_RDMAP_VER_SHIFT)
+
+#define I40IWQPC_SQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_SQADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_RQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_RQADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_TTL_SHIFT 0
+#define I40IWQPC_TTL_MASK (0xffUL << I40IWQPC_TTL_SHIFT)
+
+#define I40IWQPC_RQSIZE_SHIFT 8
+#define I40IWQPC_RQSIZE_MASK (0xfUL << I40IWQPC_RQSIZE_SHIFT)
+
+#define I40IWQPC_SQSIZE_SHIFT 12
+#define I40IWQPC_SQSIZE_MASK (0xfUL << I40IWQPC_SQSIZE_SHIFT)
+
+#define I40IWQPC_SRCMACADDRIDX_SHIFT 16
+#define I40IWQPC_SRCMACADDRIDX_MASK (0x3fUL << I40IWQPC_SRCMACADDRIDX_SHIFT)
+
+#define I40IWQPC_AVOIDSTRETCHACK_SHIFT 23
+#define I40IWQPC_AVOIDSTRETCHACK_MASK (1UL << I40IWQPC_AVOIDSTRETCHACK_SHIFT)
+
+#define I40IWQPC_TOS_SHIFT 24
+#define I40IWQPC_TOS_MASK (0xffUL << I40IWQPC_TOS_SHIFT)
+
+#define I40IWQPC_SRCPORTNUM_SHIFT 32
+#define I40IWQPC_SRCPORTNUM_MASK (0xffffULL << I40IWQPC_SRCPORTNUM_SHIFT)
+
+#define I40IWQPC_DESTPORTNUM_SHIFT 48
+#define I40IWQPC_DESTPORTNUM_MASK (0xffffULL << I40IWQPC_DESTPORTNUM_SHIFT)
+
+#define I40IWQPC_DESTIPADDR0_SHIFT 32
+#define I40IWQPC_DESTIPADDR0_MASK \
+ (0xffffffffULL << I40IWQPC_DESTIPADDR0_SHIFT)
+
+#define I40IWQPC_DESTIPADDR1_SHIFT 0
+#define I40IWQPC_DESTIPADDR1_MASK \
+ (0xffffffffULL << I40IWQPC_DESTIPADDR1_SHIFT)
+
+#define I40IWQPC_DESTIPADDR2_SHIFT 32
+#define I40IWQPC_DESTIPADDR2_MASK \
+ (0xffffffffULL << I40IWQPC_DESTIPADDR2_SHIFT)
+
+#define I40IWQPC_DESTIPADDR3_SHIFT 0
+#define I40IWQPC_DESTIPADDR3_MASK \
+ (0xffffffffULL << I40IWQPC_DESTIPADDR3_SHIFT)
+
+#define I40IWQPC_SNDMSS_SHIFT 16
+#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT)
+
+#define I40IWQPC_VLANTAG_SHIFT 32
+#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
+
+#define I40IWQPC_ARPIDX_SHIFT 48
+#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
+
+#define I40IWQPC_FLOWLABEL_SHIFT 0
+#define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
+
+#define I40IWQPC_WSCALE_SHIFT 20
+#define I40IWQPC_WSCALE_MASK (1UL << I40IWQPC_WSCALE_SHIFT)
+
+#define I40IWQPC_KEEPALIVE_SHIFT 21
+#define I40IWQPC_KEEPALIVE_MASK (1UL << I40IWQPC_KEEPALIVE_SHIFT)
+
+#define I40IWQPC_IGNORE_TCP_OPT_SHIFT 22
+#define I40IWQPC_IGNORE_TCP_OPT_MASK (1UL << I40IWQPC_IGNORE_TCP_OPT_SHIFT)
+
+#define I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT 23
+#define I40IWQPC_IGNORE_TCP_UNS_OPT_MASK \
+ (1UL << I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT)
+
+#define I40IWQPC_TCPSTATE_SHIFT 28
+#define I40IWQPC_TCPSTATE_MASK (0xfUL << I40IWQPC_TCPSTATE_SHIFT)
+
+#define I40IWQPC_RCVSCALE_SHIFT 32
+#define I40IWQPC_RCVSCALE_MASK (0xfULL << I40IWQPC_RCVSCALE_SHIFT)
+
+#define I40IWQPC_SNDSCALE_SHIFT 40
+#define I40IWQPC_SNDSCALE_MASK (0xfULL << I40IWQPC_SNDSCALE_SHIFT)
+
+#define I40IWQPC_PDIDX_SHIFT 48
+#define I40IWQPC_PDIDX_MASK (0x7fffULL << I40IWQPC_PDIDX_SHIFT)
+
+#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT 16
+#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_MASK \
+ (0xffUL << I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT)
+
+#define I40IWQPC_KEEPALIVE_INTERVAL_SHIFT 24
+#define I40IWQPC_KEEPALIVE_INTERVAL_MASK \
+ (0xffUL << I40IWQPC_KEEPALIVE_INTERVAL_SHIFT)
+
+#define I40IWQPC_TIMESTAMP_RECENT_SHIFT 0
+#define I40IWQPC_TIMESTAMP_RECENT_MASK \
+ (0xffffffffUL << I40IWQPC_TIMESTAMP_RECENT_SHIFT)
+
+#define I40IWQPC_TIMESTAMP_AGE_SHIFT 32
+#define I40IWQPC_TIMESTAMP_AGE_MASK \
+ (0xffffffffULL << I40IWQPC_TIMESTAMP_AGE_SHIFT)
+
+#define I40IWQPC_SNDNXT_SHIFT 0
+#define I40IWQPC_SNDNXT_MASK (0xffffffffUL << I40IWQPC_SNDNXT_SHIFT)
+
+#define I40IWQPC_SNDWND_SHIFT 32
+#define I40IWQPC_SNDWND_MASK (0xffffffffULL << I40IWQPC_SNDWND_SHIFT)
+
+#define I40IWQPC_RCVNXT_SHIFT 0
+#define I40IWQPC_RCVNXT_MASK (0xffffffffUL << I40IWQPC_RCVNXT_SHIFT)
+
+#define I40IWQPC_RCVWND_SHIFT 32
+#define I40IWQPC_RCVWND_MASK (0xffffffffULL << I40IWQPC_RCVWND_SHIFT)
+
+#define I40IWQPC_SNDMAX_SHIFT 0
+#define I40IWQPC_SNDMAX_MASK (0xffffffffUL << I40IWQPC_SNDMAX_SHIFT)
+
+#define I40IWQPC_SNDUNA_SHIFT 32
+#define I40IWQPC_SNDUNA_MASK (0xffffffffULL << I40IWQPC_SNDUNA_SHIFT)
+
+#define I40IWQPC_SRTT_SHIFT 0
+#define I40IWQPC_SRTT_MASK (0xffffffffUL << I40IWQPC_SRTT_SHIFT)
+
+#define I40IWQPC_RTTVAR_SHIFT 32
+#define I40IWQPC_RTTVAR_MASK (0xffffffffULL << I40IWQPC_RTTVAR_SHIFT)
+
+#define I40IWQPC_SSTHRESH_SHIFT 0
+#define I40IWQPC_SSTHRESH_MASK (0xffffffffUL << I40IWQPC_SSTHRESH_SHIFT)
+
+#define I40IWQPC_CWND_SHIFT 32
+#define I40IWQPC_CWND_MASK (0xffffffffULL << I40IWQPC_CWND_SHIFT)
+
+#define I40IWQPC_SNDWL1_SHIFT 0
+#define I40IWQPC_SNDWL1_MASK (0xffffffffUL << I40IWQPC_SNDWL1_SHIFT)
+
+#define I40IWQPC_SNDWL2_SHIFT 32
+#define I40IWQPC_SNDWL2_MASK (0xffffffffULL << I40IWQPC_SNDWL2_SHIFT)
+
+#define I40IWQPC_ERR_RQ_IDX_SHIFT 32
+#define I40IWQPC_ERR_RQ_IDX_MASK (0x3fffULL << I40IWQPC_ERR_RQ_IDX_SHIFT)
+
+#define I40IWQPC_MAXSNDWND_SHIFT 0
+#define I40IWQPC_MAXSNDWND_MASK (0xffffffffUL << I40IWQPC_MAXSNDWND_SHIFT)
+
+#define I40IWQPC_REXMIT_THRESH_SHIFT 48
+#define I40IWQPC_REXMIT_THRESH_MASK (0x3fULL << I40IWQPC_REXMIT_THRESH_SHIFT)
+
+#define I40IWQPC_TXCQNUM_SHIFT 0
+#define I40IWQPC_TXCQNUM_MASK (0x1ffffUL << I40IWQPC_TXCQNUM_SHIFT)
+
+#define I40IWQPC_RXCQNUM_SHIFT 32
+#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)
+
+#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_LASTBYTESENT_SHIFT 0
+#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)
+
+#define I40IWQPC_SRQID_SHIFT 32
+#define I40IWQPC_SRQID_MASK (0xffULL << I40IWQPC_SRQID_SHIFT)
+
+#define I40IWQPC_ORDSIZE_SHIFT 0
+#define I40IWQPC_ORDSIZE_MASK (0x7fUL << I40IWQPC_ORDSIZE_SHIFT)
+
+#define I40IWQPC_IRDSIZE_SHIFT 16
+#define I40IWQPC_IRDSIZE_MASK (0x3UL << I40IWQPC_IRDSIZE_SHIFT)
+
+#define I40IWQPC_WRRDRSPOK_SHIFT 20
+#define I40IWQPC_WRRDRSPOK_MASK (1UL << I40IWQPC_WRRDRSPOK_SHIFT)
+
+#define I40IWQPC_RDOK_SHIFT 21
+#define I40IWQPC_RDOK_MASK (1UL << I40IWQPC_RDOK_SHIFT)
+
+#define I40IWQPC_SNDMARKERS_SHIFT 22
+#define I40IWQPC_SNDMARKERS_MASK (1UL << I40IWQPC_SNDMARKERS_SHIFT)
+
+#define I40IWQPC_BINDEN_SHIFT 23
+#define I40IWQPC_BINDEN_MASK (1UL << I40IWQPC_BINDEN_SHIFT)
+
+#define I40IWQPC_FASTREGEN_SHIFT 24
+#define I40IWQPC_FASTREGEN_MASK (1UL << I40IWQPC_FASTREGEN_SHIFT)
+
+#define I40IWQPC_PRIVEN_SHIFT 25
+#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)
+
+#define I40IWQPC_LSMMPRESENT_SHIFT 26
+#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT)
+
+#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27
+#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT)
+
+#define I40IWQPC_IWARPMODE_SHIFT 28
+#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)
+
+#define I40IWQPC_RCVMARKERS_SHIFT 29
+#define I40IWQPC_RCVMARKERS_MASK (1UL << I40IWQPC_RCVMARKERS_SHIFT)
+
+#define I40IWQPC_ALIGNHDRS_SHIFT 30
+#define I40IWQPC_ALIGNHDRS_MASK (1UL << I40IWQPC_ALIGNHDRS_SHIFT)
+
+#define I40IWQPC_RCVNOMPACRC_SHIFT 31
+#define I40IWQPC_RCVNOMPACRC_MASK (1UL << I40IWQPC_RCVNOMPACRC_SHIFT)
+
+#define I40IWQPC_RCVMARKOFFSET_SHIFT 33
+#define I40IWQPC_RCVMARKOFFSET_MASK (0x1ffULL << I40IWQPC_RCVMARKOFFSET_SHIFT)
+
+#define I40IWQPC_SNDMARKOFFSET_SHIFT 48
+#define I40IWQPC_SNDMARKOFFSET_MASK (0x1ffULL << I40IWQPC_SNDMARKOFFSET_SHIFT)
+
+#define I40IWQPC_QPCOMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_QPCOMPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_SQTPHVAL_SHIFT 0
+#define I40IWQPC_SQTPHVAL_MASK (0xffUL << I40IWQPC_SQTPHVAL_SHIFT)
+
+#define I40IWQPC_RQTPHVAL_SHIFT 8
+#define I40IWQPC_RQTPHVAL_MASK (0xffUL << I40IWQPC_RQTPHVAL_SHIFT)
+
+#define I40IWQPC_QSHANDLE_SHIFT 16
+#define I40IWQPC_QSHANDLE_MASK (0x3ffUL << I40IWQPC_QSHANDLE_SHIFT)
+
+#define I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT 32
+#define I40IWQPC_EXCEPTION_LAN_QUEUE_MASK (0xfffULL << \
+ I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR3_SHIFT 0
+#define I40IWQPC_LOCAL_IPADDR3_MASK \
+ (0xffffffffUL << I40IWQPC_LOCAL_IPADDR3_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR2_SHIFT 32
+#define I40IWQPC_LOCAL_IPADDR2_MASK \
+ (0xffffffffULL << I40IWQPC_LOCAL_IPADDR2_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR1_SHIFT 0
+#define I40IWQPC_LOCAL_IPADDR1_MASK \
+ (0xffffffffUL << I40IWQPC_LOCAL_IPADDR1_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR0_SHIFT 32
+#define I40IWQPC_LOCAL_IPADDR0_MASK \
+ (0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT)
+
+/* wqe size considering 32 bytes per wqe */
+#define I40IWQP_SW_MIN_WQSIZE 4 /* 128 bytes */
+#define I40IWQP_SW_MAX_WQSIZE 16384 /* 524288 bytes */
+
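+/* SQ WQE opcodes, encoded into the I40IWQPSQ_OPCODE field defined below */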
+#define I40IWQP_OP_RDMA_WRITE 0
+#define I40IWQP_OP_RDMA_READ 1
+#define I40IWQP_OP_RDMA_SEND 3
+#define I40IWQP_OP_RDMA_SEND_INV 4
+#define I40IWQP_OP_RDMA_SEND_SOL_EVENT 5
+#define I40IWQP_OP_RDMA_SEND_SOL_EVENT_INV 6
+#define I40IWQP_OP_BIND_MW 8
+#define I40IWQP_OP_FAST_REGISTER 9
+#define I40IWQP_OP_LOCAL_INVALIDATE 10
+#define I40IWQP_OP_RDMA_READ_LOC_INV 11
+#define I40IWQP_OP_NOP 12
+
+#define I40IW_RSVD_SHIFT 41
+#define I40IW_RSVD_MASK (0x7fffULL << I40IW_RSVD_SHIFT)
+
+/* iwarp QP SQ WQE common fields */
+#define I40IWQPSQ_OPCODE_SHIFT 32
+#define I40IWQPSQ_OPCODE_MASK (0x3fULL << I40IWQPSQ_OPCODE_SHIFT)
+
+#define I40IWQPSQ_ADDFRAGCNT_SHIFT 38
+#define I40IWQPSQ_ADDFRAGCNT_MASK (0x7ULL << I40IWQPSQ_ADDFRAGCNT_SHIFT)
+
+#define I40IWQPSQ_PUSHWQE_SHIFT 56
+#define I40IWQPSQ_PUSHWQE_MASK (1ULL << I40IWQPSQ_PUSHWQE_SHIFT)
+
+#define I40IWQPSQ_STREAMMODE_SHIFT 58
+#define I40IWQPSQ_STREAMMODE_MASK (1ULL << I40IWQPSQ_STREAMMODE_SHIFT)
+
+#define I40IWQPSQ_WAITFORRCVPDU_SHIFT 59
+#define I40IWQPSQ_WAITFORRCVPDU_MASK (1ULL << I40IWQPSQ_WAITFORRCVPDU_SHIFT)
+
+#define I40IWQPSQ_READFENCE_SHIFT 60
+#define I40IWQPSQ_READFENCE_MASK (1ULL << I40IWQPSQ_READFENCE_SHIFT)
+
+#define I40IWQPSQ_LOCALFENCE_SHIFT 61
+#define I40IWQPSQ_LOCALFENCE_MASK (1ULL << I40IWQPSQ_LOCALFENCE_SHIFT)
+
+#define I40IWQPSQ_SIGCOMPL_SHIFT 62
+#define I40IWQPSQ_SIGCOMPL_MASK (1ULL << I40IWQPSQ_SIGCOMPL_SHIFT)
+
+#define I40IWQPSQ_VALID_SHIFT 63
+#define I40IWQPSQ_VALID_MASK (1ULL << I40IWQPSQ_VALID_SHIFT)
+
+#define I40IWQPSQ_FRAG_TO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_FRAG_TO_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPSQ_FRAG_LEN_SHIFT 0
+#define I40IWQPSQ_FRAG_LEN_MASK (0xffffffffUL << I40IWQPSQ_FRAG_LEN_SHIFT)
+
+#define I40IWQPSQ_FRAG_STAG_SHIFT 32
+#define I40IWQPSQ_FRAG_STAG_MASK (0xffffffffULL << I40IWQPSQ_FRAG_STAG_SHIFT)
+
+#define I40IWQPSQ_REMSTAGINV_SHIFT 0
+#define I40IWQPSQ_REMSTAGINV_MASK (0xffffffffUL << I40IWQPSQ_REMSTAGINV_SHIFT)
+
+#define I40IWQPSQ_INLINEDATAFLAG_SHIFT 57
+#define I40IWQPSQ_INLINEDATAFLAG_MASK (1ULL << I40IWQPSQ_INLINEDATAFLAG_SHIFT)
+
+#define I40IWQPSQ_INLINEDATALEN_SHIFT 48
+#define I40IWQPSQ_INLINEDATALEN_MASK \
+ (0x7fULL << I40IWQPSQ_INLINEDATALEN_SHIFT)
+
+/* iwarp send with push mode */
+#define I40IWQPSQ_WQDESCIDX_SHIFT 0
+#define I40IWQPSQ_WQDESCIDX_MASK (0x3fffUL << I40IWQPSQ_WQDESCIDX_SHIFT)
+
+/* rdma write */
+#define I40IWQPSQ_REMSTAG_SHIFT 0
+#define I40IWQPSQ_REMSTAG_MASK (0xffffffffUL << I40IWQPSQ_REMSTAG_SHIFT)
+
+#define I40IWQPSQ_REMTO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_REMTO_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* memory window */
+#define I40IWQPSQ_STAGRIGHTS_SHIFT 48
+#define I40IWQPSQ_STAGRIGHTS_MASK (0x1fULL << I40IWQPSQ_STAGRIGHTS_SHIFT)
+
+#define I40IWQPSQ_VABASEDTO_SHIFT 53
+#define I40IWQPSQ_VABASEDTO_MASK (1ULL << I40IWQPSQ_VABASEDTO_SHIFT)
+
+#define I40IWQPSQ_MWLEN_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_MWLEN_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPSQ_PARENTMRSTAG_SHIFT 0
+#define I40IWQPSQ_PARENTMRSTAG_MASK \
+ (0xffffffffUL << I40IWQPSQ_PARENTMRSTAG_SHIFT)
+
+#define I40IWQPSQ_MWSTAG_SHIFT 32
+#define I40IWQPSQ_MWSTAG_MASK (0xffffffffULL << I40IWQPSQ_MWSTAG_SHIFT)
+
+#define I40IWQPSQ_BASEVA_TO_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_BASEVA_TO_FBO_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* Local Invalidate */
+#define I40IWQPSQ_LOCSTAG_SHIFT 32
+#define I40IWQPSQ_LOCSTAG_MASK (0xffffffffULL << I40IWQPSQ_LOCSTAG_SHIFT)
+
+/* Fast Register */
+#define I40IWQPSQ_STAGKEY_SHIFT 0
+#define I40IWQPSQ_STAGKEY_MASK (0xffUL << I40IWQPSQ_STAGKEY_SHIFT)
+
+#define I40IWQPSQ_STAGINDEX_SHIFT 8
+#define I40IWQPSQ_STAGINDEX_MASK (0xffffffUL << I40IWQPSQ_STAGINDEX_SHIFT)
+
+#define I40IWQPSQ_COPYHOSTPBLS_SHIFT 43
+#define I40IWQPSQ_COPYHOSTPBLS_MASK (1ULL << I40IWQPSQ_COPYHOSTPBLS_SHIFT)
+
+#define I40IWQPSQ_LPBLSIZE_SHIFT 44
+#define I40IWQPSQ_LPBLSIZE_MASK (3ULL << I40IWQPSQ_LPBLSIZE_SHIFT)
+
+#define I40IWQPSQ_HPAGESIZE_SHIFT 46
+#define I40IWQPSQ_HPAGESIZE_MASK (3ULL << I40IWQPSQ_HPAGESIZE_SHIFT)
+
+#define I40IWQPSQ_STAGLEN_SHIFT 0
+#define I40IWQPSQ_STAGLEN_MASK (0x1ffffffffffULL << I40IWQPSQ_STAGLEN_SHIFT)
+
+#define I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT 48
+#define I40IWQPSQ_FIRSTPMPBLIDXLO_MASK \
+ (0xffffULL << I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT)
+
+#define I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT 0
+#define I40IWQPSQ_FIRSTPMPBLIDXHI_MASK \
+ (0xfffUL << I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT)
+
+#define I40IWQPSQ_PBLADDR_SHIFT 12
+#define I40IWQPSQ_PBLADDR_MASK (0xfffffffffffffULL << I40IWQPSQ_PBLADDR_SHIFT)
+
+/* iwarp QP RQ WQE common fields */
+#define I40IWQPRQ_ADDFRAGCNT_SHIFT I40IWQPSQ_ADDFRAGCNT_SHIFT
+#define I40IWQPRQ_ADDFRAGCNT_MASK I40IWQPSQ_ADDFRAGCNT_MASK
+
+#define I40IWQPRQ_VALID_SHIFT I40IWQPSQ_VALID_SHIFT
+#define I40IWQPRQ_VALID_MASK I40IWQPSQ_VALID_MASK
+
+#define I40IWQPRQ_COMPLCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPRQ_COMPLCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPRQ_FRAG_LEN_SHIFT I40IWQPSQ_FRAG_LEN_SHIFT
+#define I40IWQPRQ_FRAG_LEN_MASK I40IWQPSQ_FRAG_LEN_MASK
+
+#define I40IWQPRQ_STAG_SHIFT I40IWQPSQ_FRAG_STAG_SHIFT
+#define I40IWQPRQ_STAG_MASK I40IWQPSQ_FRAG_STAG_MASK
+
+#define I40IWQPRQ_TO_SHIFT I40IWQPSQ_FRAG_TO_SHIFT
+#define I40IWQPRQ_TO_MASK I40IWQPSQ_FRAG_TO_MASK
+
+/* Query FPM CQP buf */
+#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_QPS_MASK \
+ (0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_CQS_MASK \
+ (0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT)
+
+#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT 0
+#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_MASK \
+ (0x3fffUL << I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT 32
+#define I40IW_QUERY_FPM_MAX_PE_SDS_MASK \
+ (0x3fffULL << I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_CEQS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_CEQS_MASK \
+ (0xffUL << I40IW_QUERY_FPM_MAX_CEQS_SHIFT)
+
+#define I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT 32
+#define I40IW_QUERY_FPM_XFBLOCKSIZE_MASK \
+ (0xffffffffULL << I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT)
+
+#define I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT 32
+#define I40IW_QUERY_FPM_Q1BLOCKSIZE_MASK \
+ (0xffffffffULL << I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT)
+
+#define I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT 16
+#define I40IW_QUERY_FPM_HTMULTIPLIER_MASK \
+ (0xfUL << I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT)
+
+#define I40IW_QUERY_FPM_TIMERBUCKET_SHIFT 32
+#define I40IW_QUERY_FPM_TIMERBUCKET_MASK \
+ (0xffffULL << I40IW_QUERY_FPM_TIMERBUCKET_SHIFT)
+
+/* Static HMC pages allocated buf */
+#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT 0
+#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_MASK \
+ (0x3fUL << I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT)
+
+#define I40IW_HW_PAGE_SIZE 4096
+#define I40IW_DONE_COUNT 1000
+#define I40IW_SLEEP_COUNT 10
+
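+/*
+ * Queue alignment masks are (alignment - 1): a buffer is correctly
+ * aligned when (physical_address & mask) == 0, and a mask of 0 places
+ * no alignment constraint on the buffer.
+ */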
+enum {
+ I40IW_QUEUES_ALIGNMENT_MASK = (128 - 1),
+ I40IW_AEQ_ALIGNMENT_MASK = (256 - 1),
+ I40IW_Q2_ALIGNMENT_MASK = (256 - 1),
+ I40IW_CEQ_ALIGNMENT_MASK = (256 - 1),
+ I40IW_CQ0_ALIGNMENT_MASK = (256 - 1),
+ I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1),
+ I40IW_SHADOWAREA_MASK = (128 - 1),
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0,
+ I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0
+};
+
+enum i40iw_alignment {
+ I40IW_CQP_ALIGNMENT = 0x200,
+ I40IW_AEQ_ALIGNMENT = 0x100,
+ I40IW_CEQ_ALIGNMENT = 0x100,
+ I40IW_CQ0_ALIGNMENT = 0x100,
+ I40IW_SD_BUF_ALIGNMENT = 0x100
+};
+
+#define I40IW_QP_WQE_MIN_SIZE 32
+#define I40IW_QP_WQE_MAX_SIZE 128
+
+#define I40IW_CQE_QTYPE_RQ 0
+#define I40IW_CQE_QTYPE_SQ 1
+
+#define I40IW_RING_INIT(_ring, _size) \
+ { \
+ (_ring).head = 0; \
+ (_ring).tail = 0; \
+ (_ring).size = (_size); \
+ }
+#define I40IW_RING_GETSIZE(_ring) ((_ring).size)
+#define I40IW_RING_GETCURRENT_HEAD(_ring) ((_ring).head)
+#define I40IW_RING_GETCURRENT_TAIL(_ring) ((_ring).tail)
+
+#define I40IW_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!I40IW_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = I40IW_ERR_RING_FULL; \
+ } \
+ }
+
+#define I40IW_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((I40IW_RING_WORK_AVAILABLE(_ring) + (_count)) < size) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = I40IW_ERR_RING_FULL; \
+ } \
+ }
+
+#define I40IW_RING_MOVE_TAIL(_ring) \
+ (_ring).tail = ((_ring).tail + 1) % (_ring).size
+
+#define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
+ (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
+
+#define I40IW_RING_SET_TAIL(_ring, _pos) \
+ (_ring).tail = (_pos) % (_ring).size
+
+#define I40IW_RING_FULL_ERR(_ring) \
+ ( \
+ (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 1)) \
+ )
+
+#define I40IW_ERR_RING_FULL2(_ring) \
+ ( \
+ (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 2)) \
+ )
+
+#define I40IW_ERR_RING_FULL3(_ring) \
+ ( \
+ (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 3)) \
+ )
+
+#define I40IW_RING_MORE_WORK(_ring) \
+ ( \
+ (I40IW_RING_WORK_AVAILABLE(_ring) != 0) \
+ )
+
+#define I40IW_RING_WORK_AVAILABLE(_ring) \
+ ( \
+ (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
+ )
+
+#define I40IW_RING_GET_WQES_AVAILABLE(_ring) \
+ ( \
+ ((_ring).size - I40IW_RING_WORK_AVAILABLE(_ring) - 1) \
+ )
+
+#define I40IW_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
+ { \
+ index = I40IW_RING_GETCURRENT_HEAD(_ring); \
+ I40IW_RING_MOVE_HEAD(_ring, _retcode); \
+ }
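+
+/*
+ * Illustrative producer-side sketch (hypothetical variables and helper,
+ * not part of this patch): reserve the current head slot, then fill it.
+ * The ring deliberately sacrifices one slot so that "full" (work
+ * available == size - 1) is distinguishable from "empty" (head == tail):
+ *
+ *	u32 wqe_idx;
+ *	enum i40iw_status_code ret_code;
+ *
+ *	I40IW_ATOMIC_RING_MOVE_HEAD(qp->sq_ring, wqe_idx, ret_code);
+ *	if (!ret_code)
+ *		build_wqe(&qp->sq_base[wqe_idx]);
+ */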
+
+/* Async Events codes */
+#define I40IW_AE_AMP_UNALLOCATED_STAG 0x0102
+#define I40IW_AE_AMP_INVALID_STAG 0x0103
+#define I40IW_AE_AMP_BAD_QP 0x0104
+#define I40IW_AE_AMP_BAD_PD 0x0105
+#define I40IW_AE_AMP_BAD_STAG_KEY 0x0106
+#define I40IW_AE_AMP_BAD_STAG_INDEX 0x0107
+#define I40IW_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define I40IW_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define I40IW_AE_AMP_TO_WRAP 0x010a
+#define I40IW_AE_AMP_FASTREG_SHARED 0x010b
+#define I40IW_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define I40IW_AE_AMP_FASTREG_MW_STAG 0x010d
+#define I40IW_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define I40IW_AE_AMP_FASTREG_PBL_TABLE_OVERFLOW 0x010f
+#define I40IW_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define I40IW_AE_AMP_INVALIDATE_SHARED 0x0111
+#define I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define I40IW_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define I40IW_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define I40IW_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define I40IW_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define I40IW_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define I40IW_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define I40IW_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define I40IW_AE_AMP_WQE_INVALID_PARAMETER 0x0130
+#define I40IW_AE_BAD_CLOSE 0x0201
+#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define I40IW_AE_CQ_OPERATION_ERROR 0x0203
+#define I40IW_AE_PRIV_OPERATION_DENIED 0x011c
+#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define I40IW_AE_STAG_ZERO_INVALID 0x0206
+#define I40IW_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define I40IW_AE_SRQ_LIMIT 0x0209
+#define I40IW_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define I40IW_AE_WQE_INVALID_PARAMETER 0x020b
+#define I40IW_AE_WQE_LSMM_TOO_LONG 0x0220
+#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID 0x0302
+#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define I40IW_AE_DDP_UBE_INVALID_MO 0x0305
+#define I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define I40IW_AE_DDP_UBE_INVALID_QN 0x0307
+#define I40IW_AE_DDP_NO_L_BIT 0x0308
+#define I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define I40IW_AE_INVALID_ARP_ENTRY 0x0401
+#define I40IW_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define I40IW_AE_STALE_ARP_ENTRY 0x0403
+#define I40IW_AE_INVALID_WQE_LENGTH 0x0404
+#define I40IW_AE_INVALID_MAC_ENTRY 0x0405
+#define I40IW_AE_LLP_CLOSE_COMPLETE 0x0501
+#define I40IW_AE_LLP_CONNECTION_RESET 0x0502
+#define I40IW_AE_LLP_FIN_RECEIVED 0x0503
+#define I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define I40IW_AE_LLP_SEGMENT_TOO_LARGE 0x0506
+#define I40IW_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define I40IW_AE_LLP_SYN_RECEIVED 0x0508
+#define I40IW_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define I40IW_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define I40IW_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define I40IW_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define I40IW_AE_LLP_RX_VLAN_MISMATCH 0x050d
+#define I40IW_AE_RESOURCE_EXHAUSTION 0x0520
+#define I40IW_AE_RESET_SENT 0x0601
+#define I40IW_AE_TERMINATE_SENT 0x0602
+#define I40IW_AE_RESET_NOT_SENT 0x0603
+#define I40IW_AE_LCE_QP_CATASTROPHIC 0x0700
+#define I40IW_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define I40IW_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define I40IW_AE_UDA_XMIT_FRAG_SEQ 0x0800
+#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0801
+#define I40IW_AE_UDA_XMIT_IPADDR_MISMATCH 0x0802
+#define I40IW_AE_QP_SUSPEND_COMPLETE 0x0900
+
+#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY 1
+#define OP_CEQ_DESTROY 2
+#define OP_AEQ_DESTROY 3
+#define OP_DELETE_ARP_CACHE_ENTRY 4
+#define OP_MANAGE_APBVT_ENTRY 5
+#define OP_CEQ_CREATE 6
+#define OP_AEQ_CREATE 7
+#define OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY 8
+#define OP_ADD_LOCAL_MAC_IPADDR_ENTRY 9
+#define OP_MANAGE_QHASH_TABLE_ENTRY 10
+#define OP_QP_MODIFY 11
+#define OP_QP_UPLOAD_CONTEXT 12
+#define OP_CQ_CREATE 13
+#define OP_CQ_DESTROY 14
+#define OP_QP_CREATE 15
+#define OP_QP_DESTROY 16
+#define OP_ALLOC_STAG 17
+#define OP_MR_REG_NON_SHARED 18
+#define OP_DEALLOC_STAG 19
+#define OP_MW_ALLOC 20
+#define OP_QP_FLUSH_WQES 21
+#define OP_ADD_ARP_CACHE_ENTRY 22
+#define OP_MANAGE_PUSH_PAGE 23
+#define OP_UPDATE_PE_SDS 24
+#define OP_MANAGE_HMC_PM_FUNC_TABLE 25
+#define OP_SUSPEND 26
+#define OP_RESUME 27
+#define OP_MANAGE_VF_PBLE_BP 28
+#define OP_QUERY_FPM_VALUES 29
+#define OP_COMMIT_FPM_VALUES 30
+#define OP_SIZE_CQP_STAT_ARRAY 31
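+
+/*
+ * Usage note (derived from callers later in this patch): the OP_* values
+ * above index the CQP command set; a caller places one of them in
+ * cqp_commands_info->cqp_cmd before posting a control qp request, e.g.
+ * cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY in i40iw_manage_apbvt().
+ */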
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
new file mode 100644
index 000000000000..5484cbf55f0f
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
@@ -0,0 +1,821 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_vf.h"
+#include "i40iw_virtchnl.h"
+
+/**
+ * i40iw_find_sd_index_limit - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the segment descriptor index limit (one past the last descriptor used)
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40iw_hmc_rsrc_type.
+ */
+static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
+ u32 type,
+ u32 idx,
+ u32 cnt,
+ u32 *sd_idx,
+ u32 *sd_limit)
+{
+ u64 fpm_addr, fpm_limit;
+
+ fpm_addr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+ *sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);
+ *sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);
+ *sd_limit += 1;
+}
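+
+/*
+ * Worked example (illustrative only): for an object type with base = 0,
+ * size = 256 bytes, idx = 0 and cnt = 16384, the range spans
+ * 16384 * 256 = 4MB of FPM space. With I40IW_HMC_DIRECT_BP_SIZE = 2MB,
+ * this gives *sd_idx = 0 and *sd_limit = 2, i.e. two segment
+ * descriptors cover the range.
+ */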
+
+/**
+ * i40iw_find_pd_index_limit - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_idx: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40iw_hmc_rsrc_type.
+ */
+static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
+ u32 type,
+ u32 idx,
+ u32 cnt,
+ u32 *pd_idx,
+ u32 *pd_limit)
+{
+ u64 fpm_addr, fpm_limit;
+
+ fpm_addr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+ *pd_idx = (u32)(fpm_addr / I40IW_HMC_PAGED_BP_SIZE);
+ *pd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);
+ *pd_limit += 1;
+}
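+
+/*
+ * Worked example (illustrative only): with the same 4MB object range as
+ * above and I40IW_HMC_PAGED_BP_SIZE = 4096, *pd_idx = 0 and
+ * *pd_limit = 1024, i.e. 1024 page descriptors (two full sd entries of
+ * I40IW_HMC_PD_CNT_IN_SD = 512 each) back the range.
+ */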
+
+/**
+ * i40iw_set_sd_entry - set up an entry for sd programming
+ * @pa: physical addr
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static inline void i40iw_set_sd_entry(u64 pa,
+ u32 idx,
+ enum i40iw_sd_entry_type type,
+ struct update_sd_entry *entry)
+{
+ entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
+ (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
+ entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
+}
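+
+/*
+ * Layout note (derived from the code above): the 64-bit data word
+ * carries the backing-page physical address together with the
+ * backing-page count (I40IW_HMC_MAX_BP_COUNT), the sd type bit
+ * (1 = direct, 0 = paged) and the valid bit; the cmd word carries the
+ * sd index, the PMSDWR write strobe and bit 15. The exact shift values
+ * come from the shared i40e register definitions.
+ */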
+
+/**
+ * i40iw_clr_sd_entry - setup entry for sd clear
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
+ struct update_sd_entry *entry)
+{
+ entry->data = (I40IW_HMC_MAX_BP_COUNT <<
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
+ (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
+ entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
+}
+
+/**
+ * i40iw_hmc_sd_one - set up one sd entry for cqp
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ * @pa: physical addr
+ * @sd_idx: sd index
+ * @type: paged or direct sd
+ * @setsd: flag to set or clear sd
+ */
+enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id,
+ u64 pa, u32 sd_idx,
+ enum i40iw_sd_entry_type type,
+ bool setsd)
+{
+ struct i40iw_update_sds_info sdinfo;
+
+ sdinfo.cnt = 1;
+ sdinfo.hmc_fn_id = hmc_fn_id;
+ if (setsd)
+ i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
+ else
+ i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);
+
+ return dev->cqp->process_cqp_sds(dev, &sdinfo);
+}
+
+/**
+ * i40iw_hmc_sd_grp - set up a group of sd entries for cqp
+ * @dev: pointer to the device structure
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: sd index
+ * @sd_cnt: number of sd entries
+ * @setsd: flag to set or clear sd
+ */
+static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_info *hmc_info,
+ u32 sd_index,
+ u32 sd_cnt,
+ bool setsd)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+ struct i40iw_update_sds_info sdinfo;
+ u64 pa;
+ u32 i;
+ enum i40iw_status_code ret_code = 0;
+
+ memset(&sdinfo, 0, sizeof(sdinfo));
+ sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
+ for (i = sd_index; i < sd_index + sd_cnt; i++) {
+ sd_entry = &hmc_info->sd_table.sd_entry[i];
+ if (!sd_entry ||
+ (!sd_entry->valid && setsd) ||
+ (sd_entry->valid && !setsd))
+ continue;
+ if (setsd) {
+ pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa :
+ sd_entry->u.bp.addr.pa;
+ i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ } else {
+ i40iw_clr_sd_entry(i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ }
+ sdinfo.cnt++;
+ if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
+ ret_code);
+ return ret_code;
+ }
+ sdinfo.cnt = 0;
+ }
+ }
+ if (sdinfo.cnt)
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+
+ return ret_code;
+}
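+
+/*
+ * Batching note: sd entries are accumulated into sdinfo and flushed to
+ * the cqp in groups of at most I40IW_MAX_SD_ENTRIES (11). Programming
+ * 25 descriptors, for example, issues three cqp commands (11 + 11 + 3).
+ */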
+
+/**
+ * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ */
+struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
+{
+ struct i40iw_vfdev *vf_dev = NULL;
+ u16 idx;
+
+ for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
+ if (dev->vf_dev[idx] &&
+ ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
+ vf_dev = dev->vf_dev[idx];
+ break;
+ }
+ }
+ return vf_dev;
+}
+
+/**
+ * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ */
+struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id)
+{
+ struct i40iw_hmc_info *hmc_info = NULL;
+ u16 idx;
+
+ for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
+ if (dev->vf_dev[idx] &&
+ ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
+ hmc_info = &dev->vf_dev[idx]->hmc_info;
+ break;
+ }
+ }
+ return hmc_info;
+}
+
+/**
+ * i40iw_hmc_finish_add_sd_reg - program sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: create obj info
+ */
+static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *info)
+{
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
+
+ if (!info->add_sd_cnt)
+ return 0;
+
+ return i40iw_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0],
+ info->add_sd_cnt, true);
+}
+
+/**
+ * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to i40iw_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ */
+enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *info)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx = 0, pd_lmt = 0;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 i, j;
+ bool pd_error = false;
+ enum i40iw_status_code ret_code = 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
+ __func__, info->rsrc_type, info->start_idx, info->count,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
+ }
+
+ if (!dev->is_pf)
+ return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);
+
+ i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ return I40IW_ERR_INVALID_SD_INDEX;
+ }
+ i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx, &pd_lmt);
+
+ for (j = sd_idx; j < sd_lmt; j++) {
+ ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
+ j,
+ info->entry_type,
+ I40IW_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+
+ if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
+ ((dev->hmc_info == info->hmc_info) &&
+ (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
+ pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ (j + 1) * I40IW_HMC_MAX_BP_COUNT);
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
+ i, NULL);
+ if (ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ while (i && (i > pd_idx1)) {
+ i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
+ info->is_pf);
+ i--;
+ }
+ }
+ }
+ if (sd_entry->valid)
+ continue;
+
+ info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
+ info->add_sd_cnt++;
+ sd_entry->valid = true;
+ }
+ return i40iw_hmc_finish_add_sd_reg(dev, info);
+
+exit_sd_error:
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40IW_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ (j - 1) * I40IW_HMC_MAX_BP_COUNT);
+ pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++)
+ i40iw_prep_remove_pd_page(info->hmc_info, i);
+ break;
+ case I40IW_SD_TYPE_DIRECT:
+ i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40IW_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_finish_del_sd_reg - delete sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: delete obj info
+ * @reset: true if called before reset
+ */
+static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_del_obj_info *info,
+ bool reset)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+ enum i40iw_status_code ret_code = 0;
+ u32 i, sd_idx;
+ struct i40iw_dma_mem *mem;
+
+ if (dev->is_pf && !reset)
+ ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0],
+ info->del_sd_cnt, false);
+
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);
+
+ for (i = 0; i < info->del_sd_cnt; i++) {
+ sd_idx = info->hmc_info->sd_indexes[i];
+ sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
+ if (!sd_entry)
+ continue;
+ mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
+ &sd_entry->u.pd_table.pd_page_addr :
+ &sd_entry->u.bp.addr;
+
+ if (!mem || !mem->va)
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
+ else
+ i40iw_free_dma_mem(dev->hw, mem);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_del_hmc_obj - remove pe hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to i40iw_hmc_del_obj_info struct
+ * @reset: true if called before reset
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * book-keeping information about PDs and backing storage.
+ */
+enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_del_obj_info *info,
+ bool reset)
+{
+ struct i40iw_hmc_pd_table *pd_table;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 i, j;
+ enum i40iw_status_code ret_code = 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
+ __func__, info->start_idx, info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
+ __func__, info->start_idx, info->count,
+ info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
+ }
+ if (!dev->is_pf) {
+ ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
+ info->count);
+ if (info->rsrc_type != I40IW_HMC_IW_PBLE)
+ return ret_code;
+ }
+
+ i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx, &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;
+
+ if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
+ I40IW_SD_TYPE_PAGED)
+ continue;
+
+ rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
+ pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
+ info->is_pf);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
+ return ret_code;
+ }
+ }
+ }
+
+ i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
+ return I40IW_ERR_INVALID_SD_INDEX;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40IW_SD_TYPE_DIRECT:
+ ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
+ if (!ret_code) {
+ info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
+ info->del_sd_cnt++;
+ }
+ break;
+ case I40IW_SD_TYPE_PAGED:
+ ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
+ if (!ret_code) {
+ info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
+ info->del_sd_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return i40iw_finish_del_sd_reg(dev, info, reset);
+}
+
+/**
+ * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ */
+enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40iw_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40iw_status_code ret_code = 0;
+ struct i40iw_hmc_sd_entry *sd_entry;
+ bool dma_mem_alloc_done = false;
+ struct i40iw_dma_mem mem;
+ u64 alloc_len;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (type == I40IW_SD_TYPE_PAGED)
+ alloc_len = I40IW_HMC_PAGED_BP_SIZE;
+ else
+ alloc_len = direct_mode_sz;
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
+ I40IW_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = true;
+ if (type == I40IW_SD_TYPE_PAGED) {
+ ret_code = i40iw_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40iw_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+
+ memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
+ } else {
+ memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
+ I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (ret_code)
+ if (dma_mem_alloc_done)
+ i40iw_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
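+
+/*
+ * Sizing note: a paged sd costs one 4K pd page
+ * (I40IW_HMC_PAGED_BP_SIZE) plus book-keeping for 512
+ * struct i40iw_hmc_pd_entry entries, while a direct sd allocates
+ * direct_mode_sz bytes of contiguous DMA memory; the caller in this
+ * file passes I40IW_HMC_DIRECT_BP_SIZE (2MB).
+ */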
+
+/**
+ * i40iw_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use this preallocated page instead of allocating a new one.
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Marks the entry valid in the i40iw_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ * aligned on a 4K boundary and zeroed.
+ * 2. It should be 4K in size.
+ */
+enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40iw_dma_mem *rsrc_pg)
+{
+ enum i40iw_status_code ret_code = 0;
+ struct i40iw_hmc_pd_table *pd_table;
+ struct i40iw_hmc_pd_entry *pd_entry;
+ struct i40iw_dma_mem mem;
+ struct i40iw_dma_mem *page = &mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
+ return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
+
+ sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);
+ if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
+ return 0;
+
+ rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ ret_code = i40iw_allocate_dma_mem(hw, page,
+ I40IW_HMC_PAGED_BP_SIZE,
+ I40IW_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+ pd_entry->rsrc_pg = false;
+ }
+
+ memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;
+ page_desc = page->pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ I40IW_INC_PD_REFCNT(pd_table);
+ if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
+ I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
+ else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
+ I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
+ hmc_info->hmc_fn_id);
+ }
+ I40IW_INC_BP_REFCNT(&pd_entry->bp);
+
+ return 0;
+}
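+
+/*
+ * Index example (illustrative only): pd_index = 1000 maps to
+ * sd_idx = 1000 / 512 = 1 and rel_pd_idx = 1000 % 512 = 488; the page
+ * descriptor written at that slot is the backing page's physical
+ * address with bit 0 set as the valid bit (page_desc = pa | 0x1).
+ */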
+
+/**
+ * i40iw_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: distinguishes a VF from a PF
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) or in the
+ * sd table (for direct address mode) invalid.
+ * 2. Writes to the PMPDINV register to invalidate the backing page in
+ * the FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. The caller can deallocate the memory used by the backing storage
+ * after this function returns.
+ */
+enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info,
+ u32 idx,
+ bool is_pf)
+{
+ struct i40iw_hmc_pd_entry *pd_entry;
+ struct i40iw_hmc_pd_table *pd_table;
+ struct i40iw_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ struct i40iw_dma_mem *mem;
+ u64 *pd_addr;
+
+ sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt)
+ return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
+ return I40IW_ERR_INVALID_SD_TYPE;
+
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40IW_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ return 0;
+
+ pd_entry->valid = false;
+ I40IW_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memset(pd_addr, 0, sizeof(u64));
+ if (is_pf)
+ I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+ else
+ I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
+ hmc_info->hmc_fn_id);
+
+ if (!pd_entry->rsrc_pg) {
+ mem = &pd_entry->bp.addr;
+ if (!mem || !mem->va)
+ return I40IW_ERR_PARAM;
+ i40iw_free_dma_mem(hw, mem);
+ }
+ if (!pd_table->ref_cnt)
+ i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+
+ return 0;
+}
+
+/**
+ * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ */
+enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40IW_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt)
+ return I40IW_ERR_NOT_READY;
+
+ I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
+ sd_entry->valid = false;
+
+ return 0;
+}
+
+/**
+ * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ */
+enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
+ u32 idx)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt)
+ return I40IW_ERR_NOT_READY;
+
+ sd_entry->valid = false;
+ I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ return 0;
+}
+
+/**
+ * i40iw_pf_init_vfhmc - initialize hmc_info for a vf driver instance
+ * @dev: pointer to i40iw_sc_dev struct
+ * @vf_hmc_fn_id: hmc function id for the vf driver
+ * @vf_cnt_array: array of cnt values of iwarp hmc objects
+ *
+ * Called by the pf driver to initialize hmc_info for a vf driver instance.
+ */
+enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
+ u8 vf_hmc_fn_id,
+ u32 *vf_cnt_array)
+{
+ struct i40iw_hmc_info *hmc_info;
+ enum i40iw_status_code ret_code = 0;
+ u32 i;
+
+ if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
+ (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
+ I40IW_MAX_PE_ENABLED_VF_COUNT)) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n",
+ __func__, vf_hmc_fn_id);
+ return I40IW_ERR_INVALID_HMCFN_ID;
+ }
+
+ ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
+ if (ret_code)
+ return ret_code;
+
+ hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);
+
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
+ if (vf_cnt_array)
+ hmc_info->hmc_obj[i].cnt =
+ vf_cnt_array[i - I40IW_HMC_IW_QP];
+ else
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.h b/drivers/infiniband/hw/i40iw/i40iw_hmc.h
new file mode 100644
index 000000000000..4c3fdd875621
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.h
@@ -0,0 +1,241 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_HMC_H
+#define I40IW_HMC_H
+
+#include "i40iw_d.h"
+
+struct i40iw_hw;
+enum i40iw_status_code;
+
+#define I40IW_HMC_MAX_BP_COUNT 512
+#define I40IW_MAX_SD_ENTRIES 11
+#define I40IW_HW_DBG_HMC_INVALID_BP_MARK 0xCA
+
+#define I40IW_HMC_INFO_SIGNATURE 0x484D5347
+#define I40IW_HMC_PD_CNT_IN_SD 512
+#define I40IW_HMC_DIRECT_BP_SIZE 0x200000
+#define I40IW_HMC_MAX_SD_COUNT 4096
+#define I40IW_HMC_PAGED_BP_SIZE 4096
+#define I40IW_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40IW_FIRST_VF_FPM_ID 16
+#define FPM_MULTIPLIER 1024
+
+#define I40IW_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40IW_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40IW_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40IW_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40IW_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40IW_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40IW_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ */
+#define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ i40iw_wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40IW_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ * @hmc_fn_id: VF's function id
+ */
+#define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
+ i40iw_wr32(hw, I40E_GLHMC_VFPDINV(hmc_fn_id - I40IW_FIRST_VF_FPM_ID), \
+ ((sd_idx << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ (pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
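+
+/*
+ * Usage sketch (hypothetical indexes): after clearing the page
+ * descriptor for sd_idx = 3, rel_pd_idx = 7 on the pf, the cached copy
+ * is dropped with I40IW_INVALIDATE_PF_HMC_PD(hw, 3, 7); the vf variant
+ * additionally selects the function's PDINV register by hmc_fn_id.
+ */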
+
+struct i40iw_hmc_obj_info {
+ u64 base;
+ u32 max_cnt;
+ u32 cnt;
+ u64 size;
+};
+
+enum i40iw_sd_entry_type {
+ I40IW_SD_TYPE_INVALID = 0,
+ I40IW_SD_TYPE_PAGED = 1,
+ I40IW_SD_TYPE_DIRECT = 2
+};
+
+struct i40iw_hmc_bp {
+ enum i40iw_sd_entry_type entry_type;
+ struct i40iw_dma_mem addr;
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40iw_hmc_pd_entry {
+ struct i40iw_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg;
+ bool valid;
+};
+
+struct i40iw_hmc_pd_table {
+ struct i40iw_dma_mem pd_page_addr;
+ struct i40iw_hmc_pd_entry *pd_entry;
+ struct i40iw_virt_mem pd_entry_virt_mem;
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40iw_hmc_sd_entry {
+ enum i40iw_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40iw_hmc_pd_table pd_table;
+ struct i40iw_hmc_bp bp;
+ } u;
+};
+
+struct i40iw_hmc_sd_table {
+ struct i40iw_virt_mem addr;
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40iw_hmc_sd_entry *sd_entry;
+};
+
+struct i40iw_hmc_info {
+ u32 signature;
+ u8 hmc_fn_id;
+ u16 first_sd_index;
+
+ struct i40iw_hmc_obj_info *hmc_obj;
+ struct i40iw_virt_mem hmc_obj_virt_mem;
+ struct i40iw_hmc_sd_table sd_table;
+ u16 sd_indexes[I40IW_HMC_MAX_SD_COUNT];
+};
+
+struct update_sd_entry {
+ u64 cmd;
+ u64 data;
+};
+
+struct i40iw_update_sds_info {
+ u32 cnt;
+ u8 hmc_fn_id;
+ struct update_sd_entry entry[I40IW_MAX_SD_ENTRIES];
+};
+
+struct i40iw_ccq_cqe_info;
+struct i40iw_hmc_fcn_info {
+ void (*callback_fcn)(struct i40iw_sc_dev *, void *,
+ struct i40iw_ccq_cqe_info *);
+ void *cqp_callback_param;
+ u32 vf_id;
+ u16 iw_vf_idx;
+ bool free_fcn;
+};
+
+enum i40iw_hmc_rsrc_type {
+ I40IW_HMC_IW_QP = 0,
+ I40IW_HMC_IW_CQ = 1,
+ I40IW_HMC_IW_SRQ = 2,
+ I40IW_HMC_IW_HTE = 3,
+ I40IW_HMC_IW_ARP = 4,
+ I40IW_HMC_IW_APBVT_ENTRY = 5,
+ I40IW_HMC_IW_MR = 6,
+ I40IW_HMC_IW_XF = 7,
+ I40IW_HMC_IW_XFFL = 8,
+ I40IW_HMC_IW_Q1 = 9,
+ I40IW_HMC_IW_Q1FL = 10,
+ I40IW_HMC_IW_TIMER = 11,
+ I40IW_HMC_IW_FSIMC = 12,
+ I40IW_HMC_IW_FSIAV = 13,
+ I40IW_HMC_IW_PBLE = 14,
+ I40IW_HMC_IW_MAX = 15,
+};
+
+struct i40iw_hmc_create_obj_info {
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_virt_mem add_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 add_sd_cnt;
+ enum i40iw_sd_entry_type entry_type;
+ bool is_pf;
+};
+
+struct i40iw_hmc_del_obj_info {
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_virt_mem del_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 del_sd_cnt;
+ bool is_pf;
+};
+
+enum i40iw_status_code i40iw_copy_dma_mem(struct i40iw_hw *hw, void *dest_buf,
+ struct i40iw_dma_mem *src_mem, u64 src_offset, u64 size);
+enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *info);
+enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_del_obj_info *info,
+ bool reset);
+enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, u8 hmc_fn_id,
+ u64 pa, u32 sd_idx, enum i40iw_sd_entry_type type,
+ bool setsd);
+enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *info);
+struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id);
+struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id);
+enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info, u32 sd_index,
+ enum i40iw_sd_entry_type type, u64 direct_mode_sz);
+enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info, u32 pd_index,
+ struct i40iw_dma_mem *rsrc_pg);
+enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info, u32 idx, bool is_pf);
+enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx);
+enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, u32 idx);
+
+#define ENTER_SHARED_FUNCTION()
+#define EXIT_SHARED_FUNCTION()
+
+#endif /* I40IW_HMC_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
new file mode 100644
index 000000000000..9fd302425563
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -0,0 +1,730 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+
+#include "i40iw.h"
+
+/**
+ * i40iw_initialize_hw_resources - initialize hw resources during open
+ * @iwdev: iwarp device
+ */
+u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
+{
+ unsigned long num_pds;
+ u32 resources_size;
+ u32 max_mr;
+ u32 max_qp;
+ u32 max_cq;
+ u32 arp_table_size;
+ u32 mrdrvbits;
+ void *resource_ptr;
+
+ max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
+ max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+ max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
+ arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
+ iwdev->max_cqe = 0xFFFFF;
+ num_pds = max_qp * 4;
+ resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
+ resources_size += sizeof(struct i40iw_qp **) * max_qp;
+ iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);
+
+ if (!iwdev->mem_resources)
+ return -ENOMEM;
+
+ iwdev->max_qp = max_qp;
+ iwdev->max_mr = max_mr;
+ iwdev->max_cq = max_cq;
+ iwdev->max_pd = num_pds;
+ iwdev->arp_table_size = arp_table_size;
+ iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
+ resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);
+
+ iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+ IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+ iwdev->allocated_qps = resource_ptr;
+ iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
+ iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
+ iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
+ iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
+ iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
+ set_bit(0, iwdev->allocated_mrs);
+ set_bit(0, iwdev->allocated_qps);
+ set_bit(0, iwdev->allocated_cqs);
+ set_bit(0, iwdev->allocated_pds);
+ set_bit(0, iwdev->allocated_arps);
+
+ /* Following for ILQ/IEQ */
+ set_bit(1, iwdev->allocated_qps);
+ set_bit(1, iwdev->allocated_cqs);
+ set_bit(1, iwdev->allocated_pds);
+ set_bit(2, iwdev->allocated_cqs);
+ set_bit(2, iwdev->allocated_pds);
+
+ spin_lock_init(&iwdev->resource_lock);
+ mrdrvbits = 24 - get_count_order(iwdev->max_mr);
+ iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
+ return 0;
+}
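+
+/*
+ * Layout note: mem_resources is a single kzalloc'd region carved into,
+ * in order, the arp entry array, the qp/cq/mr/pd/arp allocation bitmaps
+ * and the qp pointer table; resource_ptr above marks the start of the
+ * bitmap section.
+ */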
+
+/**
+ * i40iw_cqp_ce_handler - handle cqp completions
+ * @iwdev: iwarp device
+ * @cq: cq for cqp completions
+ * @arm: flag to arm after completions
+ */
+static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
+{
+ struct i40iw_cqp_request *cqp_request;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ u32 cqe_count = 0;
+ struct i40iw_ccq_cqe_info info;
+ int ret;
+
+ do {
+ memset(&info, 0, sizeof(info));
+ ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
+ if (ret)
+ break;
+ cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
+ if (info.error)
+ i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
+ info.op_code, info.maj_err_code, info.min_err_code);
+ if (cqp_request) {
+ cqp_request->compl_info.maj_err_code = info.maj_err_code;
+ cqp_request->compl_info.min_err_code = info.min_err_code;
+ cqp_request->compl_info.op_ret_val = info.op_ret_val;
+ cqp_request->compl_info.error = info.error;
+
+ if (cqp_request->waiting) {
+ cqp_request->request_done = true;
+ wake_up(&cqp_request->waitq);
+ i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+ } else {
+ if (cqp_request->callback_fcn)
+ cqp_request->callback_fcn(cqp_request, 1);
+ i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+ }
+ }
+
+ cqe_count++;
+ } while (1);
+
+ if (arm && cqe_count) {
+ i40iw_process_bh(dev);
+ dev->ccq_ops->ccq_arm(cq);
+ }
+}
+
+/**
+ * i40iw_iwarp_ce_handler - handle iwarp completions
+ * @iwdev: iwarp device
+ * @iwcp: iwarp cq receiving event
+ */
+static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
+ struct i40iw_sc_cq *iwcq)
+{
+ struct i40iw_cq *i40iwcq = iwcq->back_cq;
+
+ if (i40iwcq->ibcq.comp_handler)
+ i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,
+ i40iwcq->ibcq.cq_context);
+}
+
+/**
+ * i40iw_puda_ce_handler - handle puda completion events
+ * @iwdev: iwarp device
+ * @cq: puda completion q for event
+ */
+static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
+ struct i40iw_sc_cq *cq)
+{
+ struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;
+ enum i40iw_status_code status;
+ u32 compl_error;
+
+ do {
+ status = i40iw_puda_poll_completion(dev, cq, &compl_error);
+ if (status == I40IW_ERR_QUEUE_EMPTY)
+ break;
+ if (status) {
+ i40iw_pr_err("puda status = %d\n", status);
+ break;
+ }
+ if (compl_error) {
+ i40iw_pr_err("puda compl_err =0x%x\n", compl_error);
+ break;
+ }
+ } while (1);
+
+ dev->ccq_ops->ccq_arm(cq);
+}
+
+/**
+ * i40iw_process_ceq - handle ceq for completions
+ * @iwdev: iwarp device
+ * @ceq: ceq having cq for completion
+ */
+void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_sc_ceq *sc_ceq;
+ struct i40iw_sc_cq *cq;
+ bool arm = true;
+
+ sc_ceq = &ceq->sc_ceq;
+ do {
+ cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
+ if (!cq)
+ break;
+
+ if (cq->cq_type == I40IW_CQ_TYPE_CQP)
+ i40iw_cqp_ce_handler(iwdev, cq, arm);
+ else if (cq->cq_type == I40IW_CQ_TYPE_IWARP)
+ i40iw_iwarp_ce_handler(iwdev, cq);
+ else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||
+ (cq->cq_type == I40IW_CQ_TYPE_IEQ))
+ i40iw_puda_ce_handler(iwdev, cq);
+ } while (1);
+}
+
+/**
+ * i40iw_next_iw_state - modify qp state
+ * @iwqp: iwarp qp to modify
+ * @state: next state for qp
+ * @del_hash: flag to remove the hash entry
+ * @term: flags controlling whether a terminate message and/or fin is sent
+ * @termlen: length of the terminate message
+ */
+void i40iw_next_iw_state(struct i40iw_qp *iwqp,
+ u8 state,
+ u8 del_hash,
+ u8 term,
+ u8 termlen)
+{
+ struct i40iw_modify_qp_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.next_iwarp_state = state;
+ info.remove_hash_idx = del_hash;
+ info.cq_num_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.dont_send_term = true;
+ info.dont_send_fin = true;
+ info.termlen = termlen;
+
+ if (term & I40IWQP_TERM_SEND_TERM_ONLY)
+ info.dont_send_term = false;
+ if (term & I40IWQP_TERM_SEND_FIN_ONLY)
+ info.dont_send_fin = false;
+ if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
+ info.reset_tcp_conn = true;
+ i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
+}
+
+/**
+ * i40iw_process_aeq - handle aeq events
+ * @iwdev: iwarp device
+ */
+void i40iw_process_aeq(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_aeq *aeq = &iwdev->aeq;
+ struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
+ struct i40iw_aeqe_info aeinfo;
+ struct i40iw_aeqe_info *info = &aeinfo;
+ int ret;
+ struct i40iw_qp *iwqp = NULL;
+ struct i40iw_sc_cq *cq = NULL;
+ struct i40iw_cq *iwcq = NULL;
+ struct i40iw_sc_qp *qp = NULL;
+ struct i40iw_qp_host_ctx_info *ctx_info = NULL;
+ unsigned long flags;
+
+ u32 aeqcnt = 0;
+
+ if (!sc_aeq->size)
+ return;
+
+ do {
+ memset(info, 0, sizeof(*info));
+ ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
+ if (ret)
+ break;
+
+ aeqcnt++;
+ i40iw_debug(dev, I40IW_DEBUG_AEQ,
+ "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
+ __func__, info->ae_id, info->qp, info->qp_cq_id);
+ if (info->qp) {
+ iwqp = iwdev->qp_table[info->qp_cq_id];
+ if (!iwqp) {
+ i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
+ continue;
+ }
+ qp = &iwqp->sc_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = info->tcp_state;
+ iwqp->hw_iwarp_state = info->iwarp_state;
+ iwqp->last_aeq = info->ae_id;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ ctx_info = &iwqp->ctx_info;
+ ctx_info->err_rq_idx_valid = true;
+ } else {
+ if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
+ continue;
+ }
+
+ switch (info->ae_id) {
+ case I40IW_AE_LLP_FIN_RECEIVED:
+ if (qp->term_flags)
+ continue;
+ if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+ iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
+ if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
+ (iwqp->ibqp_state == IB_QPS_RTS)) {
+ i40iw_next_iw_state(iwqp,
+ I40IW_QP_STATE_CLOSING, 0, 0, 0);
+ i40iw_cm_disconn(iwqp);
+ }
+ iwqp->cm_id->add_ref(iwqp->cm_id);
+ i40iw_schedule_cm_timer(iwqp->cm_node,
+ (struct i40iw_puda_buf *)iwqp,
+ I40IW_TIMER_TYPE_CLOSE, 1, 0);
+ }
+ break;
+ case I40IW_AE_LLP_CLOSE_COMPLETE:
+ if (qp->term_flags)
+ i40iw_terminate_done(qp, 0);
+ else
+ i40iw_cm_disconn(iwqp);
+ break;
+ case I40IW_AE_RESET_SENT:
+ i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
+ i40iw_cm_disconn(iwqp);
+ break;
+ case I40IW_AE_LLP_CONNECTION_RESET:
+ if (atomic_read(&iwqp->close_timer_started))
+ continue;
+ i40iw_cm_disconn(iwqp);
+ break;
+ case I40IW_AE_TERMINATE_SENT:
+ i40iw_terminate_send_fin(qp);
+ break;
+ case I40IW_AE_LLP_TERMINATE_RECEIVED:
+ i40iw_terminate_received(qp, info);
+ break;
+ case I40IW_AE_CQ_OPERATION_ERROR:
+ i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
+ info->ae_id);
+ cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
+ iwcq = (struct i40iw_cq *)cq->back_cq;
+
+ if (iwcq->ibcq.event_handler) {
+ struct ib_event ibevent;
+
+ ibevent.device = iwcq->ibcq.device;
+ ibevent.event = IB_EVENT_CQ_ERR;
+ ibevent.element.cq = &iwcq->ibcq;
+ iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
+ }
+ break;
+ case I40IW_AE_PRIV_OPERATION_DENIED:
+ case I40IW_AE_STAG_ZERO_INVALID:
+ case I40IW_AE_IB_RREQ_AND_Q1_FULL:
+ case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case I40IW_AE_DDP_UBE_INVALID_MO:
+ case I40IW_AE_DDP_UBE_INVALID_QN:
+ case I40IW_AE_DDP_NO_L_BIT:
+ case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case I40IW_AE_INVALID_ARP_ENTRY:
+ case I40IW_AE_INVALID_TCP_OPTION_RCVD:
+ case I40IW_AE_STALE_ARP_ENTRY:
+ case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+ case I40IW_AE_LLP_SYN_RECEIVED:
+ case I40IW_AE_LLP_TOO_MANY_RETRIES:
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ case I40IW_AE_LCE_QP_CATASTROPHIC:
+ case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
+ case I40IW_AE_LCE_CQ_CATASTROPHIC:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
+ case I40IW_AE_QP_SUSPEND_COMPLETE:
+ ctx_info->err_rq_idx_valid = false;
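+ /* fall through */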
+ default:
+ if (!info->sq && ctx_info->err_rq_idx_valid) {
+ ctx_info->err_rq_idx = info->wqe_idx;
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+ ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+ iwqp->host_ctx.va,
+ ctx_info);
+ }
+ i40iw_terminate_connection(qp, info);
+ break;
+ }
+ } while (1);
+
+ if (aeqcnt)
+ dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
+}
+
+/**
+ * i40iw_manage_apbvt - add or delete tcp port
+ * @iwdev: iwarp device
+ * @accel_local_port: port for apbvt
+ * @add_port: add or delete port
+ */
+int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port)
+{
+ struct i40iw_apbvt_info *info;
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_apbvt_entry.info;
+
+ memset(info, 0, sizeof(*info));
+ info->add = add_port;
+ info->port = cpu_to_le16(accel_local_port);
+
+ cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Manage APBVT entry fail");
+ return status;
+}
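+
+/*
+ * Usage sketch (hypothetical port): a cm listener on tcp port 5000
+ * would be accelerated with i40iw_manage_apbvt(iwdev, 5000, true) and
+ * torn down with i40iw_manage_apbvt(iwdev, 5000, false).
+ */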
+
+/**
+ * i40iw_manage_arp_cache - manage hw arp cache
+ * @iwdev: iwarp device
+ * @mac_addr: mac address ptr
+ * @ip_addr: ip addr for arp cache
+ * @ipv4: flag indicating whether ip_addr is ipv4
+ * @action: add, delete or modify
+ */
+void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
+ unsigned char *mac_addr,
+ __be32 *ip_addr,
+ bool ipv4,
+ u32 action)
+{
+ struct i40iw_add_arp_cache_entry_info *info;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ int arp_index;
+
+ arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
+ if (arp_index == -1)
+ return;
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ if (action == I40IW_ARP_ADD) {
+ cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
+ info = &cqp_info->in.u.add_arp_cache_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->arp_index = cpu_to_le32(arp_index);
+ info->permanent = true;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+ } else {
+ cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
+ cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
+ }
+
+ cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->post_sq = 1;
+ if (i40iw_handle_cqp_op(iwdev, cqp_request))
+ i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
+}
+
+/**
+ * i40iw_send_syn_cqp_callback - do syn/ack after qhash
+ * @cqp_request: qhash cqp completion
+ * @send_ack: flag send ack
+ */
+static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
+{
+ i40iw_send_syn(cqp_request->param, send_ack);
+}
+
+/**
+ * i40iw_manage_qhash - add or modify qhash
+ * @iwdev: iwarp device
+ * @cminfo: cm info for qhash
+ * @etype: type (syn or quad)
+ * @mtype: type of qhash
+ * @cmnode: cmnode associated with connection
+ * @wait: wait for completion
+ * @user_pri:user pri of the connection
+ */
+enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cminfo,
+ enum i40iw_quad_entry_type etype,
+ enum i40iw_quad_hash_manage_type mtype,
+ void *cmnode,
+ bool wait)
+{
+ struct i40iw_qhash_table_info *info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_qhash_table_entry.info;
+ memset(info, 0, sizeof(*info));
+
+ info->manage = mtype;
+ info->entry_type = etype;
+ if (cminfo->vlan_id != 0xFFFF) {
+ info->vlan_valid = true;
+ info->vlan_id = cpu_to_le16(cminfo->vlan_id);
+ } else {
+ info->vlan_valid = false;
+ }
+
+ info->ipv4_valid = cminfo->ipv4;
+ ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
+ info->qp_num = cpu_to_le32(dev->ilq->qp_id);
+ info->dest_port = cpu_to_le16(cminfo->loc_port);
+ info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
+ info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
+ info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
+ info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
+ if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
+ info->src_port = cpu_to_le16(cminfo->rem_port);
+ info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
+ info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
+ info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
+ info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
+ }
+ if (cmnode) {
+ cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
+ cqp_request->param = (void *)cmnode;
+ }
+
+ if (info->ipv4_valid)
+ i40iw_debug(dev, I40IW_DEBUG_CM,
+ "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
+ __func__, (!mtype) ? "DELETE" : "ADD",
+ info->dest_ip,
+ info->dest_port, info->mac_addr, cminfo->vlan_id);
+ else
+ i40iw_debug(dev, I40IW_DEBUG_CM,
+ "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
+ __func__, (!mtype) ? "DELETE" : "ADD",
+ info->dest_ip,
+ info->dest_port, info->mac_addr, cminfo->vlan_id);
+ cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
+ cqp_info->post_sq = 1;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
+ return status;
+}
+
+/**
+ * i40iw_hw_flush_wqes - flush qp's wqe
+ * @iwdev: iwarp device
+ * @qp: hardware control qp
+ * @info: info for flush
+ * @wait: flag wait for completion
+ */
+enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
+ struct i40iw_sc_qp *qp,
+ struct i40iw_qp_flush_info *info,
+ bool wait)
+{
+ enum i40iw_status_code status;
+ struct i40iw_qp_flush_info *hw_info;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+
+ cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_flush_wqes.qp = qp;
+ cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Flush WQE's fail");
+ return status;
+}
+
+/**
+ * i40iw_hw_manage_vf_pble_bp - manage vf pbles
+ * @iwdev: iwarp device
+ * @info: info for managing pble
+ * @wait: flag wait for completion
+ */
+enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
+ struct i40iw_manage_vf_pble_info *info,
+ bool wait)
+{
+ enum i40iw_status_code status;
+ struct i40iw_manage_vf_pble_info *hw_info;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ if ((iwdev->init_state < CCQ_CREATED) && wait)
+ wait = false;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+
+ cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
+ return status;
+}
+
+/**
+ * i40iw_get_ib_wc - convert an iwarp flush code to an ib work completion status
+ * @opcode: iwarp flush code
+ */
+static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
+{
+ switch (opcode) {
+ case FLUSH_PROT_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case FLUSH_REM_ACCESS_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case FLUSH_LOC_QP_OP_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case FLUSH_REM_OP_ERR:
+ return IB_WC_REM_OP_ERR;
+ case FLUSH_LOC_LEN_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case FLUSH_GENERAL_ERR:
+ return IB_WC_GENERAL_ERR;
+ case FLUSH_FATAL_ERR:
+ default:
+ return IB_WC_FATAL_ERR;
+ }
+}
+
+/**
+ * i40iw_set_flush_info - set flush info
+ * @pinfo: qp flush info to update
+ * @min: minor err
+ * @maj: major err
+ * @opcode: flush error code
+ */
+static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
+ u16 *min,
+ u16 *maj,
+ enum i40iw_flush_opcode opcode)
+{
+ *min = (u16)i40iw_get_ib_wc(opcode);
+ *maj = CQE_MAJOR_DRV;
+ pinfo->userflushcode = true;
+}
+
+/**
+ * i40iw_flush_wqes - flush wqe for qp
+ * @iwdev: iwarp device
+ * @iwqp: qp to flush wqes
+ */
+void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
+{
+ struct i40iw_qp_flush_info info;
+ struct i40iw_qp_flush_info *pinfo = &info;
+
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
+ memset(pinfo, 0, sizeof(*pinfo));
+ info.sq = true;
+ info.rq = true;
+ if (qp->term_flags) {
+ i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,
+ &pinfo->sq_major_code, qp->flush_code);
+ i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,
+ &pinfo->rq_major_code, qp->flush_code);
+ }
+ (void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true);
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
new file mode 100644
index 000000000000..90e5af21737e
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -0,0 +1,1910 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+
+#include "i40iw.h"
+#include "i40iw_register.h"
+#include <net/netevent.h>
+#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
+#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
+#define CLIENT_IW_INTERFACE_VERSION_BUILD 00
+
+#define DRV_VERSION_MAJOR 0
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 123
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+ __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
+
+static int push_mode;
+module_param(push_mode, int, 0644);
+MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
+
+static int resource_profile;
+module_param(resource_profile, int, 0644);
+MODULE_PARM_DESC(resource_profile,
+ "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");
+
+static int max_rdma_vfs = 32;
+module_param(max_rdma_vfs, int, 0644);
+MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32, 32=default");
+
+static int mpa_version = 2;
+module_param(mpa_version, int, 0644);
+MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");
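+/* example usage (hypothetical values): modprobe i40iw resource_profile=2 max_rdma_vfs=16 */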
+
+MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static struct i40e_client i40iw_client;
+static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
+
+static LIST_HEAD(i40iw_handlers);
+static spinlock_t i40iw_handler_lock;
+
+static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
+ u32 vf_id, u8 *msg, u16 len);
+
+static struct notifier_block i40iw_inetaddr_notifier = {
+ .notifier_call = i40iw_inetaddr_event
+};
+
+static struct notifier_block i40iw_inetaddr6_notifier = {
+ .notifier_call = i40iw_inet6addr_event
+};
+
+static struct notifier_block i40iw_net_notifier = {
+ .notifier_call = i40iw_net_event
+};
+
+static int i40iw_notifiers_registered;
+
+/**
+ * i40iw_find_i40e_handler - find a handler given a client info
+ * @ldev: pointer to a client info
+ */
+static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
+{
+ struct i40iw_handler *hdl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i40iw_handler_lock, flags);
+ list_for_each_entry(hdl, &i40iw_handlers, list) {
+ if (hdl->ldev.netdev == ldev->netdev) {
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return hdl;
+ }
+ }
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return NULL;
+}
+
+/**
+ * i40iw_find_netdev - find a handler given a netdev
+ * @netdev: pointer to net_device
+ */
+struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
+{
+ struct i40iw_handler *hdl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i40iw_handler_lock, flags);
+ list_for_each_entry(hdl, &i40iw_handlers, list) {
+ if (hdl->ldev.netdev == netdev) {
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return hdl;
+ }
+ }
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return NULL;
+}
+
+/**
+ * i40iw_add_handler - add a handler to the list
+ * @hdl: handler to be added to the handler list
+ */
+static void i40iw_add_handler(struct i40iw_handler *hdl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i40iw_handler_lock, flags);
+ list_add(&hdl->list, &i40iw_handlers);
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+}
+
+/**
+ * i40iw_del_handler - delete a handler from the list
+ * @hdl: handler to be deleted from the handler list
+ */
+static int i40iw_del_handler(struct i40iw_handler *hdl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i40iw_handler_lock, flags);
+ list_del(&hdl->list);
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return 0;
+}
+
+/**
+ * i40iw_enable_intr - set up device interrupts
+ * @dev: hardware control device structure
+ * @msix_id: id of the interrupt to be enabled
+ */
+static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
+{
+ u32 val;
+
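+ /* enable the vector, clear its pending-bit array (PBA) and select ITR index 3 (no throttling) */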
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
+ else
+ i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
+}
+
+/**
+ * i40iw_dpc - tasklet for aeq and ceq 0
+ * @data: iwarp device
+ */
+static void i40iw_dpc(unsigned long data)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)data;
+
+ if (iwdev->msix_shared)
+ i40iw_process_ceq(iwdev, iwdev->ceqlist);
+ i40iw_process_aeq(iwdev);
+ i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
+}
+
+/**
+ * i40iw_ceq_dpc - dpc handler for CEQ
+ * @data: data points to CEQ
+ */
+static void i40iw_ceq_dpc(unsigned long data)
+{
+ struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
+ struct i40iw_device *iwdev = iwceq->iwdev;
+
+ i40iw_process_ceq(iwdev, iwceq);
+ i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
+}
+
+/**
+ * i40iw_irq_handler - interrupt handler for aeq and ceq0
+ * @irq: Interrupt request number
+ * @data: iwarp device
+ */
+static irqreturn_t i40iw_irq_handler(int irq, void *data)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)data;
+
+ tasklet_schedule(&iwdev->dpc_tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40iw_destroy_cqp - destroy control qp
+ * @iwdev: iwarp device
+ * @free_hwcqp: true if the hardware cqp should be destroyed
+ *
+ * Issue destroy cqp request and
+ * free the resources associated with the cqp
+ */
+static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
+{
+ enum i40iw_status_code status = 0;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cqp *cqp = &iwdev->cqp;
+
+ if (free_hwcqp && dev->cqp_ops->cqp_destroy)
+ status = dev->cqp_ops->cqp_destroy(dev->cqp);
+ if (status)
+ i40iw_pr_err("destroy cqp failed");
+
+ i40iw_free_dma_mem(dev->hw, &cqp->sq);
+ kfree(cqp->scratch_array);
+ iwdev->cqp.scratch_array = NULL;
+
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
+}
+
+/**
+ * i40iw_disable_irq - disable device interrupts
+ * @dev: hardware control device structure
+ * @msix_vec: msix vector to disable irq
+ * @dev_id: parameter to pass to free_irq (used during irq setup)
+ *
+ * The function is called when destroying aeq/ceq
+ */
+static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
+ struct i40iw_msix_vector *msix_vec,
+ void *dev_id)
+{
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
+ else
+ i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
+ synchronize_irq(msix_vec->irq);
+ free_irq(msix_vec->irq, dev_id);
+}
+
+/**
+ * i40iw_destroy_aeq - destroy aeq
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ *
+ * Issue a destroy aeq request and
+ * free the resources associated with the aeq
+ * The function is called during driver unload
+ */
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+{
+ enum i40iw_status_code status = I40IW_ERR_NOT_READY;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_aeq *aeq = &iwdev->aeq;
+
+ if (!iwdev->msix_shared)
+ i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
+ if (reset)
+ goto exit;
+
+ if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
+ status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
+ if (status)
+ i40iw_pr_err("destroy aeq failed %d\n", status);
+
+exit:
+ i40iw_free_dma_mem(dev->hw, &aeq->mem);
+}
+
+/**
+ * i40iw_destroy_ceq - destroy ceq
+ * @iwdev: iwarp device
+ * @iwceq: ceq to be destroyed
+ * @reset: true if called before reset
+ *
+ * Issue a destroy ceq request and
+ * free the resources associated with the ceq
+ */
+static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
+ struct i40iw_ceq *iwceq,
+ bool reset)
+{
+ enum i40iw_status_code status;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+ if (reset)
+ goto exit;
+
+ status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
+ if (status) {
+ i40iw_pr_err("ceq destroy command failed %d\n", status);
+ goto exit;
+ }
+
+ status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
+ if (status)
+ i40iw_pr_err("ceq destroy completion failed %d\n", status);
+exit:
+ i40iw_free_dma_mem(dev->hw, &iwceq->mem);
+}
+
+/**
+ * i40iw_dele_ceqs - destroy all ceqs
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ *
+ * Go through all of the device ceqs and for each ceq
+ * disable the ceq interrupt and destroy the ceq
+ */
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+{
+ u32 i = 0;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_ceq *iwceq = iwdev->ceqlist;
+ struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
+
+ if (iwdev->msix_shared) {
+ i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
+ i40iw_destroy_ceq(iwdev, iwceq, reset);
+ iwceq++;
+ i++;
+ }
+
+ for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
+ i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
+ i40iw_destroy_ceq(iwdev, iwceq, reset);
+ }
+}
+
+/**
+ * i40iw_destroy_ccq - destroy control cq
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ *
+ * Issue destroy ccq request and
+ * free the resources associated with the ccq
+ */
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_ccq *ccq = &iwdev->ccq;
+ enum i40iw_status_code status = 0;
+
+ if (!reset)
+ status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
+ if (status)
+ i40iw_pr_err("ccq destroy failed %d\n", status);
+ i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
+}
+
+/* types of hmc objects */
+static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
+ I40IW_HMC_IW_QP,
+ I40IW_HMC_IW_CQ,
+ I40IW_HMC_IW_HTE,
+ I40IW_HMC_IW_ARP,
+ I40IW_HMC_IW_APBVT_ENTRY,
+ I40IW_HMC_IW_MR,
+ I40IW_HMC_IW_XF,
+ I40IW_HMC_IW_XFFL,
+ I40IW_HMC_IW_Q1,
+ I40IW_HMC_IW_Q1FL,
+ I40IW_HMC_IW_TIMER,
+};
+
+/**
+ * i40iw_close_hmc_objects_type - delete hmc objects of a given type
+ * @dev: hardware control device structure
+ * @obj_type: the hmc object type to be deleted
+ * @hmc_info: host memory info structure
+ * @is_pf: true if the function is PF otherwise false
+ * @reset: true if called before reset
+ */
+static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type obj_type,
+ struct i40iw_hmc_info *hmc_info,
+ bool is_pf,
+ bool reset)
+{
+ struct i40iw_hmc_del_obj_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.hmc_info = hmc_info;
+ info.rsrc_type = obj_type;
+ info.count = hmc_info->hmc_obj[obj_type].cnt;
+ info.is_pf = is_pf;
+ if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
+ i40iw_pr_err("del obj of type %d failed\n", obj_type);
+}
+
+/**
+ * i40iw_del_hmc_objects - remove all device hmc objects
+ * @dev: hardware control device structure
+ * @hmc_info: hmc_info to free
+ * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
+ * by PF on behalf of VF
+ * @reset: true if called before reset
+ */
+static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_info *hmc_info,
+ bool is_pf,
+ bool reset)
+{
+ unsigned int i;
+
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
+ i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
+}
+
+/**
+ * i40iw_ceq_handler - interrupt handler for ceq
+ * @irq: interrupt request number
+ * @data: ceq pointer
+ */
+static irqreturn_t i40iw_ceq_handler(int irq, void *data)
+{
+ struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
+
+ if (iwceq->irq != irq)
+ i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
+ tasklet_schedule(&iwceq->dpc_tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40iw_create_hmc_obj_type - create hmc object of a given type
+ * @dev: hardware control device structure
+ * @info: information for the hmc object to create
+ */
+static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *info)
+{
+ return dev->hmc_ops->create_hmc_object(dev, info);
+}
+
+/**
+ * i40iw_create_hmc_objs - create all hmc objects for the device
+ * @iwdev: iwarp device
+ * @is_pf: true if the function is PF otherwise false
+ *
+ * Create the device hmc objects and allocate hmc pages
+ * Return 0 if successful, otherwise clean up and return error
+ */
+static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
+ bool is_pf)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_hmc_create_obj_info info;
+ enum i40iw_status_code status;
+ int i;
+
+ memset(&info, 0, sizeof(info));
+ info.hmc_info = dev->hmc_info;
+ info.is_pf = is_pf;
+ info.entry_type = iwdev->sd_type;
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ info.rsrc_type = iw_hmc_obj_types[i];
+ info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+ status = i40iw_create_hmc_obj_type(dev, &info);
+ if (status) {
+ i40iw_pr_err("create obj type %d status = %d\n",
+ iw_hmc_obj_types[i], status);
+ break;
+ }
+ }
+ if (!status)
+ return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
+ dev->hmc_fn_id,
+ true, true));
+
+ while (i) {
+ i--;
+ /* destroy the hmc objects of a given type */
+ i40iw_close_hmc_objects_type(dev,
+ iw_hmc_obj_types[i],
+ dev->hmc_info,
+ is_pf,
+ false);
+ }
+ return status;
+}
+
+/**
+ * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
+ * @iwdev: iwarp device
+ * @memptr: points to the memory addresses
+ * @size: size of memory needed
+ * @mask: mask for the aligned memory
+ *
+ * Get aligned memory of the requested size and
+ * update the memptr to point to the new aligned memory
+ * Return 0 if successful, otherwise return no memory error
+ */
+enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
+ struct i40iw_dma_mem *memptr,
+ u32 size,
+ u32 mask)
+{
+ unsigned long va, newva;
+ unsigned long extra;
+
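+ /* mask is (alignment - 1), so ALIGN rounds va up to the next (mask + 1) byte boundary; */
+ /* e.g. mask = 0xFFF advances va to the next 4K boundary */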
+ va = (unsigned long)iwdev->obj_next.va;
+ newva = va;
+ if (mask)
+ newva = ALIGN(va, (mask + 1));
+ extra = newva - va;
+ memptr->va = (u8 *)va + extra;
+ memptr->pa = iwdev->obj_next.pa + extra;
+ memptr->size = size;
+ if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
+ return I40IW_ERR_NO_MEMORY;
+
+ iwdev->obj_next.va = memptr->va + size;
+ iwdev->obj_next.pa = memptr->pa + size;
+ return 0;
+}
+
+/**
+ * i40iw_create_cqp - create control qp
+ * @iwdev: iwarp device
+ *
+ * Return 0, if the cqp and all the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
+{
+ enum i40iw_status_code status;
+ u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
+ struct i40iw_dma_mem mem;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cqp_init_info cqp_init_info;
+ struct i40iw_cqp *cqp = &iwdev->cqp;
+ u16 maj_err, min_err;
+ int i;
+
+ cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
+ if (!cqp->cqp_requests)
+ return I40IW_ERR_NO_MEMORY;
+ cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
+ if (!cqp->scratch_array) {
+ kfree(cqp->cqp_requests);
+ return I40IW_ERR_NO_MEMORY;
+ }
+ dev->cqp = &cqp->sc_cqp;
+ dev->cqp->dev = dev;
+ memset(&cqp_init_info, 0, sizeof(cqp_init_info));
+ status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
+ (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
+ I40IW_CQP_ALIGNMENT);
+ if (status)
+ goto exit;
+ status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
+ I40IW_HOST_CTX_ALIGNMENT_MASK);
+ if (status)
+ goto exit;
+ dev->cqp->host_ctx_pa = mem.pa;
+ dev->cqp->host_ctx = mem.va;
+ /* populate the cqp init info */
+ cqp_init_info.dev = dev;
+ cqp_init_info.sq_size = sqsize;
+ cqp_init_info.sq = cqp->sq.va;
+ cqp_init_info.sq_pa = cqp->sq.pa;
+ cqp_init_info.host_ctx_pa = mem.pa;
+ cqp_init_info.host_ctx = mem.va;
+ cqp_init_info.hmc_profile = iwdev->resource_profile;
+ cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
+ cqp_init_info.scratch_array = cqp->scratch_array;
+ status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
+ if (status) {
+ i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n",
+ status, maj_err, min_err);
+ goto exit;
+ }
+ status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
+ if (status) {
+ i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
+ status, maj_err, min_err);
+ goto exit;
+ }
+ spin_lock_init(&cqp->req_lock);
+ INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
+ INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
+ /* init the waitq of the cqp_requests and add them to the list */
+ for (i = 0; i < sqsize; i++) {
+ init_waitqueue_head(&cqp->cqp_requests[i].waitq);
+ list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
+ }
+ return 0;
+exit:
+ /* clean up the created resources */
+ i40iw_destroy_cqp(iwdev, false);
+ return status;
+}
+
+/**
+ * i40iw_create_ccq - create control cq
+ * @iwdev: iwarp device
+ *
+ * Return 0, if the ccq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_dma_mem mem;
+ enum i40iw_status_code status;
+ struct i40iw_ccq_init_info info;
+ struct i40iw_ccq *ccq = &iwdev->ccq;
+
+ memset(&info, 0, sizeof(info));
+ dev->ccq = &ccq->sc_cq;
+ dev->ccq->dev = dev;
+ info.dev = dev;
+ ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
+ ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
+ status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
+ ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
+ if (status)
+ goto exit;
+ status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
+ I40IW_SHADOWAREA_MASK);
+ if (status)
+ goto exit;
+ ccq->sc_cq.back_cq = (void *)ccq;
+ /* populate the ccq init info */
+ info.cq_base = ccq->mem_cq.va;
+ info.cq_pa = ccq->mem_cq.pa;
+ info.num_elem = IW_CCQ_SIZE;
+ info.shadow_area = mem.va;
+ info.shadow_area_pa = mem.pa;
+ info.ceqe_mask = false;
+ info.ceq_id_valid = true;
+ info.shadow_read_threshold = 16;
+ status = dev->ccq_ops->ccq_init(dev->ccq, &info);
+ if (!status)
+ status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
+exit:
+ if (status)
+ i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
+ return status;
+}
+
+/**
+ * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
+ * @iwdev: iwarp device
+ * @msix_vec: interrupt vector information
+ * @iwceq: ceq associated with the vector
+ * @ceq_id: the id number of the iwceq
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
+ struct i40iw_ceq *iwceq,
+ u32 ceq_id,
+ struct i40iw_msix_vector *msix_vec)
+{
+ enum i40iw_status_code status;
+
+ if (iwdev->msix_shared && !ceq_id) {
+ tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
+ status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
+ } else {
+ tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
+ status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
+ }
+
+ if (status) {
+ i40iw_pr_err("ceq irq config fail\n");
+ return I40IW_ERR_CONFIG;
+ }
+ msix_vec->ceq_id = ceq_id;
+ msix_vec->cpu_affinity = 0;
+
+ return 0;
+}
+
+/**
+ * i40iw_create_ceq - create completion event queue
+ * @iwdev: iwarp device
+ * @iwceq: pointer to the ceq resources to be created
+ * @ceq_id: the id number of the iwceq
+ *
+ * Return 0, if the ceq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
+ struct i40iw_ceq *iwceq,
+ u32 ceq_id)
+{
+ enum i40iw_status_code status;
+ struct i40iw_ceq_init_info info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ u64 scratch;
+
+ memset(&info, 0, sizeof(info));
+ info.ceq_id = ceq_id;
+ iwceq->iwdev = iwdev;
+ iwceq->mem.size = sizeof(struct i40iw_ceqe) *
+ iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+ status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
+ I40IW_CEQ_ALIGNMENT);
+ if (status)
+ goto exit;
+ info.ceqe_base = iwceq->mem.va;
+ info.ceqe_pa = iwceq->mem.pa;
+
+ info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+ iwceq->sc_ceq.ceq_id = ceq_id;
+ info.dev = dev;
+ scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
+ status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
+ if (!status)
+ status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
+
+exit:
+ if (status)
+ i40iw_free_dma_mem(dev->hw, &iwceq->mem);
+ return status;
+}
+
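+/**
+ * i40iw_request_reset - request a reset from the lan driver
+ * @iwdev: iwarp device
+ */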
+void i40iw_request_reset(struct i40iw_device *iwdev)
+{
+ struct i40e_info *ldev = iwdev->ldev;
+
+ ldev->ops->request_reset(ldev, iwdev->client, 1);
+}
+
+/**
+ * i40iw_setup_ceqs - manage the device ceqs and their interrupt resources
+ * @iwdev: iwarp device
+ * @ldev: i40e lan device
+ *
+ * Allocate a list for all device completion event queues
+ * Create the ceq's and configure their msix interrupt vectors
+ * Return 0, if at least one ceq is successfully set up, otherwise return error
+ */
+static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
+ struct i40e_info *ldev)
+{
+ u32 i;
+ u32 ceq_id;
+ struct i40iw_ceq *iwceq;
+ struct i40iw_msix_vector *msix_vec;
+ enum i40iw_status_code status = 0;
+ u32 num_ceqs;
+
+ if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
+ status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
+ iwdev->iw_qvlist);
+ if (status)
+ goto exit;
+ } else {
+ status = I40IW_ERR_BAD_PTR;
+ goto exit;
+ }
+
+ num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
+ iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
+ if (!iwdev->ceqlist) {
+ status = I40IW_ERR_NO_MEMORY;
+ goto exit;
+ }
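+ /* when msix is shared, vector 0 serves both the aeq and ceq 0; otherwise ceqs start at vector 1 */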
+ i = (iwdev->msix_shared) ? 0 : 1;
+ for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
+ iwceq = &iwdev->ceqlist[ceq_id];
+ status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
+ if (status) {
+ i40iw_pr_err("create ceq status = %d\n", status);
+ break;
+ }
+
+ msix_vec = &iwdev->iw_msixtbl[i];
+ iwceq->irq = msix_vec->irq;
+ iwceq->msix_idx = msix_vec->idx;
+ status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
+ if (status) {
+ i40iw_destroy_ceq(iwdev, iwceq, false);
+ break;
+ }
+ i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
+ iwdev->ceqs_count++;
+ }
+
+exit:
+ if (status) {
+ if (!iwdev->ceqs_count) {
+ kfree(iwdev->ceqlist);
+ iwdev->ceqlist = NULL;
+ } else {
+ status = 0;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40iw_configure_aeq_vector - set up the msix vector for aeq
+ * @iwdev: iwarp device
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
+{
+ struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
+ int ret = 0;
+
+ if (!iwdev->msix_shared) {
+ tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
+ ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
+ }
+ if (ret) {
+ i40iw_pr_err("aeq irq config fail\n");
+ return I40IW_ERR_CONFIG;
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_create_aeq - create async event queue
+ * @iwdev: iwarp device
+ *
+ * Return 0, if the aeq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
+{
+ enum i40iw_status_code status;
+ struct i40iw_aeq_init_info info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_aeq *aeq = &iwdev->aeq;
+ u64 scratch = 0;
+ u32 aeq_size;
+
+ aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
+ iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+ memset(&info, 0, sizeof(info));
+ aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
+ status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
+ I40IW_AEQ_ALIGNMENT);
+ if (status)
+ goto exit;
+
+ info.aeqe_base = aeq->mem.va;
+ info.aeq_elem_pa = aeq->mem.pa;
+ info.elem_cnt = aeq_size;
+ info.dev = dev;
+ status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
+ if (status)
+ goto exit;
+ status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
+ if (!status)
+ status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
+exit:
+ if (status)
+ i40iw_free_dma_mem(dev->hw, &aeq->mem);
+ return status;
+}
+
+/**
+ * i40iw_setup_aeq - set up the device aeq
+ * @iwdev: iwarp device
+ *
+ * Create the aeq and configure its msix interrupt vector
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+
+ status = i40iw_create_aeq(iwdev);
+ if (status)
+ return status;
+
+ status = i40iw_configure_aeq_vector(iwdev);
+ if (status) {
+ i40iw_destroy_aeq(iwdev, false);
+ return status;
+ }
+
+ if (!iwdev->msix_shared)
+ i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
+ return 0;
+}
+
+/**
+ * i40iw_initialize_ilq - create iwarp local queue for cm
+ * @iwdev: iwarp device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
+{
+ struct i40iw_puda_rsrc_info info;
+ enum i40iw_status_code status;
+
+ memset(&info, 0, sizeof(info));
+ info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
+ info.cq_id = 1;
+ info.qp_id = 0;
+ info.count = 1;
+ info.pd_id = 1;
+ info.sq_size = 8192;
+ info.rq_size = 8192;
+ info.buf_size = 1024;
+ info.tx_buf_cnt = 16384;
+ info.mss = iwdev->mss;
+ info.receive = i40iw_receive_ilq;
+ info.xmit_complete = i40iw_free_sqbuf;
+ status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+ if (status)
+ i40iw_pr_err("ilq create fail\n");
+ return status;
+}
+
+/**
+ * i40iw_initialize_ieq - create iwarp exception queue
+ * @iwdev: iwarp device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
+{
+ struct i40iw_puda_rsrc_info info;
+ enum i40iw_status_code status;
+
+ memset(&info, 0, sizeof(info));
+ info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
+ info.cq_id = 2;
+ info.qp_id = iwdev->sc_dev.exception_lan_queue;
+ info.count = 1;
+ info.pd_id = 2;
+ info.sq_size = 8192;
+ info.rq_size = 8192;
+ info.buf_size = 2048;
+ info.mss = iwdev->mss;
+ info.tx_buf_cnt = 16384;
+ status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+ if (status)
+ i40iw_pr_err("ieq create fail\n");
+ return status;
+}
+
+/**
+ * i40iw_hmc_setup - create hmc objects for the device
+ * @iwdev: iwarp device
+ *
+ * Set up the device private memory space for the number and size of
+ * the hmc objects and create the objects
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
+{
+ enum i40iw_status_code status;
+
+ iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
+ status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
+ if (status)
+ goto exit;
+ status = i40iw_create_hmc_objs(iwdev, true);
+ if (status)
+ goto exit;
+ iwdev->init_state = HMC_OBJS_CREATED;
+exit:
+ return status;
+}
+
+/**
+ * i40iw_del_init_mem - deallocate memory resources
+ * @iwdev: iwarp device
+ */
+static void i40iw_del_init_mem(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+ i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
+ kfree(dev->hmc_info->sd_table.sd_entry);
+ dev->hmc_info->sd_table.sd_entry = NULL;
+ kfree(iwdev->mem_resources);
+ iwdev->mem_resources = NULL;
+ kfree(iwdev->ceqlist);
+ iwdev->ceqlist = NULL;
+ kfree(iwdev->iw_msixtbl);
+ iwdev->iw_msixtbl = NULL;
+ kfree(iwdev->hmc_info_mem);
+ iwdev->hmc_info_mem = NULL;
+}
+
+/**
+ * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
+ * @iwdev: iwarp device
+ * @idx: the index of the mac ip address to delete
+ */
+static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
+{
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status = 0;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request) {
+ i40iw_pr_err("cqp_request memory failed\n");
+ return;
+ }
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
+ cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
+}
+
+/**
+ * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
+ * @iwdev: iwarp device
+ * @mac_addr: pointer to mac address
+ * @idx: the index of the mac ip address to add
+ */
+static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
+ u8 *mac_addr,
+ u8 idx)
+{
+ struct i40iw_local_mac_ipaddr_entry_info *info;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status = 0;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request) {
+ i40iw_pr_err("cqp_request memory failed\n");
+ return I40IW_ERR_NO_MEMORY;
+ }
+
+ cqp_info = &cqp_request->info;
+
+ cqp_info->post_sq = 1;
+ info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ info->entry_idx = idx;
+ cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
+ cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
+ return status;
+}
+
+/**
+ * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
+ * @iwdev: iwarp device
+ * @mac_ip_tbl_idx: the index of the new mac ip address
+ *
+ * Allocate a mac ip address entry and update the mac_ip_tbl_idx
+ * to hold the index of the newly created mac ip address
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
+ u16 *mac_ip_tbl_idx)
+{
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status = 0;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request) {
+ i40iw_pr_err("cqp_request memory failed\n");
+ return I40IW_ERR_NO_MEMORY;
+ }
+
+ /* increment refcount, because we need the cqp request ret value */
+ atomic_inc(&cqp_request->refcount);
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (!status)
+ *mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
+ else
+ i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
+ /* decrement refcount and free the cqp request, if no longer used */
+ i40iw_put_cqp_request(iwcqp, cqp_request);
+ return status;
+}
+
+/**
+ * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
+ * @iwdev: iwarp device
+ * @macaddr: pointer to mac address
+ *
+ * Allocate a mac ip address entry and add it to the hw table
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
+ u8 *macaddr)
+{
+ enum i40iw_status_code status;
+
+ status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
+ if (!status) {
+ status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
+ (u8)iwdev->mac_ip_table_idx);
+ /* a failed add leaves the allocated entry unused, so release it */
+ if (status)
+ i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
+ }
+ return status;
+}
+
+/**
+ * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
+ * @iwdev: iwarp device
+ */
+static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp;
+ __be32 local_ipaddr6[4];
+
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, ip_dev) {
+ if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
+ (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
+ (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ i40iw_pr_err("ipv6 inet device not found\n");
+ break;
+ }
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
+ rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
+ i40iw_copy_ip_ntohl(local_ipaddr6,
+ ifp->addr.s6_addr32);
+ i40iw_manage_arp_cache(iwdev,
+ ip_dev->dev_addr,
+ local_ipaddr6,
+ false,
+ I40IW_ARP_ADD);
+ }
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
+ * @iwdev: iwarp device
+ */
+static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
+{
+ struct net_device *dev;
+ struct in_device *idev;
+ bool got_lock = true;
+ u32 ip_addr;
+
+ if (!rtnl_trylock())
+ got_lock = false;
+
+ for_each_netdev(&init_net, dev) {
+ if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
+ (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
+ (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+ idev = in_dev_get(dev);
+ if (!idev)
+ continue;
+ for_ifa(idev) {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+ "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
+ rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
+
+ ip_addr = ntohl(ifa->ifa_address);
+ i40iw_manage_arp_cache(iwdev,
+ dev->dev_addr,
+ &ip_addr,
+ true,
+ I40IW_ARP_ADD);
+ }
+ endfor_ifa(idev);
+ in_dev_put(idev);
+ }
+ }
+ if (got_lock)
+ rtnl_unlock();
+}
+
+/**
+ * i40iw_add_mac_ip - add mac and ip addresses
+ * @iwdev: iwarp device
+ *
+ * Create and add a mac ip address entry to the hw table and
+ * ipv4/ipv6 addresses to the arp cache
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
+{
+ struct net_device *netdev = iwdev->netdev;
+ enum i40iw_status_code status;
+
+ status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
+ if (status)
+ return status;
+ i40iw_add_ipv4_addr(iwdev);
+ i40iw_add_ipv6_addr(iwdev);
+ return 0;
+}
+
+/**
+ * i40iw_wait_pe_ready - Check if firmware is ready
+ * @hw: provides access to registers
+ */
+static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
+{
+ u32 statusfw;
+ u32 statuscpu0;
+ u32 statuscpu1;
+ u32 statuscpu2;
+ u32 retrycount = 0;
+
+ do {
+ statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
+ i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw);
+ statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
+ i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
+ statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
+ i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
+ __LINE__, statuscpu1);
+ statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
+ i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
+ __LINE__, statuscpu2);
+ if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
+ break; /* SUCCESS */
+ mdelay(1000);
+ retrycount++;
+ } while (retrycount < 14);
+ i40iw_wr32(hw, 0xb4040, 0x4C104C5);
+}
+
+/**
+ * i40iw_initialize_dev - initialize device
+ * @iwdev: iwarp device
+ * @ldev: lan device information
+ *
+ * Allocate memory for the hmc objects and initialize iwdev
+ * Return 0 if successful, otherwise clean up the resources
+ * and return error
+ */
+static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+ struct i40e_info *ldev)
+{
+ enum i40iw_status_code status;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_device_init_info info;
+ struct i40iw_dma_mem mem;
+ u32 size;
+
+ memset(&info, 0, sizeof(info));
+ size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
+ (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
+ iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
+ if (!iwdev->hmc_info_mem) {
+ i40iw_pr_err("memory alloc fail\n");
+ return I40IW_ERR_NO_MEMORY;
+ }
+ iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
+ dev->hmc_info = &iwdev->hw.hmc;
+ dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
+ status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
+ if (status)
+ goto exit;
+ info.fpm_query_buf_pa = mem.pa;
+ info.fpm_query_buf = mem.va;
+ status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
+ I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
+ if (status)
+ goto exit;
+ info.fpm_commit_buf_pa = mem.pa;
+ info.fpm_commit_buf = mem.va;
+ info.hmc_fn_id = ldev->fid;
+ info.is_pf = (ldev->ftype) ? false : true;
+ info.bar0 = ldev->hw_addr;
+ info.hw = &iwdev->hw;
+ info.debug_mask = debug;
+ info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
+ info.exception_lan_queue = 1;
+ info.vchnl_send = i40iw_virtchnl_send;
+ status = i40iw_device_init(&iwdev->sc_dev, &info);
+exit:
+ if (status) {
+ kfree(iwdev->hmc_info_mem);
+ iwdev->hmc_info_mem = NULL;
+ }
+ return status;
+}
+
+/**
+ * i40iw_register_notifiers - register tcp ip notifiers
+ */
+static void i40iw_register_notifiers(void)
+{
+ if (!i40iw_notifiers_registered) {
+ register_inetaddr_notifier(&i40iw_inetaddr_notifier);
+ register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ register_netevent_notifier(&i40iw_net_notifier);
+ }
+ i40iw_notifiers_registered++;
+}
+
+/**
+ * i40iw_save_msix_info - copy msix vector information to iwarp device
+ * @iwdev: iwarp device
+ * @ldev: lan device information
+ *
+ * Allocate iwdev msix table and copy the ldev msix info to the table
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
+ struct i40e_info *ldev)
+{
+ struct i40e_qvlist_info *iw_qvlist;
+ struct i40e_qv_info *iw_qvinfo;
+ u32 ceq_idx;
+ u32 i;
+ u32 size;
+
+ iwdev->msix_count = ldev->msix_count;
+
+ size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
+ size += sizeof(struct i40e_qvlist_info);
+ size += sizeof(struct i40e_qv_info) * (iwdev->msix_count - 1);
+ iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
+
+ if (!iwdev->iw_msixtbl)
+ return I40IW_ERR_NO_MEMORY;
+ iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
+ iw_qvlist = iwdev->iw_qvlist;
+ iw_qvinfo = iw_qvlist->qv_info;
+ iw_qvlist->num_vectors = iwdev->msix_count;
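+ /* share vector 0 between the aeq and ceq 0 only when every vector can still get its own cpu */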
+ if (iwdev->msix_count <= num_online_cpus())
+ iwdev->msix_shared = true;
+ for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
+ iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
+ iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
+ if (i == 0) {
+ iw_qvinfo->aeq_idx = 0;
+ if (iwdev->msix_shared)
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ else
+ iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
+ } else {
+ iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ }
+ iw_qvinfo->itr_idx = 3;
+ iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
+ }
+ return 0;
+}
+
+/**
+ * i40iw_deinit_device - clean up the device resources
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ * @del_hdl: true if delete hdl entry
+ *
+ * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
+ * destroy the device queues and free the pble and the hmc objects
+ */
+static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
+{
+ struct i40e_info *ldev = iwdev->ldev;
+
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+ i40iw_pr_info("state = %d\n", iwdev->init_state);
+
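+ /* tear down in reverse order of creation; each case intentionally falls through to the next */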
+ switch (iwdev->init_state) {
+ case RDMA_DEV_REGISTERED:
+ iwdev->iw_status = 0;
+ i40iw_port_ibevent(iwdev);
+ i40iw_destroy_rdma_device(iwdev->iwibdev);
+ /* fallthrough */
+ case IP_ADDR_REGISTERED:
+ if (!reset)
+ i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
+ /* fallthrough */
+ case INET_NOTIFIER:
+ if (i40iw_notifiers_registered > 0) {
+ i40iw_notifiers_registered--;
+ unregister_netevent_notifier(&i40iw_net_notifier);
+ unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
+ unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ }
+ /* fallthrough */
+ case CEQ_CREATED:
+ i40iw_dele_ceqs(iwdev, reset);
+ /* fallthrough */
+ case AEQ_CREATED:
+ i40iw_destroy_aeq(iwdev, reset);
+ /* fallthrough */
+ case IEQ_CREATED:
+ i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+ /* fallthrough */
+ case ILQ_CREATED:
+ i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+ /* fallthrough */
+ case CCQ_CREATED:
+ i40iw_destroy_ccq(iwdev, reset);
+ /* fallthrough */
+ case PBLE_CHUNK_MEM:
+ i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+ /* fallthrough */
+ case HMC_OBJS_CREATED:
+ i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+ /* fallthrough */
+ case CQP_CREATED:
+ i40iw_destroy_cqp(iwdev, !reset);
+ /* fallthrough */
+ case INITIAL_STATE:
+ i40iw_cleanup_cm_core(&iwdev->cm_core);
+ if (dev->is_pf)
+ i40iw_hw_stats_del_timer(dev);
+
+ i40iw_del_init_mem(iwdev);
+ break;
+ case INVALID_STATE:
+ /* fallthrough */
+ default:
+ i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
+ break;
+ }
+
+ if (del_hdl)
+ i40iw_del_handler(i40iw_find_i40e_handler(ldev));
+ kfree(iwdev->hdl);
+}
+
+/**
+ * i40iw_setup_init_state - set up the initial device struct
+ * @hdl: handler for iwarp device - one per instance
+ * @ldev: lan device information
+ * @client: iwarp client information, provided during registration
+ *
+ * Initialize the iwarp device and its hdl information
+ * using the ldev and client information
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
+ struct i40e_info *ldev,
+ struct i40e_client *client)
+{
+ struct i40iw_device *iwdev = &hdl->device;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+
+ memcpy(&hdl->ldev, ldev, sizeof(*ldev));
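+ /* profile 1 (weighted VF) is currently treated the same as profile 2 (even distribution) */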
+ if (resource_profile == 1)
+ resource_profile = 2;
+
+ iwdev->mpa_version = mpa_version;
+ iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
+ (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
+ I40IW_HMC_PROFILE_DEFAULT;
+ iwdev->max_rdma_vfs =
+ (iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
+ iwdev->netdev = ldev->netdev;
+ hdl->client = client;
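+ /* derive the mss from the netdev mtu, less the IP/TCP header overhead (I40IW_MTU_TO_MSS) */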
+ iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
+ if (!ldev->ftype)
+ iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
+ else
+ iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;
+
+ status = i40iw_save_msix_info(iwdev, ldev);
+ if (status)
+ goto exit;
+ iwdev->hw.dev_context = (void *)ldev->pcidev;
+ iwdev->hw.hw_addr = ldev->hw_addr;
+ status = i40iw_allocate_dma_mem(&iwdev->hw,
+ &iwdev->obj_mem, 8192, 4096);
+ if (status)
+ goto exit;
+ iwdev->obj_next = iwdev->obj_mem;
+ iwdev->push_mode = push_mode;
+ init_waitqueue_head(&iwdev->vchnl_waitq);
+ status = i40iw_initialize_dev(iwdev, ldev);
+exit:
+ if (status) {
+ kfree(iwdev->iw_msixtbl);
+ i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
+ iwdev->iw_msixtbl = NULL;
+ }
+ return status;
+}
+
+/**
+ * i40iw_open - client interface operation open for iwarp/uda device
+ * @ldev: lan device information
+ * @client: iwarp client information, provided during registration
+ *
+ * Called by the lan driver during the processing of client register
+ * Create device resources, set up queues, pble and hmc objects and
+ * register the device with the ib verbs interface
+ * Return 0 if successful, otherwise return error
+ */
+static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_dev *dev;
+ enum i40iw_status_code status;
+ struct i40iw_handler *hdl;
+
+ hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
+ if (!hdl)
+ return -ENOMEM;
+ iwdev = &hdl->device;
+ iwdev->hdl = hdl;
+ dev = &iwdev->sc_dev;
+ i40iw_setup_cm_core(iwdev);
+
+ dev->back_dev = (void *)iwdev;
+ iwdev->ldev = &hdl->ldev;
+ iwdev->client = client;
+ mutex_init(&iwdev->pbl_mutex);
+ i40iw_add_handler(hdl);
+
+ do {
+ status = i40iw_setup_init_state(hdl, ldev, client);
+ if (status)
+ break;
+ iwdev->init_state = INITIAL_STATE;
+ if (dev->is_pf)
+ i40iw_wait_pe_ready(dev->hw);
+ status = i40iw_create_cqp(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = CQP_CREATED;
+ status = i40iw_hmc_setup(iwdev);
+ if (status)
+ break;
+ status = i40iw_create_ccq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = CCQ_CREATED;
+ status = i40iw_initialize_ilq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = ILQ_CREATED;
+ status = i40iw_initialize_ieq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = IEQ_CREATED;
+ status = i40iw_setup_aeq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = AEQ_CREATED;
+ status = i40iw_setup_ceqs(iwdev, ldev);
+ if (status)
+ break;
+ iwdev->init_state = CEQ_CREATED;
+ status = i40iw_initialize_hw_resources(iwdev);
+ if (status)
+ break;
+ dev->ccq_ops->ccq_arm(dev->ccq);
+ status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
+ if (status)
+ break;
+ iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch");
+ if (!iwdev->virtchnl_wq) {
+ status = I40IW_ERR_NO_MEMORY;
+ break;
+ }
+ i40iw_register_notifiers();
+ iwdev->init_state = INET_NOTIFIER;
+ status = i40iw_add_mac_ip(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = IP_ADDR_REGISTERED;
+ if (i40iw_register_rdma_device(iwdev)) {
+ i40iw_pr_err("register rdma device fail\n");
+ break;
+ }
+
+ iwdev->init_state = RDMA_DEV_REGISTERED;
+ iwdev->iw_status = 1;
+ i40iw_port_ibevent(iwdev);
+ i40iw_pr_info("i40iw_open completed\n");
+ return 0;
+ } while (0);
+
+ i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
+ i40iw_deinit_device(iwdev, false, false);
+ return -ERESTART;
+}
+
+/**
+ * i40iw_l2param_change - handle qs handles for qos and mss change
+ * @ldev: lan device information
+ * @client: client for parameter change
+ * @params: new parameters from L2
+ */
+static void i40iw_l2param_change(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params)
+{
+ struct i40iw_handler *hdl;
+ struct i40iw_device *iwdev;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return;
+
+ iwdev = &hdl->device;
+ if (params->mtu)
+ iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;
+}
+
+/**
+ * i40iw_close - client interface operation close for iwarp/uda device
+ * @ldev: lan device information
+ * @client: client to close
+ * @reset: true if called before reset
+ *
+ * Called by the lan driver during the processing of client unregister
+ * Destroy and clean up the driver resources
+ */
+static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return;
+
+ iwdev = &hdl->device;
+ destroy_workqueue(iwdev->virtchnl_wq);
+ i40iw_deinit_device(iwdev, reset, true);
+}
+
+/**
+ * i40iw_vf_reset - process VF reset
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @vf_id: virtual function id
+ *
+ * Called when a VF is reset by the PF
+ * Destroy and clean up the VF resources
+ */
+static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
+{
+ struct i40iw_handler *hdl;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_hmc_fcn_info hmc_fcn_info;
+ struct i40iw_virt_mem vf_dev_mem;
+ struct i40iw_vfdev *tmp_vfdev;
+ unsigned int i;
+ unsigned long flags;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return;
+
+ dev = &hdl->device.sc_dev;
+
+ for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
+ if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
+ continue;
+
+ /* free all resources allocated on behalf of vf */
+ tmp_vfdev = dev->vf_dev[i];
+ spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+ dev->vf_dev[i] = NULL;
+ spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+ i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
+ /* remove vf hmc function */
+ memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
+ hmc_fcn_info.vf_id = vf_id;
+ hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
+ hmc_fcn_info.free_fcn = true;
+ i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
+ /* free vf_dev */
+ vf_dev_mem.va = tmp_vfdev;
+ vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
+ sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
+ i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
+ break;
+ }
+}
+
+/**
+ * i40iw_vf_enable - enable a number of VFs
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @num_vfs: number of VFs for the PF
+ *
+ * Called when the number of VFs changes
+ */
+static void i40iw_vf_enable(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 num_vfs)
+{
+ struct i40iw_handler *hdl;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return;
+
+ if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
+ hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
+ else
+ hdl->device.max_enabled_vfs = num_vfs;
+}
+
+/**
+ * i40iw_vf_capable - check if VF capable
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @vf_id: virtual function id
+ *
+ * Return 1 if a VF slot is available or if VF is already RDMA enabled
+ * Return 0 otherwise
+ */
+static int i40iw_vf_capable(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id)
+{
+ struct i40iw_handler *hdl;
+ struct i40iw_sc_dev *dev;
+ unsigned int i;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return 0;
+
+ dev = &hdl->device.sc_dev;
+
+ for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
+ if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_virtchnl_receive - receive a message through the virtual channel
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @vf_id: virtual function id associated with the message
+ * @msg: message buffer pointer
+ * @len: length of the message
+ *
+ * Invoke virtual channel receive operation for the given msg
+ * Return 0 if successful, otherwise return error
+ */
+static int i40iw_virtchnl_receive(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id,
+ u8 *msg,
+ u16 len)
+{
+ struct i40iw_handler *hdl;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_device *iwdev;
+ int ret_code = I40IW_NOT_SUPPORTED;
+
+ if (!len || !msg)
+ return I40IW_ERR_PARAM;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return I40IW_ERR_PARAM;
+
+ dev = &hdl->device.sc_dev;
+ iwdev = dev->back_dev;
+
+ i40iw_debug(dev, I40IW_DEBUG_VIRT, "msg %p, message length %u\n", msg, len);
+
+ if (dev->vchnl_if.vchnl_recv) {
+ ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
+ if (!dev->is_pf) {
+ atomic_dec(&iwdev->vchnl_msgs);
+ wake_up(&iwdev->vchnl_waitq);
+ }
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_virtchnl_send - send a message through the virtual channel
+ * @dev: iwarp device
+ * @vf_id: virtual function id associated with the message
+ * @msg: virtual channel message buffer pointer
+ * @len: length of the message
+ *
+ * Invoke virtual channel send operation for the given msg
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len)
+{
+ struct i40iw_device *iwdev;
+ struct i40e_info *ldev;
+ enum i40iw_status_code ret_code = I40IW_ERR_BAD_PTR;
+
+ if (!dev || !dev->back_dev)
+ return ret_code;
+
+ iwdev = dev->back_dev;
+ ldev = iwdev->ldev;
+
+ if (ldev && ldev->ops && ldev->ops->virtchnl_send)
+ ret_code = ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
+
+ return ret_code;
+}
+
+/* client interface functions */
+static struct i40e_client_ops i40e_ops = {
+ .open = i40iw_open,
+ .close = i40iw_close,
+ .l2_param_change = i40iw_l2param_change,
+ .virtchnl_receive = i40iw_virtchnl_receive,
+ .vf_reset = i40iw_vf_reset,
+ .vf_enable = i40iw_vf_enable,
+ .vf_capable = i40iw_vf_capable
+};
+
+/**
+ * i40iw_init_module - driver initialization function
+ *
+ * First function to call when the driver is loaded
+ * Register the driver as an i40e client
+ */
+static int __init i40iw_init_module(void)
+{
+ int ret;
+
+ memset(&i40iw_client, 0, sizeof(i40iw_client));
+ i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
+ i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
+ i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
+ i40iw_client.ops = &i40e_ops;
+ memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
+ i40iw_client.type = I40E_CLIENT_IWARP;
+ spin_lock_init(&i40iw_handler_lock);
+ ret = i40e_register_client(&i40iw_client);
+ return ret;
+}
+
+/**
+ * i40iw_exit_module - driver exit clean up function
+ *
+ * The function is called just before the driver is unloaded
+ * Unregister the driver as an i40e client
+ */
+static void __exit i40iw_exit_module(void)
+{
+ i40e_unregister_client(&i40iw_client);
+}
+
+module_init(i40iw_init_module);
+module_exit(i40iw_exit_module);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
new file mode 100644
index 000000000000..7e20493510e8
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -0,0 +1,215 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_OSDEP_H
+#define I40IW_OSDEP_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <net/tcp.h>
+#include <crypto/hash.h>
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#define STATS_TIMER_DELAY 1000
+
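+/**
+ * set_64bit_val - set 64 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @value: value to write
+ **/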
+static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)
+{
+ wqe_words[byte_index >> 3] = value;
+}
+
+/**
+ * set_32bit_val - set 32 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @value: value to write
+ **/
+static inline void set_32bit_val(u32 *wqe_words, u32 byte_index, u32 value)
+{
+ wqe_words[byte_index >> 2] = value;
+}
+
+/**
+ * get_64bit_val - read 64 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @value: read value
+ **/
+static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)
+{
+ *value = wqe_words[byte_index >> 3];
+}
+
+/**
+ * get_32bit_val - read 32 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @value: return 32 bit value
+ **/
+static inline void get_32bit_val(u32 *wqe_words, u32 byte_index, u32 *value)
+{
+ *value = wqe_words[byte_index >> 2];
+}
+
+struct i40iw_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+struct i40iw_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+
+#define i40iw_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("i40iw " s, ##__VA_ARGS__); \
+} while (0)
+
+#define i40iw_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
+
+#define I40E_GLHMC_VFSDCMD(_i) (0x000C8000 + ((_i) * 4)) \
+ /* _i=0...31 */
+#define I40E_GLHMC_VFSDCMD_MAX_INDEX 31
+#define I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFSDCMD_PMSDIDX_MASK (0xFFF \
+ << I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFSDCMD_PF_SHIFT 16
+#define I40E_GLHMC_VFSDCMD_PF_MASK (0xF << I40E_GLHMC_VFSDCMD_PF_SHIFT)
+#define I40E_GLHMC_VFSDCMD_VF_SHIFT 20
+#define I40E_GLHMC_VFSDCMD_VF_MASK (0x1FF << I40E_GLHMC_VFSDCMD_VF_SHIFT)
+#define I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT 29
+#define I40E_GLHMC_VFSDCMD_PMF_TYPE_MASK (0x3 \
+ << I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT)
+#define I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT 31
+#define I40E_GLHMC_VFSDCMD_PMSDWR_MASK (0x1 << I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT)
+
+#define I40E_GLHMC_VFSDDATAHIGH(_i) (0x000C8200 + ((_i) * 4)) \
+ /* _i=0...31 */
+#define I40E_GLHMC_VFSDDATAHIGH_MAX_INDEX 31
+#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF \
+ << I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT)
+
+#define I40E_GLHMC_VFSDDATALOW(_i) (0x000C8100 + ((_i) * 4)) \
+ /* _i=0...31 */
+#define I40E_GLHMC_VFSDDATALOW_MAX_INDEX 31
+#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_MASK (0x1 \
+ << I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT)
+#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_MASK (0x1 \
+ << I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_MASK (0x3FF \
+ << I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_MASK (0xFFFFF \
+ << I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT)
+
+#define I40E_GLPE_FWLDSTATUS 0x0000D200
+#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT 0
+#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_MASK (0x1 \
+ << I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_DONE_SHIFT 1
+#define I40E_GLPE_FWLDSTATUS_DONE_MASK (0x1 << I40E_GLPE_FWLDSTATUS_DONE_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT 2
+#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_MASK (0x1 \
+ << I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT 3
+#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_MASK (0x1 \
+ << I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT 4
+#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_MASK (0x1 \
+ << I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT)
+
+struct i40iw_sc_dev;
+struct i40iw_sc_qp;
+struct i40iw_puda_buf;
+struct i40iw_puda_completion_info;
+struct i40iw_update_sds_info;
+struct i40iw_hmc_fcn_info;
+struct i40iw_virtchnl_work_info;
+struct i40iw_manage_vf_pble_info;
+struct i40iw_device;
+struct i40iw_hmc_info;
+struct i40iw_hw;
+
+u8 __iomem *i40iw_get_hw_addr(void *dev);
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev);
+enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr,
+ u32 length, u32 value);
+struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf);
+void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum);
+void i40iw_free_hash_desc(struct shash_desc *);
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **);
+enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
+ struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *info);
+enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_fcn_info *hmcfcninfo);
+enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *values_mem,
+ u8 hmc_fn_id);
+enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *values_mem,
+ u8 hmc_fn_id);
+enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *mem);
+enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
+ struct i40iw_manage_vf_pble_info *info);
+void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
+void *i40iw_remove_head(struct list_head *list);
+
+void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
+void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
+void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp);
+void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
+
+enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
+ struct i40iw_manage_vf_pble_info *info,
+ bool wait);
+struct i40iw_dev_pestat;
+void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *);
+void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *);
+#define i40iw_mmiowb() mmiowb()
+void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
+u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
+#endif /* I40IW_OSDEP_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
new file mode 100644
index 000000000000..a0b8ca10d67e
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -0,0 +1,106 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_P_H
+#define I40IW_P_H
+
+#define PAUSE_TIMER_VALUE 0xFFFF
+#define REFRESH_THRESHOLD 0x7FFF
+#define HIGH_THRESHOLD 0x800
+#define LOW_THRESHOLD 0x200
+#define ALL_TC2PFC 0xFF
+
+void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
+ char *desc, u64 *buf, u32 size);
+/* init operations */
+enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+ struct i40iw_device_init_info *info);
+
+enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *);
+
+void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
+
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
+
+enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
+ struct i40iw_fast_reg_stag_info *info,
+ bool post_sq);
+
+/* HMC/FPM functions */
+enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id);
+
+enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
+ u32 *vf_cnt_array);
+
+/* cqp misc functions */
+
+void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
+
+void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
+
+void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
+
+enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_sc_qp *qp, u64 scratch);
+
+enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_sc_qp *qp, u64 scratch);
+
+enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp,
+ u64 scratch, u8 hmc_fn_id,
+ bool post_sq,
+ bool poll_registers);
+
+enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count);
+
+void free_sd_mem(struct i40iw_sc_dev *dev);
+
+enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
+ struct cqp_commands_info *pcmdinfo);
+
+enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev);
+
+/* prototype for functions used for dynamic memory allocation */
+enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
+ struct i40iw_dma_mem *mem, u64 size,
+ u32 alignment);
+void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem);
+enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
+ struct i40iw_virt_mem *mem, u32 size);
+enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
+ struct i40iw_virt_mem *mem);
+u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
new file mode 100644
index 000000000000..ded853d2fad8
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -0,0 +1,618 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_status.h"
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_hmc.h"
+
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+
+#include <linux/pci.h>
+#include <linux/genalloc.h>
+#include <linux/vmalloc.h>
+#include "i40iw_pble.h"
+#include "i40iw.h"
+
+struct i40iw_device;
+static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc);
+static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
+
+/**
+ * i40iw_destroy_pble_pool - destroy pool during module unload
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ */
+void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
+{
+ struct list_head *clist;
+ struct list_head *tlist;
+ struct i40iw_chunk *chunk;
+ struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;
+
+ if (pinfo->pool) {
+ list_for_each_safe(clist, tlist, &pinfo->clist) {
+ chunk = list_entry(clist, struct i40iw_chunk, list);
+ if (chunk->type == I40IW_VMALLOC)
+ i40iw_free_vmalloc_mem(dev->hw, chunk);
+ kfree(chunk);
+ }
+ gen_pool_destroy(pinfo->pool);
+ }
+}
+
+/**
+ * i40iw_hmc_init_pble - Initialize pble resources during module load
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ */
+enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc)
+{
+ struct i40iw_hmc_info *hmc_info;
+ u32 fpm_idx = 0;
+
+ hmc_info = dev->hmc_info;
+ pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
+ /* Now start the pbles on a 4k boundary */
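+ /* each pble is 8 bytes, so the leftover bytes convert to a pble count (>> 3) */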
+ if (pble_rsrc->fpm_base_addr & 0xfff)
+ fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
+
+ pble_rsrc->unallocated_pble =
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
+ pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
+
+ pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
+ pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
+ INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
+ if (!pble_rsrc->pinfo.pool)
+ goto error;
+
+ if (add_pble_pool(dev, pble_rsrc))
+ goto error;
+
+ return 0;
+
+error:
+ i40iw_destroy_pble_pool(dev, pble_rsrc);
+ return I40IW_ERR_NO_MEMORY;
+}
+
+/**
+ * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
+ * @pble_rsrc: structure containing fpm address
+ * @idx: where to return indexes
+ */
+static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+{
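+ /* sd index counts direct backing-page units of the fpm address, pd index counts paged (4K) units */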
+ idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
+ idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
+}
+
+/**
+ * add_sd_direct - add sd direct for pble
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource ptr
+ * @info: page info for sd
+ */
+static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_add_page_info *info)
+{
+ enum i40iw_status_code ret_code = 0;
+ struct sd_pd_idx *idx = &info->idx;
+ struct i40iw_chunk *chunk = info->chunk;
+ struct i40iw_hmc_info *hmc_info = info->hmc_info;
+ struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
+ u32 offset = 0;
+
+ if (!sd_entry->valid) {
+ if (dev->is_pf) {
+ ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
+ info->idx.sd_idx,
+ I40IW_SD_TYPE_DIRECT,
+ I40IW_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ return ret_code;
+ chunk->type = I40IW_DMA_COHERENT;
+ }
+ }
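+ /* byte offset of this chunk within the sd's backing page */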
+ offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
+ chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
+ chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
+ chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
+ return 0;
+}
+
+/**
+ * i40iw_free_vmalloc_mem - free vmalloc during close
+ * @hw: hw struct
+ * @chunk: chunk information for vmalloc
+ */
+static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
+{
+ struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+ int i;
+
+ if (!chunk->pg_cnt)
+ goto done;
+ for (i = 0; i < chunk->pg_cnt; i++)
+ dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ done:
+ kfree(chunk->dmaaddrs);
+ chunk->dmaaddrs = NULL;
+ vfree(chunk->vaddr);
+ chunk->vaddr = NULL;
+ chunk->type = 0;
+}
+
+/**
+ * i40iw_get_vmalloc_mem - get 2M page for sd
+ * @hw: hw struct
+ * @chunk: chunk to add
+ * @pg_cnt: number of 4K pages
+ */
+static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
+ struct i40iw_chunk *chunk,
+ int pg_cnt)
+{
+ struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+ struct page *page;
+ u8 *addr;
+ u32 size;
+ int i;
+
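+ /* one dma address per 4K page; the << 3 assumes an 8 byte dma_addr_t */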
+ chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
+ if (!chunk->dmaaddrs)
+ return I40IW_ERR_NO_MEMORY;
+ size = PAGE_SIZE * pg_cnt;
+ chunk->vaddr = vmalloc(size);
+ if (!chunk->vaddr) {
+ kfree(chunk->dmaaddrs);
+ chunk->dmaaddrs = NULL;
+ return I40IW_ERR_NO_MEMORY;
+ }
+ chunk->size = size;
+ addr = (u8 *)chunk->vaddr;
+ for (i = 0; i < pg_cnt; i++) {
+ page = vmalloc_to_page((void *)addr);
+ if (!page)
+ break;
+ chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
+ break;
+ addr += PAGE_SIZE;
+ }
+
+ chunk->pg_cnt = i;
+ chunk->type = I40IW_VMALLOC;
+ if (i == pg_cnt)
+ return 0;
+
+ i40iw_free_vmalloc_mem(hw, chunk);
+ return I40IW_ERR_NO_MEMORY;
+}
+
+/**
+ * fpm_to_idx - given fpm address, get pble index
+ * @pble_rsrc: pble resource management
+ * @addr: fpm address for index
+ */
+static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
+{
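+ /* pbles are 8 bytes each, hence the shift */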
+ return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
+}
+
+/**
+ * add_bp_pages - add backing pages for sd
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource management
+ * @info: page info for sd
+ */
+static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_add_page_info *info)
+{
+ u8 *addr;
+ struct i40iw_dma_mem mem;
+ struct i40iw_hmc_pd_entry *pd_entry;
+ struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
+ struct i40iw_hmc_info *hmc_info = info->hmc_info;
+ struct i40iw_chunk *chunk = info->chunk;
+ struct i40iw_manage_vf_pble_info vf_pble_info;
+ enum i40iw_status_code status = 0;
+ u32 rel_pd_idx = info->idx.rel_pd_idx;
+ u32 pd_idx = info->idx.pd_idx;
+ u32 i;
+
+ status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
+ if (status)
+ return I40IW_ERR_NO_MEMORY;
+ status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
+ info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
+ I40IW_HMC_DIRECT_BP_SIZE);
+ if (status) {
+ i40iw_free_vmalloc_mem(dev->hw, chunk);
+ return status;
+ }
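+ /* a VF cannot commit sds itself; request pble backing pages from the PF over virtchnl */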
+ if (!dev->is_pf) {
+ status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
+ fpm_to_idx(pble_rsrc,
+ pble_rsrc->next_fpm_addr),
+ (info->pages << PBLE_512_SHIFT));
+ if (status) {
+ i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status);
+ i40iw_free_vmalloc_mem(dev->hw, chunk);
+ return status;
+ }
+ }
+ addr = chunk->vaddr;
+ for (i = 0; i < info->pages; i++) {
+ mem.pa = chunk->dmaaddrs[i];
+ mem.size = PAGE_SIZE;
+ mem.va = (void *)(addr);
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
+ if (!pd_entry->valid) {
+ status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
+ if (status)
+ goto error;
+ addr += PAGE_SIZE;
+ } else {
+ i40iw_pr_err("pd entry is valid expecting to be invalid\n");
+ }
+ }
+ if (!dev->is_pf) {
+ vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
+ vf_pble_info.inv_pd_ent = false;
+ vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
+ vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
+ vf_pble_info.sd_index = info->idx.sd_idx;
+ status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
+ &vf_pble_info, true);
+ if (status) {
+ i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
+ goto error;
+ }
+ }
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ return 0;
+error:
+ i40iw_free_vmalloc_mem(dev->hw, chunk);
+ return status;
+}
+
+/**
+ * add_pble_pool - add an sd entry for pble resource
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource management
+ */
+static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_chunk *chunk;
+ struct i40iw_add_page_info info;
+ struct sd_pd_idx *idx = &info.idx;
+ enum i40iw_status_code ret_code = 0;
+ enum i40iw_sd_entry_type sd_entry_type;
+ u64 sd_reg_val = 0;
+ u32 pages;
+
+ if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
+ return I40IW_ERR_NO_MEMORY;
+ if (pble_rsrc->next_fpm_addr & 0xfff) {
+ i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
+ return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
+ }
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ return I40IW_ERR_NO_MEMORY;
+ hmc_info = dev->hmc_info;
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ get_sd_pd_idx(pble_rsrc, idx);
+ sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
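+ /* pds remaining in this sd, capped by the 4K pages the unallocated pbles can fill */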
+ pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
+ idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
+ pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
+ if (!pages) {
+ ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
+ goto error;
+ }
+ info.chunk = chunk;
+ info.hmc_info = hmc_info;
+ info.pages = pages;
+ info.sd_entry = sd_entry;
+ if (!sd_entry->valid) {
+ sd_entry_type = (!idx->rel_pd_idx &&
+ (pages == I40IW_HMC_PD_CNT_IN_SD) &&
+ dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
+ } else {
+ sd_entry_type = sd_entry->entry_type;
+ }
+ i40iw_debug(dev, I40IW_DEBUG_PBLE,
+ "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
+ pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
+ i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
+ sd_entry_type, sd_entry->valid);
+
+ if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
+ ret_code = add_sd_direct(dev, pble_rsrc, &info);
+ if (ret_code)
+ sd_entry_type = I40IW_SD_TYPE_PAGED;
+ else
+ pble_rsrc->stats_direct_sds++;
+
+ if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
+ ret_code = add_bp_pages(dev, pble_rsrc, &info);
+ if (ret_code)
+ goto error;
+ else
+ pble_rsrc->stats_paged_sds++;
+ }
+
+ if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
+ (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
+ i40iw_pr_err("could not allocate memory by gen_pool_addr_virt()\n");
+ ret_code = I40IW_ERR_NO_MEMORY;
+ goto error;
+ }
+ pble_rsrc->next_fpm_addr += chunk->size;
+ i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
+ pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
+ pble_rsrc->unallocated_pble -= (chunk->size >> 3);
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
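+ /* value programmed into the sd entry: pd table page for paged sds, backing page for direct sds */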
+ sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
+ if (sd_entry->valid)
+ return 0;
+ if (dev->is_pf)
+ ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
+ sd_reg_val, idx->sd_idx,
+ sd_entry->entry_type, true);
+ if (ret_code) {
+ i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
+ goto error;
+ }
+
+ sd_entry->valid = true;
+ return 0;
+ error:
+ kfree(chunk);
+ return ret_code;
+}
+
+/**
+ * free_lvl2 - free level 2 pble
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ */
+static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc)
+{
+ u32 i;
+ struct gen_pool *pool;
+ struct i40iw_pble_level2 *lvl2 = &palloc->level2;
+ struct i40iw_pble_info *root = &lvl2->root;
+ struct i40iw_pble_info *leaf = lvl2->leaf;
+
+ pool = pble_rsrc->pinfo.pool;
+
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ if (leaf->addr)
+ gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
+ else
+ break;
+ }
+
+ if (root->addr)
+ gen_pool_free(pool, root->addr, (root->cnt << 3));
+
+ kfree(lvl2->leaf);
+ lvl2->leaf = NULL;
+}
+
+/**
+ * get_lvl2_pble - get level 2 pble resource
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ * @pool: pool pointer
+ */
+static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc,
+ struct gen_pool *pool)
+{
+ u32 lf4k, lflast, total, i;
+ u32 pblcnt = PBLE_PER_PAGE;
+ u64 *addr;
+ struct i40iw_pble_level2 *lvl2 = &palloc->level2;
+ struct i40iw_pble_info *root = &lvl2->root;
+ struct i40iw_pble_info *leaf;
+
+ /* number of full 512-pble (4K) leaves */
+ lf4k = palloc->total_cnt >> 9;
+ lflast = palloc->total_cnt % PBLE_PER_PAGE;
+ total = (lflast == 0) ? lf4k : lf4k + 1;
+ lvl2->leaf_cnt = total;
+
+ leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
+ if (!leaf)
+ return I40IW_ERR_NO_MEMORY;
+ lvl2->leaf = leaf;
+ /* allocate pbles for the root */
+ root->addr = gen_pool_alloc(pool, (total << 3));
+ if (!root->addr) {
+ kfree(lvl2->leaf);
+ lvl2->leaf = NULL;
+ return I40IW_ERR_NO_MEMORY;
+ }
+ root->idx = fpm_to_idx(pble_rsrc,
+ (u64)gen_pool_virt_to_phys(pool, root->addr));
+ root->cnt = total;
+ addr = (u64 *)root->addr;
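+ /* each 8 byte root pble holds the index of one leaf page */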
+ for (i = 0; i < total; i++, leaf++) {
+ pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
+ leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
+ if (!leaf->addr)
+ goto error;
+ leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
+
+ leaf->cnt = pblcnt;
+ *addr = (u64)leaf->idx;
+ addr++;
+ }
+ palloc->level = I40IW_LEVEL_2;
+ pble_rsrc->stats_lvl2++;
+ return 0;
+ error:
+ free_lvl2(pble_rsrc, palloc);
+ return I40IW_ERR_NO_MEMORY;
+}
+
+/**
+ * get_lvl1_pble - get level 1 pble resource
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource management
+ * @palloc: level 1 pble allocation
+ */
+static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc)
+{
+ u64 *addr;
+ struct gen_pool *pool;
+ struct i40iw_pble_info *lvl1 = &palloc->level1;
+
+ pool = pble_rsrc->pinfo.pool;
+ addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));
+
+ if (!addr)
+ return I40IW_ERR_NO_MEMORY;
+
+ palloc->level = I40IW_LEVEL_1;
+ lvl1->addr = (unsigned long)addr;
+ lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
+ (unsigned long)addr));
+ lvl1->cnt = palloc->total_cnt;
+ pble_rsrc->stats_lvl1++;
+ return 0;
+}
+
+/**
+ * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @pool: pointer to general purpose special memory pool descriptor
+ */
+static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc,
+ struct gen_pool *pool)
+{
+ enum i40iw_status_code status = 0;
+
+ status = get_lvl1_pble(dev, pble_rsrc, palloc);
+ if (status && (palloc->total_cnt > PBLE_PER_PAGE))
+ status = get_lvl2_pble(pble_rsrc, palloc, pool);
+ return status;
+}
+
+/**
+ * i40iw_get_pble - allocate pbles from the pool
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @pble_cnt: #of pbles requested
+ */
+enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc,
+ u32 pble_cnt)
+{
+ struct gen_pool *pool;
+ enum i40iw_status_code status = 0;
+ u32 max_sds = 0;
+ int i;
+
+ pool = pble_rsrc->pinfo.pool;
+ palloc->total_cnt = pble_cnt;
+ palloc->level = I40IW_LEVEL_0;
+ /* check first to see if we can get pbles without acquiring additional sds */
+ status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
+ if (!status)
+ goto exit;
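+ /* one sd provides 2^18 pbles (2M / 8 bytes); add sds until the request fits */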
+ max_sds = (palloc->total_cnt >> 18) + 1;
+ for (i = 0; i < max_sds; i++) {
+ status = add_pble_pool(dev, pble_rsrc);
+ if (status)
+ break;
+ status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
+ if (!status)
+ break;
+ }
+exit:
+ if (!status)
+ pble_rsrc->stats_alloc_ok++;
+ else
+ pble_rsrc->stats_alloc_fail++;
+
+ return status;
+}
+
+/**
+ * i40iw_free_pble - put pbles back into pool
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble resource being freed
+ */
+void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc)
+{
+ struct gen_pool *pool;
+
+ pool = pble_rsrc->pinfo.pool;
+ if (palloc->level == I40IW_LEVEL_2)
+ free_lvl2(pble_rsrc, palloc);
+ else
+ gen_pool_free(pool, palloc->level1.addr,
+ (palloc->level1.cnt << 3));
+ pble_rsrc->stats_alloc_freed++;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.h b/drivers/infiniband/hw/i40iw/i40iw_pble.h
new file mode 100644
index 000000000000..7b1851d21cc0
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_PBLE_H
+#define I40IW_PBLE_H
+
+#define POOL_SHIFT 6
+#define PBLE_PER_PAGE 512
+#define I40IW_HMC_PAGED_BP_SHIFT 12
+#define PBLE_512_SHIFT 9
+
+enum i40iw_pble_level {
+ I40IW_LEVEL_0 = 0,
+ I40IW_LEVEL_1 = 1,
+ I40IW_LEVEL_2 = 2
+};
+
+enum i40iw_alloc_type {
+ I40IW_NO_ALLOC = 0,
+ I40IW_DMA_COHERENT = 1,
+ I40IW_VMALLOC = 2
+};
+
+struct i40iw_pble_info {
+ unsigned long addr;
+ u32 idx;
+ u32 cnt;
+};
+
+struct i40iw_pble_level2 {
+ struct i40iw_pble_info root;
+ struct i40iw_pble_info *leaf;
+ u32 leaf_cnt;
+};
+
+struct i40iw_pble_alloc {
+ u32 total_cnt;
+ enum i40iw_pble_level level;
+ union {
+ struct i40iw_pble_info level1;
+ struct i40iw_pble_level2 level2;
+ };
+};
+
+struct sd_pd_idx {
+ u32 sd_idx;
+ u32 pd_idx;
+ u32 rel_pd_idx;
+};
+
+struct i40iw_add_page_info {
+ struct i40iw_chunk *chunk;
+ struct i40iw_hmc_sd_entry *sd_entry;
+ struct i40iw_hmc_info *hmc_info;
+ struct sd_pd_idx idx;
+ u32 pages;
+};
+
+struct i40iw_chunk {
+ struct list_head list;
+ u32 size;
+ void *vaddr;
+ u64 fpm_addr;
+ u32 pg_cnt;
+ dma_addr_t *dmaaddrs;
+ enum i40iw_alloc_type type;
+};
+
+struct i40iw_pble_pool {
+ struct gen_pool *pool;
+ struct list_head clist;
+ u32 total_pble_alloc;
+ u32 free_pble_cnt;
+ u32 pool_shift;
+};
+
+struct i40iw_hmc_pble_rsrc {
+ u32 unallocated_pble;
+ u64 fpm_base_addr;
+ u64 next_fpm_addr;
+ struct i40iw_pble_pool pinfo;
+
+ u32 stats_direct_sds;
+ u32 stats_paged_sds;
+ u64 stats_alloc_ok;
+ u64 stats_alloc_fail;
+ u64 stats_alloc_freed;
+ u64 stats_lvl1;
+ u64 stats_lvl2;
+};
+
+void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc);
+enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc);
+void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, struct i40iw_pble_alloc *palloc);
+enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc,
+ u32 pble_cnt);
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
new file mode 100644
index 000000000000..8eb400d8a7a0
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -0,0 +1,1436 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_puda.h"
+
+static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf);
+static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
+static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
+static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
+ *rsrc, bool initial);
+/**
+ * i40iw_puda_get_listbuf - get buffer from puda list
+ * @list: list to use for buffers (ILQ or IEQ)
+ */
+static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
+{
+ struct i40iw_puda_buf *buf = NULL;
+
+ if (!list_empty(list)) {
+ buf = (struct i40iw_puda_buf *)list->next;
+ list_del((struct list_head *)&buf->list);
+ }
+ return buf;
+}
+
+/**
+ * i40iw_puda_get_bufpool - return buffer from resource
+ * @rsrc: resource to use for buffer
+ */
+struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_puda_buf *buf = NULL;
+ struct list_head *list = &rsrc->bufpool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ buf = i40iw_puda_get_listbuf(list);
+ if (buf)
+ rsrc->avail_buf_count--;
+ else
+ rsrc->stats_buf_alloc_fail++;
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ return buf;
+}
+
+/**
+ * i40iw_puda_ret_bufpool - return buffer to rsrc list
+ * @rsrc: resource to use for buffer
+ * @buf: buffer to return to resource
+ */
+void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
+ struct i40iw_puda_buf *buf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ list_add(&buf->list, &rsrc->bufpool);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->avail_buf_count++;
+}
+
+/**
+ * i40iw_puda_post_recvbuf - set wqe for rcv buffer
+ * @rsrc: resource ptr
+ * @wqe_idx: wqe index to use
+ * @buf: puda buffer for rcv q
+ * @initial: flag if during init time
+ */
+static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
+ struct i40iw_puda_buf *buf, bool initial)
+{
+ u64 *wqe;
+ struct i40iw_sc_qp *qp = &rsrc->qp;
+ u64 offset24 = 0;
+
+ qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
+ wqe_idx, buf, wqe);
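+ /* toggle the wqe valid bit: on replenish, read back qword 24 and invert the previous polarity */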
+ if (!initial)
+ get_64bit_val(wqe, 24, &offset24);
+
+ offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
+ set_64bit_val(wqe, 24, offset24);
+
+ set_64bit_val(wqe, 0, buf->mem.pa);
+ set_64bit_val(wqe, 8,
+ LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * i40iw_puda_replenish_rq - post rcv buffers
+ * @rsrc: resource to use for buffer
+ * @initial: flag if during init time
+ */
+static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
+ bool initial)
+{
+ u32 i;
+ u32 invalid_cnt = rsrc->rxq_invalid_cnt;
+ struct i40iw_puda_buf *buf = NULL;
+
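+ /* post one rcv buffer for each rq wqe that has been consumed */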
+ for (i = 0; i < invalid_cnt; i++) {
+ buf = i40iw_puda_get_bufpool(rsrc);
+ if (!buf)
+ return I40IW_ERR_list_empty;
+ i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
+ initial);
+ rsrc->rx_wqe_idx =
+ ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
+ rsrc->rxq_invalid_cnt--;
+ }
+ return 0;
+}
+
+/**
+ * i40iw_puda_alloc_buf - allocate mem for buffer
+ * @dev: iwarp device
+ * @length: length of buffer
+ */
+static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
+ u32 length)
+{
+ struct i40iw_puda_buf *buf = NULL;
+ struct i40iw_virt_mem buf_mem;
+ enum i40iw_status_code ret;
+
+ ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
+ sizeof(struct i40iw_puda_buf));
+ if (ret) {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s: error mem for buf\n", __func__);
+ return NULL;
+ }
+ buf = (struct i40iw_puda_buf *)buf_mem.va;
+ ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
+ if (ret) {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s: error dma mem for buf\n", __func__);
+ i40iw_free_virt_mem(dev->hw, &buf_mem);
+ return NULL;
+ }
+ buf->buf_mem.va = buf_mem.va;
+ buf->buf_mem.size = buf_mem.size;
+ return buf;
+}
+
+/**
+ * i40iw_puda_dele_buf - free buffer back to the system
+ * @dev: iwarp device
+ * @buf: buffer to free
+ */
+static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf)
+{
+ i40iw_free_dma_mem(dev->hw, &buf->mem);
+ i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
+}
+
+/**
+ * i40iw_puda_get_next_send_wqe - return next wqe for processing
+ * @qp: puda qp for wqe
+ * @wqe_idx: wqe index for caller
+ */
+static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
+{
+ u64 *wqe = NULL;
+ enum i40iw_status_code ret_code = 0;
+
+ *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
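+ /* wrapping back to index 0 flips the sq valid-bit polarity */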
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return wqe;
+ wqe = qp->sq_base[*wqe_idx].elem;
+
+ return wqe;
+}
+
+/**
+ * i40iw_puda_poll_info - poll cq for completion
+ * @cq: cq for poll
+ * @info: info return for successful completion
+ */
+static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
+ struct i40iw_puda_completion_info *info)
+{
+ u64 qword0, qword2, qword3;
+ u64 *cqe;
+ u64 comp_ctx;
+ bool valid_bit;
+ u32 major_err, minor_err;
+ bool error;
+
+ cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
+ get_64bit_val(cqe, 24, &qword3);
+ valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);
+
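+ /* the cqe belongs to sw only when its valid bit matches the cq polarity */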
+ if (valid_bit != cq->cq_uk.polarity)
+ return I40IW_ERR_QUEUE_EMPTY;
+
+ i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
+ error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
+ if (error) {
+ i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
+ major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
+ minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
+ info->compl_error = major_err << 16 | minor_err;
+ return I40IW_ERR_CQ_COMPL_ERROR;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
+ info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
+ info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
+
+ if (info->q_type == I40IW_CQE_QTYPE_RQ) {
+ info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
+ info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
+ info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
+ info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_puda_poll_completion - processes completion for cq
+ * @dev: iwarp device
+ * @cq: cq getting interrupt
+ * @compl_err: return any completion err
+ */
+enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_cq *cq, u32 *compl_err)
+{
+ struct i40iw_qp_uk *qp;
+ struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
+ struct i40iw_puda_completion_info info;
+ enum i40iw_status_code ret = 0;
+ struct i40iw_puda_buf *buf;
+ struct i40iw_puda_rsrc *rsrc;
+ void *sqwrid;
+ u8 cq_type = cq->cq_type;
+ unsigned long flags;
+
+ if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
+ rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
+ return I40IW_ERR_BAD_PTR;
+ }
+ memset(&info, 0, sizeof(info));
+ ret = i40iw_puda_poll_info(cq, &info);
+ *compl_err = info.compl_error;
+ if (ret == I40IW_ERR_QUEUE_EMPTY)
+ return ret;
+ if (ret)
+ goto done;
+
+ qp = info.qp;
+ if (!qp || !rsrc) {
+ ret = I40IW_ERR_BAD_PTR;
+ goto done;
+ }
+
+ if (qp->qp_id != rsrc->qp_id) {
+ ret = I40IW_ERR_BAD_PTR;
+ goto done;
+ }
+
+ if (info.q_type == I40IW_CQE_QTYPE_RQ) {
+ buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
+ /* Get all the tcpip information in the buf header */
+ ret = i40iw_puda_get_tcpip_info(&info, buf);
+ if (ret) {
+ rsrc->stats_rcvd_pkt_err++;
+ if (cq_type == I40IW_CQ_TYPE_ILQ) {
+ i40iw_ilq_putback_rcvbuf(&rsrc->qp,
+ info.wqe_idx);
+ } else {
+ i40iw_puda_ret_bufpool(rsrc, buf);
+ i40iw_puda_replenish_rq(rsrc, false);
+ }
+ goto done;
+ }
+
+ rsrc->stats_pkt_rcvd++;
+ rsrc->compl_rxwqe_idx = info.wqe_idx;
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
+ rsrc->receive(rsrc->dev, buf);
+ if (cq_type == I40IW_CQ_TYPE_ILQ)
+ i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
+ else
+ i40iw_puda_replenish_rq(rsrc, false);
+
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
+ sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
+ I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
+ rsrc->xmit_complete(rsrc->dev, sqwrid);
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ rsrc->tx_wqe_avail_cnt++;
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ if (!list_empty(&dev->ilq->txpend))
+ i40iw_puda_send_buf(dev->ilq, NULL);
+ }
+
+done:
+ I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
+ if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
+ cq_uk->polarity = !cq_uk->polarity;
+ /* update cq tail in cq shadow memory also */
+ I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
+ set_64bit_val(cq_uk->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
+ return 0;
+}
+
+/**
+ * i40iw_puda_send - complete send wqe for transmit
+ * @qp: puda qp for send
+ * @info: buffer information for transmit
+ */
+enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
+ struct i40iw_puda_send_info *info)
+{
+ u64 *wqe;
+ u32 iplen, l4len;
+ u64 header[2];
+ u32 wqe_idx;
+ u8 iipt;
+
+ /* number of 32 bit words in the tcp header */
+ l4len = info->tcplen >> 2;
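+ /* iipt is the hw ip header type encoding; iplen is the ip header length in 32 bit words (5 for v4, 10 for v6) */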
+ if (info->ipv4) {
+ iipt = 3;
+ iplen = 5;
+ } else {
+ iipt = 1;
+ iplen = 10;
+ }
+
+ wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+ qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
+ /* Third line of WQE descriptor */
+ /* maclen is in words */
+ header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
+ LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
+ LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
+ LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
+ /* Fourth line of WQE descriptor */
+ header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
+ LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
+ LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);
+
+ set_64bit_val(wqe, 0, info->paddr);
+ set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
+ set_64bit_val(wqe, 16, header[0]);
+ set_64bit_val(wqe, 24, header[1]);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
+ i40iw_qp_post_wr(&qp->qp_uk);
+ return 0;
+}
+
+/**
+ * i40iw_puda_send_buf - transmit puda buffer
+ * @rsrc: resource to use for buffer
+ * @buf: puda buffer to transmit
+ */
+void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
+{
+ struct i40iw_puda_send_info info;
+ enum i40iw_status_code ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ /* if no wqe is available, or this buffer is not from a
+ * completion and there are already pending buffers, queue it
+ */
+ if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
+ list_add_tail(&buf->list, &rsrc->txpend);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->stats_sent_pkt_q++;
+ if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s: adding to txpend\n", __func__);
+ return;
+ }
+ rsrc->tx_wqe_avail_cnt--;
+ /* if we are coming from a completion and have pending
+ * buffers, get one from the pending list
+ */
+ if (!buf) {
+ buf = i40iw_puda_get_listbuf(&rsrc->txpend);
+ if (!buf)
+ goto done;
+ }
+
+ info.scratch = (void *)buf;
+ info.paddr = buf->mem.pa;
+ info.len = buf->totallen;
+ info.tcplen = buf->tcphlen;
+ info.maclen = buf->maclen;
+ info.ipv4 = buf->ipv4;
+ info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
+
+ ret = i40iw_puda_send(&rsrc->qp, &info);
+ if (ret) {
+ rsrc->tx_wqe_avail_cnt++;
+ rsrc->stats_sent_pkt_q++;
+ list_add(&buf->list, &rsrc->txpend);
+ if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s: adding to puda_send\n", __func__);
+ } else {
+ rsrc->stats_pkt_sent++;
+ }
+done:
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+}
+
+/**
+ * i40iw_puda_qp_setctx - during init, set qp's context
+ * @rsrc: qp's resource
+ */
+static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_sc_qp *qp = &rsrc->qp;
+ u64 *qp_ctx = qp->hw_host_ctx;
+
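+ /* fill the hw qp context: queue addresses, sizes, mss, cq numbers and tph values at fixed byte offsets */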
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+
+ set_64bit_val(qp_ctx, 24,
+ LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
+ LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
+
+ set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
+ set_64bit_val(qp_ctx, 56, 0);
+ set_64bit_val(qp_ctx, 64, 1);
+
+ set_64bit_val(qp_ctx, 136,
+ LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
+ LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));
+
+ set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));
+
+ set_64bit_val(qp_ctx, 168,
+ LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));
+
+ set_64bit_val(qp_ctx, 176,
+ LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
+ LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
+ LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));
+
+ i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
+ qp_ctx, I40IW_QP_CTX_SIZE);
+}
+
+/**
+ * i40iw_puda_qp_wqe - setup wqe for qp create
+ * @rsrc: resource for qp
+ */
+static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_sc_qp *qp = &rsrc->qp;
+ struct i40iw_sc_dev *dev = rsrc->dev;
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+ struct i40iw_ccq_cqe_info compl_info;
+ enum i40iw_status_code status = 0;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
+ LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
+ LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ set_64bit_val(wqe, 24, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
+ i40iw_sc_cqp_post_sq(cqp);
+ status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_CREATE_QP,
+ &compl_info);
+ return status;
+}
+
+/**
+ * i40iw_puda_qp_create - create qp for resource
+ * @rsrc: resource to use for buffer
+ */
+static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_sc_qp *qp = &rsrc->qp;
+ struct i40iw_qp_uk *ukqp = &qp->qp_uk;
+ enum i40iw_status_code ret = 0;
+ u32 sq_size, rq_size, t_size;
+ struct i40iw_dma_mem *mem;
+
+ sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
+ rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
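+ /* one contiguous allocation laid out as sq, rq, shadow area, then qp context */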
+ t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
+ I40IW_QP_CTX_SIZE);
+ /* Get page aligned memory */
+ ret =
+ i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
+ I40IW_HW_PAGE_SIZE);
+ if (ret) {
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
+ return ret;
+ }
+
+ mem = &rsrc->qpmem;
+ memset(mem->va, 0, t_size);
+ qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
+ qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
+ qp->pd = &rsrc->sc_pd;
+ qp->qp_type = I40IW_QP_TYPE_UDA;
+ qp->dev = rsrc->dev;
+ qp->back_qp = (void *)rsrc;
+ qp->sq_pa = mem->pa;
+ qp->rq_pa = qp->sq_pa + sq_size;
+ ukqp->sq_base = mem->va;
+ ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
+ ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
+ qp->shadow_area_pa = qp->rq_pa + rq_size;
+ qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
+ qp->hw_host_ctx_pa =
+ qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
+ ukqp->qp_id = rsrc->qp_id;
+ ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
+ ukqp->rq_wrid_array = rsrc->rq_wrid_array;
+
+ ukqp->sq_size = rsrc->sq_size;
+ ukqp->rq_size = rsrc->rq_size;
+
+ I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
+ I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
+ I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
+
+ if (qp->pd->dev->is_pf)
+ ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+ I40E_PFPE_WQEALLOC);
+ else
+ ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+ I40E_VFPE_WQEALLOC1);
+
+ qp->qs_handle = qp->dev->qs_handle;
+ i40iw_puda_qp_setctx(rsrc);
+ ret = i40iw_puda_qp_wqe(rsrc);
+ if (ret)
+ i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
+ return ret;
+}
+
+/**
+ * i40iw_puda_cq_create - create cq for resource
+ * @rsrc: resource for which cq to create
+ */
+static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_sc_dev *dev = rsrc->dev;
+ struct i40iw_sc_cq *cq = &rsrc->cq;
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ enum i40iw_status_code ret = 0;
+ u32 tsize, cqsize;
+ u32 shadow_read_threshold = 128;
+ struct i40iw_dma_mem *mem;
+ struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_cq_init_info info;
+ struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
+
+ cq->back_cq = (void *)rsrc;
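+ /* cq entries followed by the shadow area in a single dma allocation */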
+ cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
+ tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
+ ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
+ I40IW_CQ0_ALIGNMENT_MASK);
+ if (ret)
+ return ret;
+
+ mem = &rsrc->cqmem;
+ memset(&info, 0, sizeof(info));
+ info.dev = dev;
+ info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
+ I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
+ info.shadow_read_threshold = rsrc->cq_size >> 2;
+ info.ceq_id_valid = true;
+ info.cq_base_pa = mem->pa;
+ info.shadow_area_pa = mem->pa + cqsize;
+ init_info->cq_base = mem->va;
+ init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
+ init_info->cq_size = rsrc->cq_size;
+ init_info->cq_id = rsrc->cq_id;
+ ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
+ if (ret)
+ goto error;
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe) {
+ ret = I40IW_ERR_RING_FULL;
+ goto error;
+ }
+
+ set_64bit_val(wqe, 0, rsrc->cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, 32, cq->cq_pa);
+
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+
+ header = rsrc->cq_id |
+ LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ set_64bit_val(wqe, 24, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(dev->cqp);
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_CREATE_CQ,
+ &compl_info);
+
+error:
+ if (ret)
+ i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+ return ret;
+}
+
+/**
+ * i40iw_puda_dele_resources - delete all resources during close
+ * @dev: iwarp device
+ * @type: type of resource to delete
+ * @reset: true if reset chip
+ */
+void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+ enum puda_resource_type type,
+ bool reset)
+{
+ struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_puda_rsrc *rsrc;
+ struct i40iw_puda_buf *buf = NULL;
+ struct i40iw_puda_buf *nextbuf = NULL;
+ struct i40iw_virt_mem *vmem;
+ enum i40iw_status_code ret;
+
+ switch (type) {
+ case I40IW_PUDA_RSRC_TYPE_ILQ:
+ rsrc = dev->ilq;
+ vmem = &dev->ilq_mem;
+ break;
+ case I40IW_PUDA_RSRC_TYPE_IEQ:
+ rsrc = dev->ieq;
+ vmem = &dev->ieq_mem;
+ break;
+ default:
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
+ __func__, type);
+ return;
+ }
+
+ switch (rsrc->completion) {
+ case PUDA_HASH_CRC_COMPLETE:
+ i40iw_free_hash_desc(rsrc->hash_desc);
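+ /* fallthrough */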
+ case PUDA_QP_CREATED:
+ do {
+ if (reset)
+ break;
+ ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
+ 0, false, true, true);
+ if (ret)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s error ieq qp destroy\n",
+ __func__);
+
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_DESTROY_QP,
+ &compl_info);
+ if (ret)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s error ieq qp destroy done\n",
+ __func__);
+ } while (0);
+
+ i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
+ /* fallthrough */
+ case PUDA_CQ_CREATED:
+ do {
+ if (reset)
+ break;
+ ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
+ if (ret)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s error ieq cq destroy\n",
+ __func__);
+
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_DESTROY_CQ,
+ &compl_info);
+ if (ret)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s error ieq qp destroy done\n",
+ __func__);
+ } while (0);
+
+ i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+ break;
+ default:
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
+ break;
+ }
+ /* Free all allocated puda buffers for both tx and rx */
+ buf = rsrc->alloclist;
+ while (buf) {
+ nextbuf = buf->next;
+ i40iw_puda_dele_buf(dev, buf);
+ buf = nextbuf;
+ rsrc->alloc_buf_count--;
+ }
+ i40iw_free_virt_mem(dev->hw, vmem);
+}
+
+/**
+ * i40iw_puda_allocbufs - allocate buffers for resource
+ * @rsrc: resource for buffer allocation
+ * @count: number of buffers to create
+ */
+static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
+ u32 count)
+{
+ u32 i;
+ struct i40iw_puda_buf *buf;
+ struct i40iw_puda_buf *nextbuf;
+
+ for (i = 0; i < count; i++) {
+ buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
+ if (!buf) {
+ rsrc->stats_buf_alloc_fail++;
+ return I40IW_ERR_NO_MEMORY;
+ }
+ i40iw_puda_ret_bufpool(rsrc, buf);
+ rsrc->alloc_buf_count++;
+ if (!rsrc->alloclist) {
+ rsrc->alloclist = buf;
+ } else {
+ nextbuf = rsrc->alloclist;
+ rsrc->alloclist = buf;
+ buf->next = nextbuf;
+ }
+ }
+ rsrc->avail_buf_count = rsrc->alloc_buf_count;
+ return 0;
+}
+
+/**
+ * i40iw_puda_create_rsrc - create resource (ilq or ieq)
+ * @dev: iwarp device
+ * @info: resource information
+ */
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_rsrc_info *info)
+{
+ enum i40iw_status_code ret = 0;
+ struct i40iw_puda_rsrc *rsrc;
+ u32 pudasize;
+ u32 sqwridsize, rqwridsize;
+ struct i40iw_virt_mem *vmem;
+
+ info->count = 1;
+ pudasize = sizeof(struct i40iw_puda_rsrc);
+ sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
+ rqwridsize = info->rq_size * 8;
+ switch (info->type) {
+ case I40IW_PUDA_RSRC_TYPE_ILQ:
+ vmem = &dev->ilq_mem;
+ break;
+ case I40IW_PUDA_RSRC_TYPE_IEQ:
+ vmem = &dev->ieq_mem;
+ break;
+ default:
+ return I40IW_NOT_SUPPORTED;
+ }
+ ret = i40iw_allocate_virt_mem(dev->hw, vmem,
+        pudasize + sqwridsize + rqwridsize);
+ if (ret)
+ return ret;
+ rsrc = (struct i40iw_puda_rsrc *)vmem->va;
+ spin_lock_init(&rsrc->bufpool_lock);
+ if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
+ dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
+ dev->ilq_count = info->count;
+ rsrc->receive = info->receive;
+ rsrc->xmit_complete = info->xmit_complete;
+ } else {
+ vmem = &dev->ieq_mem;
+ dev->ieq_count = info->count;
+ dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
+ rsrc->receive = i40iw_ieq_receive;
+ rsrc->xmit_complete = i40iw_ieq_tx_compl;
+ }
+
+ rsrc->type = info->type;
+ rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
+ rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
+ rsrc->mss = info->mss;
+ /* Initialize all ieq lists */
+ INIT_LIST_HEAD(&rsrc->bufpool);
+ INIT_LIST_HEAD(&rsrc->txpend);
+
+ rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
+ dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
+ rsrc->qp_id = info->qp_id;
+ rsrc->cq_id = info->cq_id;
+ rsrc->sq_size = info->sq_size;
+ rsrc->rq_size = info->rq_size;
+ rsrc->cq_size = info->rq_size + info->sq_size;
+ rsrc->buf_size = info->buf_size;
+ rsrc->dev = dev;
+
+ ret = i40iw_puda_cq_create(rsrc);
+ if (!ret) {
+ rsrc->completion = PUDA_CQ_CREATED;
+ ret = i40iw_puda_qp_create(rsrc);
+ }
+ if (ret) {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
+ goto error;
+ }
+ rsrc->completion = PUDA_QP_CREATED;
+
+ ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
+ if (ret) {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error allloc_buf\n", __func__);
+ goto error;
+ }
+
+ rsrc->rxq_invalid_cnt = info->rq_size;
+ ret = i40iw_puda_replenish_rq(rsrc, true);
+ if (ret)
+ goto error;
+
+ if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
+ if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
+ rsrc->check_crc = true;
+ rsrc->completion = PUDA_HASH_CRC_COMPLETE;
+ ret = 0;
+ }
+ }
+
+ dev->ccq_ops->ccq_arm(&rsrc->cq);
+ return ret;
+ error:
+ i40iw_puda_dele_resources(dev, info->type, false);
+
+ return ret;
+}
+
+/**
+ * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
+ * @qp: ilq's qp resource
+ * @wqe_idx: wqe index of completed rcvbuf
+ */
+static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
+{
+ u64 *wqe;
+ u64 offset24;
+
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+ get_64bit_val(wqe, 24, &offset24);
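+ /* flip the wqe valid bit so hardware sees this rq wqe as posted again */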
+ offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * i40iw_ieq_get_fpdu_length - given length return fpdu length
+ * @length: length of fpdu
+ */
+static u16 i40iw_ieq_get_fpdu_length(u16 length)
+{
+ u16 fpdu_len;
+
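+ /*
+  * Per MPA framing, each fpdu adds 6 bytes (2-byte length marker plus
+  * 4-byte CRC) and is padded to a 4-byte boundary, e.g. a 7-byte
+  * payload becomes 7 + 6 = 13, rounded up to 16.
+  */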
+ fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
+ fpdu_len = (fpdu_len + 3) & 0xfffffffc;
+ return fpdu_len;
+}
+
+/**
+ * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
+ * @buf: rcv buffer with partial
+ * @txbuf: tx buffer for sending back
+ * @buf_offset: rcv buffer offset to copy from
+ * @txbuf_offset: offset in tx buf to copy to
+ * @length: length of data to copy
+ */
+static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
+ struct i40iw_puda_buf *txbuf,
+ u16 buf_offset, u32 txbuf_offset,
+ u32 length)
+{
+ void *mem1 = (u8 *)buf->mem.va + buf_offset;
+ void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
+
+ memcpy(mem2, mem1, length);
+}
+
+/**
+ * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
+ * @buf: receive buffer with partial
+ * @txbuf: buffer to prepare
+ */
+static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
+ struct i40iw_puda_buf *txbuf)
+{
+ txbuf->maclen = buf->maclen;
+ txbuf->tcphlen = buf->tcphlen;
+ txbuf->ipv4 = buf->ipv4;
+ txbuf->hdrlen = buf->hdrlen;
+ i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
+}
+
+/**
+ * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
+ * @buf: receive exception buffer
+ * @fps: first partial sequence number
+ */
+static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
+{
+ u32 offset;
+
+ if (buf->seqnum < fps) {
+ offset = fps - buf->seqnum;
+ if (offset > buf->datalen)
+ return;
+ buf->data += offset;
+ buf->datalen -= (u16)offset;
+ buf->seqnum = fps;
+ }
+}
+
+/**
+ * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
+ * @ieq: ieq resource
+ * @rxlist: ieq's received buffer list
+ * @pbufl: temporary list for buffers for fpdu
+ * @txbuf: tx buffer for fpdu
+ * @fpdu_len: total length of fpdu
+ */
+static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct i40iw_puda_buf *txbuf,
+ u16 fpdu_len)
+{
+ struct i40iw_puda_buf *buf;
+ u32 nextseqnum;
+ u16 txoffset, bufoffset;
+
+ buf = i40iw_puda_get_listbuf(pbufl);
+ nextseqnum = buf->seqnum + fpdu_len;
+ txbuf->totallen = buf->hdrlen + fpdu_len;
+ txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
+ i40iw_ieq_setup_tx_buf(buf, txbuf);
+
+ txoffset = buf->hdrlen;
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+
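+ /* drain the partial list, copying each buffer's bytes into txbuf until the full fpdu is assembled */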
+ do {
+ if (buf->datalen >= fpdu_len) {
+ /* copied full fpdu */
+ i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
+ buf->datalen -= fpdu_len;
+ buf->data += fpdu_len;
+ buf->seqnum = nextseqnum;
+ break;
+ }
+ /* copy partial fpdu */
+ i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
+ txoffset += buf->datalen;
+ fpdu_len -= buf->datalen;
+ i40iw_puda_ret_bufpool(ieq, buf);
+ buf = i40iw_puda_get_listbuf(pbufl);
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+ } while (1);
+
+ /* last buffer on the list */
+ if (buf->datalen)
+ list_add(&buf->list, rxlist);
+ else
+ i40iw_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * i40iw_ieq_create_pbufl - create buffer list for single fpdu
+ * @pfpdu: partial management per user qp
+ * @rxlist: resource list for received ieq buffers
+ * @pbufl: temporary list for buffers of the fpdu
+ * @buf: first receive buffer
+ * @fpdu_len: total length of fpdu
+ */
+static enum i40iw_status_code i40iw_ieq_create_pbufl(
+ struct i40iw_pfpdu *pfpdu,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct i40iw_puda_buf *buf,
+ u16 fpdu_len)
+{
+ enum i40iw_status_code status = 0;
+ struct i40iw_puda_buf *nextbuf;
+ u32 nextseqnum;
+ u16 plen = fpdu_len - buf->datalen;
+ bool done = false;
+
+ nextseqnum = buf->seqnum + buf->datalen;
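+ /* gather contiguous buffers from rxlist onto pbufl until the remaining plen bytes of the fpdu are covered */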
+ do {
+ nextbuf = i40iw_puda_get_listbuf(rxlist);
+ if (!nextbuf) {
+ status = I40IW_ERR_list_empty;
+ break;
+ }
+ list_add_tail(&nextbuf->list, pbufl);
+ if (nextbuf->seqnum != nextseqnum) {
+ pfpdu->bad_seq_num++;
+ status = I40IW_ERR_SEQ_NUM;
+ break;
+ }
+ if (nextbuf->datalen >= plen) {
+ done = true;
+ } else {
+ plen -= nextbuf->datalen;
+ nextseqnum = nextbuf->seqnum + nextbuf->datalen;
+ }
+
+ } while (!done);
+
+ return status;
+}
+
+/**
+ * i40iw_ieq_handle_partial - process partial fpdu buffer
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ * @fpdu_len: fpdu len in the buffer
+ */
+static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
+ struct i40iw_pfpdu *pfpdu,
+ struct i40iw_puda_buf *buf,
+ u16 fpdu_len)
+{
+ enum i40iw_status_code status = 0;
+ u8 *crcptr;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ struct list_head pbufl; /* partial buffer list */
+ struct i40iw_puda_buf *txbuf = NULL;
+ struct list_head *rxlist = &pfpdu->rxlist;
+
+ INIT_LIST_HEAD(&pbufl);
+ list_add(&buf->list, &pbufl);
+
+ status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
+ if (status)
+ goto error;
+
+ txbuf = i40iw_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ status = I40IW_ERR_NO_TXBUFS;
+ goto error;
+ }
+
+ i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
+ i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
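+ /* the MPA CRC occupies the last 4 bytes of the fpdu */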
+ crcptr = txbuf->data + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc) {
+ status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
+ (fpdu_len - 4), mpacrc);
+ if (status) {
+ i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+ "%s: error bad crc\n", __func__);
+ goto error;
+ }
+ }
+
+ i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
+ txbuf->mem.va, txbuf->totallen);
+ i40iw_puda_send_buf(ieq, txbuf);
+ pfpdu->rcv_nxt = seqnum + fpdu_len;
+ return status;
+ error:
+ while (!list_empty(&pbufl)) {
+ buf = (struct i40iw_puda_buf *)(pbufl.prev);
+ list_del(&buf->list);
+ list_add(&buf->list, rxlist);
+ }
+ if (txbuf)
+ i40iw_puda_ret_bufpool(ieq, txbuf);
+ return status;
+}
+
+/**
+ * i40iw_ieq_process_buf - process buffer rcvd for ieq
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ */
+static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
+ struct i40iw_pfpdu *pfpdu,
+ struct i40iw_puda_buf *buf)
+{
+ u16 fpdu_len = 0;
+ u16 datalen = buf->datalen;
+ u8 *datap = buf->data;
+ u8 *crcptr;
+ u16 ioffset = 0;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ u16 length = 0;
+ u16 full = 0;
+ bool partial = false;
+ struct i40iw_puda_buf *txbuf;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ enum i40iw_status_code ret = 0;
+ enum i40iw_status_code status = 0;
+
+ ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
+ while (datalen) {
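+ /* the first two bytes of the fpdu carry the big-endian MPA length field */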
+ fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(u16 *)datap));
+ if (fpdu_len > pfpdu->max_fpdu_data) {
+ i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+ "%s: error bad fpdu_len\n", __func__);
+ status = I40IW_ERR_MPA_CRC;
+ list_add(&buf->list, rxlist);
+ return status;
+ }
+
+ if (datalen < fpdu_len) {
+ partial = true;
+ break;
+ }
+ crcptr = datap + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc)
+ ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
+ datap, fpdu_len - 4, mpacrc);
+ if (ret) {
+ status = I40IW_ERR_MPA_CRC;
+ list_add(&buf->list, rxlist);
+ return status;
+ }
+ full++;
+ pfpdu->fpdu_processed++;
+ datap += fpdu_len;
+ length += fpdu_len;
+ datalen -= fpdu_len;
+ }
+ if (full) {
+ /* copy full fpdus into the txbuf and send them out */
+ txbuf = i40iw_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ status = I40IW_ERR_NO_TXBUFS;
+ list_add(&buf->list, rxlist);
+ return status;
+ }
+ /* modify txbuf's buffer header */
+ i40iw_ieq_setup_tx_buf(buf, txbuf);
+ /* copy full fpdu's to new buffer */
+ i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
+ length);
+ txbuf->totallen = buf->hdrlen + length;
+
+ i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
+ i40iw_puda_send_buf(ieq, txbuf);
+
+ if (!datalen) {
+ pfpdu->rcv_nxt = buf->seqnum + length;
+ i40iw_puda_ret_bufpool(ieq, buf);
+ return status;
+ }
+ buf->data = datap;
+ buf->seqnum = seqnum + length;
+ buf->datalen = datalen;
+ pfpdu->rcv_nxt = buf->seqnum;
+ }
+ if (partial)
+ status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
+
+ return status;
+}
+
+/**
+ * i40iw_ieq_process_fpdus - process fpdu buffers on its list
+ * @qp: qp with partial fpdus
+ * @ieq: ieq resource
+ */
+static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
+ struct i40iw_puda_rsrc *ieq)
+{
+ struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ struct i40iw_puda_buf *buf;
+ enum i40iw_status_code status;
+
+ do {
+ if (list_empty(rxlist))
+ break;
+ buf = i40iw_puda_get_listbuf(rxlist);
+ if (!buf) {
+ i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+ "%s: error no buf\n", __func__);
+ break;
+ }
+ if (buf->seqnum != pfpdu->rcv_nxt) {
+ /* This could be out of order or missing packet */
+ pfpdu->out_of_order++;
+ list_add(&buf->list, rxlist);
+ break;
+ }
+ /* keep processing buffers from the head of the list */
+ status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
+ if (status == I40IW_ERR_MPA_CRC) {
+ pfpdu->mpa_crc_err = true;
+ while (!list_empty(rxlist)) {
+ buf = i40iw_puda_get_listbuf(rxlist);
+ i40iw_puda_ret_bufpool(ieq, buf);
+ pfpdu->crc_err++;
+ }
+ /* create CQP for AE */
+ i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
+ }
+ } while (!status);
+}
+
+/**
+ * i40iw_ieq_handle_exception - handle qp's exception
+ * @ieq: ieq resource
+ * @qp: qp receiving exception
+ * @buf: receive buffer
+ */
+static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
+ struct i40iw_sc_qp *qp,
+ struct i40iw_puda_buf *buf)
+{
+ struct i40iw_puda_buf *tmpbuf = NULL;
+ struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+ u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
+ u32 rcv_wnd = hw_host_ctx[23];
+ /* first partial seq # in q2 */
+ u32 fps = qp->q2_buf[16];
+ struct list_head *rxlist = &pfpdu->rxlist;
+ struct list_head *plist;
+
+ pfpdu->total_ieq_bufs++;
+
+ if (pfpdu->mpa_crc_err) {
+ pfpdu->crc_err++;
+ goto error;
+ }
+ if (pfpdu->mode && (fps != pfpdu->fps)) {
+ /* clean up qp as it is new partial sequence */
+ i40iw_ieq_cleanup_qp(ieq->dev, qp);
+ i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+ "%s: restarting new partial\n", __func__);
+ pfpdu->mode = false;
+ }
+
+ if (!pfpdu->mode) {
+ i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
+ /* First_Partial_Sequence_Number check */
+ pfpdu->rcv_nxt = fps;
+ pfpdu->fps = fps;
+ pfpdu->mode = true;
+ pfpdu->max_fpdu_data = ieq->mss;
+ pfpdu->pmode_count++;
+ INIT_LIST_HEAD(rxlist);
+ i40iw_ieq_check_first_buf(buf, fps);
+ }
+
+ if (rcv_wnd < (buf->seqnum - pfpdu->rcv_nxt)) {
+ pfpdu->bad_seq_num++;
+ goto error;
+ }
+
+ if (!list_empty(rxlist)) {
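+ /* keep rxlist sorted by seqnum; the signed difference below tolerates sequence number wrap */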
+ tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
+ plist = &tmpbuf->list;
+ while ((struct list_head *)tmpbuf != rxlist) {
+ if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
+ break;
+ tmpbuf = (struct i40iw_puda_buf *)plist->next;
+ }
+ /* Insert buf before tmpbuf */
+ list_add_tail(&buf->list, &tmpbuf->list);
+ } else {
+ list_add_tail(&buf->list, rxlist);
+ }
+ i40iw_ieq_process_fpdus(qp, ieq);
+ return;
+ error:
+ i40iw_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * i40iw_ieq_receive - received exception buffer
+ * @dev: iwarp device
+ * @buf: exception buffer received
+ */
+static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf)
+{
+ struct i40iw_puda_rsrc *ieq = dev->ieq;
+ struct i40iw_sc_qp *qp = NULL;
+ u32 wqe_idx = ieq->compl_rxwqe_idx;
+
+ qp = i40iw_ieq_get_qp(dev, buf);
+ if (!qp) {
+ ieq->stats_bad_qp_id++;
+ i40iw_puda_ret_bufpool(ieq, buf);
+ } else {
+ i40iw_ieq_handle_exception(ieq, qp, buf);
+ }
+ /*
+ * ieq->rx_wqe_idx tells i40iw_puda_replenish_rq()
+ * at which wqe_idx to start replenishing the rq
+ */
+ if (!ieq->rxq_invalid_cnt)
+ ieq->rx_wqe_idx = wqe_idx;
+ ieq->rxq_invalid_cnt++;
+}
+
+/**
+ * i40iw_ieq_tx_compl - put buffer back after send of exception buffer completes
+ * @dev: iwarp device
+ * @sqwrid: pointer to puda buffer
+ */
+static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
+{
+ struct i40iw_puda_rsrc *ieq = dev->ieq;
+ struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
+
+ i40iw_puda_ret_bufpool(ieq, buf);
+ if (!list_empty(&ieq->txpend)) {
+ buf = i40iw_puda_get_listbuf(&ieq->txpend);
+ i40iw_puda_send_buf(ieq, buf);
+ }
+}
+
+/**
+ * i40iw_ieq_cleanup_qp - qp is being destroyed
+ * @dev: iwarp device
+ * @qp: qp whose pending fpdu buffers are freed
+ */
+void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_puda_buf *buf;
+ struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ struct i40iw_puda_rsrc *ieq = dev->ieq;
+
+ if (!pfpdu->mode)
+ return;
+ while (!list_empty(rxlist)) {
+ buf = i40iw_puda_get_listbuf(rxlist);
+ i40iw_puda_ret_bufpool(ieq, buf);
+ }
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
new file mode 100644
index 000000000000..52bf7826ce4e
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -0,0 +1,183 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_PUDA_H
+#define I40IW_PUDA_H
+
+#define I40IW_IEQ_MPA_FRAMING 6
+
+struct i40iw_sc_dev;
+struct i40iw_sc_qp;
+struct i40iw_sc_cq;
+
+enum puda_resource_type {
+ I40IW_PUDA_RSRC_TYPE_ILQ = 1,
+ I40IW_PUDA_RSRC_TYPE_IEQ
+};
+
+enum puda_rsrc_complete {
+ PUDA_CQ_CREATED = 1,
+ PUDA_QP_CREATED,
+ PUDA_TX_COMPLETE,
+ PUDA_RX_COMPLETE,
+ PUDA_HASH_CRC_COMPLETE
+};
+
+struct i40iw_puda_completion_info {
+ struct i40iw_qp_uk *qp;
+ u8 q_type;
+ u8 vlan_valid;
+ u8 l3proto;
+ u8 l4proto;
+ u16 payload_len;
+ u32 compl_error; /* No_err=0, else major and minor err code */
+ u32 qp_id;
+ u32 wqe_idx;
+};
+
+struct i40iw_puda_send_info {
+ u64 paddr; /* Physical address */
+ u32 len;
+ u8 tcplen;
+ u8 maclen;
+ bool ipv4;
+ bool doloopback;
+ void *scratch;
+};
+
+struct i40iw_puda_buf {
+ struct list_head list; /* MUST be first entry */
+ struct i40iw_dma_mem mem; /* DMA memory for the buffer */
+ struct i40iw_puda_buf *next; /* for alloclist in rsrc struct */
+ struct i40iw_virt_mem buf_mem; /* Buffer memory for this buffer */
+ void *scratch;
+ u8 *iph;
+ u8 *tcph;
+ u8 *data;
+ u16 datalen;
+ u16 vlan_id;
+ u8 tcphlen; /* tcp length in bytes */
+ u8 maclen; /* mac length in bytes */
+ u32 totallen; /* maclen + iphlen + tcphlen + datalen */
+ atomic_t refcount;
+ u8 hdrlen;
+ bool ipv4;
+ u32 seqnum;
+};
+
+struct i40iw_puda_rsrc_info {
+ enum puda_resource_type type; /* ILQ or IEQ */
+ u32 count;
+ u16 pd_id;
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u16 buf_size;
+ u16 mss;
+ u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
+ void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
+ void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+};
+
+struct i40iw_puda_rsrc {
+ struct i40iw_sc_cq cq;
+ struct i40iw_sc_qp qp;
+ struct i40iw_sc_pd sc_pd;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_dma_mem cqmem;
+ struct i40iw_dma_mem qpmem;
+ struct i40iw_virt_mem ilq_mem;
+ enum puda_rsrc_complete completion;
+ enum puda_resource_type type;
+ u16 buf_size; /* buffer must be max datalen + tcpip hdr + mac */
+ u16 mss;
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u32 cq_size;
+ struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 compl_rxwqe_idx;
+ u32 rx_wqe_idx;
+ u32 rxq_invalid_cnt;
+ u32 tx_wqe_avail_cnt;
+ bool check_crc;
+ struct shash_desc *hash_desc;
+ struct list_head txpend;
+ struct list_head bufpool; /* free buffers pool list for recv and xmit */
+ u32 alloc_buf_count;
+ u32 avail_buf_count; /* snapshot of currently available buffers */
+ spinlock_t bufpool_lock;
+ struct i40iw_puda_buf *alloclist;
+ void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
+ void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+ /* puda stats */
+ u64 stats_buf_alloc_fail;
+ u64 stats_pkt_rcvd;
+ u64 stats_pkt_sent;
+ u64 stats_rcvd_pkt_err;
+ u64 stats_sent_pkt_q;
+ u64 stats_bad_qp_id;
+};
+
+struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc);
+void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
+ struct i40iw_puda_buf *buf);
+void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
+ struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
+ struct i40iw_puda_send_info *info);
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_rsrc_info *info);
+void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+ enum puda_resource_type type,
+ bool reset);
+enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_cq *cq, u32 *compl_err);
+void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+
+struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
+ struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
+ void *addr, u32 length, u32 value);
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc);
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+void i40iw_free_hash_desc(struct shash_desc *desc);
+void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
+ u32 seqnum);
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_register.h b/drivers/infiniband/hw/i40iw/i40iw_register.h
new file mode 100644
index 000000000000..57768184e251
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_register.h
@@ -0,0 +1,1030 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_REGISTER_H
+#define I40IW_REGISTER_H
+
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
+
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK 0xFF
+
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK (0xFFFF << I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK (0x1 << I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK (0xFF << I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK (0xFF << I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK (0xFF << I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK (0x1 << I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK (0xFF << I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK (0xFF << I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK (0x1 << I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* I40IW_REGISTER_H */
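
The GLPES statistics above follow one pattern throughout: each 64-bit counter is split into a LO register at the base offset and a HI register 4 bytes above it, the per-function arrays are strided 8 bytes apart (16 PF entries, 32 VF entries), and the HI half carries only the top 16 or 24 bits of the counter. A minimal sketch of composing one value, assuming a readl()-style MMIO accessor over the mapped BAR; the driver's real read path sits behind its pestat ops, and a LO-then-HI read can race a carry, which the sketch ignores:

static u64 read_pe_stat64(u8 __iomem *hw_addr, u32 lo_off, u32 hi_off,
			  u64 hi_mask)
{
	u64 lo = readl(hw_addr + lo_off);            /* low 32 bits of counter */
	u64 hi = readl(hw_addr + hi_off) & hi_mask;  /* 16- or 24-bit top half */

	return (hi << 32) | lo;
}

For example, PF 0's IPv6 RX octet count would be read_pe_stat64(hw_addr, I40E_GLPES_PFIP6RXOCTSLO(0), I40E_GLPES_PFIP6RXOCTSHI(0), I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK).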
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
new file mode 100644
index 000000000000..b0110c15e044
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_STATUS_H
+#define I40IW_STATUS_H
+
+/* Error Codes */
+enum i40iw_status_code {
+ I40IW_SUCCESS = 0,
+ I40IW_ERR_NVM = -1,
+ I40IW_ERR_NVM_CHECKSUM = -2,
+ I40IW_ERR_CONFIG = -4,
+ I40IW_ERR_PARAM = -5,
+ I40IW_ERR_DEVICE_NOT_SUPPORTED = -6,
+ I40IW_ERR_RESET_FAILED = -7,
+ I40IW_ERR_SWFW_SYNC = -8,
+ I40IW_ERR_NO_MEMORY = -9,
+ I40IW_ERR_BAD_PTR = -10,
+ I40IW_ERR_INVALID_PD_ID = -11,
+ I40IW_ERR_INVALID_QP_ID = -12,
+ I40IW_ERR_INVALID_CQ_ID = -13,
+ I40IW_ERR_INVALID_CEQ_ID = -14,
+ I40IW_ERR_INVALID_AEQ_ID = -15,
+ I40IW_ERR_INVALID_SIZE = -16,
+ I40IW_ERR_INVALID_ARP_INDEX = -17,
+ I40IW_ERR_INVALID_FPM_FUNC_ID = -18,
+ I40IW_ERR_QP_INVALID_MSG_SIZE = -19,
+ I40IW_ERR_QP_TOOMANY_WRS_POSTED = -20,
+ I40IW_ERR_INVALID_FRAG_COUNT = -21,
+ I40IW_ERR_QUEUE_EMPTY = -22,
+ I40IW_ERR_INVALID_ALIGNMENT = -23,
+ I40IW_ERR_FLUSHED_QUEUE = -24,
+ I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
+ I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+ I40IW_ERR_TIMEOUT = -27,
+ I40IW_ERR_OPCODE_MISMATCH = -28,
+ I40IW_ERR_CQP_COMPL_ERROR = -29,
+ I40IW_ERR_INVALID_VF_ID = -30,
+ I40IW_ERR_INVALID_HMCFN_ID = -31,
+ I40IW_ERR_BACKING_PAGE_ERROR = -32,
+ I40IW_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
+ I40IW_ERR_INVALID_PBLE_INDEX = -34,
+ I40IW_ERR_INVALID_SD_INDEX = -35,
+ I40IW_ERR_INVALID_PAGE_DESC_INDEX = -36,
+ I40IW_ERR_INVALID_SD_TYPE = -37,
+ I40IW_ERR_MEMCPY_FAILED = -38,
+ I40IW_ERR_INVALID_HMC_OBJ_INDEX = -39,
+ I40IW_ERR_INVALID_HMC_OBJ_COUNT = -40,
+ I40IW_ERR_INVALID_SRQ_ARM_LIMIT = -41,
+ I40IW_ERR_SRQ_ENABLED = -42,
+ I40IW_ERR_BUF_TOO_SHORT = -43,
+ I40IW_ERR_BAD_IWARP_CQE = -44,
+ I40IW_ERR_NVM_BLANK_MODE = -45,
+ I40IW_ERR_NOT_IMPLEMENTED = -46,
+ I40IW_ERR_PE_DOORBELL_NOT_ENABLED = -47,
+ I40IW_ERR_NOT_READY = -48,
+ I40IW_NOT_SUPPORTED = -49,
+ I40IW_ERR_FIRMWARE_API_VERSION = -50,
+ I40IW_ERR_RING_FULL = -51,
+ I40IW_ERR_MPA_CRC = -61,
+ I40IW_ERR_NO_TXBUFS = -62,
+ I40IW_ERR_SEQ_NUM = -63,
+ I40IW_ERR_list_empty = -64,
+ I40IW_ERR_INVALID_MAC_ADDR = -65,
+ I40IW_ERR_BAD_STAG = -66,
+ I40IW_ERR_CQ_COMPL_ERROR = -67,
+
+};
+#endif
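
The codes are zero-or-negative by design: I40IW_SUCCESS is 0 and every failure is a distinct negative value, so control-path helpers return enum i40iw_status_code and callers can treat any non-zero result as an error. A hypothetical helper in that style (not from the patch):

static enum i40iw_status_code check_frag_count(u32 frags, u32 max_frags)
{
	if (frags > max_frags)
		return I40IW_ERR_INVALID_FRAG_COUNT;
	return I40IW_SUCCESS;
}

which lets a caller simply write: if (ret) goto error;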
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
new file mode 100644
index 000000000000..edb3a8c8267a
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -0,0 +1,1312 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_TYPE_H
+#define I40IW_TYPE_H
+#include "i40iw_user.h"
+#include "i40iw_hmc.h"
+#include "i40iw_vf.h"
+#include "i40iw_virtchnl.h"
+
+struct i40iw_cqp_sq_wqe {
+ u64 buf[I40IW_CQP_WQE_SIZE];
+};
+
+struct i40iw_sc_aeqe {
+ u64 buf[I40IW_AEQE_SIZE];
+};
+
+struct i40iw_ceqe {
+ u64 buf[I40IW_CEQE_SIZE];
+};
+
+struct i40iw_cqp_ctx {
+ u64 buf[I40IW_CQP_CTX_SIZE];
+};
+
+struct i40iw_cq_shadow_area {
+ u64 buf[I40IW_SHADOW_AREA_SIZE];
+};
+
+struct i40iw_sc_dev;
+struct i40iw_hmc_info;
+struct i40iw_dev_pestat;
+
+struct i40iw_cqp_ops;
+struct i40iw_ccq_ops;
+struct i40iw_ceq_ops;
+struct i40iw_aeq_ops;
+struct i40iw_mr_ops;
+struct i40iw_cqp_misc_ops;
+struct i40iw_pd_ops;
+struct i40iw_priv_qp_ops;
+struct i40iw_priv_cq_ops;
+struct i40iw_hmc_ops;
+
+enum i40iw_resource_indicator_type {
+ I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,
+ I40IW_RSRC_INDICATOR_TYPE_CQ,
+ I40IW_RSRC_INDICATOR_TYPE_QP,
+ I40IW_RSRC_INDICATOR_TYPE_SRQ
+};
+
+enum i40iw_hdrct_flags {
+ DDP_LEN_FLAG = 0x80,
+ DDP_HDR_FLAG = 0x40,
+ RDMA_HDR_FLAG = 0x20
+};
+
+enum i40iw_term_layers {
+ LAYER_RDMA = 0,
+ LAYER_DDP = 1,
+ LAYER_MPA = 2
+};
+
+enum i40iw_term_error_types {
+ RDMAP_REMOTE_PROT = 1,
+ RDMAP_REMOTE_OP = 2,
+ DDP_CATASTROPHIC = 0,
+ DDP_TAGGED_BUFFER = 1,
+ DDP_UNTAGGED_BUFFER = 2,
+ DDP_LLP = 3
+};
+
+enum i40iw_term_rdma_errors {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_INV_BOUNDS = 0x01,
+ RDMAP_ACCESS = 0x02,
+ RDMAP_UNASSOC_STAG = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_RDMAP_VER = 0x05,
+ RDMAP_UNEXPECTED_OP = 0x06,
+ RDMAP_CATASTROPHIC_LOCAL = 0x07,
+ RDMAP_CATASTROPHIC_GLOBAL = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff
+};
+
+enum i40iw_term_ddp_errors {
+ DDP_CATASTROPHIC_LOCAL = 0x00,
+ DDP_TAGGED_INV_STAG = 0x00,
+ DDP_TAGGED_BOUNDS = 0x01,
+ DDP_TAGGED_UNASSOC_STAG = 0x02,
+ DDP_TAGGED_TO_WRAP = 0x03,
+ DDP_TAGGED_INV_DDP_VER = 0x04,
+ DDP_UNTAGGED_INV_QN = 0x01,
+ DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
+ DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
+ DDP_UNTAGGED_INV_MO = 0x04,
+ DDP_UNTAGGED_INV_TOO_LONG = 0x05,
+ DDP_UNTAGGED_INV_DDP_VER = 0x06
+};
+
+enum i40iw_term_mpa_errors {
+ MPA_CLOSED = 0x01,
+ MPA_CRC = 0x02,
+ MPA_MARKER = 0x03,
+ MPA_REQ_RSP = 0x04,
+};
+
+enum i40iw_flush_opcode {
+ FLUSH_INVALID = 0,
+ FLUSH_PROT_ERR,
+ FLUSH_REM_ACCESS_ERR,
+ FLUSH_LOC_QP_OP_ERR,
+ FLUSH_REM_OP_ERR,
+ FLUSH_LOC_LEN_ERR,
+ FLUSH_GENERAL_ERR,
+ FLUSH_FATAL_ERR
+};
+
+enum i40iw_term_eventtypes {
+ TERM_EVENT_QP_FATAL,
+ TERM_EVENT_QP_ACCESS_ERR
+};
+
+struct i40iw_terminate_hdr {
+ u8 layer_etype;
+ u8 error_code;
+ u8 hdrct;
+ u8 rsvd;
+};
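
The terminate enums and header above line up with the iWARP terminate message layout, in which a 4-bit layer and a 4-bit error type share one octet and the hdrct flags say which headers follow. A sketch of filling the header for a DDP "untagged buffer, no receive buffer" fault, assuming that conventional high-nibble/low-nibble packing (the patch's own terminate builder is not part of this hunk):

static void fill_term_hdr(struct i40iw_terminate_hdr *hdr)
{
	hdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
	hdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
	hdr->hdrct = DDP_LEN_FLAG | DDP_HDR_FLAG; /* DDP length and header follow */
	hdr->rsvd = 0;
}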
+
+enum i40iw_debug_flag {
+ I40IW_DEBUG_NONE = 0x00000000,
+ I40IW_DEBUG_ERR = 0x00000001,
+ I40IW_DEBUG_INIT = 0x00000002,
+ I40IW_DEBUG_DEV = 0x00000004,
+ I40IW_DEBUG_CM = 0x00000008,
+ I40IW_DEBUG_VERBS = 0x00000010,
+ I40IW_DEBUG_PUDA = 0x00000020,
+ I40IW_DEBUG_ILQ = 0x00000040,
+ I40IW_DEBUG_IEQ = 0x00000080,
+ I40IW_DEBUG_QP = 0x00000100,
+ I40IW_DEBUG_CQ = 0x00000200,
+ I40IW_DEBUG_MR = 0x00000400,
+ I40IW_DEBUG_PBLE = 0x00000800,
+ I40IW_DEBUG_WQE = 0x00001000,
+ I40IW_DEBUG_AEQ = 0x00002000,
+ I40IW_DEBUG_CQP = 0x00004000,
+ I40IW_DEBUG_HMC = 0x00008000,
+ I40IW_DEBUG_USER = 0x00010000,
+ I40IW_DEBUG_VIRT = 0x00020000,
+ I40IW_DEBUG_DCB = 0x00040000,
+ I40IW_DEBUG_CQE = 0x00800000,
+ I40IW_DEBUG_ALL = 0xFFFFFFFF
+};
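
These values are bits in a runtime mask (debug_mask in struct i40iw_sc_dev, later in this header), so individual categories can be enabled independently. A sketch of the usual consumption pattern; the macro name and the pr_info() sink are illustrative, not the driver's:

#define i40iw_dbg(dev, mask, fmt, ...)				\
do {								\
	if ((dev)->debug_mask & (mask))				\
		pr_info("i40iw: " fmt, ##__VA_ARGS__);		\
} while (0)

For example: i40iw_dbg(dev, I40IW_DEBUG_CQP, "cqp_id %u\n", cqp->cqp_id);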
+
+enum i40iw_hw_stat_index_32b {
+ I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,
+ I40IW_HW_STAT_INDEX_IP4RXTRUNC,
+ I40IW_HW_STAT_INDEX_IP4TXNOROUTE,
+ I40IW_HW_STAT_INDEX_IP6RXDISCARD,
+ I40IW_HW_STAT_INDEX_IP6RXTRUNC,
+ I40IW_HW_STAT_INDEX_IP6TXNOROUTE,
+ I40IW_HW_STAT_INDEX_TCPRTXSEG,
+ I40IW_HW_STAT_INDEX_TCPRXOPTERR,
+ I40IW_HW_STAT_INDEX_TCPRXPROTOERR,
+ I40IW_HW_STAT_INDEX_MAX_32
+};
+
+enum i40iw_hw_stat_index_64b {
+ I40IW_HW_STAT_INDEX_IP4RXOCTS = 0,
+ I40IW_HW_STAT_INDEX_IP4RXPKTS,
+ I40IW_HW_STAT_INDEX_IP4RXFRAGS,
+ I40IW_HW_STAT_INDEX_IP4RXMCPKTS,
+ I40IW_HW_STAT_INDEX_IP4TXOCTS,
+ I40IW_HW_STAT_INDEX_IP4TXPKTS,
+ I40IW_HW_STAT_INDEX_IP4TXFRAGS,
+ I40IW_HW_STAT_INDEX_IP4TXMCPKTS,
+ I40IW_HW_STAT_INDEX_IP6RXOCTS,
+ I40IW_HW_STAT_INDEX_IP6RXPKTS,
+ I40IW_HW_STAT_INDEX_IP6RXFRAGS,
+ I40IW_HW_STAT_INDEX_IP6RXMCPKTS,
+ I40IW_HW_STAT_INDEX_IP6TXOCTS,
+ I40IW_HW_STAT_INDEX_IP6TXPKTS,
+ I40IW_HW_STAT_INDEX_IP6TXFRAGS,
+ I40IW_HW_STAT_INDEX_IP6TXMCPKTS,
+ I40IW_HW_STAT_INDEX_TCPRXSEGS,
+ I40IW_HW_STAT_INDEX_TCPTXSEG,
+ I40IW_HW_STAT_INDEX_RDMARXRDS,
+ I40IW_HW_STAT_INDEX_RDMARXSNDS,
+ I40IW_HW_STAT_INDEX_RDMARXWRS,
+ I40IW_HW_STAT_INDEX_RDMATXRDS,
+ I40IW_HW_STAT_INDEX_RDMATXSNDS,
+ I40IW_HW_STAT_INDEX_RDMATXWRS,
+ I40IW_HW_STAT_INDEX_RDMAVBND,
+ I40IW_HW_STAT_INDEX_RDMAVINV,
+ I40IW_HW_STAT_INDEX_MAX_64
+};
+
+struct i40iw_dev_hw_stat_offsets {
+ u32 stat_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
+ u32 stat_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
+};
+
+struct i40iw_dev_hw_stats {
+ u64 stat_value_32[I40IW_HW_STAT_INDEX_MAX_32];
+ u64 stat_value_64[I40IW_HW_STAT_INDEX_MAX_64];
+};
+
+struct i40iw_device_pestat_ops {
+ void (*iw_hw_stat_init)(struct i40iw_dev_pestat *, u8, struct i40iw_hw *, bool);
+ void (*iw_hw_stat_read_32)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_32b, u64 *);
+ void (*iw_hw_stat_read_64)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_64b, u64 *);
+ void (*iw_hw_stat_read_all)(struct i40iw_dev_pestat *, struct i40iw_dev_hw_stats *);
+ void (*iw_hw_stat_refresh_all)(struct i40iw_dev_pestat *);
+};
+
+struct i40iw_dev_pestat {
+ struct i40iw_hw *hw;
+ struct i40iw_device_pestat_ops ops;
+ struct i40iw_dev_hw_stats hw_stats;
+ struct i40iw_dev_hw_stats last_read_hw_stats;
+ struct i40iw_dev_hw_stat_offsets hw_stat_offsets;
+ struct timer_list stats_timer;
+ spinlock_t stats_lock; /* rdma stats lock */
+};
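
Keeping hw_stats next to last_read_hw_stats, with a periodic stats_timer, implies the usual rollover-safe accumulation: the on-chip counters are narrower than 64 bits, so each refresh folds the modular delta since the previous raw read into the 64-bit running total. Purely a sketch of that scheme; the real logic sits behind the i40iw_device_pestat_ops callbacks:

static void accumulate_stat(u64 *total, u64 *last_raw, u64 raw, u64 width_mask)
{
	u64 delta = (raw - *last_raw) & width_mask; /* modular delta spans a wrap */

	*total += delta;
	*last_raw = raw;
}

Here width_mask would be 0xFFFFFF for the 24-bit error counters; for the HI/LO pairs it follows the HI masks above (16, 24, or 32 significant top bits).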
+
+struct i40iw_hw {
+ u8 __iomem *hw_addr;
+ void *dev_context;
+ struct i40iw_hmc_info hmc;
+};
+
+struct i40iw_pfpdu {
+ struct list_head rxlist;
+ u32 rcv_nxt;
+ u32 fps;
+ u32 max_fpdu_data;
+ bool mode;
+ bool mpa_crc_err;
+ u64 total_ieq_bufs;
+ u64 fpdu_processed;
+ u64 bad_seq_num;
+ u64 crc_err;
+ u64 no_tx_bufs;
+ u64 tx_err;
+ u64 out_of_order;
+ u64 pmode_count;
+};
+
+struct i40iw_sc_pd {
+ u32 size;
+ struct i40iw_sc_dev *dev;
+ u16 pd_id;
+};
+
+struct i40iw_cqp_quanta {
+ u64 elem[I40IW_CQP_WQE_SIZE];
+};
+
+struct i40iw_sc_cqp {
+ u32 size;
+ u64 sq_pa;
+ u64 host_ctx_pa;
+ void *back_cqp;
+ struct i40iw_sc_dev *dev;
+ enum i40iw_status_code (*process_cqp_sds)(struct i40iw_sc_dev *,
+ struct i40iw_update_sds_info *);
+ struct i40iw_dma_mem sdbuf;
+ struct i40iw_ring sq_ring;
+ struct i40iw_cqp_quanta *sq_base;
+ u64 *host_ctx;
+ u64 *scratch_array;
+ u32 cqp_id;
+ u32 sq_size;
+ u32 hw_sq_size;
+ u8 struct_ver;
+ u8 polarity;
+ bool en_datacenter_tcp;
+ u8 hmc_profile;
+ u8 enabled_vf_count;
+ u8 timeout_count;
+};
+
+struct i40iw_sc_aeq {
+ u32 size;
+ u64 aeq_elem_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_sc_aeqe *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ struct i40iw_ring aeq_ring;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ u8 polarity;
+};
+
+struct i40iw_sc_ceq {
+ u32 size;
+ u64 ceq_elem_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_ceqe *ceqe_base;
+ void *pbl_list;
+ u32 ceq_id;
+ u32 elem_cnt;
+ struct i40iw_ring ceq_ring;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ bool tph_en;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ u8 polarity;
+};
+
+struct i40iw_sc_cq {
+ struct i40iw_cq_uk cq_uk;
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct i40iw_sc_dev *dev;
+ void *pbl_list;
+ void *back_cq;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ bool ceqe_mask;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u8 cq_type;
+ bool ceq_id_valid;
+ bool tph_en;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ bool check_overflow;
+};
+
+struct i40iw_sc_qp {
+ struct i40iw_qp_uk qp_uk;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 hw_host_ctx_pa;
+ u64 shadow_area_pa;
+ u64 q2_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_sc_pd *pd;
+ u64 *hw_host_ctx;
+ void *llp_stream_handle;
+ void *back_qp;
+ struct i40iw_pfpdu pfpdu;
+ u8 *q2_buf;
+ u64 qp_compl_ctx;
+ u16 qs_handle;
+ u16 exception_lan_queue;
+ u16 push_idx;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ u8 qp_state;
+ u8 qp_type;
+ u8 hw_sq_size;
+ u8 hw_rq_size;
+ u8 src_mac_addr_idx;
+ bool sq_tph_en;
+ bool rq_tph_en;
+ bool rcv_tph_en;
+ bool xmit_tph_en;
+ bool virtual_map;
+ bool flush_sq;
+ bool flush_rq;
+ bool sq_flush;
+ enum i40iw_flush_opcode flush_code;
+ enum i40iw_term_eventtypes eventtype;
+ u8 term_flags;
+};
+
+struct i40iw_hmc_fpm_misc {
+ u32 max_ceqs;
+ u32 max_sds;
+ u32 xf_block_size;
+ u32 q1_block_size;
+ u32 ht_multiplier;
+ u32 timer_bucket;
+};
+
+struct i40iw_vchnl_if {
+ enum i40iw_status_code (*vchnl_recv)(struct i40iw_sc_dev *, u32, u8 *, u16);
+ enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *dev, u32, u8 *, u16);
+};
+
+#define I40IW_VCHNL_MAX_VF_MSG_SIZE 512
+
+struct i40iw_vchnl_vf_msg_buffer {
+ struct i40iw_virtchnl_op_buf vchnl_msg;
+ char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];
+};
+
+struct i40iw_vfdev {
+ struct i40iw_sc_dev *pf_dev;
+ u8 *hmc_info_mem;
+ struct i40iw_dev_pestat dev_pestat;
+ struct i40iw_hmc_pble_info *pble_info;
+ struct i40iw_hmc_info hmc_info;
+ struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;
+ u64 fpm_query_buf_pa;
+ u64 *fpm_query_buf;
+ u32 vf_id;
+ u32 msg_count;
+ bool pf_hmc_initialized;
+ u16 pmf_index;
+ u16 iw_vf_idx; /* VF Device table index */
+ bool stats_initialized;
+};
+
+struct i40iw_sc_dev {
+ struct list_head cqp_cmd_head; /* head of the CQP command list */
+ spinlock_t cqp_lock; /* cqp list sync */
+ struct i40iw_dev_uk dev_uk;
+ struct i40iw_dev_pestat dev_pestat;
+ struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ u64 *fpm_query_buf;
+ u64 *fpm_commit_buf;
+ void *back_dev;
+ struct i40iw_hw *hw;
+ u8 __iomem *db_addr;
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_hmc_pble_info *pble_info;
+ struct i40iw_vfdev *vf_dev[I40IW_MAX_PE_ENABLED_VF_COUNT];
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_sc_aeq *aeq;
+ struct i40iw_sc_ceq *ceq[I40IW_CEQ_MAX_COUNT];
+ struct i40iw_sc_cq *ccq;
+ struct i40iw_cqp_ops *cqp_ops;
+ struct i40iw_ccq_ops *ccq_ops;
+ struct i40iw_ceq_ops *ceq_ops;
+ struct i40iw_aeq_ops *aeq_ops;
+ struct i40iw_pd_ops *iw_pd_ops;
+ struct i40iw_priv_qp_ops *iw_priv_qp_ops;
+ struct i40iw_priv_cq_ops *iw_priv_cq_ops;
+ struct i40iw_mr_ops *mr_ops;
+ struct i40iw_cqp_misc_ops *cqp_misc_ops;
+ struct i40iw_hmc_ops *hmc_ops;
+ struct i40iw_vchnl_if vchnl_if;
+ u32 ilq_count;
+ struct i40iw_virt_mem ilq_mem;
+ struct i40iw_puda_rsrc *ilq;
+ u32 ieq_count;
+ struct i40iw_virt_mem ieq_mem;
+ struct i40iw_puda_rsrc *ieq;
+
+ struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
+
+ struct i40iw_hmc_fpm_misc hmc_fpm_misc;
+ u16 qs_handle;
+ u32 debug_mask;
+ u16 exception_lan_queue;
+ u8 hmc_fn_id;
+ bool is_pf;
+ bool vchnl_up;
+ u8 vf_id;
+ u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY];
+ struct i40iw_vchnl_vf_msg_buffer vchnl_vf_msg_buf;
+ u8 hw_rev;
+};
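+
+/*
+ * The *_ops members above are function-pointer dispatch tables.  The
+ * expectation (an assumption here -- the init code that populates them
+ * lives elsewhere) is that callers invoke hardware operations through
+ * them rather than calling the implementations directly, e.g.:
+ *
+ *	dev->iw_pd_ops->pd_init(dev, &pd, pd_id);
+ */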
+
+struct i40iw_modify_cq_info {
+ u64 cq_pa;
+ struct i40iw_cqe *cq_base;
+ void *pbl_list;
+ u32 ceq_id;
+ u32 cq_size;
+ u32 shadow_read_threshold;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ bool check_overflow;
+ bool cq_resize;
+ bool ceq_change;
+ bool check_overflow_change;
+ u32 first_pm_pbl_idx;
+ bool ceq_valid;
+};
+
+struct i40iw_create_qp_info {
+ u8 next_iwarp_state;
+ bool ord_valid;
+ bool tcp_ctx_valid;
+ bool cq_num_valid;
+ bool static_rsrc;
+ bool arp_cache_idx_valid;
+};
+
+struct i40iw_modify_qp_info {
+ u64 rx_win0;
+ u64 rx_win1;
+ u16 new_mss;
+ u8 next_iwarp_state;
+ u8 termlen;
+ bool ord_valid;
+ bool tcp_ctx_valid;
+ bool cq_num_valid;
+ bool static_rsrc;
+ bool arp_cache_idx_valid;
+ bool reset_tcp_conn;
+ bool remove_hash_idx;
+ bool dont_send_term;
+ bool dont_send_fin;
+ bool cached_var_valid;
+ bool mss_change;
+ bool force_loopback;
+};
+
+struct i40iw_ccq_cqe_info {
+ struct i40iw_sc_cqp *cqp;
+ u64 scratch;
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ u8 op_code;
+ bool error;
+};
+
+struct i40iw_l2params {
+ u16 qs_handle_list[I40IW_MAX_USER_PRIORITY];
+ u16 mss;
+};
+
+struct i40iw_device_init_info {
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ u64 *fpm_query_buf;
+ u64 *fpm_commit_buf;
+ struct i40iw_hw *hw;
+ void __iomem *bar0;
+ enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
+ u16 qs_handle;
+ u16 exception_lan_queue;
+ u8 hmc_fn_id;
+ bool is_pf;
+ u32 debug_mask;
+};
+
+enum i40iw_cqp_hmc_profile {
+ I40IW_HMC_PROFILE_DEFAULT = 1,
+ I40IW_HMC_PROFILE_FAVOR_VF = 2,
+ I40IW_HMC_PROFILE_EQUAL = 3,
+};
+
+struct i40iw_cqp_init_info {
+ u64 cqp_compl_ctx;
+ u64 host_ctx_pa;
+ u64 sq_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_cqp_quanta *sq;
+ u64 *host_ctx;
+ u64 *scratch_array;
+ u32 sq_size;
+ u8 struct_ver;
+ bool en_datacenter_tcp;
+ u8 hmc_profile;
+ u8 enabled_vf_count;
+};
+
+struct i40iw_ceq_init_info {
+ u64 ceqe_pa;
+ struct i40iw_sc_dev *dev;
+ u64 *ceqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ u32 ceq_id;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ bool tph_en;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+};
+
+struct i40iw_aeq_init_info {
+ u64 aeq_elem_pa;
+ struct i40iw_sc_dev *dev;
+ u32 *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+};
+
+struct i40iw_ccq_init_info {
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_cqe *cq_base;
+ u64 *shadow_area;
+ void *pbl_list;
+ u32 num_elem;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ bool ceqe_mask;
+ bool ceq_id_valid;
+ bool tph_en;
+ u8 tph_val;
+ bool avoid_mem_cflct;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+};
+
+struct i40iwarp_offload_info {
+ u16 rcv_mark_offset;
+ u16 snd_mark_offset;
+ u16 pd_id;
+ u8 ddp_ver;
+ u8 rdmap_ver;
+ u8 ord_size;
+ u8 ird_size;
+ bool wr_rdresp_en;
+ bool rd_enable;
+ bool snd_mark_en;
+ bool rcv_mark_en;
+ bool bind_en;
+ bool fast_reg_en;
+ bool priv_mode_en;
+ bool lsmm_present;
+ u8 iwarp_mode;
+ bool align_hdrs;
+ bool rcv_no_mpa_crc;
+
+ u8 last_byte_sent;
+};
+
+struct i40iw_tcp_offload_info {
+ bool ipv4;
+ bool no_nagle;
+ bool insert_vlan_tag;
+ bool time_stamp;
+ u8 cwnd_inc_limit;
+ bool drop_ooo_seg;
+ bool dup_ack_thresh;
+ u8 ttl;
+ u8 src_mac_addr_idx;
+ bool avoid_stretch_ack;
+ u8 tos;
+ u16 src_port;
+ u16 dst_port;
+ u32 dest_ip_addr0;
+ u32 dest_ip_addr1;
+ u32 dest_ip_addr2;
+ u32 dest_ip_addr3;
+ u32 snd_mss;
+ u16 vlan_tag;
+ u16 arp_idx;
+ u32 flow_label;
+ bool wscale;
+ u8 tcp_state;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+ u32 time_stamp_recent;
+ u32 time_stamp_age;
+ u32 snd_nxt;
+ u32 snd_wnd;
+ u32 rcv_nxt;
+ u32 rcv_wnd;
+ u32 snd_max;
+ u32 snd_una;
+ u32 srtt;
+ u32 rtt_var;
+ u32 ss_thresh;
+ u32 cwnd;
+ u32 snd_wl1;
+ u32 snd_wl2;
+ u32 max_snd_window;
+ u8 rexmit_thresh;
+ u32 local_ipaddr0;
+ u32 local_ipaddr1;
+ u32 local_ipaddr2;
+ u32 local_ipaddr3;
+ bool ignore_tcp_opt;
+ bool ignore_tcp_uns_opt;
+};
+
+struct i40iw_qp_host_ctx_info {
+ u64 qp_compl_ctx;
+ struct i40iw_tcp_offload_info *tcp_info;
+ struct i40iwarp_offload_info *iwarp_info;
+ u32 send_cq_num;
+ u32 rcv_cq_num;
+ u16 push_idx;
+ bool push_mode_en;
+ bool tcp_info_valid;
+ bool iwarp_info_valid;
+ bool err_rq_idx_valid;
+ u16 err_rq_idx;
+};
+
+struct i40iw_aeqe_info {
+ u64 compl_ctx;
+ u32 qp_cq_id;
+ u16 ae_id;
+ u16 wqe_idx;
+ u8 tcp_state;
+ u8 iwarp_state;
+ bool qp;
+ bool cq;
+ bool sq;
+ bool in_rdrsp_wr;
+ bool out_rdrsp;
+ u8 q2_data_written;
+ bool aeqe_overflow;
+};
+
+struct i40iw_allocate_stag_info {
+ u64 total_len;
+ u32 chunk_size;
+ u32 stag_idx;
+ u32 page_size;
+ u16 pd_id;
+ u16 access_rights;
+ bool remote_access;
+ bool use_hmc_fcn_index;
+ u8 hmc_fcn_index;
+ bool use_pf_rid;
+};
+
+struct i40iw_reg_ns_stag_info {
+ u64 reg_addr_pa;
+ u64 fbo;
+ void *va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum i40iw_addressing_type addr_type;
+ i40iw_stag_index stag_idx;
+ u16 access_rights;
+ u16 pd_id;
+ i40iw_stag_key stag_key;
+ bool use_hmc_fcn_index;
+ u8 hmc_fcn_index;
+ bool use_pf_rid;
+};
+
+struct i40iw_fast_reg_stag_info {
+ u64 wr_id;
+ u64 reg_addr_pa;
+ u64 fbo;
+ void *va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum i40iw_addressing_type addr_type;
+ i40iw_stag_index stag_idx;
+ u16 access_rights;
+ u16 pd_id;
+ i40iw_stag_key stag_key;
+ bool local_fence;
+ bool read_fence;
+ bool signaled;
+ bool use_hmc_fcn_index;
+ u8 hmc_fcn_index;
+ bool use_pf_rid;
+ bool defer_flag;
+};
+
+struct i40iw_dealloc_stag_info {
+ u32 stag_idx;
+ u16 pd_id;
+ bool mr;
+ bool dealloc_pbl;
+};
+
+struct i40iw_register_shared_stag {
+ void *va;
+ enum i40iw_addressing_type addr_type;
+ i40iw_stag_index new_stag_idx;
+ i40iw_stag_index parent_stag_idx;
+ u32 access_rights;
+ u16 pd_id;
+ i40iw_stag_key new_stag_key;
+};
+
+struct i40iw_qp_init_info {
+ struct i40iw_qp_uk_init_info qp_uk_init_info;
+ struct i40iw_sc_pd *pd;
+ u64 *host_ctx;
+ u8 *q2;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 host_ctx_pa;
+ u64 q2_pa;
+ u64 shadow_area_pa;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ u8 type;
+ bool sq_tph_en;
+ bool rq_tph_en;
+ bool rcv_tph_en;
+ bool xmit_tph_en;
+ bool virtual_map;
+};
+
+struct i40iw_cq_init_info {
+ struct i40iw_sc_dev *dev;
+ u64 cq_base_pa;
+ u64 shadow_area_pa;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ bool virtual_map;
+ bool ceqe_mask;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ bool ceq_id_valid;
+ bool tph_en;
+ u8 tph_val;
+ u8 type;
+ struct i40iw_cq_uk_init_info cq_uk_init_info;
+};
+
+struct i40iw_upload_context_info {
+ u64 buf_pa;
+ bool freeze_qp;
+ bool raw_format;
+ u32 qp_id;
+ u8 qp_type;
+};
+
+struct i40iw_add_arp_cache_entry_info {
+ u8 mac_addr[6];
+ u32 reach_max;
+ u16 arp_index;
+ bool permanent;
+};
+
+struct i40iw_apbvt_info {
+ u16 port;
+ bool add;
+};
+
+enum i40iw_quad_entry_type {
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED = 1,
+ I40IW_QHASH_TYPE_TCP_SYN,
+};
+
+enum i40iw_quad_hash_manage_type {
+ I40IW_QHASH_MANAGE_TYPE_DELETE = 0,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ I40IW_QHASH_MANAGE_TYPE_MODIFY
+};
+
+struct i40iw_qhash_table_info {
+ enum i40iw_quad_hash_manage_type manage;
+ enum i40iw_quad_entry_type entry_type;
+ bool vlan_valid;
+ bool ipv4_valid;
+ u8 mac_addr[6];
+ u16 vlan_id;
+ u16 qs_handle;
+ u32 qp_num;
+ u32 dest_ip[4];
+ u32 src_ip[4];
+ u32 dest_port;
+ u32 src_port;
+};
+
+struct i40iw_local_mac_ipaddr_entry_info {
+ u8 mac_addr[6];
+ u8 entry_idx;
+};
+
+struct i40iw_cqp_manage_push_page_info {
+ u32 push_idx;
+ u16 qs_handle;
+ u8 free_page;
+};
+
+struct i40iw_qp_flush_info {
+ u16 sq_minor_code;
+ u16 sq_major_code;
+ u16 rq_minor_code;
+ u16 rq_major_code;
+ u16 ae_code;
+ u8 ae_source;
+ bool sq;
+ bool rq;
+ bool userflushcode;
+ bool generate_ae;
+};
+
+struct i40iw_cqp_commit_fpm_values {
+ u64 qp_base;
+ u64 cq_base;
+ u32 hte_base;
+ u32 arp_base;
+ u32 apbvt_inuse_base;
+ u32 mr_base;
+ u32 xf_base;
+ u32 xffl_base;
+ u32 q1_base;
+ u32 q1fl_base;
+ u32 fsimc_base;
+ u32 fsiav_base;
+ u32 pbl_base;
+
+ u32 qp_cnt;
+ u32 cq_cnt;
+ u32 hte_cnt;
+ u32 arp_cnt;
+ u32 mr_cnt;
+ u32 xf_cnt;
+ u32 xffl_cnt;
+ u32 q1_cnt;
+ u32 q1fl_cnt;
+ u32 fsimc_cnt;
+ u32 fsiav_cnt;
+ u32 pbl_cnt;
+};
+
+struct i40iw_cqp_query_fpm_values {
+ u16 first_pe_sd_index;
+ u32 qp_objsize;
+ u32 cq_objsize;
+ u32 hte_objsize;
+ u32 arp_objsize;
+ u32 mr_objsize;
+ u32 xf_objsize;
+ u32 q1_objsize;
+ u32 fsimc_objsize;
+ u32 fsiav_objsize;
+
+ u32 qp_max;
+ u32 cq_max;
+ u32 hte_max;
+ u32 arp_max;
+ u32 mr_max;
+ u32 xf_max;
+ u32 xffl_max;
+ u32 q1_max;
+ u32 q1fl_max;
+ u32 fsimc_max;
+ u32 fsiav_max;
+ u32 pbl_max;
+};
+
+struct i40iw_cqp_ops {
+ enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,
+ struct i40iw_cqp_init_info *);
+ enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, bool, u16 *, u16 *);
+ void (*cqp_post_sq)(struct i40iw_sc_cqp *);
+ u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);
+ enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);
+ enum i40iw_status_code (*poll_for_cqp_op_done)(struct i40iw_sc_cqp *, u8,
+ struct i40iw_ccq_cqe_info *);
+};
+
+struct i40iw_ccq_ops {
+ enum i40iw_status_code (*ccq_init)(struct i40iw_sc_cq *,
+ struct i40iw_ccq_init_info *);
+ enum i40iw_status_code (*ccq_create)(struct i40iw_sc_cq *, u64, bool, bool);
+ enum i40iw_status_code (*ccq_destroy)(struct i40iw_sc_cq *, u64, bool);
+ enum i40iw_status_code (*ccq_create_done)(struct i40iw_sc_cq *);
+ enum i40iw_status_code (*ccq_get_cqe_info)(struct i40iw_sc_cq *,
+ struct i40iw_ccq_cqe_info *);
+ void (*ccq_arm)(struct i40iw_sc_cq *);
+};
+
+struct i40iw_ceq_ops {
+ enum i40iw_status_code (*ceq_init)(struct i40iw_sc_ceq *,
+ struct i40iw_ceq_init_info *);
+ enum i40iw_status_code (*ceq_create)(struct i40iw_sc_ceq *, u64, bool);
+ enum i40iw_status_code (*cceq_create_done)(struct i40iw_sc_ceq *);
+ enum i40iw_status_code (*cceq_destroy_done)(struct i40iw_sc_ceq *);
+ enum i40iw_status_code (*cceq_create)(struct i40iw_sc_ceq *, u64);
+ enum i40iw_status_code (*ceq_destroy)(struct i40iw_sc_ceq *, u64, bool);
+ void *(*process_ceq)(struct i40iw_sc_dev *, struct i40iw_sc_ceq *);
+};
+
+struct i40iw_aeq_ops {
+ enum i40iw_status_code (*aeq_init)(struct i40iw_sc_aeq *,
+ struct i40iw_aeq_init_info *);
+ enum i40iw_status_code (*aeq_create)(struct i40iw_sc_aeq *, u64, bool);
+ enum i40iw_status_code (*aeq_destroy)(struct i40iw_sc_aeq *, u64, bool);
+ enum i40iw_status_code (*get_next_aeqe)(struct i40iw_sc_aeq *,
+ struct i40iw_aeqe_info *);
+ enum i40iw_status_code (*repost_aeq_entries)(struct i40iw_sc_dev *, u32);
+ enum i40iw_status_code (*aeq_create_done)(struct i40iw_sc_aeq *);
+ enum i40iw_status_code (*aeq_destroy_done)(struct i40iw_sc_aeq *);
+};
+
+struct i40iw_pd_ops {
+ void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16);
+};
+
+struct i40iw_priv_qp_ops {
+ enum i40iw_status_code (*qp_init)(struct i40iw_sc_qp *, struct i40iw_qp_init_info *);
+ enum i40iw_status_code (*qp_create)(struct i40iw_sc_qp *,
+ struct i40iw_create_qp_info *, u64, bool);
+ enum i40iw_status_code (*qp_modify)(struct i40iw_sc_qp *,
+ struct i40iw_modify_qp_info *, u64, bool);
+ enum i40iw_status_code (*qp_destroy)(struct i40iw_sc_qp *, u64, bool, bool, bool);
+ enum i40iw_status_code (*qp_flush_wqes)(struct i40iw_sc_qp *,
+ struct i40iw_qp_flush_info *, u64, bool);
+ enum i40iw_status_code (*qp_upload_context)(struct i40iw_sc_dev *,
+ struct i40iw_upload_context_info *,
+ u64, bool);
+ enum i40iw_status_code (*qp_setctx)(struct i40iw_sc_qp *, u64 *,
+ struct i40iw_qp_host_ctx_info *);
+
+ void (*qp_send_lsmm)(struct i40iw_sc_qp *, void *, u32, i40iw_stag);
+ void (*qp_send_lsmm_nostag)(struct i40iw_sc_qp *, void *, u32);
+ void (*qp_send_rtt)(struct i40iw_sc_qp *, bool);
+ enum i40iw_status_code (*qp_post_wqe0)(struct i40iw_sc_qp *, u8);
+};
+
+struct i40iw_priv_cq_ops {
+ enum i40iw_status_code (*cq_init)(struct i40iw_sc_cq *, struct i40iw_cq_init_info *);
+ enum i40iw_status_code (*cq_create)(struct i40iw_sc_cq *, u64, bool, bool);
+ enum i40iw_status_code (*cq_destroy)(struct i40iw_sc_cq *, u64, bool);
+ enum i40iw_status_code (*cq_modify)(struct i40iw_sc_cq *,
+ struct i40iw_modify_cq_info *, u64, bool);
+};
+
+struct i40iw_mr_ops {
+ enum i40iw_status_code (*alloc_stag)(struct i40iw_sc_dev *,
+ struct i40iw_allocate_stag_info *, u64, bool);
+ enum i40iw_status_code (*mr_reg_non_shared)(struct i40iw_sc_dev *,
+ struct i40iw_reg_ns_stag_info *,
+ u64, bool);
+ enum i40iw_status_code (*mr_reg_shared)(struct i40iw_sc_dev *,
+ struct i40iw_register_shared_stag *,
+ u64, bool);
+ enum i40iw_status_code (*dealloc_stag)(struct i40iw_sc_dev *,
+ struct i40iw_dealloc_stag_info *,
+ u64, bool);
+ enum i40iw_status_code (*query_stag)(struct i40iw_sc_dev *, u64, u32, bool);
+ enum i40iw_status_code (*mw_alloc)(struct i40iw_sc_dev *, u64, u32, u16, bool);
+};
+
+struct i40iw_cqp_misc_ops {
+ enum i40iw_status_code (*manage_push_page)(struct i40iw_sc_cqp *,
+ struct i40iw_cqp_manage_push_page_info *,
+ u64, bool);
+ enum i40iw_status_code (*manage_hmc_pm_func_table)(struct i40iw_sc_cqp *,
+ u64, u8, bool, bool);
+ enum i40iw_status_code (*set_hmc_resource_profile)(struct i40iw_sc_cqp *,
+ u64, u8, u8, bool, bool);
+ enum i40iw_status_code (*commit_fpm_values)(struct i40iw_sc_cqp *, u64, u8,
+ struct i40iw_dma_mem *, bool, u8);
+ enum i40iw_status_code (*query_fpm_values)(struct i40iw_sc_cqp *, u64, u8,
+ struct i40iw_dma_mem *, bool, u8);
+ enum i40iw_status_code (*static_hmc_pages_allocated)(struct i40iw_sc_cqp *,
+ u64, u8, bool, bool);
+ enum i40iw_status_code (*add_arp_cache_entry)(struct i40iw_sc_cqp *,
+ struct i40iw_add_arp_cache_entry_info *,
+ u64, bool);
+ enum i40iw_status_code (*del_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);
+ enum i40iw_status_code (*query_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);
+ enum i40iw_status_code (*manage_apbvt_entry)(struct i40iw_sc_cqp *,
+ struct i40iw_apbvt_info *, u64, bool);
+ enum i40iw_status_code (*manage_qhash_table_entry)(struct i40iw_sc_cqp *,
+ struct i40iw_qhash_table_info *, u64, bool);
+ enum i40iw_status_code (*alloc_local_mac_ipaddr_table_entry)(struct i40iw_sc_cqp *, u64, bool);
+ enum i40iw_status_code (*add_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *,
+ struct i40iw_local_mac_ipaddr_entry_info *,
+ u64, bool);
+ enum i40iw_status_code (*del_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *, u64, u8, u8, bool);
+ enum i40iw_status_code (*cqp_nop)(struct i40iw_sc_cqp *, u64, bool);
+ enum i40iw_status_code (*commit_fpm_values_done)(struct i40iw_sc_cqp *);
+ enum i40iw_status_code (*query_fpm_values_done)(struct i40iw_sc_cqp *);
+ enum i40iw_status_code (*manage_hmc_pm_func_table_done)(struct i40iw_sc_cqp *);
+ enum i40iw_status_code (*update_suspend_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);
+ enum i40iw_status_code (*update_resume_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);
+};
+
+struct i40iw_hmc_ops {
+ enum i40iw_status_code (*init_iw_hmc)(struct i40iw_sc_dev *, u8);
+ enum i40iw_status_code (*parse_fpm_query_buf)(u64 *, struct i40iw_hmc_info *,
+ struct i40iw_hmc_fpm_misc *);
+ enum i40iw_status_code (*configure_iw_fpm)(struct i40iw_sc_dev *, u8);
+ enum i40iw_status_code (*parse_fpm_commit_buf)(u64 *, struct i40iw_hmc_obj_info *);
+ enum i40iw_status_code (*create_hmc_object)(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *);
+ enum i40iw_status_code (*del_hmc_object)(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_del_obj_info *,
+ bool reset);
+ enum i40iw_status_code (*pf_init_vfhmc)(struct i40iw_sc_dev *, u8, u32 *);
+ enum i40iw_status_code (*vf_configure_vffpm)(struct i40iw_sc_dev *, u32 *);
+};
+
+struct cqp_info {
+ union {
+ struct {
+ struct i40iw_sc_qp *qp;
+ struct i40iw_create_qp_info info;
+ u64 scratch;
+ } qp_create;
+
+ struct {
+ struct i40iw_sc_qp *qp;
+ struct i40iw_modify_qp_info info;
+ u64 scratch;
+ } qp_modify;
+
+ struct {
+ struct i40iw_sc_qp *qp;
+ u64 scratch;
+ bool remove_hash_idx;
+ bool ignore_mw_bnd;
+ } qp_destroy;
+
+ struct {
+ struct i40iw_sc_cq *cq;
+ u64 scratch;
+ bool check_overflow;
+ } cq_create;
+
+ struct {
+ struct i40iw_sc_cq *cq;
+ u64 scratch;
+ } cq_destroy;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_allocate_stag_info info;
+ u64 scratch;
+ } alloc_stag;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ u64 scratch;
+ u32 mw_stag_index;
+ u16 pd_id;
+ } mw_alloc;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_reg_ns_stag_info info;
+ u64 scratch;
+ } mr_reg_non_shared;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_dealloc_stag_info info;
+ u64 scratch;
+ } dealloc_stag;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_local_mac_ipaddr_entry_info info;
+ u64 scratch;
+ } add_local_mac_ipaddr_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_add_arp_cache_entry_info info;
+ u64 scratch;
+ } add_arp_cache_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ u64 scratch;
+ u8 entry_idx;
+ u8 ignore_ref_count;
+ } del_local_mac_ipaddr_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ u64 scratch;
+ u16 arp_index;
+ } del_arp_cache_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_manage_vf_pble_info info;
+ u64 scratch;
+ } manage_vf_pble_bp;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_cqp_manage_push_page_info info;
+ u64 scratch;
+ } manage_push_page;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_upload_context_info info;
+ u64 scratch;
+ } qp_upload_context;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ u64 scratch;
+ } alloc_local_mac_ipaddr_entry;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_hmc_fcn_info info;
+ u64 scratch;
+ } manage_hmc_pm;
+
+ struct {
+ struct i40iw_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_create;
+
+ struct {
+ struct i40iw_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_destroy;
+
+ struct {
+ struct i40iw_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_create;
+
+ struct {
+ struct i40iw_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_destroy;
+
+ struct {
+ struct i40iw_sc_qp *qp;
+ struct i40iw_qp_flush_info info;
+ u64 scratch;
+ } qp_flush_wqes;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ void *fpm_values_va;
+ u64 fpm_values_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } query_fpm_values;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ void *fpm_values_va;
+ u64 fpm_values_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } commit_fpm_values;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_apbvt_info info;
+ u64 scratch;
+ } manage_apbvt_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_qhash_table_info info;
+ u64 scratch;
+ } manage_qhash_table_entry;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_update_sds_info info;
+ u64 scratch;
+ } update_pe_sds;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_sc_qp *qp;
+ u64 scratch;
+ } suspend_resume;
+ } u;
+};
+
+struct cqp_commands_info {
+ struct list_head cqp_cmd_entry;
+ u8 cqp_cmd;
+ u8 post_sq;
+ struct cqp_info in;
+};
+
+struct i40iw_virtchnl_work_info {
+ void (*callback_fcn)(void *vf_dev);
+ void *worker_vf_dev;
+};
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
new file mode 100644
index 000000000000..12acd688def4
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef I40IW_USER_CONTEXT_H
+#define I40IW_USER_CONTEXT_H
+
+#include <linux/types.h>
+
+#define I40IW_ABI_USERSPACE_VER 4
+#define I40IW_ABI_KERNEL_VER 4
+struct i40iw_alloc_ucontext_req {
+ __u32 reserved32;
+ __u8 userspace_ver;
+ __u8 reserved8[3];
+};
+
+struct i40iw_alloc_ucontext_resp {
+ __u32 max_pds; /* maximum pds allowed for this user process */
+ __u32 max_qps; /* maximum qps allowed for this user process */
+ __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmapped area */
+ __u8 kernel_ver;
+ __u8 reserved[3];
+};
+
+struct i40iw_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 reserved[4];
+};
+
+struct i40iw_create_cq_req {
+ __u64 user_cq_buffer;
+ __u64 user_shadow_area;
+};
+
+struct i40iw_create_qp_req {
+ __u64 user_wqe_buffers;
+ __u64 user_compl_ctx;
+
+ /* UDA QP PHB */
+ __u64 user_sq_phb; /* place for VA of the sq phb buff */
+ __u64 user_rq_phb; /* place for VA of the rq phb buff */
+};
+
+enum i40iw_memreg_type {
+ IW_MEMREG_TYPE_MEM = 0x0000,
+ IW_MEMREG_TYPE_QP = 0x0001,
+ IW_MEMREG_TYPE_CQ = 0x0002,
+};
+
+struct i40iw_mem_reg_req {
+ __u16 reg_type; /* Memory, QP or CQ */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct i40iw_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+ __u32 mmap_db_index;
+ __u32 reserved;
+};
+
+struct i40iw_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 i40iw_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd2;
+};
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
new file mode 100644
index 000000000000..f78c3dc8bdb2
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -0,0 +1,1204 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_status.h"
+#include "i40iw_d.h"
+#include "i40iw_user.h"
+#include "i40iw_register.h"
+
+static u32 nop_signature = 0x55550000;
+
+/**
+ * i40iw_nop_1 - insert a nop wqe and move head; no doorbell is rung
+ * @qp: hw qp ptr
+ */
+static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
+{
+ u64 header, *wqe;
+ u64 *wqe_0 = NULL;
+ u32 wqe_idx, peek_head;
+ bool signaled = false;
+
+ if (!qp->sq_ring.head)
+ return I40IW_ERR_PARAM;
+
+ wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ wqe = qp->sq_base[wqe_idx].elem;
+ peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
+ wqe_0 = qp->sq_base[peek_head].elem;
+ if (peek_head)
+ wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
+ else
+ wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+ LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;
+
+ wmb(); /* Memory barrier to ensure data is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+ return 0;
+}
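+
+/*
+ * Note on swqe_polarity (the usual valid-bit convention, followed
+ * throughout this file): the expected value of a WQE's valid bit flips
+ * each time the ring wraps, so consumed slots never need to be zeroed.
+ * Both branches above therefore stamp the *next* slot with whichever
+ * value will read as invalid when the consumer reaches it -- the
+ * inverted polarity normally, the current polarity when the next slot
+ * wraps back to index 0 and the epoch flips.
+ */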
+
+/**
+ * i40iw_qp_post_wr - post wr to hardware
+ * @qp: hw qp ptr
+ */
+void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
+{
+ u64 temp;
+ u32 hw_sq_tail;
+ u32 sw_sq_head;
+
+ mb(); /* valid bit is written and loads completed before reading shadow */
+
+ /* read the doorbell shadow area */
+ get_64bit_val(qp->shadow_area, 0, &temp);
+
+ hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
+ sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ if (sw_sq_head != hw_sq_tail) {
+ if (sw_sq_head > qp->initial_ring.head) {
+ if ((hw_sq_tail >= qp->initial_ring.head) &&
+ (hw_sq_tail < sw_sq_head)) {
+ writel(qp->qp_id, qp->wqe_alloc_reg);
+ }
+ } else if (sw_sq_head != qp->initial_ring.head) {
+ if ((hw_sq_tail >= qp->initial_ring.head) ||
+ (hw_sq_tail < sw_sq_head)) {
+ writel(qp->qp_id, qp->wqe_alloc_reg);
+ }
+ }
+ }
+
+ qp->initial_ring.head = qp->sq_ring.head;
+}
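+
+/*
+ * Worked example of the doorbell decision above (values illustrative):
+ * with a 16-entry SQ ring, initial_ring.head = 2 and sq_ring.head = 5,
+ * the newly posted WQEs occupy slots 2..4.  The doorbell is written only
+ * if the hardware tail read from the shadow area lies in [2, 5) -- the
+ * wrap-aware second branch handles heads that wrapped past the ring end.
+ * Presumably (an interpretation, not stated in this code) this catches
+ * the engine going idle inside the newly posted span without having
+ * observed the new WQEs.
+ */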
+
+/**
+ * i40iw_qp_ring_push_db - ring qp doorbell
+ * @qp: hw qp ptr
+ * @wqe_idx: wqe index
+ */
+static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
+{
+ set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
+ qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+}
+
+/**
+ * i40iw_qp_get_next_send_wqe - return next wqe ptr
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ * @wqe_size: size of sq wqe
+ */
+u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
+ u32 *wqe_idx,
+ u8 wqe_size)
+{
+ u64 *wqe = NULL;
+ u64 wqe_ptr;
+ u32 peek_head = 0;
+ u16 offset;
+ enum i40iw_status_code ret_code = 0;
+ u8 nop_wqe_cnt = 0, i;
+ u64 *wqe_0 = NULL;
+
+ *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
+ offset = (u16)(wqe_ptr) & 0x7F;
+ if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
+ nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
+ for (i = 0; i < nop_wqe_cnt; i++) {
+ i40iw_nop_1(qp);
+ I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return NULL;
+ }
+
+ *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ }
+ for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
+ I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return NULL;
+ }
+
+ wqe = qp->sq_base[*wqe_idx].elem;
+
+ peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ wqe_0 = qp->sq_base[peek_head].elem;
+ if (peek_head & 0x3)
+ wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
+ return wqe;
+}
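+
+/*
+ * Illustrative example of the NOP padding above: WQE slots are 32 bytes
+ * and a WQE must not straddle a 128-byte boundary.  If the current
+ * head's address falls at offset 0x60 within a 128-byte chunk and a
+ * 64-byte WQE is requested, (0x60 + 64) > 128, so one 32-byte NOP is
+ * written to fill out the chunk and the real WQE is allocated at the
+ * start of the next one.
+ */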
+
+/**
+ * i40iw_set_fragment - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ */
+static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
+{
+ if (sge) {
+ set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
+ set_64bit_val(wqe, (offset + 8),
+ (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
+ LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
+ }
+}
+
+/**
+ * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ */
+u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
+{
+ u64 *wqe = NULL;
+ enum i40iw_status_code ret_code;
+
+ if (I40IW_RING_FULL_ERR(qp->rq_ring))
+ return NULL;
+
+ I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+ if (!*wqe_idx)
+ qp->rwqe_polarity = !qp->rwqe_polarity;
+ /* rq_wqe_size_multiplier is the number of qwords in one rq wqe */
+ wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;
+
+ return wqe;
+}
+
+/**
+ * i40iw_rdma_write - rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool post_sq)
+{
+ u64 header;
+ u64 *wqe;
+ struct i40iw_rdma_write *op_info;
+ u32 i, wqe_idx;
+ u32 total_size = 0, byte_off;
+ enum i40iw_status_code ret_code;
+ bool read_fence = false;
+ u8 wqe_size;
+
+ op_info = &info->op.rdma_write;
+ if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].len;
+
+ if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
+ return I40IW_ERR_QP_INVALID_MSG_SIZE;
+
+ read_fence |= info->read_fence;
+
+ ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
+ set_64bit_val(wqe, 16,
+ LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
+ if (!op_info->rem_addr.stag)
+ return I40IW_ERR_BAD_STAG;
+
+ header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
+ LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
+ LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
+ LS_64(read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);
+
+ for (i = 1; i < op_info->num_lo_sges; i++) {
+ byte_off = 32 + (i - 1) * 16;
+ i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_rdma_read - rdma read command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @inv_stag: flag for inv_stag
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool inv_stag,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_rdma_read *op_info;
+ u64 header;
+ u32 wqe_idx;
+ enum i40iw_status_code ret_code;
+ u8 wqe_size;
+ bool local_fence = false;
+
+ op_info = &info->op.rdma_read;
+ ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
+ if (ret_code)
+ return ret_code;
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->lo_addr.len;
+ local_fence |= info->local_fence;
+
+ set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
+ header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
+ LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
+ LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_set_fragment(wqe, 0, &op_info->lo_addr);
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_send - rdma send command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @stag_to_inv: stag_to_inv value
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ u32 stag_to_inv,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_post_send *op_info;
+ u64 header;
+ u32 i, wqe_idx, total_size = 0, byte_off;
+ enum i40iw_status_code ret_code;
+ bool read_fence = false;
+ u8 wqe_size;
+
+ op_info = &info->op.send;
+ if (qp->max_sq_frag_cnt < op_info->num_sges)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].len;
+ ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ read_fence |= info->read_fence;
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
+ set_64bit_val(wqe, 16, 0);
+ header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
+ LS_64(info->op_type, I40IWQPSQ_OPCODE) |
+ LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
+ I40IWQPSQ_ADDFRAGCNT) |
+ LS_64(read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_set_fragment(wqe, 0, op_info->sg_list);
+
+ for (i = 1; i < op_info->num_sges; i++) {
+ byte_off = 32 + (i - 1) * 16;
+ i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_inline_rdma_write - inline rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool post_sq)
+{
+ u64 *wqe;
+ u8 *dest, *src;
+ struct i40iw_inline_rdma_write *op_info;
+ u64 *push;
+ u64 header = 0;
+ u32 i, wqe_idx;
+ enum i40iw_status_code ret_code;
+ bool read_fence = false;
+ u8 wqe_size;
+
+ op_info = &info->op.inline_rdma_write;
+ if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
+ return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+
+ ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ read_fence |= info->read_fence;
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
+ set_64bit_val(wqe, 16,
+ LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
+
+ header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
+ LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
+ LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
+ LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
+ LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
+ LS_64(read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ dest = (u8 *)wqe;
+ src = (u8 *)(op_info->data);
+
+ if (op_info->len <= 16) {
+ for (i = 0; i < op_info->len; i++, src++, dest++)
+ *dest = *src;
+ } else {
+ for (i = 0; i < 16; i++, src++, dest++)
+ *dest = *src;
+ dest = (u8 *)wqe + 32;
+ for (; i < op_info->len; i++, src++, dest++)
+ *dest = *src;
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (qp->push_db) {
+ push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
+ memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
+ i40iw_qp_ring_push_db(qp, wqe_idx);
+ } else {
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_inline_send - inline send operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @stag_to_inv: remote stag
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ u32 stag_to_inv,
+ bool post_sq)
+{
+ u64 *wqe;
+ u8 *dest, *src;
+ struct i40iw_post_inline_send *op_info;
+ u64 header;
+ u32 wqe_idx, i;
+ enum i40iw_status_code ret_code;
+ bool read_fence = false;
+ u8 wqe_size;
+ u64 *push;
+
+ op_info = &info->op.inline_send;
+ if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
+ return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+
+ ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ read_fence |= info->read_fence;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
+ header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
+ LS_64(info->op_type, I40IWQPSQ_OPCODE) |
+ LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
+ LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
+ LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
+ LS_64(read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ dest = (u8 *)wqe;
+ src = (u8 *)(op_info->data);
+
+ if (op_info->len <= 16) {
+ for (i = 0; i < op_info->len; i++, src++, dest++)
+ *dest = *src;
+ } else {
+ for (i = 0; i < 16; i++, src++, dest++)
+ *dest = *src;
+ dest = (u8 *)wqe + 32;
+ for (; i < op_info->len; i++, src++, dest++)
+ *dest = *src;
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (qp->push_db) {
+ push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
+ memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
+ i40iw_qp_ring_push_db(qp, wqe_idx);
+ } else {
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_stag_local_invalidate - stag invalidate operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_inv_local_stag *op_info;
+ u64 header;
+ u32 wqe_idx;
+ bool local_fence = false;
+
+ op_info = &info->op.inv_local_stag;
+ local_fence = info->local_fence;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8,
+ LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
+ set_64bit_val(wqe, 16, 0);
+ header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
+ LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_mw_bind - Memory Window bind operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_bind_window *op_info;
+ u64 header;
+ u32 wqe_idx;
+ bool local_fence = false;
+
+ op_info = &info->op.bind_window;
+
+ local_fence |= info->local_fence;
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
+ set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, 8,
+ LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
+ LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
+ set_64bit_val(wqe, 16, op_info->bind_length);
+ header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
+ LS_64(((op_info->enable_reads << 2) |
+ (op_info->enable_writes << 3)),
+ I40IWQPSQ_STAGRIGHTS) |
+ LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
+ I40IWQPSQ_VABASEDTO) |
+ LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_post_receive - post receive wqe
+ * @qp: hw qp ptr
+ * @info: post rq information
+ */
+static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
+ struct i40iw_post_rq_info *info)
+{
+ u64 *wqe;
+ u64 header;
+ u32 total_size = 0, wqe_idx, i, byte_off;
+
+ if (qp->max_rq_frag_cnt < info->num_sges)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ for (i = 0; i < info->num_sges; i++)
+ total_size += info->sg_list[i].len;
+ wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->rq_wrid_array[wqe_idx] = info->wr_id;
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
+ I40IWQPSQ_ADDFRAGCNT) |
+ LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_set_fragment(wqe, 0, info->sg_list);
+
+ for (i = 1; i < info->num_sges; i++) {
+ byte_off = 32 + (i - 1) * 16;
+ i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ return 0;
+}
+
+/**
+ * i40iw_cq_request_notification - cq notification request (doorbell)
+ * @cq: hw cq
+ * @cq_notify: notification type
+ */
+static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
+ enum i40iw_completion_notify cq_notify)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se = 0;
+ u8 arm_next = 0;
+ u8 arm_seq_num;
+
+ get_64bit_val(cq->shadow_area, 32, &temp_val);
+ arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
+ arm_seq_num++;
+
+ sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
+ arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
+ arm_next_se |= 1;
+ if (cq_notify == IW_CQ_COMPL_EVENT)
+ arm_next = 1;
+ temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
+ LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
+ LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
+ LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);
+
+ set_64bit_val(cq->shadow_area, 32, temp_val);
+
+ wmb(); /* make sure the shadow area is updated before ringing the CQ doorbell */
+
+ writel(cq->cq_id, cq->cqe_alloc_reg);
+}
+
+/**
+ * i40iw_cq_post_entries - update tail in shadow memory
+ * @cq: hw cq
+ * @count: # of entries processed
+ */
+static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
+ u8 count)
+{
+ I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
+ set_64bit_val(cq->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+ return 0;
+}
+
+/**
+ * i40iw_cq_poll_completion - get cq completion info
+ * @cq: hw cq
+ * @info: cq poll information returned
+ * @post_cq: update cq tail
+ */
+static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
+ struct i40iw_cq_poll_info *info,
+ bool post_cq)
+{
+ u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
+ u64 *cqe, *sw_wqe;
+ struct i40iw_qp_uk *qp;
+ struct i40iw_ring *pring = NULL;
+ u32 wqe_idx, q_type, array_idx = 0;
+ enum i40iw_status_code ret_code = 0;
+ enum i40iw_status_code ret_code2 = 0;
+ bool move_cq_head = true;
+ u8 polarity;
+ u8 addl_frag_cnt, addl_wqes = 0;
+
+ if (cq->avoid_mem_cflct)
+ cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
+ else
+ cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);
+
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);
+
+ if (polarity != cq->polarity)
+ return I40IW_ERR_QUEUE_EMPTY;
+
+ q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
+ info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
+ info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
+ if (info->error) {
+ info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
+ info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
+ info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
+ } else {
+ info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+
+ info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+
+ info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
+ info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);
+
+ qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
+ wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
+ info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;
+
+ if (q_type == I40IW_CQE_QTYPE_RQ) {
+ array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
+ if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
+ info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
+ array_idx = qp->rq_ring.tail;
+ } else {
+ info->wr_id = qp->rq_wrid_array[array_idx];
+ }
+
+ info->op_type = I40IW_OP_TYPE_REC;
+ if (qword3 & I40IWCQ_STAG_MASK) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
+ I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
+ pring = &qp->rq_ring;
+ } else {
+ if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
+ info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+
+ info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
+ sw_wqe = qp->sq_base[wqe_idx].elem;
+ get_64bit_val(sw_wqe, 24, &wqe_qword);
+ addl_frag_cnt =
+ (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
+ i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);
+
+ addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
+ I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
+ } else {
+ do {
+ u8 op_type;
+ u32 tail;
+
+ tail = qp->sq_ring.tail;
+ sw_wqe = qp->sq_base[tail].elem;
+ get_64bit_val(sw_wqe, 24, &wqe_qword);
+ op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
+ info->op_type = op_type;
+ addl_frag_cnt = (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
+ i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);
+ addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
+ I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
+ if (op_type != I40IWQP_OP_NOP) {
+ info->wr_id = qp->sq_wrtrk_array[tail].wrid;
+ info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
+ break;
+ }
+ } while (1);
+ }
+ pring = &qp->sq_ring;
+ }
+
+ ret_code = 0;
+
+ if (!ret_code &&
+ (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
+ if (pring && (I40IW_RING_MORE_WORK(*pring)))
+ move_cq_head = false;
+
+ if (move_cq_head) {
+ I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);
+
+ if (ret_code2 && !ret_code)
+ ret_code = ret_code2;
+
+ if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
+ cq->polarity ^= 1;
+
+ if (post_cq) {
+ I40IW_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+ }
+ } else {
+ if (info->is_srq)
+ return ret_code;
+ qword3 &= ~I40IW_CQ_WQEIDX_MASK;
+ qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
+ set_64bit_val(cqe, 24, qword3);
+ }
+
+ return ret_code;
+}
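+
+/*
+ * Minimal polling sketch (illustrative only; the ops member name below
+ * is an assumption, since callers reach this static function through
+ * the iw_cq_ops table assigned in i40iw_cq_uk_init):
+ *
+ *	struct i40iw_cq_poll_info info;
+ *	enum i40iw_status_code ret;
+ *
+ *	do {
+ *		ret = cq->ops.iw_cq_poll_completion(cq, &info, true);
+ *	} while (ret == I40IW_ERR_QUEUE_EMPTY);
+ */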
+
+/**
+ * i40iw_get_wqe_shift - get shift count for maximum wqe size
+ * @wqdepth: depth of wq required.
+ * @sge: maximum scatter-gather elements per wqe
+ * @shift: returns the shift needed based on sge
+ *
+ * Shift is used to left-shift the wqe size based on the sge count.
+ * If sge == 1, shift = 0 (32-byte wqes); for sge == 2 or 3, shift = 1
+ * (64-byte wqes); otherwise shift = 2 (128-byte wqes).
+ */
+enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift)
+{
+ u32 size;
+
+ *shift = 0;
+ if (sge > 1)
+ *shift = (sge < 4) ? 1 : 2;
+
+ /* check that wqdepth is a power of 2 and at least the minimum size */
+
+ if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
+ return I40IW_ERR_INVALID_SIZE;
+
+ size = wqdepth << *shift; /* multiple of 32 bytes count */
+ if (size > I40IWQP_SW_MAX_WQSIZE)
+ return I40IW_ERR_INVALID_SIZE;
+ return 0;
+}
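+
+/*
+ * Example (values illustrative): i40iw_get_wqe_shift(128, 3, &shift)
+ * yields shift = 1 (64-byte wqes) and then validates that 128 << 1
+ * 32-byte units fit within I40IWQP_SW_MAX_WQSIZE.  A depth that is not
+ * a power of two, or below I40IWQP_SW_MIN_WQSIZE, returns
+ * I40IW_ERR_INVALID_SIZE.
+ */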
+
+static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
+ i40iw_qp_post_wr,
+ i40iw_qp_ring_push_db,
+ i40iw_rdma_write,
+ i40iw_rdma_read,
+ i40iw_send,
+ i40iw_inline_rdma_write,
+ i40iw_inline_send,
+ i40iw_stag_local_invalidate,
+ i40iw_mw_bind,
+ i40iw_post_receive,
+ i40iw_nop
+};
+
+static struct i40iw_cq_ops iw_cq_ops = {
+ i40iw_cq_request_notification,
+ i40iw_cq_poll_completion,
+ i40iw_cq_post_entries,
+ i40iw_clean_cq
+};
+
+static struct i40iw_device_uk_ops iw_device_uk_ops = {
+ i40iw_cq_uk_init,
+ i40iw_qp_uk_init,
+};
+
+/**
+ * i40iw_qp_uk_init - initialize shared qp
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ *
+ * Initializes the variables used in both user and kernel mode.
+ * The size of a wqe depends on the maximum number of fragments
+ * allowed, so wqe size * number of wqes must equal the amount of
+ * memory allocated for the sq and rq. If an srq is used, rq_base
+ * points to a single rq wqe only (not the whole array of wqes).
+ */
+enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
+ struct i40iw_qp_uk_init_info *info)
+{
+ enum i40iw_status_code ret_code = 0;
+ u32 sq_ring_size;
+ u8 sqshift, rqshift;
+
+ if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+
+ if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, &sqshift);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, &rqshift);
+ if (ret_code)
+ return ret_code;
+
+ qp->sq_base = info->sq;
+ qp->rq_base = info->rq;
+ qp->shadow_area = info->shadow_area;
+ qp->sq_wrtrk_array = info->sq_wrtrk_array;
+ qp->rq_wrid_array = info->rq_wrid_array;
+
+ qp->wqe_alloc_reg = info->wqe_alloc_reg;
+ qp->qp_id = info->qp_id;
+
+ qp->sq_size = info->sq_size;
+ qp->push_db = info->push_db;
+ qp->push_wqe = info->push_wqe;
+
+ qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
+ sq_ring_size = qp->sq_size << sqshift;
+
+ I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
+ I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
+ I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ I40IW_RING_MOVE_TAIL(qp->sq_ring);
+ I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
+ qp->swqe_polarity = 1;
+ qp->swqe_polarity_deferred = 1;
+ qp->rwqe_polarity = 0;
+
+ if (!qp->use_srq) {
+ qp->rq_size = info->rq_size;
+ qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
+ qp->rq_wqe_size = rqshift;
+ I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
+ qp->rq_wqe_size_multiplier = 4 << rqshift;
+ }
+ qp->ops = iw_qp_uk_ops;
+
+ return ret_code;
+}
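+
+/*
+ * Sizing sketch (derived from the shift logic above; values are
+ * illustrative, and the allocation contract is the one described in the
+ * kernel-doc): with sq_size = 64 and max_sq_frag_cnt = 2, sqshift = 1,
+ * so the SQ ring holds 64 << 1 = 128 minimum-size (32-byte) slots and
+ * the caller must have allocated 128 * 32 = 4096 bytes for info->sq.
+ */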
+
+/**
+ * i40iw_cq_uk_init - initialize shared cq (user and kernel)
+ * @cq: hw cq
+ * @info: hw cq initialization info
+ */
+enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
+ struct i40iw_cq_uk_init_info *info)
+{
+ if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
+ (info->cq_size > I40IW_MAX_CQ_SIZE))
+ return I40IW_ERR_INVALID_SIZE;
+ cq->cq_base = (struct i40iw_cqe *)info->cq_base;
+ cq->cq_id = info->cq_id;
+ cq->cq_size = info->cq_size;
+ cq->cqe_alloc_reg = info->cqe_alloc_reg;
+ cq->shadow_area = info->shadow_area;
+ cq->avoid_mem_cflct = info->avoid_mem_cflct;
+
+ I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+ cq->ops = iw_cq_ops;
+
+ return 0;
+}
+
+/**
+ * i40iw_device_init_uk - setup routines for iwarp shared device
+ * @dev: iwarp shared (user and kernel)
+ */
+void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
+{
+ dev->ops_uk = iw_device_uk_ops;
+}
+
+/**
+ * i40iw_clean_cq - clean cq entries
+ * @queue: completion context
+ * @cq: cq to clean
+ */
+void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
+{
+ u64 *cqe;
+ u64 qword3, comp_ctx;
+ u32 cq_head;
+ u8 polarity, temp;
+
+ cq_head = cq->cq_ring.head;
+ temp = cq->polarity;
+ do {
+ if (cq->avoid_mem_cflct)
+ cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
+ else
+ cqe = (u64 *)&cq->cq_base[cq_head];
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);
+
+ if (polarity != temp)
+ break;
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if ((void *)(unsigned long)comp_ctx == queue)
+ set_64bit_val(cqe, 8, 0);
+
+ cq_head = (cq_head + 1) % cq->cq_ring.size;
+ if (!cq_head)
+ temp ^= 1;
+ } while (true);
+}
+
+/**
+ * i40iw_nop - send a nop
+ * @qp: hw qp ptr
+ * @wr_id: work request id
+ * @signaled: flag if signaled for completion
+ * @post_sq: flag to post sq
+ */
+enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
+ u64 wr_id,
+ bool signaled,
+ bool post_sq)
+{
+ u64 header, *wqe;
+ u32 wqe_idx;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+ LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size of sq wqe returned
+ */
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size)
+{
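+	/* SQ WQEs grow in 32-byte quanta; the minimum WQE covers up to one fragment and each extra quantum covers two more */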
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *wqe_size = I40IW_QP_WQE_MIN_SIZE;
+ break;
+ case 2:
+ case 3:
+ *wqe_size = 64;
+ break;
+ case 4:
+ case 5:
+ *wqe_size = 96;
+ break;
+ case 6:
+ case 7:
+ *wqe_size = 128;
+ break;
+ default:
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size of rq wqe returned
+ */
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *wqe_size = 32;
+ break;
+ case 2:
+ case 3:
+ *wqe_size = 64;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ *wqe_size = 128;
+ break;
+ default:
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_inline_data_size_to_wqesize - calculate wqe size based on inline data size
+ * @data_size: data size for inline
+ * @wqe_size: size of sq wqe returned
+ */
+enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
+ u8 *wqe_size)
+{
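+	/* the first 32-byte quantum carries the WQE header plus up to 16 bytes of inline data; each additional quantum adds 32 bytes */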
+ if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
+ return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+
+ if (data_size <= 16)
+ *wqe_size = I40IW_QP_WQE_MIN_SIZE;
+ else if (data_size <= 48)
+ *wqe_size = 64;
+ else if (data_size <= 80)
+ *wqe_size = 96;
+ else
+ *wqe_size = 128;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
new file mode 100644
index 000000000000..5cd971bb8cc7
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -0,0 +1,442 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_USER_H
+#define I40IW_USER_H
+
+enum i40iw_device_capabilities_const {
+ I40IW_WQE_SIZE = 4,
+ I40IW_CQP_WQE_SIZE = 8,
+ I40IW_CQE_SIZE = 4,
+ I40IW_EXTENDED_CQE_SIZE = 8,
+ I40IW_AEQE_SIZE = 2,
+ I40IW_CEQE_SIZE = 1,
+ I40IW_CQP_CTX_SIZE = 8,
+ I40IW_SHADOW_AREA_SIZE = 8,
+ I40IW_CEQ_MAX_COUNT = 256,
+ I40IW_QUERY_FPM_BUF_SIZE = 128,
+ I40IW_COMMIT_FPM_BUF_SIZE = 128,
+ I40IW_MIN_IW_QP_ID = 1,
+ I40IW_MAX_IW_QP_ID = 262143,
+ I40IW_MIN_CEQID = 0,
+ I40IW_MAX_CEQID = 256,
+ I40IW_MIN_CQID = 0,
+ I40IW_MAX_CQID = 131071,
+ I40IW_MIN_AEQ_ENTRIES = 1,
+ I40IW_MAX_AEQ_ENTRIES = 524287,
+ I40IW_MIN_CEQ_ENTRIES = 1,
+ I40IW_MAX_CEQ_ENTRIES = 131071,
+ I40IW_MIN_CQ_SIZE = 1,
+ I40IW_MAX_CQ_SIZE = 1048575,
+ I40IW_MAX_AEQ_ALLOCATE_COUNT = 255,
+ I40IW_DB_ID_ZERO = 0,
+ I40IW_MAX_WQ_FRAGMENT_COUNT = 6,
+ I40IW_MAX_SGE_RD = 1,
+ I40IW_MAX_OUTBOUND_MESSAGE_SIZE = 2147483647,
+ I40IW_MAX_INBOUND_MESSAGE_SIZE = 2147483647,
+ I40IW_MAX_PUSH_PAGE_COUNT = 4096,
+ I40IW_MAX_PE_ENABLED_VF_COUNT = 32,
+ I40IW_MAX_VF_FPM_ID = 47,
+ I40IW_MAX_VF_PER_PF = 127,
+ I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
+ I40IW_MAX_INLINE_DATA_SIZE = 112,
+ I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 112,
+ I40IW_MAX_IRD_SIZE = 32,
+ I40IW_QPCTX_ENCD_MAXIRD = 3,
+ I40IW_MAX_WQ_ENTRIES = 2048,
+ I40IW_MAX_ORD_SIZE = 32,
+ I40IW_Q2_BUFFER_SIZE = (248 + 100),
+ I40IW_QP_CTX_SIZE = 248
+};
+
+#define i40iw_handle void *
+#define i40iw_adapter_handle i40iw_handle
+#define i40iw_qp_handle i40iw_handle
+#define i40iw_cq_handle i40iw_handle
+#define i40iw_srq_handle i40iw_handle
+#define i40iw_pd_id i40iw_handle
+#define i40iw_stag_handle i40iw_handle
+#define i40iw_stag_index u32
+#define i40iw_stag u32
+#define i40iw_stag_key u8
+
+#define i40iw_tagged_offset u64
+#define i40iw_access_privileges u32
+#define i40iw_physical_fragment u64
+#define i40iw_address_list u64 *
+
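+/* an STag packs a 24-bit index in the upper bits with an 8-bit key in the low byte */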
+#define I40IW_CREATE_STAG(index, key) (((index) << 8) + (key))
+
+#define I40IW_STAG_KEY_FROM_STAG(stag) ((stag) & 0x000000FF)
+
+#define I40IW_STAG_INDEX_FROM_STAG(stag) (((stag) & 0xFFFFFF00) >> 8)
+
+struct i40iw_qp_uk;
+struct i40iw_cq_uk;
+struct i40iw_srq_uk;
+struct i40iw_qp_uk_init_info;
+struct i40iw_cq_uk_init_info;
+struct i40iw_srq_uk_init_info;
+
+struct i40iw_sge {
+ i40iw_tagged_offset tag_off;
+ u32 len;
+ i40iw_stag stag;
+};
+
+#define i40iw_sgl struct i40iw_sge *
+
+struct i40iw_ring {
+ u32 head;
+ u32 tail;
+ u32 size;
+};
+
+struct i40iw_cqe {
+ u64 buf[I40IW_CQE_SIZE];
+};
+
+struct i40iw_extended_cqe {
+ u64 buf[I40IW_EXTENDED_CQE_SIZE];
+};
+
+struct i40iw_wqe {
+ u64 buf[I40IW_WQE_SIZE];
+};
+
+struct i40iw_qp_uk_ops;
+
+enum i40iw_addressing_type {
+ I40IW_ADDR_TYPE_ZERO_BASED = 0,
+ I40IW_ADDR_TYPE_VA_BASED = 1,
+};
+
+#define I40IW_ACCESS_FLAGS_LOCALREAD 0x01
+#define I40IW_ACCESS_FLAGS_LOCALWRITE 0x02
+#define I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
+#define I40IW_ACCESS_FLAGS_REMOTEREAD 0x05
+#define I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
+#define I40IW_ACCESS_FLAGS_REMOTEWRITE 0x0a
+#define I40IW_ACCESS_FLAGS_BIND_WINDOW 0x10
+#define I40IW_ACCESS_FLAGS_ALL 0x1F
+
+#define I40IW_OP_TYPE_RDMA_WRITE 0
+#define I40IW_OP_TYPE_RDMA_READ 1
+#define I40IW_OP_TYPE_SEND 3
+#define I40IW_OP_TYPE_SEND_INV 4
+#define I40IW_OP_TYPE_SEND_SOL 5
+#define I40IW_OP_TYPE_SEND_SOL_INV 6
+#define I40IW_OP_TYPE_REC 7
+#define I40IW_OP_TYPE_BIND_MW 8
+#define I40IW_OP_TYPE_FAST_REG_NSMR 9
+#define I40IW_OP_TYPE_INV_STAG 10
+#define I40IW_OP_TYPE_RDMA_READ_INV_STAG 11
+#define I40IW_OP_TYPE_NOP 12
+
+enum i40iw_completion_status {
+ I40IW_COMPL_STATUS_SUCCESS = 0,
+ I40IW_COMPL_STATUS_FLUSHED,
+ I40IW_COMPL_STATUS_INVALID_WQE,
+ I40IW_COMPL_STATUS_QP_CATASTROPHIC,
+ I40IW_COMPL_STATUS_REMOTE_TERMINATION,
+ I40IW_COMPL_STATUS_INVALID_STAG,
+ I40IW_COMPL_STATUS_BASE_BOUND_VIOLATION,
+ I40IW_COMPL_STATUS_ACCESS_VIOLATION,
+ I40IW_COMPL_STATUS_INVALID_PD_ID,
+ I40IW_COMPL_STATUS_WRAP_ERROR,
+ I40IW_COMPL_STATUS_STAG_INVALID_PDID,
+ I40IW_COMPL_STATUS_RDMA_READ_ZERO_ORD,
+ I40IW_COMPL_STATUS_QP_NOT_PRIVLEDGED,
+ I40IW_COMPL_STATUS_STAG_NOT_INVALID,
+ I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_SIZE,
+ I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_ENTRY,
+ I40IW_COMPL_STATUS_INVALID_FBO,
+ I40IW_COMPL_STATUS_INVALID_LENGTH,
+ I40IW_COMPL_STATUS_INVALID_ACCESS,
+ I40IW_COMPL_STATUS_PHYS_BUFFER_LIST_TOO_LONG,
+ I40IW_COMPL_STATUS_INVALID_VIRT_ADDRESS,
+ I40IW_COMPL_STATUS_INVALID_REGION,
+ I40IW_COMPL_STATUS_INVALID_WINDOW,
+ I40IW_COMPL_STATUS_INVALID_TOTAL_LENGTH
+};
+
+enum i40iw_completion_notify {
+ IW_CQ_COMPL_EVENT = 0,
+ IW_CQ_COMPL_SOLICITED = 1
+};
+
+struct i40iw_post_send {
+ i40iw_sgl sg_list;
+ u8 num_sges;
+};
+
+struct i40iw_post_inline_send {
+ void *data;
+ u32 len;
+};
+
+struct i40iw_post_send_w_inv {
+ i40iw_sgl sg_list;
+ u32 num_sges;
+ i40iw_stag remote_stag_to_inv;
+};
+
+struct i40iw_post_inline_send_w_inv {
+ void *data;
+ u32 len;
+ i40iw_stag remote_stag_to_inv;
+};
+
+struct i40iw_rdma_write {
+ i40iw_sgl lo_sg_list;
+ u8 num_lo_sges;
+ struct i40iw_sge rem_addr;
+};
+
+struct i40iw_inline_rdma_write {
+ void *data;
+ u32 len;
+ struct i40iw_sge rem_addr;
+};
+
+struct i40iw_rdma_read {
+ struct i40iw_sge lo_addr;
+ struct i40iw_sge rem_addr;
+};
+
+struct i40iw_bind_window {
+ i40iw_stag mr_stag;
+ u64 bind_length;
+ void *va;
+ enum i40iw_addressing_type addressing_type;
+ bool enable_reads;
+ bool enable_writes;
+ i40iw_stag mw_stag;
+};
+
+struct i40iw_inv_local_stag {
+ i40iw_stag target_stag;
+};
+
+struct i40iw_post_sq_info {
+ u64 wr_id;
+ u8 op_type;
+ bool signaled;
+ bool read_fence;
+ bool local_fence;
+ bool inline_data;
+ bool defer_flag;
+ union {
+ struct i40iw_post_send send;
+ struct i40iw_post_send send_w_sol;
+ struct i40iw_post_send_w_inv send_w_inv;
+ struct i40iw_post_send_w_inv send_w_sol_inv;
+ struct i40iw_rdma_write rdma_write;
+ struct i40iw_rdma_read rdma_read;
+ struct i40iw_rdma_read rdma_read_inv;
+ struct i40iw_bind_window bind_window;
+ struct i40iw_inv_local_stag inv_local_stag;
+ struct i40iw_inline_rdma_write inline_rdma_write;
+ struct i40iw_post_inline_send inline_send;
+ struct i40iw_post_inline_send inline_send_w_sol;
+ struct i40iw_post_inline_send_w_inv inline_send_w_inv;
+ struct i40iw_post_inline_send_w_inv inline_send_w_sol_inv;
+ } op;
+};
+
+struct i40iw_post_rq_info {
+ u64 wr_id;
+ i40iw_sgl sg_list;
+ u32 num_sges;
+};
+
+struct i40iw_cq_poll_info {
+ u64 wr_id;
+ i40iw_qp_handle qp_handle;
+ u32 bytes_xfered;
+ u32 tcp_seq_num;
+ u32 qp_id;
+ i40iw_stag inv_stag;
+ enum i40iw_completion_status comp_status;
+ u16 major_err;
+ u16 minor_err;
+ u8 op_type;
+ bool stag_invalid_set;
+ bool push_dropped;
+ bool error;
+ bool is_srq;
+ bool solicited_event;
+};
+
+struct i40iw_qp_uk_ops {
+ void (*iw_qp_post_wr)(struct i40iw_qp_uk *);
+ void (*iw_qp_ring_push_db)(struct i40iw_qp_uk *, u32);
+ enum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool);
+ enum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool, bool);
+ enum i40iw_status_code (*iw_send)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, u32, bool);
+ enum i40iw_status_code (*iw_inline_rdma_write)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool);
+ enum i40iw_status_code (*iw_inline_send)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, u32, bool);
+ enum i40iw_status_code (*iw_stag_local_invalidate)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool);
+ enum i40iw_status_code (*iw_mw_bind)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool);
+ enum i40iw_status_code (*iw_post_receive)(struct i40iw_qp_uk *,
+ struct i40iw_post_rq_info *);
+ enum i40iw_status_code (*iw_post_nop)(struct i40iw_qp_uk *, u64, bool, bool);
+};
+
+struct i40iw_cq_ops {
+ void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
+ enum i40iw_completion_notify);
+ enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
+ struct i40iw_cq_poll_info *, bool);
+ enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
+ void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
+};
+
+struct i40iw_dev_uk;
+
+struct i40iw_device_uk_ops {
+ enum i40iw_status_code (*iwarp_cq_uk_init)(struct i40iw_cq_uk *,
+ struct i40iw_cq_uk_init_info *);
+ enum i40iw_status_code (*iwarp_qp_uk_init)(struct i40iw_qp_uk *,
+ struct i40iw_qp_uk_init_info *);
+};
+
+struct i40iw_dev_uk {
+ struct i40iw_device_uk_ops ops_uk;
+};
+
+struct i40iw_sq_uk_wr_trk_info {
+ u64 wrid;
+ u64 wr_len;
+};
+
+struct i40iw_qp_quanta {
+ u64 elem[I40IW_WQE_SIZE];
+};
+
+struct i40iw_qp_uk {
+ struct i40iw_qp_quanta *sq_base;
+ struct i40iw_qp_quanta *rq_base;
+ u32 __iomem *wqe_alloc_reg;
+ struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u64 *shadow_area;
+ u32 *push_db;
+ u64 *push_wqe;
+ struct i40iw_ring sq_ring;
+ struct i40iw_ring rq_ring;
+ struct i40iw_ring initial_ring;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ struct i40iw_qp_uk_ops ops;
+ bool use_srq;
+ u8 swqe_polarity;
+ u8 swqe_polarity_deferred;
+ u8 rwqe_polarity;
+ u8 rq_wqe_size;
+ u8 rq_wqe_size_multiplier;
+ u8 max_sq_frag_cnt;
+ u8 max_rq_frag_cnt;
+ bool deferred_flag;
+};
+
+struct i40iw_cq_uk {
+ struct i40iw_cqe *cq_base;
+ u32 __iomem *cqe_alloc_reg;
+ u64 *shadow_area;
+ u32 cq_id;
+ u32 cq_size;
+ struct i40iw_ring cq_ring;
+ u8 polarity;
+ bool avoid_mem_cflct;
+
+ struct i40iw_cq_ops ops;
+};
+
+struct i40iw_qp_uk_init_info {
+ struct i40iw_qp_quanta *sq;
+ struct i40iw_qp_quanta *rq;
+ u32 __iomem *wqe_alloc_reg;
+ u64 *shadow_area;
+ struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 *push_db;
+ u64 *push_wqe;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u8 max_sq_frag_cnt;
+ u8 max_rq_frag_cnt;
+};
+
+struct i40iw_cq_uk_init_info {
+ u32 __iomem *cqe_alloc_reg;
+ struct i40iw_cqe *cq_base;
+ u64 *shadow_area;
+ u32 cq_size;
+ u32 cq_id;
+ bool avoid_mem_cflct;
+};
+
+void i40iw_device_init_uk(struct i40iw_dev_uk *dev);
+
+void i40iw_qp_post_wr(struct i40iw_qp_uk *qp);
+u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx,
+ u8 wqe_size);
+u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx);
+u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx);
+
+enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
+ struct i40iw_cq_uk_init_info *info);
+enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
+ struct i40iw_qp_uk_init_info *info);
+
+void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq);
+enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,
+ bool signaled, bool post_sq);
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size);
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size);
+enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
+ u8 *wqe_size);
+enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift);
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
new file mode 100644
index 000000000000..1ceec81bd8eb
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -0,0 +1,1270 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include "i40iw.h"
+
+/**
+ * i40iw_arp_table - manage arp table
+ * @iwdev: iwarp device
+ * @ip_addr: ip address for device
+ * @mac_addr: mac address ptr
+ * @action: modify, delete or add
+ */
+int i40iw_arp_table(struct i40iw_device *iwdev,
+ __be32 *ip_addr,
+ bool ipv4,
+ u8 *mac_addr,
+ u32 action)
+{
+ int arp_index;
+ int err;
+ u32 ip[4];
+
+ if (ipv4) {
+ memset(ip, 0, sizeof(ip));
+ ip[0] = *ip_addr;
+ } else {
+ memcpy(ip, ip_addr, sizeof(ip));
+ }
+
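+	/* look up an existing entry for this address */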
+ for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
+ if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
+ break;
+ switch (action) {
+ case I40IW_ARP_ADD:
+ if (arp_index != iwdev->arp_table_size)
+ return -1;
+
+ arp_index = 0;
+ err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
+ iwdev->arp_table_size,
+ (u32 *)&arp_index,
+ &iwdev->next_arp_index);
+
+ if (err)
+ return err;
+
+ memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
+ ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
+ break;
+ case I40IW_ARP_RESOLVE:
+ if (arp_index == iwdev->arp_table_size)
+ return -1;
+ break;
+ case I40IW_ARP_DELETE:
+ if (arp_index == iwdev->arp_table_size)
+ return -1;
+ memset(iwdev->arp_table[arp_index].ip_addr, 0,
+ sizeof(iwdev->arp_table[arp_index].ip_addr));
+ eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
+ i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
+ break;
+ default:
+ return -1;
+ }
+ return arp_index;
+}
+
+/**
+ * i40iw_wr32 - write 32 bits to hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ * @value: value to write to register
+ */
+inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
+{
+ writel(value, hw->hw_addr + reg);
+}
+
+/**
+ * i40iw_rd32 - read a 32 bit hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ *
+ * Return value of register content
+ */
+inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
+/**
+ * i40iw_inetaddr_event - system notifier for ipv4 address events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address (struct in_ifaddr)
+ */
+int i40iw_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+ struct net_device *netdev;
+ struct net_device *upper_dev;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+ __be32 local_ipaddr;
+
+ hdl = i40iw_find_netdev(event_netdev);
+ if (!hdl)
+ return NOTIFY_DONE;
+
+ iwdev = &hdl->device;
+ netdev = iwdev->ldev->netdev;
+ upper_dev = netdev_master_upper_dev_get(netdev);
+ if (netdev != event_netdev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ if (upper_dev)
+ local_ipaddr =
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+ else
+ local_ipaddr = ifa->ifa_address;
+ local_ipaddr = ntohl(local_ipaddr);
+ i40iw_manage_arp_cache(iwdev,
+ netdev->dev_addr,
+ &local_ipaddr,
+ true,
+ I40IW_ARP_DELETE);
+ return NOTIFY_OK;
+	case NETDEV_UP:
+		/* Fall through */
+	case NETDEV_CHANGEADDR:
+		/* Add the address to the IP table */
+		if (upper_dev)
+			local_ipaddr =
+				((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+		else
+			local_ipaddr = ifa->ifa_address;
+
+		local_ipaddr = ntohl(local_ipaddr);
+		i40iw_manage_arp_cache(iwdev,
+				       netdev->dev_addr,
+				       &local_ipaddr,
+				       true,
+				       I40IW_ARP_ADD);
+		break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
+ * i40iw_inet6addr_event - system notifier for ipv6 address events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address (struct inet6_ifaddr)
+ */
+int i40iw_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr)
+{
+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+ struct net_device *event_netdev = ifa->idev->dev;
+ struct net_device *netdev;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+ __be32 local_ipaddr6[4];
+
+ hdl = i40iw_find_netdev(event_netdev);
+ if (!hdl)
+ return NOTIFY_DONE;
+
+ iwdev = &hdl->device;
+ netdev = iwdev->ldev->netdev;
+ if (netdev != event_netdev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+ i40iw_manage_arp_cache(iwdev,
+ netdev->dev_addr,
+ local_ipaddr6,
+ false,
+ I40IW_ARP_DELETE);
+ return NOTIFY_OK;
+ case NETDEV_UP:
+ /* Fall through */
+ case NETDEV_CHANGEADDR:
+ i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+ i40iw_manage_arp_cache(iwdev,
+ netdev->dev_addr,
+ local_ipaddr6,
+ false,
+ I40IW_ARP_ADD);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
+ * i40iw_net_event - system notifier for net events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: neighbor
+ */
+int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
+{
+ struct neighbour *neigh = ptr;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *iwhdl;
+ __be32 *p;
+ u32 local_ipaddr[4];
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev);
+ if (!iwhdl)
+ return NOTIFY_DONE;
+ iwdev = &iwhdl->device;
+ p = (__be32 *)neigh->primary_key;
+ i40iw_copy_ip_ntohl(local_ipaddr, p);
+ if (neigh->nud_state & NUD_VALID) {
+ i40iw_manage_arp_cache(iwdev,
+ neigh->ha,
+ local_ipaddr,
+ false,
+ I40IW_ARP_ADD);
+
+ } else {
+ i40iw_manage_arp_cache(iwdev,
+ neigh->ha,
+ local_ipaddr,
+ false,
+ I40IW_ARP_DELETE);
+ }
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
+ * i40iw_get_cqp_request - get cqp struct
+ * @cqp: device cqp ptr
+ * @wait: true if the request will be used in wait mode
+ */
+struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
+{
+ struct i40iw_cqp_request *cqp_request = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ if (!list_empty(&cqp->cqp_avail_reqs)) {
+ cqp_request = list_entry(cqp->cqp_avail_reqs.next,
+ struct i40iw_cqp_request, list);
+ list_del_init(&cqp_request->list);
+ }
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
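+	/* preallocated pool empty; fall back to a dynamic allocation */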
+ if (!cqp_request) {
+ cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
+ if (cqp_request) {
+ cqp_request->dynamic = true;
+ INIT_LIST_HEAD(&cqp_request->list);
+ init_waitqueue_head(&cqp_request->waitq);
+ }
+ }
+ if (!cqp_request) {
+ i40iw_pr_err("CQP Request Fail: No Memory");
+ return NULL;
+ }
+
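+	/* a waiting request holds an extra reference so it is not freed before the waiter has read the completion */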
+ if (wait) {
+ atomic_set(&cqp_request->refcount, 2);
+ cqp_request->waiting = true;
+ } else {
+ atomic_set(&cqp_request->refcount, 1);
+ }
+ return cqp_request;
+}
+
+/**
+ * i40iw_free_cqp_request - free cqp request
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
+{
+ unsigned long flags;
+
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ cqp_request->request_done = false;
+ cqp_request->callback_fcn = NULL;
+ cqp_request->waiting = false;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
+ }
+}
+
+/**
+ * i40iw_put_cqp_request - dec ref count and free if 0
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
+ struct i40iw_cqp_request *cqp_request)
+{
+ if (atomic_dec_and_test(&cqp_request->refcount))
+ i40iw_free_cqp_request(cqp, cqp_request);
+}
+
+/**
+ * i40iw_free_qp - callback after destroy cqp completes
+ * @cqp_request: cqp request for destroy qp
+ * @num: not used
+ */
+static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
+{
+ struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+ struct i40iw_device *iwdev;
+ u32 qp_num = iwqp->ibqp.qp_num;
+
+ iwdev = iwqp->iwdev;
+
+ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+ i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+}
+
+/**
+ * i40iw_wait_event - wait for completion
+ * @iwdev: iwarp device
+ * @cqp_request: cqp request to wait
+ */
+static int i40iw_wait_event(struct i40iw_device *iwdev,
+ struct i40iw_cqp_request *cqp_request)
+{
+ struct cqp_commands_info *info = &cqp_request->info;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ bool cqp_error = false;
+ int err_code = 0;
+ int timeout_ret = 0;
+
+ timeout_ret = wait_event_timeout(cqp_request->waitq,
+ cqp_request->request_done,
+ I40IW_EVENT_TIMEOUT);
+ if (!timeout_ret) {
+ i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
+ info->cqp_cmd, timeout_ret);
+ err_code = -ETIME;
+ i40iw_request_reset(iwdev);
+ goto done;
+ }
+ cqp_error = cqp_request->compl_info.error;
+ if (cqp_error) {
+ i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
+ info->cqp_cmd, cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
+ err_code = -EPROTO;
+ goto done;
+ }
+done:
+ i40iw_put_cqp_request(iwcqp, cqp_request);
+ return err_code;
+}
+
+/**
+ * i40iw_handle_cqp_op - process cqp command
+ * @iwdev: iwarp device
+ * @cqp_request: cqp request to process
+ */
+enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
+					   struct i40iw_cqp_request *cqp_request)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+ struct cqp_commands_info *info = &cqp_request->info;
+ int err_code = 0;
+
+ status = i40iw_process_cqp_cmd(dev, info);
+ if (status) {
+ i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
+ i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
+ return status;
+ }
+ if (cqp_request->waiting)
+ err_code = i40iw_wait_event(iwdev, cqp_request);
+ if (err_code)
+ status = I40IW_ERR_CQP_COMPL_ERROR;
+ return status;
+}
+
+/**
+ * i40iw_add_pdusecount - add pd refcount
+ * @iwpd: pd for refcount
+ */
+void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
+{
+ atomic_inc(&iwpd->usecount);
+}
+
+/**
+ * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
+ * @iwpd: pd for refcount
+ * @iwdev: iwarp device
+ */
+void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
+{
+ if (!atomic_dec_and_test(&iwpd->usecount))
+ return;
+ i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
+ kfree(iwpd);
+}
+
+/**
+ * i40iw_add_ref - add refcount for qp
+ * @ibqp: iqarp qp
+ */
+void i40iw_add_ref(struct ib_qp *ibqp)
+{
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
+
+ atomic_inc(&iwqp->refcount);
+}
+
+/**
+ * i40iw_rem_ref - rem refcount for qp and free if 0
+ * @ibqp: iqarp qp
+ */
+void i40iw_rem_ref(struct ib_qp *ibqp)
+{
+ struct i40iw_qp *iwqp;
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev;
+ u32 qp_num;
+
+ iwqp = to_iwqp(ibqp);
+ if (!atomic_dec_and_test(&iwqp->refcount))
+ return;
+
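+	/* last reference dropped; remove the qp from the table and destroy it asynchronously via a cqp request */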
+ iwdev = iwqp->iwdev;
+ qp_num = iwqp->ibqp.qp_num;
+ iwdev->qp_table[qp_num] = NULL;
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_request->callback_fcn = i40iw_free_qp;
+ cqp_request->param = (void *)&iwqp->sc_qp;
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_QP_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Destroy QP fail");
+}
+
+/**
+ * i40iw_get_qp - get qp address
+ * @device: iwarp device
+ * @qpn: qp number
+ */
+struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
+{
+ struct i40iw_device *iwdev = to_iwdev(device);
+
+ if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
+ return NULL;
+
+ return &iwdev->qp_table[qpn]->ibqp;
+}
+
+/**
+ * i40iw_debug_buf - print debug msg and buffer if mask set
+ * @dev: hardware control device structure
+ * @mask: mask to compare if to print debug buffer
+ * @desc: description of the buffer being printed
+ * @buf: points to buffer addr
+ * @size: size of buffer to print
+ */
+void i40iw_debug_buf(struct i40iw_sc_dev *dev,
+ enum i40iw_debug_flag mask,
+ char *desc,
+ u64 *buf,
+ u32 size)
+{
+ u32 i;
+
+ if (!(dev->debug_mask & mask))
+ return;
+ i40iw_debug(dev, mask, "%s\n", desc);
+ i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
+ (unsigned long long)virt_to_phys(buf));
+
+ for (i = 0; i < size; i += 8)
+ i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
+}
+
+/**
+ * i40iw_get_hw_addr - return hw addr
+ * @par: points to shared dev
+ */
+u8 __iomem *i40iw_get_hw_addr(void *par)
+{
+ struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;
+
+ return dev->hw->hw_addr;
+}
+
+/**
+ * i40iw_remove_head - return head entry and remove from list
+ * @list: list for entry
+ */
+void *i40iw_remove_head(struct list_head *list)
+{
+ struct list_head *entry;
+
+ if (list_empty(list))
+ return NULL;
+
+ entry = (void *)list->next;
+ list_del(entry);
+ return (void *)entry;
+}
+
+/**
+ * i40iw_allocate_dma_mem - Memory alloc helper fn
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ */
+enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
+ struct i40iw_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+
+ if (!mem)
+ return I40IW_ERR_PARAM;
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ if (!mem->va)
+ return I40IW_ERR_NO_MEMORY;
+ return 0;
+}
+
+/**
+ * i40iw_free_dma_mem - Memory free helper fn
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ */
+void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
+{
+ struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+
+ if (!mem || !mem->va)
+ return;
+
+ dma_free_coherent(&pcidev->dev, mem->size,
+ mem->va, (dma_addr_t)mem->pa);
+ mem->va = NULL;
+}
+
+/**
+ * i40iw_allocate_virt_mem - virtual memory alloc helper fn
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ */
+enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
+ struct i40iw_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return I40IW_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+	if (!mem->va)
+		return I40IW_ERR_NO_MEMORY;
+
+	return 0;
+}
+
+/**
+ * i40iw_free_virt_mem - virtual memory free helper fn
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ */
+enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
+ struct i40iw_virt_mem *mem)
+{
+ if (!mem)
+ return I40IW_ERR_PARAM;
+ kfree(mem->va);
+ mem->va = NULL;
+ return 0;
+}
+
+/**
+ * i40iw_cqp_sds_cmd - create cqp command for sd
+ * @dev: hardware control device structure
+ * @sdinfo: information for sd cqp
+ *
+ */
+enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *sdinfo)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
+ sizeof(cqp_info->in.u.update_pe_sds.info));
+ cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.update_pe_sds.dev = dev;
+ cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Update SD's fail");
+ return status;
+}
+
+/**
+ * i40iw_term_modify_qp - modify qp for term message
+ * @qp: hardware control qp
+ * @next_state: qp's next state
+ * @term: terminate code
+ * @term_len: length
+ */
+void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
+{
+ struct i40iw_qp *iwqp;
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
+};
+
+/**
+ * i40iw_terminate_done - after terminate is completed
+ * @qp: hardware control qp
+ * @timeout_occurred: indicates if terminate timer expired
+ */
+void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
+{
+ struct i40iw_qp *iwqp;
+ u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
+ u8 hte = 0;
+ bool first_time;
+ unsigned long flags;
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->hte_added) {
+ iwqp->hte_added = 0;
+ hte = 1;
+ }
+ first_time = !(qp->term_flags & I40IW_TERM_DONE);
+ qp->term_flags |= I40IW_TERM_DONE;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (first_time) {
+ if (!timeout_occurred)
+ i40iw_terminate_del_timer(qp);
+ else
+ next_iwarp_state = I40IW_QP_STATE_CLOSING;
+
+ i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
+ i40iw_cm_disconn(iwqp);
+ }
+}
+
+/**
+ * i40iw_terminate_timeout - timeout happened
+ * @context: points to iwarp qp
+ */
+static void i40iw_terminate_timeout(unsigned long context)
+{
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
+ struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
+
+ i40iw_terminate_done(qp, 1);
+}
+
+/**
+ * i40iw_terminate_start_timer - start terminate timeout
+ * @qp: hardware control qp
+ */
+void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
+{
+ struct i40iw_qp *iwqp;
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ init_timer(&iwqp->terminate_timer);
+ iwqp->terminate_timer.function = i40iw_terminate_timeout;
+ iwqp->terminate_timer.expires = jiffies + HZ;
+ iwqp->terminate_timer.data = (unsigned long)iwqp;
+ add_timer(&iwqp->terminate_timer);
+}
+
+/**
+ * i40iw_terminate_del_timer - delete terminate timeout
+ * @qp: hardware control qp
+ */
+void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
+{
+ struct i40iw_qp *iwqp;
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ del_timer(&iwqp->terminate_timer);
+}
+
+/**
+ * i40iw_cqp_generic_worker - generic worker for cqp
+ * @work: work pointer
+ */
+static void i40iw_cqp_generic_worker(struct work_struct *work)
+{
+ struct i40iw_virtchnl_work_info *work_info =
+ &((struct virtchnl_work *)work)->work_info;
+
+ if (work_info->worker_vf_dev)
+ work_info->callback_fcn(work_info->worker_vf_dev);
+}
+
+/**
+ * i40iw_cqp_spawn_worker - spawn worker thread
+ * @dev: device struct pointer
+ * @work_info: work request info
+ * @iw_vf_idx: virtual function index
+ */
+void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_work_info *work_info,
+ u32 iw_vf_idx)
+{
+ struct virtchnl_work *work;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ work = &iwdev->virtchnl_w[iw_vf_idx];
+ memcpy(&work->work_info, work_info, sizeof(*work_info));
+ INIT_WORK(&work->work, i40iw_cqp_generic_worker);
+ queue_work(iwdev->virtchnl_wq, &work->work);
+}
+
+/**
+ * i40iw_cqp_manage_hmc_fcn_worker - worker to invoke the hmc function callback
+ * @work: work pointer for hmc info
+ */
+static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
+{
+ struct i40iw_cqp_request *cqp_request =
+ ((struct virtchnl_work *)work)->cqp_request;
+ struct i40iw_ccq_cqe_info ccq_cqe_info;
+ struct i40iw_hmc_fcn_info *hmcfcninfo =
+ &cqp_request->info.in.u.manage_hmc_pm.info;
+ struct i40iw_device *iwdev =
+ (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;
+
+ ccq_cqe_info.cqp = NULL;
+ ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
+ ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
+ ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
+ ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
+ ccq_cqe_info.scratch = 0;
+ ccq_cqe_info.error = cqp_request->compl_info.error;
+ hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
+ hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
+ i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+}
+
+/**
+ * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
+ * @cqp_request: cqp request info struct for hmc fun
+ * @unused: unused param of callback
+ */
+static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
+ u32 unused)
+{
+ struct virtchnl_work *work;
+ struct i40iw_hmc_fcn_info *hmcfcninfo =
+ &cqp_request->info.in.u.manage_hmc_pm.info;
+ struct i40iw_device *iwdev =
+ (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->
+ back_dev;
+
+ if (hmcfcninfo && hmcfcninfo->callback_fcn) {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
+ atomic_inc(&cqp_request->refcount);
+ work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
+ work->cqp_request = cqp_request;
+ INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
+ queue_work(iwdev->virtchnl_wq, &work->work);
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
+ } else {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
+ }
+}
+
+/**
+ * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
+ * @dev: hardware control device structure
+ * @hmcfcninfo: info for hmc
+ */
+enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_fcn_info *hmcfcninfo)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
+ cqp_request->param = hmcfcninfo;
+ memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
+ sizeof(*hmcfcninfo));
+ cqp_info->in.u.manage_hmc_pm.dev = dev;
+ cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Manage HMC fail");
+ return status;
+}
+
+/**
+ * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
+ * @dev: hardware control device structure
+ * @values_mem: buffer for fpm
+ * @hmc_fn_id: function id for fpm
+ */
+enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *values_mem,
+ u8 hmc_fn_id)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ cqp_request->param = NULL;
+ cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
+ cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
+ cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
+ cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
+ cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Query FPM fail");
+ return status;
+}
+
+/**
+ * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
+ * @dev: hardware control device structure
+ * @values_mem: buffer with fpm values
+ * @hmc_fn_id: function id for fpm
+ */
+enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *values_mem,
+ u8 hmc_fn_id)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ cqp_request->param = NULL;
+ cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
+ cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
+ cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
+ cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
+ cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Commit FPM fail");
+ return status;
+}
+
+/**
+ * i40iw_vf_wait_vchnl_resp - wait for channel msg
+ * @dev: hardware control device structure
+ */
+enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_device *iwdev = dev->back_dev;
+ enum i40iw_status_code err_code = 0;
+ int timeout_ret;
+
+ i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
+ __func__, __LINE__, dev, iwdev);
+ atomic_add(2, &iwdev->vchnl_msgs);
+ timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
+ (atomic_read(&iwdev->vchnl_msgs) == 1),
+ I40IW_VCHNL_EVENT_TIMEOUT);
+ atomic_dec(&iwdev->vchnl_msgs);
+ if (!timeout_ret) {
+ i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
+ err_code = I40IW_ERR_TIMEOUT;
+ }
+ return err_code;
+}
+
+/**
+ * i40iw_ieq_mpa_crc_ae - generate AE for crc error
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_qp_flush_info info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
+ memset(&info, 0, sizeof(info));
+ info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
+ info.generate_ae = true;
+ info.ae_source = 0x3;
+ (void)i40iw_hw_flush_wqes(iwdev, qp, &info, false);
+}
+
+/**
+ * i40iw_init_hash_desc - initialize hash for crc calculation
+ * @desc: double pointer to the shash descriptor to allocate and initialize
+ */
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *tdesc;
+
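+	/* the MPA CRC is crc32c, computed through the kernel crypto shash API */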
+ tfm = crypto_alloc_shash("crc32c", 0, 0);
+ if (IS_ERR(tfm))
+ return I40IW_ERR_MPA_CRC;
+
+ tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!tdesc) {
+ crypto_free_shash(tfm);
+ return I40IW_ERR_MPA_CRC;
+ }
+ tdesc->tfm = tfm;
+ *desc = tdesc;
+
+ return 0;
+}
+
+/**
+ * i40iw_free_hash_desc - free hash desc
+ * @desc: to be freed
+ */
+void i40iw_free_hash_desc(struct shash_desc *desc)
+{
+ if (desc) {
+ crypto_free_shash(desc->tfm);
+ kfree(desc);
+ }
+}
+
+/**
+ * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
+ * @dev: hardware control device structure
+ * @mem: buffer ptr for fpm to be allocated
+ * @return: memory allocation status
+ */
+enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *mem)
+{
+ enum i40iw_status_code status;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
+ return status;
+}
+
+/**
+ * i40iw_ieq_check_mpacrc - check if mpa crc is OK
+ * @desc: desc for hash
+ * @addr: address of buffer for crc
+ * @length: length of buffer
+ * @value: value to be compared
+ */
+enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
+ void *addr,
+ u32 length,
+ u32 value)
+{
+ u32 crc = 0;
+ int ret;
+ enum i40iw_status_code ret_code = 0;
+
+ crypto_shash_init(desc);
+ ret = crypto_shash_update(desc, addr, length);
+ if (!ret)
+ crypto_shash_final(desc, (u8 *)&crc);
+ if (crc != value) {
+ i40iw_pr_err("mpa crc check fail\n");
+ ret_code = I40IW_ERR_MPA_CRC;
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_ieq_get_qp - get qp based on quad in puda buffer
+ * @dev: hardware control device structure
+ * @buf: receive puda buffer on exception q
+ */
+struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_qp *iwqp;
+ struct i40iw_cm_node *cm_node;
+ u32 loc_addr[4], rem_addr[4];
+ u16 loc_port, rem_port;
+ struct ipv6hdr *ip6h;
+ struct iphdr *iph = (struct iphdr *)buf->iph;
+ struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
+
+ if (iph->version == 4) {
+ memset(loc_addr, 0, sizeof(loc_addr));
+ loc_addr[0] = ntohl(iph->daddr);
+ memset(rem_addr, 0, sizeof(rem_addr));
+ rem_addr[0] = ntohl(iph->saddr);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
+ i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
+ }
+ loc_port = ntohs(tcph->dest);
+ rem_port = ntohs(tcph->source);
+
+ cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
+ loc_addr, false);
+ if (!cm_node)
+ return NULL;
+ iwqp = cm_node->iwqp;
+ return &iwqp->sc_qp;
+}
+
+/**
+ * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
+ * @buf: puda to update
+ * @length: length of buffer
+ * @seqnum: seq number for tcp
+ */
+void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
+{
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u16 iphlen;
+ u16 packetsize;
+ u8 *addr = (u8 *)buf->mem.va;
+
+ iphlen = (buf->ipv4) ? 20 : 40;
+ iph = (struct iphdr *)(addr + buf->maclen);
+ tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
+ packetsize = length + buf->tcphlen + iphlen;
+
+ iph->tot_len = htons(packetsize);
+ tcph->seq = htonl(seqnum);
+}
+
+/**
+ * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
+ * @info: completion info for the received buffer
+ * @buf: puda buffer
+ */
+enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
+ struct i40iw_puda_buf *buf)
+{
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ u16 iphlen;
+ u16 pkt_len;
+ u8 *mem = (u8 *)buf->mem.va;
+ struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;
+
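+	/* 0x8100 is the 802.1Q VLAN tagged-frame ethertype */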
+ if (ethh->h_proto == htons(0x8100)) {
+ info->vlan_valid = true;
+ buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
+ }
+ buf->maclen = (info->vlan_valid) ? 18 : 14;
+ iphlen = (info->l3proto) ? 40 : 20;
+	buf->ipv4 = !info->l3proto;
+ buf->iph = mem + buf->maclen;
+ iph = (struct iphdr *)buf->iph;
+
+ buf->tcph = buf->iph + iphlen;
+ tcph = (struct tcphdr *)buf->tcph;
+
+ if (buf->ipv4) {
+ pkt_len = ntohs(iph->tot_len);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ pkt_len = ntohs(ip6h->payload_len) + iphlen;
+ }
+
+ buf->totallen = pkt_len + buf->maclen;
+
+ if (info->payload_len < buf->totallen - 4) {
+		i40iw_pr_err("payload_len = 0x%x totallen expected 0x%x\n",
+ info->payload_len, buf->totallen);
+ return I40IW_ERR_INVALID_SIZE;
+ }
+
+ buf->tcphlen = (tcph->doff) << 2;
+ buf->datalen = pkt_len - iphlen - buf->tcphlen;
+ buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
+ buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
+ buf->seqnum = ntohl(tcph->seq);
+ return 0;
+}
+
+/**
+ * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
+ * @dev: hardware control device structure
+ */
+static void i40iw_hw_stats_timeout(unsigned long dev)
+{
+ struct i40iw_sc_dev *pf_dev = (struct i40iw_sc_dev *)dev;
+ struct i40iw_dev_pestat *pf_devstat = &pf_dev->dev_pestat;
+ struct i40iw_dev_pestat *vf_devstat = NULL;
+ u16 iw_vf_idx;
+ unsigned long flags;
+
+ /*PF*/
+ pf_devstat->ops.iw_hw_stat_read_all(pf_devstat, &pf_devstat->hw_stats);
+ for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
+ spin_lock_irqsave(&pf_devstat->stats_lock, flags);
+ if (pf_dev->vf_dev[iw_vf_idx]) {
+ if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
+ vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->dev_pestat;
+ vf_devstat->ops.iw_hw_stat_read_all(vf_devstat, &vf_devstat->hw_stats);
+ }
+ }
+ spin_unlock_irqrestore(&pf_devstat->stats_lock, flags);
+ }
+
+ mod_timer(&pf_devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * i40iw_hw_stats_start_timer - Start periodic stats timer
+ * @dev: hardware control device structure
+ */
+void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+
+ init_timer(&devstat->stats_timer);
+ devstat->stats_timer.function = i40iw_hw_stats_timeout;
+ devstat->stats_timer.data = (unsigned long)dev;
+ mod_timer(&devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * i40iw_hw_stats_del_timer - Delete periodic stats timer
+ * @dev: hardware control device structure
+ */
+void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+
+ del_timer_sync(&devstat->stats_timer);
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
new file mode 100644
index 000000000000..1fe3b84a06e4
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -0,0 +1,2437 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <asm/byteorder.h>
+#include <net/ip.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_umem.h>
+#include "i40iw.h"
+
+/**
+ * i40iw_query_device - get device attributes
+ * @ibdev: device pointer from stack
+ * @props: returning device attributes
+ * @udata: user data
+ */
+static int i40iw_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+
+ if (udata->inlen || udata->outlen)
+ return -EINVAL;
+ memset(props, 0, sizeof(*props));
+ ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
+ props->fw_ver = I40IW_FW_VERSION;
+ props->device_cap_flags = iwdev->device_cap_flags;
+ props->vendor_id = iwdev->vendor_id;
+ props->vendor_part_id = iwdev->vendor_part_id;
+ props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
+ props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
+ props->max_qp = iwdev->max_qp;
+ props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
+ props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ props->max_cq = iwdev->max_cq;
+ props->max_cqe = iwdev->max_cqe;
+ props->max_mr = iwdev->max_mr;
+ props->max_pd = iwdev->max_pd;
+ props->max_sge_rd = 1;
+ props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
+ props->max_qp_init_rd_atom = props->max_qp_rd_atom;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->max_map_per_fmr = 1;
+ return 0;
+}
+
+/**
+ * i40iw_query_port - get port attributes
+ * @ibdev: device pointer from stack
+ * @port: port number for query
+ * @props: returning device attributes
+ */
+static int i40iw_query_port(struct ib_device *ibdev,
+ u8 port,
+ struct ib_port_attr *props)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct net_device *netdev = iwdev->netdev;
+
+ memset(props, 0, sizeof(*props));
+
+ props->max_mtu = IB_MTU_4096;
+ if (netdev->mtu >= 4096)
+ props->active_mtu = IB_MTU_4096;
+ else if (netdev->mtu >= 2048)
+ props->active_mtu = IB_MTU_2048;
+ else if (netdev->mtu >= 1024)
+ props->active_mtu = IB_MTU_1024;
+ else if (netdev->mtu >= 512)
+ props->active_mtu = IB_MTU_512;
+ else
+ props->active_mtu = IB_MTU_256;
+
+ props->lid = 1;
+ if (netif_carrier_ok(iwdev->netdev))
+ props->state = IB_PORT_ACTIVE;
+ else
+ props->state = IB_PORT_DOWN;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = 1;
+ props->max_msg_sz = 0x80000000;
+ return 0;
+}
+
+/**
+ * i40iw_alloc_ucontext - Allocate the user context data structure
+ * @ibdev: device pointer from stack
+ * @udata: user data
+ *
+ * This keeps track of all objects associated with a particular
+ * user-mode client.
+ */
+static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_alloc_ucontext_req req;
+ struct i40iw_alloc_ucontext_resp uresp;
+ struct i40iw_ucontext *ucontext;
+
+ if (ib_copy_from_udata(&req, udata, sizeof(req)))
+ return ERR_PTR(-EINVAL);
+
+ if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
+ i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
+ req.userspace_ver, I40IW_ABI_USERSPACE_VER);
+ return ERR_PTR(-EINVAL);
+ }
+
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.max_qps = iwdev->max_qp;
+ uresp.max_pds = iwdev->max_pd;
+ uresp.wq_size = iwdev->max_qp_wr * 2;
+ uresp.kernel_ver = I40IW_ABI_KERNEL_VER;
+
+ ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
+ if (!ucontext)
+ return ERR_PTR(-ENOMEM);
+
+ ucontext->iwdev = iwdev;
+
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+ kfree(ucontext);
+ return ERR_PTR(-EFAULT);
+ }
+
+ INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
+ spin_lock_init(&ucontext->cq_reg_mem_list_lock);
+ INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
+ spin_lock_init(&ucontext->qp_reg_mem_list_lock);
+
+ return &ucontext->ibucontext;
+}
+
+/**
+ * i40iw_dealloc_ucontext - deallocate the user context data structure
+ * @context: user context created during alloc
+ */
+static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct i40iw_ucontext *ucontext = to_ucontext(context);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ if (!list_empty(&ucontext->cq_reg_mem_list)) {
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ if (!list_empty(&ucontext->qp_reg_mem_list)) {
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ kfree(ucontext);
+ return 0;
+}
+
+/**
+ * i40iw_mmap - user memory map
+ * @context: context created during alloc
+ * @vma: kernel info for user memory map
+ */
+static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct i40iw_ucontext *ucontext;
+ u64 db_addr_offset;
+ u64 push_offset;
+
+ ucontext = to_ucontext(context);
+ if (ucontext->iwdev->sc_dev.is_pf) {
+ db_addr_offset = I40IW_DB_ADDR_OFFSET;
+ push_offset = I40IW_PUSH_OFFSET;
+ if (vma->vm_pgoff)
+ vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
+ } else {
+ db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
+ push_offset = I40IW_VF_PUSH_OFFSET;
+ if (vma->vm_pgoff)
+ vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
+ }
+
+ vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
+
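+	/*
+	 * A page offset of zero selects the doorbell page, mapped uncached;
+	 * any other offset selects a push page (rebased above to this
+	 * function's first push page), alternating write-combined (even)
+	 * and uncached (odd) relative to the push base.
+	 */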
+ if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_private_data = ucontext;
+ } else {
+ if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
+ PAGE_SIZE, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * i40iw_alloc_push_page - allocate a push page for qp
+ * @iwdev: iwarp device
+ * @qp: hardware control qp
+ */
+static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+
+ if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
+ return;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return;
+
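+	/* Hold an extra reference so the request survives until the
+	 * completion result is read below.
+	 */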
+ atomic_inc(&cqp_request->refcount);
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+
+ cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 0;
+ cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (!status)
+ qp->push_idx = cqp_request->compl_info.op_ret_val;
+ else
+ i40iw_pr_err("CQP-OP Push page fail");
+ i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+}
+
+/**
+ * i40iw_dealloc_push_page - free a push page for qp
+ * @iwdev: iwarp device
+ * @qp: hardware control qp
+ */
+static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+
+ if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
+ return;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+
+ cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
+ cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 1;
+ cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (!status)
+ qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+ else
+ i40iw_pr_err("CQP-OP Push page fail");
+}
+
+/**
+ * i40iw_alloc_pd - allocate protection domain
+ * @ibdev: device pointer from stack
+ * @context: user context created during alloc
+ * @udata: user data
+ */
+static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct i40iw_pd *iwpd;
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_alloc_pd_resp uresp;
+ struct i40iw_sc_pd *sc_pd;
+ u32 pd_id = 0;
+ int err;
+
+ err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
+ iwdev->max_pd, &pd_id, &iwdev->next_pd);
+ if (err) {
+ i40iw_pr_err("alloc resource failed\n");
+ return ERR_PTR(err);
+ }
+
+ iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
+ if (!iwpd) {
+ err = -ENOMEM;
+ goto free_res;
+ }
+
+ sc_pd = &iwpd->sc_pd;
+ dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);
+
+ if (context) {
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.pd_id = pd_id;
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+ err = -EFAULT;
+ goto error;
+ }
+ }
+
+ i40iw_add_pdusecount(iwpd);
+ return &iwpd->ibpd;
+error:
+ kfree(iwpd);
+free_res:
+ i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
+ return ERR_PTR(err);
+}
+
+/**
+ * i40iw_dealloc_pd - deallocate pd
+ * @ibpd: ptr of pd to be deallocated
+ */
+static int i40iw_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct i40iw_pd *iwpd = to_iwpd(ibpd);
+ struct i40iw_device *iwdev = to_iwdev(ibpd->device);
+
+ i40iw_rem_pdusecount(iwpd, iwdev);
+ return 0;
+}
+
+/**
+ * i40iw_qp_roundup - return rounded up qp ring size
+ * @wr_ring_size: ring size to round up
+ */
+static int i40iw_qp_roundup(u32 wr_ring_size)
+{
+ int scount = 1;
+
+ if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
+ wr_ring_size = I40IWQP_SW_MIN_WQSIZE;
+
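+	/* Round up to the next power of two by smearing the highest set
+	 * bit into all lower bit positions, then adding one.
+	 */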
+ for (wr_ring_size--; scount <= 16; scount *= 2)
+ wr_ring_size |= wr_ring_size >> scount;
+ return ++wr_ring_size;
+}
+
+/**
+ * i40iw_get_pbl - Retrieve pbl from a list given a virtual
+ * address and remove it from the list
+ * @va: user virtual address
+ * @pbl_list: pbl list to search in (QP's or CQ's)
+ */
+static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
+ struct list_head *pbl_list)
+{
+ struct i40iw_pbl *iwpbl;
+
+ list_for_each_entry(iwpbl, pbl_list, list) {
+ if (iwpbl->user_base == va) {
+ list_del(&iwpbl->list);
+ return iwpbl;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * i40iw_free_qp_resources - free up memory resources for qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @qp_num: qp number assigned
+ */
+void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ u32 qp_num)
+{
+ i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
+ if (qp_num)
+ i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
+ kfree(iwqp->kqp.wrid_mem);
+ iwqp->kqp.wrid_mem = NULL;
+ kfree(iwqp->allocated_buffer);
+ iwqp->allocated_buffer = NULL;
+}
+
+/**
+ * i40iw_clean_cqes - clean cq entries for qp
+ * @iwqp: qp ptr (user or kernel)
+ * @iwcq: cq ptr
+ */
+static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
+{
+ struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
+
+ ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
+}
+
+/**
+ * i40iw_destroy_qp - destroy qp
+ * @ibqp: qp's ib pointer, also used to reach the device's qp
+ */
+static int i40iw_destroy_qp(struct ib_qp *ibqp)
+{
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+
+ iwqp->destroyed = 1;
+
+ if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
+ i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
+
+ if (!iwqp->user_mode) {
+ if (iwqp->iwscq) {
+ i40iw_clean_cqes(iwqp, iwqp->iwscq);
+ if (iwqp->iwrcq != iwqp->iwscq)
+ i40iw_clean_cqes(iwqp, iwqp->iwrcq);
+ }
+ }
+
+ i40iw_rem_ref(&iwqp->ibqp);
+ return 0;
+}
+
+/**
+ * i40iw_setup_virt_qp - setup for allocation of virtual qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr
+ * @init_info: initialize info to return
+ */
+static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ struct i40iw_qp_init_info *init_info)
+{
+ struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+ struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
+
+ iwqp->page = qpmr->sq_page;
+ init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
+ if (iwpbl->pbl_allocated) {
+ init_info->virtual_map = true;
+ init_info->sq_pa = qpmr->sq_pbl.idx;
+ init_info->rq_pa = qpmr->rq_pbl.idx;
+ } else {
+ init_info->sq_pa = qpmr->sq_pbl.addr;
+ init_info->rq_pa = qpmr->rq_pbl.addr;
+ }
+ return 0;
+}
+
+/**
+ * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ */
+static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ struct i40iw_qp_init_info *info)
+{
+ struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
+ u32 sqdepth, rqdepth;
+ u32 sq_size, rq_size;
+ u8 sqshift, rqshift;
+ u32 size;
+ enum i40iw_status_code status;
+ struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+
+ ukinfo->max_sq_frag_cnt = I40IW_MAX_WQ_FRAGMENT_COUNT;
+
+ sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
+ rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
+
+ status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, &sqshift);
+ if (!status)
+ status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, &rqshift);
+
+ if (status)
+ return -ENOSYS;
+
+ sqdepth = sq_size << sqshift;
+ rqdepth = rq_size << rqshift;
+
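+	/* One tracking entry per SQ WQE plus one u64 wr_id per RQ WQE,
+	 * allocated as a single buffer.
+	 */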
+ size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
+ iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
+
+ ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
+ if (!ukinfo->sq_wrtrk_array)
+ return -ENOMEM;
+
+ ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
+
+ size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
+ size += (I40IW_SHADOW_AREA_SIZE << 3);
+
+ status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
+ if (status) {
+ kfree(ukinfo->sq_wrtrk_array);
+ ukinfo->sq_wrtrk_array = NULL;
+ return -ENOMEM;
+ }
+
+ ukinfo->sq = mem->va;
+ info->sq_pa = mem->pa;
+
+ ukinfo->rq = &ukinfo->sq[sqdepth];
+ info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);
+
+ ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
+ info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
+
+ ukinfo->sq_size = sq_size;
+ ukinfo->rq_size = rq_size;
+ ukinfo->qp_id = iwqp->ibqp.qp_num;
+ return 0;
+}
+
+/**
+ * i40iw_create_qp - create qp
+ * @ibpd: ptr of pd
+ * @init_attr: attributes for qp
+ * @udata: user data for create qp
+ */
+static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct i40iw_pd *iwpd = to_iwpd(ibpd);
+ struct i40iw_device *iwdev = to_iwdev(ibpd->device);
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_qp *iwqp;
+ struct i40iw_ucontext *ucontext;
+ struct i40iw_create_qp_req req;
+ struct i40iw_create_qp_resp uresp;
+ u32 qp_num = 0;
+ void *mem;
+ enum i40iw_status_code ret;
+ int err_code;
+ int sq_size;
+ int rq_size;
+ struct i40iw_sc_qp *qp;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_qp_init_info init_info;
+ struct i40iw_create_qp_info *qp_info;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ struct i40iw_qp_host_ctx_info *ctx_info;
+ struct i40iwarp_offload_info *iwarp_info;
+ unsigned long flags;
+
+ if (init_attr->create_flags)
+ return ERR_PTR(-EINVAL);
+ if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
+ init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+
+ memset(&init_info, 0, sizeof(init_info));
+
+ sq_size = init_attr->cap.max_send_wr;
+ rq_size = init_attr->cap.max_recv_wr;
+
+ init_info.qp_uk_init_info.sq_size = sq_size;
+ init_info.qp_uk_init_info.rq_size = rq_size;
+ init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
+ init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
+
+ mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ iwqp = (struct i40iw_qp *)mem;
+ qp = &iwqp->sc_qp;
+ qp->back_qp = (void *)iwqp;
+ qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+
+ iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
+
+ if (i40iw_allocate_dma_mem(dev->hw,
+ &iwqp->q2_ctx_mem,
+ I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
+ 256)) {
+ i40iw_pr_err("dma_mem failed\n");
+ err_code = -ENOMEM;
+ goto error;
+ }
+
+ init_info.q2 = iwqp->q2_ctx_mem.va;
+ init_info.q2_pa = iwqp->q2_ctx_mem.pa;
+
+ init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
+ init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;
+
+ err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
+ &qp_num, &iwdev->next_qp);
+ if (err_code) {
+ i40iw_pr_err("qp resource\n");
+ goto error;
+ }
+
+ iwqp->allocated_buffer = mem;
+ iwqp->iwdev = iwdev;
+ iwqp->iwpd = iwpd;
+ iwqp->ibqp.qp_num = qp_num;
+ iwqp->iwscq = to_iwcq(init_attr->send_cq);
+ iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
+
+ iwqp->host_ctx.va = init_info.host_ctx;
+ iwqp->host_ctx.pa = init_info.host_ctx_pa;
+ iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
+
+ init_info.pd = &iwpd->sc_pd;
+ init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
+ iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
+
+ if (init_attr->qp_type != IB_QPT_RC) {
+ err_code = -ENOSYS;
+ goto error;
+ }
+ if (iwdev->push_mode)
+ i40iw_alloc_push_page(iwdev, qp);
+ if (udata) {
+ err_code = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (err_code) {
+			i40iw_pr_err("ib_copy_from_udata failed\n");
+ goto error;
+ }
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+ if (ibpd->uobject && ibpd->uobject->context) {
+ iwqp->user_mode = 1;
+ ucontext = to_ucontext(ibpd->uobject->context);
+
+ if (req.user_wqe_buffers) {
+ spin_lock_irqsave(
+ &ucontext->qp_reg_mem_list_lock, flags);
+ iwqp->iwpbl = i40iw_get_pbl(
+ (unsigned long)req.user_wqe_buffers,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(
+ &ucontext->qp_reg_mem_list_lock, flags);
+
+ if (!iwqp->iwpbl) {
+ err_code = -ENODATA;
+ i40iw_pr_err("no pbl info\n");
+ goto error;
+ }
+ }
+ }
+ err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
+ } else {
+ err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
+ }
+
+ if (err_code) {
+ i40iw_pr_err("setup qp failed\n");
+ goto error;
+ }
+
+ init_info.type = I40IW_QP_TYPE_IWARP;
+ ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
+ if (ret) {
+ err_code = -EPROTO;
+ i40iw_pr_err("qp_init fail\n");
+ goto error;
+ }
+ ctx_info = &iwqp->ctx_info;
+ iwarp_info = &iwqp->iwarp_info;
+ iwarp_info->rd_enable = true;
+ iwarp_info->wr_rdresp_en = true;
+ if (!iwqp->user_mode)
+ iwarp_info->priv_mode_en = true;
+ iwarp_info->ddp_ver = 1;
+ iwarp_info->rdmap_ver = 1;
+
+ ctx_info->iwarp_info_valid = true;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
+ ctx_info->push_mode_en = false;
+ } else {
+ ctx_info->push_mode_en = true;
+ ctx_info->push_idx = qp->push_idx;
+ }
+
+ ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+ (u64 *)iwqp->host_ctx.va,
+ ctx_info);
+ ctx_info->iwarp_info_valid = false;
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto error;
+ }
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+
+ memset(qp_info, 0, sizeof(*qp_info));
+
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;
+
+ cqp_info->cqp_cmd = OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ ret = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (ret) {
+ i40iw_pr_err("CQP-OP QP create fail");
+ err_code = -EACCES;
+ goto error;
+ }
+
+ i40iw_add_ref(&iwqp->ibqp);
+ spin_lock_init(&iwqp->lock);
+ iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ iwdev->qp_table[qp_num] = iwqp;
+ i40iw_add_pdusecount(iwqp->iwpd);
+ if (ibpd->uobject && udata) {
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.actual_sq_size = sq_size;
+ uresp.actual_rq_size = rq_size;
+ uresp.qp_id = qp_num;
+ uresp.push_idx = qp->push_idx;
+ err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (err_code) {
+ i40iw_pr_err("copy_to_udata failed\n");
+ i40iw_destroy_qp(&iwqp->ibqp);
+ /* let the completion of the qp destroy free the qp */
+ return ERR_PTR(err_code);
+ }
+ }
+
+ return &iwqp->ibqp;
+error:
+ i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+ kfree(mem);
+ return ERR_PTR(err_code);
+}
+
+/**
+ * i40iw_query_qp - query qp attributes
+ * @ibqp: qp pointer
+ * @attr: attributes pointer
+ * @attr_mask: Not used
+ * @init_attr: qp attributes to return
+ */
+static int i40iw_query_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
+ attr->qp_access_flags = 0;
+ attr->cap.max_send_wr = qp->qp_uk.sq_size;
+ attr->cap.max_recv_wr = qp->qp_uk.rq_size;
+ attr->cap.max_recv_sge = 1;
+ attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+ init_attr->event_handler = iwqp->ibqp.event_handler;
+ init_attr->qp_context = iwqp->ibqp.qp_context;
+ init_attr->send_cq = iwqp->ibqp.send_cq;
+ init_attr->recv_cq = iwqp->ibqp.recv_cq;
+ init_attr->srq = iwqp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ return 0;
+}
+
+/**
+ * i40iw_hw_modify_qp - setup cqp for modify qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: info for modify qp
+ * @wait: flag to wait or not for modify qp completion
+ */
+void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
+ struct i40iw_modify_qp_info *info, bool wait)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_modify_qp_info *m_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.qp_modify.info;
+ memcpy(m_info, info, sizeof(*m_info));
+ cqp_info->cqp_cmd = OP_QP_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Modify QP fail");
+}
+
+/**
+ * i40iw_modify_qp - modify qp request
+ * @ibqp: qp's pointer for modify
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
+ */
+int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ struct i40iw_device *iwdev = iwqp->iwdev;
+ struct i40iw_qp_host_ctx_info *ctx_info;
+ struct i40iwarp_offload_info *iwarp_info;
+ struct i40iw_modify_qp_info info;
+ u8 issue_modify_qp = 0;
+ u8 dont_wait = 0;
+	int err;
+ unsigned long flags;
+
+ memset(&info, 0, sizeof(info));
+ ctx_info = &iwqp->ctx_info;
+ iwarp_info = &iwqp->iwarp_info;
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+
+ if (attr_mask & IB_QP_STATE) {
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ case IB_QPS_RTR:
+ if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
+ err = -EINVAL;
+ goto exit;
+ }
+ if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
+ info.next_iwarp_state = I40IW_QP_STATE_IDLE;
+ issue_modify_qp = 1;
+ }
+ break;
+ case IB_QPS_RTS:
+ if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
+ (!iwqp->cm_id)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ issue_modify_qp = 1;
+ iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+ iwqp->hte_added = 1;
+ info.next_iwarp_state = I40IW_QP_STATE_RTS;
+ info.tcp_ctx_valid = true;
+ info.ord_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ break;
+ case IB_QPS_SQD:
+ if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
+ err = 0;
+ goto exit;
+ }
+ if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
+ (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
+ err = 0;
+ goto exit;
+ }
+ if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
+ err = -EINVAL;
+ goto exit;
+ }
+ info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_SQE:
+ if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
+ err = -EINVAL;
+ goto exit;
+ }
+ info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
+ err = -EINVAL;
+ goto exit;
+ }
+ if (iwqp->sc_qp.term_flags)
+ del_timer(&iwqp->terminate_timer);
+ info.next_iwarp_state = I40IW_QP_STATE_ERROR;
+ if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
+ iwdev->iw_status &&
+ (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
+ info.reset_tcp_conn = true;
+ else
+ dont_wait = 1;
+ issue_modify_qp = 1;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+
+ iwqp->ibqp_state = attr->qp_state;
+
+ if (issue_modify_qp)
+ iwqp->iwarp_state = info.next_iwarp_state;
+ else
+ info.next_iwarp_state = iwqp->iwarp_state;
+ }
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ ctx_info->iwarp_info_valid = true;
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
+ iwarp_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ iwarp_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ iwarp_info->rd_enable = true;
+ if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
+ iwarp_info->bind_en = true;
+
+ if (iwqp->user_mode) {
+ iwarp_info->rd_enable = true;
+ iwarp_info->wr_rdresp_en = true;
+ iwarp_info->priv_mode_en = false;
+ }
+ }
+
+ if (ctx_info->iwarp_info_valid) {
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ int ret;
+
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+ (u64 *)iwqp->host_ctx.va,
+ ctx_info);
+ if (ret) {
+ i40iw_pr_err("setting QP context\n");
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ if (issue_modify_qp)
+ i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
+
+ if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
+ if (dont_wait) {
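+			/* No TCP reset was issued for this transition, so
+			 * record the closed state and reset event locally.
+			 */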
+ if (iwqp->cm_id && iwqp->hw_tcp_state) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
+ iwqp->last_aeq = I40IW_AE_RESET_SENT;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ }
+ }
+ return 0;
+exit:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return err;
+}
+
+/**
+ * cq_free_resources - free up resources for cq
+ * @iwdev: iwarp device
+ * @iwcq: cq ptr
+ */
+static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
+{
+ struct i40iw_sc_cq *cq = &iwcq->sc_cq;
+
+ if (!iwcq->user_mode)
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
+ i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
+}
+
+/**
+ * cq_wq_destroy - send cq destroy cqp
+ * @iwdev: iwarp device
+ * @cq: hardware control cq
+ */
+static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+
+ cqp_info->cqp_cmd = OP_CQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_destroy.cq = cq;
+ cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+		i40iw_pr_err("CQP-OP Destroy CQ fail");
+}
+
+/**
+ * i40iw_destroy_cq - destroy cq
+ * @ib_cq: cq pointer
+ */
+static int i40iw_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct i40iw_cq *iwcq;
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_cq *cq;
+
+ if (!ib_cq) {
+ i40iw_pr_err("ib_cq == NULL\n");
+ return 0;
+ }
+
+ iwcq = to_iwcq(ib_cq);
+ iwdev = to_iwdev(ib_cq->device);
+ cq = &iwcq->sc_cq;
+ cq_wq_destroy(iwdev, cq);
+ cq_free_resources(iwdev, iwcq);
+ kfree(iwcq);
+ return 0;
+}
+
+/**
+ * i40iw_create_cq - create cq
+ * @ibdev: device pointer from stack
+ * @attr: attributes for cq
+ * @context: user context created during alloc
+ * @udata: user data
+ */
+static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_cq *iwcq;
+ struct i40iw_pbl *iwpbl;
+ u32 cq_num = 0;
+ struct i40iw_sc_cq *cq;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cq_init_info info;
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
+ unsigned long flags;
+ int err_code;
+ int entries = attr->cqe;
+
+ if (entries > iwdev->max_cqe)
+ return ERR_PTR(-EINVAL);
+
+ iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
+ if (!iwcq)
+ return ERR_PTR(-ENOMEM);
+
+ memset(&info, 0, sizeof(info));
+
+ err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
+ iwdev->max_cq, &cq_num,
+ &iwdev->next_cq);
+ if (err_code)
+ goto error;
+
+ cq = &iwcq->sc_cq;
+ cq->back_cq = (void *)iwcq;
+ spin_lock_init(&iwcq->lock);
+
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+ iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
+	info.ceq_id = 0;
+	info.ceq_id_valid = true;
+	info.ceqe_mask = 1;
+ info.type = I40IW_CQ_TYPE_IWARP;
+ if (context) {
+ struct i40iw_ucontext *ucontext;
+ struct i40iw_create_cq_req req;
+ struct i40iw_cq_mr *cqmr;
+
+ memset(&req, 0, sizeof(req));
+ iwcq->user_mode = true;
+ ucontext = to_ucontext(context);
+		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+			err_code = -EFAULT;
+			goto cq_free_resources;
+		}
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ if (!iwpbl) {
+ err_code = -EPROTO;
+ goto cq_free_resources;
+ }
+
+ iwcq->iwpbl = iwpbl;
+ iwcq->cq_mem_size = 0;
+ cqmr = &iwpbl->cq_mr;
+ info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
+ if (iwpbl->pbl_allocated) {
+ info.virtual_map = true;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
+ } else {
+ info.cq_base_pa = cqmr->cq_pbl.addr;
+ }
+ } else {
+ /* Kmode allocations */
+ int rsize;
+ int shadow;
+
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
+ rsize = round_up(rsize, 256);
+ shadow = I40IW_SHADOW_AREA_SIZE << 3;
+ status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
+ rsize + shadow, 256);
+ if (status) {
+ err_code = -ENOMEM;
+ goto cq_free_resources;
+ }
+ ukinfo->cq_base = iwcq->kmem.va;
+ info.cq_base_pa = iwcq->kmem.pa;
+ info.shadow_area_pa = info.cq_base_pa + rsize;
+ ukinfo->shadow_area = iwcq->kmem.va + rsize;
+ }
+
+ if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
+ i40iw_pr_err("init cq fail\n");
+ err_code = -EPROTO;
+ goto cq_free_resources;
+ }
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto cq_free_resources;
+ }
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status) {
+		i40iw_pr_err("CQP-OP Create CQ fail");
+ err_code = -EPROTO;
+ goto cq_free_resources;
+ }
+
+ if (context) {
+ struct i40iw_create_cq_resp resp;
+
+ memset(&resp, 0, sizeof(resp));
+ resp.cq_id = info.cq_uk_init_info.cq_id;
+ resp.cq_size = info.cq_uk_init_info.cq_size;
+ if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+ i40iw_pr_err("copy to user data\n");
+ err_code = -EPROTO;
+ goto cq_destroy;
+ }
+ }
+
+ return (struct ib_cq *)iwcq;
+
+cq_destroy:
+ cq_wq_destroy(iwdev, cq);
+cq_free_resources:
+ cq_free_resources(iwdev, iwcq);
+error:
+ kfree(iwcq);
+ return ERR_PTR(err_code);
+}
+
+/**
+ * i40iw_get_user_access - get hw access from IB access
+ * @acc: IB access to return hw access
+ */
+static inline u16 i40iw_get_user_access(int acc)
+{
+ u16 access = 0;
+
+ access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
+ access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
+ access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
+ access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
+ return access;
+}
+
+/**
+ * i40iw_free_stag - free stag resource
+ * @iwdev: iwarp device
+ * @stag: stag to free
+ */
+static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
+{
+ u32 stag_idx;
+
+ stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
+ i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
+}
+
+/**
+ * i40iw_create_stag - create random stag
+ * @iwdev: iwarp device
+ */
+static u32 i40iw_create_stag(struct i40iw_device *iwdev)
+{
+ u32 stag = 0;
+ u32 stag_index = 0;
+ u32 next_stag_index;
+ u32 driver_key;
+ u32 random;
+ u8 consumer_key;
+ int ret;
+
+ get_random_bytes(&random, sizeof(random));
+ consumer_key = (u8)random;
+
+ driver_key = random & ~iwdev->mr_stagmask;
+ next_stag_index = (random & iwdev->mr_stagmask) >> 8;
+ next_stag_index %= iwdev->max_mr;
+
+ ret = i40iw_alloc_resource(iwdev,
+ iwdev->allocated_mrs, iwdev->max_mr,
+ &stag_index, &next_stag_index);
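+	/* Build the stag from the allocated index shifted into place,
+	 * random driver key bits outside the index mask, and a random
+	 * 8-bit consumer key added on top.
+	 */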
+ if (!ret) {
+ stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
+ stag |= driver_key;
+ stag += (u32)consumer_key;
+ }
+ return stag;
+}
+
+/**
+ * i40iw_next_pbl_addr - Get next pbl address
+ * @palloc: Pointer to allocated pbles
+ * @pbl: pointer to a pble
+ * @pinfo: info pointer
+ * @idx: index
+ */
+static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
+ u64 *pbl,
+ struct i40iw_pble_info **pinfo,
+ u32 *idx)
+{
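+	/* Advance within the current pble chunk; when the chunk is
+	 * exhausted, move on to the next leaf's address.
+	 */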
+ *idx += 1;
+ if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
+ return ++pbl;
+ *idx = 0;
+ (*pinfo)++;
+ return (u64 *)(*pinfo)->addr;
+}
+
+/**
+ * i40iw_copy_user_pgaddrs - copy user page addresses into pbles
+ * @iwmr: iwmr for IB's user page addresses
+ * @pbl: pble pointer to save 1 level or 0 level pble
+ * @level: indicates level 0, 1 or 2
+ */
+static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
+ u64 *pbl,
+ enum i40iw_pble_level level)
+{
+ struct ib_umem *region = iwmr->region;
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ int chunk_pages, entry, pg_shift, i;
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct i40iw_pble_info *pinfo;
+ struct scatterlist *sg;
+ u32 idx = 0;
+
+ pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
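+	/* ffs() returns the 1-based index of the lowest set bit, so this
+	 * yields log2 of the power-of-two umem page size.
+	 */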
+ pg_shift = ffs(region->page_size) - 1;
+ for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
+ chunk_pages = sg_dma_len(sg) >> pg_shift;
+ if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
+ !iwpbl->qp_mr.sq_page)
+ iwpbl->qp_mr.sq_page = sg_page(sg);
+ for (i = 0; i < chunk_pages; i++) {
+ *pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
+ pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
+ }
+ }
+}
+
+/**
+ * i40iw_setup_pbles - copy user pg address to pble's
+ * @iwdev: iwarp device
+ * @iwmr: mr pointer for this memory registration
+ * @use_pbles: flag to use pbles or memory (level 0)
+ */
+static int i40iw_setup_pbles(struct i40iw_device *iwdev,
+ struct i40iw_mr *iwmr,
+ bool use_pbles)
+{
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct i40iw_pble_info *pinfo;
+ u64 *pbl;
+ enum i40iw_status_code status;
+ enum i40iw_pble_level level = I40IW_LEVEL_1;
+
+ if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS))
+ return -ENOMEM;
+
+ if (use_pbles) {
+ mutex_lock(&iwdev->pbl_mutex);
+ status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
+ mutex_unlock(&iwdev->pbl_mutex);
+ if (status)
+ return -ENOMEM;
+
+ iwpbl->pbl_allocated = true;
+ level = palloc->level;
+ pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
+ pbl = (u64 *)pinfo->addr;
+ } else {
+ pbl = iwmr->pgaddrmem;
+ }
+
+ i40iw_copy_user_pgaddrs(iwmr, pbl, level);
+ return 0;
+}
+
+/**
+ * i40iw_handle_q_mem - handle memory for qp and cq
+ * @iwdev: iwarp device
+ * @req: information for q memory management
+ * @iwpbl: pble struct
+ * @use_pbles: flag to use pble
+ */
+static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
+ struct i40iw_mem_reg_req *req,
+ struct i40iw_pbl *iwpbl,
+ bool use_pbles)
+{
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct i40iw_mr *iwmr = iwpbl->iwmr;
+ struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
+ struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
+ struct i40iw_hmc_pble *hmc_p;
+ u64 *arr = iwmr->pgaddrmem;
+ int err;
+ int total;
+
+ total = req->sq_pages + req->rq_pages + req->cq_pages;
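+	/* The shadow area page follows the SQ, RQ and CQ pages, so its
+	 * address lives at index 'total' in the page address array.
+	 */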
+
+ err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
+ if (err)
+ return err;
+ if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ return -ENOMEM;
+ }
+
+ if (use_pbles)
+ arr = (u64 *)palloc->level1.addr;
+ if (req->reg_type == IW_MEMREG_TYPE_QP) {
+ hmc_p = &qpmr->sq_pbl;
+ qpmr->shadow = (dma_addr_t)arr[total];
+ if (use_pbles) {
+ hmc_p->idx = palloc->level1.idx;
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->idx = palloc->level1.idx + req->sq_pages;
+ } else {
+ hmc_p->addr = arr[0];
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->addr = arr[1];
+ }
+ } else { /* CQ */
+ hmc_p = &cqmr->cq_pbl;
+ cqmr->shadow = (dma_addr_t)arr[total];
+ if (use_pbles)
+ hmc_p->idx = palloc->level1.idx;
+ else
+ hmc_p->addr = arr[0];
+ }
+ return err;
+}
+
+/**
+ * i40iw_hwreg_mr - send cqp command for memory registration
+ * @iwdev: iwarp device
+ * @iwmr: iwarp mr pointer
+ * @access: access for MR
+ */
+static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
+ struct i40iw_mr *iwmr,
+ u16 access)
+{
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ struct i40iw_reg_ns_stag_info *stag_info;
+ struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ enum i40iw_status_code status;
+ int err = 0;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
+ memset(stag_info, 0, sizeof(*stag_info));
+ stag_info->va = (void *)(unsigned long)iwpbl->user_base;
+ stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
+ stag_info->stag_key = (u8)iwmr->stag;
+ stag_info->total_len = iwmr->length;
+ stag_info->access_rights = access;
+ stag_info->pd_id = iwpd->sc_pd.pd_id;
+ stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
+
+ if (iwmr->page_cnt > 1) {
+ if (palloc->level == I40IW_LEVEL_1) {
+ stag_info->first_pm_pbl_index = palloc->level1.idx;
+ stag_info->chunk_size = 1;
+ } else {
+ stag_info->first_pm_pbl_index = palloc->level2.root.idx;
+ stag_info->chunk_size = 3;
+ }
+ } else {
+ stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
+ }
+
+ cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
+ cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
+
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status) {
+ err = -ENOMEM;
+ i40iw_pr_err("CQP-OP MR Reg fail");
+ }
+ return err;
+}
+
+/**
+ * i40iw_reg_user_mr - Register a user memory region
+ * @pd: ptr of pd
+ * @start: virtual start address
+ * @length: length of mr
+ * @virt: virtual address
+ * @acc: access of mr
+ * @udata: user data
+ */
+static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+ u64 start,
+ u64 length,
+ u64 virt,
+ int acc,
+ struct ib_udata *udata)
+{
+ struct i40iw_pd *iwpd = to_iwpd(pd);
+ struct i40iw_device *iwdev = to_iwdev(pd->device);
+ struct i40iw_ucontext *ucontext;
+ struct i40iw_pble_alloc *palloc;
+ struct i40iw_pbl *iwpbl;
+ struct i40iw_mr *iwmr;
+ struct ib_umem *region;
+ struct i40iw_mem_reg_req req;
+ u32 pbl_depth = 0;
+ u32 stag = 0;
+ u16 access;
+ u32 region_length;
+ bool use_pbles = false;
+ unsigned long flags;
+ int err = -ENOSYS;
+
+ region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ if (IS_ERR(region))
+ return (struct ib_mr *)region;
+
+ if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+ ib_umem_release(region);
+ return ERR_PTR(-EFAULT);
+ }
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr) {
+ ib_umem_release(region);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->region = region;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ ucontext = to_ucontext(pd->uobject->context);
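+	/* Count the 4K pages spanned by the region, including the offset
+	 * into the first page.
+	 */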
+ region_length = region->length + (start & 0xfff);
+ pbl_depth = region_length >> 12;
+ pbl_depth += (region_length & (4096 - 1)) ? 1 : 0;
+ iwmr->length = region->length;
+
+ iwpbl->user_base = virt;
+ palloc = &iwpbl->pble_alloc;
+
+ iwmr->type = req.reg_type;
+ iwmr->page_cnt = pbl_depth;
+
+ switch (req.reg_type) {
+ case IW_MEMREG_TYPE_QP:
+ use_pbles = ((req.sq_pages + req.rq_pages) > 2);
+ err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ goto error;
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ break;
+ case IW_MEMREG_TYPE_CQ:
+ use_pbles = (req.cq_pages > 1);
+ err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ goto error;
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ break;
+ case IW_MEMREG_TYPE_MEM:
+ access = I40IW_ACCESS_FLAGS_LOCALREAD;
+
+ use_pbles = (iwmr->page_cnt != 1);
+ err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
+ if (err)
+ goto error;
+
+ access |= i40iw_get_user_access(acc);
+ stag = i40iw_create_stag(iwdev);
+ if (!stag) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+
+ err = i40iw_hwreg_mr(iwdev, iwmr, access);
+ if (err) {
+ i40iw_free_stag(iwdev, stag);
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+
+ if (req.reg_type == IW_MEMREG_TYPE_MEM)
+ i40iw_add_pdusecount(iwpd);
+ return &iwmr->ibmr;
+
+error:
+ if (palloc->level != I40IW_LEVEL_0)
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ ib_umem_release(region);
+ kfree(iwmr);
+ return ERR_PTR(err);
+}
+
+/**
+ * i40iw_reg_phys_mr - register kernel physical memory
+ * @pd: ibpd pointer
+ * @addr: physical address of memory to register
+ * @size: size of memory to register
+ * @acc: Access rights
+ * @iova_start: start of virtual address for physical buffers
+ */
+struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
+ u64 addr,
+ u64 size,
+ int acc,
+ u64 *iova_start)
+{
+ struct i40iw_pd *iwpd = to_iwpd(pd);
+ struct i40iw_device *iwdev = to_iwdev(pd->device);
+ struct i40iw_pbl *iwpbl;
+ struct i40iw_mr *iwmr;
+ enum i40iw_status_code status;
+ u32 stag;
+ u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
+ int ret;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->type = IW_MEMREG_TYPE_MEM;
+ iwpbl->user_base = *iova_start;
+ stag = i40iw_create_stag(iwdev);
+ if (!stag) {
+ ret = -EOVERFLOW;
+ goto err;
+ }
+ access |= i40iw_get_user_access(acc);
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->page_cnt = 1;
+ iwmr->pgaddrmem[0] = addr;
+ status = i40iw_hwreg_mr(iwdev, iwmr, access);
+ if (status) {
+ i40iw_free_stag(iwdev, stag);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i40iw_add_pdusecount(iwpd);
+ return &iwmr->ibmr;
+ err:
+ kfree(iwmr);
+ return ERR_PTR(ret);
+}
+
+/**
+ * i40iw_get_dma_mr - register physical mem
+ * @pd: ptr of pd
+ * @acc: access for memory
+ */
+static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ u64 kva = 0;
+
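+	/* A DMA MR is implemented as a single physical registration at
+	 * address 0 spanning a 1TB range.
+	 */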
+ return i40iw_reg_phys_mr(pd, 0, 0xffffffffffULL, acc, &kva);
+}
+
+/**
+ * i40iw_del_memlist - Deleting pbl list entries for CQ/QP
+ * @iwmr: iwmr for IB's user page addresses
+ * @ucontext: ptr to user context
+ */
+static void i40iw_del_memlist(struct i40iw_mr *iwmr,
+ struct i40iw_ucontext *ucontext)
+{
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ unsigned long flags;
+
+ switch (iwmr->type) {
+ case IW_MEMREG_TYPE_CQ:
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ if (!list_empty(&ucontext->cq_reg_mem_list))
+ list_del(&iwpbl->list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ break;
+ case IW_MEMREG_TYPE_QP:
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ if (!list_empty(&ucontext->qp_reg_mem_list))
+ list_del(&iwpbl->list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_dereg_mr - deregister mr
+ * @ib_mr: mr ptr for dereg
+ */
+static int i40iw_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct ib_pd *ibpd = ib_mr->pd;
+ struct i40iw_pd *iwpd = to_iwpd(ibpd);
+ struct i40iw_mr *iwmr = to_iwmr(ib_mr);
+ struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
+ enum i40iw_status_code status;
+ struct i40iw_dealloc_stag_info *info;
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ u32 stag_idx;
+
+ if (iwmr->region)
+ ib_umem_release(iwmr->region);
+
+ if (iwmr->type != IW_MEMREG_TYPE_MEM) {
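+		/* QP/CQ registrations never allocated a stag, so only the
+		 * pble backing and the iwmr itself need to be freed.
+		 */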
+ if (ibpd->uobject) {
+ struct i40iw_ucontext *ucontext;
+
+ ucontext = to_ucontext(ibpd->uobject->context);
+ i40iw_del_memlist(iwmr, ucontext);
+ }
+ if (iwpbl->pbl_allocated)
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ kfree(iwpbl->iwmr);
+ iwpbl->iwmr = NULL;
+ return 0;
+ }
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ memset(info, 0, sizeof(*info));
+
+ info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
+ info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
+ stag_idx = info->stag_idx;
+ info->mr = true;
+ if (iwpbl->pbl_allocated)
+ info->dealloc_pbl = true;
+
+ cqp_info->cqp_cmd = OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
+ i40iw_rem_pdusecount(iwpd, iwdev);
+ i40iw_free_stag(iwdev, iwmr->stag);
+ if (iwpbl->pbl_allocated)
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ kfree(iwmr);
+ return 0;
+}
+
+/**
+ * i40iw_show_rev
+ */
+static ssize_t i40iw_show_rev(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i40iw_ib_device *iwibdev = container_of(dev,
+ struct i40iw_ib_device,
+ ibdev.dev);
+ u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
+
+ return sprintf(buf, "%x\n", hw_rev);
+}
+
+/**
+ * i40iw_show_fw_ver - show firmware version
+ */
+static ssize_t i40iw_show_fw_ver(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 firmware_version = I40IW_FW_VERSION;
+
+ return sprintf(buf, "%u.%u\n", firmware_version,
+ (firmware_version & 0x000000ff));
+}
+
+/**
+ * i40iw_show_hca - show HCA type
+ */
+static ssize_t i40iw_show_hca(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "I40IW\n");
+}
+
+/**
+ * i40iw_show_board - show board ID
+ */
+static ssize_t i40iw_show_board(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
+static DEVICE_ATTR(fw_ver, S_IRUGO, i40iw_show_fw_ver, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
+
+static struct device_attribute *i40iw_dev_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_fw_ver,
+ &dev_attr_hca_type,
+ &dev_attr_board_id
+};
+
+/**
+ * i40iw_copy_sg_list - copy sg list for qp
+ * @sg_list: destination sg list
+ * @sgl: source sg list to copy from
+ * @num_sges: count of sg entries
+ */
+static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
+{
+ unsigned int i;
+
+ for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
+ sg_list[i].tag_off = sgl[i].addr;
+ sg_list[i].len = sgl[i].length;
+ sg_list[i].stag = sgl[i].lkey;
+ }
+}
+
+/**
+ * i40iw_post_send - kernel application wr
+ * @ibqp: qp ptr for wr
+ * @ib_wr: work request ptr
+ * @bad_wr: return of bad wr if err
+ */
+static int i40iw_post_send(struct ib_qp *ibqp,
+ struct ib_send_wr *ib_wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct i40iw_qp *iwqp;
+ struct i40iw_qp_uk *ukqp;
+ struct i40iw_post_sq_info info;
+ enum i40iw_status_code ret;
+ int err = 0;
+ unsigned long flags;
+
+ iwqp = (struct i40iw_qp *)ibqp;
+ ukqp = &iwqp->sc_qp.qp_uk;
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ while (ib_wr) {
+ memset(&info, 0, sizeof(info));
+ info.wr_id = (u64)(ib_wr->wr_id);
+ if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
+ info.signaled = true;
+ if (ib_wr->send_flags & IB_SEND_FENCE)
+ info.read_fence = true;
+
+ switch (ib_wr->opcode) {
+ case IB_WR_SEND:
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = I40IW_OP_TYPE_SEND_SOL;
+ else
+ info.op_type = I40IW_OP_TYPE_SEND;
+
+ if (ib_wr->send_flags & IB_SEND_INLINE) {
+ info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
+ info.op.inline_send.len = ib_wr->sg_list[0].length;
+ ret = ukqp->ops.iw_inline_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false);
+ } else {
+ info.op.send.num_sges = ib_wr->num_sge;
+ info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
+ ret = ukqp->ops.iw_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false);
+ }
+
+ if (ret)
+ err = -EIO;
+ break;
+ case IB_WR_RDMA_WRITE:
+ info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
+
+ if (ib_wr->send_flags & IB_SEND_INLINE) {
+ info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
+ info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
+ info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
+ ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
+ } else {
+ info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+ info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
+ ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
+ }
+
+ if (ret)
+ err = -EIO;
+ break;
+ case IB_WR_RDMA_READ:
+ info.op_type = I40IW_OP_TYPE_RDMA_READ;
+ info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
+ info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
+ info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
+ info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
+ ret = ukqp->ops.iw_rdma_read(ukqp, &info, false, false);
+ if (ret)
+ err = -EIO;
+ break;
+ default:
+ err = -EINVAL;
+			i40iw_pr_err("post_send bad opcode = 0x%x\n",
+				     ib_wr->opcode);
+ break;
+ }
+
+ if (err)
+ break;
+ ib_wr = ib_wr->next;
+ }
+
+ if (err)
+ *bad_wr = ib_wr;
+ else
+ ukqp->ops.iw_qp_post_wr(ukqp);
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ return err;
+}
+
+/**
+ * i40iw_post_recv - post receive wr for kernel application
+ * @ibqp: ib qp pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: bad wr caused an error
+ */
+static int i40iw_post_recv(struct ib_qp *ibqp,
+ struct ib_recv_wr *ib_wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct i40iw_qp *iwqp;
+ struct i40iw_qp_uk *ukqp;
+ struct i40iw_post_rq_info post_recv;
+ struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
+ enum i40iw_status_code ret = 0;
+ unsigned long flags;
+
+ iwqp = (struct i40iw_qp *)ibqp;
+ ukqp = &iwqp->sc_qp.qp_uk;
+
+ memset(&post_recv, 0, sizeof(post_recv));
+ spin_lock_irqsave(&iwqp->lock, flags);
+ while (ib_wr) {
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
+ post_recv.sg_list = sg_list;
+ ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
+ if (ret) {
+ i40iw_pr_err(" post_recv err %d\n", ret);
+ *bad_wr = ib_wr;
+ goto out;
+ }
+ ib_wr = ib_wr->next;
+ }
+ out:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (ret)
+ return -ENOSYS;
+ return 0;
+}
+
+/**
+ * i40iw_poll_cq - poll cq for completion (kernel apps)
+ * @ibcq: cq to poll
+ * @num_entries: number of entries to poll
+ * @entry: wr of entry completed
+ */
+static int i40iw_poll_cq(struct ib_cq *ibcq,
+ int num_entries,
+ struct ib_wc *entry)
+{
+ struct i40iw_cq *iwcq;
+ int cqe_count = 0;
+ struct i40iw_cq_poll_info cq_poll_info;
+ enum i40iw_status_code ret;
+ struct i40iw_cq_uk *ukcq;
+ struct i40iw_sc_qp *qp;
+ unsigned long flags;
+
+ iwcq = (struct i40iw_cq *)ibcq;
+ ukcq = &iwcq->sc_cq.cq_uk;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ while (cqe_count < num_entries) {
+ ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true);
+ if (ret == I40IW_ERR_QUEUE_EMPTY) {
+ break;
+ } else if (ret) {
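+			/* Report the poll error as -1 only when no
+			 * completions have been returned yet.
+			 */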
+ if (!cqe_count)
+ cqe_count = -1;
+ break;
+ }
+ entry->wc_flags = 0;
+ entry->wr_id = cq_poll_info.wr_id;
+ if (!cq_poll_info.error)
+ entry->status = IB_WC_SUCCESS;
+ else
+ entry->status = IB_WC_WR_FLUSH_ERR;
+
+ switch (cq_poll_info.op_type) {
+ case I40IW_OP_TYPE_RDMA_WRITE:
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
+ case I40IW_OP_TYPE_RDMA_READ:
+ entry->opcode = IB_WC_RDMA_READ;
+ break;
+ case I40IW_OP_TYPE_SEND_SOL:
+ case I40IW_OP_TYPE_SEND_SOL_INV:
+ case I40IW_OP_TYPE_SEND_INV:
+ case I40IW_OP_TYPE_SEND:
+ entry->opcode = IB_WC_SEND;
+ break;
+ case I40IW_OP_TYPE_REC:
+ entry->opcode = IB_WC_RECV;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ break;
+ }
+
+ entry->vendor_err =
+ cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
+ entry->ex.imm_data = 0;
+ qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
+ entry->qp = (struct ib_qp *)qp->back_qp;
+ entry->src_qp = cq_poll_info.qp_id;
+ entry->byte_len = cq_poll_info.bytes_xfered;
+ entry++;
+ cqe_count++;
+ }
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+ return cqe_count;
+}
+
+/**
+ * i40iw_req_notify_cq - arm cq kernel application
+ * @ibcq: cq to arm
+ * @notify_flags: notification flags
+ */
+static int i40iw_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags notify_flags)
+{
+ struct i40iw_cq *iwcq;
+ struct i40iw_cq_uk *ukcq;
+ enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+
+ iwcq = (struct i40iw_cq *)ibcq;
+ ukcq = &iwcq->sc_cq.cq_uk;
+ if (notify_flags == IB_CQ_NEXT_COMP)
+ cq_notify = IW_CQ_COMPL_EVENT;
+ ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+ return 0;
+}
+
+/**
+ * i40iw_port_immutable - return port's immutable data
+ * @ibdev: ib dev struct
+ * @port_num: port number
+ * @immutable: immutable data for the port return
+ */
+static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = i40iw_query_port(ibdev, port_num, &attr);
+
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+ return 0;
+}
+
+/**
+ * i40iw_get_protocol_stats - Populates the rdma_protocol_stats structure
+ * @ibdev: ib dev struct
+ * @stats: iw protocol stats struct
+ */
+static int i40iw_get_protocol_stats(struct ib_device *ibdev,
+ union rdma_protocol_stats *stats)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+ struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+ struct timespec curr_time;
+ static struct timespec last_rd_time = {0, 0};
+ enum i40iw_status_code status = 0;
+ unsigned long flags;
+
+ curr_time = current_kernel_time();
+ memset(stats, 0, sizeof(*stats));
+
+ if (dev->is_pf) {
+ spin_lock_irqsave(&devstat->stats_lock, flags);
+ devstat->ops.iw_hw_stat_read_all(devstat,
+ &devstat->hw_stats);
+ spin_unlock_irqrestore(&devstat->stats_lock, flags);
+ } else {
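+		/* VFs fetch stats over the virtual channel, at most once
+		 * per second.
+		 */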
+ if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1)
+ status = i40iw_vchnl_vf_get_pe_stats(dev,
+ &devstat->hw_stats);
+
+ if (status)
+ return -ENOSYS;
+ }
+
+ stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS];
+ stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] +
+ hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC];
+ stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] +
+ hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD];
+ stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] +
+ hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE];
+ stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS];
+ stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS];
+ stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS];
+ stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS];
+ stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG];
+ stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS];
+ stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG];
+
+ last_rd_time = curr_time;
+ return 0;
+}
+
+/**
+ * i40iw_query_gid - Query port GID
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: Entry index
+ * @gid: Global ID
+ */
+static int i40iw_query_gid(struct ib_device *ibdev,
+ u8 port,
+ int index,
+ union ib_gid *gid)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+
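+	/* The GID is derived from the netdev MAC address. */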
+ memset(gid->raw, 0, sizeof(gid->raw));
+ ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
+ return 0;
+}
+
+/**
+ * i40iw_modify_port - modify port properties
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @port_modify_mask: mask for port modifications
+ * @props: port properties
+ */
+static int i40iw_modify_port(struct ib_device *ibdev,
+ u8 port,
+ int port_modify_mask,
+ struct ib_port_modify *props)
+{
+ return 0;
+}
+
+/**
+ * i40iw_query_pkey - Query partition key
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: index of pkey
+ * @pkey: pointer to store the pkey
+ */
+static int i40iw_query_pkey(struct ib_device *ibdev,
+ u8 port,
+ u16 index,
+ u16 *pkey)
+{
+ *pkey = 0;
+ return 0;
+}
+
+/**
+ * i40iw_create_ah - create address handle
+ * @ibpd: ptr of pd
+ * @attr: address handle attributes
+ */
+static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
+ struct ib_ah_attr *attr)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+/**
+ * i40iw_destroy_ah - Destroy address handle
+ * @ah: pointer to address handle
+ */
+static int i40iw_destroy_ah(struct ib_ah *ah)
+{
+ return -ENOSYS;
+}
+
+/**
+ * i40iw_init_rdma_device - initialization of iwarp device
+ * @iwdev: iwarp device
+ */
+static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
+{
+ struct i40iw_ib_device *iwibdev;
+ struct net_device *netdev = iwdev->netdev;
+ struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
+
+ iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
+ if (!iwibdev) {
+		i40iw_pr_err("ib_alloc_device failed\n");
+ return NULL;
+ }
+ strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
+ iwibdev->ibdev.owner = THIS_MODULE;
+ iwdev->iwibdev = iwibdev;
+ iwibdev->iwdev = iwdev;
+
+ iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
+ ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
+
+ iwibdev->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND);
+ iwibdev->ibdev.phys_port_cnt = 1;
+ iwibdev->ibdev.num_comp_vectors = 1;
+ iwibdev->ibdev.dma_device = &pcidev->dev;
+ iwibdev->ibdev.dev.parent = &pcidev->dev;
+ iwibdev->ibdev.query_port = i40iw_query_port;
+ iwibdev->ibdev.modify_port = i40iw_modify_port;
+ iwibdev->ibdev.query_pkey = i40iw_query_pkey;
+ iwibdev->ibdev.query_gid = i40iw_query_gid;
+ iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
+ iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
+ iwibdev->ibdev.mmap = i40iw_mmap;
+ iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
+ iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
+ iwibdev->ibdev.create_qp = i40iw_create_qp;
+ iwibdev->ibdev.modify_qp = i40iw_modify_qp;
+ iwibdev->ibdev.query_qp = i40iw_query_qp;
+ iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
+ iwibdev->ibdev.create_cq = i40iw_create_cq;
+ iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
+ iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
+ iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
+ iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
+ iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats;
+ iwibdev->ibdev.query_device = i40iw_query_device;
+ iwibdev->ibdev.create_ah = i40iw_create_ah;
+ iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
+ iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
+ if (!iwibdev->ibdev.iwcm) {
+ ib_dealloc_device(&iwibdev->ibdev);
+ i40iw_pr_err("iwcm == NULL\n");
+ return NULL;
+ }
+
+ iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
+ iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
+ iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
+ iwibdev->ibdev.iwcm->connect = i40iw_connect;
+ iwibdev->ibdev.iwcm->accept = i40iw_accept;
+ iwibdev->ibdev.iwcm->reject = i40iw_reject;
+ iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
+ iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
+ memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
+ sizeof(iwibdev->ibdev.iwcm->ifname));
+ iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
+ iwibdev->ibdev.poll_cq = i40iw_poll_cq;
+ iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
+ iwibdev->ibdev.post_send = i40iw_post_send;
+ iwibdev->ibdev.post_recv = i40iw_post_recv;
+
+ return iwibdev;
+}
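
Each bit in uverbs_cmd_mask above advertises one user-space verb that the
driver implements. A minimal sketch of how such a mask can be tested,
assuming only the standard ib_device field and the uapi command enum (the
helper name is hypothetical):

	/* Sketch: does a driver advertise a given user verb? */
	static inline bool sketch_verb_supported(const struct ib_device *ibdev,
						 unsigned int cmd)
	{
		return !!(ibdev->uverbs_cmd_mask & (1ull << cmd));
	}

	/* e.g. sketch_verb_supported(&iwibdev->ibdev, IB_USER_VERBS_CMD_CREATE_QP) */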
+
+/**
+ * i40iw_port_ibevent - indicate port event
+ * @iwdev: iwarp device
+ */
+void i40iw_port_ibevent(struct i40iw_device *iwdev)
+{
+ struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
+ struct ib_event event;
+
+ event.device = &iwibdev->ibdev;
+ event.element.port_num = 1;
+ event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ ib_dispatch_event(&event);
+}
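
On the consumer side, this dispatch reaches any ULP that registered an
ib_event_handler, as an IB_EVENT_PORT_ACTIVE or IB_EVENT_PORT_ERR callback.
A hedged sketch of such a registration (the handler body and names are
invented; the macro and registration call are from rdma/ib_verbs.h):

	static void sketch_port_event_handler(struct ib_event_handler *handler,
					      struct ib_event *event)
	{
		if (event->event == IB_EVENT_PORT_ACTIVE ||
		    event->event == IB_EVENT_PORT_ERR)
			pr_info("port %u changed state\n",
				event->element.port_num);
	}

	/*
	 * struct ib_event_handler h;
	 * INIT_IB_EVENT_HANDLER(&h, device, sketch_port_event_handler);
	 * ib_register_event_handler(&h);
	 */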
+
+/**
+ * i40iw_unregister_rdma_device - unregister iwarp device from IB
+ * @iwibdev: rdma device ptr
+ */
+static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
+ device_remove_file(&iwibdev->ibdev.dev,
+ i40iw_dev_attributes[i]);
+ ib_unregister_device(&iwibdev->ibdev);
+}
+
+/**
+ * i40iw_destroy_rdma_device - destroy rdma device and free resources
+ * @iwibdev: IB device ptr
+ */
+void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
+{
+ if (!iwibdev)
+ return;
+
+ i40iw_unregister_rdma_device(iwibdev);
+ kfree(iwibdev->ibdev.iwcm);
+ iwibdev->ibdev.iwcm = NULL;
+ ib_dealloc_device(&iwibdev->ibdev);
+}
+
+/**
+ * i40iw_register_rdma_device - register iwarp device to IB
+ * @iwdev: iwarp device
+ */
+int i40iw_register_rdma_device(struct i40iw_device *iwdev)
+{
+ int i, ret;
+ struct i40iw_ib_device *iwibdev;
+
+ iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
+ if (!iwdev->iwibdev)
+ return -ENOSYS;
+ iwibdev = iwdev->iwibdev;
+
+ ret = ib_register_device(&iwibdev->ibdev, NULL);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
+ ret = device_create_file(&iwibdev->ibdev.dev,
+ i40iw_dev_attributes[i]);
+ if (ret) {
+ while (i > 0) {
+ i--;
+ device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
+ }
+ ib_unregister_device(&iwibdev->ibdev);
+ goto error;
+ }
+ }
+ return 0;
+error:
+ kfree(iwdev->iwibdev->ibdev.iwcm);
+ iwdev->iwibdev->ibdev.iwcm = NULL;
+ ib_dealloc_device(&iwdev->iwibdev->ibdev);
+ return -ENOSYS;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
new file mode 100644
index 000000000000..1101f77080e6
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -0,0 +1,173 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_VERBS_H
+#define I40IW_VERBS_H
+
+struct i40iw_ucontext {
+ struct ib_ucontext ibucontext;
+ struct i40iw_device *iwdev;
+ struct list_head cq_reg_mem_list;
+ spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
+ struct list_head qp_reg_mem_list;
+ spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
+};
+
+struct i40iw_pd {
+ struct ib_pd ibpd;
+ struct i40iw_sc_pd sc_pd;
+ atomic_t usecount;
+};
+
+struct i40iw_hmc_pble {
+ union {
+ u32 idx;
+ dma_addr_t addr;
+ };
+};
+
+struct i40iw_cq_mr {
+ struct i40iw_hmc_pble cq_pbl;
+ dma_addr_t shadow;
+};
+
+struct i40iw_qp_mr {
+ struct i40iw_hmc_pble sq_pbl;
+ struct i40iw_hmc_pble rq_pbl;
+ dma_addr_t shadow;
+ struct page *sq_page;
+};
+
+struct i40iw_pbl {
+ struct list_head list;
+ union {
+ struct i40iw_qp_mr qp_mr;
+ struct i40iw_cq_mr cq_mr;
+ };
+
+ bool pbl_allocated;
+ u64 user_base;
+ struct i40iw_pble_alloc pble_alloc;
+ struct i40iw_mr *iwmr;
+};
+
+#define MAX_SAVE_PAGE_ADDRS 4
+struct i40iw_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ struct ib_fmr ibfmr;
+ };
+ struct ib_umem *region;
+ u16 type;
+ u32 page_cnt;
+ u32 stag;
+ u64 length;
+ u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS];
+ struct i40iw_pbl iwpbl;
+};
+
+struct i40iw_cq {
+ struct ib_cq ibcq;
+ struct i40iw_sc_cq sc_cq;
+ u16 cq_head;
+ u16 cq_size;
+ u16 cq_number;
+ bool user_mode;
+ u32 polled_completions;
+ u32 cq_mem_size;
+ struct i40iw_dma_mem kmem;
+ spinlock_t lock; /* for poll cq */
+ struct i40iw_pbl *iwpbl;
+};
+
+struct disconn_work {
+ struct work_struct work;
+ struct i40iw_qp *iwqp;
+};
+
+struct iw_cm_id;
+struct ietf_mpa_frame;
+struct i40iw_ud_file;
+
+struct i40iw_qp_kmode {
+ struct i40iw_dma_mem dma_mem;
+ u64 *wrid_mem;
+};
+
+struct i40iw_qp {
+ struct ib_qp ibqp;
+ struct i40iw_sc_qp sc_qp;
+ struct i40iw_device *iwdev;
+ struct i40iw_cq *iwscq;
+ struct i40iw_cq *iwrcq;
+ struct i40iw_pd *iwpd;
+ struct i40iw_qp_host_ctx_info ctx_info;
+ struct i40iwarp_offload_info iwarp_info;
+ void *allocated_buffer;
+ atomic_t refcount;
+ struct iw_cm_id *cm_id;
+ void *cm_node;
+ struct ib_mr *lsmm_mr;
+ struct work_struct work;
+ enum ib_qp_state ibqp_state;
+ u32 iwarp_state;
+ u32 qp_mem_size;
+ u32 last_aeq;
+ atomic_t close_timer_started;
+ spinlock_t lock; /* for post work requests */
+ struct i40iw_qp_context *iwqp_context;
+ void *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ u8 active_conn:1;
+ u8 user_mode:1;
+ u8 hte_added:1;
+ u8 flush_issued:1;
+ u8 destroyed:1;
+ u8 sig_all:1;
+ u8 pau_mode:1;
+ u8 rsvd:1;
+ u16 term_sq_flush_code;
+ u16 term_rq_flush_code;
+ u8 hw_iwarp_state;
+ u8 hw_tcp_state;
+ struct i40iw_qp_kmode kqp;
+ struct i40iw_dma_mem host_ctx;
+ struct timer_list terminate_timer;
+ struct i40iw_pbl *iwpbl;
+ struct i40iw_dma_mem q2_ctx_mem;
+ struct i40iw_dma_mem ietf_mem;
+};
+#endif
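
The structures in this header embed their ib_* core counterparts, so the
driver can recover its private object from a core pointer with
container_of(). The driver's real converters (to_iwdev() and friends, used
in i40iw_verbs.c above) are defined elsewhere; the sketch below only
illustrates the pattern:

	/* Illustrative converters implied by the embedded members;
	 * the helper names here are hypothetical.
	 */
	static inline struct i40iw_pd *sketch_to_iwpd(struct ib_pd *ibpd)
	{
		return container_of(ibpd, struct i40iw_pd, ibpd);
	}

	static inline struct i40iw_qp *sketch_to_iwqp(struct ib_qp *ibqp)
	{
		return container_of(ibqp, struct i40iw_qp, ibqp);
	}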
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.c b/drivers/infiniband/hw/i40iw/i40iw_vf.c
new file mode 100644
index 000000000000..cb0f18340e14
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_vf.c
@@ -0,0 +1,85 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_vf.h"
+
+/**
+ * i40iw_manage_vf_pble_bp - manage vf pble
+ * @cqp: cqp for the cqp's sq wqe
+ * @info: pble info
+ * @scratch: pointer for completion
+ * @post_sq: to post and ring
+ */
+enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_manage_vf_pble_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 temp, header, pd_pl_pba = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ temp = LS_64(info->pd_entry_cnt, I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT) |
+ LS_64(info->first_pd_index, I40IW_CQPSQ_MVPBP_FIRST_PD_INX) |
+ LS_64(info->sd_index, I40IW_CQPSQ_MVPBP_SD_INX);
+ set_64bit_val(wqe, 16, temp);
+
+ header = LS_64((info->inv_pd_ent ? 1 : 0), I40IW_CQPSQ_MVPBP_INV_PD_ENT) |
+ LS_64(I40IW_CQP_OP_MANAGE_VF_PBLE_BP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ set_64bit_val(wqe, 24, header);
+
+ pd_pl_pba = LS_64(info->pd_pl_pba >> 3, I40IW_CQPSQ_MVPBP_PD_PLPBA);
+ set_64bit_val(wqe, 32, pd_pl_pba);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
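
LS_64(val, FIELD) shifts a value into its named field within a 64-bit WQE
word. The real macro and the *_SHIFT/*_MASK constants live in i40iw_d.h,
which is not shown in this patch; a plausible reconstruction of the idiom,
with an invented field, would be:

	/* Hypothetical reconstruction of the shift-and-mask idiom;
	 * SKETCH_FIELD_* values are invented for illustration only.
	 */
	#define SKETCH_FIELD_SHIFT	32
	#define SKETCH_FIELD_MASK	(0xffffULL << SKETCH_FIELD_SHIFT)
	#define SKETCH_LS_64(val, field) \
		(((u64)(val) << field##_SHIFT) & field##_MASK)

	/* u64 word = SKETCH_LS_64(info->pd_entry_cnt, SKETCH_FIELD); */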
+
+struct i40iw_vf_cqp_ops iw_vf_cqp_ops = {
+ i40iw_manage_vf_pble_bp
+};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.h b/drivers/infiniband/hw/i40iw/i40iw_vf.h
new file mode 100644
index 000000000000..f649f3a62e13
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_vf.h
@@ -0,0 +1,62 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_VF_H
+#define I40IW_VF_H
+
+struct i40iw_sc_cqp;
+
+struct i40iw_manage_vf_pble_info {
+ u32 sd_index;
+ u16 first_pd_index;
+ u16 pd_entry_cnt;
+ u8 inv_pd_ent;
+ u64 pd_pl_pba;
+};
+
+struct i40iw_vf_cqp_ops {
+ enum i40iw_status_code (*manage_vf_pble_bp)(struct i40iw_sc_cqp *,
+ struct i40iw_manage_vf_pble_info *,
+ u64,
+ bool);
+};
+
+enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_manage_vf_pble_info *info,
+ u64 scratch,
+ bool post_sq);
+
+extern struct i40iw_vf_cqp_ops iw_vf_cqp_ops;
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
new file mode 100644
index 000000000000..6b68f7890b76
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -0,0 +1,748 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_virtchnl.h"
+
+/**
+ * vchnl_vf_send_get_ver_req - Request Channel version
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
+
+/**
+ * vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
+
+/**
+ * vchnl_vf_send_get_pe_stats_req - Request PE stats from VF
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1;
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
+
+/**
+ * vchnl_vf_send_add_hmc_objs_req - Add HMC objects
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+ struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0;
+ add_hmc_obj->obj_type = (u16)rsrc_type;
+ add_hmc_obj->start_index = start_index;
+ add_hmc_obj->obj_count = rsrc_count;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
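
The "- 1" in the buffer-length math above compensates for the one-byte
iw_chnl_buf[1] placeholder that sizeof(*vchnl_msg) already counts (the
struct is defined later in this patch, in i40iw_virtchnl.h). As a sketch,
with a hypothetical helper name:

	/* wire length of a header plus an N-byte trailing payload when the
	 * header ends in a one-byte placeholder array
	 */
	static inline u16 sketch_vchnl_msg_len(u16 payload_len)
	{
		return sizeof(struct i40iw_virtchnl_op_buf) + payload_len - 1;
	}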
+
+/**
+ * vchnl_vf_send_del_hmc_objs_req - Delete HMC objects
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ * @rsrc_type: HMC resource type to delete
+ * @start_index: starting index for the resource
+ * @rsrc_count: number of resources to delete
+ */
+static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+ struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ memset(del_hmc_obj, 0, sizeof(*del_hmc_obj));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0;
+ del_hmc_obj->obj_type = (u16)rsrc_type;
+ del_hmc_obj->start_index = start_index;
+ del_hmc_obj->obj_count = rsrc_count;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
+
+/**
+ * vchnl_pf_send_get_ver_resp - Send channel version to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ */
+static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ struct i40iw_virtchnl_op_buf *vchnl_msg)
+{
+ enum i40iw_status_code ret_code;
+ u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1];
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+ memset(resp_buffer, 0, sizeof(resp_buffer));
+ vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+ vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+ vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
+ *((u32 *)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0;
+ ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * vchnl_pf_send_get_hmc_fcn_resp - Send HMC Function to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ */
+static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ struct i40iw_virtchnl_op_buf *vchnl_msg,
+ u16 hmc_fcn)
+{
+ enum i40iw_status_code ret_code;
+ u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1];
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+ memset(resp_buffer, 0, sizeof(resp_buffer));
+ vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+ vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+ vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
+ *((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn;
+ ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * vchnl_pf_send_get_pe_stats_resp - Send PE Stats to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ * @hw_stats: HW Stats struct
+ */
+static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ struct i40iw_virtchnl_op_buf *vchnl_msg,
+ struct i40iw_dev_hw_stats hw_stats)
+{
+ enum i40iw_status_code ret_code;
+ u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1];
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+ memset(resp_buffer, 0, sizeof(resp_buffer));
+ vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+ vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+ vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
+ *((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = hw_stats;
+ ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * vchnl_pf_send_error_resp - Send an error response to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ */
+static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
+ struct i40iw_virtchnl_op_buf *vchnl_msg,
+ u16 op_ret_code)
+{
+ enum i40iw_status_code ret_code;
+ u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)];
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+ memset(resp_buffer, 0, sizeof(resp_buffer));
+ vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+ vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+ vchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code;
+ ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn
+ * @dev: IWARP device pointer
+ * @callback_param: CQP callback parameter (VF device pointer)
+ * @cqe_info: CQP completion queue entry information
+ */
+static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
+ struct i40iw_ccq_cqe_info *cqe_info)
+{
+ struct i40iw_vfdev *vf_dev = callback_param;
+ struct i40iw_virt_mem vf_dev_mem;
+
+ if (cqe_info->error) {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "CQP Completion Error on Get HMC Function. Maj = 0x%04x, Minor = 0x%04x\n",
+ cqe_info->maj_err_code, cqe_info->min_err_code);
+ dev->vf_dev[vf_dev->iw_vf_idx] = NULL;
+ vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,
+ (u16)I40IW_ERR_CQP_COMPL_ERROR);
+ vf_dev_mem.va = vf_dev;
+ vf_dev_mem.size = sizeof(*vf_dev);
+ i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "CQP Completion Operation Return information = 0x%08x\n",
+ cqe_info->op_ret_val);
+ vf_dev->pmf_index = (u16)cqe_info->op_ret_val;
+ vf_dev->msg_count--;
+ vchnl_pf_send_get_hmc_fcn_resp(dev,
+ vf_dev->vf_id,
+ &vf_dev->vf_msg_buffer.vchnl_msg,
+ vf_dev->pmf_index);
+ }
+}
+
+/**
+ * pf_add_hmc_obj_callback - Callback for Add HMC Object
+ * @work_vf_dev: pointer to the VF Device
+ */
+static void pf_add_hmc_obj_callback(void *work_vf_dev)
+{
+ struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
+ struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
+ struct i40iw_hmc_create_obj_info info;
+ struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
+ enum i40iw_status_code ret_code;
+
+ if (!vf_dev->pf_hmc_initialized) {
+ ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL);
+ if (ret_code)
+ goto add_out;
+ vf_dev->pf_hmc_initialized = true;
+ }
+
+ add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+
+ memset(&info, 0, sizeof(info));
+ info.hmc_info = hmc_info;
+ info.is_pf = false;
+ info.rsrc_type = (u32)add_hmc_obj->obj_type;
+ info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT;
+ info.start_idx = add_hmc_obj->start_index;
+ info.count = add_hmc_obj->obj_count;
+ i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
+ "I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE. Add %u type %u objects\n",
+ info.count, info.rsrc_type);
+ ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info);
+ if (!ret_code)
+ vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count;
+add_out:
+ vf_dev->msg_count--;
+ vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
+}
+
+/**
+ * pf_del_hmc_obj_callback - Callback for delete HMC Object
+ * @work_vf_dev: pointer to the VF Device
+ */
+static void pf_del_hmc_obj_callback(void *work_vf_dev)
+{
+ struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
+ struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
+ struct i40iw_hmc_del_obj_info info;
+ struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;
+ enum i40iw_status_code ret_code = I40IW_SUCCESS;
+
+ if (!vf_dev->pf_hmc_initialized)
+ goto del_out;
+
+ del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+
+ memset(&info, 0, sizeof(info));
+ info.hmc_info = hmc_info;
+ info.is_pf = false;
+ info.rsrc_type = (u32)del_hmc_obj->obj_type;
+ info.start_idx = del_hmc_obj->start_index;
+ info.count = del_hmc_obj->obj_count;
+ i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
+ "I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE. Delete %u type %u objects\n",
+ info.count, info.rsrc_type);
+ ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false);
+del_out:
+ vf_dev->msg_count--;
+ vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
+}
+
+/**
+ * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @msg: Virtual channel message buffer pointer
+ * @len: Length of the virtual channels message
+ */
+enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len)
+{
+ struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;
+ struct i40iw_vfdev *vf_dev = NULL;
+ struct i40iw_hmc_fcn_info hmc_fcn_info;
+ u16 iw_vf_idx;
+ u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
+ struct i40iw_virt_mem vf_dev_mem;
+ struct i40iw_virtchnl_work_info work_info;
+ struct i40iw_dev_pestat *devstat;
+ enum i40iw_status_code ret_code;
+ unsigned long flags;
+
+ if (!dev || !msg || !len)
+ return I40IW_ERR_PARAM;
+
+ if (!dev->vchnl_up)
+ return I40IW_ERR_NOT_READY;
+ if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
+ /* the V0 response is returned for any requested version */
+ vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+ return I40IW_SUCCESS;
+ }
+ for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT;
+ iw_vf_idx++) {
+ if (!dev->vf_dev[iw_vf_idx]) {
+ if (first_avail_iw_vf ==
+ I40IW_MAX_PE_ENABLED_VF_COUNT)
+ first_avail_iw_vf = iw_vf_idx;
+ continue;
+ }
+ if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {
+ vf_dev = dev->vf_dev[iw_vf_idx];
+ break;
+ }
+ }
+ if (vf_dev) {
+ if (!vf_dev->msg_count) {
+ vf_dev->msg_count++;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "VF%u already has a channel message in progress.\n",
+ vf_id);
+ return I40IW_SUCCESS;
+ }
+ }
+ switch (vchnl_msg->iw_op_code) {
+ case I40IW_VCHNL_OP_GET_HMC_FCN:
+ if (!vf_dev &&
+ (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {
+ ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +
+ (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));
+ if (!ret_code) {
+ vf_dev = vf_dev_mem.va;
+ vf_dev->stats_initialized = false;
+ vf_dev->pf_dev = dev;
+ vf_dev->msg_count = 1;
+ vf_dev->vf_id = vf_id;
+ vf_dev->iw_vf_idx = first_avail_iw_vf;
+ vf_dev->pf_hmc_initialized = false;
+ vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "vf_dev %p, hmc_info %p, hmc_obj %p\n",
+ vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);
+ dev->vf_dev[first_avail_iw_vf] = vf_dev;
+ iw_vf_idx = first_avail_iw_vf;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "VF%u Unable to allocate a VF device structure.\n",
+ vf_id);
+ vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY);
+ return I40IW_SUCCESS;
+ }
+ memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
+ hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;
+ hmc_fcn_info.vf_id = vf_id;
+ hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;
+ hmc_fcn_info.cqp_callback_param = vf_dev;
+ hmc_fcn_info.free_fcn = false;
+ ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "VF%u error CQP HMC Function operation.\n",
+ vf_id);
+ ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "VF%u - i40iw_device_init_pestat failed\n",
+ vf_id);
+ vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
+ (u8)vf_dev->pmf_index,
+ dev->hw, false);
+ vf_dev->stats_initialized = true;
+ } else {
+ if (vf_dev) {
+ vf_dev->msg_count--;
+ vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);
+ } else {
+ vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,
+ (u16)I40IW_ERR_NO_MEMORY);
+ }
+ }
+ break;
+ case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:
+ if (!vf_dev)
+ return I40IW_ERR_BAD_PTR;
+ work_info.worker_vf_dev = vf_dev;
+ work_info.callback_fcn = pf_add_hmc_obj_callback;
+ memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
+ i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
+ break;
+ case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:
+ if (!vf_dev)
+ return I40IW_ERR_BAD_PTR;
+ work_info.worker_vf_dev = vf_dev;
+ work_info.callback_fcn = pf_del_hmc_obj_callback;
+ memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
+ i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
+ break;
+ case I40IW_VCHNL_OP_GET_STATS:
+ if (!vf_dev)
+ return I40IW_ERR_BAD_PTR;
+ devstat = &vf_dev->dev_pestat;
+ spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+ devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
+ spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+ vf_dev->msg_count--;
+ vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, devstat->hw_stats);
+ break;
+ default:
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n",
+ vchnl_msg->iw_op_code);
+ vchnl_pf_send_error_resp(dev, vf_id,
+ vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);
+ }
+ return I40IW_SUCCESS;
+}
+
+/**
+ * i40iw_vchnl_recv_vf - Receive VF virtual channel messages
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @msg: Virtual channel message buffer pointer
+ * @len: Length of the virtual channels message
+ */
+enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len)
+{
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg;
+ struct i40iw_virtchnl_req *vchnl_req;
+
+ vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;
+ vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code;
+ if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) {
+ if (vchnl_req->parm_len && vchnl_req->parm)
+ memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len);
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: Got response, data size %u\n", __func__,
+ vchnl_req->parm_len);
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: error length on response, Got %u, expected %u\n", __func__,
+ len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1));
+ }
+
+ return I40IW_SUCCESS;
+}
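
The PF echoes iw_chnl_op_ctx back unchanged, which is what lets the cast
above recover the original request. The round trip in miniature (a
restating of the pattern already shown, not new API):

	/*
	 *  VF send:  vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
	 *  PF reply: vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
	 *  VF recv:  vchnl_req = (struct i40iw_virtchnl_req *)
	 *                        (uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;
	 *
	 * The double cast through uintptr_t keeps the pointer/u64 conversion
	 * well-defined on both 32-bit and 64-bit builds.
	 */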
+
+/**
+ * i40iw_vchnl_vf_get_ver - Request Channel version
+ * @dev: IWARP device pointer
+ * @vchnl_ver: Virtual channel message version pointer
+ */
+enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
+ u32 *vchnl_ver)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.parm = vchnl_ver;
+ vchnl_req.parm_len = sizeof(*vchnl_ver);
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_get_hmc_fcn - Request HMC Function
+ * @dev: IWARP device pointer
+ * @hmc_fcn: HMC function index pointer
+ */
+enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
+ u16 *hmc_fcn)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.parm = hmc_fcn;
+ vchnl_req.parm_len = sizeof(*hmc_fcn);
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_add_hmc_objs - Add HMC Object
+ * @dev: IWARP device pointer
+ * @rsrc_type: HMC Resource type
+ * @start_index: Starting index of the objects to be added
+ * @rsrc_count: Number of resources to be added
+ */
+enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_add_hmc_objs_req(dev,
+ &vchnl_req,
+ rsrc_type,
+ start_index,
+ rsrc_count);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_del_hmc_obj - Delete HMC objects
+ * @dev: IWARP device pointer
+ * @rsrc_type: HMC Resource type
+ * @start_index: Starting index of the objects to delete
+ * @rsrc_count: Number of resources to be deleted
+ */
+enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_del_hmc_objs_req(dev,
+ &vchnl_req,
+ rsrc_type,
+ start_index,
+ rsrc_count);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_get_pe_stats - Get PE stats
+ * @dev: IWARP device pointer
+ * @hw_stats: HW stats struct
+ */
+enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
+ struct i40iw_dev_hw_stats *hw_stats)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.parm = hw_stats;
+ vchnl_req.parm_len = sizeof(*hw_stats);
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h
new file mode 100644
index 000000000000..24886ef08293
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h
@@ -0,0 +1,124 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_VIRTCHNL_H
+#define I40IW_VIRTCHNL_H
+
+#include "i40iw_hmc.h"
+
+#pragma pack(push, 1)
+
+struct i40iw_virtchnl_op_buf {
+ u16 iw_op_code;
+ u16 iw_op_ver;
+ u16 iw_chnl_buf_len;
+ u16 rsvd;
+ u64 iw_chnl_op_ctx;
+ /* Member alignment MUST be maintained above this location */
+ u8 iw_chnl_buf[1];
+};
+
+struct i40iw_virtchnl_resp_buf {
+ u64 iw_chnl_op_ctx;
+ u16 iw_chnl_buf_len;
+ s16 iw_op_ret_code;
+ /* Member alignment MUST be maintained above this location */
+ u16 rsvd[2];
+ u8 iw_chnl_buf[1];
+};
+
+enum i40iw_virtchnl_ops {
+ I40IW_VCHNL_OP_GET_VER = 0,
+ I40IW_VCHNL_OP_GET_HMC_FCN,
+ I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE,
+ I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE,
+ I40IW_VCHNL_OP_GET_STATS
+};
+
+#define I40IW_VCHNL_OP_GET_VER_V0 0
+#define I40IW_VCHNL_OP_GET_HMC_FCN_V0 0
+#define I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0
+#define I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0
+#define I40IW_VCHNL_OP_GET_STATS_V0 0
+#define I40IW_VCHNL_CHNL_VER_V0 0
+
+struct i40iw_dev_hw_stats;
+
+struct i40iw_virtchnl_hmc_obj_range {
+ u16 obj_type;
+ u16 rsvd;
+ u32 start_index;
+ u32 obj_count;
+};
+
+enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len);
+
+enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len);
+
+struct i40iw_virtchnl_req {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_virtchnl_op_buf *vchnl_msg;
+ void *parm;
+ u32 vf_id;
+ u16 parm_len;
+ s16 ret_code;
+};
+
+#pragma pack(pop)
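
Because these structures cross the PF/VF channel as raw bytes, pack(1) pins
their layout. A compile-time guard like the one below (not part of the
original patch; the byte counts follow from the packed fields above) would
catch accidental layout drift:

	/* op_buf:   4 * u16 + u64 + u8[1] = 17 bytes packed
	 * resp_buf: u64 + u16 + s16 + 2 * u16 + u8[1] = 17 bytes packed
	 */
	static inline void sketch_virtchnl_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct i40iw_virtchnl_op_buf) != 17);
		BUILD_BUG_ON(sizeof(struct i40iw_virtchnl_resp_buf) != 17);
	}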
+
+enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
+ u32 *vchnl_ver);
+
+enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
+ u16 *hmc_fcn);
+
+enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count);
+
+enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count);
+
+enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
+ struct i40iw_dev_hw_stats *hw_stats);
+#endif
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index fc01deac1d3c..db4aa13ebae0 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,7 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
+ depends on MAY_USE_DEVLINK
select NET_VENDOR_MELLANOX
select MLX4_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 21cb41a60fe8..c74ef2620b85 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -310,7 +310,7 @@ static void aliasguid_query_handler(int status,
if (status) {
pr_debug("(port: %d) failed: status = %d\n",
cb_ctx->port, status);
- rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
+ rec->time_to_run = ktime_get_boot_ns() + 1 * NSEC_PER_SEC;
goto out;
}
@@ -416,7 +416,7 @@ next_entry:
be64_to_cpu((__force __be64)rec->guid_indexes),
be64_to_cpu((__force __be64)applied_guid_indexes),
be64_to_cpu((__force __be64)declined_guid_indexes));
- rec->time_to_run = ktime_get_real_ns() +
+ rec->time_to_run = ktime_get_boot_ns() +
resched_delay_sec * NSEC_PER_SEC;
} else {
rec->status = MLX4_GUID_INFO_STATUS_SET;
@@ -708,7 +708,7 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
}
}
if (resched_delay_sec) {
- u64 curr_time = ktime_get_real_ns();
+ u64 curr_time = ktime_get_boot_ns();
*resched_delay_sec = (low_record_time < curr_time) ? 0 :
div_u64((low_record_time - curr_time), NSEC_PER_SEC);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1c7ab6cabbb8..f014eaf5969b 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -41,6 +41,7 @@
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/devlink.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
@@ -1643,6 +1644,56 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
return err;
}
+static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
+ struct ib_flow_attr *flow_attr,
+ enum mlx4_net_trans_promisc_mode *type)
+{
+ int err = 0;
+
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
+ (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
+ (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_attr->num_of_specs == 0) {
+ type[0] = MLX4_FS_MC_SNIFFER;
+ type[1] = MLX4_FS_UC_SNIFFER;
+ } else {
+ union ib_flow_spec *ib_spec;
+
+ ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+ if (ib_spec->type != IB_FLOW_SPEC_ETH)
+ return -EINVAL;
+
+ /* if the mask is all zero, then match both MC and UC */
+ if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
+ type[0] = MLX4_FS_MC_SNIFFER;
+ type[1] = MLX4_FS_UC_SNIFFER;
+ } else {
+ u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
+ ib_spec->eth.mask.dst_mac[1],
+ ib_spec->eth.mask.dst_mac[2],
+ ib_spec->eth.mask.dst_mac[3],
+ ib_spec->eth.mask.dst_mac[4],
+ ib_spec->eth.mask.dst_mac[5]};
+
+ /* The xor above touched only the MC bit; a non-empty mask is
+ * valid only if that bit is set and the rest are zero.
+ */
+ if (!is_zero_ether_addr(&mac[0]))
+ return -EINVAL;
+
+ if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
+ type[0] = MLX4_FS_MC_SNIFFER;
+ else
+ type[0] = MLX4_FS_UC_SNIFFER;
+ }
+ }
+
+ return err;
+}
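
The xor above flips only the multicast bit (the low bit of the first
octet), so the zero test that follows accepts exactly the masks that cover
nothing but that bit. A worked example with illustrative mask values:

	/*
	 *  mask 01:00:00:00:00:00 -> mask[0] ^ 0x01 == 0x00, array all zero
	 *                            -> valid; MC vs UC decided by the value
	 *  mask 03:00:00:00:00:00 -> mask[0] ^ 0x01 == 0x02, non-zero
	 *                            -> rejected with -EINVAL
	 */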
+
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
@@ -1653,6 +1704,10 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
+ if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+ (flow_attr->type != IB_FLOW_ATTR_NORMAL))
+ return ERR_PTR(-EOPNOTSUPP);
+
memset(type, 0, sizeof(type));
mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
@@ -1663,7 +1718,19 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
switch (flow_attr->type) {
case IB_FLOW_ATTR_NORMAL:
- type[0] = MLX4_FS_REGULAR;
+ /* If the dont-trap flag (continue match) is set, then under
+ * specific conditions traffic is replicated to the given qp
+ * without being stolen from its normal destination.
+ */
+ if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
+ err = mlx4_ib_add_dont_trap_rule(dev,
+ flow_attr,
+ type);
+ if (err)
+ goto err_free;
+ } else {
+ type[0] = MLX4_FS_REGULAR;
+ }
break;
case IB_FLOW_ATTR_ALL_DEFAULT:
@@ -1675,8 +1742,8 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
break;
case IB_FLOW_ATTR_SNIFFER:
- type[0] = MLX4_FS_UC_SNIFFER;
- type[1] = MLX4_FS_MC_SNIFFER;
+ type[0] = MLX4_FS_MIRROR_RX_PORT;
+ type[1] = MLX4_FS_MIRROR_SX_PORT;
break;
default:
@@ -2519,6 +2586,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
ibdev->ib_active = true;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
+ &ibdev->ib_dev);
if (mlx4_is_mfunc(ibdev->dev))
init_pkeys(ibdev);
@@ -2643,7 +2713,10 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
struct mlx4_ib_dev *ibdev = ibdev_ptr;
int p;
+ int i;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
ibdev->ib_active = false;
flush_workqueue(wq);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 52ce7b000044..1eca01cebe51 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -711,7 +711,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 242b94ec105b..ce0b5aa8eb9b 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -32,6 +32,7 @@
*/
#include <linux/slab.h>
+#include <rdma/ib_user_verbs.h>
#include "mlx4_ib.h"
@@ -334,7 +335,8 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
struct mlx4_ib_mw *mw;
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 27a70159e2ea..7493a83acd28 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
-mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
+mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index fd1de31e0611..a00ba4418de9 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -207,7 +207,10 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
- wc->wc_flags = 0;
+ wc->wc_flags = IB_WC_IP_CSUM_OK;
+ if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK))))
+ wc->wc_flags = 0;
break;
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
@@ -431,7 +434,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
struct mlx5_sig_err_cqe *sig_err_cqe;
- struct mlx5_core_mr *mmr;
+ struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
uint8_t opcode;
uint32_t qpn;
@@ -536,17 +539,17 @@ repoll:
case MLX5_CQE_SIG_ERR:
sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
- read_lock(&dev->mdev->priv.mr_table.lock);
- mmr = __mlx5_mr_lookup(dev->mdev,
- mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
- if (unlikely(!mmr)) {
- read_unlock(&dev->mdev->priv.mr_table.lock);
+ read_lock(&dev->mdev->priv.mkey_table.lock);
+ mmkey = __mlx5_mr_lookup(dev->mdev,
+ mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+ if (unlikely(!mmkey)) {
+ read_unlock(&dev->mdev->priv.mkey_table.lock);
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
return -EINVAL;
}
- mr = to_mibmr(mmr);
+ mr = to_mibmr(mmkey);
get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
mr->sig->sig_err_exists = true;
mr->sig->sigerr_count++;
@@ -558,25 +561,51 @@ repoll:
mr->sig->err_item.expected,
mr->sig->err_item.actual);
- read_unlock(&dev->mdev->priv.mr_table.lock);
+ read_unlock(&dev->mdev->priv.mkey_table.lock);
goto repoll;
}
return 0;
}
+static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
+ struct ib_wc *wc)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_ib_wc *soft_wc, *next;
+ int npolled = 0;
+
+ list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
+ if (npolled >= num_entries)
+ break;
+
+ mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
+ cq->mcq.cqn);
+
+ wc[npolled++] = soft_wc->wc;
+ list_del(&soft_wc->list);
+ kfree(soft_wc);
+ }
+
+ return npolled;
+}
+
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_ib_qp *cur_qp = NULL;
unsigned long flags;
+ int soft_polled = 0;
int npolled;
int err = 0;
spin_lock_irqsave(&cq->lock, flags);
- for (npolled = 0; npolled < num_entries; npolled++) {
- err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
+ if (unlikely(!list_empty(&cq->wc_list)))
+ soft_polled = poll_soft_wc(cq, num_entries, wc);
+
+ for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
+ err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
if (err)
break;
}
@@ -587,7 +616,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_unlock_irqrestore(&cq->lock, flags);
if (err == 0 || err == -EAGAIN)
- return npolled;
+ return soft_polled + npolled;
else
return err;
}
@@ -595,16 +624,27 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+ unsigned long irq_flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->lock, irq_flags);
+ if (cq->notify_flags != IB_CQ_NEXT_COMP)
+ cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
- mlx5_cq_arm(&to_mcq(ibcq)->mcq,
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
+ ret = 1;
+ spin_unlock_irqrestore(&cq->lock, irq_flags);
+
+ mlx5_cq_arm(&cq->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
uar_page,
MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
to_mcq(ibcq)->mcq.cons_index);
- return 0;
+ return ret;
}
static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
@@ -757,6 +797,14 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
mlx5_db_free(dev->mdev, &cq->db);
}
+static void notify_soft_wc_handler(struct work_struct *work)
+{
+ struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
+ notify_work);
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
@@ -807,6 +855,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
&index, &inlen);
if (err)
goto err_create;
+
+ INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
cq->cqe_size = cqe_size;
@@ -832,6 +882,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
cq->mcq.comp = mlx5_ib_cq_comp;
cq->mcq.event = mlx5_ib_cq_event;
+ INIT_LIST_HEAD(&cq->wc_list);
+
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
err = -EFAULT;
@@ -1219,3 +1271,27 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
cq = to_mcq(ibcq);
return cq->cqe_size;
}
+
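+/*
+ * Queue a software-generated completion on a kernel CQ. The GSI code
+ * uses this to report, on the user-visible GSI CQ, completions for
+ * sends that were executed on an internal TX QP or silently dropped.
+ */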
+/* Called from atomic context */
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
+{
+ struct mlx5_ib_wc *soft_wc;
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ unsigned long flags;
+
+ soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
+ if (!soft_wc)
+ return -ENOMEM;
+
+ soft_wc->wc = *wc;
+ spin_lock_irqsave(&cq->lock, flags);
+ list_add_tail(&soft_wc->list, &cq->wc_list);
+ if (cq->notify_flags == IB_CQ_NEXT_COMP ||
+ wc->status != IB_WC_SUCCESS) {
+ cq->notify_flags = 0;
+ schedule_work(&cq->notify_work);
+ }
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
new file mode 100644
index 000000000000..53e03c8ede79
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx5_ib.h"
+
+struct mlx5_ib_gsi_wr {
+ struct ib_cqe cqe;
+ struct ib_wc wc;
+ int send_flags;
+ bool completed:1;
+};
+
+struct mlx5_ib_gsi_qp {
+ struct ib_qp ibqp;
+ struct ib_qp *rx_qp;
+ u8 port_num;
+ struct ib_qp_cap cap;
+ enum ib_sig_type sq_sig_type;
+ /* Serialize qp state modifications */
+ struct mutex mutex;
+ struct ib_cq *cq;
+ struct mlx5_ib_gsi_wr *outstanding_wrs;
+ u32 outstanding_pi, outstanding_ci;
+ int num_qps;
+ /* Protects access to the tx_qps. Post send operations synchronize
+ * with tx_qp creation in setup_qp(). Also protects the
+ * outstanding_wrs array and indices.
+ */
+ spinlock_t lock;
+ struct ib_qp **tx_qps;
+};
+
+static struct mlx5_ib_gsi_qp *gsi_qp(struct ib_qp *qp)
+{
+ return container_of(qp, struct mlx5_ib_gsi_qp, ibqp);
+}
+
+static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
+}
+
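+/*
+ * outstanding_pi and outstanding_ci are free-running 32-bit indices,
+ * reduced modulo the ring size only when indexing outstanding_wrs.
+ * This keeps the ring-full test (pi == ci + max_send_wr) unambiguous,
+ * e.g. with max_send_wr = 4, pi = 5 and ci = 2 there are three WRs
+ * outstanding, the next slot to fill is 5 % 4 = 1, and the ring is
+ * full once pi reaches 6.
+ */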
+#define for_each_outstanding_wr(gsi, index) \
+	for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \
+	     index++)
+
+/* Call with gsi->lock locked */
+static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
+{
+ struct ib_cq *gsi_cq = gsi->ibqp.send_cq;
+ struct mlx5_ib_gsi_wr *wr;
+ u32 index;
+
+ for_each_outstanding_wr(gsi, index) {
+ wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
+
+ if (!wr->completed)
+ break;
+
+ if (gsi->sq_sig_type == IB_SIGNAL_ALL_WR ||
+ wr->send_flags & IB_SEND_SIGNALED)
+ WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
+
+ wr->completed = false;
+ }
+
+ gsi->outstanding_ci = index;
+}
+
+static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
+ struct mlx5_ib_gsi_wr *wr =
+ container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
+ u64 wr_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ wr->completed = true;
+ wr_id = wr->wc.wr_id;
+ wr->wc = *wc;
+ wr->wc.wr_id = wr_id;
+ wr->wc.qp = &gsi->ibqp;
+
+ generate_completions(gsi);
+ spin_unlock_irqrestore(&gsi->lock, flags);
+}
+
+struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_gsi_qp *gsi;
+ struct ib_qp_init_attr hw_init_attr = *init_attr;
+ const u8 port_num = init_attr->port_num;
+ const int num_pkeys = pd->device->attrs.max_pkeys;
+ const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
+ int ret;
+
+ mlx5_ib_dbg(dev, "creating GSI QP\n");
+
+ if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
+ mlx5_ib_warn(dev,
+ "invalid port number %d during GSI QP creation\n",
+ port_num);
+ return ERR_PTR(-EINVAL);
+ }
+
+ gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
+ if (!gsi)
+ return ERR_PTR(-ENOMEM);
+
+ gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
+ if (!gsi->tx_qps) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,
+ sizeof(*gsi->outstanding_wrs),
+ GFP_KERNEL);
+ if (!gsi->outstanding_wrs) {
+ ret = -ENOMEM;
+ goto err_free_tx;
+ }
+
+ mutex_init(&gsi->mutex);
+
+ mutex_lock(&dev->devr.mutex);
+
+ if (dev->devr.ports[port_num - 1].gsi) {
+ mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
+ port_num);
+ ret = -EBUSY;
+ goto err_free_wrs;
+ }
+ gsi->num_qps = num_qps;
+ spin_lock_init(&gsi->lock);
+
+ gsi->cap = init_attr->cap;
+ gsi->sq_sig_type = init_attr->sq_sig_type;
+ gsi->ibqp.qp_num = 1;
+ gsi->port_num = port_num;
+
+ gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,
+ IB_POLL_SOFTIRQ);
+ if (IS_ERR(gsi->cq)) {
+ mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
+ PTR_ERR(gsi->cq));
+ ret = PTR_ERR(gsi->cq);
+ goto err_free_wrs;
+ }
+
+ hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
+ hw_init_attr.send_cq = gsi->cq;
+ if (num_qps) {
+ hw_init_attr.cap.max_send_wr = 0;
+ hw_init_attr.cap.max_send_sge = 0;
+ hw_init_attr.cap.max_inline_data = 0;
+ }
+ gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
+ if (IS_ERR(gsi->rx_qp)) {
+ mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
+ PTR_ERR(gsi->rx_qp));
+ ret = PTR_ERR(gsi->rx_qp);
+ goto err_destroy_cq;
+ }
+
+ dev->devr.ports[init_attr->port_num - 1].gsi = gsi;
+
+ mutex_unlock(&dev->devr.mutex);
+
+ return &gsi->ibqp;
+
+err_destroy_cq:
+ ib_free_cq(gsi->cq);
+err_free_wrs:
+ mutex_unlock(&dev->devr.mutex);
+ kfree(gsi->outstanding_wrs);
+err_free_tx:
+ kfree(gsi->tx_qps);
+err_free:
+ kfree(gsi);
+ return ERR_PTR(ret);
+}
+
+int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ const int port_num = gsi->port_num;
+ int qp_index;
+ int ret;
+
+ mlx5_ib_dbg(dev, "destroying GSI QP\n");
+
+ mutex_lock(&dev->devr.mutex);
+ ret = ib_destroy_qp(gsi->rx_qp);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
+ ret);
+ mutex_unlock(&dev->devr.mutex);
+ return ret;
+ }
+ dev->devr.ports[port_num - 1].gsi = NULL;
+ mutex_unlock(&dev->devr.mutex);
+ gsi->rx_qp = NULL;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
+ if (!gsi->tx_qps[qp_index])
+ continue;
+ WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
+ gsi->tx_qps[qp_index] = NULL;
+ }
+
+ ib_free_cq(gsi->cq);
+
+ kfree(gsi->outstanding_wrs);
+ kfree(gsi->tx_qps);
+ kfree(gsi);
+
+ return 0;
+}
+
+static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
+{
+ struct ib_pd *pd = gsi->rx_qp->pd;
+ struct ib_qp_init_attr init_attr = {
+ .event_handler = gsi->rx_qp->event_handler,
+ .qp_context = gsi->rx_qp->qp_context,
+ .send_cq = gsi->cq,
+ .recv_cq = gsi->rx_qp->recv_cq,
+ .cap = {
+ .max_send_wr = gsi->cap.max_send_wr,
+ .max_send_sge = gsi->cap.max_send_sge,
+ .max_inline_data = gsi->cap.max_inline_data,
+ },
+ .sq_sig_type = gsi->sq_sig_type,
+ .qp_type = IB_QPT_UD,
+ .create_flags = mlx5_ib_create_qp_sqpn_qp1(),
+ };
+
+ return ib_create_qp(pd, &init_attr);
+}
+
+static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
+ u16 qp_index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct ib_qp_attr attr;
+ int mask;
+ int ret;
+
+ mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
+ attr.qp_state = IB_QPS_INIT;
+ attr.pkey_index = qp_index;
+ attr.qkey = IB_QP1_QKEY;
+ attr.port_num = gsi->port_num;
+ ret = ib_modify_qp(qp, &attr, mask);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ attr.qp_state = IB_QPS_RTR;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ attr.qp_state = IB_QPS_RTS;
+ attr.sq_psn = 0;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
+{
+ struct ib_device *device = gsi->rx_qp->device;
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct ib_qp *qp;
+ unsigned long flags;
+ u16 pkey;
+ int ret;
+
+ ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ if (!pkey) {
+ mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d. Skipping.\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ qp = gsi->tx_qps[qp_index];
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ if (qp) {
+ mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ qp = create_gsi_ud_qp(gsi);
+ if (IS_ERR(qp)) {
+ mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
+ PTR_ERR(qp));
+ return;
+ }
+
+ ret = modify_to_rts(gsi, qp, qp_index);
+ if (ret)
+ goto err_destroy_qp;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ WARN_ON_ONCE(gsi->tx_qps[qp_index]);
+ gsi->tx_qps[qp_index] = qp;
+ spin_unlock_irqrestore(&gsi->lock, flags);
+
+ return;
+
+err_destroy_qp:
+ WARN_ON_ONCE(qp);
+}
+
+static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
+{
+ u16 qp_index;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+ setup_qp(gsi, qp_index);
+}
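+
+/*
+ * One UD TX QP is kept per valid P_Key table entry. setup_qps() runs
+ * again via mlx5_ib_gsi_pkey_change() when the P_Key table changes,
+ * creating QPs for entries that have since become valid.
+ */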
+
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ int ret;
+
+ mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
+
+ mutex_lock(&gsi->mutex);
+ ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
+ goto unlock;
+ }
+
+ if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
+ setup_qps(gsi);
+
+unlock:
+ mutex_unlock(&gsi->mutex);
+
+ return ret;
+}
+
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ int ret;
+
+ mutex_lock(&gsi->mutex);
+ ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
+ qp_init_attr->cap = gsi->cap;
+ mutex_unlock(&gsi->mutex);
+
+ return ret;
+}
+
+/* Call with gsi->lock locked */
+static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
+ struct ib_ud_wr *wr, struct ib_wc *wc)
+{
+ struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ struct mlx5_ib_gsi_wr *gsi_wr;
+
+ if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
+ mlx5_ib_warn(dev, "no available GSI work request.\n");
+ return -ENOMEM;
+ }
+
+ gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
+				gsi->cap.max_send_wr];
+ gsi->outstanding_pi++;
+
+ gsi_wr->send_flags = wr->wr.send_flags;
+ if (!wc) {
+ memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
+ gsi_wr->wc.pkey_index = wr->pkey_index;
+ gsi_wr->wc.wr_id = wr->wr.wr_id;
+ } else {
+ gsi_wr->wc = *wc;
+ gsi_wr->completed = true;
+ }
+
+ gsi_wr->cqe.done = &handle_single_completion;
+ wr->wr.wr_cqe = &gsi_wr->cqe;
+
+ return 0;
+}
+
+/* Call with gsi->lock locked */
+static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
+ struct ib_ud_wr *wr)
+{
+ struct ib_wc wc = {
+ { .wr_id = wr->wr.wr_id },
+ .status = IB_WC_SUCCESS,
+ .opcode = IB_WC_SEND,
+ .qp = &gsi->ibqp,
+ };
+ int ret;
+
+ ret = mlx5_ib_add_outstanding_wr(gsi, wr, &wc);
+ if (ret)
+ return ret;
+
+ generate_completions(gsi);
+
+ return 0;
+}
+
+/* Call with gsi->lock locked */
+static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ int qp_index = wr->pkey_index;
+
+ if (!mlx5_ib_deth_sqpn_cap(dev))
+ return gsi->rx_qp;
+
+ if (qp_index >= gsi->num_qps)
+ return NULL;
+
+ return gsi->tx_qps[qp_index];
+}
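+
+/*
+ * Without the set_deth_sqpn capability every send goes through the
+ * hardware GSI QP itself. Otherwise the TX QP matching the WR's P_Key
+ * index is used; a missing TX QP (invalid P_Key) means the send is
+ * dropped silently, though a success completion is still generated.
+ */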
+
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ struct ib_qp *tx_qp;
+ unsigned long flags;
+ int ret;
+
+ for (; wr; wr = wr->next) {
+ struct ib_ud_wr cur_wr = *ud_wr(wr);
+
+ cur_wr.wr.next = NULL;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ tx_qp = get_tx_qp(gsi, &cur_wr);
+ if (!tx_qp) {
+ ret = mlx5_ib_gsi_silent_drop(gsi, &cur_wr);
+ if (ret)
+ goto err;
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ continue;
+ }
+
+ ret = mlx5_ib_add_outstanding_wr(gsi, &cur_wr, NULL);
+ if (ret)
+ goto err;
+
+ ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
+ if (ret) {
+			/* Undo the effect of adding the outstanding wr */
+			gsi->outstanding_pi--;
+ goto err;
+ }
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ }
+
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ *bad_wr = wr;
+ return ret;
+}
+
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+
+ return ib_post_recv(gsi->rx_qp, wr, bad_wr);
+}
+
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
+{
+ if (!gsi)
+ return;
+
+ mutex_lock(&gsi->mutex);
+ setup_qps(gsi);
+ mutex_unlock(&gsi->mutex);
+}
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
new file mode 100644
index 000000000000..c1b9de800fe5
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_ib.h"
+
+static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
+{
+ switch (mlx_policy) {
+ case MLX5_POLICY_DOWN:
+ return IFLA_VF_LINK_STATE_DISABLE;
+ case MLX5_POLICY_UP:
+ return IFLA_VF_LINK_STATE_ENABLE;
+ case MLX5_POLICY_FOLLOW:
+ return IFLA_VF_LINK_STATE_AUTO;
+ default:
+ return __IFLA_VF_LINK_STATE_MAX;
+ }
+}
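+
+/* Vport 0 is the PF itself, so VF n is addressed as vport n + 1. */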
+
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_info *info)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n",
+ vf, err);
+ goto free;
+ }
+ memset(info, 0, sizeof(*info));
+ info->linkstate = mlx_to_net_policy(rep->policy);
+ if (info->linkstate == __IFLA_VF_LINK_STATE_MAX)
+ err = -EINVAL;
+
+free:
+ kfree(rep);
+ return err;
+}
+
+static inline enum port_state_policy net_to_mlx_policy(int policy)
+{
+ switch (policy) {
+ case IFLA_VF_LINK_STATE_DISABLE:
+ return MLX5_POLICY_DOWN;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ return MLX5_POLICY_UP;
+ case IFLA_VF_LINK_STATE_AUTO:
+ return MLX5_POLICY_FOLLOW;
+ default:
+ return MLX5_POLICY_INVALID;
+ }
+}
+
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+ u8 port, int state)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->policy = net_to_mlx_policy(state);
+ if (in->policy == MLX5_POLICY_INVALID) {
+ err = -EINVAL;
+ goto out;
+ }
+ in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+
+out:
+ kfree(in);
+ return err;
+}
+
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_stats *stats)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *dev;
+ void *out;
+ int err;
+
+ dev = to_mdev(device);
+ mdev = dev->mdev;
+
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz);
+ if (err)
+ goto ex;
+
+ stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets);
+ stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets);
+ stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets);
+ stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets);
+ stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets);
+
+ex:
+ kfree(out);
+ return err;
+}
+
+static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
+ in->node_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ kfree(in);
+ return err;
+}
+
+static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
+ in->port_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ kfree(in);
+ return err;
+}
+
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+ u64 guid, int type)
+{
+ if (type == IFLA_VF_IB_NODE_GUID)
+ return set_vf_node_guid(device, vf, port, guid);
+ else if (type == IFLA_VF_IB_PORT_GUID)
+ return set_vf_port_guid(device, vf, port, guid);
+
+ return -EINVAL;
+}
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index b84d13a487cc..1534af113058 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -31,8 +31,10 @@
*/
#include <linux/mlx5/cmd.h>
+#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
enum {
@@ -57,20 +59,12 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}
-int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
u16 slid;
int err;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
@@ -117,6 +111,156 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
+static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
+ void *out)
+{
+#define MLX5_SUM_CNT(p, cntr1, cntr2) \
+ (MLX5_GET64(query_vport_counter_out, p, cntr1) + \
+ MLX5_GET64(query_vport_counter_out, p, cntr2))
+
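+	/*
+	 * PortXmitData and PortRcvData are defined in units of four
+	 * octets (32-bit words), hence the >> 2 applied to the octet
+	 * counters.
+	 */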
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
+ transmitted_ib_multicast.octets) >> 2);
+	pma_cnt_ext->port_rcv_data =
+		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
+					 received_ib_multicast.octets) >> 2);
+ pma_cnt_ext->port_xmit_packets =
+ cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
+ transmitted_ib_multicast.packets));
+ pma_cnt_ext->port_rcv_packets =
+ cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
+ received_ib_multicast.packets));
+ pma_cnt_ext->port_unicast_xmit_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, transmitted_ib_unicast.packets);
+ pma_cnt_ext->port_unicast_rcv_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, received_ib_unicast.packets);
+ pma_cnt_ext->port_multicast_xmit_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, transmitted_ib_multicast.packets);
+ pma_cnt_ext->port_multicast_rcv_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, received_ib_multicast.packets);
+}
+
+static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
+ void *out)
+{
+	/* Traffic counters are reported in their 64-bit form via
+	 * ib_pma_portcounters_ext by default.
+	 */
+ void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
+ counter_set);
+
+#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) { \
+ counter_var = MLX5_GET_BE(typeof(counter_var), \
+ ib_port_cntrs_grp_data_layout, \
+ out_pma, counter_name); \
+ }
+
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
+ symbol_error_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
+ link_error_recovery_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
+ link_downed_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
+ port_rcv_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
+ port_rcv_remote_physical_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
+ port_rcv_switch_relay_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
+ port_xmit_discards);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
+ port_xmit_constraint_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
+ port_rcv_constraint_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
+ link_overrun_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
+ vl_15_dropped);
+}
+
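+/*
+ * The PMA attribute payload starts at byte 64 of the MAD, and
+ * out_mad->data begins right after the 24-byte common header, hence
+ * the "+ 40" offsets used below.
+ */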
+static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ int err;
+ void *out_cnt;
+
+	/* Declaring support for extended counters */
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
+ struct ib_class_port_info cpi = {};
+
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ }
+
+ if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
+ struct ib_pma_portcounters_ext *pma_cnt_ext =
+ (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+
+ out_cnt = mlx5_vzalloc(sz);
+ if (!out_cnt)
+ return IB_MAD_RESULT_FAILURE;
+
+ err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
+ port_num, out_cnt, sz);
+ if (!err)
+ pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
+ } else {
+ struct ib_pma_portcounters *pma_cnt =
+ (struct ib_pma_portcounters *)(out_mad->data + 40);
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ out_cnt = mlx5_vzalloc(sz);
+ if (!out_cnt)
+ return IB_MAD_RESULT_FAILURE;
+
+ err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
+ out_cnt, sz);
+ if (!err)
+ pma_cnt_assign(pma_cnt, out_cnt);
+ }
+
+ kvfree(out_cnt);
+ if (err)
+ return IB_MAD_RESULT_FAILURE;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ const struct ib_mad *in_mad = (const struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
+
+ memset(out_mad->data, 0, sizeof(out_mad->data));
+
+ if (MLX5_CAP_GEN(mdev, vport_counters) &&
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
+ return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
+ } else {
+ return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in_mad, out_mad);
+ }
+}
+
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
struct ib_smp *in_mad = NULL;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 03c418ccbc98..6ad0489cb3c5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -42,6 +42,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
+#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
@@ -283,7 +284,7 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
- return !dev->mdev->issi;
+ return !MLX5_CAP_GEN(dev->mdev, ib_virt);
}
enum {
@@ -487,6 +488,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
if (MLX5_CAP_GEN(mdev, xrc))
props->device_cap_flags |= IB_DEVICE_XRC;
+ if (MLX5_CAP_GEN(mdev, imaicl)) {
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_WINDOW_TYPE_2B;
+ props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+ /* We support 'Gappy' memory registration too */
+ props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
+ }
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
if (MLX5_CAP_GEN(mdev, sho)) {
props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
@@ -504,6 +512,11 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
(MLX5_CAP_ETH(dev->mdev, csum_cap)))
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
+ if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
+ props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+ props->device_cap_flags |= IB_DEVICE_UD_TSO;
+ }
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -517,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_sge_rd = props->max_sge;
+ props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -529,7 +542,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
props->max_srq_sge = max_rq_sg - 1;
- props->max_fast_reg_page_list_len = (unsigned int)-1;
+ props->max_fast_reg_page_list_len =
+ 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
get_atomic_caps(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
@@ -549,6 +563,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, cd))
props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
+ if (!mlx5_core_is_pf(mdev))
+ props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
+
return 0;
}
@@ -654,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *rep;
- int max_mtu;
- int oper_mtu;
+ u16 max_mtu;
+ u16 oper_mtu;
int err;
u8 ib_link_width_oper;
u8 vl_hw_cap;
@@ -686,6 +703,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
props->qkey_viol_cntr = rep->qkey_violation_counter;
props->subnet_timeout = rep->subnet_timeout;
props->init_type_reply = rep->init_type_reply;
+ props->grh_required = rep->grh_required;
err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
if (err)
@@ -1369,11 +1387,20 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
return 0;
}
+static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
+{
+ priority *= 2;
+ if (!dont_trap)
+ priority++;
+ return priority;
+}
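+
+/*
+ * Each IB flow priority maps onto a pair of core priorities: don't-trap
+ * rules take the even slot (2 * prio) and regular rules the odd one
+ * (2 * prio + 1), so a don't-trap rule takes precedence over the
+ * regular rule at the same user priority.
+ */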
+
#define MLX5_FS_MAX_TYPES 10
#define MLX5_FS_MAX_ENTRIES 32000UL
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
struct ib_flow_attr *flow_attr)
{
+ bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_table *ft;
@@ -1383,10 +1410,12 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
int err = 0;
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- if (flow_is_multicast_only(flow_attr))
+ if (flow_is_multicast_only(flow_attr) &&
+ !dont_trap)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
- priority = flow_attr->priority;
+ priority = ib_prio_to_core_prio(flow_attr->priority,
+ dont_trap);
ns = mlx5_get_flow_namespace(dev->mdev,
MLX5_FLOW_NAMESPACE_BYPASS);
num_entries = MLX5_FS_MAX_ENTRIES;
@@ -1434,6 +1463,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
unsigned int spec_index;
u32 *match_c;
u32 *match_v;
+ u32 action;
int err = 0;
if (!is_valid_attr(flow_attr))
@@ -1459,9 +1489,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
/* Outer header support only */
match_criteria_enable = (!outer_header_zero(match_c)) << 0;
+ action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
match_c, match_v,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ action,
MLX5_FS_DEFAULT_FLOW_TAG,
dst);
@@ -1481,6 +1513,29 @@ free:
return err ? ERR_PTR(err) : handler;
}
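+
+/*
+ * A don't-trap flow is built from two rules at the same priority: one
+ * that lets matching packets continue to the next priority level and
+ * one that also steers them to the destination QP.
+ */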
+static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_ib_flow_handler *handler_dst = NULL;
+ struct mlx5_ib_flow_handler *handler = NULL;
+
+ handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
+ if (!IS_ERR(handler)) {
+ handler_dst = create_flow_rule(dev, ft_prio,
+ flow_attr, dst);
+ if (IS_ERR(handler_dst)) {
+ mlx5_del_flow_rule(handler->rule);
+ kfree(handler);
+ handler = handler_dst;
+ } else {
+ list_add(&handler_dst->list, &handler->list);
+ }
+ }
+
+ return handler;
+}
enum {
LEFTOVERS_MC,
LEFTOVERS_UC,
@@ -1558,7 +1613,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
if (domain != IB_FLOW_DOMAIN_USER ||
flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
- flow_attr->flags)
+ (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
return ERR_PTR(-EINVAL);
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
@@ -1577,8 +1632,13 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- handler = create_flow_rule(dev, ft_prio, flow_attr,
- dst);
+ if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
+ handler = create_dont_trap_rule(dev, ft_prio,
+ flow_attr, dst);
+ } else {
+ handler = create_flow_rule(dev, ft_prio, flow_attr,
+ dst);
+ }
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
handler = create_leftovers_rule(dev, ft_prio, flow_attr,
@@ -1716,6 +1776,17 @@ static struct device_attribute *mlx5_class_attributes[] = {
&dev_attr_reg_pages,
};
+static void pkey_change_handler(struct work_struct *work)
+{
+ struct mlx5_ib_port_resources *ports =
+ container_of(work, struct mlx5_ib_port_resources,
+ pkey_change_work);
+
+ mutex_lock(&ports->devr->mutex);
+ mlx5_ib_gsi_pkey_change(ports->gsi);
+ mutex_unlock(&ports->devr->mutex);
+}
+
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param)
{
@@ -1752,6 +1823,8 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
case MLX5_DEV_EVENT_PKEY_CHANGE:
ibev.event = IB_EVENT_PKEY_CHANGE;
port = (u8)param;
+
+ schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
break;
case MLX5_DEV_EVENT_GUID_CHANGE:
@@ -1838,7 +1911,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
mlx5_ib_destroy_qp(dev->umrc.qp);
- ib_destroy_cq(dev->umrc.cq);
+ ib_free_cq(dev->umrc.cq);
ib_dealloc_pd(dev->umrc.pd);
}
@@ -1853,7 +1926,6 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
- struct ib_cq_init_attr cq_attr = {};
int ret;
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
@@ -1870,15 +1942,12 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
goto error_0;
}
- cq_attr.cqe = 128;
- cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
- &cq_attr);
+ cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
if (IS_ERR(cq)) {
mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
ret = PTR_ERR(cq);
goto error_2;
}
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
init_attr->send_cq = cq;
init_attr->recv_cq = cq;
@@ -1945,7 +2014,7 @@ error_4:
mlx5_ib_destroy_qp(qp);
error_3:
- ib_destroy_cq(cq);
+ ib_free_cq(cq);
error_2:
ib_dealloc_pd(pd);
@@ -1961,10 +2030,13 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
struct ib_srq_init_attr attr;
struct mlx5_ib_dev *dev;
struct ib_cq_init_attr cq_attr = {.cqe = 1};
+ int port;
int ret = 0;
dev = container_of(devr, struct mlx5_ib_dev, devr);
+ mutex_init(&devr->mutex);
+
devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
if (IS_ERR(devr->p0)) {
ret = PTR_ERR(devr->p0);
@@ -2052,6 +2124,12 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s0->usecnt, 0);
+ for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
+ INIT_WORK(&devr->ports[port].pkey_change_work,
+ pkey_change_handler);
+ devr->ports[port].devr = devr;
+ }
+
return 0;
error5:
@@ -2070,12 +2148,20 @@ error0:
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
+ struct mlx5_ib_dev *dev =
+ container_of(devr, struct mlx5_ib_dev, devr);
+ int port;
+
mlx5_ib_destroy_srq(devr->s1);
mlx5_ib_destroy_srq(devr->s0);
mlx5_ib_dealloc_xrcd(devr->x0);
mlx5_ib_dealloc_xrcd(devr->x1);
mlx5_ib_destroy_cq(devr->c0);
mlx5_ib_dealloc_pd(devr->p0);
+
+	/* Make sure no P_Key change work items are still executing */
+ for (port = 0; port < dev->num_ports; ++port)
+ cancel_work_sync(&devr->ports[port].pkey_change_work);
}
static u32 get_core_cap_flags(struct ib_device *ibdev)
@@ -2198,6 +2284,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_REREG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
@@ -2258,6 +2345,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
+ dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
@@ -2266,9 +2354,23 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
+ if (mlx5_core_is_pf(mdev)) {
+ dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
+ dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
+ dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
+ dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
+ }
mlx5_ib_internal_fill_odp_caps(dev);
+ if (MLX5_CAP_GEN(mdev, imaicl)) {
+ dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
+ dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
+ dev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+ }
+
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d2b9737baa36..b46c25542a7c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -43,6 +43,7 @@
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
+#include <rdma/ib_user_verbs.h>
#define mlx5_ib_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
@@ -126,7 +127,7 @@ struct mlx5_ib_pd {
};
#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
-#define MLX5_IB_FLOW_LAST_PRIO (MLX5_IB_FLOW_MCAST_PRIO - 1)
+#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
@@ -162,9 +163,31 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
+
+#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 3)
+#define MLX5_IB_SEND_UMR_UPDATE_PD (IB_SEND_RESERVED_START << 4)
+#define MLX5_IB_SEND_UMR_UPDATE_ACCESS IB_SEND_RESERVED_END
+
#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
+/*
+ * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
+ * creates the actual hardware QP.
+ */
+#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
+/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
+ *
+ * These flags are intended for internal use by the mlx5_ib driver, and they
+ * rely on the range reserved for that use in the ib_qp_create_flags enum.
+ */
+
+/* Create a UD QP whose source QP number is 1 */
+static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
+{
+ return IB_QP_CREATE_RESERVED_START;
+}
+
struct wr_list {
u16 opcode;
u16 next;
@@ -325,11 +348,14 @@ struct mlx5_ib_cq_buf {
};
enum mlx5_ib_qp_flags {
- MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0,
- MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1,
- MLX5_IB_QP_CROSS_CHANNEL = 1 << 2,
- MLX5_IB_QP_MANAGED_SEND = 1 << 3,
- MLX5_IB_QP_MANAGED_RECV = 1 << 4,
+ MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
+ MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+ MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
+ MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
+ MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
+ MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
+ /* QP uses 1 as its source QP number */
+ MLX5_IB_QP_SQPN_QP1 = 1 << 6,
};
struct mlx5_umr_wr {
@@ -373,6 +399,14 @@ struct mlx5_ib_cq {
struct ib_umem *resize_umem;
int cqe_size;
u32 create_flags;
+ struct list_head wc_list;
+ enum ib_cq_notify_flags notify_flags;
+ struct work_struct notify_work;
+};
+
+struct mlx5_ib_wc {
+ struct ib_wc wc;
+ struct list_head list;
};
struct mlx5_ib_srq {
@@ -413,7 +447,8 @@ struct mlx5_ib_mr {
int ndescs;
int max_descs;
int desc_size;
- struct mlx5_core_mr mmr;
+ int access_mode;
+ struct mlx5_core_mkey mmkey;
struct ib_umem *umem;
struct mlx5_shared_mr_info *smr_info;
struct list_head list;
@@ -425,19 +460,20 @@ struct mlx5_ib_mr {
struct mlx5_core_sig_ctx *sig;
int live;
void *descs_alloc;
+ int access_flags; /* Needed for rereg MR */
+};
+
+struct mlx5_ib_mw {
+ struct ib_mw ibmw;
+ struct mlx5_core_mkey mmkey;
};
struct mlx5_ib_umr_context {
+ struct ib_cqe cqe;
enum ib_wc_status status;
struct completion done;
};
-static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
-{
- context->status = -1;
- init_completion(&context->done);
-}
-
struct umr_common {
struct ib_pd *pd;
struct ib_cq *cq;
@@ -487,6 +523,14 @@ struct mlx5_mr_cache {
unsigned long last_add;
};
+struct mlx5_ib_gsi_qp;
+
+struct mlx5_ib_port_resources {
+ struct mlx5_ib_resources *devr;
+ struct mlx5_ib_gsi_qp *gsi;
+ struct work_struct pkey_change_work;
+};
+
struct mlx5_ib_resources {
struct ib_cq *c0;
struct ib_xrcd *x0;
@@ -494,6 +538,9 @@ struct mlx5_ib_resources {
struct ib_pd *p0;
struct ib_srq *s0;
struct ib_srq *s1;
+ struct mlx5_ib_port_resources ports[2];
+ /* Protects changes to the port resources */
+ struct mutex mutex;
};
struct mlx5_roce {
@@ -558,9 +605,9 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}
-static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
+static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
- return container_of(mmr, struct mlx5_ib_mr, mmr);
+ return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}
static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
@@ -588,6 +635,11 @@ static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}
+static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct mlx5_ib_mw, ibmw);
+}
+
struct mlx5_ib_ah {
struct ib_ah ibah;
struct mlx5_av av;
@@ -648,8 +700,14 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata);
+int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
int npages, int zap);
+int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 virt_addr, int access_flags,
+ struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
@@ -700,7 +758,6 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
@@ -719,7 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
unsigned long end);
-
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
@@ -736,9 +792,35 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_info *info);
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+ u8 port, int state);
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+ u64 guid, int type);
+
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int index);
+/* GSI QP helper functions */
+struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr);
+int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask);
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
+
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -758,7 +840,7 @@ static inline u8 convert_access(int acc)
static inline int is_qp1(enum ib_qp_type qp_type)
{
- return qp_type == IB_QPT_GSI;
+ return qp_type == MLX5_IB_QPT_HW_GSI;
}
#define MLX5_MAX_UMR_SHIFT 16
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 6000f7aeede9..4d5bff151cdf 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -40,6 +40,7 @@
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
+#include "user.h"
enum {
MAX_PENDING_REG_MR = 8,
@@ -57,7 +58,7 @@ static int clean_mr(struct mlx5_ib_mr *mr);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Wait until all page fault handlers using the mr complete. */
@@ -77,6 +78,40 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
return order - cache->ent[0].order;
}
+static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
+{
+ return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
+ length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
+}
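+
+/*
+ * A cached mkey of a given order covers (1 << order) adapter pages; the
+ * check above includes the offset of 'start' within its first page so
+ * that a UMR MTT update never runs past the mkey's translation table.
+ */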
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static void update_odp_mr(struct mlx5_ib_mr *mr)
+{
+ if (mr->umem->odp_data) {
+ /*
+ * This barrier prevents the compiler from moving the
+ * setting of umem->odp_data->private to point to our
+	 * MR before reg_umr has finished, ensuring that MR
+	 * initialization is complete before we start
+	 * handling invalidations.
+ */
+ smp_wmb();
+ mr->umem->odp_data->private = mr;
+ /*
+ * Make sure we will see the new
+ * umem->odp_data->private value in the invalidation
+ * routines, before we can get page faults on the
+ * MR. Page faults can happen once we put the MR in
+	 * the tree, below this line. Without the barrier,
+	 * a page fault could be handled and an invalidation
+	 * could run before umem->odp_data->private == mr is
+	 * visible to the invalidation handler.
+ */
+ smp_wmb();
+ }
+}
+#endif
+
static void reg_mr_callback(int status, void *context)
{
struct mlx5_ib_mr *mr = context;
@@ -86,7 +121,7 @@ static void reg_mr_callback(int status, void *context)
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
- struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
+ struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
int err;
spin_lock_irqsave(&ent->lock, flags);
@@ -113,7 +148,7 @@ static void reg_mr_callback(int status, void *context)
spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
key = dev->mdev->priv.mkey_key++;
spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
- mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+ mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
cache->last_add = jiffies;
@@ -124,10 +159,10 @@ static void reg_mr_callback(int status, void *context)
spin_unlock_irqrestore(&ent->lock, flags);
write_lock_irqsave(&table->lock, flags);
- err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
- &mr->mmr);
+ err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey);
if (err)
- pr_err("Error inserting to mr tree. 0x%x\n", -err);
+ pr_err("Error inserting to mkey tree. 0x%x\n", -err);
write_unlock_irqrestore(&table->lock, flags);
}
@@ -168,7 +203,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
sizeof(*in), reg_mr_callback,
mr, &mr->out);
if (err) {
@@ -657,14 +692,14 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
seg->start_addr = 0;
- err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
+ err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
NULL);
if (err)
goto err_in;
kfree(in);
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
mr->umem = NULL;
return &mr->ibmr;
@@ -693,10 +728,40 @@ static int use_umr(int order)
return order <= MLX5_MAX_UMR_SHIFT;
}
-static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
- struct ib_sge *sg, u64 dma, int n, u32 key,
- int page_shift, u64 virt_addr, u64 len,
- int access_flags)
+static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int npages, int page_shift, int *size,
+ __be64 **mr_pas, dma_addr_t *dma)
+{
+ __be64 *pas;
+ struct device *ddev = dev->ib_dev.dma_device;
+
+ /*
+ * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the pas array, we allocate
+ * a little more.
+ */
+ *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
+ *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+ if (!(*mr_pas))
+ return -ENOMEM;
+
+ pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
+ mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the actual pages. */
+ memset(pas + npages, 0, *size - npages * sizeof(u64));
+
+ *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, *dma)) {
+ kfree(*mr_pas);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
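+
+/*
+ * On success *mr_pas holds the original kmalloc() pointer, so callers
+ * must kfree(*mr_pas) rather than the PTR_ALIGN()ed address, and unmap
+ * *dma using the returned *size.
+ */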
+
+static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
+ struct ib_sge *sg, u64 dma, int n, u32 key,
+ int page_shift)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_umr_wr *umrwr = umr_wr(wr);
@@ -706,7 +771,6 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
sg->lkey = dev->umrc.pd->local_dma_lkey;
wr->next = NULL;
- wr->send_flags = 0;
wr->sg_list = sg;
if (n)
wr->num_sge = 1;
@@ -718,6 +782,19 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
umrwr->npages = n;
umrwr->page_shift = page_shift;
umrwr->mkey = key;
+}
+
+static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
+ struct ib_sge *sg, u64 dma, int n, u32 key,
+ int page_shift, u64 virt_addr, u64 len,
+ int access_flags)
+{
+ struct mlx5_umr_wr *umrwr = umr_wr(wr);
+
+ prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);
+
+ wr->send_flags = 0;
+
umrwr->target.virt_addr = virt_addr;
umrwr->length = len;
umrwr->access_flags = access_flags;
@@ -734,26 +811,45 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
umrwr->mkey = key;
}
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
+static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+ int access_flags, int *npages,
+ int *page_shift, int *ncont, int *order)
{
- struct mlx5_ib_umr_context *context;
- struct ib_wc wc;
- int err;
-
- while (1) {
- err = ib_poll_cq(cq, 1, &wc);
- if (err < 0) {
- pr_warn("poll cq error %d\n", err);
- return;
- }
- if (err == 0)
- break;
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
+ access_flags, 0);
+ if (IS_ERR(umem)) {
+ mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+ return (void *)umem;
+ }
- context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
- context->status = wc.status;
- complete(&context->done);
+ mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
+ if (!*npages) {
+ mlx5_ib_warn(dev, "avoid zero region\n");
+ ib_umem_release(umem);
+ return ERR_PTR(-EINVAL);
}
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
+ mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
+ *npages, *ncont, *order, *page_shift);
+
+ return umem;
+}
+
+static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_umr_context *context =
+ container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
+
+ context->status = wc->status;
+ complete(&context->done);
+}
+
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+ context->cqe.done = mlx5_ib_umr_done;
+ context->status = -1;
+ init_completion(&context->done);
}
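+
+/*
+ * With the UMR CQ allocated as IB_POLL_SOFTIRQ, completions are
+ * dispatched through the embedded ib_cqe (mlx5_ib_umr_done) rather
+ * than by decoding a context pointer out of wc.wr_id.
+ */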
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
@@ -764,13 +860,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
struct device *ddev = dev->ib_dev.dma_device;
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
- struct mlx5_umr_wr umrwr;
+ struct mlx5_umr_wr umrwr = {};
struct ib_send_wr *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
int size;
__be64 *mr_pas;
- __be64 *pas;
dma_addr_t dma;
int err = 0;
int i;
@@ -790,33 +885,17 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
if (!mr)
return ERR_PTR(-EAGAIN);
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
- * To avoid copying garbage after the pas array, we allocate
- * a little more. */
- size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
- mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
- if (!mr_pas) {
- err = -ENOMEM;
+ err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
+ &dma);
+ if (err)
goto free_mr;
- }
- pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
- mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
- /* Clear padding after the actual pages. */
- memset(pas + npages, 0, size - npages * sizeof(u64));
-
- dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, dma)) {
- err = -ENOMEM;
- goto free_pas;
- }
+ mlx5_ib_init_umr_context(&umr_context);
- memset(&umrwr, 0, sizeof(umrwr));
- umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
+ umrwr.wr.wr_cqe = &umr_context.cqe;
+ prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
page_shift, virt_addr, len, access_flags);
- mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
if (err) {
@@ -830,9 +909,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
}
}
- mr->mmr.iova = virt_addr;
- mr->mmr.size = len;
- mr->mmr.pd = to_mpd(pd)->pdn;
+ mr->mmkey.iova = virt_addr;
+ mr->mmkey.size = len;
+ mr->mmkey.pd = to_mpd(pd)->pdn;
mr->live = 1;
@@ -840,7 +919,6 @@ unmap_dma:
up(&umrc->sem);
dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
-free_pas:
kfree(mr_pas);
free_mr:
@@ -929,8 +1007,10 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
+ mlx5_ib_init_umr_context(&umr_context);
+
memset(&wr, 0, sizeof(wr));
- wr.wr.wr_id = (u64)(unsigned long)&umr_context;
+ wr.wr.wr_cqe = &umr_context.cqe;
sg.addr = dma;
sg.length = ALIGN(npages * sizeof(u64),
@@ -944,10 +1024,9 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
wr.wr.opcode = MLX5_IB_WR_UMR;
wr.npages = sg.length / sizeof(u64);
wr.page_shift = PAGE_SHIFT;
- wr.mkey = mr->mmr.key;
+ wr.mkey = mr->mmkey.key;
wr.target.offset = start_page_index;
- mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
err = ib_post_send(umrc->qp, &wr.wr, &bad);
if (err) {
@@ -974,10 +1053,14 @@ free_pas:
}
#endif
-static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
- u64 length, struct ib_umem *umem,
- int npages, int page_shift,
- int access_flags)
+/*
+ * If ibmr is NULL, reg_create() allocates a new mlx5_ib_mr;
+ * otherwise the given ibmr is reused.
+ */
+static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
+ u64 virt_addr, u64 length,
+ struct ib_umem *umem, int npages,
+ int page_shift, int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_create_mkey_mbox_in *in;
@@ -986,7 +1069,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
int err;
bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -1013,7 +1096,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
1 << page_shift));
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
NULL, NULL);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
@@ -1024,7 +1107,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
mr->live = 1;
kvfree(in);
- mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
+ mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
return mr;
@@ -1032,11 +1115,23 @@ err_2:
kvfree(in);
err_1:
- kfree(mr);
+ if (!ibmr)
+ kfree(mr);
return ERR_PTR(err);
}
+static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
+ int npages, u64 length, int access_flags)
+{
+ mr->npages = npages;
+ atomic_add(npages, &dev->mdev->priv.reg_pages);
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+ mr->ibmr.length = length;
+ mr->access_flags = access_flags;
+}
+
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@@ -1052,22 +1147,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
- umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
- 0);
- if (IS_ERR(umem)) {
- mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
- return (void *)umem;
- }
+ umem = mr_umem_get(pd, start, length, access_flags, &npages,
+ &page_shift, &ncont, &order);
- mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
- if (!npages) {
- mlx5_ib_warn(dev, "avoid zero region\n");
- err = -EINVAL;
- goto error;
- }
-
- mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
- npages, ncont, order, page_shift);
+ if (IS_ERR(umem))
+ return (void *)umem;
if (use_umr(order)) {
mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
@@ -1083,45 +1167,21 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
if (!mr)
- mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
- access_flags);
+ mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
+ page_shift, access_flags);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto error;
}
- mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);
+ mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
mr->umem = umem;
- mr->npages = npages;
- atomic_add(npages, &dev->mdev->priv.reg_pages);
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
+ set_mr_fields(dev, mr, npages, length, access_flags);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (umem->odp_data) {
- /*
- * This barrier prevents the compiler from moving the
- * setting of umem->odp_data->private to point to our
- * MR, before reg_umr finished, to ensure that the MR
- * initialization have finished before starting to
- * handle invalidations.
- */
- smp_wmb();
- mr->umem->odp_data->private = mr;
- /*
- * Make sure we will see the new
- * umem->odp_data->private value in the invalidation
- * routines, before we can get page faults on the
- * MR. Page faults can happen once we put the MR in
- * the tree, below this line. Without the barrier,
- * there can be a fault handling and an invalidation
- * before umem->odp_data->private == mr is visible to
- * the invalidation handler.
- */
- smp_wmb();
- }
+ update_odp_mr(mr);
#endif
return &mr->ibmr;
@@ -1135,15 +1195,15 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
- struct mlx5_umr_wr umrwr;
+ struct mlx5_umr_wr umrwr = {};
struct ib_send_wr *bad;
int err;
- memset(&umrwr.wr, 0, sizeof(umrwr));
- umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);
-
mlx5_ib_init_umr_context(&umr_context);
+
+ umrwr.wr.wr_cqe = &umr_context.cqe;
+ prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
+
down(&umrc->sem);
err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
if (err) {
@@ -1165,6 +1225,167 @@ error:
return err;
}
+static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
+ u64 length, int npages, int page_shift, int order,
+ int access_flags, int flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct device *ddev = dev->ib_dev.dma_device;
+ struct mlx5_ib_umr_context umr_context;
+ struct ib_send_wr *bad;
+ struct mlx5_umr_wr umrwr = {};
+ struct ib_sge sg;
+ struct umr_common *umrc = &dev->umrc;
+ dma_addr_t dma = 0;
+ __be64 *mr_pas = NULL;
+ int size;
+ int err;
+
+ mlx5_ib_init_umr_context(&umr_context);
+
+ umrwr.wr.wr_cqe = &umr_context.cqe;
+ umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+
+ if (flags & IB_MR_REREG_TRANS) {
+ err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
+ &mr_pas, &dma);
+ if (err)
+ return err;
+
+ umrwr.target.virt_addr = virt_addr;
+ umrwr.length = length;
+ umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
+ }
+
+ prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
+ page_shift);
+
+ if (flags & IB_MR_REREG_PD) {
+ umrwr.pd = pd;
+ umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
+ }
+
+ if (flags & IB_MR_REREG_ACCESS) {
+ umrwr.access_flags = access_flags;
+ umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
+ }
+
+ /* post send request to UMR QP */
+ down(&umrc->sem);
+ err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
+
+ if (err) {
+ mlx5_ib_warn(dev, "post send failed, err %d\n", err);
+ } else {
+ wait_for_completion(&umr_context.done);
+ if (umr_context.status != IB_WC_SUCCESS) {
+ mlx5_ib_warn(dev, "reg umr failed (%u)\n",
+ umr_context.status);
+ err = -EFAULT;
+ }
+ }
+
+ up(&umrc->sem);
+ if (flags & IB_MR_REREG_TRANS) {
+ dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
+ kfree(mr_pas);
+ }
+ return err;
+}
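
rereg_umr() above follows the same throttled post-and-wait shape as reg_umr() and unreg_umr(): a semaphore bounds the number of UMR WQEs in flight, and the poster sleeps on the completion that mlx5_ib_umr_done() fills in. The skeleton in isolation, reusing the hypothetical struct my_ctx from the completion sketch earlier:

#include <linux/semaphore.h>
#include <rdma/ib_verbs.h>

static int post_and_wait(struct ib_qp *qp, struct semaphore *sem,
			 struct ib_send_wr *wr, struct my_ctx *ctx)
{
	struct ib_send_wr *bad;
	int err;

	down(sem);				/* bound outstanding work */
	err = ib_post_send(qp, wr, &bad);
	if (!err) {
		wait_for_completion(&ctx->done);
		if (ctx->status != IB_WC_SUCCESS)
			err = -EFAULT;		/* HW reported a bad wc */
	}
	up(sem);
	return err;
}
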
+
+int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 virt_addr, int new_access_flags,
+ struct ib_pd *new_pd, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ib_mr);
+ struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
+ int access_flags = flags & IB_MR_REREG_ACCESS ?
+ new_access_flags :
+ mr->access_flags;
+ u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
+ u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
+ int page_shift = 0;
+ int npages = 0;
+ int ncont = 0;
+ int order = 0;
+ int err;
+
+ mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ start, virt_addr, length, access_flags);
+
+ if (flags != IB_MR_REREG_PD) {
+ /*
+ * Replace umem. This needs to be done whether or not UMR is
+ * used.
+ */
+ flags |= IB_MR_REREG_TRANS;
+ ib_umem_release(mr->umem);
+ mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
+ &page_shift, &ncont, &order);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ mr->umem = NULL;
+ return err;
+ }
+ }
+
+ if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
+ /*
+ * UMR can't be used - MKey needs to be replaced.
+ */
+ if (mr->umred) {
+ err = unreg_umr(dev, mr);
+ if (err)
+ mlx5_ib_warn(dev, "Failed to unregister MR\n");
+ } else {
+ err = destroy_mkey(dev, mr);
+ if (err)
+ mlx5_ib_warn(dev, "Failed to destroy MKey\n");
+ }
+ if (err)
+ return err;
+
+ mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
+ page_shift, access_flags);
+
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+
+ mr->umred = 0;
+ } else {
+ /*
+ * Send a UMR WQE
+ */
+ err = rereg_umr(pd, mr, addr, len, npages, page_shift,
+ order, access_flags, flags);
+ if (err) {
+ mlx5_ib_warn(dev, "Failed to rereg UMR\n");
+ return err;
+ }
+ }
+
+ if (flags & IB_MR_REREG_PD) {
+ ib_mr->pd = pd;
+ mr->mmkey.pd = to_mpd(pd)->pdn;
+ }
+
+ if (flags & IB_MR_REREG_ACCESS)
+ mr->access_flags = access_flags;
+
+ if (flags & IB_MR_REREG_TRANS) {
+ atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+ set_mr_fields(dev, mr, npages, len, access_flags);
+ mr->mmkey.iova = addr;
+ mr->mmkey.size = len;
+ }
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ update_odp_mr(mr);
+#endif
+
+ return 0;
+}
+
static int
mlx5_alloc_priv_descs(struct ib_device *device,
struct mlx5_ib_mr *mr,
@@ -1236,7 +1457,7 @@ static int clean_mr(struct mlx5_ib_mr *mr)
err = destroy_mkey(dev, mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
- mr->mmr.key, err);
+ mr->mmkey.key, err);
return err;
}
} else {
@@ -1300,8 +1521,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_ib_mr *mr;
- int access_mode, err;
- int ndescs = roundup(max_num_sg, 4);
+ int ndescs = ALIGN(max_num_sg, 4);
+ int err;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
@@ -1319,7 +1540,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
if (mr_type == IB_MR_TYPE_MEM_REG) {
- access_mode = MLX5_ACCESS_MODE_MTT;
+ mr->access_mode = MLX5_ACCESS_MODE_MTT;
in->seg.log2_page_size = PAGE_SHIFT;
err = mlx5_alloc_priv_descs(pd->device, mr,
@@ -1329,6 +1550,15 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
mr->desc_size = sizeof(u64);
mr->max_descs = ndescs;
+ } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
+ mr->access_mode = MLX5_ACCESS_MODE_KLM;
+
+ err = mlx5_alloc_priv_descs(pd->device, mr,
+ ndescs, sizeof(struct mlx5_klm));
+ if (err)
+ goto err_free_in;
+ mr->desc_size = sizeof(struct mlx5_klm);
+ mr->max_descs = ndescs;
} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
u32 psv_index[2];
@@ -1347,7 +1577,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
if (err)
goto err_free_sig;
- access_mode = MLX5_ACCESS_MODE_KLM;
+ mr->access_mode = MLX5_ACCESS_MODE_KLM;
mr->sig->psv_memory.psv_idx = psv_index[0];
mr->sig->psv_wire.psv_idx = psv_index[1];
@@ -1361,14 +1591,14 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
goto err_free_in;
}
- in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
+ in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode;
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
NULL, NULL, NULL);
if (err)
goto err_destroy_psv;
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
mr->umem = NULL;
kfree(in);
@@ -1395,6 +1625,88 @@ err_free:
return ERR_PTR(err);
}
+struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_create_mkey_mbox_in *in = NULL;
+ struct mlx5_ib_mw *mw = NULL;
+ int ndescs;
+ int err;
+ struct mlx5_ib_alloc_mw req = {};
+ struct {
+ __u32 comp_mask;
+ __u32 response_length;
+ } resp = {};
+
+ err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
+ if (err)
+ return ERR_PTR(err);
+
+ if (req.comp_mask || req.reserved1 || req.reserved2)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (udata->inlen > sizeof(req) &&
+ !ib_is_udata_cleared(udata, sizeof(req),
+ udata->inlen - sizeof(req)))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
+
+ mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!mw || !in) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
+ in->seg.xlt_oct_size = cpu_to_be32(ndescs);
+ in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+ in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM |
+ MLX5_PERM_LOCAL_READ;
+ if (type == IB_MW_TYPE_2)
+ in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in),
+ NULL, NULL, NULL);
+ if (err)
+ goto free;
+
+ mw->ibmw.rkey = mw->mmkey.key;
+
+ resp.response_length = min(offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length), udata->outlen);
+ if (resp.response_length) {
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err) {
+ mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
+ goto free;
+ }
+ }
+
+ kfree(in);
+ return &mw->ibmw;
+
+free:
+ kfree(mw);
+ kfree(in);
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_dealloc_mw(struct ib_mw *mw)
+{
+ struct mlx5_ib_mw *mmw = to_mmw(mw);
+ int err;
+
+ err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
+ &mmw->mmkey);
+ if (!err)
+ kfree(mmw);
+ return err;
+}
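
mlx5_ib_alloc_mw() above follows the uverbs ABI-extension discipline: copy only min(inlen, sizeof(req)) bytes so older userspace still works, insist that any extra input bytes the kernel does not understand are zero, and never write more response than userspace allocated. A condensed sketch of those three checks, with hypothetical my_req/my_resp layouts:

#include <linux/types.h>
#include <rdma/ib_verbs.h>

struct my_req  { __u32 comp_mask; __u32 flags; };
struct my_resp { __u32 comp_mask; __u32 response_length; };

static int parse_udata(struct ib_udata *udata, struct my_req *req,
		       struct my_resp *resp)
{
	int err;

	/* Older userspace may pass a shorter struct: copy what fits. */
	err = ib_copy_from_udata(req, udata, min(udata->inlen, sizeof(*req)));
	if (err)
		return err;

	/* Newer userspace may pass more: the unknown tail must be zero. */
	if (udata->inlen > sizeof(*req) &&
	    !ib_is_udata_cleared(udata, sizeof(*req),
				 udata->inlen - sizeof(*req)))
		return -EOPNOTSUPP;

	/* Never write past what userspace allocated for the response. */
	resp->response_length = min(sizeof(*resp), udata->outlen);
	return ib_copy_to_udata(udata, resp, resp->response_length);
}
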
+
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status)
{
@@ -1436,6 +1748,32 @@ done:
return ret;
}
+static int
+mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
+ struct scatterlist *sgl,
+ unsigned short sg_nents)
+{
+ struct scatterlist *sg = sgl;
+ struct mlx5_klm *klms = mr->descs;
+ u32 lkey = mr->ibmr.pd->local_dma_lkey;
+ int i;
+
+ mr->ibmr.iova = sg_dma_address(sg);
+ mr->ibmr.length = 0;
+ mr->ndescs = sg_nents;
+
+ for_each_sg(sgl, sg, sg_nents, i) {
+ if (unlikely(i >= mr->max_descs))
+ break;
+ klms[i].va = cpu_to_be64(sg_dma_address(sg));
+ klms[i].bcount = cpu_to_be32(sg_dma_len(sg));
+ klms[i].key = cpu_to_be32(lkey);
+ mr->ibmr.length += sg_dma_len(sg);
+ }
+
+ return i;
+}
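
Unlike the MTT path, which splits every buffer into pages, mlx5_ib_sg_to_klms() maps each scatterlist element to one KLM (address/length/key triplet), which is what lets IB_MR_TYPE_SG_GAPS register buffers whose elements are neither page-aligned nor contiguous. The underlying scatterlist walk in miniature (sum_dma_len() is a hypothetical helper):

#include <linux/scatterlist.h>

/* Total DMA length of a mapped scatterlist, element by element. */
static u64 sum_dma_len(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i;
	u64 total = 0;

	for_each_sg(sgl, sg, nents, i)
		total += sg_dma_len(sg);	/* elements may have gaps between them */

	return total;
}
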
+
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1463,7 +1801,10 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
mr->desc_size * mr->max_descs,
DMA_TO_DEVICE);
- n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
+ if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ n = mlx5_ib_sg_to_klms(mr, sg, sg_nents);
+ else
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
mr->desc_size * mr->max_descs,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index b8d76361a48d..34e79e709c67 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -142,13 +142,13 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
u32 key)
{
u32 base_key = mlx5_base_mkey(key);
- struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);
- struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr);
+ struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key);
+ struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (!mmr || mmr->key != key || !mr->live)
+ if (!mmkey || mmkey->key != key || !mr->live)
return NULL;
- return container_of(mmr, struct mlx5_ib_mr, mmr);
+ return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}
static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
@@ -232,7 +232,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
io_virt += pfault->mpfault.bytes_committed;
bcnt -= pfault->mpfault.bytes_committed;
- start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;
+ start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT;
if (mr->umem->writable)
access_mask |= ODP_WRITE_ALLOWED_BIT;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 34cb8e87c7b8..8dee8bc1e0fe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -58,6 +58,7 @@ enum {
static const u32 mlx5_ib_opcode[] = {
[IB_WR_SEND] = MLX5_OPCODE_SEND,
+ [IB_WR_LSO] = MLX5_OPCODE_LSO,
[IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
[IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
[IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
@@ -72,6 +73,9 @@ static const u32 mlx5_ib_opcode[] = {
[MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
};
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
static int is_qp0(enum ib_qp_type qp_type)
{
@@ -260,11 +264,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
return 0;
}
-static int sq_overhead(enum ib_qp_type qp_type)
+static int sq_overhead(struct ib_qp_init_attr *attr)
{
int size = 0;
- switch (qp_type) {
+ switch (attr->qp_type) {
case IB_QPT_XRC_INI:
size += sizeof(struct mlx5_wqe_xrc_seg);
/* fall through */
@@ -287,8 +291,12 @@ static int sq_overhead(enum ib_qp_type qp_type)
break;
case IB_QPT_UD:
+ if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+ size += sizeof(struct mlx5_wqe_eth_pad) +
+ sizeof(struct mlx5_wqe_eth_seg);
+ /* fall through */
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_datagram_seg);
break;
@@ -311,7 +319,7 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
int inl_size = 0;
int size;
- size = sq_overhead(attr->qp_type);
+ size = sq_overhead(attr);
if (size < 0)
return size;
@@ -348,8 +356,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
return -EINVAL;
}
- qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
- sizeof(struct mlx5_wqe_inline_seg);
+ qp->max_inline_data = wqe_size - sq_overhead(attr) -
+ sizeof(struct mlx5_wqe_inline_seg);
attr->cap.max_inline_data = qp->max_inline_data;
if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
@@ -590,7 +598,7 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_XRC_INI:
case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
- case IB_QPT_GSI: return MLX5_QP_ST_QP1;
+ case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
case IB_QPT_RAW_PACKET:
case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
@@ -783,7 +791,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev->priv.uuari;
- if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
+ if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
+ IB_QP_CREATE_IPOIB_UD_LSO |
+ mlx5_ib_create_qp_sqpn_qp1()))
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
@@ -828,6 +839,11 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
+ if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
+ (*in)->ctx.deth_sqpn = cpu_to_be32(1);
+ qp->flags |= MLX5_IB_QP_SQPN_QP1;
+ }
+
mlx5_fill_page_array(&qp->buf, (*in)->pas);
err = mlx5_db_alloc(dev->mdev, &qp->db);
@@ -1228,6 +1244,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
qp->flags |= MLX5_IB_QP_MANAGED_RECV;
}
+
+ if (init_attr->qp_type == IB_QPT_UD &&
+ (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
+ if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
+ mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -1271,6 +1295,11 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
ucmd.sq_wqe_count, max_wqes);
return -EINVAL;
}
+ if (init_attr->create_flags &
+ mlx5_ib_create_qp_sqpn_qp1()) {
+ mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
+ return -EINVAL;
+ }
err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
&resp, &inlen, base);
if (err)
@@ -1385,6 +1414,13 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
/* 0xffffff means we ask to work with cqe version 0 */
MLX5_SET(qpc, qpc, user_index, uidx);
}
+ /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB QP */
+ if (init_attr->qp_type == IB_QPT_UD &&
+ (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
+ qp->flags |= MLX5_IB_QP_LSO;
+ }
if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
@@ -1494,7 +1530,7 @@ static void get_cqs(struct mlx5_ib_qp *qp,
break;
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
@@ -1657,7 +1693,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
case IB_QPT_UC:
case IB_QPT_UD:
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
case MLX5_IB_QPT_REG_UMR:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
@@ -1686,6 +1722,9 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
break;
+ case IB_QPT_GSI:
+ return mlx5_ib_gsi_create_qp(pd, init_attr);
+
case IB_QPT_RAW_IPV6:
case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_MAX:
@@ -1704,6 +1743,9 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
+ if (unlikely(qp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_destroy_qp(qp);
+
destroy_qp_common(dev, mqp);
kfree(mqp);
@@ -2161,8 +2203,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
context = &in->ctx;
err = to_mlx5_st(ibqp->qp_type);
- if (err < 0)
+ if (err < 0) {
+ mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
goto out;
+ }
context->flags = cpu_to_be32(err << 16);
@@ -2182,7 +2226,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
+ if (is_sqp(ibqp->qp_type)) {
context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
} else if (ibqp->qp_type == IB_QPT_UD ||
ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
@@ -2284,6 +2328,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->sq_crq_size |= cpu_to_be16(1 << 4);
+ if (qp->flags & MLX5_IB_QP_SQPN_QP1)
+ context->deth_sqpn = cpu_to_be32(1);
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
@@ -2363,11 +2409,18 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ enum ib_qp_type qp_type;
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
int port;
enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
+
+ qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
+ IB_QPT_GSI : ibqp->qp_type;
+
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -2378,32 +2431,46 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
}
- if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
- !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- ll))
+ if (qp_type != MLX5_IB_QPT_REG_UMR &&
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, ibqp->qp_type, attr_mask);
goto out;
+ }
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
+ attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
goto out;
+ }
if (attr_mask & IB_QP_PKEY_INDEX) {
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
if (attr->pkey_index >=
- dev->mdev->port_caps[port - 1].pkey_table_len)
+ dev->mdev->port_caps[port - 1].pkey_table_len) {
+ mlx5_ib_dbg(dev, "invalid pkey index %d\n",
+ attr->pkey_index);
goto out;
+ }
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
+ mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+ attr->max_rd_atomic);
goto out;
+ }
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
+ mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+ attr->max_dest_rd_atomic);
goto out;
+ }
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
err = 0;
@@ -2442,6 +2509,59 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
rseg->reserved = 0;
}
+static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
+ struct ib_send_wr *wr, void *qend,
+ struct mlx5_ib_qp *qp, int *size)
+{
+ void *seg = eseg;
+
+ memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
+
+ if (wr->send_flags & IB_SEND_IP_CSUM)
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+
+ seg += sizeof(struct mlx5_wqe_eth_seg);
+ *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
+
+ if (wr->opcode == IB_WR_LSO) {
+ struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
+ int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
+ u64 left, leftlen, copysz;
+ void *pdata = ud_wr->header;
+
+ left = ud_wr->hlen;
+ eseg->mss = cpu_to_be16(ud_wr->mss);
+ eseg->inline_hdr_sz = cpu_to_be16(left);
+
+ /*
+ * Check whether there is room up to the end of the queue: if so,
+ * copy everything in one shot; otherwise copy up to the end of
+ * the queue, wrap back to its start, and then copy the rest.
+ */
+ leftlen = qend - (void *)eseg->inline_hdr_start;
+ copysz = min_t(u64, leftlen, left);
+
+ memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
+
+ if (likely(copysz > size_of_inl_hdr_start)) {
+ seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
+ *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
+ }
+
+ if (unlikely(copysz < left)) { /* the last wqe in the queue */
+ seg = mlx5_get_send_wqe(qp, 0);
+ left -= copysz;
+ pdata += copysz;
+ memcpy(seg, pdata, left);
+ seg += ALIGN(left, 16);
+ *size += ALIGN(left, 16) / 16;
+ }
+ }
+
+ return seg;
+}
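
The LSO branch above has to cope with the inline header straddling the end of the circular send queue: it copies up to qend, wraps to the first WQE, and copies the remainder. The same two-part copy in isolation (buf_start/buf_end stand in for the work-queue ring; this is a sketch, not the driver's helper):

#include <linux/kernel.h>
#include <linux/string.h>

/* Copy len bytes into a ring at pos, wrapping to buf_start at buf_end. */
static void *ring_copy(void *pos, void *buf_start, void *buf_end,
		       const void *src, size_t len)
{
	size_t first = min_t(size_t, len, (size_t)(buf_end - pos));

	memcpy(pos, src, first);
	if (first < len) {			/* wrapped: finish at the start */
		memcpy(buf_start, src + first, len - first);
		return buf_start + (len - first);
	}
	return pos + first;
}
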
+
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
struct ib_send_wr *wr)
{
@@ -2509,6 +2629,11 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
int ndescs = mr->ndescs;
memset(umr, 0, sizeof(*umr));
+
+ if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ /* KLMs take twice the size of MTTs */
+ ndescs *= 2;
+
umr->flags = MLX5_UMR_CHECK_NOT_FREE;
umr->klm_octowords = get_klm_octo(ndescs);
umr->mkey_mask = frwr_mkey_mask();
@@ -2558,6 +2683,44 @@ static __be64 get_umr_update_mtt_mask(void)
return cpu_to_be64(result);
}
+static __be64 get_umr_update_translation_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_access_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_A |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_pd_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_PD |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
struct ib_send_wr *wr)
{
@@ -2576,9 +2739,15 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
umr->mkey_mask = get_umr_update_mtt_mask();
umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
- } else {
- umr->mkey_mask = get_umr_reg_mr_mask();
}
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
+ umr->mkey_mask |= get_umr_update_translation_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS)
+ umr->mkey_mask |= get_umr_update_access_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
+ umr->mkey_mask |= get_umr_update_pd_mask();
+ if (!umr->mkey_mask)
+ umr->mkey_mask = get_umr_reg_mr_mask();
} else {
umr->mkey_mask = get_umr_unreg_mr_mask();
}
@@ -2603,13 +2772,19 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
int ndescs = ALIGN(mr->ndescs, 8) >> 1;
memset(seg, 0, sizeof(*seg));
- seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
+
+ if (mr->access_mode == MLX5_ACCESS_MODE_MTT)
+ seg->log2_page_size = ilog2(mr->ibmr.page_size);
+ else if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ /* KLMs take twice the size of MTTs */
+ ndescs *= 2;
+
+ seg->flags = get_umr_flags(access) | mr->access_mode;
seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
seg->start_addr = cpu_to_be64(mr->ibmr.iova);
seg->len = cpu_to_be64(mr->ibmr.length);
seg->xlt_oct_size = cpu_to_be32(ndescs);
- seg->log2_page_size = ilog2(mr->ibmr.page_size);
}
static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
@@ -2630,7 +2805,8 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
seg->flags = convert_access(umrwr->access_flags);
if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
- seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ if (umrwr->pd)
+ seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
}
seg->len = cpu_to_be64(umrwr->length);
@@ -3196,13 +3372,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_qp *qp;
struct mlx5_ib_mr *mr;
struct mlx5_wqe_data_seg *dpseg;
struct mlx5_wqe_xrc_seg *xrc;
- struct mlx5_bf *bf = qp->bf;
+ struct mlx5_bf *bf;
int uninitialized_var(size);
- void *qend = qp->sq.qend;
+ void *qend;
unsigned long flags;
unsigned idx;
int err = 0;
@@ -3214,6 +3390,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
u8 next_fence = 0;
u8 fence;
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
+
+ qp = to_mqp(ibqp);
+ bf = qp->bf;
+ qend = qp->sq.qend;
+
spin_lock_irqsave(&qp->sq.lock, flags);
for (nreq = 0; wr; nreq++, wr = wr->next) {
@@ -3373,16 +3556,37 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
break;
- case IB_QPT_UD:
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
set_datagram_seg(seg, wr);
seg += sizeof(struct mlx5_wqe_datagram_seg);
size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
if (unlikely((seg == qend)))
seg = mlx5_get_send_wqe(qp, 0);
break;
+ case IB_QPT_UD:
+ set_datagram_seg(seg, wr);
+ seg += sizeof(struct mlx5_wqe_datagram_seg);
+ size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+
+ if (unlikely((seg == qend)))
+ seg = mlx5_get_send_wqe(qp, 0);
+
+ /* handle a QP that supports UD offloads */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ struct mlx5_wqe_eth_pad *pad;
+
+ pad = seg;
+ memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
+ seg += sizeof(struct mlx5_wqe_eth_pad);
+ size += sizeof(struct mlx5_wqe_eth_pad) / 16;
+ seg = set_eth_seg(seg, wr, qend, qp, &size);
+
+ if (unlikely((seg == qend)))
+ seg = mlx5_get_send_wqe(qp, 0);
+ }
+ break;
case MLX5_IB_QPT_REG_UMR:
if (wr->opcode != MLX5_IB_WR_UMR) {
err = -EINVAL;
@@ -3502,6 +3706,9 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
int ind;
int i;
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
+
spin_lock_irqsave(&qp->rq.lock, flags);
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
@@ -3822,6 +4029,10 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int err = 0;
u8 raw_packet_qp_state;
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
+ qp_init_attr);
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/*
* Wait for any outstanding page faults, in case the user frees memory
@@ -3874,6 +4085,8 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
+ if (qp->flags & MLX5_IB_QP_SQPN_QP1)
+ qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index b94a55404a59..61bc308bb802 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -152,6 +152,13 @@ struct mlx5_ib_create_qp_resp {
__u32 uuar_index;
};
+struct mlx5_ib_alloc_mw {
+ __u32 comp_mask;
+ __u8 num_klms;
+ __u8 reserved1;
+ __u16 reserved2;
+};
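
The new command struct above keeps to the uverbs ABI rules: explicit reserved padding so the layout is identical for 32- and 64-bit userspace, and a comp_mask that must stay zero until a future extension assigns it a meaning (the driver enforces both in mlx5_ib_alloc_mw()). A sketch of how such a struct is typically grown, where the v2 fields are hypothetical:

#include <linux/types.h>

/* Original command: 8 bytes, no implicit padding. */
struct my_cmd_v1 {
	__u32 comp_mask;	/* must be 0 in v1 */
	__u8  num_items;
	__u8  reserved1;	/* must be 0 */
	__u16 reserved2;	/* must be 0 */
};

/* Extension appends fields and flags their validity via comp_mask. */
struct my_cmd_v2 {
	__u32 comp_mask;	/* e.g. MY_CMD_MASK_TIMEOUT when timeout_ms is valid */
	__u8  num_items;
	__u8  reserved1;
	__u16 reserved2;
	__u32 timeout_ms;	/* new in v2 */
	__u32 reserved3;	/* keep the size an 8-byte multiple */
};
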
+
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
struct mlx5_ib_create_qp *ucmd,
int inlen,
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 7d2e42dd6926..6c00d04b8b28 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,8 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
- pages, NULL);
+ ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
index 846dc97cf260..7964eba8e7ed 100644
--- a/drivers/infiniband/hw/nes/Kconfig
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -2,7 +2,6 @@ config INFINIBAND_NES
tristate "NetEffect RNIC Driver"
depends on PCI && INET && INFINIBAND
select LIBCRC32C
- select INET_LRO
---help---
This is the RDMA Network Interface Card (RNIC) driver for
NetEffect Ethernet Cluster Server Adapters.
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 9f9d5c563a61..35cbb17bec12 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -111,17 +111,6 @@ static struct pci_device_id nes_pci_table[] = {
MODULE_DEVICE_TABLE(pci, nes_pci_table);
-/* registered nes netlink callbacks */
-static struct ibnl_client_cbs nes_nl_cb_table[] = {
- [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
- [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
- [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
- [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
- [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
- [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
- [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
-};
-
static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *);
static int nes_net_event(struct notifier_block *, unsigned long, void *);
static int nes_notifiers_registered;
@@ -682,17 +671,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
}
nes_notifiers_registered++;
- if (ibnl_add_client(RDMA_NL_NES, RDMA_NL_IWPM_NUM_OPS, nes_nl_cb_table))
- printk(KERN_ERR PFX "%s[%u]: Failed to add netlink callback\n",
- __func__, __LINE__);
-
- ret = iwpm_init(RDMA_NL_NES);
- if (ret) {
- printk(KERN_ERR PFX "%s: port mapper initialization failed\n",
- pci_name(pcidev));
- goto bail7;
- }
-
INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
/* Initialize network devices */
@@ -731,7 +709,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n",
nesdev->netdev_count, nesdev->nesadapter->netdev_count);
- ibnl_remove_client(RDMA_NL_NES);
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
@@ -795,8 +772,6 @@ static void nes_remove(struct pci_dev *pcidev)
nesdev->nesadapter->netdev_count--;
}
}
- ibnl_remove_client(RDMA_NL_NES);
- iwpm_exit(RDMA_NL_NES);
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index cb9f0f27308d..7f0aa23aef9d 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -482,11 +482,11 @@ static void form_cm_frame(struct sk_buff *skb,
iph->ttl = 0x40;
iph->protocol = 0x06; /* IPPROTO_TCP */
- iph->saddr = htonl(cm_node->mapped_loc_addr);
- iph->daddr = htonl(cm_node->mapped_rem_addr);
+ iph->saddr = htonl(cm_node->loc_addr);
+ iph->daddr = htonl(cm_node->rem_addr);
- tcph->source = htons(cm_node->mapped_loc_port);
- tcph->dest = htons(cm_node->mapped_rem_port);
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
if (flags & SET_ACK) {
@@ -525,125 +525,6 @@ static void form_cm_frame(struct sk_buff *skb,
cm_packets_created++;
}
-/*
- * nes_create_sockaddr - Record ip addr and tcp port in a sockaddr struct
- */
-static void nes_create_sockaddr(__be32 ip_addr, __be16 port,
- struct sockaddr_storage *addr)
-{
- struct sockaddr_in *nes_sockaddr = (struct sockaddr_in *)addr;
- nes_sockaddr->sin_family = AF_INET;
- memcpy(&nes_sockaddr->sin_addr.s_addr, &ip_addr, sizeof(__be32));
- nes_sockaddr->sin_port = port;
-}
-
-/*
- * nes_create_mapinfo - Create a mapinfo object in the port mapper data base
- */
-static int nes_create_mapinfo(struct nes_cm_info *cm_info)
-{
- struct sockaddr_storage local_sockaddr;
- struct sockaddr_storage mapped_sockaddr;
-
- nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
- &local_sockaddr);
- nes_create_sockaddr(htonl(cm_info->mapped_loc_addr),
- htons(cm_info->mapped_loc_port), &mapped_sockaddr);
-
- return iwpm_create_mapinfo(&local_sockaddr,
- &mapped_sockaddr, RDMA_NL_NES);
-}
-
-/*
- * nes_remove_mapinfo - Remove a mapinfo object from the port mapper data base
- * and send a remove mapping op message to
- * the userspace port mapper
- */
-static int nes_remove_mapinfo(u32 loc_addr, u16 loc_port,
- u32 mapped_loc_addr, u16 mapped_loc_port)
-{
- struct sockaddr_storage local_sockaddr;
- struct sockaddr_storage mapped_sockaddr;
-
- nes_create_sockaddr(htonl(loc_addr), htons(loc_port), &local_sockaddr);
- nes_create_sockaddr(htonl(mapped_loc_addr), htons(mapped_loc_port),
- &mapped_sockaddr);
-
- iwpm_remove_mapinfo(&local_sockaddr, &mapped_sockaddr);
- return iwpm_remove_mapping(&local_sockaddr, RDMA_NL_NES);
-}
-
-/*
- * nes_form_pm_msg - Form a port mapper message with mapping info
- */
-static void nes_form_pm_msg(struct nes_cm_info *cm_info,
- struct iwpm_sa_data *pm_msg)
-{
- nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
- &pm_msg->loc_addr);
- nes_create_sockaddr(htonl(cm_info->rem_addr), htons(cm_info->rem_port),
- &pm_msg->rem_addr);
-}
-
-/*
- * nes_form_reg_msg - Form a port mapper message with dev info
- */
-static void nes_form_reg_msg(struct nes_vnic *nesvnic,
- struct iwpm_dev_data *pm_msg)
-{
- memcpy(pm_msg->dev_name, nesvnic->nesibdev->ibdev.name,
- IWPM_DEVNAME_SIZE);
- memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
-}
-
-static void record_sockaddr_info(struct sockaddr_storage *addr_info,
- nes_addr_t *ip_addr, u16 *port_num)
-{
- struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
-
- if (in_addr->sin_family == AF_INET) {
- *ip_addr = ntohl(in_addr->sin_addr.s_addr);
- *port_num = ntohs(in_addr->sin_port);
- }
-}
-
-/*
- * nes_record_pm_msg - Save the received mapping info
- */
-static void nes_record_pm_msg(struct nes_cm_info *cm_info,
- struct iwpm_sa_data *pm_msg)
-{
- record_sockaddr_info(&pm_msg->mapped_loc_addr,
- &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
-
- record_sockaddr_info(&pm_msg->mapped_rem_addr,
- &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
-}
-
-/*
- * nes_get_reminfo - Get the address info of the remote connecting peer
- */
-static int nes_get_remote_addr(struct nes_cm_node *cm_node)
-{
- struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
- struct sockaddr_storage remote_addr;
- int ret;
-
- nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
- htons(cm_node->mapped_loc_port), &mapped_loc_addr);
- nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
- htons(cm_node->mapped_rem_port), &mapped_rem_addr);
-
- ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
- &remote_addr, RDMA_NL_NES);
- if (ret)
- nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
- else
- record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
- &cm_node->rem_port);
- return ret;
-}
-
/**
* print_core - dump a cm core
*/
@@ -1266,11 +1147,10 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
loc_addr, loc_port,
cm_node->rem_addr, cm_node->rem_port,
rem_addr, rem_port);
- if ((cm_node->mapped_loc_addr == loc_addr) &&
- (cm_node->mapped_loc_port == loc_port) &&
- (cm_node->mapped_rem_addr == rem_addr) &&
- (cm_node->mapped_rem_port == rem_port)) {
-
+ if ((cm_node->loc_addr == loc_addr) &&
+ (cm_node->loc_port == loc_port) &&
+ (cm_node->rem_addr == rem_addr) &&
+ (cm_node->rem_port == rem_port)) {
add_ref_cm_node(cm_node);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
return cm_node;
@@ -1287,8 +1167,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
* find_listener - find a cm node listening on this addr-port pair
*/
static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
- nes_addr_t dst_addr, u16 dst_port,
- enum nes_cm_listener_state listener_state, int local)
+ nes_addr_t dst_addr, u16 dst_port,
+ enum nes_cm_listener_state listener_state)
{
unsigned long flags;
struct nes_cm_listener *listen_node;
@@ -1298,13 +1178,9 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
/* walk list and find cm_node associated with this session ID */
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
- if (local) {
- listen_addr = listen_node->loc_addr;
- listen_port = listen_node->loc_port;
- } else {
- listen_addr = listen_node->mapped_loc_addr;
- listen_port = listen_node->mapped_loc_port;
- }
+ listen_addr = listen_node->loc_addr;
+ listen_port = listen_node->loc_port;
+
/* compare node pair, return node handle if a match */
if (((listen_addr == dst_addr) ||
listen_addr == 0x00000000) &&
@@ -1443,17 +1319,13 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
if (listener->nesvnic) {
nes_manage_apbvt(listener->nesvnic,
- listener->mapped_loc_port,
+ listener->loc_port,
PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
- nes_remove_mapinfo(listener->loc_addr,
- listener->loc_port,
- listener->mapped_loc_addr,
- listener->mapped_loc_port);
nes_debug(NES_DBG_NLMSG,
- "Delete APBVT mapped_loc_port = %04X\n",
- listener->mapped_loc_port);
+ "Delete APBVT loc_port = %04X\n",
+ listener->loc_port);
}
nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
@@ -1602,11 +1474,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->rem_addr = cm_info->rem_addr;
cm_node->rem_port = cm_info->rem_port;
- cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
- cm_node->mapped_rem_addr = cm_info->mapped_rem_addr;
- cm_node->mapped_loc_port = cm_info->mapped_loc_port;
- cm_node->mapped_rem_port = cm_info->mapped_rem_port;
-
cm_node->mpa_frame_rev = mpa_version;
cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
cm_node->mpav2_ird_ord = 0;
@@ -1655,10 +1522,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->loopbackpartner = NULL;
/* get the mac addr for the remote node */
- oldarpindex = nes_arp_table(nesdev, cm_node->mapped_rem_addr,
- NULL, NES_ARP_RESOLVE);
- arpindex = nes_addr_resolve_neigh(nesvnic,
- cm_node->mapped_rem_addr, oldarpindex);
+ oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr,
+ NULL, NES_ARP_RESOLVE);
+ arpindex = nes_addr_resolve_neigh(nesvnic, cm_node->rem_addr,
+ oldarpindex);
if (arpindex < 0) {
kfree(cm_node);
return NULL;
@@ -1720,14 +1587,12 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
} else {
if (cm_node->apbvt_set && cm_node->nesvnic) {
- nes_manage_apbvt(cm_node->nesvnic, cm_node->mapped_loc_port,
+ nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
}
- nes_debug(NES_DBG_NLMSG, "Delete APBVT mapped_loc_port = %04X\n",
- cm_node->mapped_loc_port);
- nes_remove_mapinfo(cm_node->loc_addr, cm_node->loc_port,
- cm_node->mapped_loc_addr, cm_node->mapped_loc_port);
+ nes_debug(NES_DBG_NLMSG, "Delete APBVT loc_port = %04X\n",
+ cm_node->loc_port);
}
atomic_dec(&cm_core->node_cnt);
@@ -2184,7 +2049,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
cm_node->state = NES_CM_STATE_ESTABLISHED;
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
- nes_get_remote_addr(cm_node);
handle_rcv_mpa(cm_node, skb);
} else { /* rcvd ACK only */
dev_kfree_skb_any(skb);
@@ -2399,17 +2263,14 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
{
struct nes_cm_listener *listener;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
unsigned long flags;
- int iwpm_err = 0;
nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
cm_info->loc_addr, cm_info->loc_port);
/* cannot have multiple matching listeners */
listener = find_listener(cm_core, cm_info->loc_addr, cm_info->loc_port,
- NES_CM_LISTENER_EITHER_STATE, 1);
+ NES_CM_LISTENER_EITHER_STATE);
if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
/* find automatically incs ref count ??? */
@@ -2419,22 +2280,6 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
}
if (!listener) {
- nes_form_reg_msg(nesvnic, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
- if (iwpm_err) {
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- nes_form_pm_msg(cm_info, &pm_msg);
- iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_NES);
- if (iwpm_err)
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper query fail (err = %d).\n", iwpm_err);
- else
- nes_record_pm_msg(cm_info, &pm_msg);
- }
-
/* create a CM listen node (1/2 node to compare incoming traffic to) */
listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
if (!listener) {
@@ -2444,8 +2289,6 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
listener->loc_addr = cm_info->loc_addr;
listener->loc_port = cm_info->loc_port;
- listener->mapped_loc_addr = cm_info->mapped_loc_addr;
- listener->mapped_loc_port = cm_info->mapped_loc_port;
listener->reused_node = 0;
atomic_set(&listener->ref_count, 1);
@@ -2507,18 +2350,18 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
if (cm_info->loc_addr == cm_info->rem_addr) {
loopbackremotelistener = find_listener(cm_core,
- cm_node->mapped_loc_addr, cm_node->mapped_rem_port,
- NES_CM_LISTENER_ACTIVE_STATE, 0);
+ cm_node->loc_addr, cm_node->rem_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
if (loopbackremotelistener == NULL) {
create_event(cm_node, NES_CM_EVENT_ABORTED);
} else {
loopback_cm_info = *cm_info;
loopback_cm_info.loc_port = cm_info->rem_port;
loopback_cm_info.rem_port = cm_info->loc_port;
- loopback_cm_info.mapped_loc_port =
- cm_info->mapped_rem_port;
- loopback_cm_info.mapped_rem_port =
- cm_info->mapped_loc_port;
loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
loopbackremotenode = make_cm_node(cm_core, nesvnic,
&loopback_cm_info, loopbackremotelistener);
@@ -2747,12 +2590,6 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
nfo.rem_addr = ntohl(iph->saddr);
nfo.rem_port = ntohs(tcph->source);
- /* If port mapper is available these should be mapped address info */
- nfo.mapped_loc_addr = ntohl(iph->daddr);
- nfo.mapped_loc_port = ntohs(tcph->dest);
- nfo.mapped_rem_addr = ntohl(iph->saddr);
- nfo.mapped_rem_port = ntohs(tcph->source);
-
tmp_daddr = cpu_to_be32(iph->daddr);
tmp_saddr = cpu_to_be32(iph->saddr);
@@ -2761,8 +2598,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
do {
cm_node = find_node(cm_core,
- nfo.mapped_rem_port, nfo.mapped_rem_addr,
- nfo.mapped_loc_port, nfo.mapped_loc_addr);
+ nfo.rem_port, nfo.rem_addr,
+ nfo.loc_port, nfo.loc_addr);
if (!cm_node) {
/* Only type of packet accepted are for */
@@ -2771,9 +2608,9 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
skb_handled = 0;
break;
}
- listener = find_listener(cm_core, nfo.mapped_loc_addr,
- nfo.mapped_loc_port,
- NES_CM_LISTENER_ACTIVE_STATE, 0);
+ listener = find_listener(cm_core, nfo.loc_addr,
+ nfo.loc_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
if (!listener) {
nfo.cm_id = NULL;
nfo.conn_type = 0;
@@ -2856,12 +2693,22 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
cm_core->event_wq = create_singlethread_workqueue("nesewq");
+ if (!cm_core->event_wq)
+ goto out_free_cmcore;
cm_core->post_event = nes_cm_post_event;
nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
+ if (!cm_core->disconn_wq)
+ goto out_free_wq;
print_core(cm_core);
return cm_core;
+
+out_free_wq:
+ destroy_workqueue(cm_core->event_wq);
+out_free_cmcore:
+ kfree(cm_core);
+ return NULL;
}
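
The nes_cm_alloc_core() fix above adopts the usual kernel unwind pattern: each acquisition gets a label that releases everything obtained before it, and a failure jumps to the deepest applicable label. The shape in miniature (a and b are hypothetical resources):

#include <linux/slab.h>

static int setup_two(void **a, void **b)
{
	*a = kzalloc(16, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;

	*b = kzalloc(16, GFP_KERNEL);
	if (!*b)
		goto out_free_a;	/* undo only what succeeded */

	return 0;

out_free_a:
	kfree(*a);
	*a = NULL;
	return -ENOMEM;
}
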
@@ -3121,8 +2968,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
atomic_inc(&cm_disconnects);
cm_event.event = IW_CM_EVENT_DISCONNECT;
cm_event.status = disconn_status;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
@@ -3148,8 +2995,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = 0;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
@@ -3240,8 +3087,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
u8 *start_ptr = &start_addr;
u8 **start_buff = &start_ptr;
u16 buff_len = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
@@ -3378,11 +3225,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nes_cm_init_tsa_conn(nesqp, cm_node);
nesqp->nesqp_context->tcpPorts[0] =
- cpu_to_le16(cm_node->mapped_loc_port);
+ cpu_to_le16(cm_node->loc_port);
nesqp->nesqp_context->tcpPorts[1] =
- cpu_to_le16(cm_node->mapped_rem_port);
+ cpu_to_le16(cm_node->rem_port);
- nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
+ nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->rem_addr);
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3406,9 +3253,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
memset(&nes_quad, 0, sizeof(nes_quad));
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
- nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
- nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
+ nes_quad.SrcIpadr = htonl(cm_node->rem_addr);
+ nes_quad.TcpPorts[0] = htons(cm_node->rem_port);
+ nes_quad.TcpPorts[1] = htons(cm_node->loc_port);
/* Produce hash key */
crc_value = get_crc_value(&nes_quad);
@@ -3437,8 +3284,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_event.event = IW_CM_EVENT_ESTABLISHED;
cm_event.status = 0;
cm_event.provider_data = (void *)nesqp;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
cm_event.ird = cm_node->ird_size;
@@ -3508,11 +3355,8 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct nes_cm_node *cm_node;
struct nes_cm_info cm_info;
int apbvt_set = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
- int iwpm_err = 0;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
if (cm_id->remote_addr.ss_family != AF_INET)
return -ENOSYS;
@@ -3558,37 +3402,13 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_info.cm_id = cm_id;
cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
- /* No port mapper available, go with the specified peer information */
- cm_info.mapped_loc_addr = cm_info.loc_addr;
- cm_info.mapped_loc_port = cm_info.loc_port;
- cm_info.mapped_rem_addr = cm_info.rem_addr;
- cm_info.mapped_rem_port = cm_info.rem_port;
-
- nes_form_reg_msg(nesvnic, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
- if (iwpm_err) {
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- nes_form_pm_msg(&cm_info, &pm_msg);
- iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_NES);
- if (iwpm_err)
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper query fail (err = %d).\n", iwpm_err);
- else
- nes_record_pm_msg(&cm_info, &pm_msg);
- }
-
if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) {
- nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
- PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+ nes_manage_apbvt(nesvnic, cm_info.loc_port,
+ PCI_FUNC(nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_ADD);
apbvt_set = 1;
}
- if (nes_create_mapinfo(&cm_info))
- return -ENOMEM;
-
cm_id->add_ref(cm_id);
/* create a connect CM node connection */
@@ -3597,14 +3417,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
&cm_info);
if (!cm_node) {
if (apbvt_set)
- nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
+ nes_manage_apbvt(nesvnic, cm_info.loc_port,
PCI_FUNC(nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
- nes_debug(NES_DBG_NLMSG, "Delete mapped_loc_port = %04X\n",
- cm_info.mapped_loc_port);
- nes_remove_mapinfo(cm_info.loc_addr, cm_info.loc_port,
- cm_info.mapped_loc_addr, cm_info.mapped_loc_port);
+ nes_debug(NES_DBG_NLMSG, "Delete loc_port = %04X\n",
+ cm_info.loc_port);
cm_id->rem_ref(cm_id);
return -ENOMEM;
}
@@ -3633,12 +3451,12 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
struct nes_cm_listener *cm_node;
struct nes_cm_info cm_info;
int err;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
cm_id, ntohs(laddr->sin_port));
- if (cm_id->local_addr.ss_family != AF_INET)
+ if (cm_id->m_local_addr.ss_family != AF_INET)
return -ENOSYS;
nesvnic = to_nesvnic(cm_id->device);
if (!nesvnic)
@@ -3658,10 +3476,6 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
- /* No port mapper available, go with the specified info */
- cm_info.mapped_loc_addr = cm_info.loc_addr;
- cm_info.mapped_loc_port = cm_info.loc_port;
-
cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
if (!cm_node) {
printk(KERN_ERR "%s[%u] Error returned from listen API call\n",
@@ -3673,10 +3487,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_node->tos = cm_id->tos;
if (!cm_node->reused_node) {
- if (nes_create_mapinfo(&cm_info))
- return -ENOMEM;
-
- err = nes_manage_apbvt(nesvnic, cm_node->mapped_loc_port,
+ err = nes_manage_apbvt(nesvnic, cm_node->loc_port,
PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_ADD);
if (err) {
@@ -3786,8 +3597,8 @@ static void cm_event_connected(struct nes_cm_event *event)
nesvnic = to_nesvnic(nesqp->ibqp.device);
nesdev = nesvnic->nesdev;
nesadapter = nesdev->nesadapter;
- laddr = (struct sockaddr_in *)&cm_id->local_addr;
- raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
cm_event_laddr = (struct sockaddr_in *)&cm_event.local_addr;
if (nesqp->destroyed)
@@ -3802,10 +3613,10 @@ static void cm_event_connected(struct nes_cm_event *event)
/* set the QP tsa context */
nesqp->nesqp_context->tcpPorts[0] =
- cpu_to_le16(cm_node->mapped_loc_port);
+ cpu_to_le16(cm_node->loc_port);
nesqp->nesqp_context->tcpPorts[1] =
- cpu_to_le16(cm_node->mapped_rem_port);
- nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
+ cpu_to_le16(cm_node->rem_port);
+ nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->rem_addr);
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3835,9 +3646,9 @@ static void cm_event_connected(struct nes_cm_event *event)
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
- nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
- nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
+ nes_quad.SrcIpadr = htonl(cm_node->rem_addr);
+ nes_quad.TcpPorts[0] = htons(cm_node->rem_port);
+ nes_quad.TcpPorts[1] = htons(cm_node->loc_port);
/* Produce hash key */
crc_value = get_crc_value(&nes_quad);
@@ -3858,14 +3669,14 @@ static void cm_event_connected(struct nes_cm_event *event)
cm_event.provider_data = cm_id->provider_data;
cm_event_laddr->sin_family = AF_INET;
cm_event_laddr->sin_port = laddr->sin_port;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
cm_event.private_data_len = (u8)event->cm_node->mpa_frame_size;
cm_event.ird = cm_node->ird_size;
cm_event.ord = cm_node->ord_size;
- cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+ cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.loc_addr);
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
@@ -3913,8 +3724,8 @@ static void cm_event_connect_error(struct nes_cm_event *event)
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
cm_event.status = -ECONNRESET;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
@@ -3970,8 +3781,8 @@ static void cm_event_reset(struct nes_cm_event *event)
cm_event.event = IW_CM_EVENT_DISCONNECT;
cm_event.status = -ECONNRESET;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
@@ -3981,8 +3792,8 @@ static void cm_event_reset(struct nes_cm_event *event)
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = 0;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
nes_debug(NES_DBG_CM, "NODE %p Generating CLOSE\n", event->cm_node);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 147c2c884227..d827d03e3941 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -293,8 +293,8 @@ struct nes_cm_listener {
struct list_head list;
struct nes_cm_core *cm_core;
u8 loc_mac[ETH_ALEN];
- nes_addr_t loc_addr, mapped_loc_addr;
- u16 loc_port, mapped_loc_port;
+ nes_addr_t loc_addr;
+ u16 loc_port;
struct iw_cm_id *cm_id;
enum nes_cm_conn_type conn_type;
atomic_t ref_count;
@@ -309,9 +309,7 @@ struct nes_cm_listener {
/* per connection node and node state information */
struct nes_cm_node {
nes_addr_t loc_addr, rem_addr;
- nes_addr_t mapped_loc_addr, mapped_rem_addr;
u16 loc_port, rem_port;
- u16 mapped_loc_port, mapped_rem_port;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
@@ -368,11 +366,6 @@ struct nes_cm_info {
u16 rem_port;
nes_addr_t loc_addr;
nes_addr_t rem_addr;
- u16 mapped_loc_port;
- u16 mapped_rem_port;
- nes_addr_t mapped_loc_addr;
- nes_addr_t mapped_rem_addr;
-
enum nes_cm_conn_type conn_type;
int backlog;
};
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 4713dd7ed764..a1c6481d8038 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -35,18 +35,11 @@
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
#include <linux/if_vlan.h>
-#include <linux/inet_lro.h>
#include <linux/slab.h>
#include "nes.h"
-static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
-module_param(nes_lro_max_aggr, uint, 0444);
-MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
-
static int wide_ppm_offset;
module_param(wide_ppm_offset, int, 0644);
MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
@@ -1642,25 +1635,6 @@ static void nes_rq_wqes_timeout(unsigned long parm)
}
-static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- unsigned int ip_len;
- struct iphdr *iph;
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
- return 0;
-}
-
-
/**
* nes_init_nic_qp
*/
@@ -1895,14 +1869,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
return -ENOMEM;
}
- nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr;
- nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS;
- nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc;
- nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;
- nesvnic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- nesvnic->lro_mgr.dev = netdev;
- nesvnic->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
return 0;
}
@@ -2809,13 +2775,10 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
u16 pkt_type;
u16 rqes_processed = 0;
u8 sq_cqes = 0;
- u8 nes_use_lro = 0;
head = cq->cq_head;
cq_size = cq->cq_size;
cq->cqes_pending = 1;
- if (nesvnic->netdev->features & NETIF_F_LRO)
- nes_use_lro = 1;
do {
if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
NES_NIC_CQE_VALID) {
@@ -2950,10 +2913,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
__vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
}
- if (nes_use_lro)
- lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
- else
- netif_receive_skb(rx_skb);
+ napi_gro_receive(&nesvnic->napi, rx_skb);
skip_rx_indicate0:
;
@@ -2984,8 +2944,6 @@ skip_rx_indicate0:
} while (1);
- if (nes_use_lro)
- lro_flush_all(&nesvnic->lro_mgr);
if (sq_cqes) {
barrier();
/* restart the queue if it had been stopped */
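The inet_lro removal above is the usual LRO-to-GRO conversion: instead of driver-owned aggregation state (descriptor array, header-parse callback, explicit flush), each completed receive skb is handed to napi_gro_receive() and the core aggregates. A sketch of the resulting poll-side pattern, assuming a hypothetical example_next_rx_skb() that dequeues completed receives:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct nes_vnic *nesvnic =
			container_of(napi, struct nes_vnic, napi);
		struct sk_buff *skb;
		int done = 0;

		while (done < budget &&
		       (skb = example_next_rx_skb(nesvnic)) != NULL) {
			skb->protocol = eth_type_trans(skb, nesvnic->netdev);
			napi_gro_receive(napi, skb); /* core aggregates TCP */
			done++;
		}
		if (done < budget)
			napi_complete(napi);
		return done;
	}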
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index c9080208aad2..1b66ef1e9937 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -33,8 +33,6 @@
#ifndef __NES_HW_H
#define __NES_HW_H
-#include <linux/inet_lro.h>
-
#define NES_PHY_TYPE_CX4 1
#define NES_PHY_TYPE_1G 2
#define NES_PHY_TYPE_ARGUS 4
@@ -1049,8 +1047,6 @@ struct nes_hw_tune_timer {
#define NES_TIMER_ENABLE_LIMIT 4
#define NES_MAX_LINK_INTERRUPTS 128
#define NES_MAX_LINK_CHECK 200
-#define NES_MAX_LRO_DESCRIPTORS 32
-#define NES_LRO_MAX_AGGR 64
struct nes_adapter {
u64 fw_ver;
@@ -1263,9 +1259,6 @@ struct nes_vnic {
u8 next_qp_nic_index;
u8 of_device_registered;
u8 rdma_enabled;
- u32 lro_max_aggr;
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
struct timer_list event_timer;
enum ib_event_type delayed_event;
enum ib_event_type last_dispatched_event;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 6a0bdfa0ce2e..92914539edc7 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -500,9 +500,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
*/
- if (!netif_carrier_ok(netdev))
- return NETDEV_TX_OK;
-
if (netif_queue_stopped(netdev))
return NETDEV_TX_BUSY;
@@ -1085,9 +1082,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Free 4Kpbls",
"Free 256pbls",
"Timer Inits",
- "LRO aggregated",
- "LRO flushed",
- "LRO no_desc",
"PAU CreateQPs",
"PAU DestroyQPs",
};
@@ -1302,9 +1296,6 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = nesadapter->free_4kpbl;
target_stat_values[++index] = nesadapter->free_256pbl;
target_stat_values[++index] = int_mod_timer_init;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
target_stat_values[++index] = atomic_read(&pau_qps_created);
target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
}
@@ -1709,7 +1700,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netdev->hw_features |= NETIF_F_TSO;
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
- netdev->hw_features |= NETIF_F_LRO;
nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
" nic_index = %d, logical_port = %d, mac_index = %d.\n",
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 8c4daf7f22ec..fba69a39a7eb 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -56,7 +56,8 @@ static int nes_dereg_mr(struct ib_mr *ib_mr);
/**
* nes_alloc_mw
*/
-static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type)
+static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct nes_pd *nespd = to_nespd(ibpd);
struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
@@ -3768,6 +3769,8 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
nesibdev->ibdev.get_port_immutable = nes_port_immutable;
+ memcpy(nesibdev->ibdev.iwcm->ifname, netdev->name,
+ sizeof(nesibdev->ibdev.iwcm->ifname));
return nesibdev;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 12503f15fbd6..45bdfa0e3b2b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -114,6 +114,7 @@ struct ocrdma_dev_attr {
u8 local_ca_ack_delay;
u8 ird;
u8 num_ird_pages;
+ u8 udp_encap;
};
struct ocrdma_dma_mem {
@@ -356,6 +357,7 @@ struct ocrdma_ah {
struct ocrdma_av *av;
u16 sgid_index;
u32 id;
+ u8 hdr_type;
};
struct ocrdma_qp_hwq_info {
@@ -598,4 +600,10 @@ static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
}
+static inline bool ocrdma_is_udp_encap_supported(struct ocrdma_dev *dev)
+{
+ return (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV4) ||
+ (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV6);
+}
+
#endif
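The two attr.udp_encap bits report whether firmware can emit RoCE v2 frames (RDMA over UDP, well-known destination port 4791) on IPv4 and/or IPv6; later hunks in this patch gate every v2-only path on this helper, for example when advertising port capabilities:

	if (ocrdma_is_udp_encap_supported(dev))
		immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;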
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 3790771f2baa..797362a297b2 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -55,18 +55,46 @@
#define OCRDMA_VID_PCP_SHIFT 0xD
+static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
+{
+ switch (hdr_type) {
+ case OCRDMA_L3_TYPE_IB_GRH:
+ return (u16)0x8915;
+ case OCRDMA_L3_TYPE_IPV4:
+ return (u16)0x0800;
+ case OCRDMA_L3_TYPE_IPV6:
+ return (u16)0x86dd;
+ default:
+ pr_err("ocrdma%d: Invalid network header\n", devid);
+ return 0;
+ }
+}
+
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah_attr *attr, union ib_gid *sgid,
int pdid, bool *isvlan, u16 vlan_tag)
{
- int status = 0;
+ int status;
struct ocrdma_eth_vlan eth;
struct ocrdma_grh grh;
int eth_sz;
+ u16 proto_num = 0;
+ u8 nxthdr = 0x11;
+ struct iphdr ipv4;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
memset(&eth, 0, sizeof(eth));
memset(&grh, 0, sizeof(grh));
+ /* Protocol Number */
+ proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
+ if (!proto_num)
+ return -EINVAL;
+ nxthdr = (proto_num == 0x8915) ? 0x1b : 0x11;
/* VLAN */
if (!vlan_tag || (vlan_tag > 0xFFF))
vlan_tag = dev->pvid;
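For reference, the mapping the new proto_num/nxthdr logic encodes: 0x8915 is the RoCE (v1) ethertype carrying a raw IB GRH, while RoCE v2 uses the plain IPv4/IPv6 ethertypes with the payload in UDP, hence next-header 0x11 (IPPROTO_UDP) rather than 0x1b (the IBA next-header value meaning a BTH follows). Summarized as a table (illustrative, not part of the patch):

	static const struct { u16 ethertype; u8 nxthdr; } l3_map[] = {
		[OCRDMA_L3_TYPE_IB_GRH] = { 0x8915, 0x1b }, /* RoCE v1 */
		[OCRDMA_L3_TYPE_IPV4]   = { 0x0800, 0x11 }, /* RoCE v2 */
		[OCRDMA_L3_TYPE_IPV6]   = { 0x86dd, 0x11 }, /* RoCE v2 */
	};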
@@ -78,13 +106,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
dev->id);
}
eth.eth_type = cpu_to_be16(0x8100);
- eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+ eth.roce_eth_type = cpu_to_be16(proto_num);
vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
eth.vlan_tag = cpu_to_be16(vlan_tag);
eth_sz = sizeof(struct ocrdma_eth_vlan);
*isvlan = true;
} else {
- eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+ eth.eth_type = cpu_to_be16(proto_num);
eth_sz = sizeof(struct ocrdma_eth_basic);
}
/* MAC */
@@ -93,18 +121,33 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
if (status)
return status;
ah->sgid_index = attr->grh.sgid_index;
- memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
- memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
-
- grh.tclass_flow = cpu_to_be32((6 << 28) |
- (attr->grh.traffic_class << 24) |
- attr->grh.flow_label);
- /* 0x1b is next header value in GRH */
- grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
- (0x1b << 8) | attr->grh.hop_limit);
/* Eth HDR */
memcpy(&ah->av->eth_hdr, &eth, eth_sz);
- memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+ if (ah->hdr_type == RDMA_NETWORK_IPV4) {
+ *((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
+ attr->grh.traffic_class);
+ ipv4.id = cpu_to_be16(pdid);
+ ipv4.frag_off = htons(IP_DF);
+ ipv4.tot_len = htons(0);
+ ipv4.ttl = attr->grh.hop_limit;
+ ipv4.protocol = nxthdr;
+ rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+ ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
+ rdma_gid2ip(&dgid_addr._sockaddr, &attr->grh.dgid);
+ ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
+ memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
+ } else {
+ memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+ grh.tclass_flow = cpu_to_be32((6 << 28) |
+ (attr->grh.traffic_class << 24) |
+ attr->grh.flow_label);
+ memcpy(&grh.dgid[0], attr->grh.dgid.raw,
+ sizeof(attr->grh.dgid.raw));
+ grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
+ (nxthdr << 8) |
+ attr->grh.hop_limit);
+ memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+ }
if (*isvlan)
ah->av->valid |= OCRDMA_AV_VLAN_VALID;
ah->av->valid = cpu_to_le32(ah->av->valid);
@@ -128,6 +171,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
+
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
@@ -148,6 +192,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
dev_put(sgid_attr.ndev);
}
+ /* Get network header type for this GID */
+ ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
if ((pd->uctx) &&
(!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
@@ -172,6 +218,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
*ahid_addr = 0;
*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+ if (ocrdma_is_udp_encap_supported(dev)) {
+ *ahid_addr |= ((u32)ah->hdr_type &
+ OCRDMA_AH_L3_TYPE_MASK) <<
+ OCRDMA_AH_L3_TYPE_SHIFT;
+ }
if (isvlan)
*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
OCRDMA_AH_VLAN_VALID_SHIFT);
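The AH handle returned to user space is thus a packed 32-bit word. A sketch of the layout using the masks and shifts from ocrdma_ah.h as modified below (the pack helper is illustrative):

	/* bits  0..9   AH index        (OCRDMA_AH_ID_MASK)
	 * bits 29..30  L3/network type (OCRDMA_AH_L3_TYPE_*)
	 * bit  31      VLAN valid      (OCRDMA_AH_VLAN_VALID_*)
	 */
	static inline u32 example_ahid_pack(u32 id, u8 l3_type, bool vlan)
	{
		u32 v = id & OCRDMA_AH_ID_MASK;

		v |= ((u32)l3_type & OCRDMA_AH_L3_TYPE_MASK) <<
			OCRDMA_AH_L3_TYPE_SHIFT;
		if (vlan)
			v |= OCRDMA_AH_VLAN_VALID_MASK <<
				OCRDMA_AH_VLAN_VALID_SHIFT;
		return v;
	}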
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 04a30ae67473..3856dd4c7e3d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -46,9 +46,10 @@
enum {
OCRDMA_AH_ID_MASK = 0x3FF,
OCRDMA_AH_VLAN_VALID_MASK = 0x01,
- OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F
+ OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F,
+ OCRDMA_AH_L3_TYPE_MASK = 0x03,
+ OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* two-bit field at bits 29-30 */
};
-
struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
int ocrdma_destroy_ah(struct ib_ah *);
int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 283ca842ff74..16740dcb876b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1113,7 +1113,7 @@ mbx_err:
static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
void *payload_va)
{
- int status = 0;
+ int status;
struct ocrdma_mbx_rsp *rsp = payload_va;
if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
@@ -1144,6 +1144,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_pd =
(rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+ attr->udp_encap = (rsp->max_pd_ca_ack_delay &
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT;
attr->max_dpp_pds =
(rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
@@ -2138,7 +2141,6 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
enum ib_qp_state *old_ib_state)
{
unsigned long flags;
- int status = 0;
enum ocrdma_qp_state new_state;
new_state = get_ocrdma_qp_state(new_ib_state);
@@ -2163,7 +2165,7 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
qp->state = new_state;
spin_unlock_irqrestore(&qp->q_lock, flags);
- return status;
+ return 0;
}
static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
@@ -2501,7 +2503,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
union ib_gid sgid, zgid;
struct ib_gid_attr sgid_attr;
u32 vlan_id = 0xFFFF;
- u8 mac_addr[6];
+ u8 mac_addr[6], hdr_type;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
@@ -2516,6 +2523,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
cmd->params.hop_lmt_rq_psn |=
(ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
+
+ /* GIDs */
memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
sizeof(cmd->params.dgid));
@@ -2538,6 +2547,16 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
return status;
cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
(mac_addr[2] << 16) | (mac_addr[3] << 24);
+
+ hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ if (hdr_type == RDMA_NETWORK_IPV4) {
+ rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
+ rdma_gid2ip(&dgid_addr._sockaddr, &ah_attr->grh.dgid);
+ memcpy(&cmd->params.dgid[0],
+ &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+ memcpy(&cmd->params.sgid[0],
+ &sgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+ }
/* convert them to LE format. */
ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
@@ -2558,7 +2577,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
cmd->params.rnt_rc_sl_fl |=
(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
}
-
+ cmd->params.max_sge_recv_flags |= ((hdr_type <<
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) &
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK);
return 0;
}
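The IPv4 branch works because RoCE v2 GIDs for IPv4 are IPv4-mapped IPv6 addresses (::ffff:a.b.c.d), so the 4-byte address the mailbox wants is the tail of the GID, which is what rdma_gid2ip() resolves to here. A hedged one-liner illustrating the equivalence:

	static inline __be32 example_gid_to_ipv4(const union ib_gid *gid)
	{
		__be32 addr;

		memcpy(&addr, &gid->raw[12], sizeof(addr)); /* bytes 12..15 */
		return addr;
	}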
@@ -2871,7 +2892,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
struct ocrdma_dcbx_cfg *dcbxcfg)
{
- int status = 0;
+ int status;
dma_addr_t pa;
struct ocrdma_mqe cmd;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index f38743018cb4..3d75f65ce87e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -89,8 +89,10 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
+ struct ocrdma_dev *dev;
int err;
+ dev = get_ocrdma_dev(ibdev);
err = ocrdma_query_port(ibdev, port_num, &attr);
if (err)
return err;
@@ -98,6 +100,8 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ if (ocrdma_is_udp_encap_supported(dev))
+ immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 99dd6fdf06d7..0efc9662c6d8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -140,7 +140,11 @@ enum {
OCRDMA_DB_RQ_SHIFT = 24
};
-#define OCRDMA_ROUDP_FLAGS_SHIFT 0x03
+enum {
+ OCRDMA_L3_TYPE_IB_GRH = 0x00,
+ OCRDMA_L3_TYPE_IPV4 = 0x01,
+ OCRDMA_L3_TYPE_IPV6 = 0x02
+};
#define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
#define OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* bits 10-11 of qid at 12-11 */
@@ -546,7 +550,8 @@ enum {
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT = 8,
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK = 0xFF <<
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT,
-
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT = 3,
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
@@ -1107,6 +1112,8 @@ enum {
OCRDMA_QP_PARAMS_STATE_MASK = BIT(5) | BIT(6) | BIT(7),
OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = BIT(8),
OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = BIT(9),
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT = 11,
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK = BIT(11) | BIT(12) | BIT(13),
OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16,
OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF <<
OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT,
@@ -1735,8 +1742,11 @@ enum {
/* w1 */
OCRDMA_CQE_UD_XFER_LEN_SHIFT = 16,
+ OCRDMA_CQE_UD_XFER_LEN_MASK = 0x1FFF,
OCRDMA_CQE_PKEY_SHIFT = 0,
OCRDMA_CQE_PKEY_MASK = 0xFFFF,
+ OCRDMA_CQE_UD_L3TYPE_SHIFT = 29,
+ OCRDMA_CQE_UD_L3TYPE_MASK = 0x07,
/* w2 */
OCRDMA_CQE_QPN_SHIFT = 0,
@@ -1861,7 +1871,7 @@ struct ocrdma_ewqe_ud_hdr {
u32 rsvd_dest_qpn;
u32 qkey;
u32 rsvd_ahid;
- u32 rsvd;
+ u32 hdr_type;
};
/* extended wqe followed by hdr_wqe for Fast Memory register */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 255f774080a4..8bef09a8c49f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -610,7 +610,7 @@ static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
static void ocrdma_update_stats(struct ocrdma_dev *dev)
{
ulong now = jiffies, secs;
- int status = 0;
+ int status;
struct ocrdma_rdma_stats_resp *rdma_stats =
(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
@@ -641,7 +641,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
{
char tmp_str[32];
long reset;
- int status = 0;
+ int status;
struct ocrdma_stats *pstats = filp->private_data;
struct ocrdma_dev *dev = pstats->dev;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 12420e4ecf3d..a8496a18e20d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -419,7 +419,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
struct ib_udata *udata)
{
struct ocrdma_pd *pd = NULL;
- int status = 0;
+ int status;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
@@ -468,7 +468,7 @@ static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
struct ocrdma_pd *pd)
{
- int status = 0;
+ int status;
if (dev->pd_mgr->pd_prealloc_valid)
status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
@@ -596,7 +596,7 @@ map_err:
int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
- int status = 0;
+ int status;
struct ocrdma_mm *mm, *tmp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
@@ -623,7 +623,7 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
unsigned long len = (vma->vm_end - vma->vm_start);
- int status = 0;
+ int status;
bool found;
if (vma->vm_start & (PAGE_SIZE - 1))
@@ -1285,7 +1285,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
struct ib_udata *udata, int dpp_offset,
int dpp_credit_lmt, int srq)
{
- int status = 0;
+ int status;
u64 usr_db;
struct ocrdma_create_qp_uresp uresp;
struct ocrdma_pd *pd = qp->pd;
@@ -1494,9 +1494,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*/
if (status < 0)
return status;
- status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
-
- return status;
+ return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}
int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1949,7 +1947,7 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata)
{
- int status = 0;
+ int status;
struct ocrdma_srq *srq;
srq = get_ocrdma_srq(ibsrq);
@@ -2005,6 +2003,7 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
else
ud_hdr->qkey = ud_wr(wr)->remote_qkey;
ud_hdr->rsvd_ahid = ah->id;
+ ud_hdr->hdr_type = ah->hdr_type;
if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
}
@@ -2717,9 +2716,11 @@ static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
return expand;
}
-static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
+static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
+ struct ocrdma_cqe *cqe)
{
int status;
+ u16 hdr_type = 0;
status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
@@ -2728,7 +2729,17 @@ static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
ibwc->pkey_index = 0;
ibwc->wc_flags = IB_WC_GRH;
ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
- OCRDMA_CQE_UD_XFER_LEN_SHIFT);
+ OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
+ OCRDMA_CQE_UD_XFER_LEN_MASK;
+
+ if (ocrdma_is_udp_encap_supported(dev)) {
+ hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
+ OCRDMA_CQE_UD_L3TYPE_SHIFT) &
+ OCRDMA_CQE_UD_L3TYPE_MASK;
+ ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ ibwc->network_hdr_type = hdr_type;
+ }
+
return status;
}
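Word 1 of a UD CQE is therefore decoded as three fields; a sketch using the masks from ocrdma_sli.h as extended by this patch (the helper is illustrative):

	/* bits  0..15  P_Key
	 * bits 16..28  received byte length (13 bits, hence the new mask)
	 * bits 29..31  L3/network header type, valid only with UDP encap
	 */
	static inline void example_parse_ud_w1(u32 w1, u16 *pkey,
					       u32 *len, u8 *l3)
	{
		*pkey = w1 & OCRDMA_CQE_PKEY_MASK;
		*len  = (w1 >> OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
			OCRDMA_CQE_UD_XFER_LEN_MASK;
		*l3   = (w1 >> OCRDMA_CQE_UD_L3TYPE_SHIFT) &
			OCRDMA_CQE_UD_L3TYPE_MASK;
	}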
@@ -2791,12 +2802,15 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
+ struct ocrdma_dev *dev;
+
+ dev = get_ocrdma_dev(qp->ibqp.device);
ibwc->opcode = IB_WC_RECV;
ibwc->qp = &qp->ibqp;
ibwc->status = IB_WC_SUCCESS;
if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
- ocrdma_update_ud_rcqe(ibwc, cqe);
+ ocrdma_update_ud_rcqe(dev, ibwc, cqe);
else
ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 495be09781b1..e0fdb9201423 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_QIB
tristate "Intel PCIe HCA support"
- depends on 64BIT
+ depends on 64BIT && INFINIBAND_RDMAVT
---help---
This is a low-level driver for Intel PCIe QLE InfiniBand host
channel adapters. This driver does not support the Intel
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
index 57f8103e51f8..79ebd79e8405 100644
--- a/drivers/infiniband/hw/qib/Makefile
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -1,11 +1,11 @@
obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
-ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
- qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
- qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
- qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
+ib_qib-y := qib_diag.o qib_driver.o qib_eeprom.o \
+ qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \
+ qib_mad.o qib_pcie.o qib_pio_copy.o \
+ qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o \
qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
- qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
+ qib_user_pages.o qib_user_sdma.o qib_iba7220.o \
qib_sd7220.o qib_iba7322.o qib_verbs.o
# 6120 has no fallback if no MSI interrupts, others can do INTx
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 7df16f74bb45..bbf0a163aeab 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -52,6 +52,7 @@
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/kthread.h>
+#include <rdma/rdma_vt.h>
#include "qib_common.h"
#include "qib_verbs.h"
@@ -229,9 +230,6 @@ struct qib_ctxtdata {
u8 redirect_seq_cnt;
/* ctxt rcvhdrq head offset */
u32 head;
- /* lookaside fields */
- struct qib_qp *lookaside_qp;
- u32 lookaside_qpn;
/* QPs waiting for context processing */
struct list_head qp_wait_list;
#ifdef CONFIG_DEBUG_FS
@@ -240,7 +238,7 @@ struct qib_ctxtdata {
#endif
};
-struct qib_sge_state;
+struct rvt_sge_state;
struct qib_sdma_txreq {
int flags;
@@ -258,14 +256,14 @@ struct qib_sdma_desc {
struct qib_verbs_txreq {
struct qib_sdma_txreq txreq;
- struct qib_qp *qp;
- struct qib_swqe *wqe;
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
u32 dwords;
u16 hdr_dwords;
u16 hdr_inx;
struct qib_pio_header *align_buf;
- struct qib_mregion *mr;
- struct qib_sge_state *ss;
+ struct rvt_mregion *mr;
+ struct rvt_sge_state *ss;
};
#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
@@ -1096,8 +1094,6 @@ struct qib_devdata {
u16 psxmitwait_check_rate;
/* high volume overflow errors defered to tasklet */
struct tasklet_struct error_tasklet;
- /* per device cq worker */
- struct kthread_worker *worker;
int assigned_node_id; /* NUMA node closest to HCA */
};
@@ -1135,8 +1131,9 @@ extern spinlock_t qib_devs_lock;
extern struct qib_devdata *qib_lookup(int unit);
extern u32 qib_cpulist_count;
extern unsigned long *qib_cpulist;
-
+extern u16 qpt_mask;
extern unsigned qib_cc_table_size;
+
int qib_init(struct qib_devdata *, int);
int init_chip_wc_pat(struct qib_devdata *dd, u32);
int qib_enable_wc(struct qib_devdata *dd);
@@ -1323,7 +1320,7 @@ void __qib_sdma_intr(struct qib_pportdata *);
void qib_sdma_intr(struct qib_pportdata *);
void qib_user_sdma_send_desc(struct qib_pportdata *dd,
struct list_head *pktlist);
-int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
+int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
u32, struct qib_verbs_txreq *);
/* ppd->sdma_lock should be locked before calling this. */
int qib_sdma_make_progress(struct qib_pportdata *dd);
@@ -1454,6 +1451,8 @@ u64 qib_sps_ints(void);
dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
size_t, int);
const char *qib_get_unit_name(int unit);
+const char *qib_get_card_name(struct rvt_dev_info *rdi);
+struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
/*
* Flush write combining store buffers (if present) and perform a write
@@ -1540,4 +1539,14 @@ struct qib_hwerror_msgs {
void qib_format_hwerrors(u64 hwerrs,
const struct qib_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t lmsg);
+
+void qib_stop_send_queue(struct rvt_qp *qp);
+void qib_quiesce_qp(struct rvt_qp *qp);
+void qib_flush_qp_waiters(struct rvt_qp *qp);
+int qib_mtu_to_path_mtu(u32 mtu);
+u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
+void qib_notify_error_qp(struct rvt_qp *qp);
+int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_attr *attr);
+
#endif /* _QIB_KERNEL_H */
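The new prototypes above are rdmavt driver callbacks: rdmavt owns the generic QP machinery and calls back into the hardware driver for chip-specific steps. They get wired up through the rvt_driver_provided table at registration time, roughly like this (a sketch assuming the full conversion does this in qib_register_ib_device()):

	dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
	dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
	dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;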
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index 4fb78abd8ba1..1d6e63eb1146 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -742,14 +742,11 @@ struct qib_tid_session_member {
#define SIZE_OF_CRC 1
#define QIB_DEFAULT_P_KEY 0xFFFF
-#define QIB_PERMISSIVE_LID 0xFFFF
#define QIB_AETH_CREDIT_SHIFT 24
#define QIB_AETH_CREDIT_MASK 0x1F
#define QIB_AETH_CREDIT_INVAL 0x1F
#define QIB_PSN_MASK 0xFFFFFF
#define QIB_MSN_MASK 0xFFFFFF
-#define QIB_QPN_MASK 0xFFFFFF
-#define QIB_MULTICAST_LID_BASE 0xC000
#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
#define QIB_MULTICAST_QPN 0xFFFFFF
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
deleted file mode 100644
index 2b45d0b02300..000000000000
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/kthread.h>
-
-#include "qib_verbs.h"
-#include "qib.h"
-
-/**
- * qib_cq_enter - add a new entry to the completion queue
- * @cq: completion queue
- * @entry: work completion entry to add
- * @solicited: true if @entry is a solicited entry
- *
- * This may be called with qp->s_lock held.
- */
-void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
-{
- struct qib_cq_wc *wc;
- unsigned long flags;
- u32 head;
- u32 next;
-
- spin_lock_irqsave(&cq->lock, flags);
-
- /*
- * Note that the head pointer might be writable by user processes.
- * Take care to verify it is a sane value.
- */
- wc = cq->queue;
- head = wc->head;
- if (head >= (unsigned) cq->ibcq.cqe) {
- head = cq->ibcq.cqe;
- next = 0;
- } else
- next = head + 1;
- if (unlikely(next == wc->tail)) {
- spin_unlock_irqrestore(&cq->lock, flags);
- if (cq->ibcq.event_handler) {
- struct ib_event ev;
-
- ev.device = cq->ibcq.device;
- ev.element.cq = &cq->ibcq;
- ev.event = IB_EVENT_CQ_ERR;
- cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
- }
- return;
- }
- if (cq->ip) {
- wc->uqueue[head].wr_id = entry->wr_id;
- wc->uqueue[head].status = entry->status;
- wc->uqueue[head].opcode = entry->opcode;
- wc->uqueue[head].vendor_err = entry->vendor_err;
- wc->uqueue[head].byte_len = entry->byte_len;
- wc->uqueue[head].ex.imm_data =
- (__u32 __force)entry->ex.imm_data;
- wc->uqueue[head].qp_num = entry->qp->qp_num;
- wc->uqueue[head].src_qp = entry->src_qp;
- wc->uqueue[head].wc_flags = entry->wc_flags;
- wc->uqueue[head].pkey_index = entry->pkey_index;
- wc->uqueue[head].slid = entry->slid;
- wc->uqueue[head].sl = entry->sl;
- wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
- wc->uqueue[head].port_num = entry->port_num;
- /* Make sure entry is written before the head index. */
- smp_wmb();
- } else
- wc->kqueue[head] = *entry;
- wc->head = next;
-
- if (cq->notify == IB_CQ_NEXT_COMP ||
- (cq->notify == IB_CQ_SOLICITED &&
- (solicited || entry->status != IB_WC_SUCCESS))) {
- struct kthread_worker *worker;
- /*
- * This will cause send_complete() to be called in
- * another thread.
- */
- smp_rmb();
- worker = cq->dd->worker;
- if (likely(worker)) {
- cq->notify = IB_CQ_NONE;
- cq->triggered++;
- queue_kthread_work(worker, &cq->comptask);
- }
- }
-
- spin_unlock_irqrestore(&cq->lock, flags);
-}
-
-/**
- * qib_poll_cq - poll for work completion entries
- * @ibcq: the completion queue to poll
- * @num_entries: the maximum number of entries to return
- * @entry: pointer to array where work completions are placed
- *
- * Returns the number of completion entries polled.
- *
- * This may be called from interrupt context. Also called by ib_poll_cq()
- * in the generic verbs code.
- */
-int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
- struct qib_cq *cq = to_icq(ibcq);
- struct qib_cq_wc *wc;
- unsigned long flags;
- int npolled;
- u32 tail;
-
- /* The kernel can only poll a kernel completion queue */
- if (cq->ip) {
- npolled = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&cq->lock, flags);
-
- wc = cq->queue;
- tail = wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
- if (tail == wc->head)
- break;
- /* The kernel doesn't need a RMB since it has the lock. */
- *entry = wc->kqueue[tail];
- if (tail >= cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- wc->tail = tail;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
-bail:
- return npolled;
-}
-
-static void send_complete(struct kthread_work *work)
-{
- struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
-
- /*
- * The completion handler will most likely rearm the notification
- * and poll for all pending entries. If a new completion entry
- * is added while we are in this routine, queue_work()
- * won't call us again until we return so we check triggered to
- * see if we need to call the handler again.
- */
- for (;;) {
- u8 triggered = cq->triggered;
-
- /*
- * IPoIB connected mode assumes the callback is from a
- * soft IRQ. We simulate this by blocking "bottom halves".
- * See the implementation for ipoib_cm_handle_tx_wc(),
- * netif_tx_lock_bh() and netif_tx_lock().
- */
- local_bh_disable();
- cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
- local_bh_enable();
-
- if (cq->triggered == triggered)
- return;
- }
-}
-
-/**
- * qib_create_cq - create a completion queue
- * @ibdev: the device this completion queue is attached to
- * @attr: creation attributes
- * @context: unused by the QLogic_IB driver
- * @udata: user data for libibverbs.so
- *
- * Returns a pointer to the completion queue or negative errno values
- * for failure.
- *
- * Called by ib_create_cq() in the generic verbs code.
- */
-struct ib_cq *qib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- int entries = attr->cqe;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_cq *cq;
- struct qib_cq_wc *wc;
- struct ib_cq *ret;
- u32 sz;
-
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- if (entries < 1 || entries > ib_qib_max_cqes) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
-
- /* Allocate the completion queue structure. */
- cq = kmalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
-
- /*
- * Allocate the completion queue entries and head/tail pointers.
- * This is allocated separately so that it can be resized and
- * also mapped into user space.
- * We need to use vmalloc() in order to support mmap and large
- * numbers of entries.
- */
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
- else
- sz += sizeof(struct ib_wc) * (entries + 1);
- wc = vmalloc_user(sz);
- if (!wc) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_cq;
- }
-
- /*
- * Return the address of the WC as the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- int err;
-
- cq->ip = qib_create_mmap_info(dev, sz, context, wc);
- if (!cq->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_wc;
- }
-
- err = ib_copy_to_udata(udata, &cq->ip->offset,
- sizeof(cq->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else
- cq->ip = NULL;
-
- spin_lock(&dev->n_cqs_lock);
- if (dev->n_cqs_allocated == ib_qib_max_cqs) {
- spin_unlock(&dev->n_cqs_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_cqs_allocated++;
- spin_unlock(&dev->n_cqs_lock);
-
- if (cq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- /*
- * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
- * The number of entries should be >= the number requested or return
- * an error.
- */
- cq->dd = dd_from_dev(dev);
- cq->ibcq.cqe = entries;
- cq->notify = IB_CQ_NONE;
- cq->triggered = 0;
- spin_lock_init(&cq->lock);
- init_kthread_work(&cq->comptask, send_complete);
- wc->head = 0;
- wc->tail = 0;
- cq->queue = wc;
-
- ret = &cq->ibcq;
-
- goto done;
-
-bail_ip:
- kfree(cq->ip);
-bail_wc:
- vfree(wc);
-bail_cq:
- kfree(cq);
-done:
- return ret;
-}
-
-/**
- * qib_destroy_cq - destroy a completion queue
- * @ibcq: the completion queue to destroy.
- *
- * Returns 0 for success.
- *
- * Called by ib_destroy_cq() in the generic verbs code.
- */
-int qib_destroy_cq(struct ib_cq *ibcq)
-{
- struct qib_ibdev *dev = to_idev(ibcq->device);
- struct qib_cq *cq = to_icq(ibcq);
-
- flush_kthread_work(&cq->comptask);
- spin_lock(&dev->n_cqs_lock);
- dev->n_cqs_allocated--;
- spin_unlock(&dev->n_cqs_lock);
- if (cq->ip)
- kref_put(&cq->ip->ref, qib_release_mmap_info);
- else
- vfree(cq->queue);
- kfree(cq);
-
- return 0;
-}
-
-/**
- * qib_req_notify_cq - change the notification type for a completion queue
- * @ibcq: the completion queue
- * @notify_flags: the type of notification to request
- *
- * Returns 0 for success.
- *
- * This may be called from interrupt context. Also called by
- * ib_req_notify_cq() in the generic verbs code.
- */
-int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
- struct qib_cq *cq = to_icq(ibcq);
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&cq->lock, flags);
- /*
- * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
- * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
- */
- if (cq->notify != IB_CQ_NEXT_COMP)
- cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
-
- if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
- cq->queue->head != cq->queue->tail)
- ret = 1;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
- return ret;
-}
-
-/**
- * qib_resize_cq - change the size of the CQ
- * @ibcq: the completion queue
- *
- * Returns 0 for success.
- */
-int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
-{
- struct qib_cq *cq = to_icq(ibcq);
- struct qib_cq_wc *old_wc;
- struct qib_cq_wc *wc;
- u32 head, tail, n;
- int ret;
- u32 sz;
-
- if (cqe < 1 || cqe > ib_qib_max_cqes) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Need to use vmalloc() if we want to support large #s of entries.
- */
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
- else
- sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = vmalloc_user(sz);
- if (!wc) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /* Check that we can write the offset to mmap. */
- if (udata && udata->outlen >= sizeof(__u64)) {
- __u64 offset = 0;
-
- ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
- if (ret)
- goto bail_free;
- }
-
- spin_lock_irq(&cq->lock);
- /*
- * Make sure head and tail are sane since they
- * might be user writable.
- */
- old_wc = cq->queue;
- head = old_wc->head;
- if (head > (u32) cq->ibcq.cqe)
- head = (u32) cq->ibcq.cqe;
- tail = old_wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- if (head < tail)
- n = cq->ibcq.cqe + 1 + head - tail;
- else
- n = head - tail;
- if (unlikely((u32)cqe < n)) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- for (n = 0; tail != head; n++) {
- if (cq->ip)
- wc->uqueue[n] = old_wc->uqueue[tail];
- else
- wc->kqueue[n] = old_wc->kqueue[tail];
- if (tail == (u32) cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- cq->ibcq.cqe = cqe;
- wc->head = n;
- wc->tail = 0;
- cq->queue = wc;
- spin_unlock_irq(&cq->lock);
-
- vfree(old_wc);
-
- if (cq->ip) {
- struct qib_ibdev *dev = to_idev(ibcq->device);
- struct qib_mmap_info *ip = cq->ip;
-
- qib_update_mmap_info(dev, ip, sz, wc);
-
- /*
- * Return the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- ret = ib_copy_to_udata(udata, &ip->offset,
- sizeof(ip->offset));
- if (ret)
- goto bail;
- }
-
- spin_lock_irq(&dev->pending_lock);
- if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = 0;
- goto bail;
-
-bail_unlock:
- spin_unlock_irq(&cq->lock);
-bail_free:
- vfree(wc);
-bail:
- return ret;
-}
-
-int qib_cq_init(struct qib_devdata *dd)
-{
- int ret = 0;
- int cpu;
- struct task_struct *task;
-
- if (dd->worker)
- return 0;
- dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
- if (!dd->worker)
- return -ENOMEM;
- init_kthread_worker(dd->worker);
- task = kthread_create_on_node(
- kthread_worker_fn,
- dd->worker,
- dd->assigned_node_id,
- "qib_cq%d", dd->unit);
- if (IS_ERR(task))
- goto task_fail;
- cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
- kthread_bind(task, cpu);
- wake_up_process(task);
-out:
- return ret;
-task_fail:
- ret = PTR_ERR(task);
- kfree(dd->worker);
- dd->worker = NULL;
- goto out;
-}
-
-void qib_cq_exit(struct qib_devdata *dd)
-{
- struct kthread_worker *worker;
-
- worker = dd->worker;
- if (!worker)
- return;
- /* blocks future queuing from send_complete() */
- dd->worker = NULL;
- smp_wmb();
- flush_kthread_worker(worker);
- kthread_stop(worker->task);
- kfree(worker);
-}
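The wholesale deletion above is not lost functionality: completion-queue handling moves into the shared rdmavt layer, and the driver now reaches it through the common entry point instead (declaration as in include/rdma/rdmavt_cq.h of this series):

	void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry,
			  bool solicited);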
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index f58fdc3d25a2..67ee6438cf59 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -90,6 +90,22 @@ const char *qib_get_unit_name(int unit)
return iname;
}
+const char *qib_get_card_name(struct rvt_dev_info *rdi)
+{
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(ibdev,
+ struct qib_devdata, verbs_dev);
+ return qib_get_unit_name(dd->unit);
+}
+
+struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
+{
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(ibdev,
+ struct qib_devdata, verbs_dev);
+ return dd->pcidev;
+}
+
/*
* Return count of units with at least one port ACTIVE.
*/
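Both helpers above rely on the struct embedding introduced by the rdmavt conversion: the rvt_dev_info sits inside the qib verbs device, which sits inside qib_devdata, so two container_of() steps walk back out. Sketched with the unrelated fields elided:

	struct qib_ibdev {
		struct rvt_dev_info rdi;	/* embedded rdmavt device */
		/* ... */
	};

	struct qib_devdata {
		struct qib_ibdev verbs_dev;	/* embedded verbs device */
		/* ... */
	};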
@@ -306,7 +322,9 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
struct qib_other_headers *ohdr = NULL;
struct qib_ibport *ibp = &ppd->ibport_data;
- struct qib_qp *qp = NULL;
+ struct qib_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ struct rvt_qp *qp = NULL;
u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
u16 lid = be16_to_cpu(hdr->lrh[1]);
int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
@@ -319,7 +337,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
if (tlen < 24)
goto drop;
- if (lid < QIB_MULTICAST_LID_BASE) {
+ if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
lid &= ~((1 << ppd->lmc) - 1);
if (unlikely(lid != ppd->lid))
goto drop;
@@ -346,13 +364,16 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
psn = be32_to_cpu(ohdr->bth[2]);
/* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+ qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
if (qp_num != QIB_MULTICAST_QPN) {
int ruc_res;
- qp = qib_lookup_qpn(ibp, qp_num);
- if (!qp)
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!qp) {
+ rcu_read_unlock();
goto drop;
+ }
/*
* Handle only RC QPs - for other QP types drop error
@@ -361,9 +382,9 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
spin_lock(&qp->r_lock);
/* Check for valid receive state. */
- if (!(ib_qib_state_ops[qp->state] &
- QIB_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
+ if (!(ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
goto unlock;
}
@@ -383,7 +404,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
diff = qib_cmp24(psn, qp->r_psn);
if (!qp->r_nak_state && diff >= 0) {
- ibp->n_rc_seqnak++;
+ ibp->rvp.n_rc_seqnak++;
qp->r_nak_state =
IB_NAK_PSN_ERROR;
/* Use the expected PSN. */
@@ -398,7 +419,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
*/
if (list_empty(&qp->rspwait)) {
qp->r_flags |=
- QIB_R_RSP_NAK;
+ RVT_R_RSP_NAK;
atomic_inc(
&qp->refcount);
list_add_tail(
@@ -419,12 +440,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
unlock:
spin_unlock(&qp->r_lock);
- /*
- * Notify qib_destroy_qp() if it is waiting
- * for us to finish.
- */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rcu_read_unlock();
} /* Unicast QP */
} /* Valid packet with TIDErr */
@@ -456,7 +472,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
int last;
u64 lval;
- struct qib_qp *qp, *nqp;
+ struct rvt_qp *qp, *nqp;
l = rcd->head;
rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
@@ -549,15 +565,6 @@ move_along:
updegr = 0;
}
}
- /*
- * Notify qib_destroy_qp() if it is waiting
- * for lookaside_qp to finish.
- */
- if (rcd->lookaside_qp) {
- if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
- wake_up(&rcd->lookaside_qp->wait);
- rcd->lookaside_qp = NULL;
- }
rcd->head = l;
@@ -567,17 +574,17 @@ move_along:
*/
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
list_del_init(&qp->rspwait);
- if (qp->r_flags & QIB_R_RSP_NAK) {
- qp->r_flags &= ~QIB_R_RSP_NAK;
+ if (qp->r_flags & RVT_R_RSP_NAK) {
+ qp->r_flags &= ~RVT_R_RSP_NAK;
qib_send_rc_ack(qp);
}
- if (qp->r_flags & QIB_R_RSP_SEND) {
+ if (qp->r_flags & RVT_R_RSP_SEND) {
unsigned long flags;
- qp->r_flags &= ~QIB_R_RSP_SEND;
+ qp->r_flags &= ~RVT_R_RSP_SEND;
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_qib_state_ops[qp->state] &
- QIB_PROCESS_OR_FLUSH_SEND)
+ if (ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_OR_FLUSH_SEND)
qib_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e394963f..24f4a782e0f4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
#include <linux/export.h>
#include <linux/uio.h>
+#include <rdma/ib.h>
+
#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
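ib_safe_file_access() (from include/rdma/ib.h) closes the hole where this write()-as-ioctl handler could run with another process's credentials or a kernel address limit, e.g. via splice(). Its check at this point in the tree is roughly:

	static inline bool ib_safe_file_access(struct file *filp)
	{
		return filp->f_cred == current_cred() &&
		       segment_eq(get_fs(), USER_DS);
	}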
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 4b927809d1a1..a3733f25280f 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2956,13 +2956,13 @@ static void pma_6120_timer(unsigned long data)
struct qib_ibport *ibp = &ppd->ibport_data;
unsigned long flags;
- spin_lock_irqsave(&ibp->lock, flags);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
&cs->spkts, &cs->rpkts, &cs->xmit_wait);
mod_timer(&cs->pma_timer,
- jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
+ jiffies + usecs_to_jiffies(ibp->rvp.pma_sample_interval));
} else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
u64 ta, tb, tc, td, te;
@@ -2975,11 +2975,11 @@ static void pma_6120_timer(unsigned long data)
cs->rpkts = td - cs->rpkts;
cs->xmit_wait = te - cs->xmit_wait;
}
- spin_unlock_irqrestore(&ibp->lock, flags);
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
}
/*
- * Note that the caller has the ibp->lock held.
+ * Note that the caller has the ibp->rvp.lock held.
*/
static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
u32 start)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 6c8ff10101c0..82d7c4bf5970 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2910,8 +2910,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
}
- if (dd->pport[i].ibport_data.smi_ah)
- ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
}
}
@@ -5497,7 +5495,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
unsigned delay;
int ret;
- agent = ibp->send_agent;
+ agent = ibp->rvp.send_agent;
if (!agent)
goto retry;
@@ -5515,7 +5513,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
ret = PTR_ERR(ah);
else {
send_buf->ah = ah;
- ibp->smi_ah = to_iah(ah);
+ ibp->smi_ah = ibah_to_rvtah(ah);
ret = 0;
}
} else {
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 4ff340fe904f..3f062f0dd9d8 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -42,6 +42,7 @@
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif
+#include <rdma/rdma_vt.h>
#include "qib.h"
#include "qib_common.h"
@@ -244,6 +245,13 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
alloc_percpu(struct qib_pma_counters);
if (!ppd->ibport_data.pmastats)
return -ENOMEM;
+ ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
+ ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
+ ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
+ if (!(ppd->ibport_data.rvp.rc_acks) ||
+ !(ppd->ibport_data.rvp.rc_qacks) ||
+ !(ppd->ibport_data.rvp.rc_delayed_comp))
+ return -ENOMEM;
if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
goto bail;
@@ -449,8 +457,6 @@ static int loadtime_init(struct qib_devdata *dd)
init_timer(&dd->intrchk_timer);
dd->intrchk_timer.function = verify_interrupt;
dd->intrchk_timer.data = (unsigned long) dd;
-
- ret = qib_cq_init(dd);
done:
return ret;
}
@@ -631,6 +637,9 @@ wq_error:
static void qib_free_pportdata(struct qib_pportdata *ppd)
{
free_percpu(ppd->ibport_data.pmastats);
+ free_percpu(ppd->ibport_data.rvp.rc_acks);
+ free_percpu(ppd->ibport_data.rvp.rc_qacks);
+ free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
ppd->ibport_data.pmastats = NULL;
}
@@ -1081,7 +1090,7 @@ void qib_free_devdata(struct qib_devdata *dd)
qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
free_percpu(dd->int_counter);
- ib_dealloc_device(&dd->verbs_dev.ibdev);
+ ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
}
u64 qib_int_counter(struct qib_devdata *dd)
@@ -1120,9 +1129,12 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
unsigned long flags;
struct qib_devdata *dd;
- int ret;
+ int ret, nports;
- dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
+ /* extra is sizeof(struct qib_pportdata) * number of ports */
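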
+ nports = extra / sizeof(struct qib_pportdata);
+ dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
+ nports);
if (!dd)
return ERR_PTR(-ENOMEM);
@@ -1171,7 +1183,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
bail:
if (!list_empty(&dd->list))
list_del_init(&dd->list);
- ib_dealloc_device(&dd->verbs_dev.ibdev);
+ ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
return ERR_PTR(ret);
}
@@ -1421,7 +1433,6 @@ static void cleanup_device_data(struct qib_devdata *dd)
}
kfree(tmp);
kfree(dd->boardname);
- qib_cq_exit(dd);
}
/*
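
The rc_acks/rc_qacks/rc_delayed_comp counters added above follow the usual
per-cpu pattern: allocated once per port, bumped locklessly on the hot path,
and folded across CPUs only when read. A sketch under those assumptions (the
read helper is illustrative, not part of this patch):

	/* Slow path: fold the per-cpu slots into one value. */
	static u64 qib_read_percpu_u64(u64 __percpu *cntr)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += *per_cpu_ptr(cntr, cpu);
		return sum;
	}

	/* Hot path: no lock, no atomic. */
	this_cpu_inc(*ibp->rvp.rc_acks);
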
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 086616d071b9..a014fd4cd076 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -74,7 +74,7 @@ static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
struct ib_event event;
struct qib_devdata *dd = ppd->dd;
- event.device = &dd->verbs_dev.ibdev;
+ event.device = &dd->verbs_dev.rdi.ibdev;
event.element.port_num = ppd->port;
event.event = ev;
ib_dispatch_event(&event);
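
signal_ib_event() now reports against the ib_device embedded in the rdmavt
structure; the dispatch itself is unchanged. A minimal usage sketch (event
type chosen for illustration only):

	struct ib_event event = {
		.device           = &dd->verbs_dev.rdi.ibdev,
		.element.port_num = ppd->port,
		.event            = IB_EVENT_PORT_ACTIVE,
	};

	ib_dispatch_event(&event);
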
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index d725c565518d..2c3c93572c17 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -46,20 +46,20 @@
*
*/
-int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
unsigned long flags;
u32 r;
u32 n;
int ret = 0;
struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct qib_lkey_table *rkt = &dev->lk_table;
+ struct rvt_lkey_table *rkt = &dev->lk_table;
spin_lock_irqsave(&rkt->lock, flags);
/* special case for dma_mr lkey == 0 */
if (dma_region) {
- struct qib_mregion *tmr;
+ struct rvt_mregion *tmr;
tmr = rcu_access_pointer(dev->dma_mr);
if (!tmr) {
@@ -90,8 +90,8 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
 * bits are capped in qib_verbs.c to ensure enough bits
* for generation number
*/
- mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
- ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+ mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
+ ((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
<< 8);
if (mr->lkey == 0) {
mr->lkey |= 1 << 8;
@@ -114,13 +114,13 @@ bail:
* qib_free_lkey - free an lkey
* @mr: mr to free from tables
*/
-void qib_free_lkey(struct qib_mregion *mr)
+void qib_free_lkey(struct rvt_mregion *mr)
{
unsigned long flags;
u32 lkey = mr->lkey;
u32 r;
struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct qib_lkey_table *rkt = &dev->lk_table;
+ struct rvt_lkey_table *rkt = &dev->lk_table;
spin_lock_irqsave(&rkt->lock, flags);
if (!mr->lkey_published)
@@ -128,7 +128,7 @@ void qib_free_lkey(struct qib_mregion *mr)
if (lkey == 0)
RCU_INIT_POINTER(dev->dma_mr, NULL);
else {
- r = lkey >> (32 - ib_qib_lkey_table_size);
+ r = lkey >> (32 - ib_rvt_lkey_table_size);
RCU_INIT_POINTER(rkt->table[r], NULL);
}
qib_put_mr(mr);
@@ -138,105 +138,6 @@ out:
}
/**
- * qib_lkey_ok - check IB SGE for validity and initialize
- * @rkt: table containing lkey to check SGE against
- * @pd: protection domain
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Return 1 if valid and successful, otherwise returns 0.
- *
- * increments the reference count upon success
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
- struct qib_sge *isge, struct ib_sge *sge, int acc)
-{
- struct qib_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /*
- * We use LKEY == zero for kernel virtual addresses
- * (see qib_get_dma_mr and qib_dma.c).
- */
- rcu_read_lock();
- if (sge->lkey == 0) {
- struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- isge->mr = mr;
- isge->vaddr = (void *) sge->addr;
- isge->length = sge->length;
- isge->sge_length = sge->length;
- isge->m = 0;
- isge->n = 0;
- goto ok;
- }
- mr = rcu_dereference(
- rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
- goto bail;
-
- off = sge->addr - mr->user_base;
- if (unlikely(sge->addr < mr->user_base ||
- off + sge->length > mr->length ||
- (mr->access_flags & acc) != acc))
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- page sizes are uniform power of 2 so no loop is necessary
- entries_spanned_by_off is the number of times the loop below
- would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off/QIB_SEGSZ;
- n = entries_spanned_by_off%QIB_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- isge->mr = mr;
- isge->vaddr = mr->map[m]->segs[n].vaddr + off;
- isge->length = mr->map[m]->segs[n].length - off;
- isge->sge_length = sge->length;
- isge->m = m;
- isge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
-
-/**
* qib_rkey_ok - check the IB virtual address, length, and RKEY
* @qp: qp for validation
* @sge: SGE state
@@ -249,11 +150,11 @@ bail:
*
* increments the reference count upon success
*/
-int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc)
{
- struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct qib_mregion *mr;
+ struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct rvt_mregion *mr;
unsigned n, m;
size_t off;
@@ -263,7 +164,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
*/
rcu_read_lock();
if (rkey == 0) {
- struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+ struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
struct qib_ibdev *dev = to_idev(pd->ibpd.device);
if (pd->user)
@@ -285,7 +186,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
}
mr = rcu_dereference(
- rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
+ rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
goto bail;
@@ -308,15 +209,15 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
entries_spanned_by_off = off >> mr->page_shift;
off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off/QIB_SEGSZ;
- n = entries_spanned_by_off%QIB_SEGSZ;
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
} else {
m = 0;
n = 0;
while (off >= mr->map[m]->segs[n].length) {
off -= mr->map[m]->segs[n].length;
n++;
- if (n >= QIB_SEGSZ) {
+ if (n >= RVT_SEGSZ) {
m++;
n = 0;
}
@@ -335,58 +236,3 @@ bail:
return 0;
}
-/*
- * Initialize the memory region specified by the work request.
- */
-int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr)
-{
- struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct qib_pd *pd = to_ipd(qp->ibqp.pd);
- struct qib_mr *mr = to_imr(wr->mr);
- struct qib_mregion *mrg;
- u32 key = wr->key;
- unsigned i, n, m;
- int ret = -EINVAL;
- unsigned long flags;
- u64 *page_list;
- size_t ps;
-
- spin_lock_irqsave(&rkt->lock, flags);
- if (pd->user || key == 0)
- goto bail;
-
- mrg = rcu_dereference_protected(
- rkt->table[(key >> (32 - ib_qib_lkey_table_size))],
- lockdep_is_held(&rkt->lock));
- if (unlikely(mrg == NULL || qp->ibqp.pd != mrg->pd))
- goto bail;
-
- if (mr->npages > mrg->max_segs)
- goto bail;
-
- ps = mr->ibmr.page_size;
- if (mr->ibmr.length > ps * mr->npages)
- goto bail;
-
- mrg->user_base = mr->ibmr.iova;
- mrg->iova = mr->ibmr.iova;
- mrg->lkey = key;
- mrg->length = mr->ibmr.length;
- mrg->access_flags = wr->access;
- page_list = mr->pages;
- m = 0;
- n = 0;
- for (i = 0; i < mr->npages; i++) {
- mrg->map[m]->segs[n].vaddr = (void *) page_list[i];
- mrg->map[m]->segs[n].length = ps;
- if (++n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
-
- ret = 0;
-bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
- return ret;
-}
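
The lkey encoding retained above packs the table index into the top
ib_rvt_lkey_table_size bits and a generation counter starting at bit 8, so a
lookup recovers the index with one shift while stale handles fail the
mr->lkey comparison. A worked example, taking ib_rvt_lkey_table_size as 16
purely for the arithmetic:

	/* index r = 5, generation 3:
	 *   lkey  = (5 << (32 - 16)) | (3 << 8) = 0x00050300
	 *   index = 0x00050300 >> (32 - 16)     = 5
	 * A reused slot bumps the generation, so an old lkey with the
	 * same index no longer matches mr->lkey in qib_rkey_ok().
	 */
	u32 r = lkey >> (32 - ib_rvt_lkey_table_size);
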
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 9625e7c438e5..0bd18375d7df 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -70,7 +70,7 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
unsigned long flags;
unsigned long timeout;
- agent = ibp->send_agent;
+ agent = ibp->rvp.send_agent;
if (!agent)
return;
@@ -79,7 +79,8 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
return;
/* o14-2 */
- if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+ if (ibp->rvp.trap_timeout &&
+ time_before(jiffies, ibp->rvp.trap_timeout))
return;
send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
@@ -93,42 +94,42 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
smp->class_version = 1;
smp->method = IB_MGMT_METHOD_TRAP;
- ibp->tid++;
- smp->tid = cpu_to_be64(ibp->tid);
+ ibp->rvp.tid++;
+ smp->tid = cpu_to_be64(ibp->rvp.tid);
smp->attr_id = IB_SMP_ATTR_NOTICE;
/* o14-1: smp->mkey = 0; */
memcpy(smp->data, data, len);
- spin_lock_irqsave(&ibp->lock, flags);
- if (!ibp->sm_ah) {
- if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ if (!ibp->rvp.sm_ah) {
+ if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
struct ib_ah *ah;
- ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
+ ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
if (IS_ERR(ah))
ret = PTR_ERR(ah);
else {
send_buf->ah = ah;
- ibp->sm_ah = to_iah(ah);
+ ibp->rvp.sm_ah = ibah_to_rvtah(ah);
ret = 0;
}
} else
ret = -EINVAL;
} else {
- send_buf->ah = &ibp->sm_ah->ibah;
+ send_buf->ah = &ibp->rvp.sm_ah->ibah;
ret = 0;
}
- spin_unlock_irqrestore(&ibp->lock, flags);
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
if (!ret)
ret = ib_post_send_mad(send_buf, NULL);
if (!ret) {
/* 4.096 usec. */
- timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
- ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+ timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
+ ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
} else {
ib_free_send_mad(send_buf);
- ibp->trap_timeout = 0;
+ ibp->rvp.trap_timeout = 0;
}
}
@@ -141,10 +142,10 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
struct ib_mad_notice_attr data;
if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
- ibp->pkey_violations++;
+ ibp->rvp.pkey_violations++;
else
- ibp->qkey_violations++;
- ibp->n_pkt_drops++;
+ ibp->rvp.qkey_violations++;
+ ibp->rvp.n_pkt_drops++;
/* Send violation trap */
data.generic_type = IB_NOTICE_TYPE_SECURITY;
@@ -205,8 +206,11 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
/*
* Send a Port Capability Mask Changed trap (ch. 14.3.11).
*/
-void qib_cap_mask_chg(struct qib_ibport *ibp)
+void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = dd_from_dev(ibdev);
+ struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
struct ib_mad_notice_attr data;
data.generic_type = IB_NOTICE_TYPE_INFO;
@@ -217,8 +221,8 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
data.toggle_count = 0;
memset(&data.details, 0, sizeof(data.details));
data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
-
+ data.details.ntc_144.new_cap_mask =
+ cpu_to_be32(ibp->rvp.port_cap_flags);
qib_send_trap(ibp, &data, sizeof(data));
}
@@ -409,37 +413,38 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
int ret = 0;
/* Is the mkey in the process of expiring? */
- if (ibp->mkey_lease_timeout &&
- time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+ if (ibp->rvp.mkey_lease_timeout &&
+ time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
/* Clear timeout and mkey protection field. */
- ibp->mkey_lease_timeout = 0;
- ibp->mkeyprot = 0;
+ ibp->rvp.mkey_lease_timeout = 0;
+ ibp->rvp.mkeyprot = 0;
}
- if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
- ibp->mkey == smp->mkey)
+ if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
+ ibp->rvp.mkey == smp->mkey)
valid_mkey = 1;
/* Unset lease timeout on any valid Get/Set/TrapRepress */
- if (valid_mkey && ibp->mkey_lease_timeout &&
+ if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
(smp->method == IB_MGMT_METHOD_GET ||
smp->method == IB_MGMT_METHOD_SET ||
smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
- ibp->mkey_lease_timeout = 0;
+ ibp->rvp.mkey_lease_timeout = 0;
if (!valid_mkey) {
switch (smp->method) {
case IB_MGMT_METHOD_GET:
/* Bad mkey not a violation below level 2 */
- if (ibp->mkeyprot < 2)
+ if (ibp->rvp.mkeyprot < 2)
break;
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_TRAP_REPRESS:
- if (ibp->mkey_violations != 0xFFFF)
- ++ibp->mkey_violations;
- if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
- ibp->mkey_lease_timeout = jiffies +
- ibp->mkey_lease_period * HZ;
+ if (ibp->rvp.mkey_violations != 0xFFFF)
+ ++ibp->rvp.mkey_violations;
+ if (!ibp->rvp.mkey_lease_timeout &&
+ ibp->rvp.mkey_lease_period)
+ ibp->rvp.mkey_lease_timeout = jiffies +
+ ibp->rvp.mkey_lease_period * HZ;
/* Generate a trap notice. */
qib_bad_mkey(ibp, smp);
ret = 1;
@@ -489,15 +494,15 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
/* Only return the mkey if the protection field allows it. */
if (!(smp->method == IB_MGMT_METHOD_GET &&
- ibp->mkey != smp->mkey &&
- ibp->mkeyprot == 1))
- pip->mkey = ibp->mkey;
- pip->gid_prefix = ibp->gid_prefix;
+ ibp->rvp.mkey != smp->mkey &&
+ ibp->rvp.mkeyprot == 1))
+ pip->mkey = ibp->rvp.mkey;
+ pip->gid_prefix = ibp->rvp.gid_prefix;
pip->lid = cpu_to_be16(ppd->lid);
- pip->sm_lid = cpu_to_be16(ibp->sm_lid);
- pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
+ pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
+ pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
/* pip->diag_code; */
- pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+ pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
pip->local_port_num = port;
pip->link_width_enabled = ppd->link_width_enabled;
pip->link_width_supported = ppd->link_width_supported;
@@ -508,7 +513,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
pip->portphysstate_linkdown =
(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
(get_linkdowndefaultstate(ppd) ? 1 : 2);
- pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+ pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
ppd->link_speed_enabled;
switch (ppd->ibmtu) {
@@ -529,9 +534,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
mtu = IB_MTU_256;
break;
}
- pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
+ pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
- pip->vl_high_limit = ibp->vl_high_limit;
+ pip->vl_high_limit = ibp->rvp.vl_high_limit;
pip->vl_arb_high_cap =
dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
pip->vl_arb_low_cap =
@@ -542,20 +547,20 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
/* pip->vlstallcnt_hoqlife; */
pip->operationalvl_pei_peo_fpi_fpo =
dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
- pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+ pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
/* P_KeyViolations are counted by hardware. */
- pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
- pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+ pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
+ pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
/* Only the hardware GUID is supported for now */
pip->guid_cap = QIB_GUIDS_PER_PORT;
- pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
+ pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
/* 32.768 usec. response time (guessing) */
pip->resv_resptimevalue = 3;
pip->localphyerrors_overrunerrors =
(get_phyerrthreshold(ppd) << 4) |
get_overrunthreshold(ppd);
/* pip->max_credit_hint; */
- if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
u32 v;
v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
@@ -685,13 +690,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
event.device = ibdev;
event.element.port_num = port;
- ibp->mkey = pip->mkey;
- ibp->gid_prefix = pip->gid_prefix;
- ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
+ ibp->rvp.mkey = pip->mkey;
+ ibp->rvp.gid_prefix = pip->gid_prefix;
+ ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
lid = be16_to_cpu(pip->lid);
/* Must be a valid unicast LID address. */
- if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
+ if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
smp->status |= IB_SMP_INVALID_FIELD;
else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
if (ppd->lid != lid)
@@ -706,21 +711,21 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
smlid = be16_to_cpu(pip->sm_lid);
msl = pip->neighbormtu_mastersmsl & 0xF;
/* Must be a valid unicast LID address. */
- if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
+ if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
smp->status |= IB_SMP_INVALID_FIELD;
- else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
- spin_lock_irqsave(&ibp->lock, flags);
- if (ibp->sm_ah) {
- if (smlid != ibp->sm_lid)
- ibp->sm_ah->attr.dlid = smlid;
- if (msl != ibp->sm_sl)
- ibp->sm_ah->attr.sl = msl;
+ else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ if (ibp->rvp.sm_ah) {
+ if (smlid != ibp->rvp.sm_lid)
+ ibp->rvp.sm_ah->attr.dlid = smlid;
+ if (msl != ibp->rvp.sm_sl)
+ ibp->rvp.sm_ah->attr.sl = msl;
}
- spin_unlock_irqrestore(&ibp->lock, flags);
- if (smlid != ibp->sm_lid)
- ibp->sm_lid = smlid;
- if (msl != ibp->sm_sl)
- ibp->sm_sl = msl;
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ if (smlid != ibp->rvp.sm_lid)
+ ibp->rvp.sm_lid = smlid;
+ if (msl != ibp->rvp.sm_sl)
+ ibp->rvp.sm_sl = msl;
event.event = IB_EVENT_SM_CHANGE;
ib_dispatch_event(&event);
}
@@ -768,10 +773,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
smp->status |= IB_SMP_INVALID_FIELD;
}
- ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
- ibp->vl_high_limit = pip->vl_high_limit;
+ ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
+ ibp->rvp.vl_high_limit = pip->vl_high_limit;
(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
- ibp->vl_high_limit);
+ ibp->rvp.vl_high_limit);
mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
if (mtu == -1)
@@ -789,13 +794,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
}
if (pip->mkey_violations == 0)
- ibp->mkey_violations = 0;
+ ibp->rvp.mkey_violations = 0;
if (pip->pkey_violations == 0)
- ibp->pkey_violations = 0;
+ ibp->rvp.pkey_violations = 0;
if (pip->qkey_violations == 0)
- ibp->qkey_violations = 0;
+ ibp->rvp.qkey_violations = 0;
ore = pip->localphyerrors_overrunerrors;
if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
@@ -804,7 +809,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
if (set_overrunthreshold(ppd, (ore & 0xF)))
smp->status |= IB_SMP_INVALID_FIELD;
- ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
+ ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
/*
* Do the port state change now that the other link parameters
@@ -1028,7 +1033,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.ibdev;
+ event.device = &dd->verbs_dev.rdi.ibdev;
event.element.port_num = port;
ib_dispatch_event(&event);
}
@@ -1062,7 +1067,7 @@ static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
memset(smp->data, 0, sizeof(smp->data));
- if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
+ if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
smp->status |= IB_SMP_UNSUP_METHOD;
else
for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
@@ -1078,7 +1083,7 @@ static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
u8 *p = (u8 *) smp->data;
unsigned i;
- if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
+ if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
smp->status |= IB_SMP_UNSUP_METHOD;
return reply(smp);
}
@@ -1195,20 +1200,20 @@ static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
goto bail;
}
- spin_lock_irqsave(&ibp->lock, flags);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
p->counter_width = 4; /* 32 bit counters */
p->counter_mask0_9 = COUNTER_MASK0_9;
- p->sample_start = cpu_to_be32(ibp->pma_sample_start);
- p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
- p->tag = cpu_to_be16(ibp->pma_tag);
- p->counter_select[0] = ibp->pma_counter_select[0];
- p->counter_select[1] = ibp->pma_counter_select[1];
- p->counter_select[2] = ibp->pma_counter_select[2];
- p->counter_select[3] = ibp->pma_counter_select[3];
- p->counter_select[4] = ibp->pma_counter_select[4];
- spin_unlock_irqrestore(&ibp->lock, flags);
+ p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
+ p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
+ p->tag = cpu_to_be16(ibp->rvp.pma_tag);
+ p->counter_select[0] = ibp->rvp.pma_counter_select[0];
+ p->counter_select[1] = ibp->rvp.pma_counter_select[1];
+ p->counter_select[2] = ibp->rvp.pma_counter_select[2];
+ p->counter_select[3] = ibp->rvp.pma_counter_select[3];
+ p->counter_select[4] = ibp->rvp.pma_counter_select[4];
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
bail:
return reply((struct ib_smp *) pmp);
@@ -1233,7 +1238,7 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
goto bail;
}
- spin_lock_irqsave(&ibp->lock, flags);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
/* Port Sampling code owns the PS* HW counters */
xmit_flags = ppd->cong_stats.flags;
@@ -1242,18 +1247,18 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
if (status == IB_PMA_SAMPLE_STATUS_DONE ||
(status == IB_PMA_SAMPLE_STATUS_RUNNING &&
xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
- ibp->pma_sample_start = be32_to_cpu(p->sample_start);
- ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
- ibp->pma_tag = be16_to_cpu(p->tag);
- ibp->pma_counter_select[0] = p->counter_select[0];
- ibp->pma_counter_select[1] = p->counter_select[1];
- ibp->pma_counter_select[2] = p->counter_select[2];
- ibp->pma_counter_select[3] = p->counter_select[3];
- ibp->pma_counter_select[4] = p->counter_select[4];
- dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
- ibp->pma_sample_start);
+ ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
+ ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
+ ibp->rvp.pma_tag = be16_to_cpu(p->tag);
+ ibp->rvp.pma_counter_select[0] = p->counter_select[0];
+ ibp->rvp.pma_counter_select[1] = p->counter_select[1];
+ ibp->rvp.pma_counter_select[2] = p->counter_select[2];
+ ibp->rvp.pma_counter_select[3] = p->counter_select[3];
+ ibp->rvp.pma_counter_select[4] = p->counter_select[4];
+ dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
+ ibp->rvp.pma_sample_start);
}
- spin_unlock_irqrestore(&ibp->lock, flags);
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
ret = pma_get_portsamplescontrol(pmp, ibdev, port);
@@ -1357,8 +1362,8 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
int i;
memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->lock, flags);
- p->tag = cpu_to_be16(ibp->pma_tag);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ p->tag = cpu_to_be16(ibp->rvp.pma_tag);
if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
else {
@@ -1373,11 +1378,11 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
}
}
- for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
p->counter[i] = cpu_to_be32(
get_cache_hw_sample_counters(
- ppd, ibp->pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->lock, flags);
+ ppd, ibp->rvp.pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
return reply((struct ib_smp *) pmp);
}
@@ -1397,8 +1402,8 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
/* Port Sampling code owns the PS* HW counters */
memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->lock, flags);
- p->tag = cpu_to_be16(ibp->pma_tag);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ p->tag = cpu_to_be16(ibp->rvp.pma_tag);
if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
else {
@@ -1415,11 +1420,11 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
}
}
- for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
p->counter[i] = cpu_to_be64(
get_cache_hw_sample_counters(
- ppd, ibp->pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->lock, flags);
+ ppd, ibp->rvp.pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
return reply((struct ib_smp *) pmp);
}
@@ -1453,7 +1458,7 @@ static int pma_get_portcounters(struct ib_pma_mad *pmp,
cntrs.excessive_buffer_overrun_errors -=
ibp->z_excessive_buffer_overrun_errors;
cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->n_vl15_dropped;
+ cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
memset(pmp->data, 0, sizeof(pmp->data));
@@ -1546,9 +1551,9 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
qib_get_counters(ppd, &cntrs);
- spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
xmit_wait_counter = xmit_wait_get_value_delta(ppd);
- spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
/* Adjust counters for any resets done. */
cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
@@ -1564,7 +1569,7 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
cntrs.excessive_buffer_overrun_errors -=
ibp->z_excessive_buffer_overrun_errors;
cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->n_vl15_dropped;
+ cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
cntrs.port_xmit_data -= ibp->z_port_xmit_data;
cntrs.port_rcv_data -= ibp->z_port_rcv_data;
cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
@@ -1743,7 +1748,7 @@ static int pma_set_portcounters(struct ib_pma_mad *pmp,
cntrs.excessive_buffer_overrun_errors;
if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
- ibp->n_vl15_dropped = 0;
+ ibp->rvp.n_vl15_dropped = 0;
ibp->z_vl15_dropped = cntrs.vl15_dropped;
}
@@ -1778,11 +1783,11 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
ret = pma_get_portcounters_cong(pmp, ibdev, port);
if (counter_select & IB_PMA_SEL_CONG_XMIT) {
- spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
ppd->cong_stats.counter = 0;
dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
0x0);
- spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
}
if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
ibp->z_port_xmit_data = cntrs.port_xmit_data;
@@ -1806,7 +1811,7 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
cntrs.local_link_integrity_errors;
ibp->z_excessive_buffer_overrun_errors =
cntrs.excessive_buffer_overrun_errors;
- ibp->n_vl15_dropped = 0;
+ ibp->rvp.n_vl15_dropped = 0;
ibp->z_vl15_dropped = cntrs.vl15_dropped;
}
@@ -1916,12 +1921,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = subn_get_vl_arb(smp, ibdev, port);
goto bail;
case IB_SMP_ATTR_SM_INFO:
- if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
ret = IB_MAD_RESULT_SUCCESS |
IB_MAD_RESULT_CONSUMED;
goto bail;
}
- if (ibp->port_cap_flags & IB_PORT_SM) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
@@ -1950,12 +1955,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = subn_set_vl_arb(smp, ibdev, port);
goto bail;
case IB_SMP_ATTR_SM_INFO:
- if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
ret = IB_MAD_RESULT_SUCCESS |
IB_MAD_RESULT_CONSUMED;
goto bail;
}
- if (ibp->port_cap_flags & IB_PORT_SM) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
@@ -2443,12 +2448,6 @@ bail:
return ret;
}
-static void send_handler(struct ib_mad_agent *agent,
- struct ib_mad_send_wc *mad_send_wc)
-{
- ib_free_send_mad(mad_send_wc->send_buf);
-}
-
static void xmit_wait_timer_func(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
@@ -2456,7 +2455,7 @@ static void xmit_wait_timer_func(unsigned long opaque)
unsigned long flags;
u8 status;
- spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
if (status == IB_PMA_SAMPLE_STATUS_DONE) {
@@ -2469,74 +2468,35 @@ static void xmit_wait_timer_func(unsigned long opaque)
ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
- spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}
-int qib_create_agents(struct qib_ibdev *dev)
+void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
- struct qib_devdata *dd = dd_from_dev(dev);
- struct ib_mad_agent *agent;
- struct qib_ibport *ibp;
- int p;
- int ret;
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(ibdev,
+ struct qib_devdata, verbs_dev);
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
- NULL, 0, send_handler,
- NULL, NULL, 0);
- if (IS_ERR(agent)) {
- ret = PTR_ERR(agent);
- goto err;
- }
-
- /* Initialize xmit_wait structure */
- dd->pport[p].cong_stats.counter = 0;
- init_timer(&dd->pport[p].cong_stats.timer);
- dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
- dd->pport[p].cong_stats.timer.data =
- (unsigned long)(&dd->pport[p]);
- dd->pport[p].cong_stats.timer.expires = 0;
- add_timer(&dd->pport[p].cong_stats.timer);
-
- ibp->send_agent = agent;
- }
-
- return 0;
-
-err:
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- if (ibp->send_agent) {
- agent = ibp->send_agent;
- ibp->send_agent = NULL;
- ib_unregister_mad_agent(agent);
- }
- }
-
- return ret;
+ /* Initialize xmit_wait structure */
+ dd->pport[port_idx].cong_stats.counter = 0;
+ init_timer(&dd->pport[port_idx].cong_stats.timer);
+ dd->pport[port_idx].cong_stats.timer.function = xmit_wait_timer_func;
+ dd->pport[port_idx].cong_stats.timer.data =
+ (unsigned long)(&dd->pport[port_idx]);
+ dd->pport[port_idx].cong_stats.timer.expires = 0;
+ add_timer(&dd->pport[port_idx].cong_stats.timer);
}
-void qib_free_agents(struct qib_ibdev *dev)
+void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
- struct qib_devdata *dd = dd_from_dev(dev);
- struct ib_mad_agent *agent;
- struct qib_ibport *ibp;
- int p;
-
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- if (ibp->send_agent) {
- agent = ibp->send_agent;
- ibp->send_agent = NULL;
- ib_unregister_mad_agent(agent);
- }
- if (ibp->sm_ah) {
- ib_destroy_ah(&ibp->sm_ah->ibah);
- ibp->sm_ah = NULL;
- }
- if (dd->pport[p].cong_stats.timer.data)
- del_timer_sync(&dd->pport[p].cong_stats.timer);
- }
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(ibdev,
+ struct qib_devdata, verbs_dev);
+
+ if (dd->pport[port_idx].cong_stats.timer.data)
+ del_timer_sync(&dd->pport[port_idx].cong_stats.timer);
+
+ if (dd->pport[port_idx].ibport_data.smi_ah)
+ ib_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah);
}
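
Earlier in this file, qib_send_trap() computes the trap timeout from the
IBTA SubnetTimeOut encoding: the interval is 4.096 us * 2^subnet_timeout,
reduced to whole microseconds before usecs_to_jiffies(). A hypothetical
helper capturing the same math:

	/* 4.096 us = 4096 ns; the shift gives ns, /1000 gives us.
	 * e.g. value 18 -> 4096 * 2^18 ns, roughly 1.07 s.
	 */
	static unsigned long subnet_timeout_to_jiffies(u8 value)
	{
		unsigned long usecs = (4096UL * (1UL << value)) / 1000;

		return usecs_to_jiffies(usecs);
	}
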
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
deleted file mode 100644
index 34927b700b0e..000000000000
--- a/drivers/infiniband/hw/qib/qib_mmap.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <asm/pgtable.h>
-
-#include "qib_verbs.h"
-
-/**
- * qib_release_mmap_info - free mmap info structure
- * @ref: a pointer to the kref within struct qib_mmap_info
- */
-void qib_release_mmap_info(struct kref *ref)
-{
- struct qib_mmap_info *ip =
- container_of(ref, struct qib_mmap_info, ref);
- struct qib_ibdev *dev = to_idev(ip->context->device);
-
- spin_lock_irq(&dev->pending_lock);
- list_del(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
-
- vfree(ip->obj);
- kfree(ip);
-}
-
-/*
- * open and close keep track of how many times the CQ is mapped,
- * to avoid releasing it.
- */
-static void qib_vma_open(struct vm_area_struct *vma)
-{
- struct qib_mmap_info *ip = vma->vm_private_data;
-
- kref_get(&ip->ref);
-}
-
-static void qib_vma_close(struct vm_area_struct *vma)
-{
- struct qib_mmap_info *ip = vma->vm_private_data;
-
- kref_put(&ip->ref, qib_release_mmap_info);
-}
-
-static const struct vm_operations_struct qib_vm_ops = {
- .open = qib_vma_open,
- .close = qib_vma_close,
-};
-
-/**
- * qib_mmap - create a new mmap region
- * @context: the IB user context of the process making the mmap() call
- * @vma: the VMA to be initialized
- * Return zero if the mmap is OK. Otherwise, return an errno.
- */
-int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- struct qib_ibdev *dev = to_idev(context->device);
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long size = vma->vm_end - vma->vm_start;
- struct qib_mmap_info *ip, *pp;
- int ret = -EINVAL;
-
- /*
- * Search the device's list of objects waiting for a mmap call.
- * Normally, this list is very short since a call to create a
- * CQ, QP, or SRQ is soon followed by a call to mmap().
- */
- spin_lock_irq(&dev->pending_lock);
- list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
- pending_mmaps) {
- /* Only the creator is allowed to mmap the object */
- if (context != ip->context || (__u64) offset != ip->offset)
- continue;
- /* Don't allow a mmap larger than the object. */
- if (size > ip->size)
- break;
-
- list_del_init(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
-
- ret = remap_vmalloc_range(vma, ip->obj, 0);
- if (ret)
- goto done;
- vma->vm_ops = &qib_vm_ops;
- vma->vm_private_data = ip;
- qib_vma_open(vma);
- goto done;
- }
- spin_unlock_irq(&dev->pending_lock);
-done:
- return ret;
-}
-
-/*
- * Allocate information for qib_mmap
- */
-struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
- u32 size,
- struct ib_ucontext *context,
- void *obj) {
- struct qib_mmap_info *ip;
-
- ip = kmalloc(sizeof(*ip), GFP_KERNEL);
- if (!ip)
- goto bail;
-
- size = PAGE_ALIGN(size);
-
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
-
- INIT_LIST_HEAD(&ip->pending_mmaps);
- ip->size = size;
- ip->context = context;
- ip->obj = obj;
- kref_init(&ip->ref);
-
-bail:
- return ip;
-}
-
-void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
- u32 size, void *obj)
-{
- size = PAGE_ALIGN(size);
-
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
-
- ip->size = size;
- ip->obj = obj;
-}
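
qib_mmap.c goes away because rdmavt now owns this code. The protocol it
implemented is worth keeping in mind when reading the rvt replacement; a
hedged sketch (field and response names are illustrative):

	/* Kernel side, e.g. at CQ creation: publish an offset key.
	 *	cq->ip = qib_create_mmap_info(dev, sz, context, wc);
	 *	resp.offset = cq->ip->offset;   // returned via udata
	 *
	 * User side: hand the key back through mmap() on the verbs fd.
	 *	ptr = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED,
	 *		   cmd_fd, resp.offset);
	 *
	 * qib_mmap() then matches context + offset on dev->pending_mmaps
	 * and remaps the vmalloc'ed object into the caller's vma.
	 */
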
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
deleted file mode 100644
index 5f53304e8a9b..000000000000
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ /dev/null
@@ -1,490 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_umem.h>
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-
-/* Fast memory region */
-struct qib_fmr {
- struct ib_fmr ibfmr;
- struct qib_mregion mr; /* must be last */
-};
-
-static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct qib_fmr, ibfmr);
-}
-
-static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
- int count)
-{
- int m, i = 0;
- int rval = 0;
-
- m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
- for (; i < m; i++) {
- mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
- if (!mr->map[i])
- goto bail;
- }
- mr->mapsz = m;
- init_completion(&mr->comp);
- /* count returning the ptr to user */
- atomic_set(&mr->refcount, 1);
- mr->pd = pd;
- mr->max_segs = count;
-out:
- return rval;
-bail:
- while (i)
- kfree(mr->map[--i]);
- rval = -ENOMEM;
- goto out;
-}
-
-static void deinit_qib_mregion(struct qib_mregion *mr)
-{
- int i = mr->mapsz;
-
- mr->mapsz = 0;
- while (i)
- kfree(mr->map[--i]);
-}
-
-
-/**
- * qib_get_dma_mr - get a DMA memory region
- * @pd: protection domain for this memory region
- * @acc: access flags
- *
- * Returns the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the
- * struct ib_dma_mapping_ops functions (see qib_dma.c).
- */
-struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct qib_mr *mr = NULL;
- struct ib_mr *ret;
- int rval;
-
- if (to_ipd(pd)->user) {
- ret = ERR_PTR(-EPERM);
- goto bail;
- }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- rval = init_qib_mregion(&mr->mr, pd, 0);
- if (rval) {
- ret = ERR_PTR(rval);
- goto bail;
- }
-
-
- rval = qib_alloc_lkey(&mr->mr, 1);
- if (rval) {
- ret = ERR_PTR(rval);
- goto bail_mregion;
- }
-
- mr->mr.access_flags = acc;
- ret = &mr->ibmr;
-done:
- return ret;
-
-bail_mregion:
- deinit_qib_mregion(&mr->mr);
-bail:
- kfree(mr);
- goto done;
-}
-
-static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
-{
- struct qib_mr *mr;
- int rval = -ENOMEM;
- int m;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
- mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
- if (!mr)
- goto bail;
-
- rval = init_qib_mregion(&mr->mr, pd, count);
- if (rval)
- goto bail;
-
- rval = qib_alloc_lkey(&mr->mr, 0);
- if (rval)
- goto bail_mregion;
- mr->ibmr.lkey = mr->mr.lkey;
- mr->ibmr.rkey = mr->mr.lkey;
-done:
- return mr;
-
-bail_mregion:
- deinit_qib_mregion(&mr->mr);
-bail:
- kfree(mr);
- mr = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * qib_reg_user_mr - register a userspace memory region
- * @pd: protection domain for this memory region
- * @start: starting userspace address
- * @length: length of region to register
- * @mr_access_flags: access flags for this memory region
- * @udata: unused by the QLogic_IB driver
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata)
-{
- struct qib_mr *mr;
- struct ib_umem *umem;
- struct scatterlist *sg;
- int n, m, entry;
- struct ib_mr *ret;
-
- if (length == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
- if (IS_ERR(umem))
- return (void *) umem;
-
- n = umem->nmap;
-
- mr = alloc_mr(n, pd);
- if (IS_ERR(mr)) {
- ret = (struct ib_mr *)mr;
- ib_umem_release(umem);
- goto bail;
- }
-
- mr->mr.user_base = start;
- mr->mr.iova = virt_addr;
- mr->mr.length = length;
- mr->mr.offset = ib_umem_offset(umem);
- mr->mr.access_flags = mr_access_flags;
- mr->umem = umem;
-
- if (is_power_of_2(umem->page_size))
- mr->mr.page_shift = ilog2(umem->page_size);
- m = 0;
- n = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- void *vaddr;
-
- vaddr = page_address(sg_page(sg));
- if (!vaddr) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- mr->mr.map[m]->segs[n].vaddr = vaddr;
- mr->mr.map[m]->segs[n].length = umem->page_size;
- n++;
- if (n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- ret = &mr->ibmr;
-
-bail:
- return ret;
-}
-
-/**
- * qib_dereg_mr - unregister and free a memory region
- * @ibmr: the memory region to free
- *
- * Returns 0 on success.
- *
- * Note that this is called to free MRs created by qib_get_dma_mr()
- * or qib_reg_user_mr().
- */
-int qib_dereg_mr(struct ib_mr *ibmr)
-{
- struct qib_mr *mr = to_imr(ibmr);
- int ret = 0;
- unsigned long timeout;
-
- kfree(mr->pages);
- qib_free_lkey(&mr->mr);
-
- qib_put_mr(&mr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&mr->mr.comp,
- 5 * HZ);
- if (!timeout) {
- qib_get_mr(&mr->mr);
- ret = -EBUSY;
- goto out;
- }
- deinit_qib_mregion(&mr->mr);
- if (mr->umem)
- ib_umem_release(mr->umem);
- kfree(mr);
-out:
- return ret;
-}
-
-/*
- * Allocate a memory region usable with the
- * IB_WR_REG_MR send work request.
- *
- * Return the memory region on success, otherwise return an errno.
- */
-struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
-{
- struct qib_mr *mr;
-
- if (mr_type != IB_MR_TYPE_MEM_REG)
- return ERR_PTR(-EINVAL);
-
- mr = alloc_mr(max_num_sg, pd);
- if (IS_ERR(mr))
- return (struct ib_mr *)mr;
-
- mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
- if (!mr->pages)
- goto err;
-
- return &mr->ibmr;
-
-err:
- qib_dereg_mr(&mr->ibmr);
- return ERR_PTR(-ENOMEM);
-}
-
-static int qib_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct qib_mr *mr = to_imr(ibmr);
-
- if (unlikely(mr->npages == mr->mr.max_segs))
- return -ENOMEM;
-
- mr->pages[mr->npages++] = addr;
-
- return 0;
-}
-
-int qib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents)
-{
- struct qib_mr *mr = to_imr(ibmr);
-
- mr->npages = 0;
-
- return ib_sg_to_pages(ibmr, sg, sg_nents, qib_set_page);
-}
-
-/**
- * qib_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct qib_fmr *fmr;
- int m;
- struct ib_fmr *ret;
- int rval = -ENOMEM;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
- fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
- if (!fmr)
- goto bail;
-
- rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
- if (rval)
- goto bail;
-
- /*
- * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
- * rkey.
- */
- rval = qib_alloc_lkey(&fmr->mr, 0);
- if (rval)
- goto bail_mregion;
- fmr->ibfmr.rkey = fmr->mr.lkey;
- fmr->ibfmr.lkey = fmr->mr.lkey;
- /*
- * Resources are allocated but no valid mapping (RKEY can't be
- * used).
- */
- fmr->mr.access_flags = mr_access_flags;
- fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->mr.page_shift = fmr_attr->page_shift;
-
- ret = &fmr->ibfmr;
-done:
- return ret;
-
-bail_mregion:
- deinit_qib_mregion(&fmr->mr);
-bail:
- kfree(fmr);
- ret = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * qib_map_phys_fmr - set up a fast memory region
- * @ibmfr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- */
-
-int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct qib_fmr *fmr = to_ifmr(ibfmr);
- struct qib_lkey_table *rkt;
- unsigned long flags;
- int m, n, i;
- u32 ps;
- int ret;
-
- i = atomic_read(&fmr->mr.refcount);
- if (i > 2)
- return -EBUSY;
-
- if (list_len > fmr->mr.max_segs) {
- ret = -EINVAL;
- goto bail;
- }
- rkt = &to_idev(ibfmr->device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = iova;
- fmr->mr.iova = iova;
- ps = 1 << fmr->mr.page_shift;
- fmr->mr.length = list_len * ps;
- m = 0;
- n = 0;
- for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
- fmr->mr.map[m]->segs[n].length = ps;
- if (++n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Returns 0 on success.
- */
-int qib_unmap_fmr(struct list_head *fmr_list)
-{
- struct qib_fmr *fmr;
- struct qib_lkey_table *rkt;
- unsigned long flags;
-
- list_for_each_entry(fmr, fmr_list, ibfmr.list) {
- rkt = &to_idev(fmr->ibfmr.device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- spin_unlock_irqrestore(&rkt->lock, flags);
- }
- return 0;
-}
-
-/**
- * qib_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Returns 0 on success.
- */
-int qib_dealloc_fmr(struct ib_fmr *ibfmr)
-{
- struct qib_fmr *fmr = to_ifmr(ibfmr);
- int ret = 0;
- unsigned long timeout;
-
- qib_free_lkey(&fmr->mr);
- qib_put_mr(&fmr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&fmr->mr.comp,
- 5 * HZ);
- if (!timeout) {
- qib_get_mr(&fmr->mr);
- ret = -EBUSY;
- goto out;
- }
- deinit_qib_mregion(&fmr->mr);
- kfree(fmr);
-out:
- return ret;
-}
-
-void mr_rcu_callback(struct rcu_head *list)
-{
- struct qib_mregion *mr = container_of(list, struct qib_mregion, list);
-
- complete(&mr->comp);
-}
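
The deleted MR code (and the surviving copies in qib_keys.c) addresses
buffers through a two-level map, RVT_SEGSZ segments per map page, indexed as
map[m]->segs[n]. A worked example of the uniform-page fast path, taking
RVT_SEGSZ as 8 purely for the arithmetic:

	/* page_shift = 12 (4 KiB), off = 70000 bytes into the region:
	 *   entries_spanned_by_off = 70000 >> 12 = 17 full pages
	 *   off -= 17 << 12;          // 368 bytes left within the page
	 *   m = 17 / 8 = 2, n = 17 % 8 = 1  ->  mr->map[2]->segs[1]
	 */
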
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3eff35c2d453..575b737d9ef3 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -34,32 +34,38 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
-#include <linux/jhash.h>
+#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#include "qib.h"
-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+/*
+ * The mask field that existed in the now-deleted qib_qpn_table has no
+ * counterpart in rvt_qpn_table, so define the same field here as
+ * qpt_mask rather than adding a mask field to rvt_qpn_table.
+ */
+u16 qpt_mask;
-static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
- struct qpn_map *map, unsigned off)
+static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off)
{
- return (map - qpt->map) * BITS_PER_PAGE + off;
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
-static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
- struct qpn_map *map, unsigned off,
+static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off,
unsigned n)
{
- if (qpt->mask) {
+ if (qpt_mask) {
off++;
- if (((off & qpt->mask) >> 1) >= n)
- off = (off | qpt->mask) + 2;
- } else
- off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+ if (((off & qpt_mask) >> 1) >= n)
+ off = (off | qpt_mask) + 2;
+ } else {
+ off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
+ }
return off;
}
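
mk_qpn() flattens a (map page, bit offset) pair back into a QPN: with 4 KiB
pages, RVT_BITS_PER_PAGE is 32768, so map index 1 and bit 5 yield QPN 32773.
The inverse, as used when freeing a QPN (illustrative helper):

	static inline void qpn_to_map(struct rvt_qpn_table *qpt, u32 qpn,
				      struct rvt_qpn_map **map,
				      unsigned *off)
	{
		/* RVT_BITS_PER_PAGE is a power of two, so / and % reduce
		 * to a shift and a mask.
		 */
		*map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
		*off = qpn % RVT_BITS_PER_PAGE;
	}
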
@@ -100,7 +106,7 @@ static u32 credit_table[31] = {
32768 /* 1E */
};
-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
gfp_t gfp)
{
unsigned long page = get_zeroed_page(gfp);
@@ -121,12 +127,15 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
* Allocate the next available QPN or
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
-static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
- enum ib_qp_type type, u8 port, gfp_t gfp)
+int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+ enum ib_qp_type type, u8 port, gfp_t gfp)
{
u32 i, offset, max_scan, qpn;
- struct qpn_map *map;
+ struct rvt_qpn_map *map;
u32 ret;
+ struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
+ verbs_dev);
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
unsigned n;
@@ -143,12 +152,12 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
}
qpn = qpt->last + 2;
- if (qpn >= QPN_MAX)
+ if (qpn >= RVT_QPN_MAX)
qpn = 2;
- if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
- qpn = (qpn | qpt->mask) + 2;
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / BITS_PER_PAGE];
+ if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
+ qpn = (qpn | qpt_mask) + 2;
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
@@ -173,14 +182,14 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
* We just need to be sure we don't loop
* forever.
*/
- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
/*
* In order to keep the number of pages allocated to a
 * minimum, we scan all the existing pages before increasing
* the size of the bitmap table.
*/
if (++i > max_scan) {
- if (qpt->nmaps == QPNMAP_ENTRIES)
+ if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
break;
map = &qpt->map[qpt->nmaps++];
offset = 0;
@@ -200,706 +209,113 @@ bail:
return ret;
}
-static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
-{
- struct qpn_map *map;
-
- map = qpt->map + qpn / BITS_PER_PAGE;
- if (map->page)
- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
-}
-
-static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
-{
- return jhash_1word(qpn, dev->qp_rnd) &
- (dev->qp_table_size - 1);
-}
-
-
-/*
- * Put the QP into the hash table.
- * The hash table holds a reference to the QP.
- */
-static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned long flags;
- unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
-
- atomic_inc(&qp->refcount);
- spin_lock_irqsave(&dev->qpt_lock, flags);
-
- if (qp->ibqp.qp_num == 0)
- rcu_assign_pointer(ibp->qp0, qp);
- else if (qp->ibqp.qp_num == 1)
- rcu_assign_pointer(ibp->qp1, qp);
- else {
- qp->next = dev->qp_table[n];
- rcu_assign_pointer(dev->qp_table[n], qp);
- }
-
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
-}
-
-/*
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive interrupt routine.
- */
-static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
- unsigned long flags;
- int removed = 1;
-
- spin_lock_irqsave(&dev->qpt_lock, flags);
-
- if (rcu_dereference_protected(ibp->qp0,
- lockdep_is_held(&dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp0, NULL);
- } else if (rcu_dereference_protected(ibp->qp1,
- lockdep_is_held(&dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp1, NULL);
- } else {
- struct qib_qp *q;
- struct qib_qp __rcu **qpp;
-
- removed = 0;
- qpp = &dev->qp_table[n];
- for (; (q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock))) != NULL;
- qpp = &q->next)
- if (q == qp) {
- RCU_INIT_POINTER(*qpp,
- rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qpt_lock)));
- removed = 1;
- break;
- }
- }
-
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
- if (removed) {
- synchronize_rcu();
- atomic_dec(&qp->refcount);
- }
-}
-
/**
* qib_free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
- *
- * There should not be any QPs still in use.
- * Free memory for table.
*/
-unsigned qib_free_all_qps(struct qib_devdata *dd)
+unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
- struct qib_ibdev *dev = &dd->verbs_dev;
- unsigned long flags;
- struct qib_qp *qp;
+ struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
+ verbs_dev);
unsigned n, qp_inuse = 0;
for (n = 0; n < dd->num_pports; n++) {
struct qib_ibport *ibp = &dd->pport[n].ibport_data;
- if (!qib_mcast_tree_empty(ibp))
- qp_inuse++;
rcu_read_lock();
- if (rcu_dereference(ibp->qp0))
+ if (rcu_dereference(ibp->rvp.qp[0]))
qp_inuse++;
- if (rcu_dereference(ibp->qp1))
+ if (rcu_dereference(ibp->rvp.qp[1]))
qp_inuse++;
rcu_read_unlock();
}
-
- spin_lock_irqsave(&dev->qpt_lock, flags);
- for (n = 0; n < dev->qp_table_size; n++) {
- qp = rcu_dereference_protected(dev->qp_table[n],
- lockdep_is_held(&dev->qpt_lock));
- RCU_INIT_POINTER(dev->qp_table[n], NULL);
-
- for (; qp; qp = rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qpt_lock)))
- qp_inuse++;
- }
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
- synchronize_rcu();
-
return qp_inuse;
}
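
The double container_of() used in these converted entry points recovers the qib device from the rdmavt handle: rvt_dev_info is embedded in qib_ibdev as .rdi, which is in turn embedded in qib_devdata as .verbs_dev. A schematic sketch of the pointer arithmetic, with the structs reduced to just their embedding relationship:

	#include <stddef.h>

	struct rvt_dev_info_s { int placeholder; };
	struct qib_ibdev_s    { struct rvt_dev_info_s rdi; };
	struct qib_devdata_s  { struct qib_ibdev_s verbs_dev; };

	#define container_of_sketch(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct qib_devdata_s *dd_from_rdi(struct rvt_dev_info_s *rdi)
	{
		struct qib_ibdev_s *vdev =
			container_of_sketch(rdi, struct qib_ibdev_s, rdi);

		return container_of_sketch(vdev, struct qib_devdata_s, verbs_dev);
	}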
-/**
- * qib_lookup_qpn - return the QP with the given QPN
- * @qpt: the QP table
- * @qpn: the QP number to look up
- *
- * The caller is responsible for decrementing the QP reference count
- * when done.
- */
-struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
+void qib_notify_qp_reset(struct rvt_qp *qp)
{
- struct qib_qp *qp = NULL;
-
- rcu_read_lock();
- if (unlikely(qpn <= 1)) {
- if (qpn == 0)
- qp = rcu_dereference(ibp->qp0);
- else
- qp = rcu_dereference(ibp->qp1);
- if (qp)
- atomic_inc(&qp->refcount);
- } else {
- struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
- unsigned n = qpn_hash(dev, qpn);
-
- for (qp = rcu_dereference(dev->qp_table[n]); qp;
- qp = rcu_dereference(qp->next))
- if (qp->ibqp.qp_num == qpn) {
- atomic_inc(&qp->refcount);
- break;
- }
- }
- rcu_read_unlock();
- return qp;
-}
-
-/**
- * qib_reset_qp - initialize the QP state to the reset state
- * @qp: the QP to reset
- * @type: the QP type
- */
-static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
-{
- qp->remote_qpn = 0;
- qp->qkey = 0;
- qp->qp_access_flags = 0;
- atomic_set(&qp->s_dma_busy, 0);
- qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
- qp->s_hdrwords = 0;
- qp->s_wqe = NULL;
- qp->s_draining = 0;
- qp->s_next_psn = 0;
- qp->s_last_psn = 0;
- qp->s_sending_psn = 0;
- qp->s_sending_hpsn = 0;
- qp->s_psn = 0;
- qp->r_psn = 0;
- qp->r_msn = 0;
- if (type == IB_QPT_RC) {
- qp->s_state = IB_OPCODE_RC_SEND_LAST;
- qp->r_state = IB_OPCODE_RC_SEND_LAST;
- } else {
- qp->s_state = IB_OPCODE_UC_SEND_LAST;
- qp->r_state = IB_OPCODE_UC_SEND_LAST;
- }
- qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
- qp->r_nak_state = 0;
- qp->r_aflags = 0;
- qp->r_flags = 0;
- qp->s_head = 0;
- qp->s_tail = 0;
- qp->s_cur = 0;
- qp->s_acked = 0;
- qp->s_last = 0;
- qp->s_ssn = 1;
- qp->s_lsn = 0;
- qp->s_mig_state = IB_MIG_MIGRATED;
- memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
- qp->r_head_ack_queue = 0;
- qp->s_tail_ack_queue = 0;
- qp->s_num_rd_atomic = 0;
- if (qp->r_rq.wq) {
- qp->r_rq.wq->head = 0;
- qp->r_rq.wq->tail = 0;
- }
- qp->r_sge.num_sge = 0;
-}
-
-static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
-{
- unsigned n;
-
- if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
- qib_put_ss(&qp->s_rdma_read_sge);
-
- qib_put_ss(&qp->r_sge);
-
- if (clr_sends) {
- while (qp->s_last != qp->s_head) {
- struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
- unsigned i;
-
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
-
- qib_put_mr(sge->mr);
- }
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- }
- if (qp->s_rdma_mr) {
- qib_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- }
-
- if (qp->ibqp.qp_type != IB_QPT_RC)
- return;
+ struct qib_qp_priv *priv = qp->priv;
- for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
- struct qib_ack_entry *e = &qp->s_ack_queue[n];
-
- if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
- e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- }
+ atomic_set(&priv->s_dma_busy, 0);
}
-/**
- * qib_error_qp - put a QP into the error state
- * @qp: the QP to put into the error state
- * @err: the receive completion error to signal if a RWQE is active
- *
- * Flushes both send and receive work queues.
- * Returns true if last WQE event should be generated.
- * The QP r_lock and s_lock should be held and interrupts disabled.
- * If we are already in error state, just return.
- */
-int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
+void qib_notify_error_qp(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_wc wc;
- int ret = 0;
-
- if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
- goto bail;
-
- qp->state = IB_QPS_ERR;
-
- if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
-
- if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
- qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
- qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
- list_del_init(&qp->iowait);
+ spin_lock(&dev->rdi.pending_lock);
+ if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
+ qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+ list_del_init(&priv->iowait);
}
- spin_unlock(&dev->pending_lock);
+ spin_unlock(&dev->rdi.pending_lock);
- if (!(qp->s_flags & QIB_S_BUSY)) {
+ if (!(qp->s_flags & RVT_S_BUSY)) {
qp->s_hdrwords = 0;
if (qp->s_rdma_mr) {
- qib_put_mr(qp->s_rdma_mr);
+ rvt_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
- if (qp->s_tx) {
- qib_put_txreq(qp->s_tx);
- qp->s_tx = NULL;
+ if (priv->s_tx) {
+ qib_put_txreq(priv->s_tx);
+ priv->s_tx = NULL;
}
}
-
- /* Schedule the sending tasklet to drain the send work queue. */
- if (qp->s_last != qp->s_head)
- qib_schedule_send(qp);
-
- clear_mr_refs(qp, 0);
-
- memset(&wc, 0, sizeof(wc));
- wc.qp = &qp->ibqp;
- wc.opcode = IB_WC_RECV;
-
- if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
- wc.wr_id = qp->r_wr_id;
- wc.status = err;
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wc.status = IB_WC_WR_FLUSH_ERR;
-
- if (qp->r_rq.wq) {
- struct qib_rwq *wq;
- u32 head;
- u32 tail;
-
- spin_lock(&qp->r_rq.lock);
-
- /* sanity check pointers before trusting them */
- wq = qp->r_rq.wq;
- head = wq->head;
- if (head >= qp->r_rq.size)
- head = 0;
- tail = wq->tail;
- if (tail >= qp->r_rq.size)
- tail = 0;
- while (tail != head) {
- wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
- if (++tail >= qp->r_rq.size)
- tail = 0;
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wq->tail = tail;
-
- spin_unlock(&qp->r_rq.lock);
- } else if (qp->ibqp.event_handler)
- ret = 1;
-
-bail:
- return ret;
}
-/**
- * qib_modify_qp - modify the attributes of a queue pair
- * @ibqp: the queue pair who's attributes we're modifying
- * @attr: the new attributes
- * @attr_mask: the mask of attributes to modify
- * @udata: user data for libibverbs.so
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
+static int mtu_to_enum(u32 mtu)
{
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_qp *qp = to_iqp(ibqp);
- enum ib_qp_state cur_state, new_state;
- struct ib_event ev;
- int lastwqe = 0;
- int mig = 0;
- int ret;
- u32 pmtu = 0; /* for gcc warning only */
-
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
-
- cur_state = attr_mask & IB_QP_CUR_STATE ?
- attr->cur_qp_state : qp->state;
- new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, IB_LINK_LAYER_UNSPECIFIED))
- goto inval;
-
- if (attr_mask & IB_QP_AV) {
- if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
- goto inval;
- if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
- goto inval;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
- goto inval;
- if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
- goto inval;
- if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
- goto inval;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
- goto inval;
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- if (attr->min_rnr_timer > 31)
- goto inval;
-
- if (attr_mask & IB_QP_PORT)
- if (qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI ||
- attr->port_num == 0 ||
- attr->port_num > ibqp->device->phys_port_cnt)
- goto inval;
-
- if (attr_mask & IB_QP_DEST_QPN)
- if (attr->dest_qp_num > QIB_QPN_MASK)
- goto inval;
-
- if (attr_mask & IB_QP_RETRY_CNT)
- if (attr->retry_cnt > 7)
- goto inval;
-
- if (attr_mask & IB_QP_RNR_RETRY)
- if (attr->rnr_retry > 7)
- goto inval;
-
- /*
- * Don't allow invalid path_mtu values. OK to set greater
- * than the active mtu (or even the max_cap, if we have tuned
- * that to a small mtu. We'll set qp->path_mtu
- * to the lesser of requested attribute mtu and active,
- * for packetizing messages.
- * Note that the QP port has to be set in INIT and MTU in RTR.
- */
- if (attr_mask & IB_QP_PATH_MTU) {
- struct qib_devdata *dd = dd_from_dev(dev);
- int mtu, pidx = qp->port_num - 1;
-
- mtu = ib_mtu_enum_to_int(attr->path_mtu);
- if (mtu == -1)
- goto inval;
- if (mtu > dd->pport[pidx].ibmtu) {
- switch (dd->pport[pidx].ibmtu) {
- case 4096:
- pmtu = IB_MTU_4096;
- break;
- case 2048:
- pmtu = IB_MTU_2048;
- break;
- case 1024:
- pmtu = IB_MTU_1024;
- break;
- case 512:
- pmtu = IB_MTU_512;
- break;
- case 256:
- pmtu = IB_MTU_256;
- break;
- default:
- pmtu = IB_MTU_2048;
- }
- } else
- pmtu = attr->path_mtu;
- }
-
- if (attr_mask & IB_QP_PATH_MIG_STATE) {
- if (attr->path_mig_state == IB_MIG_REARM) {
- if (qp->s_mig_state == IB_MIG_ARMED)
- goto inval;
- if (new_state != IB_QPS_RTS)
- goto inval;
- } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
- if (qp->s_mig_state == IB_MIG_REARM)
- goto inval;
- if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
- goto inval;
- if (qp->s_mig_state == IB_MIG_ARMED)
- mig = 1;
- } else
- goto inval;
- }
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
- goto inval;
+ int enum_mtu;
- switch (new_state) {
- case IB_QPS_RESET:
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->iowait))
- list_del_init(&qp->iowait);
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
- /* Stop the sending work queue and retry timer */
- cancel_work_sync(&qp->s_work);
- del_timer_sync(&qp->s_timer);
- wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
- if (qp->s_tx) {
- qib_put_txreq(qp->s_tx);
- qp->s_tx = NULL;
- }
- remove_qp(dev, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
- clear_mr_refs(qp, 1);
- qib_reset_qp(qp, ibqp->qp_type);
- }
+ switch (mtu) {
+ case 4096:
+ enum_mtu = IB_MTU_4096;
break;
-
- case IB_QPS_RTR:
- /* Allow event to retrigger if QP set to RTR more than once */
- qp->r_flags &= ~QIB_R_COMM_EST;
- qp->state = new_state;
+ case 2048:
+ enum_mtu = IB_MTU_2048;
break;
-
- case IB_QPS_SQD:
- qp->s_draining = qp->s_last != qp->s_cur;
- qp->state = new_state;
+ case 1024:
+ enum_mtu = IB_MTU_1024;
break;
-
- case IB_QPS_SQE:
- if (qp->ibqp.qp_type == IB_QPT_RC)
- goto inval;
- qp->state = new_state;
+ case 512:
+ enum_mtu = IB_MTU_512;
break;
-
- case IB_QPS_ERR:
- lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ case 256:
+ enum_mtu = IB_MTU_256;
break;
-
default:
- qp->state = new_state;
- break;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- qp->s_pkey_index = attr->pkey_index;
-
- if (attr_mask & IB_QP_PORT)
- qp->port_num = attr->port_num;
-
- if (attr_mask & IB_QP_DEST_QPN)
- qp->remote_qpn = attr->dest_qp_num;
-
- if (attr_mask & IB_QP_SQ_PSN) {
- qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
- qp->s_psn = qp->s_next_psn;
- qp->s_sending_psn = qp->s_next_psn;
- qp->s_last_psn = qp->s_next_psn - 1;
- qp->s_sending_hpsn = qp->s_last_psn;
- }
-
- if (attr_mask & IB_QP_RQ_PSN)
- qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
-
- if (attr_mask & IB_QP_ACCESS_FLAGS)
- qp->qp_access_flags = attr->qp_access_flags;
-
- if (attr_mask & IB_QP_AV) {
- qp->remote_ah_attr = attr->ah_attr;
- qp->s_srate = attr->ah_attr.static_rate;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- qp->alt_ah_attr = attr->alt_ah_attr;
- qp->s_alt_pkey_index = attr->alt_pkey_index;
- }
-
- if (attr_mask & IB_QP_PATH_MIG_STATE) {
- qp->s_mig_state = attr->path_mig_state;
- if (mig) {
- qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = qp->alt_ah_attr.port_num;
- qp->s_pkey_index = qp->s_alt_pkey_index;
- }
- }
-
- if (attr_mask & IB_QP_PATH_MTU) {
- qp->path_mtu = pmtu;
- qp->pmtu = ib_mtu_enum_to_int(pmtu);
- }
-
- if (attr_mask & IB_QP_RETRY_CNT) {
- qp->s_retry_cnt = attr->retry_cnt;
- qp->s_retry = attr->retry_cnt;
- }
-
- if (attr_mask & IB_QP_RNR_RETRY) {
- qp->s_rnr_retry_cnt = attr->rnr_retry;
- qp->s_rnr_retry = attr->rnr_retry;
+ enum_mtu = IB_MTU_2048;
}
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- qp->r_min_rnr_timer = attr->min_rnr_timer;
-
- if (attr_mask & IB_QP_TIMEOUT) {
- qp->timeout = attr->timeout;
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
- }
-
- if (attr_mask & IB_QP_QKEY)
- qp->qkey = attr->qkey;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
-
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
- qp->s_max_rd_atomic = attr->max_rd_atomic;
-
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
-
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- insert_qp(dev, qp);
-
- if (lastwqe) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- if (mig) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_PATH_MIG;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- ret = 0;
- goto bail;
-
-inval:
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
- ret = -EINVAL;
-
-bail:
- return ret;
+ return enum_mtu;
}
-int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr)
+int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_attr *attr)
{
- struct qib_qp *qp = to_iqp(ibqp);
+ int mtu, pmtu, pidx = qp->port_num - 1;
+ struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
+ verbs_dev);
+ mtu = ib_mtu_enum_to_int(attr->path_mtu);
+ if (mtu == -1)
+ return -EINVAL;
+
+ if (mtu > dd->pport[pidx].ibmtu)
+ pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
+ else
+ pmtu = attr->path_mtu;
+ return pmtu;
+}
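
The helper clamps the requested path MTU to what the port can carry: a request above dd->pport[pidx].ibmtu is replaced by the enum for the port MTU, while anything at or below passes through unchanged. A reduced sketch with the enums treated as plain byte counts (illustrative values, not the IB definitions):

	static int clamp_path_mtu(int requested_bytes, int port_mtu_bytes)
	{
		/* never report a path MTU larger than the port can carry */
		return requested_bytes > port_mtu_bytes ? port_mtu_bytes
							: requested_bytes;
	}

	/* e.g. clamp_path_mtu(4096, 2048) == 2048, i.e. IB_MTU_2048 */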
- attr->qp_state = qp->state;
- attr->cur_qp_state = attr->qp_state;
- attr->path_mtu = qp->path_mtu;
- attr->path_mig_state = qp->s_mig_state;
- attr->qkey = qp->qkey;
- attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
- attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
- attr->dest_qp_num = qp->remote_qpn;
- attr->qp_access_flags = qp->qp_access_flags;
- attr->cap.max_send_wr = qp->s_size - 1;
- attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
- attr->cap.max_send_sge = qp->s_max_sge;
- attr->cap.max_recv_sge = qp->r_rq.max_sge;
- attr->cap.max_inline_data = 0;
- attr->ah_attr = qp->remote_ah_attr;
- attr->alt_ah_attr = qp->alt_ah_attr;
- attr->pkey_index = qp->s_pkey_index;
- attr->alt_pkey_index = qp->s_alt_pkey_index;
- attr->en_sqd_async_notify = 0;
- attr->sq_draining = qp->s_draining;
- attr->max_rd_atomic = qp->s_max_rd_atomic;
- attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
- attr->min_rnr_timer = qp->r_min_rnr_timer;
- attr->port_num = qp->port_num;
- attr->timeout = qp->timeout;
- attr->retry_cnt = qp->s_retry_cnt;
- attr->rnr_retry = qp->s_rnr_retry_cnt;
- attr->alt_port_num = qp->alt_ah_attr.port_num;
- attr->alt_timeout = qp->alt_timeout;
+int qib_mtu_to_path_mtu(u32 mtu)
+{
+ return mtu_to_enum(mtu);
+}
- init_attr->event_handler = qp->ibqp.event_handler;
- init_attr->qp_context = qp->ibqp.qp_context;
- init_attr->send_cq = qp->ibqp.send_cq;
- init_attr->recv_cq = qp->ibqp.recv_cq;
- init_attr->srq = qp->ibqp.srq;
- init_attr->cap = attr->cap;
- if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
- init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- else
- init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
- init_attr->qp_type = qp->ibqp.qp_type;
- init_attr->port_num = qp->port_num;
- return 0;
+u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
+{
+ return ib_mtu_enum_to_int(pmtu);
}
/**
@@ -908,7 +324,7 @@ int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*
* Returns the AETH.
*/
-__be32 qib_compute_aeth(struct qib_qp *qp)
+__be32 qib_compute_aeth(struct rvt_qp *qp)
{
u32 aeth = qp->r_msn & QIB_MSN_MASK;
@@ -921,7 +337,7 @@ __be32 qib_compute_aeth(struct qib_qp *qp)
} else {
u32 min, max, x;
u32 credits;
- struct qib_rwq *wq = qp->r_rq.wq;
+ struct rvt_rwq *wq = qp->r_rq.wq;
u32 head;
u32 tail;
@@ -962,315 +378,63 @@ __be32 qib_compute_aeth(struct qib_qp *qp)
return cpu_to_be32(aeth);
}
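
The AETH assembled here packs the 24-bit MSN with a 5-bit, logarithmically encoded credit code in the upper bits; qib_get_credit() further down reverses the framing. A sketch of the layout, assuming the driver's QIB_MSN_MASK, QIB_AETH_CREDIT_SHIFT and QIB_AETH_CREDIT_MASK values of 0xffffff, 24 and 0x1f:

	#include <stdint.h>

	#define MSN_MASK	0xffffffu
	#define CREDIT_SHIFT	24
	#define CREDIT_MASK	0x1fu

	static uint32_t build_aeth(uint32_t msn, uint32_t credit_code)
	{
		return (msn & MSN_MASK) |
		       ((credit_code & CREDIT_MASK) << CREDIT_SHIFT);
	}

	static uint32_t aeth_credit_code(uint32_t aeth)
	{
		return (aeth >> CREDIT_SHIFT) & CREDIT_MASK;
	}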
-/**
- * qib_create_qp - create a queue pair for a device
- * @ibpd: the protection domain who's device we create the queue pair for
- * @init_attr: the attributes of the queue pair
- * @udata: user data for libibverbs.so
- *
- * Returns the queue pair on success, otherwise returns an errno.
- *
- * Called by the ib_create_qp() core verbs function.
- */
-struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
- struct qib_qp *qp;
- int err;
- struct qib_swqe *swq = NULL;
- struct qib_ibdev *dev;
- struct qib_devdata *dd;
- size_t sz;
- size_t sg_list_sz;
- struct ib_qp *ret;
- gfp_t gfp;
+ struct qib_qp_priv *priv;
+ priv = kzalloc(sizeof(*priv), gfp);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ priv->owner = qp;
- if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
- init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
- init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
- return ERR_PTR(-EINVAL);
-
- /* GFP_NOIO is applicable in RC QPs only */
- if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
- init_attr->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
-
- gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
- GFP_NOIO : GFP_KERNEL;
-
- /* Check receive queue parameters if no SRQ is specified. */
- if (!init_attr->srq) {
- if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
- init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- if (init_attr->cap.max_send_sge +
- init_attr->cap.max_send_wr +
- init_attr->cap.max_recv_sge +
- init_attr->cap.max_recv_wr == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
+ priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+ if (!priv->s_hdr) {
+ kfree(priv);
+ return ERR_PTR(-ENOMEM);
}
+ init_waitqueue_head(&priv->wait_dma);
+ INIT_WORK(&priv->s_work, _qib_do_send);
+ INIT_LIST_HEAD(&priv->iowait);
- switch (init_attr->qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- if (init_attr->port_num == 0 ||
- init_attr->port_num > ibpd->device->phys_port_cnt) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- case IB_QPT_UC:
- case IB_QPT_RC:
- case IB_QPT_UD:
- sz = sizeof(struct qib_sge) *
- init_attr->cap.max_send_sge +
- sizeof(struct qib_swqe);
- swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
- gfp, PAGE_KERNEL);
- if (swq == NULL) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
- sz = sizeof(*qp);
- sg_list_sz = 0;
- if (init_attr->srq) {
- struct qib_srq *srq = to_isrq(init_attr->srq);
-
- if (srq->rq.max_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (srq->rq.max_sge - 1);
- } else if (init_attr->cap.max_recv_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (init_attr->cap.max_recv_sge - 1);
- qp = kzalloc(sz + sg_list_sz, gfp);
- if (!qp) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_swq;
- }
- RCU_INIT_POINTER(qp->next, NULL);
- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
- if (!qp->s_hdr) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
- }
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
- if (init_attr->srq)
- sz = 0;
- else {
- qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
- qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
- sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
- sizeof(struct qib_rwqe);
- if (gfp != GFP_NOIO)
- qp->r_rq.wq = vmalloc_user(
- sizeof(struct qib_rwq) +
- qp->r_rq.size * sz);
- else
- qp->r_rq.wq = __vmalloc(
- sizeof(struct qib_rwq) +
- qp->r_rq.size * sz,
- gfp, PAGE_KERNEL);
-
- if (!qp->r_rq.wq) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
- }
- }
-
- /*
- * ib_create_qp() will initialize qp->ibqp
- * except for qp->ibqp.qp_num.
- */
- spin_lock_init(&qp->r_lock);
- spin_lock_init(&qp->s_lock);
- spin_lock_init(&qp->r_rq.lock);
- atomic_set(&qp->refcount, 0);
- init_waitqueue_head(&qp->wait);
- init_waitqueue_head(&qp->wait_dma);
- init_timer(&qp->s_timer);
- qp->s_timer.data = (unsigned long)qp;
- INIT_WORK(&qp->s_work, qib_do_send);
- INIT_LIST_HEAD(&qp->iowait);
- INIT_LIST_HEAD(&qp->rspwait);
- qp->state = IB_QPS_RESET;
- qp->s_wq = swq;
- qp->s_size = init_attr->cap.max_send_wr + 1;
- qp->s_max_sge = init_attr->cap.max_send_sge;
- if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
- qp->s_flags = QIB_S_SIGNAL_REQ_WR;
- dev = to_idev(ibpd->device);
- dd = dd_from_dev(dev);
- err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
- init_attr->port_num, gfp);
- if (err < 0) {
- ret = ERR_PTR(err);
- vfree(qp->r_rq.wq);
- goto bail_qp;
- }
- qp->ibqp.qp_num = err;
- qp->port_num = init_attr->port_num;
- qib_reset_qp(qp, init_attr->qp_type);
- break;
-
- default:
- /* Don't support raw QPs */
- ret = ERR_PTR(-ENOSYS);
- goto bail;
- }
-
- init_attr->cap.max_inline_data = 0;
-
- /*
- * Return the address of the RWQ as the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- if (!qp->r_rq.wq) {
- __u64 offset = 0;
-
- err = ib_copy_to_udata(udata, &offset,
- sizeof(offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else {
- u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
-
- qp->ip = qib_create_mmap_info(dev, s,
- ibpd->uobject->context,
- qp->r_rq.wq);
- if (!qp->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- err = ib_copy_to_udata(udata, &(qp->ip->offset),
- sizeof(qp->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- }
- }
-
- spin_lock(&dev->n_qps_lock);
- if (dev->n_qps_allocated == ib_qib_max_qps) {
- spin_unlock(&dev->n_qps_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_qps_allocated++;
- spin_unlock(&dev->n_qps_lock);
-
- if (qp->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = &qp->ibqp;
- goto bail;
-
-bail_ip:
- if (qp->ip)
- kref_put(&qp->ip->ref, qib_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
-bail_qp:
- kfree(qp->s_hdr);
- kfree(qp);
-bail_swq:
- vfree(swq);
-bail:
- return ret;
+ return priv;
}
-/**
- * qib_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
- *
- * Returns 0 on success.
- *
- * Note that this can be called while the QP is actively sending or
- * receiving!
- */
-int qib_destroy_qp(struct ib_qp *ibqp)
+void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_qp_priv *priv = qp->priv;
- /* Make sure HW and driver activity is stopped. */
- spin_lock_irq(&qp->s_lock);
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->iowait))
- list_del_init(&qp->iowait);
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
- spin_unlock_irq(&qp->s_lock);
- cancel_work_sync(&qp->s_work);
- del_timer_sync(&qp->s_timer);
- wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
- if (qp->s_tx) {
- qib_put_txreq(qp->s_tx);
- qp->s_tx = NULL;
- }
- remove_qp(dev, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
- clear_mr_refs(qp, 1);
- } else
- spin_unlock_irq(&qp->s_lock);
+ kfree(priv->s_hdr);
+ kfree(priv);
+}
- /* all user's cleaned up, mark it available */
- free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
- spin_lock(&dev->n_qps_lock);
- dev->n_qps_allocated--;
- spin_unlock(&dev->n_qps_lock);
+void qib_stop_send_queue(struct rvt_qp *qp)
+{
+ struct qib_qp_priv *priv = qp->priv;
- if (qp->ip)
- kref_put(&qp->ip->ref, qib_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- vfree(qp->s_wq);
- kfree(qp->s_hdr);
- kfree(qp);
- return 0;
+ cancel_work_sync(&priv->s_work);
+ del_timer_sync(&qp->s_timer);
}
-/**
- * qib_init_qpn_table - initialize the QP number table for a device
- * @qpt: the QPN table
- */
-void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
+void qib_quiesce_qp(struct rvt_qp *qp)
{
- spin_lock_init(&qpt->lock);
- qpt->last = 1; /* start with QPN 2 */
- qpt->nmaps = 1;
- qpt->mask = dd->qpn_mask;
+ struct qib_qp_priv *priv = qp->priv;
+
+ wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
+ if (priv->s_tx) {
+ qib_put_txreq(priv->s_tx);
+ priv->s_tx = NULL;
+ }
}
-/**
- * qib_free_qpn_table - free the QP number table for a device
- * @qpt: the QPN table
- */
-void qib_free_qpn_table(struct qib_qpn_table *qpt)
+void qib_flush_qp_waiters(struct rvt_qp *qp)
{
- int i;
+ struct qib_qp_priv *priv = qp->priv;
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
- if (qpt->map[i].page)
- free_page((unsigned long) qpt->map[i].page);
+ spin_lock(&dev->rdi.pending_lock);
+ if (!list_empty(&priv->iowait))
+ list_del_init(&priv->iowait);
+ spin_unlock(&dev->rdi.pending_lock);
}
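
None of these new helpers is called directly from qib's own fast path; rdmavt invokes them through the driver function table on rvt_dev_info. A sketch of the registration, assuming rdmavt's driver_f field names (the actual wiring lives in the qib_verbs.c part of this series, which is not shown in this hunk):

	static void wire_rvt_callbacks(struct qib_ibdev *ibdev)
	{
		ibdev->rdi.driver_f.qp_priv_alloc    = qib_qp_priv_alloc;
		ibdev->rdi.driver_f.qp_priv_free     = qib_qp_priv_free;
		ibdev->rdi.driver_f.notify_qp_reset  = qib_notify_qp_reset;
		ibdev->rdi.driver_f.notify_error_qp  = qib_notify_error_qp;
		ibdev->rdi.driver_f.quiesce_qp       = qib_quiesce_qp;
		ibdev->rdi.driver_f.stop_send_queue  = qib_stop_send_queue;
		ibdev->rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
	}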
/**
@@ -1280,7 +444,7 @@ void qib_free_qpn_table(struct qib_qpn_table *qpt)
*
* The QP s_lock should be held.
*/
-void qib_get_credit(struct qib_qp *qp, u32 aeth)
+void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
@@ -1290,31 +454,70 @@ void qib_get_credit(struct qib_qp *qp, u32 aeth)
* honor the credit field.
*/
if (credit == QIB_AETH_CREDIT_INVAL) {
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
- qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
- if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
qib_schedule_send(qp);
}
}
- } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+ } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
/* Compute new LSN (i.e., MSN + credit) */
credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
if (qib_cmp24(credit, qp->s_lsn) > 0) {
qp->s_lsn = credit;
- if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
qib_schedule_send(qp);
}
}
}
}
+/**
+ * qib_check_send_wqe - validate wr/wqe
+ * @qp: the QP
+ * @wqe: the built WQE
+ *
+ * Validate the WR/WQE. This is called prior to inserting
+ * the WQE into the ring, but after the WQE has been set up.
+ *
+ * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
+ */
+int qib_check_send_wqe(struct rvt_qp *qp,
+ struct rvt_swqe *wqe)
+{
+ struct rvt_ah *ah;
+ int ret = 0;
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ if (wqe->length > 0x80000000U)
+ return -EINVAL;
+ break;
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ ah = ibah_to_rvtah(wqe->ud_wr.ah);
+ if (wqe->length > (1 << ah->log_pmtu))
+ return -EINVAL;
+ /* progress hint */
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
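The tri-state return feeds the post-send path: negative rejects the WR, zero posts normally, and one asks the caller to run the send engine inline (useful for datagram QPs, where the packet can go out immediately). A sketch of a consumer, assuming a simplified stand-in for rdmavt's post-send loop and qib_do_send(qp) as the inline engine entry point:

	static int post_one_sketch(struct rvt_qp *qp, struct rvt_swqe *wqe)
	{
		int hint = qib_check_send_wqe(qp, wqe);

		if (hint < 0)
			return hint;	/* malformed WR: reject the post */
		/* ... insert the WQE into the send ring ... */
		if (hint == 1)
			qib_do_send(qp);	/* run the engine inline */
		else
			qib_schedule_send(qp);	/* defer to the tasklet */
		return 0;
	}
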
#ifdef CONFIG_DEBUG_FS
struct qib_qp_iter {
struct qib_ibdev *dev;
- struct qib_qp *qp;
+ struct rvt_qp *qp;
int n;
};
@@ -1340,14 +543,14 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
struct qib_ibdev *dev = iter->dev;
int n = iter->n;
int ret = 1;
- struct qib_qp *pqp = iter->qp;
- struct qib_qp *qp;
+ struct rvt_qp *pqp = iter->qp;
+ struct rvt_qp *qp;
- for (; n < dev->qp_table_size; n++) {
+ for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
if (pqp)
qp = rcu_dereference(pqp->next);
else
- qp = rcu_dereference(dev->qp_table[n]);
+ qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
pqp = qp;
if (qp) {
iter->qp = qp;
@@ -1364,10 +567,11 @@ static const char * const qp_type_str[] = {
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
- struct qib_swqe *wqe;
- struct qib_qp *qp = iter->qp;
+ struct rvt_swqe *wqe;
+ struct rvt_qp *qp = iter->qp;
+ struct qib_qp_priv *priv = qp->priv;
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
seq_printf(s,
"N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
iter->n,
@@ -1377,8 +581,8 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
wqe->wr.opcode,
qp->s_hdrwords,
qp->s_flags,
- atomic_read(&qp->s_dma_busy),
- !list_empty(&qp->iowait),
+ atomic_read(&priv->s_dma_busy),
+ !list_empty(&priv->iowait),
qp->timeout,
wqe->ssn,
qp->s_lsn,
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index e6b7556d5221..9088e26d3ac8 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -40,7 +40,7 @@
static void rc_timeout(unsigned long arg);
-static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
+static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 psn, u32 pmtu)
{
u32 len;
@@ -54,9 +54,9 @@ static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
return wqe->length - len;
}
-static void start_timer(struct qib_qp *qp)
+static void start_timer(struct rvt_qp *qp)
{
- qp->s_flags |= QIB_S_TIMER;
+ qp->s_flags |= RVT_S_TIMER;
qp->s_timer.function = rc_timeout;
/* 4.096 usec. * (1 << qp->timeout) */
qp->s_timer.expires = jiffies + qp->timeout_jiffies;
@@ -74,17 +74,17 @@ static void start_timer(struct qib_qp *qp)
* Note that we are in the responder's side of the QP context.
* Note the QP s_lock must be held.
*/
-static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
struct qib_other_headers *ohdr, u32 pmtu)
{
- struct qib_ack_entry *e;
+ struct rvt_ack_entry *e;
u32 hwords;
u32 len;
u32 bth0;
u32 bth2;
/* Don't send an ACK if we aren't supposed to. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto bail;
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
@@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
case OP(RDMA_READ_RESPONSE_ONLY):
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
if (e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
/* FALLTHROUGH */
@@ -112,7 +112,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
case OP(ACKNOWLEDGE):
/* Check for no next entry in the queue. */
if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
- if (qp->s_flags & QIB_S_ACK_PENDING)
+ if (qp->s_flags & RVT_S_ACK_PENDING)
goto normal;
goto bail;
}
@@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
/* Copy SGE state in case we need to resend */
qp->s_rdma_mr = e->rdma_sge.mr;
if (qp->s_rdma_mr)
- qib_get_mr(qp->s_rdma_mr);
+ rvt_get_mr(qp->s_rdma_mr);
qp->s_ack_rdma_sge.sge = e->rdma_sge;
qp->s_ack_rdma_sge.num_sge = 1;
qp->s_cur_sge = &qp->s_ack_rdma_sge;
@@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
qp->s_cur_sge = &qp->s_ack_rdma_sge;
qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
if (qp->s_rdma_mr)
- qib_get_mr(qp->s_rdma_mr);
+ rvt_get_mr(qp->s_rdma_mr);
len = qp->s_ack_rdma_sge.sge.sge_length;
if (len > pmtu)
len = pmtu;
@@ -196,7 +196,7 @@ normal:
* (see above).
*/
qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~QIB_S_ACK_PENDING;
+ qp->s_flags &= ~RVT_S_ACK_PENDING;
qp->s_cur_sge = NULL;
if (qp->s_nak_state)
ohdr->u.aeth =
@@ -218,7 +218,7 @@ normal:
bail:
qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
+ qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
return 0;
}
@@ -226,63 +226,60 @@ bail:
* qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
* @qp: a pointer to the QP
*
+ * Assumes the s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_rc_req(struct qib_qp *qp)
+int qib_make_rc_req(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
struct qib_other_headers *ohdr;
- struct qib_sge_state *ss;
- struct qib_swqe *wqe;
+ struct rvt_sge_state *ss;
+ struct rvt_swqe *wqe;
u32 hwords;
u32 len;
u32 bth0;
u32 bth2;
u32 pmtu = qp->pmtu;
char newreq;
- unsigned long flags;
int ret = 0;
int delta;
- ohdr = &qp->s_hdr->u.oth;
+ ohdr = &priv->s_hdr->u.oth;
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr->u.l.oth;
-
- /*
- * The lock is needed to synchronize between the sending tasklet,
- * the receive interrupt handler, and timeout resends.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
+ ohdr = &priv->s_hdr->u.l.oth;
/* Sending responses has higher priority over sending requests. */
- if ((qp->s_flags & QIB_S_RESP_PENDING) &&
+ if ((qp->s_flags & RVT_S_RESP_PENDING) &&
qib_make_rc_ack(dev, qp, ohdr, pmtu))
goto done;
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
- if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
+ if (atomic_read(&priv->s_dma_busy)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
goto done;
}
- if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
+ if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
goto bail;
if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
- qp->s_flags |= QIB_S_WAIT_PSN;
+ qp->s_flags |= RVT_S_WAIT_PSN;
goto bail;
}
qp->s_sending_psn = qp->s_psn;
@@ -294,10 +291,10 @@ int qib_make_rc_req(struct qib_qp *qp)
bth0 = 0;
/* Send a request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
switch (qp->s_state) {
default:
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/*
* Resend an old request or start a new one.
@@ -317,11 +314,11 @@ int qib_make_rc_req(struct qib_qp *qp)
*/
if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
qp->s_num_rd_atomic) {
- qp->s_flags |= QIB_S_WAIT_FENCE;
+ qp->s_flags |= RVT_S_WAIT_FENCE;
goto bail;
}
- wqe->psn = qp->s_next_psn;
newreq = 1;
+ qp->s_psn = wqe->psn;
}
/*
* Note that we have to be careful not to modify the
@@ -335,14 +332,12 @@ int qib_make_rc_req(struct qib_qp *qp)
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
/* If no credit, return. */
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
- wqe->lpsn = wqe->psn;
if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
qp->s_state = OP(SEND_FIRST);
len = pmtu;
break;
@@ -363,14 +358,14 @@ int qib_make_rc_req(struct qib_qp *qp)
break;
case IB_WR_RDMA_WRITE:
- if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
/* FALLTHROUGH */
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
@@ -380,9 +375,7 @@ int qib_make_rc_req(struct qib_qp *qp)
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
hwords += sizeof(struct ib_reth) / sizeof(u32);
- wqe->lpsn = wqe->psn;
if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
qp->s_state = OP(RDMA_WRITE_FIRST);
len = pmtu;
break;
@@ -411,19 +404,12 @@ int qib_make_rc_req(struct qib_qp *qp)
if (newreq) {
if (qp->s_num_rd_atomic >=
qp->s_max_rd_atomic) {
- qp->s_flags |= QIB_S_WAIT_RDMAR;
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
goto bail;
}
qp->s_num_rd_atomic++;
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- /*
- * Adjust s_next_psn to count the
- * expected number of responses.
- */
- if (len > pmtu)
- qp->s_next_psn += (len - 1) / pmtu;
- wqe->lpsn = qp->s_next_psn++;
}
ohdr->u.rc.reth.vaddr =
@@ -449,13 +435,12 @@ int qib_make_rc_req(struct qib_qp *qp)
if (newreq) {
if (qp->s_num_rd_atomic >=
qp->s_max_rd_atomic) {
- qp->s_flags |= QIB_S_WAIT_RDMAR;
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
goto bail;
}
qp->s_num_rd_atomic++;
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- wqe->lpsn = wqe->psn;
}
if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
qp->s_state = OP(COMPARE_SWAP);
@@ -498,11 +483,8 @@ int qib_make_rc_req(struct qib_qp *qp)
}
if (wqe->wr.opcode == IB_WR_RDMA_READ)
qp->s_psn = wqe->lpsn + 1;
- else {
+ else
qp->s_psn++;
- if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- }
break;
case OP(RDMA_READ_RESPONSE_FIRST):
@@ -522,8 +504,6 @@ int qib_make_rc_req(struct qib_qp *qp)
/* FALLTHROUGH */
case OP(SEND_MIDDLE):
bth2 = qp->s_psn++ & QIB_PSN_MASK;
- if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
len = qp->s_len;
if (len > pmtu) {
@@ -563,8 +543,6 @@ int qib_make_rc_req(struct qib_qp *qp)
/* FALLTHROUGH */
case OP(RDMA_WRITE_MIDDLE):
bth2 = qp->s_psn++ & QIB_PSN_MASK;
- if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
len = qp->s_len;
if (len > pmtu) {
@@ -618,9 +596,9 @@ int qib_make_rc_req(struct qib_qp *qp)
delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
if (delta && delta % QIB_PSN_CREDIT == 0)
bth2 |= IB_BTH_REQ_ACK;
- if (qp->s_flags & QIB_S_SEND_ONE) {
- qp->s_flags &= ~QIB_S_SEND_ONE;
- qp->s_flags |= QIB_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_SEND_ONE) {
+ qp->s_flags &= ~RVT_S_SEND_ONE;
+ qp->s_flags |= RVT_S_WAIT_ACK;
bth2 |= IB_BTH_REQ_ACK;
}
qp->s_len -= len;
@@ -629,13 +607,9 @@ int qib_make_rc_req(struct qib_qp *qp)
qp->s_cur_size = len;
qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
- ret = 1;
- goto unlock;
-
+ return 1;
bail:
- qp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->s_flags &= ~RVT_S_BUSY;
return ret;
}
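
The done/bail simplification here follows from the locking change noted above: qib_make_rc_req() is now entered with s_lock already held by the rdmavt send engine, so the builder no longer acquires or releases the lock itself and only clears RVT_S_BUSY on the bail path. A sketch of the resulting caller-side contract (illustrative only; the real loop is rdmavt's, and it drops the lock around the actual wire transmit):

	static void send_engine_sketch(struct rvt_qp *qp)
	{
		unsigned long flags;

		spin_lock_irqsave(&qp->s_lock, flags);
		while (qib_make_rc_req(qp)) {
			/* ... hand the built header/payload to SDMA or PIO ... */
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}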
@@ -647,7 +621,7 @@ unlock:
* Note that RDMA reads and atomics are handled in the
* send side QP state and tasklet.
*/
-void qib_send_rc_ack(struct qib_qp *qp)
+void qib_send_rc_ack(struct rvt_qp *qp)
{
struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
@@ -665,11 +639,11 @@ void qib_send_rc_ack(struct qib_qp *qp)
spin_lock_irqsave(&qp->s_lock, flags);
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto unlock;
/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
- if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
+ if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
goto queue_ack;
/* Construct the header with s_lock held so APM doesn't change it. */
@@ -758,9 +732,9 @@ void qib_send_rc_ack(struct qib_qp *qp)
goto done;
queue_ack:
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- ibp->n_rc_qacks++;
- qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ this_cpu_inc(*ibp->rvp.rc_qacks);
+ qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
qp->s_nak_state = qp->r_nak_state;
qp->s_ack_psn = qp->r_ack_psn;
@@ -782,10 +756,10 @@ done:
* for the given QP.
* Called at interrupt level with the QP s_lock held.
*/
-static void reset_psn(struct qib_qp *qp, u32 psn)
+static void reset_psn(struct rvt_qp *qp, u32 psn)
{
u32 n = qp->s_acked;
- struct qib_swqe *wqe = get_swqe_ptr(qp, n);
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
u32 opcode;
qp->s_cur = n;
@@ -808,7 +782,7 @@ static void reset_psn(struct qib_qp *qp, u32 psn)
n = 0;
if (n == qp->s_tail)
break;
- wqe = get_swqe_ptr(qp, n);
+ wqe = rvt_get_swqe_ptr(qp, n);
diff = qib_cmp24(psn, wqe->psn);
if (diff < 0)
break;
@@ -854,22 +828,22 @@ static void reset_psn(struct qib_qp *qp, u32 psn)
done:
qp->s_psn = psn;
/*
- * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
+ * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
* asynchronously before the send tasklet can get scheduled.
* Doing it in qib_make_rc_req() is too late.
*/
if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
(qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
- qp->s_flags |= QIB_S_WAIT_PSN;
+ qp->s_flags |= RVT_S_WAIT_PSN;
}
/*
* Back up requester to resend the last un-ACKed request.
* The QP r_lock and s_lock should be held and interrupts disabled.
*/
-static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
+static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
- struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
struct qib_ibport *ibp;
if (qp->s_retry == 0) {
@@ -878,7 +852,7 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
- qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
return;
} else /* XXX need to handle delayed completion */
return;
@@ -887,15 +861,15 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
ibp = to_iport(qp->ibqp.device, qp->port_num);
if (wqe->wr.opcode == IB_WR_RDMA_READ)
- ibp->n_rc_resends++;
+ ibp->rvp.n_rc_resends++;
else
- ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+ ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
- qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
- QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
- QIB_S_WAIT_ACK);
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
+ RVT_S_WAIT_ACK);
if (wait)
- qp->s_flags |= QIB_S_SEND_ONE;
+ qp->s_flags |= RVT_S_SEND_ONE;
reset_psn(qp, psn);
}
@@ -904,16 +878,16 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
*/
static void rc_timeout(unsigned long arg)
{
- struct qib_qp *qp = (struct qib_qp *)arg;
+ struct rvt_qp *qp = (struct rvt_qp *)arg;
struct qib_ibport *ibp;
unsigned long flags;
spin_lock_irqsave(&qp->r_lock, flags);
spin_lock(&qp->s_lock);
- if (qp->s_flags & QIB_S_TIMER) {
+ if (qp->s_flags & RVT_S_TIMER) {
ibp = to_iport(qp->ibqp.device, qp->port_num);
- ibp->n_rc_timeouts++;
- qp->s_flags &= ~QIB_S_TIMER;
+ ibp->rvp.n_rc_timeouts++;
+ qp->s_flags &= ~RVT_S_TIMER;
del_timer(&qp->s_timer);
qib_restart_rc(qp, qp->s_last_psn + 1, 1);
qib_schedule_send(qp);
@@ -927,12 +901,12 @@ static void rc_timeout(unsigned long arg)
*/
void qib_rc_rnr_retry(unsigned long arg)
{
- struct qib_qp *qp = (struct qib_qp *)arg;
+ struct rvt_qp *qp = (struct rvt_qp *)arg;
unsigned long flags;
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_RNR) {
- qp->s_flags &= ~QIB_S_WAIT_RNR;
+ if (qp->s_flags & RVT_S_WAIT_RNR) {
+ qp->s_flags &= ~RVT_S_WAIT_RNR;
del_timer(&qp->s_timer);
qib_schedule_send(qp);
}
@@ -943,14 +917,14 @@ void qib_rc_rnr_retry(unsigned long arg)
* Set qp->s_sending_psn to the next PSN after the given one.
* This would be psn+1 except when RDMA reads are present.
*/
-static void reset_sending_psn(struct qib_qp *qp, u32 psn)
+static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
u32 n = qp->s_last;
/* Find the work request corresponding to the given PSN. */
for (;;) {
- wqe = get_swqe_ptr(qp, n);
+ wqe = rvt_get_swqe_ptr(qp, n);
if (qib_cmp24(psn, wqe->lpsn) <= 0) {
if (wqe->wr.opcode == IB_WR_RDMA_READ)
qp->s_sending_psn = wqe->lpsn + 1;
@@ -968,16 +942,16 @@ static void reset_sending_psn(struct qib_qp *qp, u32 psn)
/*
* This should be called with the QP s_lock held and interrupts disabled.
*/
-void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
+void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
{
struct qib_other_headers *ohdr;
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
struct ib_wc wc;
unsigned i;
u32 opcode;
u32 psn;
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
/* Find out where the BTH is */
@@ -1002,22 +976,30 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
* there are still requests that haven't been acked.
*/
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
- (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
+ (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
start_timer(qp);
while (qp->s_last != qp->s_acked) {
- wqe = get_swqe_ptr(qp, qp->s_last);
+ u32 s_last;
+
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
+ s_last = qp->s_last;
+ if (++s_last >= qp->s_size)
+ s_last = 0;
+ qp->s_last = s_last;
+ /* see post_send() */
+ barrier();
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
/* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
@@ -1025,25 +1007,23 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
- qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
}
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
}
/*
* If we were waiting for sends to complete before resending,
* and they are now complete, restart sending.
*/
- if (qp->s_flags & QIB_S_WAIT_PSN &&
+ if (qp->s_flags & RVT_S_WAIT_PSN &&
qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- qp->s_flags &= ~QIB_S_WAIT_PSN;
+ qp->s_flags &= ~RVT_S_WAIT_PSN;
qp->s_sending_psn = qp->s_psn;
qp->s_sending_hpsn = qp->s_psn - 1;
qib_schedule_send(qp);
}
}
-static inline void update_last_psn(struct qib_qp *qp, u32 psn)
+static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
qp->s_last_psn = psn;
}
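
The reordering in the two hunks above (advance s_last, then barrier(), then complete) closes a race with the lock-free producer, and the same pattern is repeated in do_rc_completion() below: post_send() treats the ring as full while head + 1 == s_last, so the apparent intent is to publish the consumer index before the completion that might prompt the application to post again, with barrier() keeping the compiler from sinking the index store past the completion writes. A sketch of the producer-side check this ordering protects, assuming simplified ring indices:

	static int ring_full_sketch(u32 head, u32 last, u32 size)
	{
		u32 next = (head + 1 == size) ? 0 : head + 1;

		/* stays full until the completer publishes a new s_last */
		return next == last;
	}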
@@ -1053,8 +1033,8 @@ static inline void update_last_psn(struct qib_qp *qp, u32 psn)
* This is similar to qib_send_complete but has to check to be sure
* that the SGEs are not being referenced if the SWQE is being resent.
*/
-static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
- struct qib_swqe *wqe,
+static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ struct rvt_swqe *wqe,
struct qib_ibport *ibp)
{
struct ib_wc wc;
@@ -1067,13 +1047,21 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
*/
if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ u32 s_last;
+
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
+ s_last = qp->s_last;
+ if (++s_last >= qp->s_size)
+ s_last = 0;
+ qp->s_last = s_last;
+ /* see post_send() */
+ barrier();
/* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
@@ -1081,12 +1069,10 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
- qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
}
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
} else
- ibp->n_rc_delayed_comp++;
+ this_cpu_inc(*ibp->rvp.rc_delayed_comp);
qp->s_retry = qp->s_retry_cnt;
update_last_psn(qp, wqe->lpsn);
@@ -1100,7 +1086,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
if (++qp->s_cur >= qp->s_size)
qp->s_cur = 0;
qp->s_acked = qp->s_cur;
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
if (qp->s_acked != qp->s_tail) {
qp->s_state = OP(SEND_LAST);
qp->s_psn = wqe->psn;
@@ -1110,7 +1096,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
qp->s_acked = 0;
if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
qp->s_draining = 0;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
}
return wqe;
}
@@ -1126,19 +1112,19 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
* Called at interrupt level with the QP s_lock held.
* Returns 1 if OK, 0 if current operation should be aborted (NAK).
*/
-static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
+static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
u64 val, struct qib_ctxtdata *rcd)
{
struct qib_ibport *ibp;
enum ib_wc_status status;
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
int ret = 0;
u32 ack_psn;
int diff;
/* Remove QP from retry timer */
- if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
@@ -1151,7 +1137,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
ack_psn = psn;
if (aeth >> 29)
ack_psn--;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
ibp = to_iport(qp->ibqp.device, qp->port_num);
/*
@@ -1186,11 +1172,11 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
(opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
/* Retry this request. */
- if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
- qp->r_flags |= QIB_R_RDMAR_SEQ;
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
qib_restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_SEND;
+ qp->r_flags |= RVT_R_RSP_SEND;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait,
&rcd->qp_wait_list);
@@ -1213,14 +1199,14 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
qp->s_num_rd_atomic--;
/* Restart sending task if fence is complete */
- if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
+ if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
!qp->s_num_rd_atomic) {
- qp->s_flags &= ~(QIB_S_WAIT_FENCE |
- QIB_S_WAIT_ACK);
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE |
+ RVT_S_WAIT_ACK);
qib_schedule_send(qp);
- } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
- qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
- QIB_S_WAIT_ACK);
+ } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_ACK);
qib_schedule_send(qp);
}
}
@@ -1231,7 +1217,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
switch (aeth >> 29) {
case 0: /* ACK */
- ibp->n_rc_acks++;
+ this_cpu_inc(*ibp->rvp.rc_acks);
if (qp->s_acked != qp->s_tail) {
/*
* We are expecting more ACKs so
@@ -1248,8 +1234,8 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
qp->s_state = OP(SEND_LAST);
qp->s_psn = psn + 1;
}
- if (qp->s_flags & QIB_S_WAIT_ACK) {
- qp->s_flags &= ~QIB_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
qib_schedule_send(qp);
}
qib_get_credit(qp, aeth);
@@ -1260,10 +1246,10 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
goto bail;
case 1: /* RNR NAK */
- ibp->n_rnr_naks++;
+ ibp->rvp.n_rnr_naks++;
if (qp->s_acked == qp->s_tail)
goto bail;
- if (qp->s_flags & QIB_S_WAIT_RNR)
+ if (qp->s_flags & RVT_S_WAIT_RNR)
goto bail;
if (qp->s_rnr_retry == 0) {
status = IB_WC_RNR_RETRY_EXC_ERR;
@@ -1275,12 +1261,12 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
/* The last valid PSN is the previous PSN. */
update_last_psn(qp, psn - 1);
- ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+ ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
reset_psn(qp, psn);
- qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
- qp->s_flags |= QIB_S_WAIT_RNR;
+ qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
+ qp->s_flags |= RVT_S_WAIT_RNR;
qp->s_timer.function = qib_rc_rnr_retry;
qp->s_timer.expires = jiffies + usecs_to_jiffies(
ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
@@ -1296,7 +1282,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
QIB_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
- ibp->n_seq_naks++;
+ ibp->rvp.n_seq_naks++;
/*
* Back up to the responder's expected PSN.
* Note that we might get a NAK in the middle of an
@@ -1309,21 +1295,21 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
case 1: /* Invalid Request */
status = IB_WC_REM_INV_REQ_ERR;
- ibp->n_other_naks++;
+ ibp->rvp.n_other_naks++;
goto class_b;
case 2: /* Remote Access Error */
status = IB_WC_REM_ACCESS_ERR;
- ibp->n_other_naks++;
+ ibp->rvp.n_other_naks++;
goto class_b;
case 3: /* Remote Operation Error */
status = IB_WC_REM_OP_ERR;
- ibp->n_other_naks++;
+ ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
qib_send_complete(qp, wqe, status);
- qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1349,18 +1335,18 @@ bail:
* We have seen an out of sequence RDMA read middle or last packet.
* This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
*/
-static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
+static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
struct qib_ctxtdata *rcd)
{
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
/* Remove QP from retry timer */
- if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
while (qib_cmp24(psn, wqe->lpsn) > 0) {
if (wqe->wr.opcode == IB_WR_RDMA_READ ||
@@ -1370,11 +1356,11 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
wqe = do_rc_completion(qp, wqe, ibp);
}
- ibp->n_rdma_seq++;
- qp->r_flags |= QIB_R_RDMAR_SEQ;
+ ibp->rvp.n_rdma_seq++;
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
qib_restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_SEND;
+ qp->r_flags |= RVT_R_RSP_SEND;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
@@ -1399,12 +1385,12 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
static void qib_rc_rcv_resp(struct qib_ibport *ibp,
struct qib_other_headers *ohdr,
void *data, u32 tlen,
- struct qib_qp *qp,
+ struct rvt_qp *qp,
u32 opcode,
u32 psn, u32 hdrsize, u32 pmtu,
struct qib_ctxtdata *rcd)
{
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
enum ib_wc_status status;
unsigned long flags;
@@ -1425,7 +1411,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
* If send tasklet not running attempt to progress
* SDMA queue.
*/
- if (!(qp->s_flags & QIB_S_BUSY)) {
+ if (!(qp->s_flags & RVT_S_BUSY)) {
/* Acquire SDMA Lock */
spin_lock_irqsave(&ppd->sdma_lock, flags);
/* Invoke sdma make progress */
@@ -1437,11 +1423,12 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
}
spin_lock_irqsave(&qp->s_lock, flags);
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto ack_done;
/* Ignore invalid responses. */
- if (qib_cmp24(psn, qp->s_next_psn) >= 0)
+ smp_read_barrier_depends(); /* see post_one_send */
+ if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
/* Ignore duplicate responses. */
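
The reworked check reads the producer-owned s_next_psn with a single volatile load (plus a dependency barrier) so the 24-bit compare runs against one coherent snapshot rather than a value the compiler may reload. A sketch of the idiom, assuming a qib_cmp24()-style circular compare:

#include <stdint.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct sq {
	uint32_t s_next_psn;	/* advanced by the posting CPU */
};

/* 24-bit circular PSN compare in the style of qib_cmp24(). */
static inline int cmp24(uint32_t a, uint32_t b)
{
	return (int)((a - b) << 8) >> 8;
}

static int psn_is_valid(const struct sq *sq, uint32_t psn)
{
	/* A response PSN at or past the next PSN to send is bogus. */
	return cmp24(psn, ACCESS_ONCE(sq->s_next_psn)) < 0;
}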
@@ -1460,15 +1447,15 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
* Skip everything other than the PSN we expect, if we are waiting
* for a reply to a restarted RDMA read or atomic op.
*/
- if (qp->r_flags & QIB_R_RDMAR_SEQ) {
+ if (qp->r_flags & RVT_R_RDMAR_SEQ) {
if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
goto ack_done;
- qp->r_flags &= ~QIB_R_RDMAR_SEQ;
+ qp->r_flags &= ~RVT_R_RDMAR_SEQ;
}
if (unlikely(qp->s_acked == qp->s_tail))
goto ack_done;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
status = IB_WC_SUCCESS;
switch (opcode) {
@@ -1487,7 +1474,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
opcode != OP(RDMA_READ_RESPONSE_FIRST))
goto ack_done;
hdrsize += 4;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
goto ack_op_err;
/*
@@ -1515,10 +1502,10 @@ read_middle:
* We got a response so update the timeout.
* 4.096 usec. * (1 << qp->timeout)
*/
- qp->s_flags |= QIB_S_TIMER;
+ qp->s_flags |= RVT_S_TIMER;
mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
- if (qp->s_flags & QIB_S_WAIT_ACK) {
- qp->s_flags &= ~QIB_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
qib_schedule_send(qp);
}
@@ -1553,7 +1540,7 @@ read_middle:
* have to be careful to copy the data to the right
* location.
*/
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
wqe, psn, pmtu);
goto read_last;
@@ -1598,7 +1585,7 @@ ack_len_err:
ack_err:
if (qp->s_last == qp->s_acked) {
qib_send_complete(qp, wqe, status);
- qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
ack_done:
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1623,14 +1610,14 @@ bail:
*/
static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
void *data,
- struct qib_qp *qp,
+ struct rvt_qp *qp,
u32 opcode,
u32 psn,
int diff,
struct qib_ctxtdata *rcd)
{
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_ack_entry *e;
+ struct rvt_ack_entry *e;
unsigned long flags;
u8 i, prev;
int old_req;
@@ -1642,7 +1629,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
* Don't queue the NAK if we already sent one.
*/
if (!qp->r_nak_state) {
- ibp->n_rc_seqnak++;
+ ibp->rvp.n_rc_seqnak++;
qp->r_nak_state = IB_NAK_PSN_ERROR;
/* Use the expected PSN. */
qp->r_ack_psn = qp->r_psn;
@@ -1652,7 +1639,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
* Otherwise, we end up propagating congestion.
*/
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
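
Deferring the NAK this way avoids answering a congested fabric with an immediate burst of NAKs: the QP takes a reference, parks itself on the receive context's wait list, and responds when the list is drained. A sketch of the pattern with illustrative types (LIFO push for brevity; the driver appends FIFO):

#include <stdatomic.h>
#include <stdbool.h>

struct wait_node { struct wait_node *next; };

struct qp {
	_Atomic int refcount;
	bool queued;			/* stands in for list_empty(&qp->rspwait) */
	struct wait_node rspwait;
};

static struct wait_node *qp_wait_list;	/* per-context deferred QPs */

static void defer_response(struct qp *qp)
{
	if (qp->queued)
		return;				/* queue the QP at most once */
	qp->queued = true;
	atomic_fetch_add(&qp->refcount, 1);	/* pin the QP while queued */
	qp->rspwait.next = qp_wait_list;
	qp_wait_list = &qp->rspwait;
}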
@@ -1678,7 +1665,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
*/
e = NULL;
old_req = 1;
- ibp->n_rc_dupreq++;
+ ibp->rvp.n_rc_dupreq++;
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1732,7 +1719,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
if (unlikely(offset + len != e->rdma_sge.sge_length))
goto unlock_done;
if (e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
if (len != 0) {
@@ -1740,7 +1727,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
u64 vaddr = be64_to_cpu(reth->vaddr);
int ok;
- ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
IB_ACCESS_REMOTE_READ);
if (unlikely(!ok))
goto unlock_done;
@@ -1791,7 +1778,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
* which doesn't accept an RDMA read response or atomic
* response as an ACK for earlier SENDs or RDMA writes.
*/
- if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
+ if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
spin_unlock_irqrestore(&qp->s_lock, flags);
qp->r_nak_state = 0;
qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
@@ -1805,7 +1792,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
break;
}
qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags |= QIB_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
qp->r_nak_state = 0;
qib_schedule_send(qp);
@@ -1818,13 +1805,13 @@ send_ack:
return 0;
}
-void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
+void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
unsigned long flags;
int lastwqe;
spin_lock_irqsave(&qp->s_lock, flags);
- lastwqe = qib_error_qp(qp, err);
+ lastwqe = rvt_error_qp(qp, err);
spin_unlock_irqrestore(&qp->s_lock, flags);
if (lastwqe) {
@@ -1837,7 +1824,7 @@ void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
}
}
-static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
+static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
{
unsigned next;
@@ -1862,7 +1849,7 @@ static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
* Called at interrupt level.
*/
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_ibport *ibp = &rcd->ppd->ibport_data;
struct qib_other_headers *ohdr;
@@ -1948,8 +1935,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
break;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
- qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
+ qp->r_flags |= RVT_R_COMM_EST;
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -2026,9 +2013,9 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
qp->r_msn++;
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
break;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -2047,7 +2034,7 @@ send_last:
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
break;
@@ -2069,7 +2056,7 @@ send_last:
int ok;
/* Check rkey & NAK */
- ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
rkey, IB_ACCESS_REMOTE_WRITE);
if (unlikely(!ok))
goto nack_acc;
@@ -2096,7 +2083,7 @@ send_last:
goto send_last;
case OP(RDMA_READ_REQUEST): {
- struct qib_ack_entry *e;
+ struct rvt_ack_entry *e;
u32 len;
u8 next;
@@ -2114,7 +2101,7 @@ send_last:
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
reth = &ohdr->u.rc.reth;
@@ -2125,7 +2112,7 @@ send_last:
int ok;
/* Check rkey & NAK */
- ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
rkey, IB_ACCESS_REMOTE_READ);
if (unlikely(!ok))
goto nack_acc_unlck;
@@ -2157,7 +2144,7 @@ send_last:
qp->r_head_ack_queue = next;
/* Schedule the send tasklet. */
- qp->s_flags |= QIB_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
qib_schedule_send(qp);
goto sunlock;
@@ -2166,7 +2153,7 @@ send_last:
case OP(COMPARE_SWAP):
case OP(FETCH_ADD): {
struct ib_atomic_eth *ateth;
- struct qib_ack_entry *e;
+ struct rvt_ack_entry *e;
u64 vaddr;
atomic64_t *maddr;
u64 sdata;
@@ -2186,7 +2173,7 @@ send_last:
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
ateth = &ohdr->u.atomic_eth;
@@ -2196,7 +2183,7 @@ send_last:
goto nack_inv_unlck;
rkey = be32_to_cpu(ateth->rkey);
/* Check rkey & NAK */
- if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
vaddr, rkey,
IB_ACCESS_REMOTE_ATOMIC)))
goto nack_acc_unlck;
@@ -2208,7 +2195,7 @@ send_last:
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
be64_to_cpu(ateth->compare_data),
sdata);
- qib_put_mr(qp->r_sge.sge.mr);
+ rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
e->opcode = opcode;
e->sent = 0;
@@ -2221,7 +2208,7 @@ send_last:
qp->r_head_ack_queue = next;
/* Schedule the send tasklet. */
- qp->s_flags |= QIB_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
qib_schedule_send(qp);
goto sunlock;
@@ -2245,7 +2232,7 @@ rnr_nak:
qp->r_ack_psn = qp->r_psn;
/* Queue RNR NAK for later */
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
@@ -2257,7 +2244,7 @@ nack_op_err:
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
@@ -2271,7 +2258,7 @@ nack_inv:
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
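
Both atomic opcodes handled above must hand back the value the target held before the operation, which is why the FETCH_ADD path subtracts sdata from atomic64_add_return() and the COMPARE_SWAP path returns what cmpxchg() saw. A sketch with C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t ib_atomic_op(_Atomic uint64_t *target, bool is_fetch_add,
			     uint64_t sdata, uint64_t compare)
{
	if (is_fetch_add)
		return atomic_fetch_add(target, sdata);	/* old value */

	uint64_t expected = compare;
	/* On success *target becomes sdata; either way expected ends up
	 * holding the prior value, matching cmpxchg() semantics. */
	atomic_compare_exchange_strong(target, &expected, sdata);
	return expected;
}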
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index b1aa21bdd484..a5f07a64b228 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -79,16 +79,16 @@ const u32 ib_qib_rnr_table[32] = {
* Validate a RWQE and fill in the SGE state.
* Return 1 if OK.
*/
-static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
+static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
int i, j, ret;
struct ib_wc wc;
- struct qib_lkey_table *rkt;
- struct qib_pd *pd;
- struct qib_sge_state *ss;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_sge_state *ss;
- rkt = &to_idev(qp->ibqp.device)->lk_table;
- pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
ss = &qp->r_sge;
ss->sg_list = qp->r_sg_list;
qp->r_len = 0;
@@ -96,7 +96,7 @@ static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
if (wqe->sg_list[i].length == 0)
continue;
/* Check LKEY */
- if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
&wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
goto bad_lkey;
qp->r_len += wqe->sg_list[i].length;
@@ -109,9 +109,9 @@ static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
bad_lkey:
while (j) {
- struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+ struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
ss->num_sge = 0;
memset(&wc, 0, sizeof(wc));
@@ -120,7 +120,7 @@ bad_lkey:
wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
/* Signal solicited completion event. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
ret = 0;
bail:
return ret;
@@ -136,19 +136,19 @@ bail:
*
* Can be called from interrupt level.
*/
-int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
+int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
unsigned long flags;
- struct qib_rq *rq;
- struct qib_rwq *wq;
- struct qib_srq *srq;
- struct qib_rwqe *wqe;
+ struct rvt_rq *rq;
+ struct rvt_rwq *wq;
+ struct rvt_srq *srq;
+ struct rvt_rwqe *wqe;
void (*handler)(struct ib_event *, void *);
u32 tail;
int ret;
if (qp->ibqp.srq) {
- srq = to_isrq(qp->ibqp.srq);
+ srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
handler = srq->ibsrq.event_handler;
rq = &srq->rq;
} else {
@@ -158,7 +158,7 @@ int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
}
spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
ret = 0;
goto unlock;
}
@@ -174,7 +174,7 @@ int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
}
/* Make sure entry is read after head index is read. */
smp_rmb();
- wqe = get_rwqe_ptr(rq, tail);
+ wqe = rvt_get_rwqe_ptr(rq, tail);
/*
* Even though we update the tail index in memory, the verbs
* consumer is not supposed to post more entries until a
@@ -190,7 +190,7 @@ int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
qp->r_wr_id = wqe->wr_id;
ret = 1;
- set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
+ set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
if (handler) {
u32 n;
@@ -227,7 +227,7 @@ bail:
* Switch to alternate path.
* The QP s_lock should be held and interrupts disabled.
*/
-void qib_migrate_qp(struct qib_qp *qp)
+void qib_migrate_qp(struct rvt_qp *qp)
{
struct ib_event ev;
@@ -266,7 +266,7 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
* The s_lock will be acquired around the qib_migrate_qp() call.
*/
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, struct qib_qp *qp, u32 bth0)
+ int has_grh, struct rvt_qp *qp, u32 bth0)
{
__be64 guid;
unsigned long flags;
@@ -279,7 +279,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
goto err;
guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ if (!gid_ok(&hdr->u.l.grh.dgid,
+ ibp->rvp.gid_prefix, guid))
goto err;
if (!gid_ok(&hdr->u.l.grh.sgid,
qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
@@ -311,7 +312,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
goto err;
guid = get_sguid(ibp,
qp->remote_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ if (!gid_ok(&hdr->u.l.grh.dgid,
+ ibp->rvp.gid_prefix, guid))
goto err;
if (!gid_ok(&hdr->u.l.grh.sgid,
qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
@@ -353,12 +355,15 @@ err:
* receive interrupts since this is a connected protocol and all packets
* will pass through here.
*/
-static void qib_ruc_loopback(struct qib_qp *sqp)
+static void qib_ruc_loopback(struct rvt_qp *sqp)
{
struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_qp *qp;
- struct qib_swqe *wqe;
- struct qib_sge *sge;
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
+ struct rvt_sge *sge;
unsigned long flags;
struct ib_wc wc;
u64 sdata;
@@ -367,29 +372,33 @@ static void qib_ruc_loopback(struct qib_qp *sqp)
int release;
int ret;
+ rcu_read_lock();
/*
* Note that we check the responder QP state after
* checking the requester's state.
*/
- qp = qib_lookup_qpn(ibp, sqp->remote_qpn);
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
+ if (!qp)
+ goto done;
spin_lock_irqsave(&sqp->s_lock, flags);
/* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
- !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
+ !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
goto unlock;
- sqp->s_flags |= QIB_S_BUSY;
+ sqp->s_flags |= RVT_S_BUSY;
again:
- if (sqp->s_last == sqp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
goto clr_busy;
- wqe = get_swqe_ptr(sqp, sqp->s_last);
+ wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
/* Return if it is not OK to start a new work request. */
- if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
goto clr_busy;
/* We are in the error state, flush the work request. */
send_status = IB_WC_WR_FLUSH_ERR;
@@ -407,9 +416,9 @@ again:
}
spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
+ if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
/*
* For RC, the requester would timeout and retry so
* shortcut the timeouts and just signal too many retries.
@@ -458,7 +467,7 @@ again:
goto inv_err;
if (wqe->length == 0)
break;
- if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_WRITE)))
@@ -471,7 +480,7 @@ again:
case IB_WR_RDMA_READ:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
goto inv_err;
- if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_READ)))
@@ -489,7 +498,7 @@ again:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
- if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
IB_ACCESS_REMOTE_ATOMIC)))
@@ -502,7 +511,7 @@ again:
(u64) atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->atomic_wr.swap);
- qib_put_mr(qp->r_sge.sge.mr);
+ rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
goto send_comp;
@@ -526,11 +535,11 @@ again:
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (!release)
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
if (--sqp->s_sge.num_sge)
*sge = *sqp->s_sge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -543,9 +552,9 @@ again:
sqp->s_len -= len;
}
if (release)
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto send_comp;
if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
@@ -561,12 +570,12 @@ again:
wc.sl = qp->remote_ah_attr.sl;
wc.port_num = 1;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
send_comp:
spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->n_loop_pkts++;
+ ibp->rvp.n_loop_pkts++;
flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
qib_send_complete(sqp, wqe, send_status);
@@ -576,7 +585,7 @@ rnr_nak:
/* Handle RNR NAK */
if (qp->ibqp.qp_type == IB_QPT_UC)
goto send_comp;
- ibp->n_rnr_naks++;
+ ibp->rvp.n_rnr_naks++;
/*
* Note: we don't need the s_lock held since the BUSY flag
* makes this single threaded.
@@ -588,9 +597,9 @@ rnr_nak:
if (sqp->s_rnr_retry_cnt < 7)
sqp->s_rnr_retry--;
spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
goto clr_busy;
- sqp->s_flags |= QIB_S_WAIT_RNR;
+ sqp->s_flags |= RVT_S_WAIT_RNR;
sqp->s_timer.function = qib_rc_rnr_retry;
sqp->s_timer.expires = jiffies +
usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
@@ -618,9 +627,9 @@ serr:
spin_lock_irqsave(&sqp->s_lock, flags);
qib_send_complete(sqp, wqe, send_status);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
- sqp->s_flags &= ~QIB_S_BUSY;
+ sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags);
if (lastwqe) {
struct ib_event ev;
@@ -633,12 +642,11 @@ serr:
goto done;
}
clr_busy:
- sqp->s_flags &= ~QIB_S_BUSY;
+ sqp->s_flags &= ~RVT_S_BUSY;
unlock:
spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
- if (qp && atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rcu_read_unlock();
}
/**
@@ -663,7 +671,7 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
hdr->next_hdr = IB_GRH_NEXT_HDR;
hdr->hop_limit = grh->hop_limit;
/* The SGID is 32-bit aligned. */
- hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+ hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
hdr->sgid.global.interface_id = grh->sgid_index ?
ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
hdr->dgid = grh->dgid;
@@ -672,9 +680,10 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
return sizeof(struct ib_grh) / sizeof(u32);
}
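
qib_make_grh() can build the SGID with two 64-bit stores because a GID is simply the subnet prefix followed by the interface ID (port GUID), both big-endian on the wire. A sketch of that layout, with htobe64() from endian.h standing in for cpu_to_be64():

#include <stdint.h>
#include <endian.h>

union gid {
	uint8_t raw[16];
	struct {
		uint64_t subnet_prefix;	/* big-endian on the wire */
		uint64_t interface_id;	/* port GUID, big-endian */
	} global;
};

static void make_sgid(union gid *sgid, uint64_t prefix, uint64_t guid)
{
	sgid->global.subnet_prefix = htobe64(prefix);
	sgid->global.interface_id = htobe64(guid);
}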
-void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr,
u32 bth0, u32 bth2)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
u16 lrh0;
u32 nwords;
@@ -685,17 +694,18 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
nwords = (qp->s_cur_size + extra_bytes) >> 2;
lrh0 = QIB_LRH_BTH;
if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
+ qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
&qp->remote_ah_attr.grh,
qp->s_hdrwords, nwords);
lrh0 = QIB_LRH_GRH;
}
lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
qp->remote_ah_attr.sl << 4;
- qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+ priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
+ priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ priv->s_hdr->lrh[2] =
+ cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
qp->remote_ah_attr.src_path_bits);
bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
bth0 |= extra_bytes << 20;
@@ -707,20 +717,29 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}
+void _qib_do_send(struct work_struct *work)
+{
+ struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
+ s_work);
+ struct rvt_qp *qp = priv->owner;
+
+ qib_do_send(qp);
+}
+
/**
* qib_do_send - perform a send on a QP
- * @work: contains a pointer to the QP
+ * @qp: pointer to the QP
*
* Process entries in the send work queue until credit or queue is
* exhausted. Only allow one CPU to send a packet per QP (tasklet).
* Otherwise, two threads could send packets out of order.
*/
-void qib_do_send(struct work_struct *work)
+void qib_do_send(struct rvt_qp *qp)
{
- struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int (*make_req)(struct qib_qp *qp);
+ int (*make_req)(struct rvt_qp *qp);
unsigned long flags;
if ((qp->ibqp.qp_type == IB_QPT_RC ||
@@ -745,50 +764,59 @@ void qib_do_send(struct work_struct *work)
return;
}
- qp->s_flags |= QIB_S_BUSY;
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->s_flags |= RVT_S_BUSY;
do {
/* Check for a constructed packet to be sent. */
if (qp->s_hdrwords != 0) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
/*
* If the packet cannot be sent now, return and
* the send tasklet will be woken up later.
*/
- if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
+ if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
qp->s_cur_sge, qp->s_cur_size))
- break;
+ return;
/* Record that s_hdr is empty. */
qp->s_hdrwords = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
}
} while (make_req(qp));
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
}
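
The reshaped loop now holds s_lock while choosing work and drops it only around the hardware send, instead of running the whole loop unlocked. A rough sketch of that choreography under a pthread mutex (the stub make_req()/wire_send() stand in for the driver functions; the real code also rechecks QP state on every iteration):

#include <pthread.h>
#include <stdbool.h>

struct sendq {
	pthread_mutex_t lock;
	int hdrwords;			/* nonzero: a packet is built */
};

static bool wire_send(struct sendq *q) { (void)q; return true; }  /* stub */
static bool make_req(struct sendq *q)  { (void)q; return false; } /* stub */

static void do_send(struct sendq *q)
{
	pthread_mutex_lock(&q->lock);
	do {
		if (q->hdrwords) {
			pthread_mutex_unlock(&q->lock);	/* never send locked */
			if (!wire_send(q))
				return;		/* rescheduled later */
			q->hdrwords = 0;
			pthread_mutex_lock(&q->lock);
		}
	} while (make_req(q));			/* builds under the lock */
	pthread_mutex_unlock(&q->lock);
}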
/*
* This should be called with s_lock held.
*/
-void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
{
u32 old_last, last;
unsigned i;
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
+ last = qp->s_last;
+ old_last = last;
+ if (++last >= qp->s_size)
+ last = 0;
+ qp->s_last = last;
+ /* See post_send() */
+ barrier();
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
/* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
status != IB_WC_SUCCESS) {
struct ib_wc wc;
@@ -800,15 +828,10 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
wc.qp = &qp->ibqp;
if (status == IB_WC_SUCCESS)
wc.byte_len = wqe->length;
- qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
status != IB_WC_SUCCESS);
}
- last = qp->s_last;
- old_last = last;
- if (++last >= qp->s_size)
- last = 0;
- qp->s_last = last;
if (qp->s_acked == old_last)
qp->s_acked = last;
if (qp->s_cur == old_last)
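
The hunk above also reorders qib_send_complete(): s_last now advances before the WQE's references are dropped, with a compiler barrier in between, so a poster that sees the freed slot can never observe a half-released entry. A minimal sketch of that ordering:

#include <stdint.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct ring {
	uint32_t s_last;	/* oldest in-flight entry */
	uint32_t s_size;
};

static void complete_entry(struct ring *r)
{
	uint32_t last = r->s_last;

	if (++last >= r->s_size)
		last = 0;
	r->s_last = last;	/* publish the free slot first... */
	barrier();		/* ...then release MRs, post the CQE, etc. */
}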
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index c6d6a54d2e19..891873b38a1e 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -513,7 +513,9 @@ int qib_sdma_running(struct qib_pportdata *ppd)
static void complete_sdma_err_req(struct qib_pportdata *ppd,
struct qib_verbs_txreq *tx)
{
- atomic_inc(&tx->qp->s_dma_busy);
+ struct qib_qp_priv *priv = tx->qp->priv;
+
+ atomic_inc(&priv->s_dma_busy);
/* no sdma descriptors, so no unmap_desc */
tx->txreq.start_idx = 0;
tx->txreq.next_descq_idx = 0;
@@ -531,18 +533,19 @@ static void complete_sdma_err_req(struct qib_pportdata *ppd,
* 3) The SGE addresses are suitable for passing to dma_map_single().
*/
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
- struct qib_sge_state *ss, u32 dwords,
+ struct rvt_sge_state *ss, u32 dwords,
struct qib_verbs_txreq *tx)
{
unsigned long flags;
- struct qib_sge *sge;
- struct qib_qp *qp;
+ struct rvt_sge *sge;
+ struct rvt_qp *qp;
int ret = 0;
u16 tail;
__le64 *descqp;
u64 sdmadesc[2];
u32 dwoffset;
dma_addr_t addr;
+ struct qib_qp_priv *priv;
spin_lock_irqsave(&ppd->sdma_lock, flags);
@@ -621,7 +624,7 @@ retry:
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -644,8 +647,8 @@ retry:
descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
-
- atomic_inc(&tx->qp->s_dma_busy);
+ priv = tx->qp->priv;
+ atomic_inc(&priv->s_dma_busy);
tx->txreq.next_descq_idx = tail;
ppd->dd->f_sdma_update_tail(ppd, tail);
ppd->sdma_descq_added += tx->txreq.sg_count;
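
The descriptor-fill loop above walks SGEs whose memory region is mapped as a two-level table: m indexes the map, n the entry within a map of RVT_SEGSZ entries. A sketch of that walk under assumed (simplified) types:

#include <stddef.h>

#define SEGSZ 64			/* entries per map, like RVT_SEGSZ */

struct seg { void *vaddr; size_t length; };
struct mr_map { struct seg segs[SEGSZ]; };

struct mr {
	struct mr_map **map;		/* two-level segment table */
	size_t mapsz;			/* number of maps */
};

struct sge {
	struct mr *mr;
	void *vaddr;
	size_t length;			/* bytes left in current segment */
	unsigned m, n;			/* current map / entry indices */
};

/* Advance to the next segment once the current one is consumed. */
static int sge_next_segment(struct sge *sge)
{
	if (++sge->n >= SEGSZ) {
		if (++sge->m >= sge->mr->mapsz)
			return 0;	/* ran off the end of the MR */
		sge->n = 0;
	}
	sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
	sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	return 1;
}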
@@ -663,13 +666,14 @@ unmap:
unmap_desc(ppd, tail);
}
qp = tx->qp;
+ priv = qp->priv;
qib_put_txreq(tx);
spin_lock(&qp->r_lock);
spin_lock(&qp->s_lock);
if (qp->ibqp.qp_type == IB_QPT_RC) {
/* XXX what about error sending RDMA read responses? */
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
- qib_error_qp(qp, IB_WC_GENERAL_ERR);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
+ rvt_error_qp(qp, IB_WC_GENERAL_ERR);
} else if (qp->s_wqe)
qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock(&qp->s_lock);
@@ -679,8 +683,9 @@ unmap:
busy:
qp = tx->qp;
+ priv = qp->priv;
spin_lock(&qp->s_lock);
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
struct qib_ibdev *dev;
/*
@@ -690,19 +695,19 @@ busy:
*/
tx->ss = ss;
tx->dwords = dwords;
- qp->s_tx = tx;
+ priv->s_tx = tx;
dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->iowait)) {
+ spin_lock(&dev->rdi.pending_lock);
+ if (list_empty(&priv->iowait)) {
struct qib_ibport *ibp;
ibp = &ppd->ibport_data;
- ibp->n_dmawait++;
- qp->s_flags |= QIB_S_WAIT_DMA_DESC;
- list_add_tail(&qp->iowait, &dev->dmawait);
+ ibp->rvp.n_dmawait++;
+ qp->s_flags |= RVT_S_WAIT_DMA_DESC;
+ list_add_tail(&priv->iowait, &dev->dmawait);
}
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock(&dev->rdi.pending_lock);
+ qp->s_flags &= ~RVT_S_BUSY;
spin_unlock(&qp->s_lock);
ret = -EBUSY;
} else {
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
deleted file mode 100644
index d6235931a1ba..000000000000
--- a/drivers/infiniband/hw/qib/qib_srq.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "qib_verbs.h"
-
-/**
- * qib_post_srq_receive - post a receive on a shared receive queue
- * @ibsrq: the SRQ to post the receive on
- * @wr: the list of work requests to post
- * @bad_wr: A pointer to the first WR to cause a problem is put here
- *
- * This may be called from interrupt context.
- */
-int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
- struct qib_rwq *wq;
- unsigned long flags;
- int ret;
-
- for (; wr; wr = wr->next) {
- struct qib_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > srq->rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&srq->rq.lock, flags);
- wq = srq->rq.wq;
- next = wq->head + 1;
- if (next >= srq->rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&srq->rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
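
The deleted post path is a classic single-producer ring publish: one slot always stays empty (head + 1 == tail means full), the entry is written completely, and only then is head made visible; the smp_wmb() provides the ordering. A sketch with C11 release/acquire standing in for the kernel barriers:

#include <stdatomic.h>
#include <stdbool.h>

#define RQ_SIZE 128

struct rwq {
	_Atomic unsigned head;		/* producer writes, consumer reads */
	_Atomic unsigned tail;		/* consumer writes, producer reads */
	unsigned long wr_id[RQ_SIZE];
};

static bool post_recv(struct rwq *wq, unsigned long wr_id)
{
	unsigned head = atomic_load_explicit(&wq->head, memory_order_relaxed);
	unsigned next = (head + 1) % RQ_SIZE;

	if (next == atomic_load_explicit(&wq->tail, memory_order_acquire))
		return false;		/* full: one slot is kept free */

	wq->wr_id[head] = wr_id;	/* write the entry first */
	atomic_store_explicit(&wq->head, next, memory_order_release);
	return true;
}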
-/**
- * qib_create_srq - create a shared receive queue
- * @ibpd: the protection domain of the SRQ to create
- * @srq_init_attr: the attributes of the SRQ
- * @udata: data from libibverbs when creating a user SRQ
- */
-struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
-{
- struct qib_ibdev *dev = to_idev(ibpd->device);
- struct qib_srq *srq;
- u32 sz;
- struct ib_srq *ret;
-
- if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
- ret = ERR_PTR(-ENOSYS);
- goto done;
- }
-
- if (srq_init_attr->attr.max_sge == 0 ||
- srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
- srq_init_attr->attr.max_wr == 0 ||
- srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
-
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
-
- /*
- * Need to use vmalloc() if we want to support large #s of entries.
- */
- srq->rq.size = srq_init_attr->attr.max_wr + 1;
- srq->rq.max_sge = srq_init_attr->attr.max_sge;
- sz = sizeof(struct ib_sge) * srq->rq.max_sge +
- sizeof(struct qib_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
- if (!srq->rq.wq) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_srq;
- }
-
- /*
- * Return the address of the RWQ as the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- int err;
- u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;
-
- srq->ip =
- qib_create_mmap_info(dev, s, ibpd->uobject->context,
- srq->rq.wq);
- if (!srq->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_wq;
- }
-
- err = ib_copy_to_udata(udata, &srq->ip->offset,
- sizeof(srq->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else
- srq->ip = NULL;
-
- /*
- * ib_create_srq() will initialize srq->ibsrq.
- */
- spin_lock_init(&srq->rq.lock);
- srq->rq.wq->head = 0;
- srq->rq.wq->tail = 0;
- srq->limit = srq_init_attr->attr.srq_limit;
-
- spin_lock(&dev->n_srqs_lock);
- if (dev->n_srqs_allocated == ib_qib_max_srqs) {
- spin_unlock(&dev->n_srqs_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_srqs_allocated++;
- spin_unlock(&dev->n_srqs_lock);
-
- if (srq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = &srq->ibsrq;
- goto done;
-
-bail_ip:
- kfree(srq->ip);
-bail_wq:
- vfree(srq->rq.wq);
-bail_srq:
- kfree(srq);
-done:
- return ret;
-}
-
-/**
- * qib_modify_srq - modify a shared receive queue
- * @ibsrq: the SRQ to modify
- * @attr: the new attributes of the SRQ
- * @attr_mask: indicates which attributes to modify
- * @udata: user data for libibverbs.so
- */
-int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
- struct qib_rwq *wq;
- int ret = 0;
-
- if (attr_mask & IB_SRQ_MAX_WR) {
- struct qib_rwq *owq;
- struct qib_rwqe *p;
- u32 sz, size, n, head, tail;
-
- /* Check that the requested sizes are below the limits. */
- if ((attr->max_wr > ib_qib_max_srq_wrs) ||
- ((attr_mask & IB_SRQ_LIMIT) ?
- attr->srq_limit : srq->limit) > attr->max_wr) {
- ret = -EINVAL;
- goto bail;
- }
-
- sz = sizeof(struct qib_rwqe) +
- srq->rq.max_sge * sizeof(struct ib_sge);
- size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
- if (!wq) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /* Check that we can write the offset to mmap. */
- if (udata && udata->inlen >= sizeof(__u64)) {
- __u64 offset_addr;
- __u64 offset = 0;
-
- ret = ib_copy_from_udata(&offset_addr, udata,
- sizeof(offset_addr));
- if (ret)
- goto bail_free;
- udata->outbuf =
- (void __user *) (unsigned long) offset_addr;
- ret = ib_copy_to_udata(udata, &offset,
- sizeof(offset));
- if (ret)
- goto bail_free;
- }
-
- spin_lock_irq(&srq->rq.lock);
- /*
- * validate head and tail pointer values and compute
- * the number of remaining WQEs.
- */
- owq = srq->rq.wq;
- head = owq->head;
- tail = owq->tail;
- if (head >= srq->rq.size || tail >= srq->rq.size) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- n = head;
- if (n < tail)
- n += srq->rq.size - tail;
- else
- n -= tail;
- if (size <= n) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- n = 0;
- p = wq->wq;
- while (tail != head) {
- struct qib_rwqe *wqe;
- int i;
-
- wqe = get_rwqe_ptr(&srq->rq, tail);
- p->wr_id = wqe->wr_id;
- p->num_sge = wqe->num_sge;
- for (i = 0; i < wqe->num_sge; i++)
- p->sg_list[i] = wqe->sg_list[i];
- n++;
- p = (struct qib_rwqe *)((char *) p + sz);
- if (++tail >= srq->rq.size)
- tail = 0;
- }
- srq->rq.wq = wq;
- srq->rq.size = size;
- wq->head = n;
- wq->tail = 0;
- if (attr_mask & IB_SRQ_LIMIT)
- srq->limit = attr->srq_limit;
- spin_unlock_irq(&srq->rq.lock);
-
- vfree(owq);
-
- if (srq->ip) {
- struct qib_mmap_info *ip = srq->ip;
- struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
- u32 s = sizeof(struct qib_rwq) + size * sz;
-
- qib_update_mmap_info(dev, ip, s, wq);
-
- /*
- * Return the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->inlen >= sizeof(__u64)) {
- ret = ib_copy_to_udata(udata, &ip->offset,
- sizeof(ip->offset));
- if (ret)
- goto bail;
- }
-
- /*
- * Put user mapping info onto the pending list
- * unless it already is on the list.
- */
- spin_lock_irq(&dev->pending_lock);
- if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps,
- &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
- } else if (attr_mask & IB_SRQ_LIMIT) {
- spin_lock_irq(&srq->rq.lock);
- if (attr->srq_limit >= srq->rq.size)
- ret = -EINVAL;
- else
- srq->limit = attr->srq_limit;
- spin_unlock_irq(&srq->rq.lock);
- }
- goto bail;
-
-bail_unlock:
- spin_unlock_irq(&srq->rq.lock);
-bail_free:
- vfree(wq);
-bail:
- return ret;
-}
-
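
The resize path above compacts the live entries (everything between tail and head) to the front of a freshly allocated ring, then sets head to the live count and tail to zero. A sketch of that compaction with fixed-size entries standing in for the variable-size WQEs:

#include <stdlib.h>

struct ring {
	unsigned head, tail, size;
	int *wq;
};

/* Returns 0 on success, -1 if allocation fails or the entries don't fit. */
static int ring_resize(struct ring *r, unsigned new_size)
{
	unsigned n = (r->head >= r->tail) ? r->head - r->tail
					  : r->head + r->size - r->tail;
	if (new_size <= n)
		return -1;			/* like the -EINVAL above */

	int *nwq = calloc(new_size, sizeof(*nwq));
	if (!nwq)
		return -1;

	for (unsigned i = 0, tail = r->tail; tail != r->head; i++) {
		nwq[i] = r->wq[tail];		/* compact live entries */
		if (++tail >= r->size)
			tail = 0;
	}
	free(r->wq);
	r->wq = nwq;
	r->size = new_size;
	r->head = n;				/* next free slot */
	r->tail = 0;
	return 0;
}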
-int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
-
- attr->max_wr = srq->rq.size - 1;
- attr->max_sge = srq->rq.max_sge;
- attr->srq_limit = srq->limit;
- return 0;
-}
-
-/**
- * qib_destroy_srq - destroy a shared receive queue
- * @ibsrq: the SRQ to destroy
- */
-int qib_destroy_srq(struct ib_srq *ibsrq)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
- struct qib_ibdev *dev = to_idev(ibsrq->device);
-
- spin_lock(&dev->n_srqs_lock);
- dev->n_srqs_allocated--;
- spin_unlock(&dev->n_srqs_lock);
- if (srq->ip)
- kref_put(&srq->ip->ref, qib_release_mmap_info);
- else
- vfree(srq->rq.wq);
- kfree(srq);
-
- return 0;
-}
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 81f56cdff2bc..fe4cf5e4acec 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -406,7 +406,13 @@ static struct kobj_type qib_sl2vl_ktype = {
#define QIB_DIAGC_ATTR(N) \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = { .name = __stringify(N), .mode = 0664 }, \
- .counter = offsetof(struct qib_ibport, n_##N) \
+ .counter = offsetof(struct qib_ibport, rvp.n_##N) \
+ }
+
+#define QIB_DIAGC_ATTR_PER_CPU(N) \
+ static struct qib_diagc_attr qib_diagc_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0664 }, \
+ .counter = offsetof(struct qib_ibport, rvp.z_##N) \
}
struct qib_diagc_attr {
@@ -414,10 +420,11 @@ struct qib_diagc_attr {
size_t counter;
};
+QIB_DIAGC_ATTR_PER_CPU(rc_acks);
+QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
+QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);
+
QIB_DIAGC_ATTR(rc_resends);
-QIB_DIAGC_ATTR(rc_acks);
-QIB_DIAGC_ATTR(rc_qacks);
-QIB_DIAGC_ATTR(rc_delayed_comp);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
@@ -449,6 +456,35 @@ static struct attribute *diagc_default_attributes[] = {
NULL
};
+static u64 get_all_cpu_total(u64 __percpu *cntr)
+{
+ int cpu;
+ u64 counter = 0;
+
+ for_each_possible_cpu(cpu)
+ counter += *per_cpu_ptr(cntr, cpu);
+ return counter;
+}
+
+#define def_write_per_cpu(cntr) \
+static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data) \
+{ \
+ struct qib_devdata *dd = ppd->dd; \
+ struct qib_ibport *qibp = &ppd->ibport_data; \
+ /* A write can only zero the counter */ \
+ if (data == 0) \
+ qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
+ else \
+ qib_dev_err(dd, "Per CPU cntrs can only be zeroed"); \
+}
+
+def_write_per_cpu(rc_acks)
+def_write_per_cpu(rc_qacks)
+def_write_per_cpu(rc_delayed_comp)
+
+#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
+ qibp->rvp.z_##cntr)
+
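
Because the per-CPU cells are only ever incremented by their owning CPU, a "write of zero" can't actually clear them; instead the store path records a baseline (z_) and reads report the total minus that baseline. A sketch of the idiom with plain arrays standing in for percpu variables:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

static uint64_t rc_acks[NR_CPUS];	/* stand-in for the percpu counter */
static uint64_t z_rc_acks;		/* baseline recorded at "zeroing" */

static uint64_t total(const uint64_t *cntr)
{
	uint64_t sum = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += cntr[cpu];
	return sum;
}

static uint64_t read_rc_acks(void)
{
	return total(rc_acks) - z_rc_acks;
}

static void zero_rc_acks(void)
{
	z_rc_acks = total(rc_acks);	/* writing 0 just moves the baseline */
}

int main(void)
{
	rc_acks[0] = 10; rc_acks[2] = 5;
	printf("%llu\n", (unsigned long long)read_rc_acks());	/* 15 */
	zero_rc_acks();
	rc_acks[1] += 3;
	printf("%llu\n", (unsigned long long)read_rc_acks());	/* 3 */
	return 0;
}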
static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
@@ -458,7 +494,16 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
container_of(kobj, struct qib_pportdata, diagc_kobj);
struct qib_ibport *qibp = &ppd->ibport_data;
- return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
+ if (!strncmp(dattr->attr.name, "rc_acks", 7))
+ return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks));
+ else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
+ return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks));
+ else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
+ return sprintf(buf, "%llu\n",
+ READ_PER_CPU_CNTR(rc_delayed_comp));
+ else
+ return sprintf(buf, "%u\n",
+ *(u32 *)((char *)qibp + dattr->counter));
}
static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
@@ -475,7 +520,15 @@ static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- *(u32 *)((char *) qibp + dattr->counter) = val;
+
+ if (!strncmp(dattr->attr.name, "rc_acks", 7))
+ write_per_cpu_rc_acks(ppd, val);
+ else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
+ write_per_cpu_rc_qacks(ppd, val);
+ else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
+ write_per_cpu_rc_delayed_comp(ppd, val);
+ else
+ *(u32 *)((char *)qibp + dattr->counter) = val;
return size;
}
@@ -502,7 +555,7 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
@@ -511,7 +564,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
int ret;
@@ -533,7 +586,7 @@ static ssize_t show_boardversion(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
@@ -545,7 +598,7 @@ static ssize_t show_localbus_info(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
@@ -557,7 +610,7 @@ static ssize_t show_nctxts(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
/* Return the number of user ports (contexts) available. */
@@ -572,7 +625,7 @@ static ssize_t show_nfreectxts(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
/* Return the number of free user ports (contexts) available. */
@@ -583,7 +636,7 @@ static ssize_t show_serial(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
buf[sizeof(dd->serial)] = '\0';
@@ -597,7 +650,7 @@ static ssize_t store_chip_reset(struct device *device,
size_t count)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
int ret;
@@ -618,7 +671,7 @@ static ssize_t show_tempsense(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
int ret;
int idx;
@@ -778,7 +831,7 @@ bail:
*/
int qib_verbs_register_sysfs(struct qib_devdata *dd)
{
- struct ib_device *dev = &dd->verbs_dev.ibdev;
+ struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
int i, ret;
for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 06a564589c35..7bdbc79ceaa3 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -41,61 +41,62 @@
* qib_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
*
+ * Assumes the s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_uc_req(struct qib_qp *qp)
+int qib_make_uc_req(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_other_headers *ohdr;
- struct qib_swqe *wqe;
- unsigned long flags;
+ struct rvt_swqe *wqe;
u32 hwords;
u32 bth0;
u32 len;
u32 pmtu = qp->pmtu;
int ret = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
- if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
+ if (atomic_read(&priv->s_dma_busy)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
- ohdr = &qp->s_hdr->u.oth;
+ ohdr = &priv->s_hdr->u.oth;
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr->u.l.oth;
+ ohdr = &priv->s_hdr->u.l.oth;
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
bth0 = 0;
/* Get the next send request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
qp->s_wqe = NULL;
switch (qp->s_state) {
default:
- if (!(ib_qib_state_ops[qp->state] &
- QIB_PROCESS_NEXT_SEND_OK))
+ if (!(ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- if (qp->s_cur == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_cur == ACCESS_ONCE(qp->s_head))
goto bail;
/*
* Start a new request.
*/
- wqe->psn = qp->s_next_psn;
- qp->s_psn = qp->s_next_psn;
+ qp->s_psn = wqe->psn;
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
qp->s_sge.num_sge = wqe->wr.num_sge;
@@ -214,15 +215,11 @@ int qib_make_uc_req(struct qib_qp *qp)
qp->s_cur_sge = &qp->s_sge;
qp->s_cur_size = len;
qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- qp->s_next_psn++ & QIB_PSN_MASK);
+ qp->s_psn++ & QIB_PSN_MASK);
done:
- ret = 1;
- goto unlock;
-
+ return 1;
bail:
- qp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->s_flags &= ~RVT_S_BUSY;
return ret;
}
@@ -240,7 +237,7 @@ unlock:
* Called at interrupt level.
*/
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_other_headers *ohdr;
u32 opcode;
@@ -278,10 +275,10 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
inv:
if (qp->r_state == OP(SEND_FIRST) ||
qp->r_state == OP(SEND_MIDDLE)) {
- set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
} else
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
qp->r_state = OP(SEND_LAST);
switch (opcode) {
case OP(SEND_FIRST):
@@ -328,8 +325,8 @@ inv:
goto inv;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
- qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
+ qp->r_flags |= RVT_R_COMM_EST;
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -346,7 +343,7 @@ inv:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
- if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
qp->r_sge = qp->s_rdma_read_sge;
else {
ret = qib_get_rwqe(qp, 0);
@@ -400,7 +397,7 @@ send_last:
goto rewind;
wc.opcode = IB_WC_RECV;
qib_copy_sge(&qp->r_sge, data, tlen, 0);
- qib_put_ss(&qp->s_rdma_read_sge);
+ rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -414,7 +411,7 @@ last_imm:
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
break;
@@ -438,7 +435,7 @@ rdma_first:
int ok;
/* Check rkey */
- ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
if (unlikely(!ok))
goto drop;
@@ -483,8 +480,8 @@ rdma_last_imm:
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
- qib_put_ss(&qp->s_rdma_read_sge);
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
+ rvt_put_ss(&qp->s_rdma_read_sge);
else {
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
@@ -495,7 +492,7 @@ rdma_last_imm:
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
goto last_imm;
case OP(RDMA_WRITE_LAST):
@@ -511,7 +508,7 @@ rdma_last:
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
break;
default:
@@ -523,10 +520,10 @@ rdma_last:
return;
rewind:
- set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
return;
op_err:
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 59193f67ea78..d9502137de62 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -32,6 +32,7 @@
*/
#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
#include "qib.h"
#include "qib_mad.h"
@@ -46,22 +47,26 @@
* Note that the receive interrupt handler may be calling qib_ud_rcv()
* while this is being called.
*/
-static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_pportdata *ppd;
- struct qib_qp *qp;
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ struct rvt_qp *qp;
struct ib_ah_attr *ah_attr;
unsigned long flags;
- struct qib_sge_state ssge;
- struct qib_sge *sge;
+ struct rvt_sge_state ssge;
+ struct rvt_sge *sge;
struct ib_wc wc;
u32 length;
enum ib_qp_type sqptype, dqptype;
- qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn);
if (!qp) {
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
+ rcu_read_unlock();
return;
}
@@ -71,12 +76,12 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
IB_QPT_UD : qp->ibqp.qp_type;
if (dqptype != sqptype ||
- !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
+ !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
goto drop;
}
- ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
+ ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
ppd = ppd_from_ibp(ibp);
if (qp->ibqp.qp_num > 1) {
@@ -140,8 +145,8 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
/*
* Get the next work request entry to find where to put the data.
*/
- if (qp->r_flags & QIB_R_REUSE_SGE)
- qp->r_flags &= ~QIB_R_REUSE_SGE;
+ if (qp->r_flags & RVT_R_REUSE_SGE)
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
else {
int ret;
@@ -152,14 +157,14 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
- ibp->n_vl15_dropped++;
+ ibp->rvp.n_vl15_dropped++;
goto bail_unlock;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= QIB_R_REUSE_SGE;
- ibp->n_pkt_drops++;
+ qp->r_flags |= RVT_R_REUSE_SGE;
+ ibp->rvp.n_pkt_drops++;
goto bail_unlock;
}
@@ -189,7 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
if (--ssge.num_sge)
*sge = *ssge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -201,8 +206,8 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
}
length -= len;
}
- qib_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ rvt_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto bail_unlock;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -216,30 +221,31 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
swqe->wr.send_flags & IB_SEND_SOLICITED);
- ibp->n_loop_pkts++;
+ ibp->rvp.n_loop_pkts++;
bail_unlock:
spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rcu_read_unlock();
}
/**
* qib_make_ud_req - construct a UD request packet
* @qp: the QP
*
+ * Assumes the s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_ud_req(struct qib_qp *qp)
+int qib_make_ud_req(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_other_headers *ohdr;
struct ib_ah_attr *ah_attr;
struct qib_pportdata *ppd;
struct qib_ibport *ibp;
- struct qib_swqe *wqe;
- unsigned long flags;
+ struct rvt_swqe *wqe;
u32 nwords;
u32 extra_bytes;
u32 bth0;
@@ -248,28 +254,29 @@ int qib_make_ud_req(struct qib_qp *qp)
int ret = 0;
int next_cur;
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send */
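+ /*
+  * s_head is advanced by the posting path without holding s_lock,
+  * so read it once and order the dependent WQE reads after it.
+  */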
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
+ if (atomic_read(&priv->s_dma_busy)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
- if (qp->s_cur == qp->s_head)
+ /* see post_one_send() */
+ smp_read_barrier_depends();
+ if (qp->s_cur == ACCESS_ONCE(qp->s_head))
goto bail;
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
next_cur = qp->s_cur + 1;
if (next_cur >= qp->s_size)
next_cur = 0;
@@ -277,9 +284,9 @@ int qib_make_ud_req(struct qib_qp *qp)
/* Construct the header. */
ibp = to_iport(qp->ibqp.device, qp->port_num);
ppd = ppd_from_ibp(ibp);
- ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
- if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
- if (ah_attr->dlid != QIB_PERMISSIVE_LID)
+ ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
+ if (ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
+ if (ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE))
this_cpu_inc(ibp->pmastats->n_multicast_xmit);
else
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
@@ -287,6 +294,7 @@ int qib_make_ud_req(struct qib_qp *qp)
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
if (unlikely(lid == ppd->lid)) {
+ unsigned long flags;
/*
* If DMAs are in progress, we can't generate
* a completion for the loopback packet since
@@ -294,11 +302,12 @@ int qib_make_ud_req(struct qib_qp *qp)
* XXX Instead of waiting, we could queue a
* zero length descriptor so we get a callback.
*/
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
+ if (atomic_read(&priv->s_dma_busy)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
qp->s_cur = next_cur;
+ local_irq_save(flags);
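+ /*
+  * flags now holds the IRQ-disabled state, so the "restore" in the
+  * unlock below releases s_lock while leaving interrupts off across
+  * the loopback call.
+  */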
spin_unlock_irqrestore(&qp->s_lock, flags);
qib_ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, flags);
@@ -324,11 +333,11 @@ int qib_make_ud_req(struct qib_qp *qp)
if (ah_attr->ah_flags & IB_AH_GRH) {
/* Header size in 32-bit words. */
- qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
+ qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
&ah_attr->grh,
qp->s_hdrwords, nwords);
lrh0 = QIB_LRH_GRH;
- ohdr = &qp->s_hdr->u.l.oth;
+ ohdr = &priv->s_hdr->u.l.oth;
/*
* Don't worry about sending to locally attached multicast
* QPs. It is unspecified by the spec what happens.
@@ -336,7 +345,7 @@ int qib_make_ud_req(struct qib_qp *qp)
} else {
/* Header size in 32-bit words. */
lrh0 = QIB_LRH_BTH;
- ohdr = &qp->s_hdr->u.oth;
+ ohdr = &priv->s_hdr->u.oth;
}
if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_hdrwords++;
@@ -349,15 +358,16 @@ int qib_make_ud_req(struct qib_qp *qp)
lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
else
lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
- qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
- qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
+ priv->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
+ priv->s_hdr->lrh[2] =
+ cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
lid = ppd->lid;
if (lid) {
lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
- qp->s_hdr->lrh[3] = cpu_to_be16(lid);
+ priv->s_hdr->lrh[3] = cpu_to_be16(lid);
} else
- qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
+ priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
bth0 |= extra_bytes << 20;
@@ -368,11 +378,11 @@ int qib_make_ud_req(struct qib_qp *qp)
/*
* Use the multicast QP if the destination LID is a multicast LID.
*/
- ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
- ah_attr->dlid != QIB_PERMISSIVE_LID ?
+ ohdr->bth[1] = ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) &&
+ ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) ?
cpu_to_be32(QIB_MULTICAST_QPN) :
cpu_to_be32(wqe->ud_wr.remote_qpn);
- ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
+ ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
/*
* Qkeys with the high order bit set mean use the
* qkey from the QP context instead of the WR (see 10.2.5).
@@ -382,13 +392,9 @@ int qib_make_ud_req(struct qib_qp *qp)
ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
done:
- ret = 1;
- goto unlock;
-
+ return 1;
bail:
- qp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->s_flags &= ~RVT_S_BUSY;
return ret;
}
@@ -426,7 +432,7 @@ static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
* Called at interrupt level.
*/
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_other_headers *ohdr;
int opcode;
@@ -446,7 +452,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
}
qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
+ src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
/*
* Get the number of bytes the message was padded by
@@ -531,8 +537,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
/*
* Get the next work request entry to find where to put the data.
*/
- if (qp->r_flags & QIB_R_REUSE_SGE)
- qp->r_flags &= ~QIB_R_REUSE_SGE;
+ if (qp->r_flags & RVT_R_REUSE_SGE)
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
else {
int ret;
@@ -543,13 +549,13 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
- ibp->n_vl15_dropped++;
+ ibp->rvp.n_vl15_dropped++;
return;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= QIB_R_REUSE_SGE;
+ qp->r_flags |= RVT_R_REUSE_SGE;
goto drop;
}
if (has_grh) {
@@ -559,8 +565,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
} else
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
- qib_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ rvt_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -576,15 +582,15 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
*/
- wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
+ wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
return;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
}
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 74f90b2619f6..2d2b94fd3633 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -66,8 +66,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
}
for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages(current, current->mm,
- start_page + got * PAGE_SIZE,
+ ret = get_user_pages(start_page + got * PAGE_SIZE,
num_pages - got, 1, 1,
p + got, NULL);
if (ret < 0)
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index baf1e42b6896..cbf6200e6afc 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -41,6 +41,7 @@
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
+#include <rdma/rdma_vt.h>
#include "qib.h"
#include "qib_common.h"
@@ -49,8 +50,8 @@ static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
-unsigned int ib_qib_lkey_table_size = 16;
-module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
+static unsigned int qib_lkey_table_size = 16;
+module_param_named(lkey_table_size, qib_lkey_table_size, uint,
S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
"LKEY table size in bits (2^n, 1 <= n <= 23)");
@@ -113,36 +114,6 @@ module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
/*
- * Note that it is OK to post send work requests in the SQE and ERR
- * states; qib_do_send() will process them and generate error
- * completions as per IB 1.2 C10-96.
- */
-const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = 0,
- [IB_QPS_INIT] = QIB_POST_RECV_OK,
- [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
- [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
- QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
- QIB_PROCESS_NEXT_SEND_OK,
- [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
- QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
- [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
- QIB_POST_SEND_OK | QIB_FLUSH_SEND,
- [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
- QIB_POST_SEND_OK | QIB_FLUSH_SEND,
-};
-
-struct qib_ucontext {
- struct ib_ucontext ibucontext;
-};
-
-static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
- *ibucontext)
-{
- return container_of(ibucontext, struct qib_ucontext, ibucontext);
-}
-
-/*
* Translate ib_wr_opcode into ib_wc_opcode.
*/
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
@@ -166,9 +137,9 @@ __be64 ib_qib_sys_image_guid;
* @data: the data to copy
* @length: the length of the data
*/
-void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
+void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
{
- struct qib_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
while (length) {
u32 len = sge->length;
@@ -184,11 +155,11 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (release)
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -208,9 +179,9 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
* @ss: the SGE state
* @length: the number of bytes to skip
*/
-void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
+void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
- struct qib_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
while (length) {
u32 len = sge->length;
@@ -225,11 +196,11 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (release)
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -248,10 +219,10 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
* Don't modify the qib_sge_state to get the count.
* Return zero if any of the segments is not aligned.
*/
-static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
+static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
{
- struct qib_sge *sg_list = ss->sg_list;
- struct qib_sge sge = ss->sge;
+ struct rvt_sge *sg_list = ss->sg_list;
+ struct rvt_sge sge = ss->sge;
u8 num_sge = ss->num_sge;
u32 ndesc = 1; /* count the header */
@@ -276,7 +247,7 @@ static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
if (--num_sge)
sge = *sg_list++;
} else if (sge.length == 0 && sge.mr->lkey) {
- if (++sge.n >= QIB_SEGSZ) {
+ if (++sge.n >= RVT_SEGSZ) {
if (++sge.m >= sge.mr->mapsz)
break;
sge.n = 0;
@@ -294,9 +265,9 @@ static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
/*
* Copy from the SGEs to the data buffer.
*/
-static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
+static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
{
- struct qib_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
while (length) {
u32 len = sge->length;
@@ -314,7 +285,7 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -330,242 +301,6 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
}
/**
- * qib_post_one_send - post one RC, UC, or UD send work request
- * @qp: the QP to post on
- * @wr: the work request to send
- */
-static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
- int *scheduled)
-{
- struct qib_swqe *wqe;
- u32 next;
- int i;
- int j;
- int acc;
- int ret;
- unsigned long flags;
- struct qib_lkey_table *rkt;
- struct qib_pd *pd;
- int avoid_schedule = 0;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Check that state is OK to post send. */
- if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
- goto bail_inval;
-
- /* IB spec says that num_sge == 0 is OK. */
- if (wr->num_sge > qp->s_max_sge)
- goto bail_inval;
-
- /*
- * Don't allow RDMA reads or atomic operations on UC or
- * undefined operations.
- * Make sure buffer is large enough to hold the result for atomics.
- */
- if (wr->opcode == IB_WR_REG_MR) {
- if (qib_reg_mr(qp, reg_wr(wr)))
- goto bail_inval;
- } else if (qp->ibqp.qp_type == IB_QPT_UC) {
- if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
- goto bail_inval;
- } else if (qp->ibqp.qp_type != IB_QPT_RC) {
- /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
- if (wr->opcode != IB_WR_SEND &&
- wr->opcode != IB_WR_SEND_WITH_IMM)
- goto bail_inval;
- /* Check UD destination address PD */
- if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
- goto bail_inval;
- } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
- goto bail_inval;
- else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
- (wr->num_sge == 0 ||
- wr->sg_list[0].length < sizeof(u64) ||
- wr->sg_list[0].addr & (sizeof(u64) - 1)))
- goto bail_inval;
- else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
- goto bail_inval;
-
- next = qp->s_head + 1;
- if (next >= qp->s_size)
- next = 0;
- if (next == qp->s_last) {
- ret = -ENOMEM;
- goto bail;
- }
-
- rkt = &to_idev(qp->ibqp.device)->lk_table;
- pd = to_ipd(qp->ibqp.pd);
- wqe = get_swqe_ptr(qp, qp->s_head);
-
- if (qp->ibqp.qp_type != IB_QPT_UC &&
- qp->ibqp.qp_type != IB_QPT_RC)
- memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
- else if (wr->opcode == IB_WR_REG_MR)
- memcpy(&wqe->reg_wr, reg_wr(wr),
- sizeof(wqe->reg_wr));
- else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wr->opcode == IB_WR_RDMA_WRITE ||
- wr->opcode == IB_WR_RDMA_READ)
- memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
- else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
- else
- memcpy(&wqe->wr, wr, sizeof(wqe->wr));
-
- wqe->length = 0;
- j = 0;
- if (wr->num_sge) {
- acc = wr->opcode >= IB_WR_RDMA_READ ?
- IB_ACCESS_LOCAL_WRITE : 0;
- for (i = 0; i < wr->num_sge; i++) {
- u32 length = wr->sg_list[i].length;
- int ok;
-
- if (length == 0)
- continue;
- ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
- &wr->sg_list[i], acc);
- if (!ok)
- goto bail_inval_free;
- wqe->length += length;
- j++;
- }
- wqe->wr.num_sge = j;
- }
- if (qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_RC) {
- if (wqe->length > 0x80000000U)
- goto bail_inval_free;
- if (wqe->length <= qp->pmtu)
- avoid_schedule = 1;
- } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
- qp->port_num - 1)->ibmtu) {
- goto bail_inval_free;
- } else {
- atomic_inc(&to_iah(ud_wr(wr)->ah)->refcount);
- avoid_schedule = 1;
- }
- wqe->ssn = qp->s_ssn++;
- qp->s_head = next;
-
- ret = 0;
- goto bail;
-
-bail_inval_free:
- while (j) {
- struct qib_sge *sge = &wqe->sg_list[--j];
-
- qib_put_mr(sge->mr);
- }
-bail_inval:
- ret = -EINVAL;
-bail:
- if (!ret && !wr->next && !avoid_schedule &&
- !qib_sdma_empty(
- dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
- qib_schedule_send(qp);
- *scheduled = 1;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-/**
- * qib_post_send - post a send on a QP
- * @ibqp: the QP to post the send on
- * @wr: the list of work requests to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- int err = 0;
- int scheduled = 0;
-
- for (; wr; wr = wr->next) {
- err = qib_post_one_send(qp, wr, &scheduled);
- if (err) {
- *bad_wr = wr;
- goto bail;
- }
- }
-
- /* Try to do the send work in the caller's context. */
- if (!scheduled)
- qib_do_send(&qp->s_work);
-
-bail:
- return err;
-}
-
-/**
- * qib_post_receive - post a receive on a QP
- * @ibqp: the QP to post the receive on
- * @wr: the WR to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_rwq *wq = qp->r_rq.wq;
- unsigned long flags;
- int ret;
-
- /* Check that state is OK to post receive. */
- if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- for (; wr; wr = wr->next) {
- struct qib_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&qp->r_rq.lock, flags);
- next = wq->head + 1;
- if (next >= qp->r_rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
* qib_qp_rcv - processing an incoming packet on a QP
* @rcd: the context pointer
* @hdr: the packet header
@@ -579,15 +314,15 @@ bail:
* Called at interrupt level.
*/
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_ibport *ibp = &rcd->ppd->ibport_data;
spin_lock(&qp->r_lock);
/* Check for valid receive state. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
goto unlock;
}
@@ -632,8 +367,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
struct qib_pportdata *ppd = rcd->ppd;
struct qib_ibport *ibp = &ppd->ibport_data;
struct qib_ib_header *hdr = rhdr;
+ struct qib_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
struct qib_other_headers *ohdr;
- struct qib_qp *qp;
+ struct rvt_qp *qp;
u32 qp_num;
int lnh;
u8 opcode;
@@ -645,7 +382,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
/* Check for a valid destination LID (see ch. 7.11.1). */
lid = be16_to_cpu(hdr->lrh[1]);
- if (lid < QIB_MULTICAST_LID_BASE) {
+ if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
lid &= ~((1 << ppd->lmc) - 1);
if (unlikely(lid != ppd->lid))
goto drop;
@@ -674,50 +411,40 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
#endif
/* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+ qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
if (qp_num == QIB_MULTICAST_QPN) {
- struct qib_mcast *mcast;
- struct qib_mcast_qp *p;
+ struct rvt_mcast *mcast;
+ struct rvt_mcast_qp *p;
if (lnh != QIB_LRH_GRH)
goto drop;
- mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
+ mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
if (mcast == NULL)
goto drop;
this_cpu_inc(ibp->pmastats->n_multicast_rcv);
list_for_each_entry_rcu(p, &mcast->qp_list, list)
qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
/*
- * Notify qib_multicast_detach() if it is waiting for us
+ * Notify rvt_multicast_detach() if it is waiting for us
* to finish.
*/
if (atomic_dec_return(&mcast->refcount) <= 1)
wake_up(&mcast->wait);
} else {
- if (rcd->lookaside_qp) {
- if (rcd->lookaside_qpn != qp_num) {
- if (atomic_dec_and_test(
- &rcd->lookaside_qp->refcount))
- wake_up(
- &rcd->lookaside_qp->wait);
- rcd->lookaside_qp = NULL;
- }
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!qp) {
+ rcu_read_unlock();
+ goto drop;
}
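+ /*
+  * The per-context lookaside QP cache is gone; every packet now does
+  * an RCU-protected lookup by QPN instead of caching a referenced
+  * QP in rcd->lookaside_qp.
+  */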
- if (!rcd->lookaside_qp) {
- qp = qib_lookup_qpn(ibp, qp_num);
- if (!qp)
- goto drop;
- rcd->lookaside_qp = qp;
- rcd->lookaside_qpn = qp_num;
- } else
- qp = rcd->lookaside_qp;
this_cpu_inc(ibp->pmastats->n_unicast_rcv);
qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
+ rcu_read_unlock();
}
return;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
}
/*
@@ -728,23 +455,25 @@ static void mem_timer(unsigned long data)
{
struct qib_ibdev *dev = (struct qib_ibdev *) data;
struct list_head *list = &dev->memwait;
- struct qib_qp *qp = NULL;
+ struct rvt_qp *qp = NULL;
+ struct qib_qp_priv *priv = NULL;
unsigned long flags;
- spin_lock_irqsave(&dev->pending_lock, flags);
+ spin_lock_irqsave(&dev->rdi.pending_lock, flags);
if (!list_empty(list)) {
- qp = list_entry(list->next, struct qib_qp, iowait);
- list_del_init(&qp->iowait);
+ priv = list_entry(list->next, struct qib_qp_priv, iowait);
+ qp = priv->owner;
+ list_del_init(&priv->iowait);
atomic_inc(&qp->refcount);
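+ /* Hold a reference so the QP survives dropping pending_lock. */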
if (!list_empty(list))
mod_timer(&dev->mem_timer, jiffies + 1);
}
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
if (qp) {
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_KMEM) {
- qp->s_flags &= ~QIB_S_WAIT_KMEM;
+ if (qp->s_flags & RVT_S_WAIT_KMEM) {
+ qp->s_flags &= ~RVT_S_WAIT_KMEM;
qib_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -753,9 +482,9 @@ static void mem_timer(unsigned long data)
}
}
-static void update_sge(struct qib_sge_state *ss, u32 length)
+static void update_sge(struct rvt_sge_state *ss, u32 length)
{
- struct qib_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
sge->vaddr += length;
sge->length -= length;
@@ -764,7 +493,7 @@ static void update_sge(struct qib_sge_state *ss, u32 length)
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
return;
sge->n = 0;
@@ -810,7 +539,7 @@ static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
}
#endif
-static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
+static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
u32 length, unsigned flush_wc)
{
u32 extra = 0;
@@ -947,30 +676,31 @@ static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
}
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
- struct qib_qp *qp)
+ struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_verbs_txreq *tx;
unsigned long flags;
spin_lock_irqsave(&qp->s_lock, flags);
- spin_lock(&dev->pending_lock);
+ spin_lock(&dev->rdi.pending_lock);
if (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
list_del(l);
- spin_unlock(&dev->pending_lock);
+ spin_unlock(&dev->rdi.pending_lock);
spin_unlock_irqrestore(&qp->s_lock, flags);
tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
} else {
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
- list_empty(&qp->iowait)) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
+ list_empty(&priv->iowait)) {
dev->n_txwait++;
- qp->s_flags |= QIB_S_WAIT_TX;
- list_add_tail(&qp->iowait, &dev->txwait);
+ qp->s_flags |= RVT_S_WAIT_TX;
+ list_add_tail(&priv->iowait, &dev->txwait);
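+ /*
+  * Wait via the driver-private iowait entry; struct rvt_qp no
+  * longer embeds the list head itself.
+  */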
}
- qp->s_flags &= ~QIB_S_BUSY;
- spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~RVT_S_BUSY;
+ spin_unlock(&dev->rdi.pending_lock);
spin_unlock_irqrestore(&qp->s_lock, flags);
tx = ERR_PTR(-EBUSY);
}
@@ -978,22 +708,22 @@ static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
}
static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
- struct qib_qp *qp)
+ struct rvt_qp *qp)
{
struct qib_verbs_txreq *tx;
unsigned long flags;
- spin_lock_irqsave(&dev->pending_lock, flags);
+ spin_lock_irqsave(&dev->rdi.pending_lock, flags);
/* assume the list is non-empty */
if (likely(!list_empty(&dev->txreq_free))) {
struct list_head *l = dev->txreq_free.next;
list_del(l);
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
} else {
/* call slow path to get the extra lock */
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
tx = __get_txreq(dev, qp);
}
return tx;
@@ -1002,16 +732,15 @@ static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
void qib_put_txreq(struct qib_verbs_txreq *tx)
{
struct qib_ibdev *dev;
- struct qib_qp *qp;
+ struct rvt_qp *qp;
+ struct qib_qp_priv *priv;
unsigned long flags;
qp = tx->qp;
dev = to_idev(qp->ibqp.device);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
if (tx->mr) {
- qib_put_mr(tx->mr);
+ rvt_put_mr(tx->mr);
tx->mr = NULL;
}
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
@@ -1022,21 +751,23 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
kfree(tx->align_buf);
}
- spin_lock_irqsave(&dev->pending_lock, flags);
+ spin_lock_irqsave(&dev->rdi.pending_lock, flags);
/* Put struct back on free list */
list_add(&tx->txreq.list, &dev->txreq_free);
if (!list_empty(&dev->txwait)) {
/* Wake up first QP wanting a free struct */
- qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
- list_del_init(&qp->iowait);
+ priv = list_entry(dev->txwait.next, struct qib_qp_priv,
+ iowait);
+ qp = priv->owner;
+ list_del_init(&priv->iowait);
atomic_inc(&qp->refcount);
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_TX) {
- qp->s_flags &= ~QIB_S_WAIT_TX;
+ if (qp->s_flags & RVT_S_WAIT_TX) {
+ qp->s_flags &= ~RVT_S_WAIT_TX;
qib_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1044,7 +775,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
} else
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}
/*
@@ -1055,36 +786,39 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
*/
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
- struct qib_qp *qp, *nqp;
- struct qib_qp *qps[20];
+ struct rvt_qp *qp, *nqp;
+ struct qib_qp_priv *qpp, *nqpp;
+ struct rvt_qp *qps[20];
struct qib_ibdev *dev;
unsigned i, n;
n = 0;
dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->pending_lock);
+ spin_lock(&dev->rdi.pending_lock);
/* Search wait list for first QP wanting DMA descriptors. */
- list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
+ list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
+ qp = qpp->owner;
+ nqp = nqpp->owner;
if (qp->port_num != ppd->port)
continue;
if (n == ARRAY_SIZE(qps))
break;
- if (qp->s_tx->txreq.sg_count > avail)
+ if (qpp->s_tx->txreq.sg_count > avail)
break;
- avail -= qp->s_tx->txreq.sg_count;
- list_del_init(&qp->iowait);
+ avail -= qpp->s_tx->txreq.sg_count;
+ list_del_init(&qpp->iowait);
atomic_inc(&qp->refcount);
qps[n++] = qp;
}
- spin_unlock(&dev->pending_lock);
+ spin_unlock(&dev->rdi.pending_lock);
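+ /*
+  * The QPs were collected and referenced under pending_lock; wake
+  * each one under its own s_lock now that pending_lock is dropped.
+  */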
for (i = 0; i < n; i++) {
qp = qps[i];
spin_lock(&qp->s_lock);
- if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
- qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
+ if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
+ qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
qib_schedule_send(qp);
}
spin_unlock(&qp->s_lock);
@@ -1100,7 +834,8 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
struct qib_verbs_txreq *tx =
container_of(cookie, struct qib_verbs_txreq, txreq);
- struct qib_qp *qp = tx->qp;
+ struct rvt_qp *qp = tx->qp;
+ struct qib_qp_priv *priv = qp->priv;
spin_lock(&qp->s_lock);
if (tx->wqe)
@@ -1117,11 +852,11 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
}
qib_rc_send_complete(qp, hdr);
}
- if (atomic_dec_and_test(&qp->s_dma_busy)) {
+ if (atomic_dec_and_test(&priv->s_dma_busy)) {
if (qp->state == IB_QPS_RESET)
- wake_up(&qp->wait_dma);
- else if (qp->s_flags & QIB_S_WAIT_DMA) {
- qp->s_flags &= ~QIB_S_WAIT_DMA;
+ wake_up(&priv->wait_dma);
+ else if (qp->s_flags & RVT_S_WAIT_DMA) {
+ qp->s_flags &= ~RVT_S_WAIT_DMA;
qib_schedule_send(qp);
}
}
@@ -1130,22 +865,23 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
qib_put_txreq(tx);
}
-static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
+static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->iowait)) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ spin_lock(&dev->rdi.pending_lock);
+ if (list_empty(&priv->iowait)) {
if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1);
- qp->s_flags |= QIB_S_WAIT_KMEM;
- list_add_tail(&qp->iowait, &dev->memwait);
+ qp->s_flags |= RVT_S_WAIT_KMEM;
+ list_add_tail(&priv->iowait, &dev->memwait);
}
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock(&dev->rdi.pending_lock);
+ qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1153,10 +889,11 @@ static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
return ret;
}
-static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len,
+static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct rvt_sge_state *ss, u32 len,
u32 plen, u32 dwords)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
struct qib_devdata *dd = dd_from_dev(dev);
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
@@ -1167,9 +904,9 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
u32 ndesc;
int ret;
- tx = qp->s_tx;
+ tx = priv->s_tx;
if (tx) {
- qp->s_tx = NULL;
+ priv->s_tx = NULL;
/* resend previously constructed packet */
ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
goto bail;
@@ -1182,7 +919,6 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
be16_to_cpu(hdr->lrh[0]) >> 12);
tx->qp = qp;
- atomic_inc(&qp->refcount);
tx->wqe = qp->s_wqe;
tx->mr = qp->s_rdma_mr;
if (qp->s_rdma_mr)
@@ -1245,7 +981,7 @@ err_tx:
qib_put_txreq(tx);
ret = wait_kmem(dev, qp);
unaligned:
- ibp->n_unaligned++;
+ ibp->rvp.n_unaligned++;
bail:
return ret;
bail_tx:
@@ -1257,8 +993,9 @@ bail_tx:
* If we are now in the error state, return zero to flush the
* send work request.
*/
-static int no_bufs_available(struct qib_qp *qp)
+static int no_bufs_available(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
struct qib_devdata *dd;
unsigned long flags;
@@ -1271,25 +1008,25 @@ static int no_bufs_available(struct qib_qp *qp)
* enabling the PIO avail interrupt.
*/
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->iowait)) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ spin_lock(&dev->rdi.pending_lock);
+ if (list_empty(&priv->iowait)) {
dev->n_piowait++;
- qp->s_flags |= QIB_S_WAIT_PIO;
- list_add_tail(&qp->iowait, &dev->piowait);
+ qp->s_flags |= RVT_S_WAIT_PIO;
+ list_add_tail(&priv->iowait, &dev->piowait);
dd = dd_from_dev(dev);
dd->f_wantpiobuf_intr(dd, 1);
}
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock(&dev->rdi.pending_lock);
+ qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
-static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len,
+static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
+ u32 hdrwords, struct rvt_sge_state *ss, u32 len,
u32 plen, u32 dwords)
{
struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
@@ -1370,7 +1107,7 @@ done:
}
qib_sendbuf_done(dd, pbufn);
if (qp->s_rdma_mr) {
- qib_put_mr(qp->s_rdma_mr);
+ rvt_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
if (qp->s_wqe) {
@@ -1394,10 +1131,10 @@ done:
* @len: the length of the packet in bytes
*
* Return zero if packet is sent or queued OK.
- * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
+ * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
*/
-int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len)
+int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{
struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
u32 plen;
@@ -1529,10 +1266,11 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
{
struct qib_ibdev *dev = &dd->verbs_dev;
struct list_head *list;
- struct qib_qp *qps[5];
- struct qib_qp *qp;
+ struct rvt_qp *qps[5];
+ struct rvt_qp *qp;
unsigned long flags;
unsigned i, n;
+ struct qib_qp_priv *priv;
list = &dev->piowait;
n = 0;
@@ -1543,25 +1281,26 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
* could end up with QPs on the wait list with the interrupt
* disabled.
*/
- spin_lock_irqsave(&dev->pending_lock, flags);
+ spin_lock_irqsave(&dev->rdi.pending_lock, flags);
while (!list_empty(list)) {
if (n == ARRAY_SIZE(qps))
goto full;
- qp = list_entry(list->next, struct qib_qp, iowait);
- list_del_init(&qp->iowait);
+ priv = list_entry(list->next, struct qib_qp_priv, iowait);
+ qp = priv->owner;
+ list_del_init(&priv->iowait);
atomic_inc(&qp->refcount);
qps[n++] = qp;
}
dd->f_wantpiobuf_intr(dd, 0);
full:
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
for (i = 0; i < n; i++) {
qp = qps[i];
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_PIO) {
- qp->s_flags &= ~QIB_S_WAIT_PIO;
+ if (qp->s_flags & RVT_S_WAIT_PIO) {
+ qp->s_flags &= ~RVT_S_WAIT_PIO;
qib_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1572,82 +1311,24 @@ full:
}
}
-static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibdev *dev = to_idev(ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
- memset(props, 0, sizeof(*props));
-
- props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
- IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
- IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
- props->page_size_cap = PAGE_SIZE;
- props->vendor_id =
- QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
- props->vendor_part_id = dd->deviceid;
- props->hw_ver = dd->minrev;
- props->sys_image_guid = ib_qib_sys_image_guid;
- props->max_mr_size = ~0ULL;
- props->max_qp = ib_qib_max_qps;
- props->max_qp_wr = ib_qib_max_qp_wrs;
- props->max_sge = ib_qib_max_sges;
- props->max_sge_rd = ib_qib_max_sges;
- props->max_cq = ib_qib_max_cqs;
- props->max_ah = ib_qib_max_ahs;
- props->max_cqe = ib_qib_max_cqes;
- props->max_mr = dev->lk_table.max;
- props->max_fmr = dev->lk_table.max;
- props->max_map_per_fmr = 32767;
- props->max_pd = ib_qib_max_pds;
- props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
- props->max_qp_init_rd_atom = 255;
- /* props->max_res_rd_atom */
- props->max_srq = ib_qib_max_srqs;
- props->max_srq_wr = ib_qib_max_srq_wrs;
- props->max_srq_sge = ib_qib_max_srq_sges;
- /* props->local_ca_ack_delay */
- props->atomic_cap = IB_ATOMIC_GLOB;
- props->max_pkeys = qib_get_npkeys(dd);
- props->max_mcast_grp = ib_qib_max_mcast_grps;
- props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
- props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
- props->max_mcast_grp;
-
- return 0;
-}
-
-static int qib_query_port(struct ib_device *ibdev, u8 port,
+static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num,
struct ib_port_attr *props)
{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = dd_from_dev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
enum ib_mtu mtu;
u16 lid = ppd->lid;
- memset(props, 0, sizeof(*props));
props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
props->lmc = ppd->lmc;
- props->sm_lid = ibp->sm_lid;
- props->sm_sl = ibp->sm_sl;
props->state = dd->f_iblink_state(ppd->lastibcstat);
props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
- props->port_cap_flags = ibp->port_cap_flags;
props->gid_tbl_len = QIB_GUIDS_PER_PORT;
- props->max_msg_sz = 0x80000000;
- props->pkey_tbl_len = qib_get_npkeys(dd);
- props->bad_pkey_cntr = ibp->pkey_violations;
- props->qkey_viol_cntr = ibp->qkey_violations;
props->active_width = ppd->link_width_active;
/* See rate_show() */
props->active_speed = ppd->link_speed_active;
props->max_vl_num = qib_num_vls(ppd->vls_supported);
- props->init_type_reply = 0;
props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
switch (ppd->ibmtu) {
@@ -1670,7 +1351,6 @@ static int qib_query_port(struct ib_device *ibdev, u8 port,
mtu = IB_MTU_2048;
}
props->active_mtu = mtu;
- props->subnet_timeout = ibp->subnet_timeout;
return 0;
}
@@ -1714,185 +1394,70 @@ bail:
return ret;
}
-static int qib_modify_port(struct ib_device *ibdev, u8 port,
- int port_modify_mask, struct ib_port_modify *props)
+static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- ibp->port_cap_flags |= props->set_port_cap_mask;
- ibp->port_cap_flags &= ~props->clr_port_cap_mask;
- if (props->set_port_cap_mask || props->clr_port_cap_mask)
- qib_cap_mask_chg(ibp);
- if (port_modify_mask & IB_PORT_SHUTDOWN)
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
- if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
- ibp->qkey_violations = 0;
- return 0;
-}
-
-static int qib_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- int ret = 0;
-
- if (!port || port > dd->num_pports)
- ret = -EINVAL;
- else {
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- gid->global.subnet_prefix = ibp->gid_prefix;
- if (index == 0)
- gid->global.interface_id = ppd->guid;
- else if (index < QIB_GUIDS_PER_PORT)
- gid->global.interface_id = ibp->guids[index - 1];
- else
- ret = -EINVAL;
- }
-
- return ret;
-}
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = dd_from_dev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
-static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_pd *pd;
- struct ib_pd *ret;
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
- /*
- * This is actually totally arbitrary. Some correctness tests
- * assume there's a maximum number of PDs that can be allocated.
- * We don't actually have this limit, but we fail the test if
- * we allow allocations of more than we report for this value.
- */
-
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock(&dev->n_pds_lock);
- if (dev->n_pds_allocated == ib_qib_max_pds) {
- spin_unlock(&dev->n_pds_lock);
- kfree(pd);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_pds_allocated++;
- spin_unlock(&dev->n_pds_lock);
-
- /* ib_alloc_pd() will initialize pd->ibpd. */
- pd->user = udata != NULL;
-
- ret = &pd->ibpd;
-
-bail:
- return ret;
+ return 0;
}
-static int qib_dealloc_pd(struct ib_pd *ibpd)
+static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
+ int guid_index, __be64 *guid)
{
- struct qib_pd *pd = to_ipd(ibpd);
- struct qib_ibdev *dev = to_idev(ibpd->device);
-
- spin_lock(&dev->n_pds_lock);
- dev->n_pds_allocated--;
- spin_unlock(&dev->n_pds_lock);
+ struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- kfree(pd);
+ if (guid_index == 0)
+ *guid = ppd->guid;
+ else if (guid_index < QIB_GUIDS_PER_PORT)
+ *guid = ibp->guids[guid_index - 1];
+ else
+ return -EINVAL;
return 0;
}
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
- /* A multicast address requires a GRH (see ch. 8.4.1). */
- if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
- ah_attr->dlid != QIB_PERMISSIVE_LID &&
- !(ah_attr->ah_flags & IB_AH_GRH))
- goto bail;
- if ((ah_attr->ah_flags & IB_AH_GRH) &&
- ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
- goto bail;
- if (ah_attr->dlid == 0)
- goto bail;
- if (ah_attr->port_num < 1 ||
- ah_attr->port_num > ibdev->phys_port_cnt)
- goto bail;
- if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
- ib_rate_to_mult(ah_attr->static_rate) < 0)
- goto bail;
if (ah_attr->sl > 15)
- goto bail;
+ return -EINVAL;
+
return 0;
-bail:
- return -EINVAL;
}
-/**
- * qib_create_ah - create an address handle
- * @pd: the protection domain
- * @ah_attr: the attributes of the AH
- *
- * This may be called from interrupt context.
- */
-static struct ib_ah *qib_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+static void qib_notify_new_ah(struct ib_device *ibdev,
+ struct ib_ah_attr *ah_attr,
+ struct rvt_ah *ah)
{
- struct qib_ah *ah;
- struct ib_ah *ret;
- struct qib_ibdev *dev = to_idev(pd->device);
- unsigned long flags;
+ struct qib_ibport *ibp;
+ struct qib_pportdata *ppd;
- if (qib_check_ah(pd->device, ah_attr)) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- if (dev->n_ahs_allocated == ib_qib_max_ahs) {
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- kfree(ah);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_ahs_allocated++;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- /* ib_create_ah() will initialize ah->ibah. */
- ah->attr = *ah_attr;
- atomic_set(&ah->refcount, 0);
-
- ret = &ah->ibah;
+ /*
+ * Do not trust reading anything from rvt_ah at this point as it is not
+ * done being set up. We can, however, modify the things we need to set.
+ */
-bail:
- return ret;
+ ibp = to_iport(ibdev, ah_attr->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah->vl = ibp->sl_to_vl[ah->attr.sl];
+ ah->log_pmtu = ilog2(ppd->ibmtu);
}
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{
struct ib_ah_attr attr;
struct ib_ah *ah = ERR_PTR(-EINVAL);
- struct qib_qp *qp0;
+ struct rvt_qp *qp0;
memset(&attr, 0, sizeof(attr));
attr.dlid = dlid;
attr.port_num = ppd_from_ibp(ibp)->port;
rcu_read_lock();
- qp0 = rcu_dereference(ibp->qp0);
+ qp0 = rcu_dereference(ibp->rvp.qp[0]);
if (qp0)
ah = ib_create_ah(qp0->ibqp.pd, &attr);
rcu_read_unlock();
@@ -1900,51 +1465,6 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
}
/**
- * qib_destroy_ah - destroy an address handle
- * @ibah: the AH to destroy
- *
- * This may be called from interrupt context.
- */
-static int qib_destroy_ah(struct ib_ah *ibah)
-{
- struct qib_ibdev *dev = to_idev(ibah->device);
- struct qib_ah *ah = to_iah(ibah);
- unsigned long flags;
-
- if (atomic_read(&ah->refcount) != 0)
- return -EBUSY;
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- dev->n_ahs_allocated--;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- kfree(ah);
-
- return 0;
-}
-
-static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
- struct qib_ah *ah = to_iah(ibah);
-
- if (qib_check_ah(ibah->device, ah_attr))
- return -EINVAL;
-
- ah->attr = *ah_attr;
-
- return 0;
-}
-
-static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
- struct qib_ah *ah = to_iah(ibah);
-
- *ah_attr = ah->attr;
-
- return 0;
-}
-
-/**
* qib_get_npkeys - return the size of the PKEY table for context 0
* @dd: the qlogic_ib device
*/
@@ -1973,75 +1493,27 @@ unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
return ret;
}
-static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- int ret;
-
- if (index >= qib_get_npkeys(dd)) {
- ret = -EINVAL;
- goto bail;
- }
-
- *pkey = qib_get_pkey(to_iport(ibdev, port), index);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_alloc_ucontext - allocate a ucontext
- * @ibdev: the infiniband device
- * @udata: not used by the QLogic_IB driver
- */
-
-static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct qib_ucontext *context;
- struct ib_ucontext *ret;
-
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- ret = &context->ibucontext;
-
-bail:
- return ret;
-}
-
-static int qib_dealloc_ucontext(struct ib_ucontext *context)
-{
- kfree(to_iucontext(context));
- return 0;
-}
-
static void init_ibport(struct qib_pportdata *ppd)
{
struct qib_verbs_counters cntrs;
struct qib_ibport *ibp = &ppd->ibport_data;
- spin_lock_init(&ibp->lock);
+ spin_lock_init(&ibp->rvp.lock);
/* Set the prefix to the default value (see ch. 4.1.1) */
- ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
- ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
- ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
+ ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
+ ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
+ ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
IB_PORT_OTHER_LOCAL_CHANGES_SUP;
if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
- ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
- ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
- ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
- ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
- ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
- ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+ ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
+ ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+ ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+ ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+ ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+ ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
/* Snapshot current HW counters to "clear" them. */
qib_get_counters(ppd, &cntrs);
@@ -2061,26 +1533,55 @@ static void init_ibport(struct qib_pportdata *ppd)
ibp->z_excessive_buffer_overrun_errors =
cntrs.excessive_buffer_overrun_errors;
ibp->z_vl15_dropped = cntrs.vl15_dropped;
- RCU_INIT_POINTER(ibp->qp0, NULL);
- RCU_INIT_POINTER(ibp->qp1, NULL);
+ RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
+ RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
-static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
+/**
+ * qib_fill_device_attr - Fill in rvt dev info device attributes.
+ * @dd: the device data structure
+ */
+static void qib_fill_device_attr(struct qib_devdata *dd)
{
- struct ib_port_attr attr;
- int err;
-
- err = qib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
- return 0;
+ rdi->dparms.props.max_pd = ib_qib_max_pds;
+ rdi->dparms.props.max_ah = ib_qib_max_ahs;
+ rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+ IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+ rdi->dparms.props.page_size_cap = PAGE_SIZE;
+ rdi->dparms.props.vendor_id =
+ QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
+ rdi->dparms.props.vendor_part_id = dd->deviceid;
+ rdi->dparms.props.hw_ver = dd->minrev;
+ rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
+ rdi->dparms.props.max_mr_size = ~0ULL;
+ rdi->dparms.props.max_qp = ib_qib_max_qps;
+ rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
+ rdi->dparms.props.max_sge = ib_qib_max_sges;
+ rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
+ rdi->dparms.props.max_cq = ib_qib_max_cqs;
+ rdi->dparms.props.max_cqe = ib_qib_max_cqes;
+ rdi->dparms.props.max_ah = ib_qib_max_ahs;
+ rdi->dparms.props.max_mr = rdi->lkey_table.max;
+ rdi->dparms.props.max_fmr = rdi->lkey_table.max;
+ rdi->dparms.props.max_map_per_fmr = 32767;
+ rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
+ rdi->dparms.props.max_qp_init_rd_atom = 255;
+ rdi->dparms.props.max_srq = ib_qib_max_srqs;
+ rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
+ rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
+ rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
+ rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
+ rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
+ rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
+ rdi->dparms.props.max_total_mcast_qp_attach =
+ rdi->dparms.props.max_mcast_qp_attach *
+ rdi->dparms.props.max_mcast_grp;
}
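+
+/*
+ * rdmavt serves ib_query_device() from rdi->dparms.props, replacing
+ * the qib_query_device() removed above.
+ */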
/**
@@ -2091,68 +1592,20 @@ static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
int qib_register_ib_device(struct qib_devdata *dd)
{
struct qib_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->ibdev;
+ struct ib_device *ibdev = &dev->rdi.ibdev;
struct qib_pportdata *ppd = dd->pport;
- unsigned i, lk_tab_size;
+ unsigned i, ctxt;
int ret;
- dev->qp_table_size = ib_qib_qp_table_size;
get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
- dev->qp_table = kmalloc_array(
- dev->qp_table_size,
- sizeof(*dev->qp_table),
- GFP_KERNEL);
- if (!dev->qp_table) {
- ret = -ENOMEM;
- goto err_qpt;
- }
- for (i = 0; i < dev->qp_table_size; i++)
- RCU_INIT_POINTER(dev->qp_table[i], NULL);
-
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
/* Only need to initialize non-zero fields. */
- spin_lock_init(&dev->qpt_lock);
- spin_lock_init(&dev->n_pds_lock);
- spin_lock_init(&dev->n_ahs_lock);
- spin_lock_init(&dev->n_cqs_lock);
- spin_lock_init(&dev->n_qps_lock);
- spin_lock_init(&dev->n_srqs_lock);
- spin_lock_init(&dev->n_mcast_grps_lock);
- init_timer(&dev->mem_timer);
- dev->mem_timer.function = mem_timer;
- dev->mem_timer.data = (unsigned long) dev;
-
- qib_init_qpn_table(dd, &dev->qpn_table);
+ setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+
+ qpt_mask = dd->qpn_mask;
- /*
- * The top ib_qib_lkey_table_size bits are used to index the
- * table. The lower 8 bits can be owned by the user (copied from
- * the LKEY). The remaining bits act as a generation number or tag.
- */
- spin_lock_init(&dev->lk_table.lock);
- /* ensure generation is at least 4 bits; see keys.c */
- if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
- qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
- ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
- ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
- }
- dev->lk_table.max = 1 << ib_qib_lkey_table_size;
- lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- dev->lk_table.table = (struct qib_mregion __rcu **)
- vmalloc(lk_tab_size);
- if (dev->lk_table.table == NULL) {
- ret = -ENOMEM;
- goto err_lk;
- }
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- for (i = 0; i < dev->lk_table.max; i++)
- RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
- INIT_LIST_HEAD(&dev->pending_mmaps);
- spin_lock_init(&dev->pending_lock);
- dev->mmap_offset = PAGE_SIZE;
- spin_lock_init(&dev->mmap_offset_lock);
INIT_LIST_HEAD(&dev->piowait);
INIT_LIST_HEAD(&dev->dmawait);
INIT_LIST_HEAD(&dev->txwait);
@@ -2194,110 +1647,91 @@ int qib_register_ib_device(struct qib_devdata *dd)
strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
ibdev->owner = THIS_MODULE;
ibdev->node_guid = ppd->guid;
- ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
- ibdev->uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
- (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
- ibdev->node_type = RDMA_NODE_IB_CA;
ibdev->phys_port_cnt = dd->num_pports;
- ibdev->num_comp_vectors = 1;
ibdev->dma_device = &dd->pcidev->dev;
- ibdev->query_device = qib_query_device;
ibdev->modify_device = qib_modify_device;
- ibdev->query_port = qib_query_port;
- ibdev->modify_port = qib_modify_port;
- ibdev->query_pkey = qib_query_pkey;
- ibdev->query_gid = qib_query_gid;
- ibdev->alloc_ucontext = qib_alloc_ucontext;
- ibdev->dealloc_ucontext = qib_dealloc_ucontext;
- ibdev->alloc_pd = qib_alloc_pd;
- ibdev->dealloc_pd = qib_dealloc_pd;
- ibdev->create_ah = qib_create_ah;
- ibdev->destroy_ah = qib_destroy_ah;
- ibdev->modify_ah = qib_modify_ah;
- ibdev->query_ah = qib_query_ah;
- ibdev->create_srq = qib_create_srq;
- ibdev->modify_srq = qib_modify_srq;
- ibdev->query_srq = qib_query_srq;
- ibdev->destroy_srq = qib_destroy_srq;
- ibdev->create_qp = qib_create_qp;
- ibdev->modify_qp = qib_modify_qp;
- ibdev->query_qp = qib_query_qp;
- ibdev->destroy_qp = qib_destroy_qp;
- ibdev->post_send = qib_post_send;
- ibdev->post_recv = qib_post_receive;
- ibdev->post_srq_recv = qib_post_srq_receive;
- ibdev->create_cq = qib_create_cq;
- ibdev->destroy_cq = qib_destroy_cq;
- ibdev->resize_cq = qib_resize_cq;
- ibdev->poll_cq = qib_poll_cq;
- ibdev->req_notify_cq = qib_req_notify_cq;
- ibdev->get_dma_mr = qib_get_dma_mr;
- ibdev->reg_user_mr = qib_reg_user_mr;
- ibdev->dereg_mr = qib_dereg_mr;
- ibdev->alloc_mr = qib_alloc_mr;
- ibdev->map_mr_sg = qib_map_mr_sg;
- ibdev->alloc_fmr = qib_alloc_fmr;
- ibdev->map_phys_fmr = qib_map_phys_fmr;
- ibdev->unmap_fmr = qib_unmap_fmr;
- ibdev->dealloc_fmr = qib_dealloc_fmr;
- ibdev->attach_mcast = qib_multicast_attach;
- ibdev->detach_mcast = qib_multicast_detach;
ibdev->process_mad = qib_process_mad;
- ibdev->mmap = qib_mmap;
- ibdev->dma_ops = &qib_dma_mapping_ops;
- ibdev->get_port_immutable = qib_port_immutable;
snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
"Intel Infiniband HCA %s", init_utsname()->nodename);
- ret = ib_register_device(ibdev, qib_create_port_files);
- if (ret)
- goto err_reg;
+ /*
+ * Fill in rvt info object.
+ */
+ dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
+ dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
+ dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
+ dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
+ dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
+ dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
+ dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
+ dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
+ dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
+ dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
+ dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
+ dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
+ dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
+ dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
+ dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
+ dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
+ dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
+ dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
+ dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
+ dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
+ dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
+ dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
+ dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;
+ dd->verbs_dev.rdi.driver_f.notify_create_mad_agent =
+ qib_notify_create_mad_agent;
+ dd->verbs_dev.rdi.driver_f.notify_free_mad_agent =
+ qib_notify_free_mad_agent;
+
+ dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
+ dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
+ dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
+ dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
+ dd->verbs_dev.rdi.dparms.qpn_start = 1;
+ dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
+ dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
+ dd->verbs_dev.rdi.dparms.qpn_inc = 1;
+ dd->verbs_dev.rdi.dparms.qos_shift = 1;
+ dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
+ dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
+ dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
+ dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
+ dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
+ dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
+ dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+ dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
+
+ snprintf(dd->verbs_dev.rdi.dparms.cq_name,
+ sizeof(dd->verbs_dev.rdi.dparms.cq_name),
+ "qib_cq%d", dd->unit);
+
+ qib_fill_device_attr(dd);
+
+ ppd = dd->pport;
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ ctxt = ppd->hw_pidx;
+ rvt_init_port(&dd->verbs_dev.rdi,
+ &ppd->ibport_data.rvp,
+ i,
+ dd->rcd[ctxt]->pkeys);
+ }
- ret = qib_create_agents(dev);
+ ret = rvt_register_device(&dd->verbs_dev.rdi);
if (ret)
- goto err_agents;
+ goto err_tx;
ret = qib_verbs_register_sysfs(dd);
if (ret)
goto err_class;
- goto bail;
+ return ret;
err_class:
- qib_free_agents(dev);
-err_agents:
- ib_unregister_device(ibdev);
-err_reg:
+ rvt_unregister_device(&dd->verbs_dev.rdi);
err_tx:
while (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
@@ -2313,27 +1747,17 @@ err_tx:
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
- vfree(dev->lk_table.table);
-err_lk:
- kfree(dev->qp_table);
-err_qpt:
qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
-bail:
return ret;
}
void qib_unregister_ib_device(struct qib_devdata *dd)
{
struct qib_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->ibdev;
- u32 qps_inuse;
- unsigned lk_tab_size;
qib_verbs_unregister_sysfs(dd);
- qib_free_agents(dev);
-
- ib_unregister_device(ibdev);
+ rvt_unregister_device(&dd->verbs_dev.rdi);
if (!list_empty(&dev->piowait))
qib_dev_err(dd, "piowait list not empty!\n");
@@ -2343,16 +1767,8 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
qib_dev_err(dd, "txwait list not empty!\n");
if (!list_empty(&dev->memwait))
qib_dev_err(dd, "memwait list not empty!\n");
- if (dev->dma_mr)
- qib_dev_err(dd, "DMA MR not NULL!\n");
-
- qps_inuse = qib_free_all_qps(dd);
- if (qps_inuse)
- qib_dev_err(dd, "QP memory leak! %u still in use\n",
- qps_inuse);
del_timer_sync(&dev->mem_timer);
- qib_free_qpn_table(&dev->qpn_table);
while (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
struct qib_verbs_txreq *tx;
@@ -2366,21 +1782,36 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
dd->pport->sdma_descq_cnt *
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
- lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- vfree(dev->lk_table.table);
- kfree(dev->qp_table);
}
-/*
- * This must be called with s_lock held.
+/**
+ * _qib_schedule_send - schedule progress
+ * @qp: the QP
+ *
+ * This schedules progress without regard to the s_flags.
+ *
+ * It is used only in the post-send path, which does not
+ * hold the s_lock.
*/
-void qib_schedule_send(struct qib_qp *qp)
+void _qib_schedule_send(struct rvt_qp *qp)
{
- if (qib_send_ok(qp)) {
- struct qib_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_qp_priv *priv = qp->priv;
- queue_work(ppd->qib_wq, &qp->s_work);
- }
+ queue_work(ppd->qib_wq, &priv->s_work);
+}
+
+/**
+ * qib_schedule_send - schedule progress
+ * @qp: the QP
+ *
+ * This schedules QP progress; the caller must
+ * hold the s_lock.
+ */
+void qib_schedule_send(struct rvt_qp *qp)
+{
+ if (qib_send_ok(qp))
+ _qib_schedule_send(qp);
}
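
Taken together, the hunks above collapse qib_register_ib_device() into three steps: hook the driver_f callbacks rdmavt will invoke, describe the device through dparms, and hand everything to rvt_register_device(), which now owns ib_register_device() and the common verbs. A condensed, hypothetical sketch of that flow, using only names that appear in the hunks (error handling and most callbacks elided):

    /* Illustrative sketch only -- not the actual registration code. */
    static int example_register_with_rdmavt(struct qib_devdata *dd)
    {
            struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

            /* 1. Callbacks rdmavt invokes back into the driver. */
            rdi->driver_f.qp_priv_alloc = qib_qp_priv_alloc;
            rdi->driver_f.qp_priv_free  = qib_qp_priv_free;
            rdi->driver_f.do_send       = qib_do_send;
            rdi->driver_f.schedule_send = qib_schedule_send;

            /* 2. Device parameters rdmavt manages on the driver's behalf. */
            rdi->dparms.qp_table_size = ib_qib_qp_table_size;
            rdi->dparms.nports        = dd->num_pports;
            rdi->dparms.npkeys        = qib_get_npkeys(dd);

            /* 3. rdmavt registers with the IB core and provides the
             *    PD/AH/CQ/MR/QP verbs the driver used to implement itself. */
            return rvt_register_device(rdi);
    }
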
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 6c5e77753d85..4b76a8d59337 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -45,6 +45,8 @@
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_cq.h>
struct qib_ctxtdata;
struct qib_pportdata;
@@ -53,9 +55,7 @@ struct qib_verbs_txreq;
#define QIB_MAX_RDMA_ATOMIC 16
#define QIB_GUIDS_PER_PORT 5
-
-#define QPN_MAX (1 << 24)
-#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
+#define QIB_PSN_SHIFT 8
/*
* Increment this value if any changes that break userspace ABI
@@ -63,12 +63,6 @@ struct qib_verbs_txreq;
*/
#define QIB_UVERBS_ABI_VERSION 2
-/*
- * Define an ib_cq_notify value that is not valid so we know when CQ
- * notifications are armed.
- */
-#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
-
#define IB_SEQ_NAK (3 << 29)
/* AETH NAK opcode values */
@@ -79,17 +73,6 @@ struct qib_verbs_txreq;
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST 0x64
-/* Flags for checking QP state (see ib_qib_state_ops[]) */
-#define QIB_POST_SEND_OK 0x01
-#define QIB_POST_RECV_OK 0x02
-#define QIB_PROCESS_RECV_OK 0x04
-#define QIB_PROCESS_SEND_OK 0x08
-#define QIB_PROCESS_NEXT_SEND_OK 0x10
-#define QIB_FLUSH_SEND 0x20
-#define QIB_FLUSH_RECV 0x40
-#define QIB_PROCESS_OR_FLUSH_SEND \
- (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
-
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE 0x00
#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
@@ -203,468 +186,21 @@ struct qib_pio_header {
} __packed;
/*
- * There is one struct qib_mcast for each multicast GID.
- * All attached QPs are then stored as a list of
- * struct qib_mcast_qp.
+ * qib-specific data structure that will be hidden from rvt once the
+ * queue pair is made common.
*/
-struct qib_mcast_qp {
- struct list_head list;
- struct qib_qp *qp;
-};
-
-struct qib_mcast {
- struct rb_node rb_node;
- union ib_gid mgid;
- struct list_head qp_list;
- wait_queue_head_t wait;
- atomic_t refcount;
- int n_attached;
-};
-
-/* Protection domain */
-struct qib_pd {
- struct ib_pd ibpd;
- int user; /* non-zero if created from user space */
-};
-
-/* Address Handle */
-struct qib_ah {
- struct ib_ah ibah;
- struct ib_ah_attr attr;
- atomic_t refcount;
-};
-
-/*
- * This structure is used by qib_mmap() to validate an offset
- * when an mmap() request is made. The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct qib_mmap_info {
- struct list_head pending_mmaps;
- struct ib_ucontext *context;
- void *obj;
- __u64 offset;
- struct kref ref;
- unsigned size;
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- */
-struct qib_cq_wc {
- u32 head; /* index of next entry to fill */
- u32 tail; /* index of next ib_poll_cq() entry */
- union {
- /* these are actually size ibcq.cqe + 1 */
- struct ib_uverbs_wc uqueue[0];
- struct ib_wc kqueue[0];
- };
-};
-
-/*
- * The completion queue structure.
- */
-struct qib_cq {
- struct ib_cq ibcq;
- struct kthread_work comptask;
- struct qib_devdata *dd;
- spinlock_t lock; /* protect changes in this struct */
- u8 notify;
- u8 triggered;
- struct qib_cq_wc *queue;
- struct qib_mmap_info *ip;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct qib_seg {
- void *vaddr;
- size_t length;
-};
-
-/* The number of qib_segs that fit in a page. */
-#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
-
-struct qib_segarray {
- struct qib_seg segs[QIB_SEGSZ];
-};
-
-struct qib_mregion {
- struct ib_pd *pd; /* shares refcnt of ibmr.pd */
- u64 user_base; /* User's address for this region */
- u64 iova; /* IB start address of this region */
- size_t length;
- u32 lkey;
- u32 offset; /* offset (bytes) to start of region */
- int access_flags;
- u32 max_segs; /* number of qib_segs in all the arrays */
- u32 mapsz; /* size of the map array */
- u8 page_shift; /* 0 - non unform/non powerof2 sizes */
- u8 lkey_published; /* in global table */
- struct completion comp; /* complete when refcount goes to zero */
- struct rcu_head list;
- atomic_t refcount;
- struct qib_segarray *map[0]; /* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct qib_sge {
- struct qib_mregion *mr;
- void *vaddr; /* kernel virtual address of segment */
- u32 sge_length; /* length of the SGE */
- u32 length; /* remaining length of the segment */
- u16 m; /* current index: mr->map[m] */
- u16 n; /* current index: mr->map[m]->segs[n] */
-};
-
-/* Memory region */
-struct qib_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- u64 *pages;
- u32 npages;
- struct qib_mregion mr; /* must be last */
-};
-
-/*
- * Send work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->s_max_sge.
- */
-struct qib_swqe {
- union {
- struct ib_send_wr wr; /* don't use wr.sg_list */
- struct ib_ud_wr ud_wr;
- struct ib_reg_wr reg_wr;
- struct ib_rdma_wr rdma_wr;
- struct ib_atomic_wr atomic_wr;
- };
- u32 psn; /* first packet sequence number */
- u32 lpsn; /* last packet sequence number */
- u32 ssn; /* send sequence number */
- u32 length; /* total length of data in sg_list */
- struct qib_sge sg_list[0];
-};
-
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
- */
-struct qib_rwqe {
- u64 wr_id;
- u8 num_sge;
- struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct qib_rwq {
- u32 head; /* new work requests posted to the head */
- u32 tail; /* receives pull requests from here. */
- struct qib_rwqe wq[0];
-};
-
-struct qib_rq {
- struct qib_rwq *wq;
- u32 size; /* size of RWQE array */
- u8 max_sge;
- spinlock_t lock /* protect changes in this struct */
- ____cacheline_aligned_in_smp;
-};
-
-struct qib_srq {
- struct ib_srq ibsrq;
- struct qib_rq rq;
- struct qib_mmap_info *ip;
- /* send signal when number of RWQEs < limit */
- u32 limit;
-};
-
-struct qib_sge_state {
- struct qib_sge *sg_list; /* next SGE to be used if any */
- struct qib_sge sge; /* progress state for the current SGE */
- u32 total_len;
- u8 num_sge;
-};
-
-/*
- * This structure holds the information that the send tasklet needs
- * to send a RDMA read response or atomic operation.
- */
-struct qib_ack_entry {
- u8 opcode;
- u8 sent;
- u32 psn;
- u32 lpsn;
- union {
- struct qib_sge rdma_sge;
- u64 atomic_data;
- };
-};
-
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
- *
- * Common variables are protected by both r_rq.lock and s_lock in that order
- * which only happens in modify_qp() or changing the QP 'state'.
- */
-struct qib_qp {
- struct ib_qp ibqp;
- /* read mostly fields above and below */
- struct ib_ah_attr remote_ah_attr;
- struct ib_ah_attr alt_ah_attr;
- struct qib_qp __rcu *next; /* link list for QPN hash table */
- struct qib_swqe *s_wq; /* send work queue */
- struct qib_mmap_info *ip;
- struct qib_ib_header *s_hdr; /* next packet header to send */
- unsigned long timeout_jiffies; /* computed from timeout */
-
- enum ib_mtu path_mtu;
- u32 remote_qpn;
- u32 pmtu; /* decoded from path_mtu */
- u32 qkey; /* QKEY for this QP (for UD or RD) */
- u32 s_size; /* send work queue size */
- u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
-
- u8 state; /* QP state */
- u8 qp_access_flags;
- u8 alt_timeout; /* Alternate path timeout for this QP */
- u8 timeout; /* Timeout for this QP */
- u8 s_srate;
- u8 s_mig_state;
- u8 port_num;
- u8 s_pkey_index; /* PKEY index to use */
- u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
- u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
- u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
- u8 s_retry_cnt; /* number of times to retry */
- u8 s_rnr_retry_cnt;
- u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
- u8 s_max_sge; /* size of s_wq->sg_list */
- u8 s_draining;
-
- /* start of read/write fields */
-
- atomic_t refcount ____cacheline_aligned_in_smp;
- wait_queue_head_t wait;
-
-
- struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
- ____cacheline_aligned_in_smp;
- struct qib_sge_state s_rdma_read_sge;
-
- spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
- unsigned long r_aflags;
- u64 r_wr_id; /* ID for current receive WQE */
- u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
- u32 r_len; /* total length of r_sge */
- u32 r_rcv_len; /* receive data len processed */
- u32 r_psn; /* expected rcv packet sequence number */
- u32 r_msn; /* message sequence number */
-
- u8 r_state; /* opcode of last packet received */
- u8 r_flags;
- u8 r_head_ack_queue; /* index into s_ack_queue[] */
-
- struct list_head rspwait; /* link for waititing to respond */
-
- struct qib_sge_state r_sge; /* current receive data */
- struct qib_rq r_rq; /* receive work queue */
-
- spinlock_t s_lock ____cacheline_aligned_in_smp;
- struct qib_sge_state *s_cur_sge;
- u32 s_flags;
- struct qib_verbs_txreq *s_tx;
- struct qib_swqe *s_wqe;
- struct qib_sge_state s_sge; /* current send request data */
- struct qib_mregion *s_rdma_mr;
- atomic_t s_dma_busy;
- u32 s_cur_size; /* size of send packet in bytes */
- u32 s_len; /* total length of s_sge */
- u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
- u32 s_next_psn; /* PSN for next request */
- u32 s_last_psn; /* last response PSN processed */
- u32 s_sending_psn; /* lowest PSN that is being sent */
- u32 s_sending_hpsn; /* highest PSN that is being sent */
- u32 s_psn; /* current packet sequence number */
- u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
- u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
- u32 s_head; /* new entries added here */
- u32 s_tail; /* next entry to process */
- u32 s_cur; /* current work queue entry */
- u32 s_acked; /* last un-ACK'ed entry */
- u32 s_last; /* last completed entry */
- u32 s_ssn; /* SSN of tail entry */
- u32 s_lsn; /* limit sequence number (credit) */
- u16 s_hdrwords; /* size of s_hdr in 32 bit words */
- u16 s_rdma_ack_cnt;
- u8 s_state; /* opcode of last packet sent */
- u8 s_ack_state; /* opcode of packet to ACK */
- u8 s_nak_state; /* non-zero if NAK is pending */
- u8 r_nak_state; /* non-zero if NAK is pending */
- u8 s_retry; /* requester retry counter */
- u8 s_rnr_retry; /* requester RNR retry counter */
- u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
- u8 s_tail_ack_queue; /* index into s_ack_queue[] */
-
- struct qib_sge_state s_ack_rdma_sge;
- struct timer_list s_timer;
+struct qib_qp_priv {
+ struct qib_ib_header *s_hdr; /* next packet header to send */
struct list_head iowait; /* link for wait PIO buf */
-
+ atomic_t s_dma_busy;
+ struct qib_verbs_txreq *s_tx;
struct work_struct s_work;
-
wait_queue_head_t wait_dma;
-
- struct qib_sge r_sg_list[0] /* verified SGEs */
- ____cacheline_aligned_in_smp;
+ struct rvt_qp *owner;
};
-/*
- * Atomic bit definitions for r_aflags.
- */
-#define QIB_R_WRID_VALID 0
-#define QIB_R_REWIND_SGE 1
-
-/*
- * Bit definitions for r_flags.
- */
-#define QIB_R_REUSE_SGE 0x01
-#define QIB_R_RDMAR_SEQ 0x02
-#define QIB_R_RSP_NAK 0x04
-#define QIB_R_RSP_SEND 0x08
-#define QIB_R_COMM_EST 0x10
-
-/*
- * Bit definitions for s_flags.
- *
- * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
- * QIB_S_BUSY - send tasklet is processing the QP
- * QIB_S_TIMER - the RC retry timer is active
- * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
- * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
- * before processing the next SWQE
- * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
- * before processing the next SWQE
- * QIB_S_WAIT_RNR - waiting for RNR timeout
- * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- * next send completion entry not via send DMA
- * QIB_S_WAIT_PIO - waiting for a send buffer to be available
- * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
- * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
- * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
- * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
- * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
- * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
- */
-#define QIB_S_SIGNAL_REQ_WR 0x0001
-#define QIB_S_BUSY 0x0002
-#define QIB_S_TIMER 0x0004
-#define QIB_S_RESP_PENDING 0x0008
-#define QIB_S_ACK_PENDING 0x0010
-#define QIB_S_WAIT_FENCE 0x0020
-#define QIB_S_WAIT_RDMAR 0x0040
-#define QIB_S_WAIT_RNR 0x0080
-#define QIB_S_WAIT_SSN_CREDIT 0x0100
-#define QIB_S_WAIT_DMA 0x0200
-#define QIB_S_WAIT_PIO 0x0400
-#define QIB_S_WAIT_TX 0x0800
-#define QIB_S_WAIT_DMA_DESC 0x1000
-#define QIB_S_WAIT_KMEM 0x2000
-#define QIB_S_WAIT_PSN 0x4000
-#define QIB_S_WAIT_ACK 0x8000
-#define QIB_S_SEND_ONE 0x10000
-#define QIB_S_UNLIMITED_CREDIT 0x20000
-
-/*
- * Wait flags that would prevent any packet type from being sent.
- */
-#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
- QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
-
-/*
- * Wait flags that would prevent send work requests from making progress.
- */
-#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
- QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
- QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
-
-#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
-
#define QIB_PSN_CREDIT 16
-/*
- * Since struct qib_swqe is not a fixed size, we can't simply index into
- * struct qib_qp.s_wq. This function does the array index computation.
- */
-static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
- unsigned n)
-{
- return (struct qib_swqe *)((char *)qp->s_wq +
- (sizeof(struct qib_swqe) +
- qp->s_max_sge *
- sizeof(struct qib_sge)) * n);
-}
-
-/*
- * Since struct qib_rwqe is not a fixed size, we can't simply index into
- * struct qib_rwq.wq. This function does the array index computation.
- */
-static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
-{
- return (struct qib_rwqe *)
- ((char *) rq->wq->wq +
- (sizeof(struct qib_rwqe) +
- rq->max_sge * sizeof(struct ib_sge)) * n);
-}
-
-/*
- * QPN-map pages start out as NULL, they get allocated upon
- * first use and are never deallocated. This way,
- * large bitmaps are not allocated unless large numbers of QPs are used.
- */
-struct qpn_map {
- void *page;
-};
-
-struct qib_qpn_table {
- spinlock_t lock; /* protect changes in this struct */
- unsigned flags; /* flags for QP0/1 allocated for each port */
- u32 last; /* last QP number allocated */
- u32 nmaps; /* size of the map table */
- u16 limit;
- u16 mask;
- /* bit map of free QP numbers other than 0/1 */
- struct qpn_map map[QPNMAP_ENTRIES];
-};
-
-#define MAX_LKEY_TABLE_BITS 23
-
-struct qib_lkey_table {
- spinlock_t lock; /* protect changes in this struct */
- u32 next; /* next unused index (speeds search) */
- u32 gen; /* generation count */
- u32 max; /* size of the table */
- struct qib_mregion __rcu **table;
-};
-
struct qib_opcode_stats {
u64 n_packets; /* number of packets */
u64 n_bytes; /* total number of bytes */
@@ -682,21 +218,9 @@ struct qib_pma_counters {
};
struct qib_ibport {
- struct qib_qp __rcu *qp0;
- struct qib_qp __rcu *qp1;
- struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
- struct qib_ah *sm_ah;
- struct qib_ah *smi_ah;
- struct rb_root mcast_tree;
- spinlock_t lock; /* protect changes in this struct */
-
- /* non-zero when timer is set */
- unsigned long mkey_lease_timeout;
- unsigned long trap_timeout;
- __be64 gid_prefix; /* in network order */
- __be64 mkey;
+ struct rvt_ibport rvp;
+ struct rvt_ah *smi_ah;
__be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
- u64 tid; /* TID for traps */
struct qib_pma_counters __percpu *pmastats;
u64 z_unicast_xmit; /* starting count for PMA */
u64 z_unicast_rcv; /* starting count for PMA */
@@ -715,82 +239,25 @@ struct qib_ibport {
u32 z_local_link_integrity_errors; /* starting count for PMA */
u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
u32 z_vl15_dropped; /* starting count for PMA */
- u32 n_rc_resends;
- u32 n_rc_acks;
- u32 n_rc_qacks;
- u32 n_rc_delayed_comp;
- u32 n_seq_naks;
- u32 n_rdma_seq;
- u32 n_rnr_naks;
- u32 n_other_naks;
- u32 n_loop_pkts;
- u32 n_pkt_drops;
- u32 n_vl15_dropped;
- u32 n_rc_timeouts;
- u32 n_dmawait;
- u32 n_unaligned;
- u32 n_rc_dupreq;
- u32 n_rc_seqnak;
- u32 port_cap_flags;
- u32 pma_sample_start;
- u32 pma_sample_interval;
- __be16 pma_counter_select[5];
- u16 pma_tag;
- u16 pkey_violations;
- u16 qkey_violations;
- u16 mkey_violations;
- u16 mkey_lease_period;
- u16 sm_lid;
- u16 repress_traps;
- u8 sm_sl;
- u8 mkeyprot;
- u8 subnet_timeout;
- u8 vl_high_limit;
u8 sl_to_vl[16];
-
};
-
struct qib_ibdev {
- struct ib_device ibdev;
- struct list_head pending_mmaps;
- spinlock_t mmap_offset_lock; /* protect mmap_offset */
- u32 mmap_offset;
- struct qib_mregion __rcu *dma_mr;
-
- /* QP numbers are shared by all IB ports */
- struct qib_qpn_table qpn_table;
- struct qib_lkey_table lk_table;
+ struct rvt_dev_info rdi;
+
struct list_head piowait; /* list for wait PIO buf */
struct list_head dmawait; /* list for wait DMA */
struct list_head txwait; /* list for wait qib_verbs_txreq */
struct list_head memwait; /* list for wait kernel memory */
struct list_head txreq_free;
struct timer_list mem_timer;
- struct qib_qp __rcu **qp_table;
struct qib_pio_header *pio_hdrs;
dma_addr_t pio_hdrs_phys;
- /* list of QPs waiting for RNR timer */
- spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
- u32 qp_table_size; /* size of the hash table */
u32 qp_rnd; /* random bytes for hash */
- spinlock_t qpt_lock;
u32 n_piowait;
u32 n_txwait;
- u32 n_pds_allocated; /* number of PDs allocated for device */
- spinlock_t n_pds_lock;
- u32 n_ahs_allocated; /* number of AHs allocated for device */
- spinlock_t n_ahs_lock;
- u32 n_cqs_allocated; /* number of CQs allocated for device */
- spinlock_t n_cqs_lock;
- u32 n_qps_allocated; /* number of QPs allocated for device */
- spinlock_t n_qps_lock;
- u32 n_srqs_allocated; /* number of SRQs allocated for device */
- spinlock_t n_srqs_lock;
- u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
- spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
/* per HCA debugfs */
struct dentry *qib_ibdev_dbg;
@@ -813,56 +280,27 @@ struct qib_verbs_counters {
u32 vl15_dropped;
};
-static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct qib_mr, ibmr);
-}
-
-static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct qib_pd, ibpd);
-}
-
-static inline struct qib_ah *to_iah(struct ib_ah *ibah)
-{
- return container_of(ibah, struct qib_ah, ibah);
-}
-
-static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct qib_cq, ibcq);
-}
-
-static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
-{
- return container_of(ibsrq, struct qib_srq, ibsrq);
-}
-
-static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct qib_qp, ibqp);
-}
-
static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
- return container_of(ibdev, struct qib_ibdev, ibdev);
+ struct rvt_dev_info *rdi;
+
+ rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
+ return container_of(rdi, struct qib_ibdev, rdi);
}
/*
* Send if not busy or waiting for I/O and either
* a RC response is pending or we can process send work requests.
*/
-static inline int qib_send_ok(struct qib_qp *qp)
+static inline int qib_send_ok(struct rvt_qp *qp)
{
- return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
- (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
- !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
+ return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
+ (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
}
-/*
- * This must be called with s_lock held.
- */
-void qib_schedule_send(struct qib_qp *qp);
+void _qib_schedule_send(struct rvt_qp *qp);
+void qib_schedule_send(struct rvt_qp *qp);
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
@@ -878,7 +316,7 @@ static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
-void qib_cap_mask_chg(struct qib_ibport *ibp);
+void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -886,8 +324,8 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
-int qib_create_agents(struct qib_ibdev *dev);
-void qib_free_agents(struct qib_ibdev *dev);
+void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
+void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);
/*
* Compare the lower 24 bits of the two values.
@@ -898,8 +336,6 @@ static inline int qib_cmp24(u32 a, u32 b)
return (((int) a) - ((int) b)) << 8;
}
-struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
-
int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
u64 *rwords, u64 *spkts, u64 *rpkts,
u64 *xmit_wait);
@@ -907,35 +343,17 @@ int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
int qib_get_counters(struct qib_pportdata *ppd,
struct qib_verbs_counters *cntrs);
-int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int qib_mcast_tree_empty(struct qib_ibport *ibp);
-
-__be32 qib_compute_aeth(struct qib_qp *qp);
-
-struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
-
-struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
-
-int qib_destroy_qp(struct ib_qp *ibqp);
-
-int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
-
-int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata);
-
-int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr);
-
-unsigned qib_free_all_qps(struct qib_devdata *dd);
+__be32 qib_compute_aeth(struct rvt_qp *qp);
-void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
-
-void qib_free_qpn_table(struct qib_qpn_table *qpt);
+/*
+ * Functions provided by qib driver for rdmavt to use
+ */
+unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp);
+void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
+void qib_notify_qp_reset(struct rvt_qp *qp);
+int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+ enum ib_qp_type type, u8 port, gfp_t gfp);
#ifdef CONFIG_DEBUG_FS
@@ -949,7 +367,7 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);
#endif
-void qib_get_credit(struct qib_qp *qp, u32 aeth);
+void qib_get_credit(struct rvt_qp *qp, u32 aeth);
unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
@@ -957,166 +375,66 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
void qib_put_txreq(struct qib_verbs_txreq *tx);
-int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len);
+int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct rvt_sge_state *ss, u32 len);
-void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
+void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
int release);
-void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
+void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release);
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
void qib_rc_rnr_retry(unsigned long arg);
-void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
+void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr);
-void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
+void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
-int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
+int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp);
-
-int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
-
-void qib_free_lkey(struct qib_mregion *mr);
-
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
- struct qib_sge *isge, struct ib_sge *sge, int acc);
-
-int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc);
-
-int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-
-struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
-
-int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata);
-
-int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-
-int qib_destroy_srq(struct ib_srq *ibsrq);
-
-int qib_cq_init(struct qib_devdata *dd);
-
-void qib_cq_exit(struct qib_devdata *dd);
-
-void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
-
-int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-
-struct ib_cq *qib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-
-int qib_destroy_cq(struct ib_cq *ibcq);
-
-int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
-
-int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
-
-struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
-
-struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata);
-
-int qib_dereg_mr(struct ib_mr *ibmr);
-
-struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_entries);
-
-int qib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents);
-
-int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr);
-
-struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-
-int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-
-int qib_unmap_fmr(struct list_head *fmr_list);
-
-int qib_dealloc_fmr(struct ib_fmr *ibfmr);
-
-static inline void qib_get_mr(struct qib_mregion *mr)
-{
- atomic_inc(&mr->refcount);
-}
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
void mr_rcu_callback(struct rcu_head *list);
-static inline void qib_put_mr(struct qib_mregion *mr)
-{
- if (unlikely(atomic_dec_and_test(&mr->refcount)))
- call_rcu(&mr->list, mr_rcu_callback);
-}
-
-static inline void qib_put_ss(struct qib_sge_state *ss)
-{
- while (ss->num_sge) {
- qib_put_mr(ss->sge.mr);
- if (--ss->num_sge)
- ss->sge = *ss->sg_list++;
- }
-}
-
-
-void qib_release_mmap_info(struct kref *ref);
+int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
- struct ib_ucontext *context,
- void *obj);
-
-void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
- u32 size, void *obj);
-
-int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
-
-void qib_migrate_qp(struct qib_qp *qp);
+void qib_migrate_qp(struct rvt_qp *qp);
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, struct qib_qp *qp, u32 bth0);
+ int has_grh, struct rvt_qp *qp, u32 bth0);
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
struct ib_global_route *grh, u32 hwords, u32 nwords);
-void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr,
u32 bth0, u32 bth2);
-void qib_do_send(struct work_struct *work);
+void _qib_do_send(struct work_struct *work);
+
+void qib_do_send(struct rvt_qp *qp);
-void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status);
-void qib_send_rc_ack(struct qib_qp *qp);
+void qib_send_rc_ack(struct rvt_qp *qp);
-int qib_make_rc_req(struct qib_qp *qp);
+int qib_make_rc_req(struct rvt_qp *qp);
-int qib_make_uc_req(struct qib_qp *qp);
+int qib_make_uc_req(struct rvt_qp *qp);
-int qib_make_ud_req(struct qib_qp *qp);
+int qib_make_ud_req(struct rvt_qp *qp);
int qib_register_ib_device(struct qib_devdata *);
@@ -1150,11 +468,11 @@ extern const enum ib_wc_opcode ib_qib_wc_opcode[];
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
-extern const int ib_qib_state_ops[];
+extern const int ib_rvt_state_ops[];
extern __be64 ib_qib_sys_image_guid; /* in network order */
-extern unsigned int ib_qib_lkey_table_size;
+extern unsigned int ib_rvt_lkey_table_size;
extern unsigned int ib_qib_max_cqes;
@@ -1178,6 +496,4 @@ extern unsigned int ib_qib_max_srq_wrs;
extern const u32 ib_qib_rnr_table[];
-extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
-
#endif /* QIB_VERBS_H */
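
The qp_priv hooks declared above are the mechanism by which the driver keeps hardware-specific send state now that struct qib_qp is replaced by the common struct rvt_qp: rdmavt allocates the shared QP and calls back for the private part, storing the result in qp->priv. A plausible sketch of the allocation side (field names come from struct qib_qp_priv above; the ERR_PTR failure convention is an assumption):

    static void *example_qp_priv_alloc(struct rvt_dev_info *rdi,
                                       struct rvt_qp *qp, gfp_t gfp)
    {
            struct qib_qp_priv *priv;

            priv = kzalloc(sizeof(*priv), gfp);
            if (!priv)
                    return ERR_PTR(-ENOMEM);    /* assumed convention */

            priv->owner = qp;   /* back-pointer used by the send work item */
            priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
            if (!priv->s_hdr) {
                    kfree(priv);
                    return ERR_PTR(-ENOMEM);
            }
            init_waitqueue_head(&priv->wait_dma);
            INIT_WORK(&priv->s_work, _qib_do_send);
            INIT_LIST_HEAD(&priv->iowait);
            return priv;
    }
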
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
deleted file mode 100644
index b2fb5286dbd9..000000000000
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/rculist.h>
-
-#include "qib.h"
-
-/**
- * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
- * @qp: the QP to link
- */
-static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
-{
- struct qib_mcast_qp *mqp;
-
- mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
- if (!mqp)
- goto bail;
-
- mqp->qp = qp;
- atomic_inc(&qp->refcount);
-
-bail:
- return mqp;
-}
-
-static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
-{
- struct qib_qp *qp = mqp->qp;
-
- /* Notify qib_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-
- kfree(mqp);
-}
-
-/**
- * qib_mcast_alloc - allocate the multicast GID structure
- * @mgid: the multicast GID
- *
- * A list of QPs will be attached to this structure.
- */
-static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
-{
- struct qib_mcast *mcast;
-
- mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
- if (!mcast)
- goto bail;
-
- mcast->mgid = *mgid;
- INIT_LIST_HEAD(&mcast->qp_list);
- init_waitqueue_head(&mcast->wait);
- atomic_set(&mcast->refcount, 0);
- mcast->n_attached = 0;
-
-bail:
- return mcast;
-}
-
-static void qib_mcast_free(struct qib_mcast *mcast)
-{
- struct qib_mcast_qp *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
- qib_mcast_qp_free(p);
-
- kfree(mcast);
-}
-
-/**
- * qib_mcast_find - search the global table for the given multicast GID
- * @ibp: the IB port structure
- * @mgid: the multicast GID to search for
- *
- * Returns NULL if not found.
- *
- * The caller is responsible for decrementing the reference count if found.
- */
-struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
-{
- struct rb_node *n;
- unsigned long flags;
- struct qib_mcast *mcast;
-
- spin_lock_irqsave(&ibp->lock, flags);
- n = ibp->mcast_tree.rb_node;
- while (n) {
- int ret;
-
- mcast = rb_entry(n, struct qib_mcast, rb_node);
-
- ret = memcmp(mgid->raw, mcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0)
- n = n->rb_left;
- else if (ret > 0)
- n = n->rb_right;
- else {
- atomic_inc(&mcast->refcount);
- spin_unlock_irqrestore(&ibp->lock, flags);
- goto bail;
- }
- }
- spin_unlock_irqrestore(&ibp->lock, flags);
-
- mcast = NULL;
-
-bail:
- return mcast;
-}
-
-/**
- * qib_mcast_add - insert mcast GID into table and attach QP struct
- * @mcast: the mcast GID table
- * @mqp: the QP to attach
- *
- * Return zero if both were added. Return EEXIST if the GID was already in
- * the table but the QP was added. Return ESRCH if the QP was already
- * attached and neither structure was added.
- */
-static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
- struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
-{
- struct rb_node **n = &ibp->mcast_tree.rb_node;
- struct rb_node *pn = NULL;
- int ret;
-
- spin_lock_irq(&ibp->lock);
-
- while (*n) {
- struct qib_mcast *tmcast;
- struct qib_mcast_qp *p;
-
- pn = *n;
- tmcast = rb_entry(pn, struct qib_mcast, rb_node);
-
- ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0) {
- n = &pn->rb_left;
- continue;
- }
- if (ret > 0) {
- n = &pn->rb_right;
- continue;
- }
-
- /* Search the QP list to see if this is already there. */
- list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
- if (p->qp == mqp->qp) {
- ret = ESRCH;
- goto bail;
- }
- }
- if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
- ret = ENOMEM;
- goto bail;
- }
-
- tmcast->n_attached++;
-
- list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
- ret = EEXIST;
- goto bail;
- }
-
- spin_lock(&dev->n_mcast_grps_lock);
- if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
- spin_unlock(&dev->n_mcast_grps_lock);
- ret = ENOMEM;
- goto bail;
- }
-
- dev->n_mcast_grps_allocated++;
- spin_unlock(&dev->n_mcast_grps_lock);
-
- mcast->n_attached++;
-
- list_add_tail_rcu(&mqp->list, &mcast->qp_list);
-
- atomic_inc(&mcast->refcount);
- rb_link_node(&mcast->rb_node, pn, n);
- rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
-
- ret = 0;
-
-bail:
- spin_unlock_irq(&ibp->lock);
-
- return ret;
-}
-
-int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_ibport *ibp;
- struct qib_mcast *mcast;
- struct qib_mcast_qp *mqp;
- int ret;
-
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Allocate data structures since its better to do this outside of
- * spin locks and it will most likely be needed.
- */
- mcast = qib_mcast_alloc(gid);
- if (mcast == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
- mqp = qib_mcast_qp_alloc(qp);
- if (mqp == NULL) {
- qib_mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
- }
- ibp = to_iport(ibqp->device, qp->port_num);
- switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
- case ESRCH:
- /* Neither was used: OK to attach the same QP twice. */
- qib_mcast_qp_free(mqp);
- qib_mcast_free(mcast);
- break;
-
- case EEXIST: /* The mcast wasn't used */
- qib_mcast_free(mcast);
- break;
-
- case ENOMEM:
- /* Exceeded the maximum number of mcast groups. */
- qib_mcast_qp_free(mqp);
- qib_mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
-
- default:
- break;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
- struct qib_mcast *mcast = NULL;
- struct qib_mcast_qp *p, *tmp, *delp = NULL;
- struct rb_node *n;
- int last = 0;
- int ret;
-
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
- return -EINVAL;
-
- spin_lock_irq(&ibp->lock);
-
- /* Find the GID in the mcast table. */
- n = ibp->mcast_tree.rb_node;
- while (1) {
- if (n == NULL) {
- spin_unlock_irq(&ibp->lock);
- return -EINVAL;
- }
-
- mcast = rb_entry(n, struct qib_mcast, rb_node);
- ret = memcmp(gid->raw, mcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0)
- n = n->rb_left;
- else if (ret > 0)
- n = n->rb_right;
- else
- break;
- }
-
- /* Search the QP list. */
- list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
- if (p->qp != qp)
- continue;
- /*
- * We found it, so remove it, but don't poison the forward
- * link until we are sure there are no list walkers.
- */
- list_del_rcu(&p->list);
- mcast->n_attached--;
- delp = p;
-
- /* If this was the last attached QP, remove the GID too. */
- if (list_empty(&mcast->qp_list)) {
- rb_erase(&mcast->rb_node, &ibp->mcast_tree);
- last = 1;
- }
- break;
- }
-
- spin_unlock_irq(&ibp->lock);
- /* QP not attached */
- if (!delp)
- return -EINVAL;
- /*
- * Wait for any list walkers to finish before freeing the
- * list element.
- */
- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
- qib_mcast_qp_free(delp);
-
- if (last) {
- atomic_dec(&mcast->refcount);
- wait_event(mcast->wait, !atomic_read(&mcast->refcount));
- qib_mcast_free(mcast);
- spin_lock_irq(&dev->n_mcast_grps_lock);
- dev->n_mcast_grps_allocated--;
- spin_unlock_irq(&dev->n_mcast_grps_lock);
- }
- return 0;
-}
-
-int qib_mcast_tree_empty(struct qib_ibport *ibp)
-{
- return ibp->mcast_tree.rb_node == NULL;
-}
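
The multicast code deleted here is not dropped from the kernel: the rdmavt Makefile added later in this diff builds an mcast.o, so the rb-tree-plus-RCU scheme presumably moves into the shared library largely intact. The teardown idiom is the subtle part and is worth restating (a sketch using the names from the deleted code above):

    /* Unlink the entry under the lock, then wait out every RCU list
     * walker before freeing -- the pattern from qib_multicast_detach(). */
    static void example_detach_and_free(struct qib_ibport *ibp,
                                        struct qib_mcast *mcast,
                                        struct qib_mcast_qp *p)
    {
            spin_lock_irq(&ibp->lock);
            list_del_rcu(&p->list);         /* forward link stays valid */
            mcast->n_attached--;
            spin_unlock_irq(&ibp->lock);

            /* Sleep until all walkers have dropped their references. */
            wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
            qib_mcast_qp_free(p);
    }
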
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 5f44b66ccb86..5b0248adf4ce 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -64,7 +64,7 @@ const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
case IB_QPS_ERR:
return "ERR";
default:
- return "UNKOWN STATE";
+ return "UNKNOWN STATE";
}
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 6cdb4d23f78f..a5bfbba6bbac 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -269,7 +269,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
union ib_gid gid;
struct ethtool_drvinfo info;
- struct ethtool_cmd cmd;
int qp_per_vf;
usnic_dbg("\n");
@@ -278,7 +277,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
mutex_lock(&us_ibdev->usdev_lock);
us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
- us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
memset(props, 0, sizeof(*props));
usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
&gid.raw[0]);
@@ -326,12 +324,12 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
- struct ethtool_cmd cmd;
+ struct ethtool_link_ksettings cmd;
usnic_dbg("\n");
mutex_lock(&us_ibdev->usdev_lock);
- us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
+ __ethtool_get_link_ksettings(us_ibdev->netdev, &cmd);
memset(props, 0, sizeof(*props));
props->lid = 0;
@@ -355,8 +353,8 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
props->qkey_viol_cntr = 0;
- eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
- &props->active_width);
+ eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed,
+ &props->active_width);
props->max_mtu = IB_MTU_4096;
props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
/* Userspace will adjust for hdrs */
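
These usnic hunks track the migration from the legacy ethtool get_settings callback to the link_ksettings API: the speed now lives in cmd.base.speed rather than the old split speed/speed_hi fields, and __ethtool_get_link_ksettings() hides whether the underlying driver implements the new or the legacy callback. A minimal sketch of the new query pattern (the pr_info() consumer is illustrative):

    static void example_log_link_speed(struct net_device *netdev)
    {
            struct ethtool_link_ksettings cmd;

            /* Replaces ops->get_settings(netdev, &legacy_cmd). */
            if (!__ethtool_get_link_ksettings(netdev, &cmd))
                    pr_info("link speed: %u Mb/s\n", cmd.base.speed);
    }
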
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 645a5f6e6c88..7209fbc03ccb 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -144,7 +144,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = 0;
while (npages) {
- ret = get_user_pages(current, current->mm, cur_base,
+ ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
1, !writable, page_list, NULL);
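
This one-line change tracks the mm API cleanup in which get_user_pages() lost its explicit task and mm arguments and now always operates on current->mm; the remaining arguments keep their order. The resulting call shape (a sketch with illustrative parameter names):

    static long example_pin_user_buffer(unsigned long cur_base,
                                        unsigned long nr_to_pin, int writable,
                                        struct page **page_list)
    {
            /* write = 1, force = !writable, no vmas needed -- matching
             * the usnic call above, minus the old tsk/mm arguments. */
            return get_user_pages(cur_base, nr_to_pin, 1, !writable,
                                  page_list, NULL);
    }
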
diff --git a/drivers/infiniband/sw/Makefile b/drivers/infiniband/sw/Makefile
new file mode 100644
index 000000000000..988b6a0101a4
--- /dev/null
+++ b/drivers/infiniband/sw/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt/
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
new file mode 100644
index 000000000000..11aa6a34bd71
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -0,0 +1,6 @@
+config INFINIBAND_RDMAVT
+ tristate "RDMA verbs transport library"
+ depends on 64BIT
+ default m
+ ---help---
+ This is a common software verbs provider for RDMA networks.
diff --git a/drivers/infiniband/sw/rdmavt/Makefile b/drivers/infiniband/sw/rdmavt/Makefile
new file mode 100644
index 000000000000..ccaa7992ac97
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/Makefile
@@ -0,0 +1,13 @@
+#
+# rdmavt driver
+#
+#
+#
+# Called from the kernel module build system.
+#
+obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt.o
+
+rdmavt-y := vt.o ah.o cq.o dma.o mad.o mcast.o mmap.o mr.o pd.o qp.o srq.o \
+ trace.o
+
+CFLAGS_trace.o = -I$(src)
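
The CFLAGS_trace.o line is the standard kernel tracepoint idiom: the object that defines the tracepoints re-includes its own trace header, so the source directory must be on the include path at compile time. The matching trace.c is conventionally little more than (a sketch of the convention, not the actual file):

    #define CREATE_TRACE_POINTS
    #include "trace.h"
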
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
new file mode 100644
index 000000000000..16c446142c2a
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/slab.h>
+#include "ah.h"
+#include "vt.h" /* for prints */
+
+/**
+ * rvt_check_ah - validate the attributes of an AH
+ * @ibdev: the ib device
+ * @ah_attr: the attributes of the AH
+ *
+ * If the driver supplies a more detailed check_ah callback, defer to it;
+ * otherwise just check the basics.
+ *
+ * Return: 0 on success
+ */
+int rvt_check_ah(struct ib_device *ibdev,
+ struct ib_ah_attr *ah_attr)
+{
+ int err;
+ struct ib_port_attr port_attr;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ enum rdma_link_layer link = rdma_port_get_link_layer(ibdev,
+ ah_attr->port_num);
+
+ err = ib_query_port(ibdev, ah_attr->port_num, &port_attr);
+ if (err)
+ return -EINVAL;
+ if (ah_attr->port_num < 1 ||
+ ah_attr->port_num > ibdev->phys_port_cnt)
+ return -EINVAL;
+ if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
+ ib_rate_to_mbps(ah_attr->static_rate) < 0)
+ return -EINVAL;
+ if ((ah_attr->ah_flags & IB_AH_GRH) &&
+ ah_attr->grh.sgid_index >= port_attr.gid_tbl_len)
+ return -EINVAL;
+ if (link != IB_LINK_LAYER_ETHERNET) {
+ if (ah_attr->dlid == 0)
+ return -EINVAL;
+ if (ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) &&
+ ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) &&
+ !(ah_attr->ah_flags & IB_AH_GRH))
+ return -EINVAL;
+ }
+ if (rdi->driver_f.check_ah)
+ return rdi->driver_f.check_ah(ibdev, ah_attr);
+ return 0;
+}
+EXPORT_SYMBOL(rvt_check_ah);
+
+/**
+ * rvt_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: newly allocated ah
+ */
+struct ib_ah *rvt_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ struct rvt_ah *ah;
+ struct rvt_dev_info *dev = ib_to_rvt(pd->device);
+ unsigned long flags;
+
+ if (rvt_check_ah(pd->device, ah_attr))
+ return ERR_PTR(-EINVAL);
+
+ ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ if (dev->n_ahs_allocated == dev->dparms.props.max_ah) {
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags); /* pairs with irqsave */
+ kfree(ah);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev->n_ahs_allocated++;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ ah->attr = *ah_attr;
+ atomic_set(&ah->refcount, 0);
+
+ if (dev->driver_f.notify_new_ah)
+ dev->driver_f.notify_new_ah(pd->device, ah_attr, ah);
+
+ return &ah->ibah;
+}
+
+/**
+ * rvt_destroy_ah - Destroy an address handle
+ * @ibah: address handle
+ *
+ * Return: 0 on success
+ */
+int rvt_destroy_ah(struct ib_ah *ibah)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+ unsigned long flags;
+
+ if (atomic_read(&ah->refcount) != 0)
+ return -EBUSY;
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ dev->n_ahs_allocated--;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ kfree(ah);
+
+ return 0;
+}
+
+/**
+ * rvt_modify_ah - modify an ah with given attrs
+ * @ibah: address handle to modify
+ * @ah_attr: attrs to apply
+ *
+ * Return: 0 on success
+ */
+int rvt_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+
+ if (rvt_check_ah(ibah->device, ah_attr))
+ return -EINVAL;
+
+ ah->attr = *ah_attr;
+
+ return 0;
+}
+
+/**
+ * rvt_query_ah - return attrs for ah
+ * @ibah: address handle to query
+ * @ah_attr: buffer to return the attributes in
+ *
+ * Return: always 0
+ */
+int rvt_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+
+ *ah_attr = ah->attr;
+
+ return 0;
+}
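
rvt_check_ah() above layers generic sanity checks (port range, static rate, GRH index, DLID rules) in front of an optional per-driver hook, so a driver with stricter hardware rules registers one through driver_f.check_ah. A hypothetical hook for illustration (the SL check is an invented example, not qib's actual rule):

    static int example_check_ah(struct ib_device *ibdev,
                                struct ib_ah_attr *ah_attr)
    {
            if (ah_attr->sl > 15)   /* IB service levels are 4 bits wide */
                    return -EINVAL;
            return 0;
    }

    /* Hooked up during device setup:
     *     rdi->driver_f.check_ah = example_check_ah;
     */
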
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
new file mode 100644
index 000000000000..e9c36be87d79
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -0,0 +1,59 @@
+#ifndef DEF_RVTAH_H
+#define DEF_RVTAH_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+struct ib_ah *rvt_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr);
+int rvt_destroy_ah(struct ib_ah *ibah);
+int rvt_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
+int rvt_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
+
+#endif /* DEF_RVTAH_H */
diff --git a/drivers/staging/rdma/hfi1/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 4f046ffe7e60..b1ffc8b4a6c0 100644
--- a/drivers/staging/rdma/hfi1/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -48,25 +45,23 @@
*
*/
-#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
-
-#include "verbs.h"
-#include "hfi.h"
+#include "cq.h"
+#include "vt.h"
/**
- * hfi1_cq_enter - add a new entry to the completion queue
+ * rvt_cq_enter - add a new entry to the completion queue
* @cq: completion queue
* @entry: work completion entry to add
- * @sig: true if @entry is a solicited entry
+ * @solicited: true if @entry is solicited
*
* This may be called with qp->s_lock held.
*/
-void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
+void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
- struct hfi1_cq_wc *wc;
+ struct rvt_cq_wc *wc;
unsigned long flags;
u32 head;
u32 next;
@@ -79,11 +74,13 @@ void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
*/
wc = cq->queue;
head = wc->head;
- if (head >= (unsigned) cq->ibcq.cqe) {
+ if (head >= (unsigned)cq->ibcq.cqe) {
head = cq->ibcq.cqe;
next = 0;
- } else
+ } else {
next = head + 1;
+ }
+
if (unlikely(next == wc->tail)) {
spin_unlock_irqrestore(&cq->lock, flags);
if (cq->ibcq.event_handler) {
@@ -114,8 +111,9 @@ void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
wc->uqueue[head].port_num = entry->port_num;
/* Make sure entry is written before the head index. */
smp_wmb();
- } else
+ } else {
wc->kqueue[head] = *entry;
+ }
wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
@@ -126,10 +124,10 @@ void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
* This will cause send_complete() to be called in
* another thread.
*/
- smp_read_barrier_depends(); /* see hfi1_cq_exit */
- worker = cq->dd->worker;
+ smp_read_barrier_depends(); /* see rvt_cq_exit */
+ worker = cq->rdi->worker;
if (likely(worker)) {
- cq->notify = IB_CQ_NONE;
+ cq->notify = RVT_CQ_NONE;
cq->triggered++;
queue_kthread_work(worker, &cq->comptask);
}
@@ -137,59 +135,11 @@ void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
spin_unlock_irqrestore(&cq->lock, flags);
}
-
-/**
- * hfi1_poll_cq - poll for work completion entries
- * @ibcq: the completion queue to poll
- * @num_entries: the maximum number of entries to return
- * @entry: pointer to array where work completions are placed
- *
- * Returns the number of completion entries polled.
- *
- * This may be called from interrupt context. Also called by ib_poll_cq()
- * in the generic verbs code.
- */
-int hfi1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
- struct hfi1_cq *cq = to_icq(ibcq);
- struct hfi1_cq_wc *wc;
- unsigned long flags;
- int npolled;
- u32 tail;
-
- /* The kernel can only poll a kernel completion queue */
- if (cq->ip) {
- npolled = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&cq->lock, flags);
-
- wc = cq->queue;
- tail = wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
- if (tail == wc->head)
- break;
- /* The kernel doesn't need a RMB since it has the lock. */
- *entry = wc->kqueue[tail];
- if (tail >= cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- wc->tail = tail;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
-bail:
- return npolled;
-}
+EXPORT_SYMBOL(rvt_cq_enter);
static void send_complete(struct kthread_work *work)
{
- struct hfi1_cq *cq = container_of(work, struct hfi1_cq, comptask);
+ struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
/*
* The completion handler will most likely rearm the notification
@@ -217,26 +167,25 @@ static void send_complete(struct kthread_work *work)
}
/**
- * hfi1_create_cq - create a completion queue
+ * rvt_create_cq - create a completion queue
* @ibdev: the device this completion queue is attached to
* @attr: creation attributes
- * @context: unused by the driver
+ * @context: unused by the rdmavt driver
* @udata: user data for libibverbs.so
*
- * Returns a pointer to the completion queue or negative errno values
- * for failure.
- *
* Called by ib_create_cq() in the generic verbs code.
+ *
+ * Return: pointer to the completion queue or negative errno values
+ * for failure.
*/
-struct ib_cq *hfi1_create_cq(
- struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct hfi1_ibdev *dev = to_idev(ibdev);
- struct hfi1_cq *cq;
- struct hfi1_cq_wc *wc;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct rvt_cq *cq;
+ struct rvt_cq_wc *wc;
struct ib_cq *ret;
u32 sz;
unsigned int entries = attr->cqe;
@@ -244,11 +193,11 @@ struct ib_cq *hfi1_create_cq(
if (attr->flags)
return ERR_PTR(-EINVAL);
- if (entries < 1 || entries > hfi1_max_cqes)
+ if (entries < 1 || entries > rdi->dparms.props.max_cqe)
return ERR_PTR(-EINVAL);
/* Allocate the completion queue structure. */
- cq = kmalloc(sizeof(*cq), GFP_KERNEL);
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq)
return ERR_PTR(-ENOMEM);
@@ -272,12 +221,12 @@ struct ib_cq *hfi1_create_cq(
/*
* Return the address of the WC as the offset to mmap.
- * See hfi1_mmap() for details.
+ * See rvt_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
int err;
- cq->ip = hfi1_create_mmap_info(dev, sz, context, wc);
+ cq->ip = rvt_create_mmap_info(rdi, sz, context, wc);
if (!cq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wc;
@@ -289,23 +238,22 @@ struct ib_cq *hfi1_create_cq(
ret = ERR_PTR(err);
goto bail_ip;
}
- } else
- cq->ip = NULL;
+ }
- spin_lock(&dev->n_cqs_lock);
- if (dev->n_cqs_allocated == hfi1_max_cqs) {
- spin_unlock(&dev->n_cqs_lock);
+ spin_lock(&rdi->n_cqs_lock);
+ if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
+ spin_unlock(&rdi->n_cqs_lock);
ret = ERR_PTR(-ENOMEM);
goto bail_ip;
}
- dev->n_cqs_allocated++;
- spin_unlock(&dev->n_cqs_lock);
+ rdi->n_cqs_allocated++;
+ spin_unlock(&rdi->n_cqs_lock);
if (cq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
+ spin_lock_irq(&rdi->pending_lock);
+ list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
}
/*
@@ -313,14 +261,11 @@ struct ib_cq *hfi1_create_cq(
* The number of entries should be >= the number requested or return
* an error.
*/
- cq->dd = dd_from_dev(dev);
+ cq->rdi = rdi;
cq->ibcq.cqe = entries;
- cq->notify = IB_CQ_NONE;
- cq->triggered = 0;
+ cq->notify = RVT_CQ_NONE;
spin_lock_init(&cq->lock);
init_kthread_work(&cq->comptask, send_complete);
- wc->head = 0;
- wc->tail = 0;
cq->queue = wc;
ret = &cq->ibcq;
@@ -338,24 +283,24 @@ done:
}
/**
- * hfi1_destroy_cq - destroy a completion queue
+ * rvt_destroy_cq - destroy a completion queue
* @ibcq: the completion queue to destroy.
*
- * Returns 0 for success.
- *
* Called by ib_destroy_cq() in the generic verbs code.
+ *
+ * Return: always 0
*/
-int hfi1_destroy_cq(struct ib_cq *ibcq)
+int rvt_destroy_cq(struct ib_cq *ibcq)
{
- struct hfi1_ibdev *dev = to_idev(ibcq->device);
- struct hfi1_cq *cq = to_icq(ibcq);
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ struct rvt_dev_info *rdi = cq->rdi;
flush_kthread_work(&cq->comptask);
- spin_lock(&dev->n_cqs_lock);
- dev->n_cqs_allocated--;
- spin_unlock(&dev->n_cqs_lock);
+ spin_lock(&rdi->n_cqs_lock);
+ rdi->n_cqs_allocated--;
+ spin_unlock(&rdi->n_cqs_lock);
if (cq->ip)
- kref_put(&cq->ip->ref, hfi1_release_mmap_info);
+ kref_put(&cq->ip->ref, rvt_release_mmap_info);
else
vfree(cq->queue);
kfree(cq);
@@ -364,18 +309,18 @@ int hfi1_destroy_cq(struct ib_cq *ibcq)
}
/**
- * hfi1_req_notify_cq - change the notification type for a completion queue
+ * rvt_req_notify_cq - change the notification type for a completion queue
* @ibcq: the completion queue
* @notify_flags: the type of notification to request
*
- * Returns 0 for success.
- *
* This may be called from interrupt context. Also called by
* ib_req_notify_cq() in the generic verbs code.
+ *
+ * Return: 0 for success.
*/
-int hfi1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
- struct hfi1_cq *cq = to_icq(ibcq);
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
unsigned long flags;
int ret = 0;
@@ -397,24 +342,23 @@ int hfi1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
}
/**
- * hfi1_resize_cq - change the size of the CQ
+ * rvt_resize_cq - change the size of the CQ
* @ibcq: the completion queue
*
- * Returns 0 for success.
+ * Return: 0 for success.
*/
-int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
- struct hfi1_cq *cq = to_icq(ibcq);
- struct hfi1_cq_wc *old_wc;
- struct hfi1_cq_wc *wc;
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ struct rvt_cq_wc *old_wc;
+ struct rvt_cq_wc *wc;
u32 head, tail, n;
int ret;
u32 sz;
+ struct rvt_dev_info *rdi = cq->rdi;
- if (cqe < 1 || cqe > hfi1_max_cqes) {
- ret = -EINVAL;
- goto bail;
- }
+ if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
+ return -EINVAL;
/*
* Need to use vmalloc() if we want to support large #s of entries.
@@ -425,10 +369,8 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
else
sz += sizeof(struct ib_wc) * (cqe + 1);
wc = vmalloc_user(sz);
- if (!wc) {
- ret = -ENOMEM;
- goto bail;
- }
+ if (!wc)
+ return -ENOMEM;
/* Check that we can write the offset to mmap. */
if (udata && udata->outlen >= sizeof(__u64)) {
@@ -446,11 +388,11 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
*/
old_wc = cq->queue;
head = old_wc->head;
- if (head > (u32) cq->ibcq.cqe)
- head = (u32) cq->ibcq.cqe;
+ if (head > (u32)cq->ibcq.cqe)
+ head = (u32)cq->ibcq.cqe;
tail = old_wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
+ if (tail > (u32)cq->ibcq.cqe)
+ tail = (u32)cq->ibcq.cqe;
if (head < tail)
n = cq->ibcq.cqe + 1 + head - tail;
else
@@ -464,7 +406,7 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
wc->uqueue[n] = old_wc->uqueue[tail];
else
wc->kqueue[n] = old_wc->kqueue[tail];
- if (tail == (u32) cq->ibcq.cqe)
+ if (tail == (u32)cq->ibcq.cqe)
tail = 0;
else
tail++;
@@ -478,80 +420,131 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
vfree(old_wc);
if (cq->ip) {
- struct hfi1_ibdev *dev = to_idev(ibcq->device);
- struct hfi1_mmap_info *ip = cq->ip;
+ struct rvt_mmap_info *ip = cq->ip;
- hfi1_update_mmap_info(dev, ip, sz, wc);
+ rvt_update_mmap_info(rdi, ip, sz, wc);
/*
* Return the offset to mmap.
- * See hfi1_mmap() for details.
+ * See rvt_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
ret = ib_copy_to_udata(udata, &ip->offset,
sizeof(ip->offset));
if (ret)
- goto bail;
+ return ret;
}
- spin_lock_irq(&dev->pending_lock);
+ spin_lock_irq(&rdi->pending_lock);
if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
+ list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
}
- ret = 0;
- goto bail;
+ return 0;
bail_unlock:
spin_unlock_irq(&cq->lock);
bail_free:
vfree(wc);
-bail:
return ret;
}
-int hfi1_cq_init(struct hfi1_devdata *dd)
+/**
+ * rvt_poll_cq - poll for work completion entries
+ * @ibcq: the completion queue to poll
+ * @num_entries: the maximum number of entries to return
+ * @entry: pointer to array where work completions are placed
+ *
+ * This may be called from interrupt context. Also called by ib_poll_cq()
+ * in the generic verbs code.
+ *
+ * Return: the number of completion entries polled.
+ */
+int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ struct rvt_cq_wc *wc;
+ unsigned long flags;
+ int npolled;
+ u32 tail;
+
+ /* The kernel can only poll a kernel completion queue */
+ if (cq->ip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ wc = cq->queue;
+ tail = wc->tail;
+ if (tail > (u32)cq->ibcq.cqe)
+ tail = (u32)cq->ibcq.cqe;
+ for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+ if (tail == wc->head)
+ break;
+ /* The kernel doesn't need a RMB since it has the lock. */
+ *entry = wc->kqueue[tail];
+ if (tail >= cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ wc->tail = tail;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return npolled;
+}
+
+/**
+ * rvt_driver_cq_init - Init cq resources on behalf of driver
+ * @rdi: rvt dev structure
+ *
+ * Return: 0 on success
+ */
+int rvt_driver_cq_init(struct rvt_dev_info *rdi)
{
int ret = 0;
int cpu;
struct task_struct *task;
- if (dd->worker)
+ if (rdi->worker)
return 0;
- dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
- if (!dd->worker)
+ rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
+ if (!rdi->worker)
return -ENOMEM;
- init_kthread_worker(dd->worker);
+ init_kthread_worker(rdi->worker);
task = kthread_create_on_node(
kthread_worker_fn,
- dd->worker,
- dd->assigned_node_id,
- "hfi1_cq%d", dd->unit);
- if (IS_ERR(task))
- goto task_fail;
- cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
+ rdi->worker,
+ rdi->dparms.node,
+ "%s", rdi->dparms.cq_name);
+ if (IS_ERR(task)) {
+ kfree(rdi->worker);
+ rdi->worker = NULL;
+ return PTR_ERR(task);
+ }
+
+ cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
kthread_bind(task, cpu);
wake_up_process(task);
-out:
return ret;
-task_fail:
- ret = PTR_ERR(task);
- kfree(dd->worker);
- dd->worker = NULL;
- goto out;
}
-void hfi1_cq_exit(struct hfi1_devdata *dd)
+/**
+ * rvt_cq_exit - tear down cq resources
+ * @rdi: rvt dev structure
+ */
+void rvt_cq_exit(struct rvt_dev_info *rdi)
{
struct kthread_worker *worker;
- worker = dd->worker;
+ worker = rdi->worker;
if (!worker)
return;
/* blocks future queuing from send_complete() */
- dd->worker = NULL;
- smp_wmb(); /* See hfi1_cq_enter */
+ rdi->worker = NULL;
+ smp_wmb(); /* See rvt_cq_enter */
flush_kthread_worker(worker);
kthread_stop(worker->task);
kfree(worker);
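An aside on the index arithmetic used by rvt_cq_enter() and rvt_poll_cq() above: the queue allocates cqe + 1 slots so that head == tail means empty and "next slot == tail" means full. A standalone sketch of the wrap step, names illustrative only:

/* The ring holds cqe + 1 slots; an index wraps to 0 once it passes cqe,
 * so one slot is always left unused to distinguish full from empty.
 */
static u32 cq_next_index(u32 idx, u32 cqe)
{
	return (idx >= cqe) ? 0 : idx + 1;
}

/* producer side, before publishing an entry:
 *	if (cq_next_index(head, cqe) == tail)
 *		-> ring full, report IB_EVENT_CQ_ERR as rvt_cq_enter() does
 */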
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
new file mode 100644
index 000000000000..6182c29eff66
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -0,0 +1,64 @@
+#ifndef DEF_RVTCQ_H
+#define DEF_RVTCQ_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_cq.h>
+
+struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int rvt_destroy_cq(struct ib_cq *ibcq);
+int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
+int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+int rvt_driver_cq_init(struct rvt_dev_info *rdi);
+void rvt_cq_exit(struct rvt_dev_info *rdi);
+#endif /* DEF_RVTCQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
new file mode 100644
index 000000000000..33076a5eee2f
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/dma.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <rdma/ib_verbs.h>
+
+#include "dma.h"
+
+#define BAD_DMA_ADDRESS ((u64)0)
+
+/*
+ * The following functions implement driver specific replacements
+ * for the ib_dma_*() functions.
+ *
+ * These functions return kernel virtual addresses instead of
+ * device bus addresses since the driver uses the CPU to copy
+ * data instead of using hardware DMA.
+ */
+
+static int rvt_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == BAD_DMA_ADDRESS;
+}
+
+static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return BAD_DMA_ADDRESS;
+
+ return (u64)cpu_addr;
+}
+
+static void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is a stub, nothing to be done here */
+}
+
+static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return BAD_DMA_ADDRESS;
+
+ if (offset + size > PAGE_SIZE)
+ return BAD_DMA_ADDRESS;
+
+ addr = (u64)page_address(page);
+ if (addr)
+ addr += offset;
+
+ return addr;
+}
+
+static void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is a stub, nothing to be done here */
+}
+
+static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return 0;
+
+ for_each_sg(sgl, sg, nents, i) {
+ addr = (u64)page_address(sg_page(sg));
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
+ }
+ return ret;
+}
+
+static void rvt_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ /* This is a stub, nothing to be done here */
+}
+
+static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void rvt_sync_single_for_device(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+ if (dma_handle)
+ *dma_handle = (u64)addr;
+ return addr;
+}
+
+static void rvt_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
+ .mapping_error = rvt_mapping_error,
+ .map_single = rvt_dma_map_single,
+ .unmap_single = rvt_dma_unmap_single,
+ .map_page = rvt_dma_map_page,
+ .unmap_page = rvt_dma_unmap_page,
+ .map_sg = rvt_map_sg,
+ .unmap_sg = rvt_unmap_sg,
+ .sync_single_for_cpu = rvt_sync_single_for_cpu,
+ .sync_single_for_device = rvt_sync_single_for_device,
+ .alloc_coherent = rvt_dma_alloc_coherent,
+ .free_coherent = rvt_dma_free_coherent
+};
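Since these ops replace real DMA mapping with plain kernel virtual addresses, a caller effectively gets back a pointer it can memcpy() through. A hedged illustration — ib_dma_map_page() and ib_dma_mapping_error() are the ordinary ib_core wrappers; the buffer names are assumptions:

/* With rvt_default_dma_mapping_ops installed on the ib_device, the
 * "dma address" below is simply (u64)page_address(page) + offset.
 */
u64 dma_addr = ib_dma_map_page(ibdev, page, 0, len, DMA_TO_DEVICE);

if (!ib_dma_mapping_error(ibdev, dma_addr))
	memcpy(dst, (void *)(uintptr_t)dma_addr, len); /* CPU copy, no HW DMA */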
diff --git a/drivers/infiniband/sw/rdmavt/dma.h b/drivers/infiniband/sw/rdmavt/dma.h
new file mode 100644
index 000000000000..979f07e09195
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/dma.h
@@ -0,0 +1,53 @@
+#ifndef DEF_RDMAVTDMA_H
+#define DEF_RDMAVTDMA_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+extern struct ib_dma_mapping_ops rvt_default_dma_mapping_ops;
+
+#endif /* DEF_RDMAVTDMA_H */
diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c
new file mode 100644
index 000000000000..f6e99778d7ca
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mad.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/ib_mad.h>
+#include "mad.h"
+#include "vt.h"
+
+/**
+ * rvt_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port_num: the port number this packet came in on, 1-based from ib core
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in_mad: the incoming MAD
+ * @out_mad: any outgoing MAD reply
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ *
+ * Return: IB_MAD_RESULT_SUCCESS or error
+ */
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ /*
+ * MAD processing is quite different between hfi1 and qib. Therefore this
+ * is expected to be provided by the driver. Other drivers in the future
+ * may choose to implement this but it should not be made into a
+ * requirement.
+ */
+ if (ibport_num_to_idx(ibdev, port_num) < 0)
+ return -EINVAL;
+
+ return IB_MAD_RESULT_FAILURE;
+}
+
+static void rvt_send_mad_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_wc *mad_send_wc)
+{
+ ib_free_send_mad(mad_send_wc->send_buf);
+}
+
+/**
+ * rvt_create_mad_agents - create mad agents
+ * @rdi: rvt dev struct
+ *
+ * If driver needs to be notified of mad agent creation then call back
+ *
+ * Return: 0 on success
+ */
+int rvt_create_mad_agents(struct rvt_dev_info *rdi)
+{
+ struct ib_mad_agent *agent;
+ struct rvt_ibport *rvp;
+ int p;
+ int ret;
+
+ for (p = 0; p < rdi->dparms.nports; p++) {
+ rvp = rdi->ports[p];
+ agent = ib_register_mad_agent(&rdi->ibdev, p + 1,
+ IB_QPT_SMI,
+ NULL, 0, rvt_send_mad_handler,
+ NULL, NULL, 0);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+
+ rvp->send_agent = agent;
+
+ if (rdi->driver_f.notify_create_mad_agent)
+ rdi->driver_f.notify_create_mad_agent(rdi, p);
+ }
+
+ return 0;
+
+err:
+ for (p = 0; p < rdi->dparms.nports; p++) {
+ rvp = rdi->ports[p];
+ if (rvp->send_agent) {
+ agent = rvp->send_agent;
+ rvp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ if (rdi->driver_f.notify_free_mad_agent)
+ rdi->driver_f.notify_free_mad_agent(rdi, p);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * rvt_free_mad_agents - free up mad agents
+ * @rdi: rvt dev struct
+ *
+ * If driver needs notification of mad agent removal make the call back
+ */
+void rvt_free_mad_agents(struct rvt_dev_info *rdi)
+{
+ struct ib_mad_agent *agent;
+ struct rvt_ibport *rvp;
+ int p;
+
+ for (p = 0; p < rdi->dparms.nports; p++) {
+ rvp = rdi->ports[p];
+ if (rvp->send_agent) {
+ agent = rvp->send_agent;
+ rvp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ if (rvp->sm_ah) {
+ ib_destroy_ah(&rvp->sm_ah->ibah);
+ rvp->sm_ah = NULL;
+ }
+
+ if (rdi->driver_f.notify_free_mad_agent)
+ rdi->driver_f.notify_free_mad_agent(rdi, p);
+ }
+}
+
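For drivers that set notify_create_mad_agent, here is a minimal sketch of the callback rvt_create_mad_agents() invokes per port; the function name and body are hypothetical:

static void example_notify_create_mad_agent(struct rvt_dev_info *rdi,
					    int port_idx)
{
	/* The agent is already stored in rdi->ports[port_idx]->send_agent;
	 * a driver might stash it for sending traps on this port.
	 */
	pr_info("SMI agent registered on port %d\n", port_idx + 1);
}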
diff --git a/drivers/infiniband/sw/rdmavt/mad.h b/drivers/infiniband/sw/rdmavt/mad.h
new file mode 100644
index 000000000000..a9d6eecc3723
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mad.h
@@ -0,0 +1,60 @@
+#ifndef DEF_RVTMAD_H
+#define DEF_RVTMAD_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
+int rvt_create_mad_agents(struct rvt_dev_info *rdi);
+void rvt_free_mad_agents(struct rvt_dev_info *rdi);
+#endif /* DEF_RVTMAD_H */
diff --git a/drivers/staging/rdma/hfi1/verbs_mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index afc6b4c61a1d..983d319ac976 100644
--- a/drivers/staging/rdma/hfi1/verbs_mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -48,17 +45,36 @@
*
*/
+#include <linux/slab.h>
+#include <linux/sched.h>
#include <linux/rculist.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
-#include "hfi.h"
+#include "mcast.h"
+
+/**
+ * rvt_driver_mcast_init - init resources for multicast
+ * @rdi: rvt dev struct
+ *
+ * This is per device that registers with rdmavt
+ */
+void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
+{
+ /*
+ * Anything that needs setup for multicast on a per driver or per rdi
+ * basis should be done in here.
+ */
+ spin_lock_init(&rdi->n_mcast_grps_lock);
+}
/**
* rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
* @qp: the QP to link
*/
-static struct hfi1_mcast_qp *mcast_qp_alloc(struct hfi1_qp *qp)
+static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
- struct hfi1_mcast_qp *mqp;
+ struct rvt_mcast_qp *mqp;
mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
if (!mqp)
@@ -71,9 +87,9 @@ bail:
return mqp;
}
-static void mcast_qp_free(struct hfi1_mcast_qp *mqp)
+static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
- struct hfi1_qp *qp = mqp->qp;
+ struct rvt_qp *qp = mqp->qp;
/* Notify rvt_destroy_qp() if it is waiting. */
if (atomic_dec_and_test(&qp->refcount))
@@ -88,11 +104,11 @@ static void mcast_qp_free(struct hfi1_mcast_qp *mqp)
*
* A list of QPs will be attached to this structure.
*/
-static struct hfi1_mcast *mcast_alloc(union ib_gid *mgid)
+static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid)
{
- struct hfi1_mcast *mcast;
+ struct rvt_mcast *mcast;
- mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
+ mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
if (!mcast)
goto bail;
@@ -100,75 +116,72 @@ static struct hfi1_mcast *mcast_alloc(union ib_gid *mgid)
INIT_LIST_HEAD(&mcast->qp_list);
init_waitqueue_head(&mcast->wait);
atomic_set(&mcast->refcount, 0);
- mcast->n_attached = 0;
bail:
return mcast;
}
-static void mcast_free(struct hfi1_mcast *mcast)
+static void rvt_mcast_free(struct rvt_mcast *mcast)
{
- struct hfi1_mcast_qp *p, *tmp;
+ struct rvt_mcast_qp *p, *tmp;
list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
- mcast_qp_free(p);
+ rvt_mcast_qp_free(p);
kfree(mcast);
}
/**
- * hfi1_mcast_find - search the global table for the given multicast GID
+ * rvt_mcast_find - search the global table for the given multicast GID
* @ibp: the IB port structure
* @mgid: the multicast GID to search for
*
- * Returns NULL if not found.
- *
* The caller is responsible for decrementing the reference count if found.
+ *
+ * Return: NULL if not found.
*/
-struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid)
+struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid)
{
struct rb_node *n;
unsigned long flags;
- struct hfi1_mcast *mcast;
+ struct rvt_mcast *found = NULL;
spin_lock_irqsave(&ibp->lock, flags);
n = ibp->mcast_tree.rb_node;
while (n) {
int ret;
+ struct rvt_mcast *mcast;
- mcast = rb_entry(n, struct hfi1_mcast, rb_node);
+ mcast = rb_entry(n, struct rvt_mcast, rb_node);
ret = memcmp(mgid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
- if (ret < 0)
+ if (ret < 0) {
n = n->rb_left;
- else if (ret > 0)
+ } else if (ret > 0) {
n = n->rb_right;
- else {
+ } else {
atomic_inc(&mcast->refcount);
- spin_unlock_irqrestore(&ibp->lock, flags);
- goto bail;
+ found = mcast;
+ break;
}
}
spin_unlock_irqrestore(&ibp->lock, flags);
-
- mcast = NULL;
-
-bail:
- return mcast;
+ return found;
}
+EXPORT_SYMBOL(rvt_mcast_find);
/**
* rvt_mcast_add - insert mcast GID into table and attach QP struct
* @mcast: the mcast GID table
* @mqp: the QP to attach
*
- * Return zero if both were added. Return EEXIST if the GID was already in
+ * Return: zero if both were added. Return EEXIST if the GID was already in
* the table but the QP was added. Return ESRCH if the QP was already
* attached and neither structure was added.
*/
-static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
- struct hfi1_mcast *mcast, struct hfi1_mcast_qp *mqp)
+static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
+ struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
struct rb_node **n = &ibp->mcast_tree.rb_node;
struct rb_node *pn = NULL;
@@ -177,11 +190,11 @@ static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
spin_lock_irq(&ibp->lock);
while (*n) {
- struct hfi1_mcast *tmcast;
- struct hfi1_mcast_qp *p;
+ struct rvt_mcast *tmcast;
+ struct rvt_mcast_qp *p;
pn = *n;
- tmcast = rb_entry(pn, struct hfi1_mcast, rb_node);
+ tmcast = rb_entry(pn, struct rvt_mcast, rb_node);
ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
sizeof(union ib_gid));
@@ -201,7 +214,8 @@ static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
goto bail;
}
}
- if (tmcast->n_attached == hfi1_max_mcast_qp_attached) {
+ if (tmcast->n_attached ==
+ rdi->dparms.props.max_mcast_qp_attach) {
ret = ENOMEM;
goto bail;
}
@@ -213,15 +227,15 @@ static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
goto bail;
}
- spin_lock(&dev->n_mcast_grps_lock);
- if (dev->n_mcast_grps_allocated == hfi1_max_mcast_grps) {
- spin_unlock(&dev->n_mcast_grps_lock);
+ spin_lock(&rdi->n_mcast_grps_lock);
+ if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
+ spin_unlock(&rdi->n_mcast_grps_lock);
ret = ENOMEM;
goto bail;
}
- dev->n_mcast_grps_allocated++;
- spin_unlock(&dev->n_mcast_grps_lock);
+ rdi->n_mcast_grps_allocated++;
+ spin_unlock(&rdi->n_mcast_grps_lock);
mcast->n_attached++;
@@ -239,92 +253,98 @@ bail:
return ret;
}
-int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+/**
+ * rvt_attach_mcast - attach a qp to a multicast group
+ * @ibqp: Infiniband qp
+ * @gid: multicast GID
+ * @lid: multicast lid
+ *
+ * Return: 0 on success
+ */
+int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- struct hfi1_qp *qp = to_iqp(ibqp);
- struct hfi1_ibdev *dev = to_idev(ibqp->device);
- struct hfi1_ibport *ibp;
- struct hfi1_mcast *mcast;
- struct hfi1_mcast_qp *mqp;
- int ret;
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
+ struct rvt_mcast *mcast;
+ struct rvt_mcast_qp *mqp;
+ int ret = -ENOMEM;
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
- ret = -EINVAL;
- goto bail;
- }
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+ return -EINVAL;
/*
* Allocate data structures since its better to do this outside of
* spin locks and it will most likely be needed.
*/
- mcast = mcast_alloc(gid);
- if (mcast == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
- mqp = mcast_qp_alloc(qp);
- if (mqp == NULL) {
- mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
- }
- ibp = to_iport(ibqp->device, qp->port_num);
- switch (mcast_add(dev, ibp, mcast, mqp)) {
- case ESRCH:
- /* Neither was used: OK to attach the same QP twice. */
- mcast_qp_free(mqp);
- mcast_free(mcast);
- break;
+ mcast = rvt_mcast_alloc(gid);
+ if (!mcast)
+ return -ENOMEM;
- case EEXIST: /* The mcast wasn't used */
- mcast_free(mcast);
- break;
+ mqp = rvt_mcast_qp_alloc(qp);
+ if (!mqp)
+ goto bail_mcast;
+ switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
+ case ESRCH:
+ /* Neither was used: OK to attach the same QP twice. */
+ ret = 0;
+ goto bail_mqp;
+ case EEXIST: /* The mcast wasn't used */
+ ret = 0;
+ goto bail_mcast;
case ENOMEM:
/* Exceeded the maximum number of mcast groups. */
- mcast_qp_free(mqp);
- mcast_free(mcast);
ret = -ENOMEM;
- goto bail;
-
+ goto bail_mqp;
default:
break;
}
- ret = 0;
+ return 0;
+
+bail_mqp:
+ rvt_mcast_qp_free(mqp);
+
+bail_mcast:
+ rvt_mcast_free(mcast);
-bail:
return ret;
}
-int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+/**
+ * rvt_detach_mcast - remove a qp from a multicast group
+ * @ibqp: Infiniband qp
+ * @gid: multicast GID
+ * @lid: multicast lid
+ *
+ * Return: 0 on success
+ */
+int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- struct hfi1_qp *qp = to_iqp(ibqp);
- struct hfi1_ibdev *dev = to_idev(ibqp->device);
- struct hfi1_ibport *ibp = to_iport(ibqp->device, qp->port_num);
- struct hfi1_mcast *mcast = NULL;
- struct hfi1_mcast_qp *p, *tmp;
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
+ struct rvt_mcast *mcast = NULL;
+ struct rvt_mcast_qp *p, *tmp, *delp = NULL;
struct rb_node *n;
int last = 0;
- int ret;
+ int ret = 0;
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
- ret = -EINVAL;
- goto bail;
- }
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+ return -EINVAL;
spin_lock_irq(&ibp->lock);
/* Find the GID in the mcast table. */
n = ibp->mcast_tree.rb_node;
while (1) {
- if (n == NULL) {
+ if (!n) {
spin_unlock_irq(&ibp->lock);
- ret = -EINVAL;
- goto bail;
+ return -EINVAL;
}
- mcast = rb_entry(n, struct hfi1_mcast, rb_node);
+ mcast = rb_entry(n, struct rvt_mcast, rb_node);
ret = memcmp(gid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0)
@@ -345,6 +365,7 @@ int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
*/
list_del_rcu(&p->list);
mcast->n_attached--;
+ delp = p;
/* If this was the last attached QP, remove the GID too. */
if (list_empty(&mcast->qp_list)) {
@@ -355,31 +376,42 @@ int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
spin_unlock_irq(&ibp->lock);
+ /* QP not attached */
+ if (!delp)
+ return -EINVAL;
+
+ /*
+ * Wait for any list walkers to finish before freeing the
+ * list element.
+ */
+ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+ rvt_mcast_qp_free(delp);
- if (p) {
- /*
- * Wait for any list walkers to finish before freeing the
- * list element.
- */
- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
- mcast_qp_free(p);
- }
if (last) {
atomic_dec(&mcast->refcount);
wait_event(mcast->wait, !atomic_read(&mcast->refcount));
- mcast_free(mcast);
- spin_lock_irq(&dev->n_mcast_grps_lock);
- dev->n_mcast_grps_allocated--;
- spin_unlock_irq(&dev->n_mcast_grps_lock);
+ rvt_mcast_free(mcast);
+ spin_lock_irq(&rdi->n_mcast_grps_lock);
+ rdi->n_mcast_grps_allocated--;
+ spin_unlock_irq(&rdi->n_mcast_grps_lock);
}
- ret = 0;
-
-bail:
- return ret;
+ return 0;
}
-int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp)
+/**
+ * rvt_mcast_tree_empty - determine if any QPs are attached to any mcast group
+ * @rdi: rvt dev struct
+ *
+ * Return: number of ports whose mcast tree is non-empty
+ */
+int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
- return ibp->mcast_tree.rb_node == NULL;
+ int i;
+ int in_use = 0;
+
+ for (i = 0; i < rdi->dparms.nports; i++)
+ if (rdi->ports[i]->mcast_tree.rb_node)
+ in_use++;
+ return in_use;
}
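A hedged sketch of the receive-path pattern rvt_mcast_find() (exported above) is meant for: look up the GID, walk the attached QPs under RCU, then drop the reference so a detaching thread waiting on mcast->wait can make progress. Variable names are assumptions:

struct rvt_mcast *mcast = rvt_mcast_find(ibp, &hdr_grh->dgid);
struct rvt_mcast_qp *p;

if (mcast) {
	rcu_read_lock();
	list_for_each_entry_rcu(p, &mcast->qp_list, list) {
		/* deliver the packet to p->qp */
	}
	rcu_read_unlock();
	/* pairs with atomic_inc() in rvt_mcast_find() */
	if (atomic_dec_return(&mcast->refcount) <= 1)
		wake_up(&mcast->wait);
}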
diff --git a/drivers/infiniband/sw/rdmavt/mcast.h b/drivers/infiniband/sw/rdmavt/mcast.h
new file mode 100644
index 000000000000..29f579267608
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mcast.h
@@ -0,0 +1,58 @@
+#ifndef DEF_RVTMCAST_H
+#define DEF_RVTMCAST_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+void rvt_driver_mcast_init(struct rvt_dev_info *rdi);
+int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+int rvt_mcast_tree_empty(struct rvt_dev_info *rdi);
+
+#endif /* DEF_RVTMCAST_H */
diff --git a/drivers/staging/rdma/hfi1/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index 5173b1c60b3d..e202b8142759 100644
--- a/drivers/staging/rdma/hfi1/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -48,68 +45,74 @@
*
*/
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
-#include <linux/errno.h>
#include <asm/pgtable.h>
+#include "mmap.h"
-#include "verbs.h"
+/**
+ * rvt_mmap_init - init link list and lock for mem map
+ * @rdi: rvt dev struct
+ */
+void rvt_mmap_init(struct rvt_dev_info *rdi)
+{
+ INIT_LIST_HEAD(&rdi->pending_mmaps);
+ spin_lock_init(&rdi->pending_lock);
+ rdi->mmap_offset = PAGE_SIZE;
+ spin_lock_init(&rdi->mmap_offset_lock);
+}
/**
- * hfi1_release_mmap_info - free mmap info structure
- * @ref: a pointer to the kref within struct hfi1_mmap_info
+ * rvt_release_mmap_info - free mmap info structure
+ * @ref: a pointer to the kref within struct rvt_mmap_info
*/
-void hfi1_release_mmap_info(struct kref *ref)
+void rvt_release_mmap_info(struct kref *ref)
{
- struct hfi1_mmap_info *ip =
- container_of(ref, struct hfi1_mmap_info, ref);
- struct hfi1_ibdev *dev = to_idev(ip->context->device);
+ struct rvt_mmap_info *ip =
+ container_of(ref, struct rvt_mmap_info, ref);
+ struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);
- spin_lock_irq(&dev->pending_lock);
+ spin_lock_irq(&rdi->pending_lock);
list_del(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
+ spin_unlock_irq(&rdi->pending_lock);
vfree(ip->obj);
kfree(ip);
}
-/*
- * open and close keep track of how many times the CQ is mapped,
- * to avoid releasing it.
- */
-static void hfi1_vma_open(struct vm_area_struct *vma)
+static void rvt_vma_open(struct vm_area_struct *vma)
{
- struct hfi1_mmap_info *ip = vma->vm_private_data;
+ struct rvt_mmap_info *ip = vma->vm_private_data;
kref_get(&ip->ref);
}
-static void hfi1_vma_close(struct vm_area_struct *vma)
+static void rvt_vma_close(struct vm_area_struct *vma)
{
- struct hfi1_mmap_info *ip = vma->vm_private_data;
+ struct rvt_mmap_info *ip = vma->vm_private_data;
- kref_put(&ip->ref, hfi1_release_mmap_info);
+ kref_put(&ip->ref, rvt_release_mmap_info);
}
-static struct vm_operations_struct hfi1_vm_ops = {
- .open = hfi1_vma_open,
- .close = hfi1_vma_close,
+static const struct vm_operations_struct rvt_vm_ops = {
+ .open = rvt_vma_open,
+ .close = rvt_vma_close,
};
/**
- * hfi1_mmap - create a new mmap region
+ * rvt_mmap - create a new mmap region
* @context: the IB user context of the process making the mmap() call
* @vma: the VMA to be initialized
- * Return zero if the mmap is OK. Otherwise, return an errno.
+ *
+ * Return: zero if the mmap is OK. Otherwise, return an errno.
*/
-int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
- struct hfi1_ibdev *dev = to_idev(context->device);
+ struct rvt_dev_info *rdi = ib_to_rvt(context->device);
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long size = vma->vm_end - vma->vm_start;
- struct hfi1_mmap_info *ip, *pp;
+ struct rvt_mmap_info *ip, *pp;
int ret = -EINVAL;
/*
@@ -117,53 +120,60 @@ int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
* Normally, this list is very short since a call to create a
* CQ, QP, or SRQ is soon followed by a call to mmap().
*/
- spin_lock_irq(&dev->pending_lock);
- list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+ spin_lock_irq(&rdi->pending_lock);
+ list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
pending_mmaps) {
/* Only the creator is allowed to mmap the object */
- if (context != ip->context || (__u64) offset != ip->offset)
+ if (context != ip->context || (__u64)offset != ip->offset)
continue;
/* Don't allow a mmap larger than the object. */
if (size > ip->size)
break;
list_del_init(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
+ spin_unlock_irq(&rdi->pending_lock);
ret = remap_vmalloc_range(vma, ip->obj, 0);
if (ret)
goto done;
- vma->vm_ops = &hfi1_vm_ops;
+ vma->vm_ops = &rvt_vm_ops;
vma->vm_private_data = ip;
- hfi1_vma_open(vma);
+ rvt_vma_open(vma);
goto done;
}
- spin_unlock_irq(&dev->pending_lock);
+ spin_unlock_irq(&rdi->pending_lock);
done:
return ret;
}
-/*
- * Allocate information for hfi1_mmap
+/**
+ * rvt_create_mmap_info - allocate information for rvt_mmap
+ * @rdi: rvt dev struct
+ * @size: size in bytes to map
+ * @context: user context
+ * @obj: opaque pointer to a cq, wq, etc.
+ *
+ * Return: rvt_mmap struct on success
*/
-struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev,
- u32 size,
- struct ib_ucontext *context,
- void *obj) {
- struct hfi1_mmap_info *ip;
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj)
+{
+ struct rvt_mmap_info *ip;
- ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
if (!ip)
- goto bail;
+ return ip;
size = PAGE_ALIGN(size);
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
+ spin_lock_irq(&rdi->mmap_offset_lock);
+ if (rdi->mmap_offset == 0)
+ rdi->mmap_offset = PAGE_SIZE;
+ ip->offset = rdi->mmap_offset;
+ rdi->mmap_offset += size;
+ spin_unlock_irq(&rdi->mmap_offset_lock);
INIT_LIST_HEAD(&ip->pending_mmaps);
ip->size = size;
@@ -171,21 +181,27 @@ struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev,
ip->obj = obj;
kref_init(&ip->ref);
-bail:
return ip;
}
-void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip,
- u32 size, void *obj)
+/**
+ * rvt_update_mmap_info - update a mem map
+ * @rdi: rvt dev struct
+ * @ip: mmap info pointer
+ * @size: size to grow by
+ * @obj: opaque pointer to cq, wq, etc.
+ */
+void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
+ u32 size, void *obj)
{
size = PAGE_ALIGN(size);
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
+ spin_lock_irq(&rdi->mmap_offset_lock);
+ if (rdi->mmap_offset == 0)
+ rdi->mmap_offset = PAGE_SIZE;
+ ip->offset = rdi->mmap_offset;
+ rdi->mmap_offset += size;
+ spin_unlock_irq(&rdi->mmap_offset_lock);
ip->size = size;
ip->obj = obj;
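
Note on intended use: a driver exposing a queue to user space would typically pair these helpers with rvt_mmap(): allocate the object with vmalloc_user(), wrap it via rvt_create_mmap_info(), copy ip->offset back through udata, and queue the info on rdi->pending_mmaps so the later mmap() call can find it. A minimal sketch of that pattern (error handling elided; `wq`, `s` and `udata` are placeholders), mirroring what rvt_create_qp() does later in this patch:

    wq = vmalloc_user(s);                        /* object must be vmalloc'ed */
    ip = rvt_create_mmap_info(rdi, s, context, wq);
    ret = ib_copy_to_udata(udata, &ip->offset, sizeof(ip->offset));
    spin_lock_irq(&rdi->pending_lock);
    list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
    spin_unlock_irq(&rdi->pending_lock);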
diff --git a/drivers/infiniband/sw/rdmavt/mmap.h b/drivers/infiniband/sw/rdmavt/mmap.h
new file mode 100644
index 000000000000..fab0e7b1daf9
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mmap.h
@@ -0,0 +1,63 @@
+#ifndef DEF_RDMAVTMMAP_H
+#define DEF_RDMAVTMMAP_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+void rvt_mmap_init(struct rvt_dev_info *rdi);
+void rvt_release_mmap_info(struct kref *ref);
+int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj);
+void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
+ u32 size, void *obj);
+
+#endif /* DEF_RDMAVTMMAP_H */
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
new file mode 100644
index 000000000000..0ff765bfd619
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <rdma/ib_umem.h>
+#include <rdma/rdma_vt.h>
+#include "vt.h"
+#include "mr.h"
+
+/**
+ * rvt_driver_mr_init - Init MR resources per driver
+ * @rdi: rvt dev struct
+ *
+ * Do any initialization needed when a driver registers with rdmavt.
+ *
+ * Return: 0 on success or errno on failure
+ */
+int rvt_driver_mr_init(struct rvt_dev_info *rdi)
+{
+ unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
+ unsigned lk_tab_size;
+ int i;
+
+ /*
+ * The top lkey_table_size bits are used to index the
+ * table. The lower 8 bits can be owned by the user (copied from
+ * the LKEY). The remaining bits act as a generation number or tag.
+ */
+ if (!lkey_table_size)
+ return -EINVAL;
+
+ spin_lock_init(&rdi->lkey_table.lock);
+
+ /* ensure generation is at least 4 bits */
+ if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
+ rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
+ lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
+ rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
+ lkey_table_size = rdi->dparms.lkey_table_size;
+ }
+ rdi->lkey_table.max = 1 << lkey_table_size;
+ lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
+ rdi->lkey_table.table = (struct rvt_mregion __rcu **)
+ vmalloc_node(lk_tab_size, rdi->dparms.node);
+ if (!rdi->lkey_table.table)
+ return -ENOMEM;
+
+ RCU_INIT_POINTER(rdi->dma_mr, NULL);
+ for (i = 0; i < rdi->lkey_table.max; i++)
+ RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
+
+ return 0;
+}
+
+/**
+ * rvt_mr_exit - clean up MR
+ * @rdi: rvt dev structure
+ *
+ * Called when drivers have unregistered or perhaps failed to register with us.
+ */
+void rvt_mr_exit(struct rvt_dev_info *rdi)
+{
+ if (rdi->dma_mr)
+ rvt_pr_err(rdi, "DMA MR not null!\n");
+
+ vfree(rdi->lkey_table.table);
+}
+
+static void rvt_deinit_mregion(struct rvt_mregion *mr)
+{
+ int i = mr->mapsz;
+
+ mr->mapsz = 0;
+ while (i)
+ kfree(mr->map[--i]);
+}
+
+static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
+ int count)
+{
+ int m, i = 0;
+
+ mr->mapsz = 0;
+ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
+ for (; i < m; i++) {
+ mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
+ if (!mr->map[i]) {
+ rvt_deinit_mregion(mr);
+ return -ENOMEM;
+ }
+ mr->mapsz++;
+ }
+ init_completion(&mr->comp);
+ /* count returning the ptr to user */
+ atomic_set(&mr->refcount, 1);
+ mr->pd = pd;
+ mr->max_segs = count;
+ return 0;
+}
+
+/**
+ * rvt_alloc_lkey - allocate an lkey
+ * @mr: memory region that this lkey protects
+ * @dma_region: 0->normal key, 1->restricted DMA key
+ *
+ * Return: 0 if successful, otherwise returns -errno.
+ *
+ * Increments mr reference count as required.
+ *
+ * Sets the lkey field of mr for non-dma regions.
+ *
+ */
+static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
+{
+ unsigned long flags;
+ u32 r;
+ u32 n;
+ int ret = 0;
+ struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+
+ rvt_get_mr(mr);
+ spin_lock_irqsave(&rkt->lock, flags);
+
+ /* special case for dma_mr lkey == 0 */
+ if (dma_region) {
+ struct rvt_mregion *tmr;
+
+ tmr = rcu_access_pointer(dev->dma_mr);
+ if (!tmr) {
+ rcu_assign_pointer(dev->dma_mr, mr);
+ mr->lkey_published = 1;
+ } else {
+ rvt_put_mr(mr);
+ }
+ goto success;
+ }
+
+ /* Find the next available LKEY */
+ r = rkt->next;
+ n = r;
+ for (;;) {
+ if (!rcu_access_pointer(rkt->table[r]))
+ break;
+ r = (r + 1) & (rkt->max - 1);
+ if (r == n)
+ goto bail;
+ }
+ rkt->next = (r + 1) & (rkt->max - 1);
+ /*
+ * Make sure lkey is never zero, which is reserved to indicate an
+ * unrestricted LKEY.
+ */
+ rkt->gen++;
+ /*
+ * Bits are capped in rvt_driver_mr_init() to ensure there are
+ * enough bits for the generation number.
+ */
+ mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
+ ((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
+ << 8);
+ if (mr->lkey == 0) {
+ mr->lkey |= 1 << 8;
+ rkt->gen++;
+ }
+ rcu_assign_pointer(rkt->table[r], mr);
+ mr->lkey_published = 1;
+success:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+out:
+ return ret;
+bail:
+ rvt_put_mr(mr);
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = -ENOMEM;
+ goto out;
+}
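
To make the bit packing above concrete: with lkey_table_size == 16, the table index r lands in bits 31:16, the generation tag (capped to 24 - 16 = 8 bits) in bits 15:8, and bits 7:0 stay user-owned. A worked example (the values are illustrative only):

    u32 r = 0x0123, gen = 0x45;                    /* slot and generation */
    u32 lkey = (r << (32 - 16)) |
               ((((1 << (24 - 16)) - 1) & gen) << 8);
    /* lkey == 0x01234500; the low byte remains free for the consumer */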
+
+/**
+ * rvt_free_lkey - free an lkey
+ * @mr: mr to free from tables
+ */
+static void rvt_free_lkey(struct rvt_mregion *mr)
+{
+ unsigned long flags;
+ u32 lkey = mr->lkey;
+ u32 r;
+ struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+ int freed = 0;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (!mr->lkey_published)
+ goto out;
+ if (lkey == 0) {
+ RCU_INIT_POINTER(dev->dma_mr, NULL);
+ } else {
+ r = lkey >> (32 - dev->dparms.lkey_table_size);
+ RCU_INIT_POINTER(rkt->table[r], NULL);
+ }
+ mr->lkey_published = 0;
+ freed++;
+out:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ if (freed) {
+ synchronize_rcu();
+ rvt_put_mr(mr);
+ }
+}
+
+static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
+{
+ struct rvt_mr *mr;
+ int rval = -ENOMEM;
+ int m;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
+ mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
+ if (!mr)
+ goto bail;
+
+ rval = rvt_init_mregion(&mr->mr, pd, count);
+ if (rval)
+ goto bail;
+ /*
+ * ib_reg_phys_mr() will initialize mr->ibmr except for
+ * lkey and rkey.
+ */
+ rval = rvt_alloc_lkey(&mr->mr, 0);
+ if (rval)
+ goto bail_mregion;
+ mr->ibmr.lkey = mr->mr.lkey;
+ mr->ibmr.rkey = mr->mr.lkey;
+done:
+ return mr;
+
+bail_mregion:
+ rvt_deinit_mregion(&mr->mr);
+bail:
+ kfree(mr);
+ mr = ERR_PTR(rval);
+ goto done;
+}
+
+static void __rvt_free_mr(struct rvt_mr *mr)
+{
+ rvt_deinit_mregion(&mr->mr);
+ rvt_free_lkey(&mr->mr);
+ vfree(mr);
+}
+
+/**
+ * rvt_get_dma_mr - get a DMA memory region
+ * @pd: protection domain for this memory region
+ * @acc: access flags
+ *
+ * Return: the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see dma.c).
+ */
+struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct rvt_mr *mr;
+ struct ib_mr *ret;
+ int rval;
+
+ if (ibpd_to_rvtpd(pd)->user)
+ return ERR_PTR(-EPERM);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ rval = rvt_init_mregion(&mr->mr, pd, 0);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail;
+ }
+
+ rval = rvt_alloc_lkey(&mr->mr, 1);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail_mregion;
+ }
+
+ mr->mr.access_flags = acc;
+ ret = &mr->ibmr;
+done:
+ return ret;
+
+bail_mregion:
+ rvt_deinit_mregion(&mr->mr);
+bail:
+ kfree(mr);
+ goto done;
+}
+
+/**
+ * rvt_reg_user_mr - register a userspace memory region
+ * @pd: protection domain for this memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @mr_access_flags: access flags for this memory region
+ * @udata: unused by the driver
+ *
+ * Return: the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct rvt_mr *mr;
+ struct ib_umem *umem;
+ struct scatterlist *sg;
+ int n, m, entry;
+ struct ib_mr *ret;
+
+ if (length == 0)
+ return ERR_PTR(-EINVAL);
+
+ umem = ib_umem_get(pd->uobject->context, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(umem))
+ return (void *)umem;
+
+ n = umem->nmap;
+
+ mr = __rvt_alloc_mr(n, pd);
+ if (IS_ERR(mr)) {
+ ret = (struct ib_mr *)mr;
+ goto bail_umem;
+ }
+
+ mr->mr.user_base = start;
+ mr->mr.iova = virt_addr;
+ mr->mr.length = length;
+ mr->mr.offset = ib_umem_offset(umem);
+ mr->mr.access_flags = mr_access_flags;
+ mr->umem = umem;
+
+ if (is_power_of_2(umem->page_size))
+ mr->mr.page_shift = ilog2(umem->page_size);
+ m = 0;
+ n = 0;
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ void *vaddr;
+
+ vaddr = page_address(sg_page(sg));
+ if (!vaddr) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail_inval;
+ }
+ mr->mr.map[m]->segs[n].vaddr = vaddr;
+ mr->mr.map[m]->segs[n].length = umem->page_size;
+ n++;
+ if (n == RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ return &mr->ibmr;
+
+bail_inval:
+ __rvt_free_mr(mr);
+
+bail_umem:
+ ib_umem_release(umem);
+
+ return ret;
+}
+
+/**
+ * rvt_dereg_mr - unregister and free a memory region
+ * @ibmr: the memory region to free
+ *
+ * Note that this is called to free MRs created by rvt_get_dma_mr()
+ * or rvt_reg_user_mr().
+ *
+ * Return: 0 on success.
+ */
+int rvt_dereg_mr(struct ib_mr *ibmr)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibmr->pd->device);
+ int ret = 0;
+ unsigned long timeout;
+
+ rvt_free_lkey(&mr->mr);
+
+ rvt_put_mr(&mr->mr); /* will set completion if last */
+ timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
+ if (!timeout) {
+ rvt_pr_err(rdi,
+ "rvt_dereg_mr timeout mr %p pd %p refcount %u\n",
+ mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
+ rvt_get_mr(&mr->mr);
+ ret = -EBUSY;
+ goto out;
+ }
+ rvt_deinit_mregion(&mr->mr);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+ kfree(mr);
+out:
+ return ret;
+}
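
rvt_dereg_mr() relies on rvt_get_mr()/rvt_put_mr() (declared in rdma_vt.h) following the usual refcount-plus-completion pattern: rvt_init_mregion() set the count to 1, and the final put fires mr->comp so the 5-second wait above can complete. A plausible shape of the put side, shown here only for orientation:

    static inline void rvt_put_mr(struct rvt_mregion *mr)
    {
            if (unlikely(atomic_dec_and_test(&mr->refcount)))
                    complete(&mr->comp);
    }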
+
+/**
+ * rvt_alloc_mr - Allocate a memory region usable with the verbs API
+ * @pd: protection domain for this memory region
+ * @mr_type: mem region type
+ * @max_num_sg: Max number of segments allowed
+ *
+ * Return: the memory region on success, otherwise return an errno.
+ */
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct rvt_mr *mr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = __rvt_alloc_mr(max_num_sg, pd);
+ if (IS_ERR(mr))
+ return (struct ib_mr *)mr;
+
+ return &mr->ibmr;
+}
+
+/**
+ * rvt_alloc_fmr - allocate a fast memory region
+ * @pd: the protection domain for this memory region
+ * @mr_access_flags: access flags for this memory region
+ * @fmr_attr: fast memory region attributes
+ *
+ * Return: the memory region on success, otherwise returns an errno.
+ */
+struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr)
+{
+ struct rvt_fmr *fmr;
+ int m;
+ struct ib_fmr *ret;
+ int rval = -ENOMEM;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
+ fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
+ if (!fmr)
+ goto bail;
+
+ rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
+ if (rval)
+ goto bail;
+
+ /*
+ * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
+ * rkey.
+ */
+ rval = rvt_alloc_lkey(&fmr->mr, 0);
+ if (rval)
+ goto bail_mregion;
+ fmr->ibfmr.rkey = fmr->mr.lkey;
+ fmr->ibfmr.lkey = fmr->mr.lkey;
+ /*
+ * Resources are allocated but no valid mapping (RKEY can't be
+ * used).
+ */
+ fmr->mr.access_flags = mr_access_flags;
+ fmr->mr.max_segs = fmr_attr->max_pages;
+ fmr->mr.page_shift = fmr_attr->page_shift;
+
+ ret = &fmr->ibfmr;
+done:
+ return ret;
+
+bail_mregion:
+ rvt_deinit_mregion(&fmr->mr);
+bail:
+ kfree(fmr);
+ ret = ERR_PTR(rval);
+ goto done;
+}
+
+/**
+ * rvt_map_phys_fmr - set up a fast memory region
+ * @ibmfr: the fast memory region to set up
+ * @page_list: the list of pages to associate with the fast memory region
+ * @list_len: the number of pages to associate with the fast memory region
+ * @iova: the virtual address of the start of the fast memory region
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success
+ */
+int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova)
+{
+ struct rvt_fmr *fmr = to_ifmr(ibfmr);
+ struct rvt_lkey_table *rkt;
+ unsigned long flags;
+ int m, n, i;
+ u32 ps;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
+
+ i = atomic_read(&fmr->mr.refcount);
+ if (i > 2)
+ return -EBUSY;
+
+ if (list_len > fmr->mr.max_segs)
+ return -EINVAL;
+
+ rkt = &rdi->lkey_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = iova;
+ fmr->mr.iova = iova;
+ ps = 1 << fmr->mr.page_shift;
+ fmr->mr.length = list_len * ps;
+ m = 0;
+ n = 0;
+ for (i = 0; i < list_len; i++) {
+ fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
+ fmr->mr.map[m]->segs[n].length = ps;
+ if (++n == RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return 0;
+}
+
+/**
+ * rvt_unmap_fmr - unmap fast memory regions
+ * @fmr_list: the list of fast memory regions to unmap
+ *
+ * Return: 0 on success.
+ */
+int rvt_unmap_fmr(struct list_head *fmr_list)
+{
+ struct rvt_fmr *fmr;
+ struct rvt_lkey_table *rkt;
+ unsigned long flags;
+ struct rvt_dev_info *rdi;
+
+ list_for_each_entry(fmr, fmr_list, ibfmr.list) {
+ rdi = ib_to_rvt(fmr->ibfmr.device);
+ rkt = &rdi->lkey_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * rvt_dealloc_fmr - deallocate a fast memory region
+ * @ibfmr: the fast memory region to deallocate
+ *
+ * Return: 0 on success.
+ */
+int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+ struct rvt_fmr *fmr = to_ifmr(ibfmr);
+ int ret = 0;
+ unsigned long timeout;
+
+ rvt_free_lkey(&fmr->mr);
+ rvt_put_mr(&fmr->mr); /* will set completion if last */
+ timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
+ if (!timeout) {
+ rvt_get_mr(&fmr->mr);
+ ret = -EBUSY;
+ goto out;
+ }
+ rvt_deinit_mregion(&fmr->mr);
+ kfree(fmr);
+out:
+ return ret;
+}
+
+/**
+ * rvt_lkey_ok - check IB SGE for validity and initialize
+ * @rkt: table containing lkey to check SGE against
+ * @pd: protection domain
+ * @isge: outgoing internal SGE
+ * @sge: SGE to check
+ * @acc: access flags
+ *
+ * Check the IB SGE for validity and initialize our internal version
+ * of it.
+ *
+ * Return: 1 if valid and successful, otherwise returns 0.
+ *
+ * Increments the reference count upon success.
+ *
+ */
+int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
+ struct rvt_sge *isge, struct ib_sge *sge, int acc)
+{
+ struct rvt_mregion *mr;
+ unsigned n, m;
+ size_t off;
+ struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
+
+ /*
+ * We use LKEY == zero for kernel virtual addresses
+ * (see rvt_get_dma_mr and dma.c).
+ */
+ rcu_read_lock();
+ if (sge->lkey == 0) {
+ if (pd->user)
+ goto bail;
+ mr = rcu_dereference(dev->dma_mr);
+ if (!mr)
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ isge->mr = mr;
+ isge->vaddr = (void *)sge->addr;
+ isge->length = sge->length;
+ isge->sge_length = sge->length;
+ isge->m = 0;
+ isge->n = 0;
+ goto ok;
+ }
+ mr = rcu_dereference(
+ rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
+ goto bail;
+
+ off = sge->addr - mr->user_base;
+ if (unlikely(sge->addr < mr->user_base ||
+ off + sge->length > mr->length ||
+ (mr->access_flags & acc) != acc))
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ off += mr->offset;
+ if (mr->page_shift) {
+ /*
+ * Page sizes are a uniform power of 2, so no loop is necessary;
+ * entries_spanned_by_off is the number of times the loop below
+ * would have executed.
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ isge->mr = mr;
+ isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ isge->length = mr->map[m]->segs[n].length - off;
+ isge->sge_length = sge->length;
+ isge->m = m;
+ isge->n = n;
+ok:
+ return 1;
+bail:
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(rvt_lkey_ok);
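
The divide in the page_shift branch is just the unrolled segment walk: for page_shift == 12 and a byte offset off == 0x5123, entries_spanned_by_off is 5 (five whole pages), the residual off becomes 0x123, and the segment coordinates follow directly:

    m = 5 / RVT_SEGSZ;      /* which map page       */
    n = 5 % RVT_SEGSZ;      /* which segment in it  */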
+
+/**
+ * rvt_rkey_ok - check the IB virtual address, length, and RKEY
+ * @qp: qp for validation
+ * @sge: SGE state
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Return: 1 if successful, otherwise 0.
+ *
+ * Increments the reference count upon success.
+ */
+int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+ struct rvt_mregion *mr;
+ unsigned n, m;
+ size_t off;
+
+ /*
+ * We use RKEY == zero for kernel virtual addresses
+ * (see rvt_get_dma_mr and dma.c).
+ */
+ rcu_read_lock();
+ if (rkey == 0) {
+ struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
+ struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ mr = rcu_dereference(rdi->dma_mr);
+ if (!mr)
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ sge->mr = mr;
+ sge->vaddr = (void *)vaddr;
+ sge->length = len;
+ sge->sge_length = len;
+ sge->m = 0;
+ sge->n = 0;
+ goto ok;
+ }
+
+ mr = rcu_dereference(
+ rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ off = vaddr - mr->iova;
+ if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+ (mr->access_flags & acc) == 0))
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ off += mr->offset;
+ if (mr->page_shift) {
+ /*
+ * Page sizes are a uniform power of 2, so no loop is necessary;
+ * entries_spanned_by_off is the number of times the loop below
+ * would have executed.
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ sge->mr = mr;
+ sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ sge->length = mr->map[m]->segs[n].length - off;
+ sge->sge_length = len;
+ sge->m = m;
+ sge->n = n;
+ok:
+ return 1;
+bail:
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(rvt_rkey_ok);
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
new file mode 100644
index 000000000000..69380512c6d1
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -0,0 +1,92 @@
+#ifndef DEF_RVTMR_H
+#define DEF_RVTMR_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+struct rvt_fmr {
+ struct ib_fmr ibfmr;
+ struct rvt_mregion mr; /* must be last */
+};
+
+struct rvt_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct rvt_mregion mr; /* must be last */
+};
+
+static inline struct rvt_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct rvt_fmr, ibfmr);
+}
+
+static inline struct rvt_mr *to_imr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct rvt_mr, ibmr);
+}
+
+int rvt_driver_mr_init(struct rvt_dev_info *rdi);
+void rvt_mr_exit(struct rvt_dev_info *rdi);
+
+/* Mem Regions */
+struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata);
+int rvt_dereg_mr(struct ib_mr *ibmr);
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
+struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova);
+int rvt_unmap_fmr(struct list_head *fmr_list);
+int rvt_dealloc_fmr(struct ib_fmr *ibfmr);
+
+#endif /* DEF_RVTMR_H */
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c
new file mode 100644
index 000000000000..d1292f324c67
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/pd.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/slab.h>
+#include "pd.h"
+
+/**
+ * rvt_alloc_pd - allocate a protection domain
+ * @ibdev: ib device
+ * @context: optional user context
+ * @udata: optional user data
+ *
+ * Allocate and keep track of a PD.
+ *
+ * Return: newly allocated PD on success, otherwise returns an errno.
+ */
+struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(ibdev);
+ struct rvt_pd *pd;
+ struct ib_pd *ret;
+
+ pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+ /*
+ * While we could continue allocating protection domains, constrained
+ * only by system resources, the IBTA spec defines a max_pd limit
+ * that can be set, and we need to check for that.
+ */
+
+ spin_lock(&dev->n_pds_lock);
+ if (dev->n_pds_allocated == dev->dparms.props.max_pd) {
+ spin_unlock(&dev->n_pds_lock);
+ kfree(pd);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_pds_allocated++;
+ spin_unlock(&dev->n_pds_lock);
+
+ /* ib_alloc_pd() will initialize pd->ibpd. */
+ pd->user = udata ? 1 : 0;
+
+ ret = &pd->ibpd;
+
+bail:
+ return ret;
+}
+
+/**
+ * rvt_dealloc_pd - Free PD
+ * @ibpd: PD to free
+ *
+ * Return: always 0
+ */
+int rvt_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
+ struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
+
+ spin_lock(&dev->n_pds_lock);
+ dev->n_pds_allocated--;
+ spin_unlock(&dev->n_pds_lock);
+
+ kfree(pd);
+
+ return 0;
+}
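
In-kernel consumers reach these through ib_alloc_pd()/ib_dealloc_pd() rather than calling rvt_alloc_pd() directly; the check-then-increment under n_pds_lock keeps two concurrent allocators from racing past the max_pd cap. An illustrative (hypothetical) kernel caller:

    struct ib_pd *pd = ib_alloc_pd(ibdev);  /* dispatches to rvt_alloc_pd */
    if (IS_ERR(pd))
            return PTR_ERR(pd);
    /* ... use pd ... */
    ib_dealloc_pd(pd);                      /* dispatches to rvt_dealloc_pd */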
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h
new file mode 100644
index 000000000000..1892ca4a9746
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/pd.h
@@ -0,0 +1,58 @@
+#ifndef DEF_RDMAVTPD_H
+#define DEF_RDMAVTPD_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int rvt_dealloc_pd(struct ib_pd *ibpd);
+
+#endif /* DEF_RDMAVTPD_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
new file mode 100644
index 000000000000..a9e3bcc522c4
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -0,0 +1,1696 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/hash.h>
+#include <linux/bitops.h>
+#include <linux/lockdep.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <rdma/ib_verbs.h>
+#include "qp.h"
+#include "vt.h"
+#include "trace.h"
+
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; rvt_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
+const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = 0,
+ [IB_QPS_INIT] = RVT_POST_RECV_OK,
+ [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
+ [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
+ RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
+ RVT_PROCESS_NEXT_SEND_OK,
+ [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
+ RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
+ [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
+ RVT_POST_SEND_OK | RVT_FLUSH_SEND,
+ [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
+ RVT_POST_SEND_OK | RVT_FLUSH_SEND,
+};
+EXPORT_SYMBOL(ib_rvt_state_ops);
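
Consumers are expected to test these flags before acting on a QP; a typical check, in the style hfi1/qib-derived drivers would use, drops receive work while the state disallows it:

    if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
            goto drop;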
+
+static void get_map_page(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map,
+ gfp_t gfp)
+{
+ unsigned long page = get_zeroed_page(gfp);
+
+ /*
+ * Free the page if someone raced with us installing it.
+ */
+
+ spin_lock(&qpt->lock);
+ if (map->page)
+ free_page(page);
+ else
+ map->page = (void *)page;
+ spin_unlock(&qpt->lock);
+}
+
+/**
+ * init_qpn_table - initialize the QP number table for a device
+ * @qpt: the QPN table
+ */
+static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
+{
+ u32 offset, i;
+ struct rvt_qpn_map *map;
+ int ret = 0;
+
+ if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
+ return -EINVAL;
+
+ spin_lock_init(&qpt->lock);
+
+ qpt->last = rdi->dparms.qpn_start;
+ qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
+
+ /*
+ * Drivers may want some QPs beyond what we need for verbs, so let
+ * them use our qpn table; no need for two. Let's go ahead and mark
+ * the bitmaps for those. The reserved range must be *after* the
+ * range from which verbs will pick.
+ */
+
+ /* Figure out number of bit maps needed before reserved range */
+ qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
+
+ /* This should always be zero */
+ offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
+
+ /* Starting with the first reserved bit map */
+ map = &qpt->map[qpt->nmaps];
+
+ rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
+ rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
+ for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
+ if (!map->page) {
+ get_map_page(qpt, map, GFP_KERNEL);
+ if (!map->page) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ set_bit(offset, map->page);
+ offset++;
+ if (offset == RVT_BITS_PER_PAGE) {
+ /* next page */
+ qpt->nmaps++;
+ map++;
+ offset = 0;
+ }
+ }
+ return ret;
+}
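
For scale, assuming 4 KiB pages and RVT_BITS_PER_PAGE == PAGE_SIZE * 8 == 32768, each bitmap page covers 32768 QPNs, so a reserved range starting at qpn_res_start == 0x8000 begins exactly at map[1], bit 0, which is why the computed offset "should always be zero":

    qpt->nmaps = 0x8000 / 32768;            /* == 1               */
    offset     = 0x8000 & (32768 - 1);      /* == 0, page aligned */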
+
+/**
+ * free_qpn_table - free the QP number table for a device
+ * @qpt: the QPN table
+ */
+static void free_qpn_table(struct rvt_qpn_table *qpt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
+ free_page((unsigned long)qpt->map[i].page);
+}
+
+/**
+ * rvt_driver_qp_init - Init driver qp resources
+ * @rdi: rvt dev structure
+ *
+ * Return: 0 on success
+ */
+int rvt_driver_qp_init(struct rvt_dev_info *rdi)
+{
+ int i;
+ int ret = -ENOMEM;
+
+ if (!rdi->dparms.qp_table_size)
+ return -EINVAL;
+
+ /*
+ * If driver is not doing any QP allocation then make sure it is
+ * providing the necessary QP functions.
+ */
+ if (!rdi->driver_f.free_all_qps ||
+ !rdi->driver_f.qp_priv_alloc ||
+ !rdi->driver_f.qp_priv_free ||
+ !rdi->driver_f.notify_qp_reset)
+ return -EINVAL;
+
+ /* allocate parent object */
+ rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
+ rdi->dparms.node);
+ if (!rdi->qp_dev)
+ return -ENOMEM;
+
+ /* allocate hash table */
+ rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
+ rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
+ rdi->qp_dev->qp_table =
+ kmalloc_node(rdi->qp_dev->qp_table_size *
+ sizeof(*rdi->qp_dev->qp_table),
+ GFP_KERNEL, rdi->dparms.node);
+ if (!rdi->qp_dev->qp_table)
+ goto no_qp_table;
+
+ for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
+ RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
+
+ spin_lock_init(&rdi->qp_dev->qpt_lock);
+
+ /* initialize qpn map */
+ if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
+ goto fail_table;
+
+ spin_lock_init(&rdi->n_qps_lock);
+
+ return 0;
+
+fail_table:
+ kfree(rdi->qp_dev->qp_table);
+ free_qpn_table(&rdi->qp_dev->qpn_table);
+
+no_qp_table:
+ kfree(rdi->qp_dev);
+
+ return ret;
+}
+
+/**
+ * rvt_free_all_qps - check for QPs still in use
+ * @rdi: rvt device info structure
+ *
+ * There should not be any QPs still in use.
+ * Free memory for table.
+ */
+static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
+{
+ unsigned long flags;
+ struct rvt_qp *qp;
+ unsigned n, qp_inuse = 0;
+ spinlock_t *ql; /* work around too long line below */
+
+ if (rdi->driver_f.free_all_qps)
+ qp_inuse = rdi->driver_f.free_all_qps(rdi);
+
+ qp_inuse += rvt_mcast_tree_empty(rdi);
+
+ if (!rdi->qp_dev)
+ return qp_inuse;
+
+ ql = &rdi->qp_dev->qpt_lock;
+ spin_lock_irqsave(ql, flags);
+ for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
+ qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
+ lockdep_is_held(ql));
+ RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
+
+ for (; qp; qp = rcu_dereference_protected(qp->next,
+ lockdep_is_held(ql)))
+ qp_inuse++;
+ }
+ spin_unlock_irqrestore(ql, flags);
+ synchronize_rcu();
+ return qp_inuse;
+}
+
+/**
+ * rvt_qp_exit - clean up qps on device exit
+ * @rdi: rvt dev structure
+ *
+ * Check for qp leaks and free resources.
+ */
+void rvt_qp_exit(struct rvt_dev_info *rdi)
+{
+ u32 qps_inuse = rvt_free_all_qps(rdi);
+
+ if (qps_inuse)
+ rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
+ qps_inuse);
+ if (!rdi->qp_dev)
+ return;
+
+ kfree(rdi->qp_dev->qp_table);
+ free_qpn_table(&rdi->qp_dev->qpn_table);
+ kfree(rdi->qp_dev);
+}
+
+static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off)
+{
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
+}
+
+/**
+ * alloc_qpn - Allocate the next available qpn or zero/one for QP type
+ * IB_QPT_SMI/IB_QPT_GSI
+ * @rdi: rvt device info structure
+ * @qpt: queue pair number table pointer
+ * @type: the QP type
+ * @port_num: IB port number, 1 based, comes from core
+ * @gfp: allocation flags
+ *
+ * Return: The queue pair number on success, otherwise an errno.
+ */
+static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+ enum ib_qp_type type, u8 port_num, gfp_t gfp)
+{
+ u32 i, offset, max_scan, qpn;
+ struct rvt_qpn_map *map;
+ u32 ret;
+
+ if (rdi->driver_f.alloc_qpn)
+ return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
+
+ if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
+ unsigned n;
+
+ ret = type == IB_QPT_GSI;
+ n = 1 << (ret + 2 * (port_num - 1));
+ spin_lock(&qpt->lock);
+ if (qpt->flags & n)
+ ret = -EINVAL;
+ else
+ qpt->flags |= n;
+ spin_unlock(&qpt->lock);
+ goto bail;
+ }
+
+ qpn = qpt->last + qpt->incr;
+ if (qpn >= RVT_QPN_MAX)
+ qpn = qpt->incr | ((qpt->last & 1) ^ 1);
+ /* offset carries bit 0 */
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+ get_map_page(qpt, map, gfp);
+ if (unlikely(!map->page))
+ break;
+ }
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ qpt->last = qpn;
+ ret = qpn;
+ goto bail;
+ }
+ offset += qpt->incr;
+ /*
+ * This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
+ * That is OK. It gets reassigned below.
+ */
+ qpn = mk_qpn(qpt, map, offset);
+ } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
+ /*
+ * In order to keep the number of pages allocated to a
+ * minimum, we scan all the existing pages before increasing
+ * the size of the bitmap table.
+ */
+ if (++i > max_scan) {
+ if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
+ break;
+ map = &qpt->map[qpt->nmaps++];
+ /* start at incr with current bit 0 */
+ offset = qpt->incr | (offset & 1);
+ } else if (map < &qpt->map[qpt->nmaps]) {
+ ++map;
+ /* start at incr with current bit 0 */
+ offset = qpt->incr | (offset & 1);
+ } else {
+ map = &qpt->map[0];
+ /* wrap to first map page, invert bit 0 */
+ offset = qpt->incr | ((offset & 1) ^ 1);
+ }
+ /* there can be no bits at shift and below */
+ WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+ qpn = mk_qpn(qpt, map, offset);
+ }
+
+ ret = -ENOMEM;
+
+bail:
+ return ret;
+}
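
The qpt->flags bookkeeping for the special QPs packs one bit per (port, type) pair: since n = 1 << (ret + 2 * (port_num - 1)) with ret == 0 for SMI and 1 for GSI, the layout works out as:

    /* port 1: SMI (QP0) -> 1 << 0, GSI (QP1) -> 1 << 1 */
    /* port 2: SMI (QP0) -> 1 << 2, GSI (QP1) -> 1 << 3 */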
+
+static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
+{
+ struct rvt_qpn_map *map;
+
+ map = qpt->map + qpn / RVT_BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
+}
+
+/**
+ * rvt_clear_mr_refs - Drop held mr refs
+ * @qp: rvt qp data structure
+ * @clr_sends: Whether to clear the send side or not
+ */
+static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
+{
+ unsigned n;
+
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
+ rvt_put_ss(&qp->s_rdma_read_sge);
+
+ rvt_put_ss(&qp->r_sge);
+
+ if (clr_sends) {
+ while (qp->s_last != qp->s_head) {
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ unsigned i;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct rvt_sge *sge = &wqe->sg_list[i];
+
+ rvt_put_mr(sge->mr);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&ibah_to_rvtah(
+ wqe->ud_wr.ah)->refcount);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ smp_wmb(); /* see qp_set_savail */
+ }
+ if (qp->s_rdma_mr) {
+ rvt_put_mr(qp->s_rdma_mr);
+ qp->s_rdma_mr = NULL;
+ }
+ }
+
+ if (qp->ibqp.qp_type != IB_QPT_RC)
+ return;
+
+ for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+ struct rvt_ack_entry *e = &qp->s_ack_queue[n];
+
+ if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
+ e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+ }
+}
+
+/**
+ * rvt_remove_qp - remove qp from table
+ * @rdi: rvt dev struct
+ * @qp: qp to remove
+ *
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive routine.
+ */
+static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
+ u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
+ unsigned long flags;
+ int removed = 1;
+
+ spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
+
+ if (rcu_dereference_protected(rvp->qp[0],
+ lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
+ RCU_INIT_POINTER(rvp->qp[0], NULL);
+ } else if (rcu_dereference_protected(rvp->qp[1],
+ lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
+ RCU_INIT_POINTER(rvp->qp[1], NULL);
+ } else {
+ struct rvt_qp *q;
+ struct rvt_qp __rcu **qpp;
+
+ removed = 0;
+ qpp = &rdi->qp_dev->qp_table[n];
+ for (; (q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
+ qpp = &q->next) {
+ if (q == qp) {
+ RCU_INIT_POINTER(*qpp,
+ rcu_dereference_protected(qp->next,
+ lockdep_is_held(&rdi->qp_dev->qpt_lock)));
+ removed = 1;
+ trace_rvt_qpremove(qp, n);
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
+ if (removed) {
+ synchronize_rcu();
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+/**
+ * rvt_reset_qp - initialize the QP state to the reset state
+ * @rdi: rvt dev struct
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * The r_lock, s_hlock, and s_lock are required to be held by the caller.
+ */
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
+{
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+
+ /* Let drivers flush their waitlist */
+ rdi->driver_f.flush_qp_waiters(qp);
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+
+ /* Stop the send queue and the retry timer */
+ rdi->driver_f.stop_send_queue(qp);
+
+ /* Wait for things to stop */
+ rdi->driver_f.quiesce_qp(qp);
+
+ /* take qp out the hash and wait for it to be unused */
+ rvt_remove_qp(rdi, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+
+ /* grab the lock b/c it was locked at call time */
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+
+ rvt_clear_mr_refs(qp, 1);
+ }
+
+ /*
+ * Let the driver do any tear down it needs to for a qp
+ * that has been reset
+ */
+ rdi->driver_f.notify_qp_reset(qp);
+
+ qp->remote_qpn = 0;
+ qp->qkey = 0;
+ qp->qp_access_flags = 0;
+ qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
+ qp->s_hdrwords = 0;
+ qp->s_wqe = NULL;
+ qp->s_draining = 0;
+ qp->s_next_psn = 0;
+ qp->s_last_psn = 0;
+ qp->s_sending_psn = 0;
+ qp->s_sending_hpsn = 0;
+ qp->s_psn = 0;
+ qp->r_psn = 0;
+ qp->r_msn = 0;
+ if (type == IB_QPT_RC) {
+ qp->s_state = IB_OPCODE_RC_SEND_LAST;
+ qp->r_state = IB_OPCODE_RC_SEND_LAST;
+ } else {
+ qp->s_state = IB_OPCODE_UC_SEND_LAST;
+ qp->r_state = IB_OPCODE_UC_SEND_LAST;
+ }
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->r_nak_state = 0;
+ qp->r_aflags = 0;
+ qp->r_flags = 0;
+ qp->s_head = 0;
+ qp->s_tail = 0;
+ qp->s_cur = 0;
+ qp->s_acked = 0;
+ qp->s_last = 0;
+ qp->s_ssn = 1;
+ qp->s_lsn = 0;
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+ qp->r_head_ack_queue = 0;
+ qp->s_tail_ack_queue = 0;
+ qp->s_num_rd_atomic = 0;
+ if (qp->r_rq.wq) {
+ qp->r_rq.wq->head = 0;
+ qp->r_rq.wq->tail = 0;
+ }
+ qp->r_sge.num_sge = 0;
+}
+
+/**
+ * rvt_create_qp - create a queue pair for a device
+ * @ibpd: the protection domain whose device we create the queue pair for
+ * @init_attr: the attributes of the queue pair
+ * @udata: user data for libibverbs.so
+ *
+ * Queue pair creation is mostly an rvt issue. However, drivers have their own
+ * unique idea of what queue pair numbers mean. For instance there is a reserved
+ * range for PSM.
+ *
+ * Return: the queue pair on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct rvt_qp *qp;
+ int err;
+ struct rvt_swqe *swq = NULL;
+ size_t sz;
+ size_t sg_list_sz;
+ struct ib_qp *ret = ERR_PTR(-ENOMEM);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
+ void *priv = NULL;
+ gfp_t gfp;
+
+ if (!rdi)
+ return ERR_PTR(-EINVAL);
+
+ if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
+ init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
+ init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+ return ERR_PTR(-EINVAL);
+
+ /* GFP_NOIO is applicable to RC QP's only */
+
+ if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
+ init_attr->qp_type != IB_QPT_RC)
+ return ERR_PTR(-EINVAL);
+
+ gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
+ GFP_NOIO : GFP_KERNEL;
+
+ /* Check receive queue parameters if no SRQ is specified. */
+ if (!init_attr->srq) {
+ if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
+ init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
+ return ERR_PTR(-EINVAL);
+
+ if (init_attr->cap.max_send_sge +
+ init_attr->cap.max_send_wr +
+ init_attr->cap.max_recv_sge +
+ init_attr->cap.max_recv_wr == 0)
+ return ERR_PTR(-EINVAL);
+ }
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > ibpd->device->phys_port_cnt)
+ return ERR_PTR(-EINVAL);
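+ /* fall through - SMI/GSI validated above share the setup below */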
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ sz = sizeof(struct rvt_sge) *
+ init_attr->cap.max_send_sge +
+ sizeof(struct rvt_swqe);
+ if (gfp == GFP_NOIO)
+ swq = __vmalloc(
+ (init_attr->cap.max_send_wr + 1) * sz,
+ gfp, PAGE_KERNEL);
+ else
+ swq = vmalloc_node(
+ (init_attr->cap.max_send_wr + 1) * sz,
+ rdi->dparms.node);
+ if (!swq)
+ return ERR_PTR(-ENOMEM);
+
+ sz = sizeof(*qp);
+ sg_list_sz = 0;
+ if (init_attr->srq) {
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
+
+ if (srq->rq.max_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (srq->rq.max_sge - 1);
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+ qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
+ if (!qp)
+ goto bail_swq;
+
+ RCU_INIT_POINTER(qp->next, NULL);
+
+ /*
+ * Driver needs to set up its private QP structure and do any
+ * initialization that is needed.
+ */
+ priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
+ if (!priv)
+ goto bail_qp;
+ qp->priv = priv;
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
+ if (init_attr->srq) {
+ sz = 0;
+ } else {
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct rvt_rwqe);
+ if (udata)
+ qp->r_rq.wq = vmalloc_user(
+ sizeof(struct rvt_rwq) +
+ qp->r_rq.size * sz);
+ else if (gfp == GFP_NOIO)
+ qp->r_rq.wq = __vmalloc(
+ sizeof(struct rvt_rwq) +
+ qp->r_rq.size * sz,
+ gfp, PAGE_KERNEL);
+ else
+ qp->r_rq.wq = vmalloc_node(
+ sizeof(struct rvt_rwq) +
+ qp->r_rq.size * sz,
+ rdi->dparms.node);
+ if (!qp->r_rq.wq)
+ goto bail_driver_priv;
+ }
+
+ /*
+ * ib_create_qp() will initialize qp->ibqp
+ * except for qp->ibqp.qp_num.
+ */
+ spin_lock_init(&qp->r_lock);
+ spin_lock_init(&qp->s_hlock);
+ spin_lock_init(&qp->s_lock);
+ spin_lock_init(&qp->r_rq.lock);
+ atomic_set(&qp->refcount, 0);
+ init_waitqueue_head(&qp->wait);
+ init_timer(&qp->s_timer);
+ qp->s_timer.data = (unsigned long)qp;
+ INIT_LIST_HEAD(&qp->rspwait);
+ qp->state = IB_QPS_RESET;
+ qp->s_wq = swq;
+ qp->s_size = init_attr->cap.max_send_wr + 1;
+ qp->s_avail = init_attr->cap.max_send_wr;
+ qp->s_max_sge = init_attr->cap.max_send_sge;
+ if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+ qp->s_flags = RVT_S_SIGNAL_REQ_WR;
+
+ err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
+ init_attr->qp_type,
+ init_attr->port_num, gfp);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto bail_rq_wq;
+ }
+ qp->ibqp.qp_num = err;
+ qp->port_num = init_attr->port_num;
+ rvt_reset_qp(rdi, qp, init_attr->qp_type);
+ break;
+
+ default:
+ /* Don't support raw QPs */
+ return ERR_PTR(-EINVAL);
+ }
+
+ init_attr->cap.max_inline_data = 0;
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See rvt_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ if (!qp->r_rq.wq) {
+ __u64 offset = 0;
+
+ err = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_qpn;
+ }
+ } else {
+ u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
+
+ qp->ip = rvt_create_mmap_info(rdi, s,
+ ibpd->uobject->context,
+ qp->r_rq.wq);
+ if (!qp->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qpn;
+ }
+
+ err = ib_copy_to_udata(udata, &qp->ip->offset,
+ sizeof(qp->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ }
+ qp->pid = current->pid;
+ }
+
+ spin_lock(&rdi->n_qps_lock);
+ if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
+ spin_unlock(&rdi->n_qps_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ rdi->n_qps_allocated++;
+ /*
+ * Maintain a busy_jiffies variable that will be added to the timeout
+ * period in mod_retry_timer and add_retry_timer. This busy jiffies
+ * is scaled by the number of rc qps created for the device to reduce
+ * the number of timeouts occurring when there is a large number of
+ * qps. busy_jiffies is incremented every rc qp scaling interval.
+ * The scaling interval is selected based on extensive performance
+ * evaluation of targeted workloads.
+ */
+ if (init_attr->qp_type == IB_QPT_RC) {
+ rdi->n_rc_qps++;
+ rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
+ }
+ spin_unlock(&rdi->n_qps_lock);
+
+ if (qp->ip) {
+ spin_lock_irq(&rdi->pending_lock);
+ list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
+ }
+
+ ret = &qp->ibqp;
+
+ /*
+	 * We have our QP and it's good; now keep track of what types of
+	 * opcodes can be processed on this QP. We do this by recording the
+	 * 3 high-order bits of the opcode.
+ */
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
+ break;
+ case IB_QPT_RC:
+ qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
+ break;
+ case IB_QPT_UC:
+ qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
+ break;
+ default:
+ ret = ERR_PTR(-EINVAL);
+ goto bail_ip;
+ }
+
+ return ret;
+
+bail_ip:
+	if (qp->ip)
+		kref_put(&qp->ip->ref, rvt_release_mmap_info);
+
+bail_qpn:
+ free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
+
+bail_rq_wq:
+ vfree(qp->r_rq.wq);
+
+bail_driver_priv:
+ rdi->driver_f.qp_priv_free(rdi, qp);
+
+bail_qp:
+ kfree(qp);
+
+bail_swq:
+ vfree(swq);
+
+ return ret;
+}
+
+/**
+ * rvt_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
+ * @err: the receive completion error to signal if an RWQE is active
+ *
+ * Flushes both send and receive work queues.
+ *
+ * Return: true if last WQE event should be generated.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
+ */
+int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
+{
+ struct ib_wc wc;
+ int ret = 0;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+ goto bail;
+
+ qp->state = IB_QPS_ERR;
+
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
+ qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
+
+ rdi->driver_f.notify_error_qp(qp);
+
+ /* Schedule the sending tasklet to drain the send work queue. */
+ if (ACCESS_ONCE(qp->s_last) != qp->s_head)
+ rdi->driver_f.schedule_send(qp);
+
+ rvt_clear_mr_refs(qp, 0);
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+
+ if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
+ wc.wr_id = qp->r_wr_id;
+ wc.status = err;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wc.status = IB_WC_WR_FLUSH_ERR;
+
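+	/* flush any receive WQEs still queued between tail and head */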
+ if (qp->r_rq.wq) {
+ struct rvt_rwq *wq;
+ u32 head;
+ u32 tail;
+
+ spin_lock(&qp->r_rq.lock);
+
+ /* sanity check pointers before trusting them */
+ wq = qp->r_rq.wq;
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ while (tail != head) {
+ wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+ if (++tail >= qp->r_rq.size)
+ tail = 0;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wq->tail = tail;
+
+ spin_unlock(&qp->r_rq.lock);
+ } else if (qp->ibqp.event_handler) {
+ ret = 1;
+ }
+
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rvt_error_qp);
+
+/*
+ * Put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
+ unsigned long flags;
+
+ atomic_inc(&qp->refcount);
+ spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
+
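+	/* QPN 0 and 1 are the special per-port SMI and GSI QPs */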
+ if (qp->ibqp.qp_num <= 1) {
+ rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
+ } else {
+ u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
+
+ qp->next = rdi->qp_dev->qp_table[n];
+ rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
+ trace_rvt_qpinsert(qp, n);
+ }
+
+ spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
+}
+
+/**
+ * rvt_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Return: 0 on success, otherwise returns an errno.
+ */
+int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ struct ib_event ev;
+ int lastwqe = 0;
+ int mig = 0;
+ int pmtu = 0; /* for gcc warning only */
+ enum rdma_link_layer link;
+
+ link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask, link))
+ goto inval;
+
+ if (rdi->driver_f.check_modify_qp &&
+ rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
+ goto inval;
+
+ if (attr_mask & IB_QP_AV) {
+ if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto inval;
+ if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ if (attr->alt_ah_attr.dlid >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto inval;
+ if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+ goto inval;
+ if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ if (attr->pkey_index >= rvt_get_npkeys(rdi))
+ goto inval;
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ if (attr->min_rnr_timer > 31)
+ goto inval;
+
+ if (attr_mask & IB_QP_PORT)
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI ||
+ attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt)
+ goto inval;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ if (attr->dest_qp_num > RVT_QPN_MASK)
+ goto inval;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ if (attr->retry_cnt > 7)
+ goto inval;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ if (attr->rnr_retry > 7)
+ goto inval;
+
+ /*
+	 * Don't allow invalid path_mtu values. It is OK to set it greater
+	 * than the active MTU (or even max_cap, if we have tuned that to a
+	 * small MTU). We'll set qp->path_mtu to the lesser of the requested
+	 * attribute MTU and the active MTU, for packetizing messages.
+	 * Note that the QP port has to be set in INIT and the MTU in RTR.
+ */
+ if (attr_mask & IB_QP_PATH_MTU) {
+ pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
+ if (pmtu < 0)
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ if (attr->path_mig_state == IB_MIG_REARM) {
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ goto inval;
+ if (new_state != IB_QPS_RTS)
+ goto inval;
+ } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
+ if (qp->s_mig_state == IB_MIG_REARM)
+ goto inval;
+ if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
+ goto inval;
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ mig = 1;
+ } else {
+ goto inval;
+ }
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
+ goto inval;
+
+ switch (new_state) {
+ case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET)
+ rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ break;
+
+ case IB_QPS_RTR:
+ /* Allow event to re-trigger if QP set to RTR more than once */
+ qp->r_flags &= ~RVT_R_COMM_EST;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQD:
+ qp->s_draining = qp->s_last != qp->s_cur;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQE:
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ goto inval;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_ERR:
+ lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ break;
+
+ default:
+ qp->state = new_state;
+ break;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ qp->s_pkey_index = attr->pkey_index;
+
+ if (attr_mask & IB_QP_PORT)
+ qp->port_num = attr->port_num;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ qp->remote_qpn = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_SQ_PSN) {
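+		/* restart all send-side PSN tracking from the new PSN */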
+ qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sending_psn = qp->s_next_psn;
+ qp->s_last_psn = qp->s_next_psn - 1;
+ qp->s_sending_hpsn = qp->s_last_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->qp_access_flags = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_AV) {
+ qp->remote_ah_attr = attr->ah_attr;
+ qp->s_srate = attr->ah_attr.static_rate;
+ qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ qp->alt_ah_attr = attr->alt_ah_attr;
+ qp->s_alt_pkey_index = attr->alt_pkey_index;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ qp->s_mig_state = attr->path_mig_state;
+ if (mig) {
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ }
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
+ qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
+ qp->log_pmtu = ilog2(qp->pmtu);
+ }
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ qp->s_retry_cnt = attr->retry_cnt;
+ qp->s_retry = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ qp->s_rnr_retry_cnt = attr->rnr_retry;
+ qp->s_rnr_retry = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp->r_min_rnr_timer = attr->min_rnr_timer;
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ qp->timeout = attr->timeout;
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ qp->s_max_rd_atomic = attr->max_rd_atomic;
+
+ if (rdi->driver_f.modify_qp)
+ rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
+
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ rvt_insert_qp(rdi, qp);
+
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ if (mig) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ return 0;
+
+inval:
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+ return -EINVAL;
+}
+
+/**
+ * rvt_free_qpn - Free a qpn from the bitmap
+ * @qpt: QP table
+ * @qpn: queue pair number to free
+ */
+static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
+{
+ struct rvt_qpn_map *map;
+
+ map = qpt->map + qpn / RVT_BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
+}
+
+/**
+ * rvt_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ *
+ * Return: 0 on success.
+ */
+int rvt_destroy_qp(struct ib_qp *ibqp)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+ rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+
+ /* qpn is now available for use again */
+ rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
+
+ spin_lock(&rdi->n_qps_lock);
+ rdi->n_qps_allocated--;
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ rdi->n_rc_qps--;
+ rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
+ }
+ spin_unlock(&rdi->n_qps_lock);
+
+ if (qp->ip)
+ kref_put(&qp->ip->ref, rvt_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ vfree(qp->s_wq);
+ rdi->driver_f.qp_priv_free(rdi, qp);
+ kfree(qp);
+ return 0;
+}
+
+/**
+ * rvt_query_qp - query an ibqp
+ * @ibqp: IB qp to query
+ * @attr: attr struct to fill in
+ * @attr_mask: attr mask ignored
+ * @init_attr: struct to fill in
+ *
+ * Return: always 0
+ */
+int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ attr->qp_state = qp->state;
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = qp->path_mtu;
+ attr->path_mig_state = qp->s_mig_state;
+ attr->qkey = qp->qkey;
+ attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
+ attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
+ attr->dest_qp_num = qp->remote_qpn;
+ attr->qp_access_flags = qp->qp_access_flags;
+ attr->cap.max_send_wr = qp->s_size - 1;
+ attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
+ attr->cap.max_send_sge = qp->s_max_sge;
+ attr->cap.max_recv_sge = qp->r_rq.max_sge;
+ attr->cap.max_inline_data = 0;
+ attr->ah_attr = qp->remote_ah_attr;
+ attr->alt_ah_attr = qp->alt_ah_attr;
+ attr->pkey_index = qp->s_pkey_index;
+ attr->alt_pkey_index = qp->s_alt_pkey_index;
+ attr->en_sqd_async_notify = 0;
+ attr->sq_draining = qp->s_draining;
+ attr->max_rd_atomic = qp->s_max_rd_atomic;
+ attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
+ attr->min_rnr_timer = qp->r_min_rnr_timer;
+ attr->port_num = qp->port_num;
+ attr->timeout = qp->timeout;
+ attr->retry_cnt = qp->s_retry_cnt;
+ attr->rnr_retry = qp->s_rnr_retry_cnt;
+ attr->alt_port_num = qp->alt_ah_attr.port_num;
+ attr->alt_timeout = qp->alt_timeout;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ else
+ init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->port_num = qp->port_num;
+ return 0;
+}
+
+/**
+ * rvt_post_receive - post a receive on a QP
+ * @ibqp: the QP to post the receive on
+ * @wr: the WR to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success otherwise errno
+ */
+int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_rwq *wq = qp->r_rq.wq;
+ unsigned long flags;
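+	/* in the error state, complete new receives immediately as flushed */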
+ int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
+ !qp->ibqp.srq;
+
+ /* Check that state is OK to post receive. */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ for (; wr; wr = wr->next) {
+ struct rvt_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ next = wq->head + 1;
+ if (next >= qp->r_rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+ if (unlikely(qp_err_flush)) {
+ struct ib_wc wc;
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = wr->wr_id;
+ wc.status = IB_WC_WR_FLUSH_ERR;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ } else {
+ wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /*
+ * Make sure queue entry is written
+ * before the head index.
+ */
+ smp_wmb();
+ wq->head = next;
+ }
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * qp_get_savail - return the number of available send entries
+ *
+ * @qp: the qp
+ *
+ * This assumes the s_hlock is held but the s_last
+ * qp variable may be updated concurrently.
+ */
+static inline u32 qp_get_savail(struct rvt_qp *qp)
+{
+ u32 slast;
+ u32 ret;
+
+ smp_read_barrier_depends(); /* see rc.c */
+ slast = ACCESS_ONCE(qp->s_last);
+ if (qp->s_head >= slast)
+ ret = qp->s_size - (qp->s_head - slast);
+ else
+ ret = slast - qp->s_head;
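+	/* one slot is reserved to distinguish a full ring from an empty one */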
+ return ret - 1;
+}
+
+/**
+ * rvt_post_one_wr - post one RC, UC, or UD send work request
+ * @qp: the QP to post on
+ * @wr: the work request to send
+ */
+static int rvt_post_one_wr(struct rvt_qp *qp,
+ struct ib_send_wr *wr,
+ int *call_send)
+{
+ struct rvt_swqe *wqe;
+ u32 next;
+ int i;
+ int j;
+ int acc;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ u8 log_pmtu;
+ int ret;
+
+ /* IB spec says that num_sge == 0 is OK. */
+ if (unlikely(wr->num_sge > qp->s_max_sge))
+ return -EINVAL;
+
+ /*
+ * Don't allow RDMA reads or atomic operations on UC or
+ * undefined operations.
+ * Make sure buffer is large enough to hold the result for atomics.
+ */
+ if (qp->ibqp.qp_type == IB_QPT_UC) {
+ if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
+ return -EINVAL;
+ } else if (qp->ibqp.qp_type != IB_QPT_RC) {
+ /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
+ if (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM)
+ return -EINVAL;
+ /* Check UD destination address PD */
+ if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
+ return -EINVAL;
+ } else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
+ return -EINVAL;
+ } else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1))) {
+ return -EINVAL;
+ } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
+ return -EINVAL;
+ }
+ /* check for avail */
+ if (unlikely(!qp->s_avail)) {
+ qp->s_avail = qp_get_savail(qp);
+ if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
+ rvt_pr_err(rdi,
+ "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
+ qp->ibqp.qp_num, qp->s_size, qp->s_avail,
+ qp->s_head, qp->s_tail, qp->s_cur,
+ qp->s_acked, qp->s_last);
+ if (!qp->s_avail)
+ return -ENOMEM;
+ }
+ next = qp->s_head + 1;
+ if (next >= qp->s_size)
+ next = 0;
+
+ rkt = &rdi->lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.pd);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_head);
+
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC)
+ memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
+ else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE ||
+ wr->opcode == IB_WR_RDMA_READ)
+ memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
+ else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
+ else
+ memcpy(&wqe->wr, wr, sizeof(wqe->wr));
+
+ wqe->length = 0;
+ j = 0;
+ if (wr->num_sge) {
+ acc = wr->opcode >= IB_WR_RDMA_READ ?
+ IB_ACCESS_LOCAL_WRITE : 0;
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 length = wr->sg_list[i].length;
+ int ok;
+
+ if (length == 0)
+ continue;
+ ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
+ &wr->sg_list[i], acc);
+ if (!ok) {
+ ret = -EINVAL;
+ goto bail_inval_free;
+ }
+ wqe->length += length;
+ j++;
+ }
+ wqe->wr.num_sge = j;
+ }
+
+ /* general part of wqe valid - allow for driver checks */
+ if (rdi->driver_f.check_send_wqe) {
+ ret = rdi->driver_f.check_send_wqe(qp, wqe);
+ if (ret < 0)
+ goto bail_inval_free;
+ if (ret)
+ *call_send = ret;
+ }
+
+ log_pmtu = qp->log_pmtu;
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC) {
+ struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
+
+ log_pmtu = ah->log_pmtu;
+ atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
+ }
+
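+	/* each pmtu-sized packet consumes a PSN; lpsn is the last packet's */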
+ wqe->ssn = qp->s_ssn++;
+ wqe->psn = qp->s_next_psn;
+ wqe->lpsn = wqe->psn +
+ (wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0);
+ qp->s_next_psn = wqe->lpsn + 1;
+ trace_rvt_post_one_wr(qp, wqe);
+ smp_wmb(); /* see request builders */
+ qp->s_avail--;
+ qp->s_head = next;
+
+ return 0;
+
+bail_inval_free:
+ /* release mr holds */
+ while (j) {
+ struct rvt_sge *sge = &wqe->sg_list[--j];
+
+ rvt_put_mr(sge->mr);
+ }
+ return ret;
+}
+
+/**
+ * rvt_post_send - post a send on a QP
+ * @ibqp: the QP to post the send on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success else errno
+ */
+int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ unsigned long flags = 0;
+ int call_send;
+ unsigned nreq = 0;
+ int err = 0;
+
+ spin_lock_irqsave(&qp->s_hlock, flags);
+
+ /*
+ * Ensure QP state is such that we can send. If not bail out early,
+ * there is no need to do this every time we post a send.
+ */
+ if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
+ spin_unlock_irqrestore(&qp->s_hlock, flags);
+ return -EINVAL;
+ }
+
+ /*
+	 * If the send queue is empty and we only have a single WR, just go
+ * ahead and kick the send engine into gear. Otherwise we will always
+ * just schedule the send to happen later.
+ */
+ call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
+
+ for (; wr; wr = wr->next) {
+ err = rvt_post_one_wr(qp, wr, &call_send);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ goto bail;
+ }
+ nreq++;
+ }
+bail:
+ spin_unlock_irqrestore(&qp->s_hlock, flags);
+ if (nreq) {
+ if (call_send)
+ rdi->driver_f.do_send(qp);
+ else
+ rdi->driver_f.schedule_send_no_lock(qp);
+ }
+ return err;
+}
+
+/**
+ * rvt_post_srq_receive - post a receive on a shared receive queue
+ * @ibsrq: the SRQ to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: A pointer to the first WR to cause a problem is put here
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success else errno
+ */
+int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ struct rvt_rwq *wq;
+ unsigned long flags;
+
+ for (; wr; wr = wr->next) {
+ struct rvt_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned)wr->num_sge > srq->rq.max_sge) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&srq->rq.lock, flags);
+ wq = srq->rq.wq;
+ next = wq->head + 1;
+ if (next >= srq->rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+
+ wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ }
+ return 0;
+}
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
new file mode 100644
index 000000000000..8409f80d5f25
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -0,0 +1,69 @@
+#ifndef DEF_RVTQP_H
+#define DEF_RVTQP_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+int rvt_driver_qp_init(struct rvt_dev_info *rdi);
+void rvt_qp_exit(struct rvt_dev_info *rdi);
+struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int rvt_destroy_qp(struct ib_qp *ibqp);
+int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr);
+int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+#endif /* DEF_RVTQP_H */
diff --git a/drivers/staging/rdma/hfi1/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 67786d417493..f7c48e9023de 100644
--- a/drivers/staging/rdma/hfi1/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -52,96 +49,50 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include "verbs.h"
+#include "srq.h"
+#include "vt.h"
/**
- * hfi1_post_srq_receive - post a receive on a shared receive queue
- * @ibsrq: the SRQ to post the receive on
- * @wr: the list of work requests to post
- * @bad_wr: A pointer to the first WR to cause a problem is put here
+ * rvt_driver_srq_init - init srq resources on a per driver basis
+ * @rdi: rvt dev structure
*
- * This may be called from interrupt context.
+ * Do any initialization needed when a driver registers with rdmavt.
*/
-int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
- struct hfi1_srq *srq = to_isrq(ibsrq);
- struct hfi1_rwq *wq;
- unsigned long flags;
- int ret;
-
- for (; wr; wr = wr->next) {
- struct hfi1_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > srq->rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&srq->rq.lock, flags);
- wq = srq->rq.wq;
- next = wq->head + 1;
- if (next >= srq->rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&srq->rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
+ spin_lock_init(&rdi->n_srqs_lock);
+ rdi->n_srqs_allocated = 0;
}
/**
- * hfi1_create_srq - create a shared receive queue
+ * rvt_create_srq - create a shared receive queue
* @ibpd: the protection domain of the SRQ to create
* @srq_init_attr: the attributes of the SRQ
* @udata: data from libibverbs when creating a user SRQ
+ *
+ * Return: Allocated srq object
*/
-struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
+struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
{
- struct hfi1_ibdev *dev = to_idev(ibpd->device);
- struct hfi1_srq *srq;
+ struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
+ struct rvt_srq *srq;
u32 sz;
struct ib_srq *ret;
- if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
- ret = ERR_PTR(-ENOSYS);
- goto done;
- }
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC)
+ return ERR_PTR(-ENOSYS);
if (srq_init_attr->attr.max_sge == 0 ||
- srq_init_attr->attr.max_sge > hfi1_max_srq_sges ||
+ srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
srq_init_attr->attr.max_wr == 0 ||
- srq_init_attr->attr.max_wr > hfi1_max_srq_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
+ srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
+ return ERR_PTR(-EINVAL);
srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
/*
* Need to use vmalloc() if we want to support large #s of entries.
@@ -149,8 +100,8 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
srq->rq.size = srq_init_attr->attr.max_wr + 1;
srq->rq.max_sge = srq_init_attr->attr.max_sge;
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
- sizeof(struct hfi1_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + srq->rq.size * sz);
+ sizeof(struct rvt_rwqe);
+ srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz);
if (!srq->rq.wq) {
ret = ERR_PTR(-ENOMEM);
goto bail_srq;
@@ -158,15 +109,15 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
/*
* Return the address of the RWQ as the offset to mmap.
- * See hfi1_mmap() for details.
+ * See rvt_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
int err;
- u32 s = sizeof(struct hfi1_rwq) + srq->rq.size * sz;
+ u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
srq->ip =
- hfi1_create_mmap_info(dev, s, ibpd->uobject->context,
- srq->rq.wq);
+ rvt_create_mmap_info(dev, s, ibpd->uobject->context,
+ srq->rq.wq);
if (!srq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wq;
@@ -178,8 +129,9 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
ret = ERR_PTR(err);
goto bail_ip;
}
- } else
+ } else {
srq->ip = NULL;
+ }
/*
* ib_create_srq() will initialize srq->ibsrq.
@@ -190,7 +142,7 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
srq->limit = srq_init_attr->attr.srq_limit;
spin_lock(&dev->n_srqs_lock);
- if (dev->n_srqs_allocated == hfi1_max_srqs) {
+ if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
spin_unlock(&dev->n_srqs_lock);
ret = ERR_PTR(-ENOMEM);
goto bail_ip;
@@ -205,8 +157,7 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
spin_unlock_irq(&dev->pending_lock);
}
- ret = &srq->ibsrq;
- goto done;
+ return &srq->ibsrq;
bail_ip:
kfree(srq->ip);
@@ -214,46 +165,44 @@ bail_wq:
vfree(srq->rq.wq);
bail_srq:
kfree(srq);
-done:
return ret;
}
/**
- * hfi1_modify_srq - modify a shared receive queue
+ * rvt_modify_srq - modify a shared receive queue
* @ibsrq: the SRQ to modify
* @attr: the new attributes of the SRQ
* @attr_mask: indicates which attributes to modify
* @udata: user data for libibverbs.so
+ *
+ * Return: 0 on success
*/
-int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata)
+int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
{
- struct hfi1_srq *srq = to_isrq(ibsrq);
- struct hfi1_rwq *wq;
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
+ struct rvt_rwq *wq;
int ret = 0;
if (attr_mask & IB_SRQ_MAX_WR) {
- struct hfi1_rwq *owq;
- struct hfi1_rwqe *p;
+ struct rvt_rwq *owq;
+ struct rvt_rwqe *p;
u32 sz, size, n, head, tail;
/* Check that the requested sizes are below the limits. */
- if ((attr->max_wr > hfi1_max_srq_wrs) ||
+ if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
((attr_mask & IB_SRQ_LIMIT) ?
- attr->srq_limit : srq->limit) > attr->max_wr) {
- ret = -EINVAL;
- goto bail;
- }
+ attr->srq_limit : srq->limit) > attr->max_wr)
+ return -EINVAL;
- sz = sizeof(struct hfi1_rwqe) +
+ sz = sizeof(struct rvt_rwqe) +
srq->rq.max_sge * sizeof(struct ib_sge);
size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct hfi1_rwq) + size * sz);
- if (!wq) {
- ret = -ENOMEM;
- goto bail;
- }
+ wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz);
+ if (!wq)
+ return -ENOMEM;
/* Check that we can write the offset to mmap. */
if (udata && udata->inlen >= sizeof(__u64)) {
@@ -264,8 +213,8 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
sizeof(offset_addr));
if (ret)
goto bail_free;
- udata->outbuf =
- (void __user *) (unsigned long) offset_addr;
+ udata->outbuf = (void __user *)
+ (unsigned long)offset_addr;
ret = ib_copy_to_udata(udata, &offset,
sizeof(offset));
if (ret)
@@ -296,16 +245,16 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
n = 0;
p = wq->wq;
while (tail != head) {
- struct hfi1_rwqe *wqe;
+ struct rvt_rwqe *wqe;
int i;
- wqe = get_rwqe_ptr(&srq->rq, tail);
+ wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
p->wr_id = wqe->wr_id;
p->num_sge = wqe->num_sge;
for (i = 0; i < wqe->num_sge; i++)
p->sg_list[i] = wqe->sg_list[i];
n++;
- p = (struct hfi1_rwqe *)((char *)p + sz);
+ p = (struct rvt_rwqe *)((char *)p + sz);
if (++tail >= srq->rq.size)
tail = 0;
}
@@ -320,21 +269,21 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
vfree(owq);
if (srq->ip) {
- struct hfi1_mmap_info *ip = srq->ip;
- struct hfi1_ibdev *dev = to_idev(srq->ibsrq.device);
- u32 s = sizeof(struct hfi1_rwq) + size * sz;
+ struct rvt_mmap_info *ip = srq->ip;
+ struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
+ u32 s = sizeof(struct rvt_rwq) + size * sz;
- hfi1_update_mmap_info(dev, ip, s, wq);
+ rvt_update_mmap_info(dev, ip, s, wq);
/*
* Return the offset to mmap.
- * See hfi1_mmap() for details.
+ * See rvt_mmap() for details.
*/
if (udata && udata->inlen >= sizeof(__u64)) {
ret = ib_copy_to_udata(udata, &ip->offset,
sizeof(ip->offset));
if (ret)
- goto bail;
+ return ret;
}
/*
@@ -355,19 +304,24 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
srq->limit = attr->srq_limit;
spin_unlock_irq(&srq->rq.lock);
}
- goto bail;
+ return ret;
bail_unlock:
spin_unlock_irq(&srq->rq.lock);
bail_free:
vfree(wq);
-bail:
return ret;
}
-int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+/**
+ * rvt_query_srq - query srq data
+ * @ibsrq: srq to query
+ * @attr: return info in attr
+ *
+ * Return: always 0
+ */
+int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
- struct hfi1_srq *srq = to_isrq(ibsrq);
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
attr->max_wr = srq->rq.size - 1;
attr->max_sge = srq->rq.max_sge;
@@ -376,19 +330,21 @@ int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
}
/**
- * hfi1_destroy_srq - destroy a shared receive queue
- * @ibsrq: the SRQ to destroy
+ * rvt_destroy_srq - destroy an srq
+ * @ibsrq: srq object to destroy
+ *
+ * Return: always 0
*/
-int hfi1_destroy_srq(struct ib_srq *ibsrq)
+int rvt_destroy_srq(struct ib_srq *ibsrq)
{
- struct hfi1_srq *srq = to_isrq(ibsrq);
- struct hfi1_ibdev *dev = to_idev(ibsrq->device);
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
spin_lock(&dev->n_srqs_lock);
dev->n_srqs_allocated--;
spin_unlock(&dev->n_srqs_lock);
if (srq->ip)
- kref_put(&srq->ip->ref, hfi1_release_mmap_info);
+ kref_put(&srq->ip->ref, rvt_release_mmap_info);
else
vfree(srq->rq.wq);
kfree(srq);
diff --git a/drivers/infiniband/sw/rdmavt/srq.h b/drivers/infiniband/sw/rdmavt/srq.h
new file mode 100644
index 000000000000..bf0eaaf56465
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/srq.h
@@ -0,0 +1,62 @@
+#ifndef DEF_RVTSRQ_H
+#define DEF_RVTSRQ_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+void rvt_driver_srq_init(struct rvt_dev_info *rdi);
+struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata);
+int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+int rvt_destroy_srq(struct ib_srq *ibsrq);
+
+#endif /* DEF_RVTSRQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/trace.c b/drivers/infiniband/sw/rdmavt/trace.c
new file mode 100644
index 000000000000..d593285a349c
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
new file mode 100644
index 000000000000..6c0457db5499
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR rdmavt
+
+#if !defined(__RDMAVT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RDMAVT_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#define RDI_DEV_ENTRY(rdi) __string(dev, rdi->driver_f.get_card_name(rdi))
+#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rdi->driver_f.get_card_name(rdi))
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rdmavt
+
+TRACE_EVENT(rvt_dbg,
+ TP_PROTO(struct rvt_dev_info *rdi,
+ const char *msg),
+ TP_ARGS(rdi, msg),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(rdi)
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(rdi);
+ __assign_str(msg, msg);
+ ),
+ TP_printk("[%s]: %s", __get_str(dev), __get_str(msg))
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_qphash
+DECLARE_EVENT_CLASS(rvt_qphash_template,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, bucket)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->bucket = bucket;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x bucket %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->bucket
+ )
+);
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_tx
+
+#define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode }
+#define show_wr_opcode(opcode) \
+__print_symbolic(opcode, \
+ wr_opcode_name(RDMA_WRITE), \
+ wr_opcode_name(RDMA_WRITE_WITH_IMM), \
+ wr_opcode_name(SEND), \
+ wr_opcode_name(SEND_WITH_IMM), \
+ wr_opcode_name(RDMA_READ), \
+ wr_opcode_name(ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(ATOMIC_FETCH_AND_ADD), \
+ wr_opcode_name(LSO), \
+ wr_opcode_name(SEND_WITH_INV), \
+ wr_opcode_name(RDMA_READ_WITH_INV), \
+ wr_opcode_name(LOCAL_INV), \
+ wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD))
+
+#define POS_PRN \
+"[%s] wr_id %llx qpn %x psn 0x%x lpsn 0x%x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u"
+
+TRACE_EVENT(
+ rvt_post_one_wr,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe),
+ TP_ARGS(qp, wqe),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u64, wr_id)
+ __field(u32, qpn)
+ __field(u32, psn)
+ __field(u32, lpsn)
+ __field(u32, length)
+ __field(u32, opcode)
+ __field(u32, size)
+ __field(u32, avail)
+ __field(u32, head)
+ __field(u32, last)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->psn = wqe->psn;
+ __entry->lpsn = wqe->lpsn;
+ __entry->length = wqe->length;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->size = qp->s_size;
+ __entry->avail = qp->s_avail;
+ __entry->head = qp->s_head;
+ __entry->last = qp->s_last;
+ ),
+ TP_printk(
+ POS_PRN,
+ __get_str(dev),
+ __entry->wr_id,
+ __entry->qpn,
+ __entry->psn,
+ __entry->lpsn,
+ __entry->length,
+ __entry->opcode, show_wr_opcode(__entry->opcode),
+ __entry->size,
+ __entry->avail,
+ __entry->head,
+ __entry->last
+ )
+);
+
+#endif /* __RDMAVT_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
new file mode 100644
index 000000000000..6caf5272ba1f
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -0,0 +1,873 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "vt.h"
+#include "trace.h"
+
+#define RVT_UVERBS_ABI_VERSION 2
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("RDMA Verbs Transport Library");
+
+static int rvt_init(void)
+{
+ /*
+ * rdmavt does not need to do anything special when it starts up. All it
+ * needs to do is sit and wait until a driver attempts registration.
+ */
+ return 0;
+}
+module_init(rvt_init);
+
+static void rvt_cleanup(void)
+{
+ /*
+	 * Nothing to do at exit time either. The module can't be removed
+	 * until all drivers are gone, which means all the dev structs are
+	 * gone, so there is really nothing to do.
+ */
+}
+module_exit(rvt_cleanup);
+
+/**
+ * rvt_alloc_device - allocate rdi
+ * @size: how big of a structure to allocate
+ * @nports: number of ports to allocate array slots for
+ *
+ * Use IB core device alloc to allocate space for the rdi which is assumed to be
+ * inside of the ib_device. Any extra space that drivers require should be
+ * included in size.
+ *
+ * We also allocate a port array based on the number of ports.
+ *
+ * Return: pointer to allocated rdi, or NULL on allocation failure
+ */
+struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
+{
+	struct rvt_dev_info *rdi;
+
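+	/* the cast relies on ibdev being the first member of rvt_dev_info */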
+ rdi = (struct rvt_dev_info *)ib_alloc_device(size);
+ if (!rdi)
+ return rdi;
+
+	rdi->ports = kcalloc(nports,
+			     sizeof(struct rvt_ibport *),
+			     GFP_KERNEL);
+	if (!rdi->ports) {
+		ib_dealloc_device(&rdi->ibdev);
+		return NULL;
+	}
+
+ return rdi;
+}
+EXPORT_SYMBOL(rvt_alloc_device);
+
+static int rvt_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+ /*
+ * Return rvt_dev_info.dparms.props contents
+ */
+ *props = rdi->dparms.props;
+ return 0;
+}
+
+static int rvt_modify_device(struct ib_device *device,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ /*
+	 * Based on qib and hfi1, there is currently no need to supply this.
+	 * Future drivers may need to implement it, though.
+ */
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * rvt_query_port - Passes the query port call to the driver
+ * @ibdev: Verbs IB dev
+ * @port_num: port number, 1 based from ib core
+ * @props: structure to hold returned properties
+ *
+ * Return: 0 on success
+ */
+static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_attr *props)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct rvt_ibport *rvp;
+ int port_index = ibport_num_to_idx(ibdev, port_num);
+
+ if (port_index < 0)
+ return -EINVAL;
+
+ rvp = rdi->ports[port_index];
+ memset(props, 0, sizeof(*props));
+ props->sm_lid = rvp->sm_lid;
+ props->sm_sl = rvp->sm_sl;
+ props->port_cap_flags = rvp->port_cap_flags;
+ props->max_msg_sz = 0x80000000;
+ props->pkey_tbl_len = rvt_get_npkeys(rdi);
+ props->bad_pkey_cntr = rvp->pkey_violations;
+ props->qkey_viol_cntr = rvp->qkey_violations;
+ props->subnet_timeout = rvp->subnet_timeout;
+ props->init_type_reply = 0;
+
+ /* Populate the remaining ib_port_attr elements */
+ return rdi->driver_f.query_port_state(rdi, port_num, props);
+}
+
+/**
+ * rvt_modify_port
+ * @ibdev: Verbs IB dev
+ * @port_num: Port number, 1 based from ib core
+ * @port_modify_mask: How to change the port
+ * @props: Structure to fill in
+ *
+ * Return: 0 on success
+ */
+static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
+ int port_modify_mask, struct ib_port_modify *props)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct rvt_ibport *rvp;
+ int ret = 0;
+ int port_index = ibport_num_to_idx(ibdev, port_num);
+
+ if (port_index < 0)
+ return -EINVAL;
+
+ rvp = rdi->ports[port_index];
+ rvp->port_cap_flags |= props->set_port_cap_mask;
+ rvp->port_cap_flags &= ~props->clr_port_cap_mask;
+
+ if (props->set_port_cap_mask || props->clr_port_cap_mask)
+ rdi->driver_f.cap_mask_chg(rdi, port_num);
+ if (port_modify_mask & IB_PORT_SHUTDOWN)
+ ret = rdi->driver_f.shut_down_port(rdi, port_num);
+ if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
+ rvp->qkey_violations = 0;
+
+ return ret;
+}
+
+/**
+ * rvt_query_pkey - Return a pkey from the table at a given index
+ * @ibdev: Verbs IB dev
+ * @port_num: Port number, 1 based from ib core
+ * @index: Index into pkey table
+ * @pkey: Returned pkey
+ *
+ * Return: 0 on success, -EINVAL otherwise
+ */
+static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
+ u16 *pkey)
+{
+ /*
+ * Driver will be responsible for keeping rvt_dev_info.pkey_table up to
+	 * date. This function will just return that value. There is no need
+	 * to lock; if a stale value is read and sent to the user, so be it,
+	 * as there is no way to protect against that anyway.
+ */
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ int port_index;
+
+ port_index = ibport_num_to_idx(ibdev, port_num);
+ if (port_index < 0)
+ return -EINVAL;
+
+ if (index >= rvt_get_npkeys(rdi))
+ return -EINVAL;
+
+ *pkey = rvt_get_pkey(rdi, port_index, index);
+ return 0;
+}
+
+/**
+ * rvt_query_gid - Return a gid from the table
+ * @ibdev: Verbs IB dev
+ * @port_num: Port number, 1 based from ib core
+ * @guid_index: Index in table
+ * @gid: Gid to return
+ *
+ * Return: 0 on success
+ */
+static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
+ int guid_index, union ib_gid *gid)
+{
+ struct rvt_dev_info *rdi;
+ struct rvt_ibport *rvp;
+ int port_index;
+
+ /*
+ * The driver is responsible for updating the guid table, which is used
+ * to craft the return value. This works similarly to query_pkey().
+ */
+ port_index = ibport_num_to_idx(ibdev, port_num);
+ if (port_index < 0)
+ return -EINVAL;
+
+ rdi = ib_to_rvt(ibdev);
+ rvp = rdi->ports[port_index];
+
+ gid->global.subnet_prefix = rvp->gid_prefix;
+
+ return rdi->driver_f.get_guid_be(rdi, rvp, guid_index,
+ &gid->global.interface_id);
+}
+
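+/*
+ * rdmavt needs no per-process state beyond the core ib_ucontext, so this
+ * wrapper exists only to support the container_of() pattern used below.
+ */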
+struct rvt_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
+static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
+ *ibucontext)
+{
+ return container_of(ibucontext, struct rvt_ucontext, ibucontext);
+}
+
+/**
+ * rvt_alloc_ucontext - Allocate a user context
+ * @ibdev: Verbs IB dev
+ * @udata: User data supplied by the caller
+ */
+static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct rvt_ucontext *context;
+
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+ return &context->ibucontext;
+}
+
+/**
+ * rvt_dealloc_ucontext - Free a user context
+ * @context: Free this
+ */
+static int rvt_dealloc_ucontext(struct ib_ucontext *context)
+{
+ kfree(to_iucontext(context));
+ return 0;
+}
+
+static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct ib_port_attr attr;
+ int err, port_index;
+
+ port_index = ibport_num_to_idx(ibdev, port_num);
+ if (port_index < 0)
+ return -EINVAL;
+
+ err = rvt_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = rdi->dparms.core_cap_flags;
+ immutable->max_mad_size = rdi->dparms.max_mad_size;
+
+ return 0;
+}
+
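+/*
+ * Index of each verb (or related requirement) that check_support() walks
+ * during rvt_register_device(). MISC covers the non-verb driver helpers
+ * that rdmavt itself requires.
+ */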
+enum {
+ MISC,
+ QUERY_DEVICE,
+ MODIFY_DEVICE,
+ QUERY_PORT,
+ MODIFY_PORT,
+ QUERY_PKEY,
+ QUERY_GID,
+ ALLOC_UCONTEXT,
+ DEALLOC_UCONTEXT,
+ GET_PORT_IMMUTABLE,
+ CREATE_QP,
+ MODIFY_QP,
+ DESTROY_QP,
+ QUERY_QP,
+ POST_SEND,
+ POST_RECV,
+ POST_SRQ_RECV,
+ CREATE_AH,
+ DESTROY_AH,
+ MODIFY_AH,
+ QUERY_AH,
+ CREATE_SRQ,
+ MODIFY_SRQ,
+ DESTROY_SRQ,
+ QUERY_SRQ,
+ ATTACH_MCAST,
+ DETACH_MCAST,
+ GET_DMA_MR,
+ REG_USER_MR,
+ DEREG_MR,
+ ALLOC_MR,
+ ALLOC_FMR,
+ MAP_PHYS_FMR,
+ UNMAP_FMR,
+ DEALLOC_FMR,
+ MMAP,
+ CREATE_CQ,
+ DESTROY_CQ,
+ POLL_CQ,
+ REQ_NOTIFY_CQ,
+ RESIZE_CQ,
+ ALLOC_PD,
+ DEALLOC_PD,
+ _VERB_IDX_MAX /* Must always be last! */
+};
+
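+/*
+ * If the driver left the ib_device callback at @offset unset, install
+ * rdmavt's default @func there and return 0; return 1 if the driver
+ * already supplied its own callback.
+ */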
+static inline int check_driver_override(struct rvt_dev_info *rdi,
+ size_t offset, void *func)
+{
+ if (!*(void **)((void *)&rdi->ibdev + offset)) {
+ *(void **)((void *)&rdi->ibdev + offset) = func;
+ return 0;
+ }
+
+ return 1;
+}
+
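+/*
+ * For each verb, install the rdmavt default unless the driver supplied
+ * its own callback. Where an rdmavt default depends on driver helper
+ * functions, verify that those helpers were provided.
+ */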
+static noinline int check_support(struct rvt_dev_info *rdi, int verb)
+{
+ switch (verb) {
+ case MISC:
+ /*
+ * These functions are not part of verbs specifically but are
+ * required for rdmavt to function.
+ */
+ if ((!rdi->driver_f.port_callback) ||
+ (!rdi->driver_f.get_card_name) ||
+ (!rdi->driver_f.get_pci_dev))
+ return -EINVAL;
+ break;
+
+ case QUERY_DEVICE:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_device),
+ rvt_query_device);
+ break;
+
+ case MODIFY_DEVICE:
+ /*
+ * rdmavt does not currently support modify device; drivers must
+ * provide their own implementation.
+ */
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ modify_device),
+ rvt_modify_device))
+ return -EOPNOTSUPP;
+ break;
+
+ case QUERY_PORT:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ query_port),
+ rvt_query_port))
+ if (!rdi->driver_f.query_port_state)
+ return -EINVAL;
+ break;
+
+ case MODIFY_PORT:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ modify_port),
+ rvt_modify_port))
+ if (!rdi->driver_f.cap_mask_chg ||
+ !rdi->driver_f.shut_down_port)
+ return -EINVAL;
+ break;
+
+ case QUERY_PKEY:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_pkey),
+ rvt_query_pkey);
+ break;
+
+ case QUERY_GID:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ query_gid),
+ rvt_query_gid))
+ if (!rdi->driver_f.get_guid_be)
+ return -EINVAL;
+ break;
+
+ case ALLOC_UCONTEXT:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ alloc_ucontext),
+ rvt_alloc_ucontext);
+ break;
+
+ case DEALLOC_UCONTEXT:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ dealloc_ucontext),
+ rvt_dealloc_ucontext);
+ break;
+
+ case GET_PORT_IMMUTABLE:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ get_port_immutable),
+ rvt_get_port_immutable);
+ break;
+
+ case CREATE_QP:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ create_qp),
+ rvt_create_qp))
+ if (!rdi->driver_f.qp_priv_alloc ||
+ !rdi->driver_f.qp_priv_free ||
+ !rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.flush_qp_waiters ||
+ !rdi->driver_f.stop_send_queue ||
+ !rdi->driver_f.quiesce_qp)
+ return -EINVAL;
+ break;
+
+ case MODIFY_QP:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ modify_qp),
+ rvt_modify_qp))
+ if (!rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.schedule_send ||
+ !rdi->driver_f.get_pmtu_from_attr ||
+ !rdi->driver_f.flush_qp_waiters ||
+ !rdi->driver_f.stop_send_queue ||
+ !rdi->driver_f.quiesce_qp ||
+ !rdi->driver_f.notify_error_qp ||
+ !rdi->driver_f.mtu_from_qp ||
+ !rdi->driver_f.mtu_to_path_mtu ||
+ !rdi->driver_f.shut_down_port ||
+ !rdi->driver_f.cap_mask_chg)
+ return -EINVAL;
+ break;
+
+ case DESTROY_QP:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ destroy_qp),
+ rvt_destroy_qp))
+ if (!rdi->driver_f.qp_priv_free ||
+ !rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.flush_qp_waiters ||
+ !rdi->driver_f.stop_send_queue ||
+ !rdi->driver_f.quiesce_qp)
+ return -EINVAL;
+ break;
+
+ case QUERY_QP:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_qp),
+ rvt_query_qp);
+ break;
+
+ case POST_SEND:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ post_send),
+ rvt_post_send))
+ if (!rdi->driver_f.schedule_send ||
+ !rdi->driver_f.do_send)
+ return -EINVAL;
+ break;
+
+ case POST_RECV:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ post_recv),
+ rvt_post_recv);
+ break;
+ case POST_SRQ_RECV:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ post_srq_recv),
+ rvt_post_srq_recv);
+ break;
+
+ case CREATE_AH:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ create_ah),
+ rvt_create_ah);
+ break;
+
+ case DESTROY_AH:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ destroy_ah),
+ rvt_destroy_ah);
+ break;
+
+ case MODIFY_AH:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ modify_ah),
+ rvt_modify_ah);
+ break;
+
+ case QUERY_AH:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_ah),
+ rvt_query_ah);
+ break;
+
+ case CREATE_SRQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ create_srq),
+ rvt_create_srq);
+ break;
+
+ case MODIFY_SRQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ modify_srq),
+ rvt_modify_srq);
+ break;
+
+ case DESTROY_SRQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ destroy_srq),
+ rvt_destroy_srq);
+ break;
+
+ case QUERY_SRQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_srq),
+ rvt_query_srq);
+ break;
+
+ case ATTACH_MCAST:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ attach_mcast),
+ rvt_attach_mcast);
+ break;
+
+ case DETACH_MCAST:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ detach_mcast),
+ rvt_detach_mcast);
+ break;
+
+ case GET_DMA_MR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ get_dma_mr),
+ rvt_get_dma_mr);
+ break;
+
+ case REG_USER_MR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ reg_user_mr),
+ rvt_reg_user_mr);
+ break;
+
+ case DEREG_MR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ dereg_mr),
+ rvt_dereg_mr);
+ break;
+
+ case ALLOC_FMR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ alloc_fmr),
+ rvt_alloc_fmr);
+ break;
+
+ case ALLOC_MR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ alloc_mr),
+ rvt_alloc_mr);
+ break;
+
+ case MAP_PHYS_FMR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ map_phys_fmr),
+ rvt_map_phys_fmr);
+ break;
+
+ case UNMAP_FMR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ unmap_fmr),
+ rvt_unmap_fmr);
+ break;
+
+ case DEALLOC_FMR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ dealloc_fmr),
+ rvt_dealloc_fmr);
+ break;
+
+ case MMAP:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ mmap),
+ rvt_mmap);
+ break;
+
+ case CREATE_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ create_cq),
+ rvt_create_cq);
+ break;
+
+ case DESTROY_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ destroy_cq),
+ rvt_destroy_cq);
+ break;
+
+ case POLL_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ poll_cq),
+ rvt_poll_cq);
+ break;
+
+ case REQ_NOTIFY_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ req_notify_cq),
+ rvt_req_notify_cq);
+ break;
+
+ case RESIZE_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ resize_cq),
+ rvt_resize_cq);
+ break;
+
+ case ALLOC_PD:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ alloc_pd),
+ rvt_alloc_pd);
+ break;
+
+ case DEALLOC_PD:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ dealloc_pd),
+ rvt_dealloc_pd);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * rvt_register_device - register a driver
+ * @rdi: main dev structure for all of rdmavt operations
+ *
+ * It is up to drivers to allocate the rdi and fill in the appropriate
+ * information.
+ *
+ * Return: 0 on success otherwise an errno.
+ */
+int rvt_register_device(struct rvt_dev_info *rdi)
+{
+ int ret = 0, i;
+
+ if (!rdi)
+ return -EINVAL;
+
+ /*
+ * Check to ensure drivers have set up the required helpers for the verbs
+ * they want rdmavt to handle
+ */
+ for (i = 0; i < _VERB_IDX_MAX; i++)
+ if (check_support(rdi, i)) {
+ pr_err("Driver support req not met at %d\n", i);
+ return -EINVAL;
+ }
+
+ /* Once we get past here we can use rvt_pr macros and tracepoints */
+ trace_rvt_dbg(rdi, "Driver attempting registration");
+ rvt_mmap_init(rdi);
+
+ /* Queue Pairs */
+ ret = rvt_driver_qp_init(rdi);
+ if (ret) {
+ pr_err("Error in driver QP init.\n");
+ return -EINVAL;
+ }
+
+ /* Address Handle */
+ spin_lock_init(&rdi->n_ahs_lock);
+ rdi->n_ahs_allocated = 0;
+
+ /* Shared Receive Queue */
+ rvt_driver_srq_init(rdi);
+
+ /* Multicast */
+ rvt_driver_mcast_init(rdi);
+
+ /* Mem Region */
+ ret = rvt_driver_mr_init(rdi);
+ if (ret) {
+ pr_err("Error in driver MR init.\n");
+ goto bail_no_mr;
+ }
+
+ /* Completion queues */
+ ret = rvt_driver_cq_init(rdi);
+ if (ret) {
+ pr_err("Error in driver CQ init.\n");
+ goto bail_mr;
+ }
+
+ /* DMA Operations: use the driver's dma_ops if set, else rdmavt's */
+ rdi->ibdev.dma_ops =
+ rdi->ibdev.dma_ops ? : &rvt_default_dma_mapping_ops;
+
+ /* Protection Domain */
+ spin_lock_init(&rdi->n_pds_lock);
+ rdi->n_pds_allocated = 0;
+
+ /*
+ * There are some things which could be set by underlying drivers but
+ * really should be up to rdmavt to set. For instance, drivers can't know
+ * exactly which functions rdmavt supports, nor do they know the ABI
+ * version, so all of that is set here.
+ */
+ rdi->ibdev.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION;
+ rdi->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+ rdi->ibdev.node_type = RDMA_NODE_IB_CA;
+ rdi->ibdev.num_comp_vectors = 1;
+
+ /* We are now good to announce we exist */
+ ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
+ if (ret) {
+ rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
+ goto bail_cq;
+ }
+
+ rvt_create_mad_agents(rdi);
+
+ rvt_pr_info(rdi, "Registration with rdmavt done.\n");
+ return ret;
+
+bail_cq:
+ rvt_cq_exit(rdi);
+
+bail_mr:
+ rvt_mr_exit(rdi);
+
+bail_no_mr:
+ rvt_qp_exit(rdi);
+
+ return ret;
+}
+EXPORT_SYMBOL(rvt_register_device);
+
+/**
+ * rvt_unregister_device - remove a driver
+ * @rdi: rvt dev struct
+ */
+void rvt_unregister_device(struct rvt_dev_info *rdi)
+{
+ if (!rdi)
+ return;
+ trace_rvt_dbg(rdi, "Driver is unregistering.");
+
+ rvt_free_mad_agents(rdi);
+
+ ib_unregister_device(&rdi->ibdev);
+ rvt_cq_exit(rdi);
+ rvt_mr_exit(rdi);
+ rvt_qp_exit(rdi);
+}
+EXPORT_SYMBOL(rvt_unregister_device);
+
+/**
+ * rvt_init_port - init internal data for driver port
+ * @rdi: rvt dev struct
+ * @port: rvt port
+ * @port_index: 0 based index of ports, different from IB core port num
+ * @pkey_table: pkey table allocated by the driver for this port
+ *
+ * Keeps track of a list of ports. There is no need for a detach-port
+ * counterpart; ports persist until the driver goes away.
+ *
+ * Return: always 0
+ */
+int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
+ int port_index, u16 *pkey_table)
+{
+ rdi->ports[port_index] = port;
+ rdi->ports[port_index]->pkey_table = pkey_table;
+
+ return 0;
+}
+EXPORT_SYMBOL(rvt_init_port);
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
new file mode 100644
index 000000000000..6b01eaa4461b
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -0,0 +1,107 @@
+#ifndef DEF_RDMAVT_H
+#define DEF_RDMAVT_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+#include <linux/pci.h>
+#include "dma.h"
+#include "pd.h"
+#include "qp.h"
+#include "ah.h"
+#include "mr.h"
+#include "srq.h"
+#include "mcast.h"
+#include "mmap.h"
+#include "cq.h"
+#include "mad.h"
+#include "mmap.h"
+
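+/*
+ * Message wrappers that prefix rdmavt output with the card name; the
+ * pci_dev and name are obtained through driver-supplied callbacks.
+ */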
+#define rvt_pr_info(rdi, fmt, ...) \
+ __rvt_pr_info(rdi->driver_f.get_pci_dev(rdi), \
+ rdi->driver_f.get_card_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define rvt_pr_warn(rdi, fmt, ...) \
+ __rvt_pr_warn(rdi->driver_f.get_pci_dev(rdi), \
+ rdi->driver_f.get_card_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define rvt_pr_err(rdi, fmt, ...) \
+ __rvt_pr_err(rdi->driver_f.get_pci_dev(rdi), \
+ rdi->driver_f.get_card_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define __rvt_pr_info(pdev, name, fmt, ...) \
+ dev_info(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+#define __rvt_pr_warn(pdev, name, fmt, ...) \
+ dev_warn(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+#define __rvt_pr_err(pdev, name, fmt, ...) \
+ dev_err(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+static inline int ibport_num_to_idx(struct ib_device *ibdev, u8 port_num)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ int port_index;
+
+ port_index = port_num - 1; /* IB ports start at 1; our arrays start at 0 */
+ if ((port_index < 0) || (port_index >= rdi->dparms.nports))
+ return -EINVAL;
+
+ return port_index;
+}
+
+#endif /* DEF_RDMAVT_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index a6f3eab0f350..caec8e9c4666 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -244,6 +244,7 @@ struct ipoib_cm_tx {
unsigned tx_tail;
unsigned long flags;
u32 mtu;
+ unsigned max_send_sge;
};
struct ipoib_cm_rx_buf {
@@ -387,9 +388,10 @@ struct ipoib_dev_priv {
struct dentry *mcg_dentry;
struct dentry *path_dentry;
#endif
- int hca_caps;
+ u64 hca_caps;
struct ipoib_ethtool_st ethtool;
struct timer_list poll_timer;
+ unsigned max_send_sge;
};
struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 917e46ea3bf6..c8ed53562c9b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -710,6 +710,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_tx_buf *tx_req;
int rc;
+ unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -719,7 +720,23 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return;
}
-
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ /* skb_linearize() may succeed yet leave nr_frags too large; re-check */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
tx->tx_head, skb->len, tx->qp->qp_num);
@@ -1031,7 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
- attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ attr.cap.max_send_sge =
+ min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
@@ -1040,6 +1058,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
tx_qp = ib_create_qp(priv->pd, &attr);
}
+ tx->max_send_sge = attr.cap.max_send_sge;
return tx_qp;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fa9c42ff1fb0..f0e55e47eb54 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -180,6 +180,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct sk_buff *skb;
u64 mapping[IPOIB_UD_RX_SG];
union ib_gid *dgid;
+ union ib_gid *sgid;
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -203,13 +204,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
return;
}
- /*
- * Drop packets that this interface sent, ie multicast packets
- * that the HCA has replicated.
- */
- if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
- goto repost;
-
memcpy(mapping, priv->rx_ring[wr_id].mapping,
IPOIB_UD_RX_SG * sizeof *mapping);
@@ -239,6 +233,25 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
else
skb->pkt_type = PACKET_MULTICAST;
+ sgid = &((struct ib_grh *)skb->data)->sgid;
+
+ /*
+ * Drop packets that this interface sent, i.e. multicast packets
+ * that the HCA has replicated.
+ */
+ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
+ int need_repost = 1;
+
+ if ((wc->wc_flags & IB_WC_GRH) &&
+ sgid->global.interface_id != priv->local_gid.global.interface_id)
+ need_repost = 0;
+
+ if (need_repost) {
+ dev_kfree_skb_any(skb);
+ goto repost;
+ }
+ }
+
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -538,6 +551,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_tx_buf *tx_req;
int hlen, rc;
void *phead;
+ unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);
if (skb_is_gso(skb)) {
hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -561,6 +575,23 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
phead = NULL;
hlen = 0;
}
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ /* skb_linearize() may succeed yet leave nr_frags too large; re-check */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
skb->len, address, qpn);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 25509bbd4a05..80807d6e5c4c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -51,6 +51,7 @@
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
+#include <linux/pci.h>
#define DRV_VERSION "1.0.0"
@@ -1590,11 +1591,67 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->tx_ring = NULL;
}
+static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
+}
+
+static int ipoib_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
+ if (err)
+ return err;
+
+ ivf->vf = vf;
+
+ return 0;
+}
+
+static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
+ return -EINVAL;
+
+ return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
+}
+
+static int ipoib_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
+}
+
static const struct header_ops ipoib_header_ops = {
.create = ipoib_hard_header,
};
-static const struct net_device_ops ipoib_netdev_ops = {
+static const struct net_device_ops ipoib_netdev_ops_pf = {
+ .ndo_uninit = ipoib_uninit,
+ .ndo_open = ipoib_open,
+ .ndo_stop = ipoib_stop,
+ .ndo_change_mtu = ipoib_change_mtu,
+ .ndo_fix_features = ipoib_fix_features,
+ .ndo_start_xmit = ipoib_start_xmit,
+ .ndo_tx_timeout = ipoib_timeout,
+ .ndo_set_rx_mode = ipoib_set_mcast_list,
+ .ndo_get_iflink = ipoib_get_iflink,
+ .ndo_set_vf_link_state = ipoib_set_vf_link_state,
+ .ndo_get_vf_config = ipoib_get_vf_config,
+ .ndo_get_vf_stats = ipoib_get_vf_stats,
+ .ndo_set_vf_guid = ipoib_set_vf_guid,
+};
+
+static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_uninit = ipoib_uninit,
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
@@ -1610,7 +1667,11 @@ void ipoib_setup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- dev->netdev_ops = &ipoib_netdev_ops;
+ if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
+ dev->netdev_ops = &ipoib_netdev_ops_vf;
+ else
+ dev->netdev_ops = &ipoib_netdev_ops_pf;
+
dev->header_ops = &ipoib_header_ops;
ipoib_set_ethtool_ops(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index d48c5bae7877..b809c373e40e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -206,7 +206,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
if (dev->features & NETIF_F_SG)
- init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ init_attr.cap.max_send_sge =
+ min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
@@ -233,6 +234,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
+ priv->max_send_sge = init_attr.cap.max_send_sge;
+
return 0;
out_free_send_cq:
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index c827c93f46c5..64b3d11dcf1e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -612,6 +612,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
+ u32 max_fr_sectors;
u16 max_cmds;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -632,7 +633,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
iser_conn = ep->dd_data;
max_cmds = iser_conn->max_cmds;
shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
- shost->max_sectors = iser_conn->scsi_max_sectors;
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_UP) {
@@ -657,8 +657,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
*/
shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
- shost->max_sectors = min_t(unsigned int,
- 1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
if (iscsi_host_add(shost,
ib_conn->device->ib_device->dma_device)) {
@@ -672,6 +670,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
goto free_host;
}
+ /*
+ * FRs or FMRs can only map up to a (device) page per entry, but if the
+ * first entry is misaligned we'll end up using two entries (head and
+ * tail) for a single page's worth of data, so we have to drop one
+ * segment from the calculation.
+ */
+ max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
+ shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+
if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
cmds_max, max_cmds);
@@ -969,7 +976,16 @@ static umode_t iser_attr_is_visible(int param_type, int param)
static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
{
- blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
+ struct iscsi_session *session;
+ struct iser_conn *iser_conn;
+ struct ib_device *ib_dev;
+
+ session = starget_to_session(scsi_target(sdev))->dd_data;
+ iser_conn = session->leadconn->dd_data;
+ ib_dev = iser_conn->ib_conn.device->ib_device;
+
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
return 0;
}
@@ -980,7 +996,6 @@ static struct scsi_host_template iscsi_iser_sht = {
.queuecommand = iscsi_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
.sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE,
- .max_sectors = ISER_DEF_MAX_SECTORS,
.cmd_per_lun = ISER_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 95f0a64e076b..0351059783b1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -458,9 +458,6 @@ struct iser_fr_pool {
* @comp: iser completion context
* @fr_pool: connection fast registration poool
* @pi_support: Indicate device T10-PI support
- * @last: last send wr to signal all flush errors were drained
- * @last_cqe: cqe handler for last wr
- * @last_comp: completes when all connection completions consumed
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
@@ -472,10 +469,7 @@ struct ib_conn {
struct iser_comp *comp;
struct iser_fr_pool fr_pool;
bool pi_support;
- struct ib_send_wr last;
- struct ib_cqe last_cqe;
struct ib_cqe reg_cqe;
- struct completion last_comp;
};
/**
@@ -617,7 +611,6 @@ void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
-void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_task_rdma_init(struct iscsi_iser_task *task);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index ed54b388e7ad..81ae2e30dd12 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -729,13 +729,6 @@ void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
kmem_cache_free(ig.desc_cache, desc);
}
-void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct ib_conn *ib_conn = wc->qp->qp_context;
-
- complete(&ib_conn->last_comp);
-}
-
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 40c0f4978e2f..1b4945367e4f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -252,14 +252,21 @@ void iser_free_fmr_pool(struct ib_conn *ib_conn)
}
static int
-iser_alloc_reg_res(struct ib_device *ib_device,
+iser_alloc_reg_res(struct iser_device *device,
struct ib_pd *pd,
struct iser_reg_resources *res,
unsigned int size)
{
+ struct ib_device *ib_dev = device->ib_device;
+ enum ib_mr_type mr_type;
int ret;
- res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size);
+ if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
+ res->mr = ib_alloc_mr(pd, mr_type, size);
if (IS_ERR(res->mr)) {
ret = PTR_ERR(res->mr);
iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
@@ -277,7 +284,7 @@ iser_free_reg_res(struct iser_reg_resources *rsc)
}
static int
-iser_alloc_pi_ctx(struct ib_device *ib_device,
+iser_alloc_pi_ctx(struct iser_device *device,
struct ib_pd *pd,
struct iser_fr_desc *desc,
unsigned int size)
@@ -291,7 +298,7 @@ iser_alloc_pi_ctx(struct ib_device *ib_device,
pi_ctx = desc->pi_ctx;
- ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size);
+ ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
if (ret) {
iser_err("failed to allocate reg_resources\n");
goto alloc_reg_res_err;
@@ -324,7 +331,7 @@ iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
}
static struct iser_fr_desc *
-iser_create_fastreg_desc(struct ib_device *ib_device,
+iser_create_fastreg_desc(struct iser_device *device,
struct ib_pd *pd,
bool pi_enable,
unsigned int size)
@@ -336,12 +343,12 @@ iser_create_fastreg_desc(struct ib_device *ib_device,
if (!desc)
return ERR_PTR(-ENOMEM);
- ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size);
+ ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
if (ret)
goto reg_res_alloc_failure;
if (pi_enable) {
- ret = iser_alloc_pi_ctx(ib_device, pd, desc, size);
+ ret = iser_alloc_pi_ctx(device, pd, desc, size);
if (ret)
goto pi_ctx_alloc_failure;
}
@@ -374,7 +381,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
spin_lock_init(&fr_pool->lock);
fr_pool->size = 0;
for (i = 0; i < cmds_max; i++) {
- desc = iser_create_fastreg_desc(device->ib_device, device->pd,
+ desc = iser_create_fastreg_desc(device, device->pd,
ib_conn->pi_support, size);
if (IS_ERR(desc)) {
ret = PTR_ERR(desc);
@@ -663,7 +670,6 @@ void iser_conn_release(struct iser_conn *iser_conn)
int iser_conn_terminate(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct ib_send_wr *bad_wr;
int err = 0;
/* terminate the iser conn only if the conn state is UP */
@@ -688,14 +694,8 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
iser_err("Failed to disconnect, conn: 0x%p err %d\n",
iser_conn, err);
- /* post an indication that all flush errors were consumed */
- err = ib_post_send(ib_conn->qp, &ib_conn->last, &bad_wr);
- if (err) {
- iser_err("conn %p failed to post last wr", ib_conn);
- return 1;
- }
-
- wait_for_completion(&ib_conn->last_comp);
+ /* block until all flush errors are consumed */
+ ib_drain_sq(ib_conn->qp);
}
return 1;
@@ -954,10 +954,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
ib_conn->post_recv_buf_count = 0;
ib_conn->reg_cqe.done = iser_reg_comp;
- ib_conn->last_cqe.done = iser_last_comp;
- ib_conn->last.wr_cqe = &ib_conn->last_cqe;
- ib_conn->last.opcode = IB_WR_SEND;
- init_completion(&ib_conn->last_comp);
}
/**
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f121e6129339..411e4464ca23 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -49,22 +49,24 @@ static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
+isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
+isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn);
+isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
+static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -138,7 +140,7 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.qp_context = isert_conn;
attr.send_cq = comp->cq;
attr.recv_cq = comp->cq;
- attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
+ attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
@@ -177,12 +179,6 @@ err:
return ret;
}
-static void
-isert_cq_event_callback(struct ib_event *e, void *context)
-{
- isert_dbg("event: %d\n", e->event);
-}
-
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
@@ -212,6 +208,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->pd->local_dma_lkey;
+ rx_desc->rx_cqe.done = isert_recv_done;
}
return 0;
@@ -250,9 +247,6 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->rx_descs = NULL;
}
-static void isert_cq_work(struct work_struct *);
-static void isert_cq_callback(struct ib_cq *, void *);
-
static void
isert_free_comps(struct isert_device *device)
{
@@ -261,10 +255,8 @@ isert_free_comps(struct isert_device *device)
for (i = 0; i < device->comps_used; i++) {
struct isert_comp *comp = &device->comps[i];
- if (comp->cq) {
- cancel_work_sync(&comp->work);
- ib_destroy_cq(comp->cq);
- }
+ if (comp->cq)
+ ib_free_cq(comp->cq);
}
kfree(device->comps);
}
@@ -293,28 +285,17 @@ isert_alloc_comps(struct isert_device *device)
max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
for (i = 0; i < device->comps_used; i++) {
- struct ib_cq_init_attr cq_attr = {};
struct isert_comp *comp = &device->comps[i];
comp->device = device;
- INIT_WORK(&comp->work, isert_cq_work);
- cq_attr.cqe = max_cqe;
- cq_attr.comp_vector = i;
- comp->cq = ib_create_cq(device->ib_device,
- isert_cq_callback,
- isert_cq_event_callback,
- (void *)comp,
- &cq_attr);
+ comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
+ IB_POLL_WORKQUEUE);
if (IS_ERR(comp->cq)) {
isert_err("Unable to allocate cq\n");
ret = PTR_ERR(comp->cq);
comp->cq = NULL;
goto out_cq;
}
-
- ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
- if (ret)
- goto out_cq;
}
return 0;
@@ -582,7 +563,6 @@ isert_init_conn(struct isert_conn *isert_conn)
INIT_LIST_HEAD(&isert_conn->node);
init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
- init_completion(&isert_conn->wait);
kref_init(&isert_conn->kref);
mutex_init(&isert_conn->mutex);
spin_lock_init(&isert_conn->pool_lock);
@@ -596,11 +576,13 @@ isert_free_login_buf(struct isert_conn *isert_conn)
struct ib_device *ib_dev = isert_conn->device->ib_device;
ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+ ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
+ kfree(isert_conn->login_rsp_buf);
+
ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN,
+ ISER_RX_PAYLOAD_SIZE,
DMA_FROM_DEVICE);
- kfree(isert_conn->login_buf);
+ kfree(isert_conn->login_req_buf);
}
static int
@@ -609,50 +591,48 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
{
int ret;
- isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
- ISER_RX_LOGIN_SIZE, GFP_KERNEL);
- if (!isert_conn->login_buf) {
+ isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
+ GFP_KERNEL);
+ if (!isert_conn->login_req_buf) {
isert_err("Unable to allocate isert_conn->login_buf\n");
return -ENOMEM;
}
- isert_conn->login_req_buf = isert_conn->login_buf;
- isert_conn->login_rsp_buf = isert_conn->login_buf +
- ISCSI_DEF_MAX_RECV_SEG_LEN;
-
- isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
- isert_conn->login_buf, isert_conn->login_req_buf,
- isert_conn->login_rsp_buf);
-
isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
- (void *)isert_conn->login_req_buf,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-
+ isert_conn->login_req_buf,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
if (ret) {
isert_err("login_req_dma mapping error: %d\n", ret);
isert_conn->login_req_dma = 0;
- goto out_login_buf;
+ goto out_free_login_req_buf;
}
- isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
- (void *)isert_conn->login_rsp_buf,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+ isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!isert_conn->login_rsp_buf) {
+ isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+ goto out_unmap_login_req_buf;
+ }
+ isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
+ isert_conn->login_rsp_buf,
+ ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
isert_err("login_rsp_dma mapping error: %d\n", ret);
isert_conn->login_rsp_dma = 0;
- goto out_req_dma_map;
+ goto out_free_login_rsp_buf;
}
return 0;
-out_req_dma_map:
+out_free_login_rsp_buf:
+ kfree(isert_conn->login_rsp_buf);
+out_unmap_login_req_buf:
ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-out_login_buf:
- kfree(isert_conn->login_buf);
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+out_free_login_req_buf:
+ kfree(isert_conn->login_req_buf);
return ret;
}
@@ -726,7 +706,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
if (ret)
goto out_conn_dev;
- ret = isert_rdma_post_recvl(isert_conn);
+ ret = isert_login_post_recv(isert_conn);
if (ret)
goto out_conn_dev;
@@ -773,7 +753,7 @@ isert_connect_release(struct isert_conn *isert_conn)
ib_destroy_qp(isert_conn->qp);
}
- if (isert_conn->login_buf)
+ if (isert_conn->login_req_buf)
isert_free_login_buf(isert_conn);
isert_device_put(device);
@@ -820,12 +800,30 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->kref, isert_release_kref);
}
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+ struct isert_np *isert_np = isert_conn->cm_id->context;
+
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_conn->node)) {
+ /*
+ * This means iscsi doesn't know about this connection,
+ * so schedule a cleanup ourselves.
+ */
+ list_del_init(&isert_conn->node);
+ isert_put_conn(isert_conn);
+ queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ mutex_unlock(&isert_np->mutex);
+}
+
/**
* isert_conn_terminate() - Initiate connection termination
* @isert_conn: isert connection struct
*
* Notes:
- * In case the connection state is FULL_FEATURE, move state
+ * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
@@ -837,23 +835,16 @@ isert_conn_terminate(struct isert_conn *isert_conn)
{
int err;
- switch (isert_conn->state) {
- case ISER_CONN_TERMINATING:
- break;
- case ISER_CONN_UP:
- case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
- isert_info("Terminating conn %p state %d\n",
- isert_conn, isert_conn->state);
- isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->cm_id);
- if (err)
- isert_warn("Failed rdma_disconnect isert_conn %p\n",
- isert_conn);
- break;
- default:
- isert_warn("conn %p teminating in state %d\n",
- isert_conn, isert_conn->state);
- }
+ if (isert_conn->state >= ISER_CONN_TERMINATING)
+ return;
+
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
+ isert_conn->state = ISER_CONN_TERMINATING;
+ err = rdma_disconnect(isert_conn->cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
}
static int
@@ -887,35 +878,27 @@ static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
enum rdma_cm_event_type event)
{
- struct isert_np *isert_np = cma_id->context;
- struct isert_conn *isert_conn;
- bool terminating = false;
-
- if (isert_np->cm_id == cma_id)
- return isert_np_cma_handler(cma_id->context, event);
-
- isert_conn = cma_id->qp->qp_context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
mutex_lock(&isert_conn->mutex);
- terminating = (isert_conn->state == ISER_CONN_TERMINATING);
- isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->mutex);
-
- isert_info("conn %p completing wait\n", isert_conn);
- complete(&isert_conn->wait);
-
- if (terminating)
- goto out;
-
- mutex_lock(&isert_np->mutex);
- if (!list_empty(&isert_conn->node)) {
- list_del_init(&isert_conn->node);
- isert_put_conn(isert_conn);
- queue_work(isert_release_wq, &isert_conn->release_work);
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ isert_conn_terminate(isert_conn);
+ ib_drain_qp(isert_conn->qp);
+ isert_handle_unbound_conn(isert_conn);
+ break;
+ case ISER_CONN_BOUND:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ break;
+ default:
+ isert_warn("conn %p teminating in state %d\n",
+ isert_conn, isert_conn->state);
}
- mutex_unlock(&isert_np->mutex);
+ mutex_unlock(&isert_conn->mutex);
-out:
return 0;
}
@@ -934,12 +917,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
+ struct isert_np *isert_np = cma_id->context;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id, cma_id->context);
+ if (isert_np->cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event->event);
+
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
@@ -977,7 +964,8 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &isert_conn->rx_descs[i];
- rx_wr->wr_id = (uintptr_t)rx_desc;
+
+ rx_wr->wr_cqe = &rx_desc->rx_cqe;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
@@ -985,13 +973,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- isert_conn->post_recv_buf_count += count;
ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
&rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count -= count;
- }
return ret;
}
@@ -1002,23 +987,20 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
struct ib_recv_wr *rx_wr_failed, rx_wr;
int ret;
- rx_wr.wr_id = (uintptr_t)rx_desc;
+ rx_wr.wr_cqe = &rx_desc->rx_cqe;
rx_wr.sg_list = &rx_desc->rx_sg;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
static int
-isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
+isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct ib_send_wr send_wr, *send_wr_failed;
@@ -1027,8 +1009,10 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ tx_desc->tx_cqe.done = isert_login_send_done;
+
send_wr.next = NULL;
- send_wr.wr_id = (uintptr_t)tx_desc;
+ send_wr.wr_cqe = &tx_desc->tx_cqe;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
@@ -1056,7 +1040,6 @@ isert_create_send_desc(struct isert_conn *isert_conn,
tx_desc->iser_header.flags = ISCSI_CTRL;
tx_desc->num_sge = 1;
- tx_desc->isert_cmd = isert_cmd;
if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
@@ -1097,8 +1080,9 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
{
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
- isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
- send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
+ isert_cmd->iser_ib_op = ISER_IB_SEND;
+ tx_desc->tx_cqe.done = isert_send_done;
+ send_wr->wr_cqe = &tx_desc->tx_cqe;
if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
send_wr->opcode = IB_WR_SEND_WITH_INV;
@@ -1113,7 +1097,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
}
static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn)
+isert_login_post_recv(struct isert_conn *isert_conn)
{
struct ib_recv_wr rx_wr, *rx_wr_fail;
struct ib_sge sge;
@@ -1121,23 +1105,22 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
memset(&sge, 0, sizeof(struct ib_sge));
sge.addr = isert_conn->login_req_dma;
- sge.length = ISER_RX_LOGIN_SIZE;
+ sge.length = ISER_RX_PAYLOAD_SIZE;
sge.lkey = isert_conn->device->pd->local_dma_lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
+ isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
+
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
- rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
+ rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1203,12 +1186,12 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
goto post_send;
}
- ret = isert_rdma_post_recvl(isert_conn);
+ ret = isert_login_post_recv(isert_conn);
if (ret)
return ret;
}
post_send:
- ret = isert_post_send(isert_conn, tx_desc);
+ ret = isert_login_post_send(isert_conn, tx_desc);
if (ret)
return ret;
@@ -1218,7 +1201,7 @@ post_send:
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
- struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+ struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
int rx_buflen = isert_conn->login_req_len;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
@@ -1551,12 +1534,42 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
}
static void
-isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
+isert_print_wc(struct ib_wc *wc, const char *type)
{
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ isert_err("%s failure: %s (%d) vend_err %x\n", type,
+ ib_wc_status_msg(wc->status), wc->status,
+ wc->vendor_err);
+ else
+ isert_dbg("%s failure: %s (%d)\n", type,
+ ib_wc_status_msg(wc->status), wc->status);
+}
+
+static void
+isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
+ struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
uint64_t read_va = 0, write_va = 0;
uint32_t read_stag = 0, write_stag = 0;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "recv");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ return;
+ }
+
+ ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+
+ isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+ rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
+ (int)(wc->byte_len - ISER_HEADERS_LEN));
+
switch (iser_ctrl->flags & 0xF0) {
case ISCSI_CTRL:
if (iser_ctrl->flags & ISER_RSV) {
@@ -1584,56 +1597,40 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
isert_rx_opcode(isert_conn, rx_desc,
read_stag, read_va, write_stag, write_va);
+
+ ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
static void
-isert_rcv_completion(struct iser_rx_desc *desc,
- struct isert_conn *isert_conn,
- u32 xfer_len)
+isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct iscsi_hdr *hdr;
- u64 rx_dma;
- int rx_buflen;
-
- if ((char *)desc == isert_conn->login_req_buf) {
- rx_dma = isert_conn->login_req_dma;
- rx_buflen = ISER_RX_LOGIN_SIZE;
- isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
- rx_dma, rx_buflen);
- } else {
- rx_dma = desc->dma_addr;
- rx_buflen = ISER_RX_PAYLOAD_SIZE;
- isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
- rx_dma, rx_buflen);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "login recv");
+ return;
}
- ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
- hdr = &desc->iscsi_header;
- isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
- hdr->opcode, hdr->itt, hdr->flags,
- (int)(xfer_len - ISER_HEADERS_LEN));
+ isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
- if ((char *)desc == isert_conn->login_req_buf) {
- isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
- if (isert_conn->conn) {
- struct iscsi_login *login = isert_conn->conn->conn_login;
+ if (isert_conn->conn) {
+ struct iscsi_login *login = isert_conn->conn->conn_login;
- if (login && !login->first_request)
- isert_rx_login_req(isert_conn);
- }
- mutex_lock(&isert_conn->mutex);
- complete(&isert_conn->login_req_comp);
- mutex_unlock(&isert_conn->mutex);
- } else {
- isert_rx_do_work(desc, isert_conn);
+ if (login && !login->first_request)
+ isert_rx_login_req(isert_conn);
}
- ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
- DMA_FROM_DEVICE);
+ mutex_lock(&isert_conn->mutex);
+ complete(&isert_conn->login_req_comp);
+ mutex_unlock(&isert_conn->mutex);
- isert_conn->post_recv_buf_count--;
+ ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
static int
@@ -1683,54 +1680,50 @@ isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-
isert_dbg("Cmd %p\n", isert_cmd);
- if (wr->data.sg) {
+ if (isert_cmd->data.sg) {
isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &wr->data);
+ isert_unmap_data_buf(isert_conn, &isert_cmd->data);
}
- if (wr->rdma_wr) {
+ if (isert_cmd->rdma_wr) {
isert_dbg("Cmd %p free send_wr\n", isert_cmd);
- kfree(wr->rdma_wr);
- wr->rdma_wr = NULL;
+ kfree(isert_cmd->rdma_wr);
+ isert_cmd->rdma_wr = NULL;
}
- if (wr->ib_sge) {
+ if (isert_cmd->ib_sge) {
isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
- kfree(wr->ib_sge);
- wr->ib_sge = NULL;
+ kfree(isert_cmd->ib_sge);
+ isert_cmd->ib_sge = NULL;
}
}
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-
isert_dbg("Cmd %p\n", isert_cmd);
- if (wr->fr_desc) {
- isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
- if (wr->fr_desc->ind & ISERT_PROTECTED) {
- isert_unmap_data_buf(isert_conn, &wr->prot);
- wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ if (isert_cmd->fr_desc) {
+ isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
+ if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
+ isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
+ isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
}
spin_lock_bh(&isert_conn->pool_lock);
- list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
+ list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
spin_unlock_bh(&isert_conn->pool_lock);
- wr->fr_desc = NULL;
+ isert_cmd->fr_desc = NULL;
}
- if (wr->data.sg) {
+ if (isert_cmd->data.sg) {
isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &wr->data);
+ isert_unmap_data_buf(isert_conn, &isert_cmd->data);
}
- wr->ib_sge = NULL;
- wr->rdma_wr = NULL;
+ isert_cmd->ib_sge = NULL;
+ isert_cmd->rdma_wr = NULL;
}
static void
@@ -1882,52 +1875,70 @@ fail_mr_status:
}
static void
-isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
- struct isert_cmd *isert_cmd)
+isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = isert_cmd->conn;
+ struct isert_conn *isert_conn = wc->qp->qp_context;
struct isert_device *device = isert_conn->device;
+ struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
+ struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
int ret = 0;
- if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
- ret = isert_check_pi_status(se_cmd,
- wr->fr_desc->pi_ctx->sig_mr);
- wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "rdma write");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(desc, isert_cmd, device->ib_device, true);
+ return;
+ }
+
+ isert_dbg("Cmd %p\n", isert_cmd);
+
+ if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
+ ret = isert_check_pi_status(cmd,
+ isert_cmd->fr_desc->pi_ctx->sig_mr);
+ isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
}
device->unreg_rdma_mem(isert_cmd, isert_conn);
- wr->rdma_wr_num = 0;
+ isert_cmd->rdma_wr_num = 0;
if (ret)
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->pi_err, 0);
+ transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
else
- isert_put_response(isert_conn->conn, cmd);
+ isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
}
static void
-isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
- struct isert_cmd *isert_cmd)
+isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct isert_device *device = isert_conn->device;
+ struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = isert_cmd->conn;
- struct isert_device *device = isert_conn->device;
int ret = 0;
- if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "rdma read");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(desc, isert_cmd, device->ib_device, true);
+ return;
+ }
+
+ isert_dbg("Cmd %p\n", isert_cmd);
+
+ if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
ret = isert_check_pi_status(se_cmd,
- wr->fr_desc->pi_ctx->sig_mr);
- wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ isert_cmd->fr_desc->pi_ctx->sig_mr);
+ isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
}
iscsit_stop_dataout_timer(cmd);
device->unreg_rdma_mem(isert_cmd, isert_conn);
- cmd->write_data_done = wr->data.len;
- wr->rdma_wr_num = 0;
+ cmd->write_data_done = isert_cmd->data.len;
+ isert_cmd->rdma_wr_num = 0;
isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@@ -1975,170 +1986,56 @@ isert_do_control_comp(struct work_struct *work)
}
static void
-isert_response_completion(struct iser_tx_desc *tx_desc,
- struct isert_cmd *isert_cmd,
- struct isert_conn *isert_conn,
- struct ib_device *ib_dev)
+isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-
- if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
- cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
- cmd->i_state == ISTATE_SEND_REJECT ||
- cmd->i_state == ISTATE_SEND_TEXTRSP) {
- isert_unmap_tx_desc(tx_desc, ib_dev);
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
- INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
- queue_work(isert_comp_wq, &isert_cmd->comp_work);
- return;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "login send");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
- cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
+ isert_unmap_tx_desc(tx_desc, ib_dev);
}
static void
-isert_snd_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
+isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
- struct isert_rdma_wr *wr;
+ struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
- if (!isert_cmd) {
- isert_unmap_tx_desc(tx_desc, ib_dev);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "send");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
return;
}
- wr = &isert_cmd->rdma_wr;
- isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
+ isert_dbg("Cmd %p\n", isert_cmd);
- switch (wr->iser_ib_op) {
- case ISER_IB_SEND:
- isert_response_completion(tx_desc, isert_cmd,
- isert_conn, ib_dev);
- break;
- case ISER_IB_RDMA_WRITE:
- isert_completion_rdma_write(tx_desc, isert_cmd);
- break;
- case ISER_IB_RDMA_READ:
- isert_completion_rdma_read(tx_desc, isert_cmd);
- break;
+ switch (isert_cmd->iscsi_cmd->i_state) {
+ case ISTATE_SEND_TASKMGTRSP:
+ case ISTATE_SEND_LOGOUTRSP:
+ case ISTATE_SEND_REJECT:
+ case ISTATE_SEND_TEXTRSP:
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+
+ INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
+ queue_work(isert_comp_wq, &isert_cmd->comp_work);
+ return;
default:
- isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
- dump_stack();
+ isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
break;
}
}
-/**
- * is_isert_tx_desc() - Indicate if the completion wr_id
- * is a TX descriptor or not.
- * @isert_conn: iser connection
- * @wr_id: completion WR identifier
- *
- * Since we cannot rely on wc opcode in FLUSH errors
- * we must work around it by checking if the wr_id address
- * falls in the iser connection rx_descs buffer. If so
- * it is an RX descriptor, otherwise it is a TX.
- */
-static inline bool
-is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
-{
- void *start = isert_conn->rx_descs;
- int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
-
- if (wr_id >= start && wr_id < start + len)
- return false;
-
- return true;
-}
-
-static void
-isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
-{
- if (wc->wr_id == ISER_BEACON_WRID) {
- isert_info("conn %p completing wait_comp_err\n",
- isert_conn);
- complete(&isert_conn->wait_comp_err);
- } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
- struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct isert_cmd *isert_cmd;
- struct iser_tx_desc *desc;
-
- desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- isert_cmd = desc->isert_cmd;
- if (!isert_cmd)
- isert_unmap_tx_desc(desc, ib_dev);
- else
- isert_completion_put(desc, isert_cmd, ib_dev, true);
- } else {
- isert_conn->post_recv_buf_count--;
- if (!isert_conn->post_recv_buf_count)
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
- }
-}
-
-static void
-isert_handle_wc(struct ib_wc *wc)
-{
- struct isert_conn *isert_conn;
- struct iser_tx_desc *tx_desc;
- struct iser_rx_desc *rx_desc;
-
- isert_conn = wc->qp->qp_context;
- if (likely(wc->status == IB_WC_SUCCESS)) {
- if (wc->opcode == IB_WC_RECV) {
- rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
- isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
- } else {
- tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- isert_snd_completion(tx_desc, isert_conn);
- }
- } else {
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- isert_err("%s (%d): wr id %llx vend_err %x\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->wr_id, wc->vendor_err);
- else
- isert_dbg("%s (%d): wr id %llx\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->wr_id);
-
- if (wc->wr_id != ISER_FASTREG_LI_WRID)
- isert_cq_comp_err(isert_conn, wc);
- }
-}
-
-static void
-isert_cq_work(struct work_struct *work)
-{
- enum { isert_poll_budget = 65536 };
- struct isert_comp *comp = container_of(work, struct isert_comp,
- work);
- struct ib_wc *const wcs = comp->wcs;
- int i, n, completed = 0;
-
- while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
- for (i = 0; i < n; i++)
- isert_handle_wc(&wcs[i]);
-
- completed += n;
- if (completed >= isert_poll_budget)
- break;
- }
-
- ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_callback(struct ib_cq *cq, void *context)
-{
- struct isert_comp *comp = context;
-
- queue_work(isert_comp_wq, &comp->work);
-}
-
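The polling machinery removed above (per-completion work item, wc batching, the 65536-entry budget, CQ re-arming) is what ib_alloc_cq() now provides generically. A hedged sketch of the allocation side; max_cqe and comp_vector are illustrative placeholders:

	struct ib_cq *cq;

	cq = ib_alloc_cq(device->ib_device, NULL /* private context */,
			 max_cqe, comp_vector, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

IB_POLL_WORKQUEUE makes the core poll the CQ from process context, much as isert_cq_work() did by hand; ib_free_cq() stops the polling and releases the CQ.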
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
@@ -2395,7 +2292,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
page_off = offset % PAGE_SIZE;
rdma_wr->wr.sg_list = ib_sge;
- rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
+ rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
+
/*
* Map the TCM scatterlist memory into ib_sge dma_addr entries.
*/
@@ -2428,24 +2326,23 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
}
static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr)
+isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
{
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct isert_data_buf *data = &wr->data;
+ struct isert_data_buf *data = &isert_cmd->data;
struct ib_rdma_wr *rdma_wr;
struct ib_sge *ib_sge;
u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
int ret = 0, i, ib_sge_cnt;
- isert_cmd->tx_desc.isert_cmd = isert_cmd;
-
- offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+ offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
+ cmd->write_data_done : 0;
ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
se_cmd->t_data_nents, se_cmd->data_length,
- offset, wr->iser_ib_op, &wr->data);
+ offset, isert_cmd->iser_ib_op,
+ &isert_cmd->data);
if (ret)
return ret;
@@ -2458,41 +2355,44 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ret = -ENOMEM;
goto unmap_cmd;
}
- wr->ib_sge = ib_sge;
+ isert_cmd->ib_sge = ib_sge;
- wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
- wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num,
- GFP_KERNEL);
- if (!wr->rdma_wr) {
- isert_dbg("Unable to allocate wr->rdma_wr\n");
+ isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
+ isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) *
+ isert_cmd->rdma_wr_num, GFP_KERNEL);
+ if (!isert_cmd->rdma_wr) {
+ isert_dbg("Unable to allocate isert_cmd->rdma_wr\n");
ret = -ENOMEM;
goto unmap_cmd;
}
- wr->isert_cmd = isert_cmd;
rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
- for (i = 0; i < wr->rdma_wr_num; i++) {
- rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i];
+ for (i = 0; i < isert_cmd->rdma_wr_num; i++) {
+ rdma_wr = &isert_cmd->rdma_wr[i];
data_len = min(data_left, rdma_write_max);
rdma_wr->wr.send_flags = 0;
- if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+
rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
rdma_wr->remote_addr = isert_cmd->read_va + offset;
rdma_wr->rkey = isert_cmd->read_stag;
- if (i + 1 == wr->rdma_wr_num)
+ if (i + 1 == isert_cmd->rdma_wr_num)
rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
else
- rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
+ rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
} else {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+
rdma_wr->wr.opcode = IB_WR_RDMA_READ;
rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
rdma_wr->rkey = isert_cmd->write_stag;
- if (i + 1 == wr->rdma_wr_num)
+ if (i + 1 == isert_cmd->rdma_wr_num)
rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
else
- rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
+ rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
}
ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
@@ -2517,7 +2417,7 @@ isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
u32 rkey;
memset(inv_wr, 0, sizeof(*inv_wr));
- inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr->wr_cqe = NULL;
inv_wr->opcode = IB_WR_LOCAL_INV;
inv_wr->ex.invalidate_rkey = mr->rkey;
@@ -2573,7 +2473,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
reg_wr.wr.next = NULL;
reg_wr.wr.opcode = IB_WR_REG_MR;
- reg_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
+ reg_wr.wr.wr_cqe = NULL;
reg_wr.wr.send_flags = 0;
reg_wr.wr.num_sge = 0;
reg_wr.mr = mr;
@@ -2660,10 +2560,10 @@ isert_set_prot_checks(u8 prot_checks)
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
- struct se_cmd *se_cmd,
- struct isert_rdma_wr *rdma_wr,
+ struct isert_cmd *isert_cmd,
struct fast_reg_descriptor *fr_desc)
{
+ struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
struct ib_sig_handover_wr sig_wr;
struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
struct pi_context *pi_ctx = fr_desc->pi_ctx;
@@ -2684,14 +2584,14 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
memset(&sig_wr, 0, sizeof(sig_wr));
sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
- sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
- sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA];
+ sig_wr.wr.wr_cqe = NULL;
+ sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
sig_wr.wr.num_sge = 1;
sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
sig_wr.sig_attrs = &sig_attrs;
sig_wr.sig_mr = pi_ctx->sig_mr;
if (se_cmd->t_prot_sg)
- sig_wr.prot = &rdma_wr->ib_sg[PROT];
+ sig_wr.prot = &isert_cmd->ib_sg[PROT];
if (!wr)
wr = &sig_wr.wr;
@@ -2705,35 +2605,34 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
}
fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
- rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
- rdma_wr->ib_sg[SIG].addr = 0;
- rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
+ isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
+ isert_cmd->ib_sg[SIG].addr = 0;
+ isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
/*
* We have protection guards on the wire
* so we need to set a larger transfer
*/
- rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
+ isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;
isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
- rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
- rdma_wr->ib_sg[SIG].lkey);
+ isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
+ isert_cmd->ib_sg[SIG].lkey);
err:
return ret;
}
static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd,
- struct isert_rdma_wr *wr)
+ struct isert_cmd *isert_cmd)
{
struct isert_device *device = isert_conn->device;
struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
int ret;
- if (!wr->fr_desc->pi_ctx) {
- ret = isert_create_pi_ctx(wr->fr_desc,
+ if (!isert_cmd->fr_desc->pi_ctx) {
+ ret = isert_create_pi_ctx(isert_cmd->fr_desc,
device->ib_device,
device->pd);
if (ret) {
@@ -2748,16 +2647,20 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
se_cmd->t_prot_sg,
se_cmd->t_prot_nents,
se_cmd->prot_length,
- 0, wr->iser_ib_op, &wr->prot);
+ 0,
+ isert_cmd->iser_ib_op,
+ &isert_cmd->prot);
if (ret) {
isert_err("conn %p failed to map protection buffer\n",
isert_conn);
return ret;
}
- memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
- ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
- ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
+ memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
+ ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
+ &isert_cmd->prot,
+ ISERT_PROT_KEY_VALID,
+ &isert_cmd->ib_sg[PROT]);
if (ret) {
isert_err("conn %p failed to fast reg mr\n",
isert_conn);
@@ -2765,29 +2668,28 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
}
}
- ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
+ ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
if (ret) {
isert_err("conn %p failed to fast reg mr\n",
isert_conn);
goto unmap_prot_cmd;
}
- wr->fr_desc->ind |= ISERT_PROTECTED;
+ isert_cmd->fr_desc->ind |= ISERT_PROTECTED;
return 0;
unmap_prot_cmd:
if (se_cmd->t_prot_sg)
- isert_unmap_data_buf(isert_conn, &wr->prot);
+ isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
return ret;
}
static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr)
+isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
{
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
struct fast_reg_descriptor *fr_desc = NULL;
struct ib_rdma_wr *rdma_wr;
@@ -2796,57 +2698,61 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
int ret = 0;
unsigned long flags;
- isert_cmd->tx_desc.isert_cmd = isert_cmd;
-
- offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+ offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
+ cmd->write_data_done : 0;
ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
se_cmd->t_data_nents, se_cmd->data_length,
- offset, wr->iser_ib_op, &wr->data);
+ offset, isert_cmd->iser_ib_op,
+ &isert_cmd->data);
if (ret)
return ret;
- if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
+ if (isert_cmd->data.dma_nents != 1 ||
+ isert_prot_cmd(isert_conn, se_cmd)) {
spin_lock_irqsave(&isert_conn->pool_lock, flags);
fr_desc = list_first_entry(&isert_conn->fr_pool,
struct fast_reg_descriptor, list);
list_del(&fr_desc->list);
spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
- wr->fr_desc = fr_desc;
+ isert_cmd->fr_desc = fr_desc;
}
- ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
- ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
+ ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data,
+ ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]);
if (ret)
goto unmap_cmd;
if (isert_prot_cmd(isert_conn, se_cmd)) {
- ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
+ ret = isert_handle_prot_cmd(isert_conn, isert_cmd);
if (ret)
goto unmap_cmd;
- ib_sg = &wr->ib_sg[SIG];
+ ib_sg = &isert_cmd->ib_sg[SIG];
} else {
- ib_sg = &wr->ib_sg[DATA];
+ ib_sg = &isert_cmd->ib_sg[DATA];
}
- memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
- wr->ib_sge = &wr->s_ib_sge;
- wr->rdma_wr_num = 1;
- memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr));
- wr->rdma_wr = &wr->s_rdma_wr;
- wr->isert_cmd = isert_cmd;
+ memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg));
+ isert_cmd->ib_sge = &isert_cmd->s_ib_sge;
+ isert_cmd->rdma_wr_num = 1;
+ memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr));
+ isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr;
- rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr;
- rdma_wr->wr.sg_list = &wr->s_ib_sge;
+ rdma_wr = &isert_cmd->s_rdma_wr;
+ rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge;
rdma_wr->wr.num_sge = 1;
- rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
- if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
+ if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+
rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
rdma_wr->remote_addr = isert_cmd->read_va;
rdma_wr->rkey = isert_cmd->read_stag;
rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
0 : IB_SEND_SIGNALED;
} else {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+
rdma_wr->wr.opcode = IB_WR_RDMA_READ;
rdma_wr->remote_addr = isert_cmd->write_va;
rdma_wr->rkey = isert_cmd->write_stag;
@@ -2861,7 +2767,7 @@ unmap_cmd:
list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
}
- isert_unmap_data_buf(isert_conn, &wr->data);
+ isert_unmap_data_buf(isert_conn, &isert_cmd->data);
return ret;
}
@@ -2871,7 +2777,6 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
struct isert_conn *isert_conn = conn->context;
struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
@@ -2880,8 +2785,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
isert_cmd, se_cmd->data_length);
- wr->iser_ib_op = ISER_IB_RDMA_WRITE;
- rc = device->reg_rdma_mem(conn, cmd, wr);
+ isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE;
+ rc = device->reg_rdma_mem(isert_cmd, conn);
if (rc) {
isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
@@ -2898,8 +2803,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr);
- isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
- wr->rdma_wr_num += 1;
+ isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
+ isert_cmd->rdma_wr_num += 1;
rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
if (rc) {
@@ -2908,7 +2813,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
}
}
- rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
@@ -2927,7 +2832,6 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
struct isert_conn *isert_conn = conn->context;
struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
@@ -2935,14 +2839,14 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
isert_cmd, se_cmd->data_length, cmd->write_data_done);
- wr->iser_ib_op = ISER_IB_RDMA_READ;
- rc = device->reg_rdma_mem(conn, cmd, wr);
+ isert_cmd->iser_ib_op = ISER_IB_RDMA_READ;
+ rc = device->reg_rdma_mem(isert_cmd, conn);
if (rc) {
isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
}
- rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
@@ -3214,6 +3118,7 @@ accept_wait:
conn->context = isert_conn;
isert_conn->conn = conn;
+ isert_conn->state = ISER_CONN_BOUND;
isert_set_conn_info(np, conn, isert_conn);
@@ -3274,8 +3179,6 @@ static void isert_release_work(struct work_struct *work)
isert_info("Starting release conn %p\n", isert_conn);
- wait_for_completion(&isert_conn->wait);
-
mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_DOWN;
mutex_unlock(&isert_conn->mutex);
@@ -3309,24 +3212,6 @@ isert_wait4cmds(struct iscsi_conn *conn)
}
}
-static void
-isert_wait4flush(struct isert_conn *isert_conn)
-{
- struct ib_recv_wr *bad_wr;
-
- isert_info("conn %p\n", isert_conn);
-
- init_completion(&isert_conn->wait_comp_err);
- isert_conn->beacon.wr_id = ISER_BEACON_WRID;
- /* post an indication that all flush errors were consumed */
- if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
- isert_err("conn %p failed to post beacon", isert_conn);
- return;
- }
-
- wait_for_completion(&isert_conn->wait_comp_err);
-}
-
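The beacon removed above was a hand-rolled drain: post one extra receive work request once the QP enters the error state and wait for its flush completion. ib_drain_qp() implements the same idea generically for both the send and receive queues, so the call sites below reduce to (sketch):

	ib_drain_qp(isert_conn->qp);	/* blocks until all posted WRs flush */
	/* completion handlers are quiesced; teardown may proceed */

One caveat worth noting: ib_drain_qp() relies on the CQ consuming completions on its own, so it is meant for IB_POLL_WORKQUEUE/IB_POLL_SOFTIRQ completion queues, not IB_POLL_DIRECT ones.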
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicited dataout
@@ -3369,18 +3254,10 @@ static void isert_wait_conn(struct iscsi_conn *conn)
isert_info("Starting conn %p\n", isert_conn);
mutex_lock(&isert_conn->mutex);
- /*
- * Only wait for wait_comp_err if the isert_conn made it
- * into full feature phase..
- */
- if (isert_conn->state == ISER_CONN_INIT) {
- mutex_unlock(&isert_conn->mutex);
- return;
- }
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->mutex);
- isert_wait4flush(isert_conn);
+ ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
@@ -3392,7 +3269,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- isert_wait4flush(isert_conn);
+ ib_drain_qp(isert_conn->qp);
isert_put_conn(isert_conn);
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 8d50453eef66..147900cbb578 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -36,9 +36,7 @@
/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + \
sizeof(struct iscsi_hdr))
-#define ISER_RECV_DATA_SEG_LEN 8192
-#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
-#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
@@ -62,12 +60,11 @@
ISERT_MAX_TX_MISC_PDUS + \
ISERT_MAX_RX_MISC_PDUS)
-#define ISER_RX_PAD_SIZE (ISER_RECV_DATA_SEG_LEN + 4096 - \
- (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))
+#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
+ (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
+ sizeof(struct ib_cqe)))
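By construction the pad keeps the receive descriptor at a fixed footprint:

	sizeof(struct iser_rx_desc)
		= ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)
		  + sizeof(struct ib_cqe) + ISER_RX_PAD_SIZE
		= ISCSI_DEF_MAX_RECV_SEG_LEN + 4096

so the new rx_cqe member below is paid for by shrinking the pad rather than by growing the descriptor.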
#define ISCSI_ISER_SG_TABLESIZE 256
-#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
-#define ISER_BEACON_WRID 0xfffffffffffffffeULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -84,6 +81,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_BOUND,
ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
@@ -92,23 +90,35 @@ enum iser_conn_state {
struct iser_rx_desc {
struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
- char data[ISER_RECV_DATA_SEG_LEN];
+ char data[ISCSI_DEF_MAX_RECV_SEG_LEN];
u64 dma_addr;
struct ib_sge rx_sg;
+ struct ib_cqe rx_cqe;
char pad[ISER_RX_PAD_SIZE];
} __packed;
+static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_rx_desc, rx_cqe);
+}
+
struct iser_tx_desc {
struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
enum isert_desc_type type;
u64 dma_addr;
struct ib_sge tx_sg[2];
+ struct ib_cqe tx_cqe;
int num_sge;
- struct isert_cmd *isert_cmd;
struct ib_send_wr send_wr;
} __packed;
+static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_tx_desc, tx_cqe);
+}
+
+
enum isert_indicator {
ISERT_PROTECTED = 1 << 0,
ISERT_DATA_KEY_VALID = 1 << 1,
@@ -144,20 +154,6 @@ enum {
SIG = 2,
};
-struct isert_rdma_wr {
- struct isert_cmd *isert_cmd;
- enum iser_ib_op_code iser_ib_op;
- struct ib_sge *ib_sge;
- struct ib_sge s_ib_sge;
- int rdma_wr_num;
- struct ib_rdma_wr *rdma_wr;
- struct ib_rdma_wr s_rdma_wr;
- struct ib_sge ib_sg[3];
- struct isert_data_buf data;
- struct isert_data_buf prot;
- struct fast_reg_descriptor *fr_desc;
-};
-
struct isert_cmd {
uint32_t read_stag;
uint32_t write_stag;
@@ -170,22 +166,34 @@ struct isert_cmd {
struct iscsi_cmd *iscsi_cmd;
struct iser_tx_desc tx_desc;
struct iser_rx_desc *rx_desc;
- struct isert_rdma_wr rdma_wr;
+ enum iser_ib_op_code iser_ib_op;
+ struct ib_sge *ib_sge;
+ struct ib_sge s_ib_sge;
+ int rdma_wr_num;
+ struct ib_rdma_wr *rdma_wr;
+ struct ib_rdma_wr s_rdma_wr;
+ struct ib_sge ib_sg[3];
+ struct isert_data_buf data;
+ struct isert_data_buf prot;
+ struct fast_reg_descriptor *fr_desc;
struct work_struct comp_work;
struct scatterlist sg;
};
+static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
+{
+ return container_of(desc, struct isert_cmd, tx_desc);
+}
+
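Taken together, the two helpers let a send completion walk from the generic work completion all the way back to the command, as the ib_isert.c changes above do:

	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);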
struct isert_device;
struct isert_conn {
enum iser_conn_state state;
- int post_recv_buf_count;
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
u32 max_sge;
- char *login_buf;
- char *login_req_buf;
+ struct iser_rx_desc *login_req_buf;
char *login_rsp_buf;
u64 login_req_dma;
int login_req_len;
@@ -201,15 +209,12 @@ struct isert_conn {
struct ib_qp *qp;
struct isert_device *device;
struct mutex mutex;
- struct completion wait;
- struct completion wait_comp_err;
struct kref kref;
struct list_head fr_pool;
int fr_pool_size;
/* lock to protect fastreg pool */
spinlock_t pool_lock;
struct work_struct release_work;
- struct ib_recv_wr beacon;
bool logout_posted;
bool snd_w_inv;
};
@@ -221,17 +226,13 @@ struct isert_conn {
*
* @device: pointer to device handle
* @cq: completion queue
- * @wcs: work completion array
* @active_qps: Number of active QPs attached
* to completion context
- * @work: completion work handle
*/
struct isert_comp {
struct isert_device *device;
struct ib_cq *cq;
- struct ib_wc wcs[16];
int active_qps;
- struct work_struct work;
};
struct isert_device {
@@ -243,9 +244,8 @@ struct isert_device {
struct isert_comp *comps;
int comps_used;
struct list_head dev_node;
- int (*reg_rdma_mem)(struct iscsi_conn *conn,
- struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
+ int (*reg_rdma_mem)(struct isert_cmd *isert_cmd,
+ struct iscsi_conn *conn);
void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
struct isert_conn *isert_conn);
};
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 03022f6420d7..b6bf20496021 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -446,49 +446,17 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
dev->max_pages_per_mr);
}
-static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct srp_rdma_ch *ch = cq->cq_context;
-
- complete(&ch->done);
-}
-
-static struct ib_cqe srp_drain_cqe = {
- .done = srp_drain_done,
-};
-
/**
* srp_destroy_qp() - destroy an RDMA queue pair
* @ch: SRP RDMA channel.
*
- * Change a queue pair into the error state and wait until all receive
- * completions have been processed before destroying it. This avoids that
- * the receive completion handler can access the queue pair while it is
+ * Drain the qp before destroying it. This prevents the receive
+ * completion handler from accessing the queue pair while it is
* being destroyed.
*/
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
- static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
- static struct ib_recv_wr wr = { 0 };
- struct ib_recv_wr *bad_wr;
- int ret;
-
- wr.wr_cqe = &srp_drain_cqe;
- /* Destroying a QP and reusing ch->done is only safe if not connected */
- WARN_ON_ONCE(ch->connected);
-
- ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
- WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
- if (ret)
- goto out;
-
- init_completion(&ch->done);
- ret = ib_post_recv(ch->qp, &wr, &bad_wr);
- WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
- if (ret == 0)
- wait_for_completion(&ch->done);
-
-out:
+ ib_drain_rq(ch->qp);
ib_destroy_qp(ch->qp);
}
@@ -508,7 +476,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
if (!init_attr)
return -ENOMEM;
- /* queue_size + 1 for ib_drain_qp */
+ /* queue_size + 1 for ib_drain_rq() */
recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
ch->comp_vector, IB_POLL_SOFTIRQ);
if (IS_ERR(recv_cq)) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0c37fee363b1..8b42401d4795 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -91,76 +91,32 @@ MODULE_PARM_DESC(srpt_service_guid,
" instead of using the node_guid of the first HCA.");
static struct ib_client srpt_client;
-static void srpt_release_channel(struct srpt_rdma_ch *ch);
+static void srpt_release_cmd(struct se_cmd *se_cmd);
+static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
-/**
- * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
- */
-static inline
-enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
-{
- switch (dir) {
- case DMA_TO_DEVICE: return DMA_FROM_DEVICE;
- case DMA_FROM_DEVICE: return DMA_TO_DEVICE;
- default: return dir;
- }
-}
-
-/**
- * srpt_sdev_name() - Return the name associated with the HCA.
- *
- * Examples are ib0, ib1, ...
- */
-static inline const char *srpt_sdev_name(struct srpt_device *sdev)
-{
- return sdev->device->name;
-}
-
-static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
-{
- unsigned long flags;
- enum rdma_ch_state state;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- state = ch->state;
- spin_unlock_irqrestore(&ch->spinlock, flags);
- return state;
-}
-
-static enum rdma_ch_state
-srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
-{
- unsigned long flags;
- enum rdma_ch_state prev;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- prev = ch->state;
- ch->state = new_state;
- spin_unlock_irqrestore(&ch->spinlock, flags);
- return prev;
-}
-
-/**
- * srpt_test_and_set_ch_state() - Test and set the channel state.
- *
- * Returns true if and only if the channel state has been set to the new state.
+/*
+ * The only allowed channel state changes are those that change the channel
+ * state into a state with a higher numerical value. Hence the new > prev test.
*/
-static bool
-srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
- enum rdma_ch_state new)
+static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
unsigned long flags;
enum rdma_ch_state prev;
+ bool changed = false;
spin_lock_irqsave(&ch->spinlock, flags);
prev = ch->state;
- if (prev == old)
+ if (new > prev) {
ch->state = new;
+ changed = true;
+ }
spin_unlock_irqrestore(&ch->spinlock, flags);
- return prev == old;
+
+ return changed;
}
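A hedged usage sketch: because enum rdma_ch_state is declared in lifecycle order, forward transitions are accepted and backward ones are refused while the channel keeps its current state:

	/* CH_CONNECTING -> CH_LIVE: accepted, returns true */
	changed = srpt_set_ch_state(ch, CH_LIVE);

	/* CH_LIVE -> CH_CONNECTING: refused, state kept, returns false */
	changed = srpt_set_ch_state(ch, CH_CONNECTING);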
/**
@@ -182,7 +138,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
return;
pr_debug("ASYNC event= %d on device= %s\n", event->event,
- srpt_sdev_name(sdev));
+ sdev->device->name);
switch (event->event) {
case IB_EVENT_PORT_ERR:
@@ -220,25 +176,39 @@ static void srpt_srq_event(struct ib_event *event, void *ctx)
pr_info("SRQ event %d\n", event->event);
}
+static const char *get_ch_state_name(enum rdma_ch_state s)
+{
+ switch (s) {
+ case CH_CONNECTING:
+ return "connecting";
+ case CH_LIVE:
+ return "live";
+ case CH_DISCONNECTING:
+ return "disconnecting";
+ case CH_DRAINING:
+ return "draining";
+ case CH_DISCONNECTED:
+ return "disconnected";
+ }
+ return "???";
+}
+
/**
* srpt_qp_event() - QP event callback function.
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
- event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
+ event->event, ch->cm_id, ch->sess_name, ch->state);
switch (event->event) {
case IB_EVENT_COMM_EST:
ib_cm_notify(ch->cm_id, event->event);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
- if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
- CH_RELEASING))
- srpt_release_channel(ch);
- else
- pr_debug("%s: state %d - ignored LAST_WQE.\n",
- ch->sess_name, srpt_get_ch_state(ch));
+ pr_debug("%s-%d, state %s: received Last WQE event.\n",
+ ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
break;
default:
pr_err("received unrecognized IB QP event %d\n", event->event);
@@ -281,7 +251,7 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
struct ib_class_port_info *cif;
cif = (struct ib_class_port_info *)mad->data;
- memset(cif, 0, sizeof *cif);
+ memset(cif, 0, sizeof(*cif));
cif->base_version = 1;
cif->class_version = 1;
cif->resp_time_value = 20;
@@ -340,7 +310,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
return;
}
- memset(iocp, 0, sizeof *iocp);
+ memset(iocp, 0, sizeof(*iocp));
strcpy(iocp->id_string, SRPT_ID_STRING);
iocp->guid = cpu_to_be64(srpt_service_guid);
iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
@@ -390,7 +360,7 @@ static void srpt_get_svc_entries(u64 ioc_guid,
}
svc_entries = (struct ib_dm_svc_entries *)mad->data;
- memset(svc_entries, 0, sizeof *svc_entries);
+ memset(svc_entries, 0, sizeof(*svc_entries));
svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
snprintf(svc_entries->service_entries[0].name,
sizeof(svc_entries->service_entries[0].name),
@@ -484,7 +454,7 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
rsp->ah = ah;
dm_mad = rsp->mad;
- memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
+ memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
dm_mad->mad_hdr.status = 0;
@@ -532,7 +502,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_port_attr port_attr;
int ret;
- memset(&port_modify, 0, sizeof port_modify);
+ memset(&port_modify, 0, sizeof(port_modify));
port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
port_modify.clr_port_cap_mask = 0;
@@ -553,7 +523,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
goto err_query_port;
if (!sport->mad_agent) {
- memset(&reg_req, 0, sizeof reg_req);
+ memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
@@ -841,6 +811,39 @@ out:
}
/**
+ * srpt_zerolength_write() - Perform a zero-length RDMA write.
+ *
+ * A quote from the InfiniBand specification: C9-88: For an HCA responder
+ * using Reliable Connection service, for each zero-length RDMA READ or WRITE
+ * request, the R_Key shall not be validated, even if the request includes
+ * Immediate data.
+ */
+static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
+{
+ struct ib_send_wr wr, *bad_wr;
+
+ memset(&wr, 0, sizeof(wr));
+ wr.opcode = IB_WR_RDMA_WRITE;
+ wr.wr_cqe = &ch->zw_cqe;
+ wr.send_flags = IB_SEND_SIGNALED;
+ return ib_post_send(ch->qp, &wr, &bad_wr);
+}
+
+static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct srpt_rdma_ch *ch = cq->cq_context;
+
+ if (wc->status == IB_WC_SUCCESS) {
+ srpt_process_wait_list(ch);
+ } else {
+ if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+ schedule_work(&ch->release_work);
+ else
+ WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
+ }
+}
+
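Because such a write moves no data and skips R_Key validation, its completion acts purely as an ordering marker: success means the channel is still live, so the wait list is flushed; an error completion means the QP is already in the error state, so channel release is scheduled. A hedged sketch of the one-time wiring assumed at channel setup:

	ch->zw_cqe.done = srpt_zerolength_write_done;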
+/**
* srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
* @ioctx: Pointer to the I/O context associated with the request.
* @srp_cmd: Pointer to the SRP_CMD request data.
@@ -903,14 +906,14 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
db = (struct srp_direct_buf *)(srp_cmd->add_data
+ add_cdb_offset);
- memcpy(ioctx->rbufs, db, sizeof *db);
+ memcpy(ioctx->rbufs, db, sizeof(*db));
*data_len = be32_to_cpu(db->len);
} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
idb = (struct srp_indirect_buf *)(srp_cmd->add_data
+ add_cdb_offset);
- ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
+ ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);
if (ioctx->n_rbuf >
(srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
@@ -929,7 +932,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
ioctx->rbufs = &ioctx->single_rbuf;
else {
ioctx->rbufs =
- kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
+ kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
if (!ioctx->rbufs) {
ioctx->n_rbuf = 0;
ret = -ENOMEM;
@@ -938,7 +941,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
}
db = idb->desc_list;
- memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
+ memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
*data_len = be32_to_cpu(idb->len);
}
out:
@@ -956,7 +959,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
struct ib_qp_attr *attr;
int ret;
- attr = kzalloc(sizeof *attr, GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
@@ -1070,7 +1073,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
dir = ioctx->cmd.data_direction;
BUG_ON(dir == DMA_NONE);
ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
- opposite_dma_dir(dir));
+ target_reverse_dma_direction(&ioctx->cmd));
ioctx->mapped_sg_count = 0;
}
}
@@ -1107,7 +1110,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
- opposite_dma_dir(dir));
+ target_reverse_dma_direction(cmd));
if (unlikely(!count))
return -EAGAIN;
@@ -1313,10 +1316,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
/*
* If the command is in a state where the target core is waiting for
- * the ib_srpt driver, change the state to the next state. Changing
- * the state of the command from SRPT_STATE_NEED_DATA to
- * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
- * function a second time.
+ * the ib_srpt driver, change the state to the next state.
*/
spin_lock_irqsave(&ioctx->spinlock, flags);
@@ -1325,25 +1325,17 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
case SRPT_STATE_NEED_DATA:
ioctx->state = SRPT_STATE_DATA_IN;
break;
- case SRPT_STATE_DATA_IN:
case SRPT_STATE_CMD_RSP_SENT:
case SRPT_STATE_MGMT_RSP_SENT:
ioctx->state = SRPT_STATE_DONE;
break;
default:
+ WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
+ __func__, state);
break;
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
- if (state == SRPT_STATE_DONE) {
- struct srpt_rdma_ch *ch = ioctx->ch;
-
- BUG_ON(ch->sess == NULL);
-
- target_put_sess_cmd(&ioctx->cmd);
- goto out;
- }
-
pr_debug("Aborting cmd with state %d and tag %lld\n", state,
ioctx->cmd.tag);
@@ -1351,19 +1343,16 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
case SRPT_STATE_NEW:
case SRPT_STATE_DATA_IN:
case SRPT_STATE_MGMT:
+ case SRPT_STATE_DONE:
/*
* Do nothing - defer abort processing until
* srpt_queue_response() is invoked.
*/
- WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
break;
case SRPT_STATE_NEED_DATA:
- /* DMA_TO_DEVICE (write) - RDMA read error. */
-
- /* XXX(hch): this is a horrible layering violation.. */
- spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
- ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
- spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
+ pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
+ transport_generic_request_failure(&ioctx->cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
break;
case SRPT_STATE_CMD_RSP_SENT:
/*
@@ -1371,18 +1360,16 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* not been received in time.
*/
srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- target_put_sess_cmd(&ioctx->cmd);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
break;
case SRPT_STATE_MGMT_RSP_SENT:
- srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- target_put_sess_cmd(&ioctx->cmd);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
break;
default:
WARN(1, "Unexpected command state (%d)", state);
break;
}
-out:
return state;
}
@@ -1422,9 +1409,14 @@ static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ /*
+ * Note: if an RDMA write error completion is received that
+ * means that a SEND also has been posted. Defer further
+ * processing of the associated command until the send error
+ * completion has been received.
+ */
pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
ioctx, wc->status);
- srpt_abort_cmd(ioctx);
}
}
@@ -1464,7 +1456,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
sense_data_len = ioctx->cmd.scsi_sense_length;
WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
- memset(srp_rsp, 0, sizeof *srp_rsp);
+ memset(srp_rsp, 0, sizeof(*srp_rsp));
srp_rsp->opcode = SRP_RSP;
srp_rsp->req_lim_delta =
cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
@@ -1514,7 +1506,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
srp_rsp = ioctx->ioctx.buf;
BUG_ON(!srp_rsp);
- memset(srp_rsp, 0, sizeof *srp_rsp);
+ memset(srp_rsp, 0, sizeof(*srp_rsp));
srp_rsp->opcode = SRP_RSP;
srp_rsp->req_lim_delta =
@@ -1528,80 +1520,6 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
return resp_len;
}
-#define NO_SUCH_LUN ((uint64_t)-1LL)
-
-/*
- * SCSI LUN addressing method. See also SAM-2 and the section about
- * eight byte LUNs.
- */
-enum scsi_lun_addr_method {
- SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0,
- SCSI_LUN_ADDR_METHOD_FLAT = 1,
- SCSI_LUN_ADDR_METHOD_LUN = 2,
- SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
-};
-
-/*
- * srpt_unpack_lun() - Convert from network LUN to linear LUN.
- *
- * Convert an 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
- * order (big endian) to a linear LUN. Supports three LUN addressing methods:
- * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
- */
-static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
-{
- uint64_t res = NO_SUCH_LUN;
- int addressing_method;
-
- if (unlikely(len < 2)) {
- pr_err("Illegal LUN length %d, expected 2 bytes or more\n",
- len);
- goto out;
- }
-
- switch (len) {
- case 8:
- if ((*((__be64 *)lun) &
- cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
- goto out_err;
- break;
- case 4:
- if (*((__be16 *)&lun[2]) != 0)
- goto out_err;
- break;
- case 6:
- if (*((__be32 *)&lun[2]) != 0)
- goto out_err;
- break;
- case 2:
- break;
- default:
- goto out_err;
- }
-
- addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
- switch (addressing_method) {
- case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
- case SCSI_LUN_ADDR_METHOD_FLAT:
- case SCSI_LUN_ADDR_METHOD_LUN:
- res = *(lun + 1) | (((*lun) & 0x3f) << 8);
- break;
-
- case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
- default:
- pr_err("Unimplemented LUN addressing method %u\n",
- addressing_method);
- break;
- }
-
-out:
- return res;
-
-out_err:
- pr_err("Support for multi-level LUNs has not yet been implemented\n");
- goto out;
-}
-
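The open-coded decoding above is superseded by the SCSI core helper scsilun_to_int(), as the srpt_handle_cmd() and srpt_handle_tsk_mgmt() changes below show; a minimal sketch (srp_cmd->lun is a wire-format struct scsi_lun):

	u64 unpacked_lun = scsilun_to_int(&srp_cmd->lun);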
static int srpt_check_stop_free(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx = container_of(cmd,
@@ -1613,16 +1531,14 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
/**
* srpt_handle_cmd() - Process SRP_CMD.
*/
-static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
- struct srpt_recv_ioctx *recv_ioctx,
- struct srpt_send_ioctx *send_ioctx)
+static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
{
struct se_cmd *cmd;
struct srp_cmd *srp_cmd;
- uint64_t unpacked_lun;
u64 data_len;
enum dma_data_direction dir;
- sense_reason_t ret;
int rc;
BUG_ON(!send_ioctx);
@@ -1650,65 +1566,23 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
pr_err("0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
- ret = TCM_INVALID_CDB_FIELD;
- goto send_sense;
+ goto release_ioctx;
}
- unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
- sizeof(srp_cmd->lun));
rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
- &send_ioctx->sense_data[0], unpacked_lun, data_len,
- TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ &send_ioctx->sense_data[0],
+ scsilun_to_int(&srp_cmd->lun), data_len,
+ TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
if (rc != 0) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto send_sense;
+ pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
+ srp_cmd->tag);
+ goto release_ioctx;
}
- return 0;
-
-send_sense:
- transport_send_check_condition_and_sense(cmd, ret, 0);
- return -1;
-}
-
-/**
- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
- * @ch: RDMA channel of the task management request.
- * @fn: Task management function to perform.
- * @req_tag: Tag of the SRP task management request.
- * @mgmt_ioctx: I/O context of the task management request.
- *
- * Returns zero if the target core will process the task management
- * request asynchronously.
- *
- * Note: It is assumed that the initiator serializes tag-based task management
- * requests.
- */
-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
-{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *target;
- int ret, i;
+ return;
- ret = -EINVAL;
- ch = ioctx->ch;
- BUG_ON(!ch);
- BUG_ON(!ch->sport);
- sdev = ch->sport->sdev;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- for (i = 0; i < ch->rq_size; ++i) {
- target = ch->ioctx_ring[i];
- if (target->cmd.se_lun == ioctx->cmd.se_lun &&
- target->cmd.tag == tag &&
- srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
- ret = 0;
- /* now let the target core abort &target->cmd; */
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
- return ret;
+release_ioctx:
+ send_ioctx->state = SRPT_STATE_DONE;
+ srpt_release_cmd(cmd);
}
static int srp_tmr_to_tcm(int fn)
@@ -1744,8 +1618,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
struct srp_tsk_mgmt *srp_tsk;
struct se_cmd *cmd;
struct se_session *sess = ch->sess;
- uint64_t unpacked_lun;
- uint32_t tag = 0;
int tcm_tmr;
int rc;
@@ -1761,26 +1633,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
- if (tcm_tmr < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto fail;
- }
- unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
- sizeof(srp_tsk->lun));
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
- rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
- if (rc < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_DOES_NOT_EXIST;
- goto fail;
- }
- tag = srp_tsk->task_tag;
- }
- rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
- srp_tsk, tcm_tmr, GFP_KERNEL, tag,
- TARGET_SCF_ACK_KREF);
+ rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
+ scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
+ GFP_KERNEL, srp_tsk->task_tag,
+ TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
goto fail;
@@ -1800,7 +1656,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *send_ioctx)
{
struct srp_cmd *srp_cmd;
- enum rdma_ch_state ch_state;
BUG_ON(!ch);
BUG_ON(!recv_ioctx);
@@ -1809,13 +1664,12 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
recv_ioctx->ioctx.dma, srp_max_req_size,
DMA_FROM_DEVICE);
- ch_state = srpt_get_ch_state(ch);
- if (unlikely(ch_state == CH_CONNECTING)) {
+ if (unlikely(ch->state == CH_CONNECTING)) {
list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
goto out;
}
- if (unlikely(ch_state != CH_LIVE))
+ if (unlikely(ch->state != CH_LIVE))
goto out;
srp_cmd = recv_ioctx->ioctx.buf;
@@ -1878,6 +1732,28 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
}
}
+/*
+ * This function must be called from the context in which RDMA completions are
+ * processed because it accesses the wait list without protection against
+ * access from other threads.
+ */
+static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ while (!list_empty(&ch->cmd_wait_list) &&
+ ch->state >= CH_LIVE &&
+ (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
+ struct srpt_recv_ioctx *recv_ioctx;
+
+ recv_ioctx = list_first_entry(&ch->cmd_wait_list,
+ struct srpt_recv_ioctx,
+ wait_list);
+ list_del(&recv_ioctx->wait_list);
+ srpt_handle_new_iu(ch, recv_ioctx, ioctx);
+ }
+}
+
/**
* Note: Although this has not yet been observed during tests, at least in
* theory it is possible that the srpt_get_send_ioctx() call invoked by
@@ -1905,15 +1781,10 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
atomic_inc(&ch->sq_wr_avail);
- if (wc->status != IB_WC_SUCCESS) {
+ if (wc->status != IB_WC_SUCCESS)
pr_info("sending response for ioctx 0x%p failed"
" with status %d\n", ioctx, wc->status);
- atomic_dec(&ch->req_lim);
- srpt_abort_cmd(ioctx);
- goto out;
- }
-
if (state != SRPT_STATE_DONE) {
srpt_unmap_sg_to_ib_sge(ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
@@ -1922,18 +1793,7 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
" wr_id = %u.\n", ioctx->ioctx.index);
}
-out:
- while (!list_empty(&ch->cmd_wait_list) &&
- srpt_get_ch_state(ch) == CH_LIVE &&
- (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
- struct srpt_recv_ioctx *recv_ioctx;
-
- recv_ioctx = list_first_entry(&ch->cmd_wait_list,
- struct srpt_recv_ioctx,
- wait_list);
- list_del(&recv_ioctx->wait_list);
- srpt_handle_new_iu(ch, recv_ioctx, ioctx);
- }
+ srpt_process_wait_list(ch);
}
/**
@@ -1950,7 +1810,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
WARN_ON(ch->rq_size < 1);
ret = -ENOMEM;
- qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
+ qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
if (!qp_init)
goto out;
@@ -2017,168 +1877,102 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
}
/**
- * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
+ * srpt_close_ch() - Close an RDMA channel.
*
- * Reset the QP and make sure all resources associated with the channel will
- * be deallocated at an appropriate time.
+ * Make sure all resources associated with the channel will be deallocated at
+ * an appropriate time.
*
- * Note: The caller must hold ch->sport->sdev->spinlock.
+ * Returns true if and only if the channel state has been modified into
+ * CH_DRAINING.
*/
-static void __srpt_close_ch(struct srpt_rdma_ch *ch)
+static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
- enum rdma_ch_state prev_state;
- unsigned long flags;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- prev_state = ch->state;
- switch (prev_state) {
- case CH_CONNECTING:
- case CH_LIVE:
- ch->state = CH_DISCONNECTING;
- break;
- default:
- break;
- }
- spin_unlock_irqrestore(&ch->spinlock, flags);
+ int ret;
- switch (prev_state) {
- case CH_CONNECTING:
- ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
- NULL, 0);
- /* fall through */
- case CH_LIVE:
- if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
- pr_err("sending CM DREQ failed.\n");
- break;
- case CH_DISCONNECTING:
- break;
- case CH_DRAINING:
- case CH_RELEASING:
- break;
+ if (!srpt_set_ch_state(ch, CH_DRAINING)) {
+ pr_debug("%s-%d: already closed\n", ch->sess_name,
+ ch->qp->qp_num);
+ return false;
}
-}
-/**
- * srpt_close_ch() - Close an RDMA channel.
- */
-static void srpt_close_ch(struct srpt_rdma_ch *ch)
-{
- struct srpt_device *sdev;
+ kref_get(&ch->kref);
- sdev = ch->sport->sdev;
- spin_lock_irq(&sdev->spinlock);
- __srpt_close_ch(ch);
- spin_unlock_irq(&sdev->spinlock);
-}
+ ret = srpt_ch_qp_err(ch);
+ if (ret < 0)
+ pr_err("%s-%d: changing queue pair into error state failed: %d\n",
+ ch->sess_name, ch->qp->qp_num, ret);
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
- struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
- unsigned long flags;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- if (ch->in_shutdown) {
- spin_unlock_irqrestore(&ch->spinlock, flags);
- return true;
+ pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+ ch->qp->qp_num);
+ ret = srpt_zerolength_write(ch);
+ if (ret < 0) {
+ pr_err("%s-%d: queuing zero-length write failed: %d\n",
+ ch->sess_name, ch->qp->qp_num, ret);
+ if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+ schedule_work(&ch->release_work);
+ else
+ WARN_ON_ONCE(true);
}
- ch->in_shutdown = true;
- target_sess_cmd_list_set_waiting(se_sess);
- spin_unlock_irqrestore(&ch->spinlock, flags);
+ kref_put(&ch->kref, srpt_free_ch);
return true;
}
-/**
- * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
- * @cm_id: Pointer to the CM ID of the channel to be drained.
- *
- * Note: Must be called from inside srpt_cm_handler to avoid a race between
- * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
- * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
- * waits until all target sessions for the associated IB device have been
- * unregistered and target session registration involves a call to
- * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
- * this function has finished).
+/*
+ * Change the channel state into CH_DISCONNECTING. If a channel has not yet
+ * reached the connected state, close it. If a channel is in the connected
+ * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
+ * the responsibility of the caller to ensure that this function is not
+ * invoked concurrently with the code that accepts a connection. This means
+ * that this function must either be invoked from inside a CM callback
+ * function or that it must be invoked with the srpt_device.mutex held.
*/
-static void srpt_drain_channel(struct ib_cm_id *cm_id)
+static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
int ret;
- bool do_reset = false;
- WARN_ON_ONCE(irqs_disabled());
+ if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
+ return -ENOTCONN;
- sdev = cm_id->context;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->cm_id == cm_id) {
- do_reset = srpt_test_and_set_ch_state(ch,
- CH_CONNECTING, CH_DRAINING) ||
- srpt_test_and_set_ch_state(ch,
- CH_LIVE, CH_DRAINING) ||
- srpt_test_and_set_ch_state(ch,
- CH_DISCONNECTING, CH_DRAINING);
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
+ ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
+ if (ret < 0)
+ ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
- if (do_reset) {
- if (ch->sess)
- srpt_shutdown_session(ch->sess);
+ if (ret < 0 && srpt_close_ch(ch))
+ ret = 0;
- ret = srpt_ch_qp_err(ch);
- if (ret < 0)
- pr_err("Setting queue pair in error state"
- " failed: %d\n", ret);
- }
+ return ret;
}
-/**
- * srpt_find_channel() - Look up an RDMA channel.
- * @cm_id: Pointer to the CM ID of the channel to be looked up.
- *
- * Return NULL if no matching RDMA channel has been found.
- */
-static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
- struct ib_cm_id *cm_id)
+static void __srpt_close_all_ch(struct srpt_device *sdev)
{
struct srpt_rdma_ch *ch;
- bool found;
- WARN_ON_ONCE(irqs_disabled());
- BUG_ON(!sdev);
+ lockdep_assert_held(&sdev->mutex);
- found = false;
- spin_lock_irq(&sdev->spinlock);
list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->cm_id == cm_id) {
- found = true;
- break;
- }
+ if (srpt_disconnect_ch(ch) >= 0)
+ pr_info("Closing channel %s-%d because target %s has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
+ sdev->device->name);
+ srpt_close_ch(ch);
}
- spin_unlock_irq(&sdev->spinlock);
-
- return found ? ch : NULL;
}
/**
- * srpt_release_channel() - Release channel resources.
- *
- * Schedules the actual release because:
- * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
- * trigger a deadlock.
- * - It is not safe to call TCM transport_* functions from interrupt context.
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
*/
-static void srpt_release_channel(struct srpt_rdma_ch *ch)
+static int srpt_shutdown_session(struct se_session *se_sess)
{
- schedule_work(&ch->release_work);
+ return 1;
+}
+
+static void srpt_free_ch(struct kref *kref)
+{
+ struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
+
+ kfree(ch);
}
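
The kref added in this patch follows the standard kernel lifetime pattern: the channel is born with one reference, srpt_close_ch() takes a temporary reference across the drain, and the release work drops the creation reference. A self-contained sketch of the pattern (the demo_* names are illustrative, not part of the driver):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_ch {
	struct kref kref;
	/* ... per-channel state ... */
};

static void demo_free(struct kref *kref)
{
	kfree(container_of(kref, struct demo_ch, kref));
}

static void demo_close(struct demo_ch *ch)
{
	kref_get(&ch->kref);		/* pin ch across the async work */
	/* ... post work that may complete after the caller drops its ref ... */
	kref_put(&ch->kref, demo_free);	/* drop the temporary reference */
}

Whichever kref_put() runs last invokes the release callback, so kfree(ch) can no longer race with a late completion.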
static void srpt_release_channel_work(struct work_struct *w)
@@ -2188,8 +1982,8 @@ static void srpt_release_channel_work(struct work_struct *w)
struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
- pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
- ch->release_done);
+ pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
+ ch->qp->qp_num, ch->release_done);
sdev = ch->sport->sdev;
BUG_ON(!sdev);
@@ -2197,6 +1991,7 @@ static void srpt_release_channel_work(struct work_struct *w)
se_sess = ch->sess;
BUG_ON(!se_sess);
+ target_sess_cmd_list_set_waiting(se_sess);
target_wait_for_sess_cmds(se_sess);
transport_deregister_session_configfs(se_sess);
@@ -2211,16 +2006,15 @@ static void srpt_release_channel_work(struct work_struct *w)
ch->sport->sdev, ch->rq_size,
ch->rsp_size, DMA_TO_DEVICE);
- spin_lock_irq(&sdev->spinlock);
- list_del(&ch->list);
- spin_unlock_irq(&sdev->spinlock);
-
+ mutex_lock(&sdev->mutex);
+ list_del_init(&ch->list);
if (ch->release_done)
complete(ch->release_done);
+ mutex_unlock(&sdev->mutex);
wake_up(&sdev->ch_releaseQ);
- kfree(ch);
+ kref_put(&ch->kref, srpt_free_ch);
}
/**
@@ -2240,7 +2034,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
struct srp_login_rej *rej;
struct ib_cm_rep_param *rep_param;
struct srpt_rdma_ch *ch, *tmp_ch;
- struct se_node_acl *se_acl;
u32 it_iu_len;
int i, ret = 0;
unsigned char *p;
@@ -2266,9 +2059,9 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
- rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
- rej = kzalloc(sizeof *rej, GFP_KERNEL);
- rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ rej = kzalloc(sizeof(*rej), GFP_KERNEL);
+ rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
if (!rsp || !rej || !rep_param) {
ret = -ENOMEM;
@@ -2297,7 +2090,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
- spin_lock_irq(&sdev->spinlock);
+ mutex_lock(&sdev->mutex);
list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
@@ -2305,26 +2098,16 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
&& param->port == ch->sport->port
&& param->listen_id == ch->sport->sdev->cm_id
&& ch->cm_id) {
- enum rdma_ch_state ch_state;
-
- ch_state = srpt_get_ch_state(ch);
- if (ch_state != CH_CONNECTING
- && ch_state != CH_LIVE)
+ if (srpt_disconnect_ch(ch) < 0)
continue;
-
- /* found an existing channel */
- pr_debug("Found existing channel %s"
- " cm_id= %p state= %d\n",
- ch->sess_name, ch->cm_id, ch_state);
-
- __srpt_close_ch(ch);
-
+ pr_info("Relogin - closed existing channel %s\n",
+ ch->sess_name);
rsp->rsp_flags =
SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
}
}
- spin_unlock_irq(&sdev->spinlock);
+ mutex_unlock(&sdev->mutex);
} else
rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
@@ -2340,7 +2123,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
goto reject;
}
- ch = kzalloc(sizeof *ch, GFP_KERNEL);
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (!ch) {
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -2349,11 +2132,14 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
goto reject;
}
+ kref_init(&ch->kref);
+ ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
memcpy(ch->i_port_id, req->initiator_port_id, 16);
memcpy(ch->t_port_id, req->target_port_id, 16);
ch->sport = &sdev->port[param->port - 1];
ch->cm_id = cm_id;
+ cm_id->context = ch;
/*
* Avoid QUEUE_FULL conditions by limiting the number of buffers used
* for the SRP protocol to the command queue size.
@@ -2406,19 +2192,12 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
pr_debug("registering session %s\n", ch->sess_name);
p = &ch->sess_name[0];
- ch->sess = transport_init_session(TARGET_PROT_NORMAL);
- if (IS_ERR(ch->sess)) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_debug("Failed to create session\n");
- goto destroy_ib;
- }
-
try_again:
- se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
- if (!se_acl) {
+ ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
+ TARGET_PROT_NORMAL, p, ch, NULL);
+ if (IS_ERR(ch->sess)) {
pr_info("Rejected login because no ACL has been"
- " configured yet for initiator %s.\n", ch->sess_name);
+ " configured yet for initiator %s.\n", p);
/*
* XXX: Hack to retry ch->i_port_id without the leading '0x'
*/
@@ -2426,14 +2205,11 @@ try_again:
p += 2;
goto try_again;
}
- rej->reason = cpu_to_be32(
+ rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
- transport_free_session(ch->sess);
goto destroy_ib;
}
- ch->sess->se_node_acl = se_acl;
-
- transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
ch->sess_name, ch->cm_id);
@@ -2453,7 +2229,7 @@ try_again:
/* create cm reply */
rep_param->qp_num = ch->qp->qp_num;
rep_param->private_data = (void *)rsp;
- rep_param->private_data_len = sizeof *rsp;
+ rep_param->private_data_len = sizeof(*rsp);
rep_param->rnr_retry_count = 7;
rep_param->flow_control = 1;
rep_param->failover_accepted = 0;
@@ -2468,14 +2244,14 @@ try_again:
goto release_channel;
}
- spin_lock_irq(&sdev->spinlock);
+ mutex_lock(&sdev->mutex);
list_add_tail(&ch->list, &sdev->rch_list);
- spin_unlock_irq(&sdev->spinlock);
+ mutex_unlock(&sdev->mutex);
goto out;
release_channel:
- srpt_set_ch_state(ch, CH_RELEASING);
+ srpt_disconnect_ch(ch);
transport_deregister_session_configfs(ch->sess);
transport_deregister_session(ch->sess);
ch->sess = NULL;
@@ -2497,7 +2273,7 @@ reject:
| SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
- (void *)rej, sizeof *rej);
+ (void *)rej, sizeof(*rej));
out:
kfree(rep_param);
@@ -2507,10 +2283,23 @@ out:
return ret;
}
-static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
+static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
+ enum ib_cm_rej_reason reason,
+ const u8 *private_data,
+ u8 private_data_len)
{
- pr_info("Received IB REJ for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
+ char *priv = NULL;
+ int i;
+
+ if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
+ GFP_KERNEL))) {
+ for (i = 0; i < private_data_len; i++)
+ sprintf(priv + 3 * i, " %02x", private_data[i]);
+ }
+ pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
+ ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
+ "; private data" : "", priv ? priv : " (?)");
+ kfree(priv);
}
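
The allocation size above follows directly from the format string: each private-data byte expands to the three characters of " %02x", and sprintf() NUL-terminates the last triple, hence private_data_len * 3 + 1. A tiny worked example with hypothetical data:

	char buf[4 * 3 + 1];
	const u8 data[4] = { 0xde, 0xad, 0xbe, 0xef };
	int i;

	for (i = 0; i < 4; i++)
		sprintf(buf + 3 * i, " %02x", data[i]);
	/* buf now holds " de ad be ef": 12 characters plus the NUL */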
/**
@@ -2519,88 +2308,24 @@ static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
* An IB_CM_RTU_RECEIVED message indicates that the connection is established
* and that the recipient may begin transmitting (RTU = ready to use).
*/
-static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
+static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
- struct srpt_rdma_ch *ch;
int ret;
- ch = srpt_find_channel(cm_id->context, cm_id);
- BUG_ON(!ch);
-
- if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
- struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
-
+ if (srpt_set_ch_state(ch, CH_LIVE)) {
ret = srpt_ch_qp_rts(ch, ch->qp);
- list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
- wait_list) {
- list_del(&ioctx->wait_list);
- srpt_handle_new_iu(ch, ioctx, NULL);
- }
- if (ret)
+ if (ret == 0) {
+ /* Trigger wait list processing. */
+ ret = srpt_zerolength_write(ch);
+ WARN_ONCE(ret < 0, "%d\n", ret);
+ } else {
srpt_close_ch(ch);
- }
-}
-
-static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
-{
- pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
-}
-
-static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
-{
- pr_info("Received IB REP error for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
-}
-
-/**
- * srpt_cm_dreq_recv() - Process reception of a DREQ message.
- */
-static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
-{
- struct srpt_rdma_ch *ch;
- unsigned long flags;
- bool send_drep = false;
-
- ch = srpt_find_channel(cm_id->context, cm_id);
- BUG_ON(!ch);
-
- pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
-
- spin_lock_irqsave(&ch->spinlock, flags);
- switch (ch->state) {
- case CH_CONNECTING:
- case CH_LIVE:
- send_drep = true;
- ch->state = CH_DISCONNECTING;
- break;
- case CH_DISCONNECTING:
- case CH_DRAINING:
- case CH_RELEASING:
- WARN(true, "unexpected channel state %d\n", ch->state);
- break;
- }
- spin_unlock_irqrestore(&ch->spinlock, flags);
-
- if (send_drep) {
- if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
- pr_err("Sending IB DREP failed.\n");
- pr_info("Received DREQ and sent DREP for session %s.\n",
- ch->sess_name);
+ }
}
}
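
srpt_zerolength_write() is introduced elsewhere in this patch; only its callers are visible in this hunk. A sketch consistent with the ch->zw_cqe wiring above — a signaled, zero-length RDMA write whose completion handler (srpt_zerolength_write_done) runs once all previously posted work requests have completed; the exact shape here is an assumption:

static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_RDMA_WRITE;	/* no sg list: zero-length write */
	wr.wr_cqe = &ch->zw_cqe;	/* completion calls zw_cqe.done() */
	wr.send_flags = IB_SEND_SIGNALED;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}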
/**
- * srpt_cm_drep_recv() - Process reception of a DREP message.
- */
-static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
-{
- pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
-}
-
-/**
* srpt_cm_handler() - IB connection manager callback function.
*
* A non-zero return value will cause the caller destroy the CM ID.
@@ -2612,6 +2337,7 @@ static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
*/
static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
+ struct srpt_rdma_ch *ch = cm_id->context;
int ret;
ret = 0;
@@ -2621,32 +2347,39 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
event->private_data);
break;
case IB_CM_REJ_RECEIVED:
- srpt_cm_rej_recv(cm_id);
+ srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
+ event->private_data,
+ IB_CM_REJ_PRIVATE_DATA_SIZE);
break;
case IB_CM_RTU_RECEIVED:
case IB_CM_USER_ESTABLISHED:
- srpt_cm_rtu_recv(cm_id);
+ srpt_cm_rtu_recv(ch);
break;
case IB_CM_DREQ_RECEIVED:
- srpt_cm_dreq_recv(cm_id);
+ srpt_disconnect_ch(ch);
break;
case IB_CM_DREP_RECEIVED:
- srpt_cm_drep_recv(cm_id);
+ pr_info("Received CM DREP message for ch %s-%d.\n",
+ ch->sess_name, ch->qp->qp_num);
+ srpt_close_ch(ch);
break;
case IB_CM_TIMEWAIT_EXIT:
- srpt_cm_timewait_exit(cm_id);
+ pr_info("Received CM TimeWait exit for ch %s-%d.\n",
+ ch->sess_name, ch->qp->qp_num);
+ srpt_close_ch(ch);
break;
case IB_CM_REP_ERROR:
- srpt_cm_rep_error(cm_id);
+ pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
+ ch->qp->qp_num);
break;
case IB_CM_DREQ_ERROR:
- pr_info("Received IB DREQ ERROR event.\n");
+ pr_info("Received CM DREQ ERROR event.\n");
break;
case IB_CM_MRA_RECEIVED:
- pr_info("Received IB MRA event\n");
+ pr_info("Received CM MRA event\n");
break;
default:
- pr_err("received unrecognized IB CM event %d\n", event->event);
+ pr_err("received unrecognized CM event %d\n", event->event);
break;
}
@@ -2755,41 +2488,14 @@ static int srpt_write_pending_status(struct se_cmd *se_cmd)
*/
static int srpt_write_pending(struct se_cmd *se_cmd)
{
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *ioctx;
+ struct srpt_send_ioctx *ioctx =
+ container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
enum srpt_command_state new_state;
- enum rdma_ch_state ch_state;
- int ret;
-
- ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
WARN_ON(new_state == SRPT_STATE_DONE);
-
- ch = ioctx->ch;
- BUG_ON(!ch);
-
- ch_state = srpt_get_ch_state(ch);
- switch (ch_state) {
- case CH_CONNECTING:
- WARN(true, "unexpected channel state %d\n", ch_state);
- ret = -EINVAL;
- goto out;
- case CH_LIVE:
- break;
- case CH_DISCONNECTING:
- case CH_DRAINING:
- case CH_RELEASING:
- pr_debug("cmd with tag %lld: channel disconnecting\n",
- ioctx->cmd.tag);
- srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
- ret = -EINVAL;
- goto out;
- }
- ret = srpt_xfer_data(ch, ioctx);
-
-out:
- return ret;
+ return srpt_xfer_data(ch, ioctx);
}
static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
@@ -2920,36 +2626,25 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
-static int srpt_ch_list_empty(struct srpt_device *sdev)
-{
- int res;
-
- spin_lock_irq(&sdev->spinlock);
- res = list_empty(&sdev->rch_list);
- spin_unlock_irq(&sdev->spinlock);
-
- return res;
-}
-
/**
* srpt_release_sdev() - Free the channel resources associated with a target.
*/
static int srpt_release_sdev(struct srpt_device *sdev)
{
- struct srpt_rdma_ch *ch, *tmp_ch;
- int res;
+ int i, res;
WARN_ON_ONCE(irqs_disabled());
BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
- __srpt_close_ch(ch);
- spin_unlock_irq(&sdev->spinlock);
+ mutex_lock(&sdev->mutex);
+ for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
+ sdev->port[i].enabled = false;
+ __srpt_close_all_ch(sdev);
+ mutex_unlock(&sdev->mutex);
res = wait_event_interruptible(sdev->ch_releaseQ,
- srpt_ch_list_empty(sdev));
+ list_empty_careful(&sdev->rch_list));
if (res)
pr_err("%s: interrupted.\n", __func__);
@@ -3003,14 +2698,14 @@ static void srpt_add_one(struct ib_device *device)
pr_debug("device = %p, device->dma_ops = %p\n", device,
device->dma_ops);
- sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev)
goto err;
sdev->device = device;
INIT_LIST_HEAD(&sdev->rch_list);
init_waitqueue_head(&sdev->ch_releaseQ);
- spin_lock_init(&sdev->spinlock);
+ mutex_init(&sdev->mutex);
sdev->pd = ib_alloc_pd(device);
if (IS_ERR(sdev->pd))
@@ -3082,7 +2777,7 @@ static void srpt_add_one(struct ib_device *device)
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
- srpt_sdev_name(sdev), i);
+ sdev->device->name, i);
goto err_ring;
}
snprintf(sport->port_guid, sizeof(sport->port_guid),
@@ -3231,24 +2926,26 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
static void srpt_close_session(struct se_session *se_sess)
{
DECLARE_COMPLETION_ONSTACK(release_done);
- struct srpt_rdma_ch *ch;
- struct srpt_device *sdev;
- unsigned long res;
-
- ch = se_sess->fabric_sess_ptr;
- WARN_ON(ch->sess != se_sess);
+ struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
+ struct srpt_device *sdev = ch->sport->sdev;
+ bool wait;
- pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
+ pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
+ ch->state);
- sdev = ch->sport->sdev;
- spin_lock_irq(&sdev->spinlock);
+ mutex_lock(&sdev->mutex);
BUG_ON(ch->release_done);
ch->release_done = &release_done;
- __srpt_close_ch(ch);
- spin_unlock_irq(&sdev->spinlock);
+ wait = !list_empty(&ch->list);
+ srpt_disconnect_ch(ch);
+ mutex_unlock(&sdev->mutex);
- res = wait_for_completion_timeout(&release_done, 60 * HZ);
- WARN_ON(res == 0);
+ if (!wait)
+ return;
+
+ while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
+ pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
+ ch->sess_name, ch->qp->qp_num, ch->state);
}
/**
@@ -3456,6 +3153,8 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
{
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ struct srpt_device *sdev = sport->sdev;
+ struct srpt_rdma_ch *ch;
unsigned long tmp;
int ret;
@@ -3469,11 +3168,24 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
return -EINVAL;
}
- if (tmp == 1)
- sport->enabled = true;
- else
- sport->enabled = false;
+ if (sport->enabled == tmp)
+ goto out;
+ sport->enabled = tmp;
+ if (sport->enabled)
+ goto out;
+
+ mutex_lock(&sdev->mutex);
+ list_for_each_entry(ch, &sdev->rch_list, list) {
+ if (ch->sport == sport) {
+ pr_debug("%s: ch %p %s-%d\n", __func__, ch,
+ ch->sess_name, ch->qp->qp_num);
+ srpt_disconnect_ch(ch);
+ srpt_close_ch(ch);
+ }
+ }
+ mutex_unlock(&sdev->mutex);
+out:
return count;
}
@@ -3565,7 +3277,6 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
static const struct target_core_fabric_ops srpt_template = {
.module = THIS_MODULE,
.name = "srpt",
- .node_acl_size = sizeof(struct srpt_node_acl),
.get_fabric_name = srpt_get_fabric_name,
.tpg_get_wwn = srpt_get_fabric_wwn,
.tpg_get_tag = srpt_get_tag,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 09037f2b0b51..af9b8b527340 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -218,20 +218,20 @@ struct srpt_send_ioctx {
/**
* enum rdma_ch_state - SRP channel state.
- * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
- * @CH_LIVE: QP is in RTS state.
- * @CH_DISCONNECTING: DREQ has been received; waiting for DREP
- * or DREQ has been send and waiting for DREP
- * or .
- * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
- * @CH_RELEASING: Last WQE event has been received; releasing resources.
+ * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
+ * @CH_LIVE: QP is in RTS state.
+ * @CH_DISCONNECTING: A DREQ has been sent and we are waiting for a DREP,
+ * or a DREQ has been received.
+ * @CH_DRAINING: A DREP has been received or waiting for a DREP timed out,
+ * and the last work request has been queued.
+ * @CH_DISCONNECTED: Last completion has been received.
*/
enum rdma_ch_state {
CH_CONNECTING,
CH_LIVE,
CH_DISCONNECTING,
CH_DRAINING,
- CH_RELEASING
+ CH_DISCONNECTED,
};
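
With CH_RELEASING gone, the states form a single forward-moving chain:

	CH_CONNECTING -> CH_LIVE -> CH_DISCONNECTING -> CH_DRAINING -> CH_DISCONNECTED

Transitions may skip intermediate states (srpt_close_ch() moves a CH_CONNECTING channel straight to CH_DRAINING) but never move backwards, which is what makes a compare-and-advance srpt_set_ch_state() sufficient.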
/**
@@ -267,6 +267,8 @@ struct srpt_rdma_ch {
struct ib_cm_id *cm_id;
struct ib_qp *qp;
struct ib_cq *cq;
+ struct ib_cqe zw_cqe;
+ struct kref kref;
int rq_size;
u32 rsp_size;
atomic_t sq_wr_avail;
@@ -286,7 +288,6 @@ struct srpt_rdma_ch {
u8 sess_name[36];
struct work_struct release_work;
struct completion *release_done;
- bool in_shutdown;
};
/**
@@ -343,7 +344,7 @@ struct srpt_port {
* @ioctx_ring: Per-HCA SRQ.
* @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
* @ch_releaseQ: Enables waiting for removal from rch_list.
- * @spinlock: Protects rch_list and tpg.
+ * @mutex: Protects rch_list.
* @port: Information about the ports owned by this HCA.
* @event_handler: Per-HCA asynchronous IB event handler.
* @list: Node in srpt_dev_list.
@@ -357,18 +358,10 @@ struct srpt_device {
struct srpt_recv_ioctx **ioctx_ring;
struct list_head rch_list;
wait_queue_head_t ch_releaseQ;
- spinlock_t spinlock;
+ struct mutex mutex;
struct srpt_port port[2];
struct ib_event_handler event_handler;
struct list_head list;
};
-/**
- * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
- * @nacl: Target core node ACL information.
- */
-struct srpt_node_acl {
- struct se_node_acl nacl;
-};
-
#endif /* IB_SRPT_H */
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index a35532ec00e4..6261874c07c9 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -201,6 +201,8 @@ source "drivers/input/touchscreen/Kconfig"
source "drivers/input/misc/Kconfig"
+source "drivers/input/rmi4/Kconfig"
+
endif
menu "Hardware I/O ports"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 0c9302ca9954..595820bbabe9 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -26,3 +26,5 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
obj-$(CONFIG_INPUT_MISC) += misc/
obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
+
+obj-$(CONFIG_RMI4_CORE) += rmi4/
diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c
index 64ca7113ff28..d84d20b9cec0 100644
--- a/drivers/input/input-compat.c
+++ b/drivers/input/input-compat.c
@@ -17,7 +17,7 @@
int input_event_from_user(const char __user *buffer,
struct input_event *event)
{
- if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
+ if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
if (copy_from_user(&compat_event, buffer,
@@ -41,7 +41,7 @@ int input_event_from_user(const char __user *buffer,
int input_event_to_user(char __user *buffer,
const struct input_event *event)
{
- if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
+ if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
compat_event.time.tv_sec = event->time.tv_sec;
@@ -65,7 +65,7 @@ int input_event_to_user(char __user *buffer,
int input_ff_effect_from_user(const char __user *buffer, size_t size,
struct ff_effect *effect)
{
- if (INPUT_COMPAT_TEST) {
+ if (in_compat_syscall()) {
struct ff_effect_compat *compat_effect;
if (size != sizeof(struct ff_effect_compat))
diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h
index 148f66fe3205..1563160a7af3 100644
--- a/drivers/input/input-compat.h
+++ b/drivers/input/input-compat.h
@@ -17,18 +17,6 @@
#ifdef CONFIG_COMPAT
-/* Note to the author of this code: did it ever occur to
- you why the ifdefs are needed? Think about it again. -AK */
-#if defined(CONFIG_X86_64) || defined(CONFIG_TILE)
-# define INPUT_COMPAT_TEST is_compat_task()
-#elif defined(CONFIG_S390)
-# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT)
-#elif defined(CONFIG_MIPS)
-# define INPUT_COMPAT_TEST test_thread_flag(TIF_32BIT_ADDR)
-#else
-# define INPUT_COMPAT_TEST test_thread_flag(TIF_32BIT)
-#endif
-
struct input_event_compat {
struct compat_timeval time;
__u16 type;
@@ -67,7 +55,7 @@ struct ff_effect_compat {
static inline size_t input_event_size(void)
{
- return (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) ?
+ return (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) ?
sizeof(struct input_event_compat) : sizeof(struct input_event);
}
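
Unlike the removed INPUT_COMPAT_TEST table, in_compat_syscall() tests the current system call rather than per-architecture task flags, so it stays correct on architectures where a task can mix native and compat syscalls, and no new #ifdef entry is needed per architecture.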
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 880605959aa6..b87ffbd4547d 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1015,7 +1015,7 @@ static int input_bits_to_string(char *buf, int buf_size,
{
int len = 0;
- if (INPUT_COMPAT_TEST) {
+ if (in_compat_syscall()) {
u32 dword = bits >> 32;
if (dword || !skip_empty)
len += snprintf(buf, buf_size, "%x ", dword);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index e8a84d12b7ff..1142a93dd90b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -153,6 +153,7 @@ static const struct xpad_device {
{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
+ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index ddd8148d51d7..509608c95994 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -560,7 +560,7 @@ config KEYBOARD_SUNKBD
config KEYBOARD_SH_KEYSC
tristate "SuperH KEYSC keypad support"
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
Say Y here if you want to use a keypad attached to the KEYSC block
on SuperH processors such as sh7722 and sh7343.
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index 907e4e278fce..f6e643b589b6 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/acpi.h>
enum {
REG_READ = 0x00,
@@ -178,10 +179,26 @@ static int events_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id goldfish_events_of_match[] = {
+ { .compatible = "google,goldfish-events-keypad", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_events_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id goldfish_events_acpi_match[] = {
+ { "GFSH0002", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_events_acpi_match);
+#endif
+
static struct platform_driver events_driver = {
.probe = events_probe,
.driver = {
.name = "goldfish_events",
+ .of_match_table = goldfish_events_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_events_acpi_match),
},
};
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 9adf13a5864a..24a9f599082f 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -111,9 +111,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return -ENOMEM;
pdata->snvs = syscon_regmap_lookup_by_phandle(np, "regmap");
- if (!pdata->snvs) {
+ if (IS_ERR(pdata->snvs)) {
dev_err(&pdev->dev, "Can't get snvs syscon\n");
- return -ENODEV;
+ return PTR_ERR(pdata->snvs);
}
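
syscon_regmap_lookup_by_phandle() reports failure with an ERR_PTR(), never NULL, so the old check could never trigger; IS_ERR()/PTR_ERR() is the matching idiom and also propagates the real error code.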
if (of_property_read_u32(np, "linux,keycode", &pdata->keycode)) {
@@ -180,7 +180,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return 0;
}
-static int imx_snvs_pwrkey_suspend(struct device *dev)
+static int __maybe_unused imx_snvs_pwrkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
@@ -191,7 +191,7 @@ static int imx_snvs_pwrkey_suspend(struct device *dev)
return 0;
}
-static int imx_snvs_pwrkey_resume(struct device *dev)
+static int __maybe_unused imx_snvs_pwrkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 623d451767e3..8083eaa0524a 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -288,8 +288,7 @@ static int spear_kbd_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int spear_kbd_suspend(struct device *dev)
+static int __maybe_unused spear_kbd_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spear_kbd *kbd = platform_get_drvdata(pdev);
@@ -342,7 +341,7 @@ static int spear_kbd_suspend(struct device *dev)
return 0;
}
-static int spear_kbd_resume(struct device *dev)
+static int __maybe_unused spear_kbd_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spear_kbd *kbd = platform_get_drvdata(pdev);
@@ -368,7 +367,6 @@ static int spear_kbd_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(spear_kbd_pm_ops, spear_kbd_suspend, spear_kbd_resume);
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index d5994a745ffa..982936334537 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
input_set_drvdata(haptics->input_dev, haptics);
haptics->input_dev->name = "arizona:haptics";
- haptics->input_dev->dev.parent = pdev->dev.parent;
haptics->input_dev->close = arizona_haptics_close;
__set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index cfd58e87da26..1c5914cae853 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
ar2->udev = udev;
+ /* Sanity check, first interface must have an endpoint */
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 0 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail1;
+ }
ar2->intf[0] = interface;
ar2->ep[0] = &alt->endpoint[0].desc;
+ /* Sanity check, the device must have two interfaces */
ar2->intf[1] = usb_ifnum_to_if(udev, 1);
+ if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
+ dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
+ __func__, udev->actconfig->desc.bNumInterfaces);
+ r = -ENODEV;
+ goto fail1;
+ }
+
r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
if (r)
goto fail1;
+
+ /* Sanity check, second interface must have an endpoint */
alt = ar2->intf[1]->cur_altsetting;
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 1 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail2;
+ }
ar2->ep[1] = &alt->endpoint[0].desc;
r = ati_remote2_urb_init(ar2);
if (r)
- goto fail2;
+ goto fail3;
ar2->channel_mask = channel_mask;
ar2->mode_mask = mode_mask;
r = ati_remote2_setup(ar2, ar2->channel_mask);
if (r)
- goto fail2;
+ goto fail3;
usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
if (r)
- goto fail2;
+ goto fail3;
r = ati_remote2_input_init(ar2);
if (r)
- goto fail3;
+ goto fail4;
usb_set_intfdata(interface, ar2);
@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
return 0;
- fail3:
+ fail4:
sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
- fail2:
+ fail3:
ati_remote2_urb_cleanup(ar2);
+ fail2:
usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
fail1:
kfree(ar2);
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ac1fa5f44580..9c0ea36913b4 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bMasterInterface0);
+ if (!pcu->ctrl_intf)
+ return -EINVAL;
alt = pcu->ctrl_intf->cur_altsetting;
pcu->ep_ctrl = &alt->endpoint[0].desc;
@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->data_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bSlaveInterface0);
+ if (!pcu->data_intf)
+ return -EINVAL;
alt = pcu->data_intf->cur_altsetting;
if (alt->desc.bNumEndpoints != 2) {
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index a806ba3818f7..8d6326d7e7be 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
const struct max8997_platform_data *pdata =
dev_get_platdata(iodev->dev);
- const struct max8997_haptic_platform_data *haptic_pdata =
- pdata->haptic_pdata;
+ const struct max8997_haptic_platform_data *haptic_pdata = NULL;
struct max8997_haptic *chip;
struct input_dev *input_dev;
int error;
+ if (pdata)
+ haptic_pdata = pdata->haptic_pdata;
+
if (!haptic_pdata) {
dev_err(&pdev->dev, "no haptic platform data\n");
return -EINVAL;
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 3f02e0e03d12..67aab86048ad 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
kpd_delay = 15625;
- if (kpd_delay > 62500 || kpd_delay == 0) {
+ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
dev_err(&pdev->dev, "invalid power key trigger delay\n");
return -EINVAL;
}
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
pwr->name = "pmic8xxx_pwrkey";
pwr->phys = "pmic8xxx_pwrkey/input0";
- delay = (kpd_delay << 10) / USEC_PER_SEC;
- delay = 1 + ilog2(delay);
+ delay = (kpd_delay << 6) / USEC_PER_SEC;
+ delay = ilog2(delay);
err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
if (err < 0) {
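
A quick check of the new conversion, assuming the 3-bit PON_CNTL_1 delay field encodes the trigger delay as 2^x / 64 seconds:

	kpd_delay =   15625 us -> (15625   << 6) / USEC_PER_SEC =   1 -> ilog2(1)   = 0
	kpd_delay =  250000 us -> (250000  << 6) / USEC_PER_SEC =  16 -> ilog2(16)  = 4
	kpd_delay = 2000000 us -> (2000000 << 6) / USEC_PER_SEC = 128 -> ilog2(128) = 7

so the 1/64 s .. 2 s range accepted above maps exactly onto register values 0..7.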
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 63b539d3daba..84909a12ff36 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
int error = -ENOMEM;
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 1)
+ return -EINVAL;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -EIO;
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 8aee71986430..96c486de49e0 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -20,70 +20,78 @@
#include <linux/input.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/rotary_encoder.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/pm.h>
+#include <linux/property.h>
#define DRV_NAME "rotary-encoder"
struct rotary_encoder {
struct input_dev *input;
- const struct rotary_encoder_platform_data *pdata;
- unsigned int axis;
+ struct mutex access_mutex;
+
+ u32 steps;
+ u32 axis;
+ bool relative_axis;
+ bool rollover;
+
unsigned int pos;
- unsigned int irq_a;
- unsigned int irq_b;
+ struct gpio_descs *gpios;
+
+ unsigned int *irq;
bool armed;
- unsigned char dir; /* 0 - clockwise, 1 - CCW */
+ signed char dir; /* 1 - clockwise, -1 - CCW */
- char last_stable;
+ unsigned last_stable;
};
-static int rotary_encoder_get_state(const struct rotary_encoder_platform_data *pdata)
+static unsigned rotary_encoder_get_state(struct rotary_encoder *encoder)
{
- int a = !!gpio_get_value(pdata->gpio_a);
- int b = !!gpio_get_value(pdata->gpio_b);
+ int i;
+ unsigned ret = 0;
- a ^= pdata->inverted_a;
- b ^= pdata->inverted_b;
+ for (i = 0; i < encoder->gpios->ndescs; ++i) {
+ int val = gpiod_get_value_cansleep(encoder->gpios->desc[i]);
+ /* convert from gray encoding to normal */
+ if (ret & 1)
+ val = !val;
- return ((a << 1) | b);
+ ret = ret << 1 | val;
+ }
+
+ return ret & 3;
}
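
The loop above decodes the binary-reflected Gray code formed by the GPIO lines. For the common two-GPIO case (line A read first, then line B) it behaves as follows:

	A B    Gray    decoded state
	0 0 -> 0b00 -> 0
	0 1 -> 0b01 -> 1
	1 1 -> 0b11 -> 2
	1 0 -> 0b10 -> 3

A clockwise quadrature sequence therefore advances the decoded state by exactly one (mod 4) per transition, which is what the direction checks in the IRQ handlers below rely on.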
static void rotary_encoder_report_event(struct rotary_encoder *encoder)
{
- const struct rotary_encoder_platform_data *pdata = encoder->pdata;
-
- if (pdata->relative_axis) {
+ if (encoder->relative_axis) {
input_report_rel(encoder->input,
- pdata->axis, encoder->dir ? -1 : 1);
+ encoder->axis, encoder->dir);
} else {
unsigned int pos = encoder->pos;
- if (encoder->dir) {
+ if (encoder->dir < 0) {
/* turning counter-clockwise */
- if (pdata->rollover)
- pos += pdata->steps;
+ if (encoder->rollover)
+ pos += encoder->steps;
if (pos)
pos--;
} else {
/* turning clockwise */
- if (pdata->rollover || pos < pdata->steps)
+ if (encoder->rollover || pos < encoder->steps)
pos++;
}
- if (pdata->rollover)
- pos %= pdata->steps;
+ if (encoder->rollover)
+ pos %= encoder->steps;
encoder->pos = pos;
- input_report_abs(encoder->input, pdata->axis, encoder->pos);
+ input_report_abs(encoder->input, encoder->axis, encoder->pos);
}
input_sync(encoder->input);
@@ -92,9 +100,11 @@ static void rotary_encoder_report_event(struct rotary_encoder *encoder)
static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- int state;
+ unsigned state;
- state = rotary_encoder_get_state(encoder->pdata);
+ mutex_lock(&encoder->access_mutex);
+
+ state = rotary_encoder_get_state(encoder);
switch (state) {
case 0x0:
@@ -105,334 +115,227 @@ static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
break;
case 0x1:
- case 0x2:
+ case 0x3:
if (encoder->armed)
- encoder->dir = state - 1;
+ encoder->dir = 2 - state;
break;
- case 0x3:
+ case 0x2:
encoder->armed = true;
break;
}
+ mutex_unlock(&encoder->access_mutex);
+
return IRQ_HANDLED;
}
static irqreturn_t rotary_encoder_half_period_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- int state;
+ unsigned int state;
- state = rotary_encoder_get_state(encoder->pdata);
+ mutex_lock(&encoder->access_mutex);
- switch (state) {
- case 0x00:
- case 0x03:
+ state = rotary_encoder_get_state(encoder);
+
+ if (state & 1) {
+ encoder->dir = ((encoder->last_stable - state + 1) % 4) - 1;
+ } else {
if (state != encoder->last_stable) {
rotary_encoder_report_event(encoder);
encoder->last_stable = state;
}
- break;
-
- case 0x01:
- case 0x02:
- encoder->dir = (encoder->last_stable + state) & 0x01;
- break;
}
+ mutex_unlock(&encoder->access_mutex);
+
return IRQ_HANDLED;
}
static irqreturn_t rotary_encoder_quarter_period_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- unsigned char sum;
- int state;
-
- state = rotary_encoder_get_state(encoder->pdata);
-
- /*
- * We encode the previous and the current state using a byte.
- * The previous state in the MSB nibble, the current state in the LSB
- * nibble. Then use a table to decide the direction of the turn.
- */
- sum = (encoder->last_stable << 4) + state;
- switch (sum) {
- case 0x31:
- case 0x10:
- case 0x02:
- case 0x23:
- encoder->dir = 0; /* clockwise */
- break;
+ unsigned int state;
- case 0x13:
- case 0x01:
- case 0x20:
- case 0x32:
- encoder->dir = 1; /* counter-clockwise */
- break;
+ mutex_lock(&encoder->access_mutex);
- default:
- /*
- * Ignore all other values. This covers the case when the
- * state didn't change (a spurious interrupt) and the
- * cases where the state changed by two steps, making it
- * impossible to tell the direction.
- *
- * In either case, don't report any event and save the
- * state for later.
- */
+ state = rotary_encoder_get_state(encoder);
+
+ if ((encoder->last_stable + 1) % 4 == state)
+ encoder->dir = 1;
+ else if (encoder->last_stable == (state + 1) % 4)
+ encoder->dir = -1;
+ else
goto out;
- }
rotary_encoder_report_event(encoder);
out:
encoder->last_stable = state;
+ mutex_unlock(&encoder->access_mutex);
+
return IRQ_HANDLED;
}
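
After Gray decoding, direction detection reduces to modular arithmetic on successive states: state == (last_stable + 1) % 4 means one step clockwise, last_stable == (state + 1) % 4 means one step counter-clockwise, and anything else — a spurious interrupt or a two-step jump whose direction cannot be recovered — is ignored. For example, last_stable = 3 and state = 0 satisfies (3 + 1) % 4 == 0, so dir becomes 1.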
-#ifdef CONFIG_OF
-static const struct of_device_id rotary_encoder_of_match[] = {
- { .compatible = "rotary-encoder", },
- { },
-};
-MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
-
-static struct rotary_encoder_platform_data *rotary_encoder_parse_dt(struct device *dev)
+static int rotary_encoder_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(rotary_encoder_of_match, dev);
- struct device_node *np = dev->of_node;
- struct rotary_encoder_platform_data *pdata;
- enum of_gpio_flags flags;
- int error;
-
- if (!of_id || !np)
- return NULL;
-
- pdata = kzalloc(sizeof(struct rotary_encoder_platform_data),
- GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- of_property_read_u32(np, "rotary-encoder,steps", &pdata->steps);
- of_property_read_u32(np, "linux,axis", &pdata->axis);
+ struct device *dev = &pdev->dev;
+ struct rotary_encoder *encoder;
+ struct input_dev *input;
+ irq_handler_t handler;
+ u32 steps_per_period;
+ unsigned int i;
+ int err;
- pdata->gpio_a = of_get_gpio_flags(np, 0, &flags);
- pdata->inverted_a = flags & OF_GPIO_ACTIVE_LOW;
+ encoder = devm_kzalloc(dev, sizeof(struct rotary_encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
- pdata->gpio_b = of_get_gpio_flags(np, 1, &flags);
- pdata->inverted_b = flags & OF_GPIO_ACTIVE_LOW;
+ mutex_init(&encoder->access_mutex);
- pdata->relative_axis =
- of_property_read_bool(np, "rotary-encoder,relative-axis");
- pdata->rollover = of_property_read_bool(np, "rotary-encoder,rollover");
+ device_property_read_u32(dev, "rotary-encoder,steps", &encoder->steps);
- error = of_property_read_u32(np, "rotary-encoder,steps-per-period",
- &pdata->steps_per_period);
- if (error) {
+ err = device_property_read_u32(dev, "rotary-encoder,steps-per-period",
+ &steps_per_period);
+ if (err) {
/*
- * The 'half-period' property has been deprecated, you must use
- * 'steps-per-period' and set an appropriate value, but we still
- * need to parse it to maintain compatibility.
+ * The 'half-period' property has been deprecated, you must
+ * use 'steps-per-period' and set an appropriate value, but
+ * we still need to parse it to maintain compatibility. If
+ * neither property is present we fall back to the one step
+ * per period behavior.
*/
- if (of_property_read_bool(np, "rotary-encoder,half-period")) {
- pdata->steps_per_period = 2;
- } else {
- /* Fallback to one step per period behavior */
- pdata->steps_per_period = 1;
- }
+ steps_per_period = device_property_read_bool(dev,
+ "rotary-encoder,half-period") ? 2 : 1;
}
- pdata->wakeup_source = of_property_read_bool(np, "wakeup-source");
+ encoder->rollover =
+ device_property_read_bool(dev, "rotary-encoder,rollover");
- return pdata;
-}
-#else
-static inline struct rotary_encoder_platform_data *
-rotary_encoder_parse_dt(struct device *dev)
-{
- return NULL;
-}
-#endif
-
-static int rotary_encoder_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- const struct rotary_encoder_platform_data *pdata = dev_get_platdata(dev);
- struct rotary_encoder *encoder;
- struct input_dev *input;
- irq_handler_t handler;
- int err;
-
- if (!pdata) {
- pdata = rotary_encoder_parse_dt(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ device_property_read_u32(dev, "linux,axis", &encoder->axis);
+ encoder->relative_axis =
+ device_property_read_bool(dev, "rotary-encoder,relative-axis");
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
+ encoder->gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
+ if (IS_ERR(encoder->gpios)) {
+ dev_err(dev, "unable to get gpios\n");
+ return PTR_ERR(encoder->gpios);
}
-
- encoder = kzalloc(sizeof(struct rotary_encoder), GFP_KERNEL);
- input = input_allocate_device();
- if (!encoder || !input) {
- err = -ENOMEM;
- goto exit_free_mem;
+ if (encoder->gpios->ndescs < 2) {
+ dev_err(dev, "not enough gpios found\n");
+ return -EINVAL;
}
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
encoder->input = input;
- encoder->pdata = pdata;
input->name = pdev->name;
input->id.bustype = BUS_HOST;
input->dev.parent = dev;
- if (pdata->relative_axis) {
- input->evbit[0] = BIT_MASK(EV_REL);
- input->relbit[0] = BIT_MASK(pdata->axis);
- } else {
- input->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(encoder->input,
- pdata->axis, 0, pdata->steps, 0, 1);
- }
-
- /* request the GPIOs */
- err = gpio_request_one(pdata->gpio_a, GPIOF_IN, dev_name(dev));
- if (err) {
- dev_err(dev, "unable to request GPIO %d\n", pdata->gpio_a);
- goto exit_free_mem;
- }
-
- err = gpio_request_one(pdata->gpio_b, GPIOF_IN, dev_name(dev));
- if (err) {
- dev_err(dev, "unable to request GPIO %d\n", pdata->gpio_b);
- goto exit_free_gpio_a;
- }
-
- encoder->irq_a = gpio_to_irq(pdata->gpio_a);
- encoder->irq_b = gpio_to_irq(pdata->gpio_b);
+ if (encoder->relative_axis)
+ input_set_capability(input, EV_REL, encoder->axis);
+ else
+ input_set_abs_params(input,
+ encoder->axis, 0, encoder->steps, 0, 1);
- switch (pdata->steps_per_period) {
+ switch (steps_per_period >> (encoder->gpios->ndescs - 2)) {
case 4:
handler = &rotary_encoder_quarter_period_irq;
- encoder->last_stable = rotary_encoder_get_state(pdata);
+ encoder->last_stable = rotary_encoder_get_state(encoder);
break;
case 2:
handler = &rotary_encoder_half_period_irq;
- encoder->last_stable = rotary_encoder_get_state(pdata);
+ encoder->last_stable = rotary_encoder_get_state(encoder);
break;
case 1:
handler = &rotary_encoder_irq;
break;
default:
dev_err(dev, "'%d' is not a valid steps-per-period value\n",
- pdata->steps_per_period);
- err = -EINVAL;
- goto exit_free_gpio_b;
+ steps_per_period);
+ return -EINVAL;
}
- err = request_irq(encoder->irq_a, handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- DRV_NAME, encoder);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", encoder->irq_a);
- goto exit_free_gpio_b;
- }
-
- err = request_irq(encoder->irq_b, handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- DRV_NAME, encoder);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", encoder->irq_b);
- goto exit_free_irq_a;
+ encoder->irq =
+ devm_kzalloc(dev,
+ sizeof(*encoder->irq) * encoder->gpios->ndescs,
+ GFP_KERNEL);
+ if (!encoder->irq)
+ return -ENOMEM;
+
+ for (i = 0; i < encoder->gpios->ndescs; ++i) {
+ encoder->irq[i] = gpiod_to_irq(encoder->gpios->desc[i]);
+
+ err = devm_request_threaded_irq(dev, encoder->irq[i],
+ NULL, handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ DRV_NAME, encoder);
+ if (err) {
+ dev_err(dev, "unable to request IRQ %d (gpio#%d)\n",
+ encoder->irq[i], i);
+ return err;
+ }
}
err = input_register_device(input);
if (err) {
dev_err(dev, "failed to register input device\n");
- goto exit_free_irq_b;
+ return err;
}
- device_init_wakeup(&pdev->dev, pdata->wakeup_source);
+ device_init_wakeup(dev,
+ device_property_read_bool(dev, "wakeup-source"));
platform_set_drvdata(pdev, encoder);
return 0;
-
-exit_free_irq_b:
- free_irq(encoder->irq_b, encoder);
-exit_free_irq_a:
- free_irq(encoder->irq_a, encoder);
-exit_free_gpio_b:
- gpio_free(pdata->gpio_b);
-exit_free_gpio_a:
- gpio_free(pdata->gpio_a);
-exit_free_mem:
- input_free_device(input);
- kfree(encoder);
- if (!dev_get_platdata(&pdev->dev))
- kfree(pdata);
-
- return err;
}
-static int rotary_encoder_remove(struct platform_device *pdev)
-{
- struct rotary_encoder *encoder = platform_get_drvdata(pdev);
- const struct rotary_encoder_platform_data *pdata = encoder->pdata;
-
- device_init_wakeup(&pdev->dev, false);
-
- free_irq(encoder->irq_a, encoder);
- free_irq(encoder->irq_b, encoder);
- gpio_free(pdata->gpio_a);
- gpio_free(pdata->gpio_b);
-
- input_unregister_device(encoder->input);
- kfree(encoder);
-
- if (!dev_get_platdata(&pdev->dev))
- kfree(pdata);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int rotary_encoder_suspend(struct device *dev)
+static int __maybe_unused rotary_encoder_suspend(struct device *dev)
{
struct rotary_encoder *encoder = dev_get_drvdata(dev);
+ unsigned int i;
if (device_may_wakeup(dev)) {
- enable_irq_wake(encoder->irq_a);
- enable_irq_wake(encoder->irq_b);
+ for (i = 0; i < encoder->gpios->ndescs; ++i)
+ enable_irq_wake(encoder->irq[i]);
}
return 0;
}
-static int rotary_encoder_resume(struct device *dev)
+static int __maybe_unused rotary_encoder_resume(struct device *dev)
{
struct rotary_encoder *encoder = dev_get_drvdata(dev);
+ unsigned int i;
if (device_may_wakeup(dev)) {
- disable_irq_wake(encoder->irq_a);
- disable_irq_wake(encoder->irq_b);
+ for (i = 0; i < encoder->gpios->ndescs; ++i)
+ disable_irq_wake(encoder->irq[i]);
}
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(rotary_encoder_pm_ops,
- rotary_encoder_suspend, rotary_encoder_resume);
+ rotary_encoder_suspend, rotary_encoder_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id rotary_encoder_of_match[] = {
+ { .compatible = "rotary-encoder", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
+#endif
static struct platform_driver rotary_encoder_driver = {
.probe = rotary_encoder_probe,
- .remove = rotary_encoder_remove,
.driver = {
.name = DRV_NAME,
.pm = &rotary_encoder_pm_ops,
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 10c4e3d462f1..caa5a62c42fb 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
info->input_dev->name = "twl4030:vibrator";
info->input_dev->id.version = 1;
- info->input_dev->dev.parent = pdev->dev.parent;
info->input_dev->close = twl4030_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ea63fad48de6..42de34b92996 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -45,7 +45,6 @@
struct vibra_info {
struct device *dev;
struct input_dev *input_dev;
- struct workqueue_struct *workqueue;
struct work_struct play_work;
struct mutex mutex;
int irq;
@@ -182,6 +181,14 @@ static void vibra_play_work(struct work_struct *work)
{
struct vibra_info *info = container_of(work,
struct vibra_info, play_work);
+ int ret;
+
+ /* Do not allow effect, while the routing is set to use audio */
+ ret = twl6040_get_vibralr_status(info->twl6040);
+ if (ret & TWL6040_VIBSEL) {
+ dev_info(info->dev, "Vibra is configured for audio\n");
+ return;
+ }
mutex_lock(&info->mutex);
@@ -200,24 +207,12 @@ static int vibra_play(struct input_dev *input, void *data,
struct ff_effect *effect)
{
struct vibra_info *info = input_get_drvdata(input);
- int ret;
-
- /* Do not allow effect, while the routing is set to use audio */
- ret = twl6040_get_vibralr_status(info->twl6040);
- if (ret & TWL6040_VIBSEL) {
- dev_info(&input->dev, "Vibra is configured for audio\n");
- return -EBUSY;
- }
info->weak_speed = effect->u.rumble.weak_magnitude;
info->strong_speed = effect->u.rumble.strong_magnitude;
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
- ret = queue_work(info->workqueue, &info->play_work);
- if (!ret) {
- dev_info(&input->dev, "work is already on queue\n");
- return ret;
- }
+ schedule_work(&info->play_work);
return 0;
}
@@ -262,6 +257,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
int vddvibr_uV = 0;
int error;
+ of_node_get(twl6040_core_dev->of_node);
twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
"vibra");
if (!twl6040_core_node) {
@@ -362,7 +358,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
info->input_dev->name = "twl6040:vibrator";
info->input_dev->id.version = 1;
- info->input_dev->dev.parent = pdev->dev.parent;
info->input_dev->close = twl6040_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 4eb9e4d94f46..abe1a927b332 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -664,7 +664,7 @@ struct uinput_ff_upload_compat {
static int uinput_ff_upload_to_user(char __user *buffer,
const struct uinput_ff_upload *ff_up)
{
- if (INPUT_COMPAT_TEST) {
+ if (in_compat_syscall()) {
struct uinput_ff_upload_compat ff_up_compat;
ff_up_compat.request_id = ff_up->request_id;
@@ -695,7 +695,7 @@ static int uinput_ff_upload_to_user(char __user *buffer,
static int uinput_ff_upload_from_user(const char __user *buffer,
struct uinput_ff_upload *ff_up)
{
- if (INPUT_COMPAT_TEST) {
+ if (in_compat_syscall()) {
struct uinput_ff_upload_compat ff_up_compat;
if (copy_from_user(&ff_up_compat, buffer,
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 17f97e5e11e7..096abb4ad5cd 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -48,6 +48,16 @@ config MOUSE_PS2_ALPS
If unsure, say Y.
+config MOUSE_PS2_BYD
+ bool "BYD PS/2 mouse protocol extension" if EXPERT
+ default y
+ depends on MOUSE_PS2
+ help
+ Say Y here if you have a BYD PS/2 touchpad connected to
+ your system.
+
+ If unsure, say Y.
+
config MOUSE_PS2_LOGIPS2PP
bool "Logitech PS/2++ mouse protocol extension" if EXPERT
default y
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index ee6a6e9563d4..6168b134937b 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -28,6 +28,7 @@ cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o cyapa_gen6.o
psmouse-objs := psmouse-base.o synaptics.o focaltech.o
psmouse-$(CONFIG_MOUSE_PS2_ALPS) += alps.o
+psmouse-$(CONFIG_MOUSE_PS2_BYD) += byd.o
psmouse-$(CONFIG_MOUSE_PS2_ELANTECH) += elantech.o
psmouse-$(CONFIG_MOUSE_PS2_OLPC) += hgpk.o
psmouse-$(CONFIG_MOUSE_PS2_LOGIPS2PP) += logips2pp.o
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
new file mode 100644
index 000000000000..e583f8b50454
--- /dev/null
+++ b/drivers/input/mouse/byd.c
@@ -0,0 +1,512 @@
+/*
+ * BYD TouchPad PS/2 mouse driver
+ *
+ * Copyright (C) 2015 Chris Diamand <chris@diamand.org>
+ * Copyright (C) 2015 Richard Pospesel
+ * Copyright (C) 2015 Tai Chi Minh Ralph Eastwood
+ * Copyright (C) 2015 Martin Wimpress
+ * Copyright (C) 2015 Jay Kuri
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/libps2.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include "psmouse.h"
+#include "byd.h"
+
+/* PS2 Bits */
+#define PS2_Y_OVERFLOW BIT_MASK(7)
+#define PS2_X_OVERFLOW BIT_MASK(6)
+#define PS2_Y_SIGN BIT_MASK(5)
+#define PS2_X_SIGN BIT_MASK(4)
+#define PS2_ALWAYS_1 BIT_MASK(3)
+#define PS2_MIDDLE BIT_MASK(2)
+#define PS2_RIGHT BIT_MASK(1)
+#define PS2_LEFT BIT_MASK(0)
+
+/*
+ * BYD pad constants
+ */
+
+/*
+ * True device resolution is unknown; however, experiments show the
+ * resolution is about 111 units/mm.
+ * Absolute coordinate packets are in the range 0-255 for both X and Y,
+ * so we pick ABS_X/ABS_Y dimensions that are multiples of 256 and in
+ * the right ballpark given the touchpad's physical dimensions and the
+ * estimated resolution. Per the spec sheet, the device active area
+ * dimensions are 101.6 x 60.1 mm.
+ */
+#define BYD_PAD_WIDTH 11264
+#define BYD_PAD_HEIGHT 6656
+#define BYD_PAD_RESOLUTION 111
+
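
A quick sanity check on these constants: 11264 / 111 ≈ 101.5 mm and 6656 / 111 ≈ 60.0 mm, matching the 101.6 x 60.1 mm active area quoted above, and both 11264 (44 * 256) and 6656 (26 * 256) are multiples of 256 as the 0-255 packet range requires.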
+/*
+ * Given the above dimensions, relative packets velocity is in multiples of
+ * 1 unit / 11 milliseconds. We use this dt to estimate distance traveled
+ */
+#define BYD_DT 11
+/* Time in jiffies used to timeout various touch events (64 ms) */
+#define BYD_TOUCH_TIMEOUT msecs_to_jiffies(64)
+
+/* BYD commands reverse engineered from the Windows driver */
+
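+/*
+ * The constants below use the libps2 command encoding: the top nibble is
+ * the number of parameter bytes sent, the next nibble is the number of
+ * bytes received, and the low byte is the command itself. So 0x10cc
+ * sends one byte, while 0x14e0 (used in byd_reset_touchpad() below)
+ * sends one byte and receives four.
+ */
+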
+/*
+ * Swipe gesture from off-pad to on-pad
+ * 0 : disable
+ * 1 : enable
+ */
+#define BYD_CMD_SET_OFFSCREEN_SWIPE 0x10cc
+/*
+ * Tap and drag delay time
+ * 0 : disable
+ * 1 - 8 : least to most delay
+ */
+#define BYD_CMD_SET_TAP_DRAG_DELAY_TIME 0x10cf
+/*
+ * Physical buttons function mapping
+ * 0 : enable
+ * 4 : normal
+ * 5 : left button custom command
+ * 6 : right button custom command
+ * 8 : disable
+ */
+#define BYD_CMD_SET_PHYSICAL_BUTTONS 0x10d0
+/*
+ * Absolute mode (1 byte X/Y resolution)
+ * 0 : disable
+ * 2 : enable
+ */
+#define BYD_CMD_SET_ABSOLUTE_MODE 0x10d1
+/*
+ * Two finger scrolling
+ * 1 : vertical
+ * 2 : horizontal
+ * 3 : vertical + horizontal
+ * 4 : disable
+ */
+#define BYD_CMD_SET_TWO_FINGER_SCROLL 0x10d2
+/*
+ * Handedness
+ * 1 : right handed
+ * 2 : left handed
+ */
+#define BYD_CMD_SET_HANDEDNESS 0x10d3
+/*
+ * Tap to click
+ * 1 : enable
+ * 2 : disable
+ */
+#define BYD_CMD_SET_TAP 0x10d4
+/*
+ * Tap and drag
+ * 1 : tap and hold to drag
+ * 2 : tap and hold to drag + lock
+ * 3 : disable
+ */
+#define BYD_CMD_SET_TAP_DRAG 0x10d5
+/*
+ * Touch sensitivity
+ * 1 - 7 : least to most sensitive
+ */
+#define BYD_CMD_SET_TOUCH_SENSITIVITY 0x10d6
+/*
+ * One finger scrolling
+ * 1 : vertical
+ * 2 : horizontal
+ * 3 : vertical + horizontal
+ * 4 : disable
+ */
+#define BYD_CMD_SET_ONE_FINGER_SCROLL 0x10d7
+/*
+ * One finger scrolling function
+ * 1 : free scrolling
+ * 2 : edge motion
+ * 3 : free scrolling + edge motion
+ * 4 : disable
+ */
+#define BYD_CMD_SET_ONE_FINGER_SCROLL_FUNC 0x10d8
+/*
+ * Sliding speed
+ * 1 - 5 : slowest to fastest
+ */
+#define BYD_CMD_SET_SLIDING_SPEED 0x10da
+/*
+ * Edge motion
+ * 1 : disable
+ * 2 : enable when dragging
+ * 3 : enable when dragging and pointing
+ */
+#define BYD_CMD_SET_EDGE_MOTION 0x10db
+/*
+ * Left edge region size
+ * 0 - 7 : smallest to largest width
+ */
+#define BYD_CMD_SET_LEFT_EDGE_REGION 0x10dc
+/*
+ * Top edge region size
+ * 0 - 9 : smallest to largest height
+ */
+#define BYD_CMD_SET_TOP_EDGE_REGION 0x10dd
+/*
+ * Disregard palm press as clicks
+ * 1 - 6 : smallest to largest
+ */
+#define BYD_CMD_SET_PALM_CHECK 0x10de
+/*
+ * Right edge region size
+ * 0 - 7 : smallest to largest width
+ */
+#define BYD_CMD_SET_RIGHT_EDGE_REGION 0x10df
+/*
+ * Bottom edge region size
+ * 0 - 9 : smallest to largest height
+ */
+#define BYD_CMD_SET_BOTTOM_EDGE_REGION 0x10e1
+/*
+ * Multitouch gestures
+ * 1 : enable
+ * 2 : disable
+ */
+#define BYD_CMD_SET_MULTITOUCH 0x10e3
+/*
+ * Edge motion speed
+ * 0 : control with finger pressure
+ * 1 - 9 : slowest to fastest
+ */
+#define BYD_CMD_SET_EDGE_MOTION_SPEED 0x10e4
+/*
+ * Two finger scrolling function
+ * 0 : free scrolling
+ * 1 : free scrolling (with momentum)
+ * 2 : edge motion
+ * 3 : free scrolling (with momentum) + edge motion
+ * 4 : disable
+ */
+#define BYD_CMD_SET_TWO_FINGER_SCROLL_FUNC 0x10e5
+
+/*
+ * The touchpad generates a mixture of absolute and relative packets, indicated
+ * by the last byte of each packet being set to one of the following:
+ */
+#define BYD_PACKET_ABSOLUTE 0xf8
+#define BYD_PACKET_RELATIVE 0x00
+/* Multitouch gesture packets */
+#define BYD_PACKET_PINCH_IN 0xd8
+#define BYD_PACKET_PINCH_OUT 0x28
+#define BYD_PACKET_ROTATE_CLOCKWISE 0x29
+#define BYD_PACKET_ROTATE_ANTICLOCKWISE 0xd7
+#define BYD_PACKET_TWO_FINGER_SCROLL_RIGHT 0x2a
+#define BYD_PACKET_TWO_FINGER_SCROLL_DOWN 0x2b
+#define BYD_PACKET_TWO_FINGER_SCROLL_UP 0xd5
+#define BYD_PACKET_TWO_FINGER_SCROLL_LEFT 0xd6
+#define BYD_PACKET_THREE_FINGER_SWIPE_RIGHT 0x2c
+#define BYD_PACKET_THREE_FINGER_SWIPE_DOWN 0x2d
+#define BYD_PACKET_THREE_FINGER_SWIPE_UP 0xd3
+#define BYD_PACKET_THREE_FINGER_SWIPE_LEFT 0xd4
+#define BYD_PACKET_FOUR_FINGER_DOWN 0x33
+#define BYD_PACKET_FOUR_FINGER_UP 0xcd
+#define BYD_PACKET_REGION_SCROLL_RIGHT 0x35
+#define BYD_PACKET_REGION_SCROLL_DOWN 0x36
+#define BYD_PACKET_REGION_SCROLL_UP 0xca
+#define BYD_PACKET_REGION_SCROLL_LEFT 0xcb
+#define BYD_PACKET_RIGHT_CORNER_CLICK 0xd2
+#define BYD_PACKET_LEFT_CORNER_CLICK 0x2e
+#define BYD_PACKET_LEFT_AND_RIGHT_CORNER_CLICK 0x2f
+#define BYD_PACKET_ONTO_PAD_SWIPE_RIGHT 0x37
+#define BYD_PACKET_ONTO_PAD_SWIPE_DOWN 0x30
+#define BYD_PACKET_ONTO_PAD_SWIPE_UP 0xd0
+#define BYD_PACKET_ONTO_PAD_SWIPE_LEFT 0xc9
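+
+/*
+ * Each packet is four bytes: byte 0 carries the standard PS/2 status and
+ * button bits, bytes 1 and 2 carry X and Y data, and byte 3 carries one
+ * of the type codes above (see byd_process_byte() below).
+ */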
+
+struct byd_data {
+ struct timer_list timer;
+ s32 abs_x;
+ s32 abs_y;
+ typeof(jiffies) last_touch_time;
+ bool btn_left;
+ bool btn_right;
+ bool touch;
+};
+
+static void byd_report_input(struct psmouse *psmouse)
+{
+ struct byd_data *priv = psmouse->private;
+ struct input_dev *dev = psmouse->dev;
+
+ input_report_key(dev, BTN_TOUCH, priv->touch);
+ input_report_key(dev, BTN_TOOL_FINGER, priv->touch);
+
+ input_report_abs(dev, ABS_X, priv->abs_x);
+ input_report_abs(dev, ABS_Y, priv->abs_y);
+ input_report_key(dev, BTN_LEFT, priv->btn_left);
+ input_report_key(dev, BTN_RIGHT, priv->btn_right);
+
+ input_sync(dev);
+}
+
+static void byd_clear_touch(unsigned long data)
+{
+ struct psmouse *psmouse = (struct psmouse *)data;
+ struct byd_data *priv = psmouse->private;
+
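+	/*
+	 * Pause serio RX so this timer callback cannot race with
+	 * byd_process_byte() running from the interrupt path.
+	 */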
+ serio_pause_rx(psmouse->ps2dev.serio);
+ priv->touch = false;
+
+ byd_report_input(psmouse);
+
+ serio_continue_rx(psmouse->ps2dev.serio);
+
+ /*
+ * Move cursor back to center of pad when we lose touch - this
+ * specifically improves user experience when moving cursor with one
+ * finger, and pressing a button with another.
+ */
+ priv->abs_x = BYD_PAD_WIDTH / 2;
+ priv->abs_y = BYD_PAD_HEIGHT / 2;
+}
+
+static psmouse_ret_t byd_process_byte(struct psmouse *psmouse)
+{
+ struct byd_data *priv = psmouse->private;
+ u8 *pkt = psmouse->packet;
+
+ if (psmouse->pktcnt > 0 && !(pkt[0] & PS2_ALWAYS_1)) {
+ psmouse_warn(psmouse, "Always_1 bit not 1. pkt[0] = %02x\n",
+ pkt[0]);
+ return PSMOUSE_BAD_DATA;
+ }
+
+ if (psmouse->pktcnt < psmouse->pktsize)
+ return PSMOUSE_GOOD_DATA;
+
+ /* Otherwise, a full packet has been received */
+ switch (pkt[3]) {
+ case BYD_PACKET_ABSOLUTE:
+ /* Only use absolute packets for the start of movement. */
+ if (!priv->touch) {
+ /* needed to detect tap */
+ typeof(jiffies) tap_time =
+ priv->last_touch_time + BYD_TOUCH_TIMEOUT;
+ priv->touch = time_after(jiffies, tap_time);
+
+ /* init abs position */
+ priv->abs_x = pkt[1] * (BYD_PAD_WIDTH / 256);
+ priv->abs_y = (255 - pkt[2]) * (BYD_PAD_HEIGHT / 256);
+ }
+ break;
+ case BYD_PACKET_RELATIVE: {
+ /* Standard packet */
+ /* Sign-extend if a sign bit is set. */
+ u32 signx = pkt[0] & PS2_X_SIGN ? ~0xFF : 0;
+ u32 signy = pkt[0] & PS2_Y_SIGN ? ~0xFF : 0;
+ s32 dx = signx | (int) pkt[1];
+ s32 dy = signy | (int) pkt[2];
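+		/*
+		 * e.g. pkt[1] == 0xfe with PS2_X_SIGN set yields
+		 * dx = 0xffffff00 | 0xfe = -2.
+		 */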
+
+ /* Update position based on velocity */
+ priv->abs_x += dx * BYD_DT;
+ priv->abs_y -= dy * BYD_DT;
+
+ priv->touch = true;
+ break;
+ }
+ default:
+ psmouse_warn(psmouse,
+ "Unrecognized Z: pkt = %02x %02x %02x %02x\n",
+ psmouse->packet[0], psmouse->packet[1],
+ psmouse->packet[2], psmouse->packet[3]);
+ return PSMOUSE_BAD_DATA;
+ }
+
+ priv->btn_left = pkt[0] & PS2_LEFT;
+ priv->btn_right = pkt[0] & PS2_RIGHT;
+
+ byd_report_input(psmouse);
+
+ /* Reset time since last touch. */
+ if (priv->touch) {
+ priv->last_touch_time = jiffies;
+ mod_timer(&priv->timer, jiffies + BYD_TOUCH_TIMEOUT);
+ }
+
+ return PSMOUSE_FULL_PACKET;
+}
+
+static int byd_reset_touchpad(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ u8 param[4];
+ size_t i;
+
+ const struct {
+ u16 command;
+ u8 arg;
+ } seq[] = {
+ /*
+ * Intellimouse initialization sequence, to get 4-byte instead
+ * of 3-byte packets.
+ */
+ { PSMOUSE_CMD_SETRATE, 0xC8 },
+ { PSMOUSE_CMD_SETRATE, 0x64 },
+ { PSMOUSE_CMD_SETRATE, 0x50 },
+ { PSMOUSE_CMD_GETID, 0 },
+ { PSMOUSE_CMD_ENABLE, 0 },
+ /*
+ * BYD-specific initialization, which enables absolute mode and
+		 * (if desired) the touchpad's built-in gesture detection.
+ */
+ { 0x10E2, 0x00 },
+ { 0x10E0, 0x02 },
+ /* The touchpad should reply with 4 seemingly-random bytes */
+ { 0x14E0, 0x01 },
+ /* Pairs of parameters and values. */
+ { BYD_CMD_SET_HANDEDNESS, 0x01 },
+ { BYD_CMD_SET_PHYSICAL_BUTTONS, 0x04 },
+ { BYD_CMD_SET_TAP, 0x02 },
+ { BYD_CMD_SET_ONE_FINGER_SCROLL, 0x04 },
+ { BYD_CMD_SET_ONE_FINGER_SCROLL_FUNC, 0x04 },
+ { BYD_CMD_SET_EDGE_MOTION, 0x01 },
+ { BYD_CMD_SET_PALM_CHECK, 0x00 },
+ { BYD_CMD_SET_MULTITOUCH, 0x02 },
+ { BYD_CMD_SET_TWO_FINGER_SCROLL, 0x04 },
+ { BYD_CMD_SET_TWO_FINGER_SCROLL_FUNC, 0x04 },
+ { BYD_CMD_SET_LEFT_EDGE_REGION, 0x00 },
+ { BYD_CMD_SET_TOP_EDGE_REGION, 0x00 },
+ { BYD_CMD_SET_RIGHT_EDGE_REGION, 0x00 },
+ { BYD_CMD_SET_BOTTOM_EDGE_REGION, 0x00 },
+ { BYD_CMD_SET_ABSOLUTE_MODE, 0x02 },
+ /* Finalize initialization. */
+ { 0x10E0, 0x00 },
+ { 0x10E2, 0x01 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(seq); ++i) {
+ memset(param, 0, sizeof(param));
+ param[0] = seq[i].arg;
+ if (ps2_command(ps2dev, param, seq[i].command))
+ return -EIO;
+ }
+
+ psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
+ return 0;
+}
+
+static int byd_reconnect(struct psmouse *psmouse)
+{
+ int retry = 0, error = 0;
+
+ psmouse_dbg(psmouse, "Reconnect\n");
+ do {
+ psmouse_reset(psmouse);
+ if (retry)
+ ssleep(1);
+		error = byd_detect(psmouse, false);
+ } while (error && ++retry < 3);
+
+ if (error)
+ return error;
+
+ psmouse_dbg(psmouse, "Reconnected after %d attempts\n", retry);
+
+ error = byd_reset_touchpad(psmouse);
+ if (error) {
+ psmouse_err(psmouse, "Unable to initialize device\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static void byd_disconnect(struct psmouse *psmouse)
+{
+ struct byd_data *priv = psmouse->private;
+
+ if (priv) {
+ del_timer(&priv->timer);
+ kfree(psmouse->private);
+ psmouse->private = NULL;
+ }
+}
+
+int byd_detect(struct psmouse *psmouse, bool set_properties)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ u8 param[4] = {0x03, 0x00, 0x00, 0x00};
+
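+	/*
+	 * Detection knock: four "set resolution 0x03" commands followed by
+	 * GETINFO; a BYD pad is expected to answer with 0x03 0x64 in bytes
+	 * 1 and 2 of the response.
+	 */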
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -1;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -1;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -1;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -1;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -1;
+
+ if (param[1] != 0x03 || param[2] != 0x64)
+ return -ENODEV;
+
+ psmouse_dbg(psmouse, "BYD touchpad detected\n");
+
+ if (set_properties) {
+ psmouse->vendor = "BYD";
+ psmouse->name = "TouchPad";
+ }
+
+ return 0;
+}
+
+int byd_init(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct byd_data *priv;
+
+ if (psmouse_reset(psmouse))
+ return -EIO;
+
+ if (byd_reset_touchpad(psmouse))
+ return -EIO;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
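+	/*
+	 * The pad gives no explicit release event; byd_clear_touch() infers
+	 * one when the timer re-armed in byd_process_byte() expires.
+	 */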
+ setup_timer(&priv->timer, byd_clear_touch, (unsigned long) psmouse);
+
+ psmouse->private = priv;
+ psmouse->disconnect = byd_disconnect;
+ psmouse->reconnect = byd_reconnect;
+ psmouse->protocol_handler = byd_process_byte;
+ psmouse->pktsize = 4;
+ psmouse->resync_time = 0;
+
+ __set_bit(INPUT_PROP_POINTER, dev->propbit);
+ /* Touchpad */
+ __set_bit(BTN_TOUCH, dev->keybit);
+ __set_bit(BTN_TOOL_FINGER, dev->keybit);
+ /* Buttons */
+ __set_bit(BTN_LEFT, dev->keybit);
+ __set_bit(BTN_RIGHT, dev->keybit);
+ __clear_bit(BTN_MIDDLE, dev->keybit);
+
+ /* Absolute position */
+ __set_bit(EV_ABS, dev->evbit);
+ input_set_abs_params(dev, ABS_X, 0, BYD_PAD_WIDTH, 0, 0);
+ input_set_abs_params(dev, ABS_Y, 0, BYD_PAD_HEIGHT, 0, 0);
+ input_abs_set_res(dev, ABS_X, BYD_PAD_RESOLUTION);
+ input_abs_set_res(dev, ABS_Y, BYD_PAD_RESOLUTION);
+ /* No relative support */
+ __clear_bit(EV_REL, dev->evbit);
+ __clear_bit(REL_X, dev->relbit);
+ __clear_bit(REL_Y, dev->relbit);
+
+ return 0;
+}
diff --git a/drivers/input/mouse/byd.h b/drivers/input/mouse/byd.h
new file mode 100644
index 000000000000..d6c120cf36cd
--- /dev/null
+++ b/drivers/input/mouse/byd.h
@@ -0,0 +1,18 @@
+#ifndef _BYD_H
+#define _BYD_H
+
+#ifdef CONFIG_MOUSE_PS2_BYD
+int byd_detect(struct psmouse *psmouse, bool set_properties);
+int byd_init(struct psmouse *psmouse);
+#else
+static inline int byd_detect(struct psmouse *psmouse, bool set_properties)
+{
+ return -ENOSYS;
+}
+static inline int byd_init(struct psmouse *psmouse)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_MOUSE_PS2_BYD */
+
+#endif /* _BYD_H */
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index eb76b61418f3..dc2394292088 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -383,7 +383,7 @@ static int cyapa_open(struct input_dev *input)
* when in operational mode.
*/
error = cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
if (error) {
dev_warn(dev, "set active power failed: %d\n", error);
goto out;
@@ -424,7 +424,8 @@ static void cyapa_close(struct input_dev *input)
pm_runtime_set_suspended(dev);
if (cyapa->operational)
- cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
+ cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_OFF, 0, CYAPA_PM_DEACTIVE);
mutex_unlock(&cyapa->state_sync_lock);
}
@@ -534,7 +535,7 @@ static void cyapa_enable_irq_for_cmd(struct cyapa *cyapa)
*/
if (!input || cyapa->operational)
cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
	/* Gen3 always uses polling mode for commands. */
if (cyapa->gen >= CYAPA_GEN5)
enable_irq(cyapa->client->irq);
@@ -550,7 +551,7 @@ static void cyapa_disable_irq_for_cmd(struct cyapa *cyapa)
disable_irq(cyapa->client->irq);
if (!input || cyapa->operational)
cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_OFF, 0, false);
+ PWR_MODE_OFF, 0, CYAPA_PM_ACTIVE);
}
}
@@ -617,7 +618,8 @@ static int cyapa_initialize(struct cyapa *cyapa)
/* Power down the device until we need it. */
if (cyapa->operational)
- cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
+ cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_OFF, 0, CYAPA_PM_ACTIVE);
return 0;
}
@@ -634,7 +636,7 @@ static int cyapa_reinitialize(struct cyapa *cyapa)
/* Avoid command failures when TP was in OFF state. */
if (cyapa->operational)
cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
error = cyapa_detect(cyapa);
if (error)
@@ -654,7 +656,7 @@ out:
/* Reset to power OFF state to save power when no user open. */
if (cyapa->operational)
cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_OFF, 0, false);
+ PWR_MODE_OFF, 0, CYAPA_PM_DEACTIVE);
} else if (!error && cyapa->operational) {
/*
* Make sure only enable runtime PM when device is
@@ -1392,7 +1394,7 @@ static int __maybe_unused cyapa_suspend(struct device *dev)
power_mode = device_may_wakeup(dev) ? cyapa->suspend_power_mode
: PWR_MODE_OFF;
error = cyapa->ops->set_power_mode(cyapa, power_mode,
- cyapa->suspend_sleep_time, true);
+ cyapa->suspend_sleep_time, CYAPA_PM_SUSPEND);
if (error)
dev_err(dev, "suspend set power mode failed: %d\n",
error);
@@ -1447,7 +1449,7 @@ static int __maybe_unused cyapa_runtime_suspend(struct device *dev)
error = cyapa->ops->set_power_mode(cyapa,
cyapa->runtime_suspend_power_mode,
cyapa->runtime_suspend_sleep_time,
- false);
+ CYAPA_PM_RUNTIME_SUSPEND);
if (error)
dev_warn(dev, "runtime suspend failed: %d\n", error);
@@ -1460,7 +1462,7 @@ static int __maybe_unused cyapa_runtime_resume(struct device *dev)
int error;
error = cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_RUNTIME_RESUME);
if (error)
dev_warn(dev, "runtime resume failed: %d\n", error);
diff --git a/drivers/input/mouse/cyapa.h b/drivers/input/mouse/cyapa.h
index b812bba8cdd7..ce951fe4516a 100644
--- a/drivers/input/mouse/cyapa.h
+++ b/drivers/input/mouse/cyapa.h
@@ -250,6 +250,15 @@ struct cyapa;
typedef bool (*cb_sort)(struct cyapa *, u8 *, int);
+enum cyapa_pm_stage {
+ CYAPA_PM_DEACTIVE,
+ CYAPA_PM_ACTIVE,
+ CYAPA_PM_SUSPEND,
+ CYAPA_PM_RESUME,
+ CYAPA_PM_RUNTIME_SUSPEND,
+ CYAPA_PM_RUNTIME_RESUME,
+};
+
struct cyapa_dev_ops {
int (*check_fw)(struct cyapa *, const struct firmware *);
int (*bl_enter)(struct cyapa *);
@@ -273,7 +282,7 @@ struct cyapa_dev_ops {
int (*sort_empty_output_data)(struct cyapa *,
u8 *, int *, cb_sort);
- int (*set_power_mode)(struct cyapa *, u8, u16, bool);
+ int (*set_power_mode)(struct cyapa *, u8, u16, enum cyapa_pm_stage);
int (*set_proximity)(struct cyapa *, bool);
};
@@ -289,6 +298,9 @@ struct cyapa_pip_cmd_states {
u8 *resp_data;
int *resp_len;
+ enum cyapa_pm_stage pm_stage;
+ struct mutex pm_stage_lock;
+
u8 irq_cmd_buf[CYAPA_REG_MAP_SIZE];
u8 empty_buf[CYAPA_REG_MAP_SIZE];
};
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 1a9d12ae7538..f9600753eca5 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -269,6 +269,7 @@ static const struct cyapa_cmd_len cyapa_smbus_cmds[] = {
{ CYAPA_SMBUS_MIN_BASELINE, 1 }, /* CYAPA_CMD_MIN_BASELINE */
};
+static int cyapa_gen3_try_poll_handler(struct cyapa *cyapa);
/*
* cyapa_smbus_read_block - perform smbus block read command
@@ -950,12 +951,14 @@ static u16 cyapa_get_wait_time_for_pwr_cmd(u8 pwr_mode)
* Device power mode can only be set when device is in operational mode.
*/
static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
- u16 always_unused, bool is_suspend_unused)
+ u16 always_unused, enum cyapa_pm_stage pm_stage)
{
- int ret;
+ struct input_dev *input = cyapa->input;
u8 power;
int tries;
- u16 sleep_time;
+ int sleep_time;
+ int interval;
+ int ret;
if (cyapa->state != CYAPA_STATE_OP)
return 0;
@@ -977,7 +980,7 @@ static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
if ((ret & PWR_MODE_MASK) == power_mode)
return 0;
- sleep_time = cyapa_get_wait_time_for_pwr_cmd(ret & PWR_MODE_MASK);
+ sleep_time = (int)cyapa_get_wait_time_for_pwr_cmd(ret & PWR_MODE_MASK);
power = ret;
power &= ~PWR_MODE_MASK;
power |= power_mode & PWR_MODE_MASK;
@@ -995,7 +998,23 @@ static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
* doing so before issuing the next command may result in errors
* depending on the command's content.
*/
- msleep(sleep_time);
+ if (cyapa->operational && input && input->users &&
+ (pm_stage == CYAPA_PM_RUNTIME_SUSPEND ||
+ pm_stage == CYAPA_PM_RUNTIME_RESUME)) {
+		/* Poll at about 120 Hz; reads may fail, just ignore errors. */
+ interval = 1000 / 120;
+ while (sleep_time > 0) {
+ if (sleep_time > interval)
+ msleep(interval);
+ else
+ msleep(sleep_time);
+ sleep_time -= interval;
+ cyapa_gen3_try_poll_handler(cyapa);
+ }
+ } else {
+ msleep(sleep_time);
+ }
+
return ret;
}
@@ -1112,7 +1131,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
* may cause problems, so we set the power mode first here.
*/
error = cyapa_gen3_set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
if (error)
dev_err(dev, "%s: set full power mode failed: %d\n",
__func__, error);
@@ -1168,32 +1187,16 @@ static bool cyapa_gen3_irq_cmd_handler(struct cyapa *cyapa)
return false;
}
-static int cyapa_gen3_irq_handler(struct cyapa *cyapa)
+static int cyapa_gen3_event_process(struct cyapa *cyapa,
+ struct cyapa_reg_data *data)
{
struct input_dev *input = cyapa->input;
- struct device *dev = &cyapa->client->dev;
- struct cyapa_reg_data data;
int num_fingers;
- int ret;
int i;
- ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
- if (ret != sizeof(data)) {
- dev_err(dev, "failed to read report data, (%d)\n", ret);
- return -EINVAL;
- }
-
- if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
- (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
- (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
- dev_err(dev, "invalid device state bytes, %02x %02x\n",
- data.device_status, data.finger_btn);
- return -EINVAL;
- }
-
- num_fingers = (data.finger_btn >> 4) & 0x0f;
+ num_fingers = (data->finger_btn >> 4) & 0x0f;
for (i = 0; i < num_fingers; i++) {
- const struct cyapa_touch *touch = &data.touches[i];
+ const struct cyapa_touch *touch = &data->touches[i];
/* Note: touch->id range is 1 to 15; slots are 0 to 14. */
int slot = touch->id - 1;
@@ -1210,18 +1213,65 @@ static int cyapa_gen3_irq_handler(struct cyapa *cyapa)
if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
input_report_key(input, BTN_LEFT,
- !!(data.finger_btn & OP_DATA_LEFT_BTN));
+ !!(data->finger_btn & OP_DATA_LEFT_BTN));
if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
input_report_key(input, BTN_MIDDLE,
- !!(data.finger_btn & OP_DATA_MIDDLE_BTN));
+ !!(data->finger_btn & OP_DATA_MIDDLE_BTN));
if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
input_report_key(input, BTN_RIGHT,
- !!(data.finger_btn & OP_DATA_RIGHT_BTN));
+ !!(data->finger_btn & OP_DATA_RIGHT_BTN));
input_sync(input);
return 0;
}
+static int cyapa_gen3_irq_handler(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ struct cyapa_reg_data data;
+ int ret;
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
+ if (ret != sizeof(data)) {
+ dev_err(dev, "failed to read report data, (%d)\n", ret);
+ return -EINVAL;
+ }
+
+ if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
+ (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
+ (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
+ dev_err(dev, "invalid device state bytes: %02x %02x\n",
+ data.device_status, data.finger_btn);
+ return -EINVAL;
+ }
+
+ return cyapa_gen3_event_process(cyapa, &data);
+}
+
+/*
+ * This function is called from cyapa_gen3_set_power_mode(), where it is
+ * known that reads may fail for a while after the set power mode command
+ * has been sent. It exists to process any valid events in that window
+ * while avoiding the known and unwanted I2C and data parse error
+ * messages.
+ */
+static int cyapa_gen3_try_poll_handler(struct cyapa *cyapa)
+{
+ struct cyapa_reg_data data;
+ int ret;
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
+ if (ret != sizeof(data))
+ return -EINVAL;
+
+ if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
+ (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
+ (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID)
+ return -EINVAL;
+
+ return cyapa_gen3_event_process(cyapa, &data);
+}
+
static int cyapa_gen3_initialize(struct cyapa *cyapa) { return 0; }
static int cyapa_gen3_bl_initiate(struct cyapa *cyapa,
const struct firmware *fw) { return 0; }
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index 118ba977181e..5775d40b3d53 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -342,6 +342,9 @@ u8 pip_bl_read_app_info[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
static u8 cyapa_pip_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
0xff, 0xfe, 0xfd, 0x5a };
+static int cyapa_pip_event_process(struct cyapa *cyapa,
+ struct cyapa_pip_report_data *report_data);
+
int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa)
{
struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
@@ -350,6 +353,9 @@ int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa)
atomic_set(&pip->cmd_issued, 0);
mutex_init(&pip->cmd_lock);
+ mutex_init(&pip->pm_stage_lock);
+ pip->pm_stage = CYAPA_PM_DEACTIVE;
+
pip->resp_sort_func = NULL;
pip->in_progress_cmd = PIP_INVALID_CMD;
pip->resp_data = NULL;
@@ -397,6 +403,38 @@ ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
return 0;
}
+static void cyapa_set_pip_pm_state(struct cyapa *cyapa,
+ enum cyapa_pm_stage pm_stage)
+{
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
+
+ mutex_lock(&pip->pm_stage_lock);
+ pip->pm_stage = pm_stage;
+ mutex_unlock(&pip->pm_stage_lock);
+}
+
+static void cyapa_reset_pip_pm_state(struct cyapa *cyapa)
+{
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
+
+ /* Indicates the pip->pm_stage is not valid. */
+ mutex_lock(&pip->pm_stage_lock);
+ pip->pm_stage = CYAPA_PM_DEACTIVE;
+ mutex_unlock(&pip->pm_stage_lock);
+}
+
+static enum cyapa_pm_stage cyapa_get_pip_pm_state(struct cyapa *cyapa)
+{
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
+ enum cyapa_pm_stage pm_stage;
+
+ mutex_lock(&pip->pm_stage_lock);
+ pm_stage = pip->pm_stage;
+ mutex_unlock(&pip->pm_stage_lock);
+
+ return pm_stage;
+}
+
/**
 * This function is aimed at dumping all unread data in the Gen5 trackpad
 * before sending any command; otherwise the interrupt line will be blocked.
@@ -404,7 +442,9 @@ ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
int cyapa_empty_pip_output_data(struct cyapa *cyapa,
u8 *buf, int *len, cb_sort func)
{
+ struct input_dev *input = cyapa->input;
struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
+ enum cyapa_pm_stage pm_stage = cyapa_get_pip_pm_state(cyapa);
int length;
int report_count;
int empty_count;
@@ -478,6 +518,12 @@ int cyapa_empty_pip_output_data(struct cyapa *cyapa,
*len = length;
/* Response found, success. */
return 0;
+ } else if (cyapa->operational && input && input->users &&
+ (pm_stage == CYAPA_PM_RUNTIME_RESUME ||
+ pm_stage == CYAPA_PM_RUNTIME_SUSPEND)) {
+ /* Parse the data and report it if it's valid. */
+ cyapa_pip_event_process(cyapa,
+ (struct cyapa_pip_report_data *)pip->empty_buf);
}
error = -EINVAL;
@@ -1566,15 +1612,17 @@ int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state)
}
static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
- u8 power_mode, u16 sleep_time, bool is_suspend)
+ u8 power_mode, u16 sleep_time, enum cyapa_pm_stage pm_stage)
{
struct device *dev = &cyapa->client->dev;
u8 power_state;
- int error;
+ int error = 0;
if (cyapa->state != CYAPA_STATE_GEN5_APP)
return 0;
+ cyapa_set_pip_pm_state(cyapa, pm_stage);
+
if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
/*
* Assume TP in deep sleep mode when driver is loaded,
@@ -1597,7 +1645,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
power_mode == PWR_MODE_BTN_ONLY ||
PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
/* Has in correct power mode state, early return. */
- return 0;
+ goto out;
}
}
@@ -1605,11 +1653,11 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
if (error) {
dev_err(dev, "enter deep sleep fail: %d\n", error);
- return error;
+ goto out;
}
PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
- return 0;
+ goto out;
}
/*
@@ -1621,7 +1669,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
if (error) {
dev_err(dev, "deep sleep wake fail: %d\n", error);
- return error;
+ goto out;
}
}
@@ -1630,7 +1678,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
GEN5_POWER_STATE_ACTIVE);
if (error) {
dev_err(dev, "change to active fail: %d\n", error);
- return error;
+ goto out;
}
PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
@@ -1639,7 +1687,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
GEN5_POWER_STATE_BTN_ONLY);
if (error) {
dev_err(dev, "fail to button only mode: %d\n", error);
- return error;
+ goto out;
}
PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
@@ -1664,7 +1712,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
if (error) {
dev_err(dev, "set power state to 0x%02x failed: %d\n",
power_state, error);
- return error;
+ goto out;
}
/*
@@ -1677,14 +1725,16 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
* is suspending which may cause interrupt line unable to be
* asserted again.
*/
- if (is_suspend)
+ if (pm_stage == CYAPA_PM_SUSPEND)
cyapa_gen5_disable_pip_report(cyapa);
PIP_DEV_SET_PWR_STATE(cyapa,
cyapa_sleep_time_to_pwr_cmd(sleep_time));
}
- return 0;
+out:
+ cyapa_reset_pip_pm_state(cyapa);
+ return error;
}
int cyapa_pip_resume_scanning(struct cyapa *cyapa)
@@ -2513,7 +2563,7 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
* the device state is required.
*/
error = cyapa_gen5_set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
if (error)
dev_warn(dev, "%s: failed to set power active mode.\n",
__func__);
@@ -2715,7 +2765,6 @@ int cyapa_pip_irq_handler(struct cyapa *cyapa)
struct device *dev = &cyapa->client->dev;
struct cyapa_pip_report_data report_data;
unsigned int report_len;
- u8 report_id;
int ret;
if (!cyapa_is_pip_app_mode(cyapa)) {
@@ -2752,7 +2801,23 @@ int cyapa_pip_irq_handler(struct cyapa *cyapa)
return -EINVAL;
}
- report_id = report_data.report_head[PIP_RESP_REPORT_ID_OFFSET];
+ return cyapa_pip_event_process(cyapa, &report_data);
+}
+
+static int cyapa_pip_event_process(struct cyapa *cyapa,
+ struct cyapa_pip_report_data *report_data)
+{
+ struct device *dev = &cyapa->client->dev;
+ unsigned int report_len;
+ u8 report_id;
+
+ report_len = get_unaligned_le16(
+ &report_data->report_head[PIP_RESP_LENGTH_OFFSET]);
+ /* Idle, no data for report. */
+ if (report_len == PIP_RESP_LENGTH_SIZE)
+ return 0;
+
+ report_id = report_data->report_head[PIP_RESP_REPORT_ID_OFFSET];
if (report_id == PIP_WAKEUP_EVENT_REPORT_ID &&
report_len == PIP_WAKEUP_EVENT_SIZE) {
/*
@@ -2805,11 +2870,11 @@ int cyapa_pip_irq_handler(struct cyapa *cyapa)
}
if (report_id == PIP_TOUCH_REPORT_ID)
- cyapa_pip_report_touches(cyapa, &report_data);
+ cyapa_pip_report_touches(cyapa, report_data);
else if (report_id == PIP_PROXIMITY_REPORT_ID)
- cyapa_pip_report_proximity(cyapa, &report_data);
+ cyapa_pip_report_proximity(cyapa, report_data);
else
- cyapa_pip_report_buttons(cyapa, &report_data);
+ cyapa_pip_report_buttons(cyapa, report_data);
return 0;
}
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index e4eb048d1bf6..016397850b1b 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -425,7 +425,7 @@ static int cyapa_gen6_deep_sleep(struct cyapa *cyapa, u8 state)
}
static int cyapa_gen6_set_power_mode(struct cyapa *cyapa,
- u8 power_mode, u16 sleep_time, bool is_suspend)
+ u8 power_mode, u16 sleep_time, enum cyapa_pm_stage pm_stage)
{
struct device *dev = &cyapa->client->dev;
struct gen6_interval_setting *interval_setting =
@@ -689,7 +689,7 @@ static int cyapa_gen6_operational_check(struct cyapa *cyapa)
* the device state is required.
*/
error = cyapa_gen6_set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
if (error)
dev_warn(dev, "%s: failed to set power active mode.\n",
__func__);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index b9e4ee34c132..5784e20542a4 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -37,6 +37,7 @@
#include "cypress_ps2.h"
#include "focaltech.h"
#include "vmmouse.h"
+#include "byd.h"
#define DRIVER_DESC "PS/2 mouse driver"
@@ -842,6 +843,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.init = vmmouse_init,
},
#endif
+#ifdef CONFIG_MOUSE_PS2_BYD
+ {
+ .type = PSMOUSE_BYD,
+ .name = "BYDPS/2",
+ .alias = "byd",
+ .detect = byd_detect,
+ .init = byd_init,
+ },
+#endif
{
.type = PSMOUSE_AUTO,
.name = "auto",
@@ -1105,6 +1115,10 @@ static int psmouse_extensions(struct psmouse *psmouse,
if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
&max_proto, set_properties, true))
return PSMOUSE_TOUCHKIT_PS2;
+
+ if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
+ &max_proto, set_properties, true))
+ return PSMOUSE_BYD;
}
/*
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index ad5a5a1ea872..e0ca6cda3d16 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -104,6 +104,7 @@ enum psmouse_type {
PSMOUSE_CYPRESS,
PSMOUSE_FOCALTECH,
PSMOUSE_VMMOUSE,
+ PSMOUSE_BYD,
PSMOUSE_AUTO /* This one should always be last */
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 6025eb430c0a..a41d8328c064 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
return;
- /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
- if (SYN_ID_FULL(priv->identity) == 0x801 &&
+ /* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
+ if ((SYN_ID_FULL(priv->identity) == 0x801 ||
+ SYN_ID_FULL(priv->identity) == 0x802) &&
!((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
return;
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
new file mode 100644
index 000000000000..f73df2495fed
--- /dev/null
+++ b/drivers/input/rmi4/Kconfig
@@ -0,0 +1,63 @@
+#
+# RMI4 configuration
+#
+config RMI4_CORE
+ tristate "Synaptics RMI4 bus support"
+ help
+ Say Y here if you want to support the Synaptics RMI4 bus. This is
+ required for all RMI4 device support.
+
+ If unsure, say Y.
+
+config RMI4_I2C
+ tristate "RMI4 I2C Support"
+ depends on RMI4_CORE && I2C
+ help
+ Say Y here if you want to support RMI4 devices connected to an I2C
+ bus.
+
+ If unsure, say Y.
+
+config RMI4_SPI
+ tristate "RMI4 SPI Support"
+ depends on RMI4_CORE && SPI
+ help
+	  Say Y here if you want to support RMI4 devices connected to an SPI
+ bus.
+
+ If unsure, say N.
+
+config RMI4_2D_SENSOR
+ bool
+ depends on RMI4_CORE
+
+config RMI4_F11
+ bool "RMI4 Function 11 (2D pointing)"
+ select RMI4_2D_SENSOR
+ depends on RMI4_CORE
+ help
+ Say Y here if you want to add support for RMI4 function 11.
+
+ Function 11 provides 2D multifinger pointing for touchscreens and
+ touchpads. For sensors that support relative pointing, F11 also
+ provides mouse input.
+
+config RMI4_F12
+ bool "RMI4 Function 12 (2D pointing)"
+ select RMI4_2D_SENSOR
+ depends on RMI4_CORE
+ help
+ Say Y here if you want to add support for RMI4 function 12.
+
+ Function 12 provides 2D multifinger pointing for touchscreens and
+ touchpads. For sensors that support relative pointing, F12 also
+ provides mouse input.
+
+config RMI4_F30
+ bool "RMI4 Function 30 (GPIO LED)"
+ depends on RMI4_CORE
+ help
+ Say Y here if you want to add support for RMI4 function 30.
+
+ Function 30 provides GPIO and LED support for RMI4 devices. This
+ includes support for buttons on TouchPads and ClickPads.
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
new file mode 100644
index 000000000000..95c00a783992
--- /dev/null
+++ b/drivers/input/rmi4/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_RMI4_CORE) += rmi_core.o
+rmi_core-y := rmi_bus.o rmi_driver.o rmi_f01.o
+
+rmi_core-$(CONFIG_RMI4_2D_SENSOR) += rmi_2d_sensor.o
+
+# Function drivers
+rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
+rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
+rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
+
+# Transports
+obj-$(CONFIG_RMI4_I2C) += rmi_i2c.o
+obj-$(CONFIG_RMI4_SPI) += rmi_spi.o
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
new file mode 100644
index 000000000000..e97bd7fabccc
--- /dev/null
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/rmi.h>
+#include "rmi_driver.h"
+#include "rmi_2d_sensor.h"
+
+#define RMI_2D_REL_POS_MIN -128
+#define RMI_2D_REL_POS_MAX 127
+
+/* maximum ABS_MT_POSITION displacement (in mm) */
+#define DMAX 10
+
+void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ int slot)
+{
+ struct rmi_2d_axis_alignment *axis_align = &sensor->axis_align;
+
+ /* we keep the previous values if the finger is released */
+ if (obj->type == RMI_2D_OBJECT_NONE)
+ return;
+
+ if (axis_align->swap_axes)
+ swap(obj->x, obj->y);
+
+ if (axis_align->flip_x)
+ obj->x = sensor->max_x - obj->x;
+
+ if (axis_align->flip_y)
+ obj->y = sensor->max_y - obj->y;
+
+ /*
+	 * Checking here whether the X or Y offset is specified would be
+	 * redundant; we simply add the offsets and clip the values.
+	 *
+	 * Note: offsets need to be applied before clipping occurs, or we
+	 * could get funny values that are outside of the clipping
+	 * boundaries.
+ */
+ obj->x += axis_align->offset_x;
+ obj->y += axis_align->offset_y;
+
+ obj->x = max(axis_align->clip_x_low, obj->x);
+ obj->y = max(axis_align->clip_y_low, obj->y);
+
+ if (axis_align->clip_x_high)
+ obj->x = min(sensor->max_x, obj->x);
+
+ if (axis_align->clip_y_high)
+ obj->y = min(sensor->max_y, obj->y);
+
+ sensor->tracking_pos[slot].x = obj->x;
+ sensor->tracking_pos[slot].y = obj->y;
+}
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_abs_process);
+
+void rmi_2d_sensor_abs_report(struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ int slot)
+{
+ struct rmi_2d_axis_alignment *axis_align = &sensor->axis_align;
+ struct input_dev *input = sensor->input;
+ int wide, major, minor;
+
+ if (sensor->kernel_tracking)
+ input_mt_slot(input, sensor->tracking_slots[slot]);
+ else
+ input_mt_slot(input, slot);
+
+ input_mt_report_slot_state(input, obj->mt_tool,
+ obj->type != RMI_2D_OBJECT_NONE);
+
+ if (obj->type != RMI_2D_OBJECT_NONE) {
+ obj->x = sensor->tracking_pos[slot].x;
+ obj->y = sensor->tracking_pos[slot].y;
+
+ if (axis_align->swap_axes)
+ swap(obj->wx, obj->wy);
+
+ wide = (obj->wx > obj->wy);
+ major = max(obj->wx, obj->wy);
+ minor = min(obj->wx, obj->wy);
+
+ if (obj->type == RMI_2D_OBJECT_STYLUS) {
+ major = max(1, major);
+ minor = max(1, minor);
+ }
+
+ input_event(sensor->input, EV_ABS, ABS_MT_POSITION_X, obj->x);
+ input_event(sensor->input, EV_ABS, ABS_MT_POSITION_Y, obj->y);
+ input_event(sensor->input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(sensor->input, EV_ABS, ABS_MT_PRESSURE, obj->z);
+ input_event(sensor->input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
+ input_event(sensor->input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
+
+ rmi_dbg(RMI_DEBUG_2D_SENSOR, &sensor->input->dev,
+ "%s: obj[%d]: type: 0x%02x X: %d Y: %d Z: %d WX: %d WY: %d\n",
+ __func__, slot, obj->type, obj->x, obj->y, obj->z,
+ obj->wx, obj->wy);
+ }
+}
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_abs_report);
+
+void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
+{
+ struct rmi_2d_axis_alignment *axis_align = &sensor->axis_align;
+
+ x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
+ y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
+
+ if (axis_align->swap_axes)
+ swap(x, y);
+
+ if (axis_align->flip_x)
+ x = min(RMI_2D_REL_POS_MAX, -x);
+
+ if (axis_align->flip_y)
+ y = min(RMI_2D_REL_POS_MAX, -y);
+
+ if (x || y) {
+ input_report_rel(sensor->input, REL_X, x);
+ input_report_rel(sensor->input, REL_Y, y);
+ }
+}
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_rel_report);
+
+static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
+{
+ struct input_dev *input = sensor->input;
+ int res_x;
+ int res_y;
+ int input_flags = 0;
+
+ if (sensor->report_abs) {
+ if (sensor->axis_align.swap_axes)
+ swap(sensor->max_x, sensor->max_y);
+
+ sensor->min_x = sensor->axis_align.clip_x_low;
+ if (sensor->axis_align.clip_x_high)
+ sensor->max_x = min(sensor->max_x,
+ sensor->axis_align.clip_x_high);
+
+ sensor->min_y = sensor->axis_align.clip_y_low;
+ if (sensor->axis_align.clip_y_high)
+ sensor->max_y = min(sensor->max_y,
+ sensor->axis_align.clip_y_high);
+
+ set_bit(EV_ABS, input->evbit);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
+ 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
+ 0, 0);
+
+ if (sensor->x_mm && sensor->y_mm) {
+ res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
+ res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
+
+ input_abs_set_res(input, ABS_X, res_x);
+ input_abs_set_res(input, ABS_Y, res_y);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
+
+ if (!sensor->dmax)
+ sensor->dmax = DMAX * res_x;
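+			/*
+			 * DMAX is in mm; multiplying by res_x (units/mm)
+			 * converts it to device units.
+			 */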
+ }
+
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ if (sensor->sensor_type == rmi_sensor_touchpad)
+ input_flags = INPUT_MT_POINTER;
+ else
+ input_flags = INPUT_MT_DIRECT;
+
+ if (sensor->kernel_tracking)
+ input_flags |= INPUT_MT_TRACK;
+
+ input_mt_init_slots(input, sensor->nbr_fingers, input_flags);
+ }
+
+ if (sensor->report_rel) {
+ set_bit(EV_REL, input->evbit);
+ set_bit(REL_X, input->relbit);
+ set_bit(REL_Y, input->relbit);
+ }
+
+ if (sensor->topbuttonpad)
+ set_bit(INPUT_PROP_TOPBUTTONPAD, input->propbit);
+}
+
+int rmi_2d_sensor_configure_input(struct rmi_function *fn,
+ struct rmi_2d_sensor *sensor)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+
+ if (!drv_data->input)
+ return -ENODEV;
+
+ sensor->input = drv_data->input;
+ rmi_2d_sensor_set_input_params(sensor);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_configure_input);
+
+#ifdef CONFIG_OF
+int rmi_2d_sensor_of_probe(struct device *dev,
+ struct rmi_2d_sensor_platform_data *pdata)
+{
+ int retval;
+ u32 val;
+
+ pdata->axis_align.swap_axes = of_property_read_bool(dev->of_node,
+ "touchscreen-swapped-x-y");
+
+ pdata->axis_align.flip_x = of_property_read_bool(dev->of_node,
+ "touchscreen-inverted-x");
+
+ pdata->axis_align.flip_y = of_property_read_bool(dev->of_node,
+ "touchscreen-inverted-y");
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,clip-x-low", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.clip_x_low = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,clip-y-low", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.clip_y_low = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,clip-x-high", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.clip_x_high = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,clip-y-high", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.clip_y_high = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,offset-x", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.offset_x = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,offset-y", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.offset_y = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,delta-x-threshold",
+ 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.delta_x_threshold = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,delta-y-threshold",
+ 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.delta_y_threshold = val;
+
+ retval = rmi_of_property_read_u32(dev, (u32 *)&pdata->sensor_type,
+ "syna,sensor-type", 1);
+ if (retval)
+ return retval;
+
+ retval = rmi_of_property_read_u32(dev, &val, "touchscreen-x-mm", 1);
+ if (retval)
+ return retval;
+
+ pdata->x_mm = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "touchscreen-y-mm", 1);
+ if (retval)
+ return retval;
+
+ pdata->y_mm = val;
+
+ retval = rmi_of_property_read_u32(dev, &val,
+ "syna,disable-report-mask", 1);
+ if (retval)
+ return retval;
+
+ pdata->disable_report_mask = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,rezero-wait-ms",
+ 1);
+ if (retval)
+ return retval;
+
+ pdata->rezero_wait = val;
+
+ return 0;
+}
+#else
+inline int rmi_2d_sensor_of_probe(struct device *dev,
+ struct rmi_2d_sensor_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_of_probe);
diff --git a/drivers/input/rmi4/rmi_2d_sensor.h b/drivers/input/rmi4/rmi_2d_sensor.h
new file mode 100644
index 000000000000..77fcdfef003c
--- /dev/null
+++ b/drivers/input/rmi4/rmi_2d_sensor.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_2D_SENSOR_H
+#define _RMI_2D_SENSOR_H
+
+enum rmi_2d_sensor_object_type {
+ RMI_2D_OBJECT_NONE,
+ RMI_2D_OBJECT_FINGER,
+ RMI_2D_OBJECT_STYLUS,
+ RMI_2D_OBJECT_PALM,
+ RMI_2D_OBJECT_UNCLASSIFIED,
+};
+
+struct rmi_2d_sensor_abs_object {
+ enum rmi_2d_sensor_object_type type;
+ int mt_tool;
+ u16 x;
+ u16 y;
+ u8 z;
+ u8 wx;
+ u8 wy;
+};
+
+/**
+ * @axis_align - controls parameters that are useful in system prototyping
+ * and bring up.
+ * @max_x - The maximum X coordinate that will be reported by this sensor.
+ * @max_y - The maximum Y coordinate that will be reported by this sensor.
+ * @nbr_fingers - How many fingers can this sensor report?
+ * @data_pkt - buffer for data reported by this sensor.
+ * @pkt_size - number of bytes in that buffer.
+ * @attn_size - Size of the HID attention report (only contains abs data).
+ * @kernel_tracking - use kernel tracking for sensors that cannot track
+ * position when two fingers are on the device. When this is true, we
+ * assume we have one of those sensors and report events appropriately.
+ * @sensor_type - indicates whether we're touchscreen or touchpad.
+ * @input - input device for absolute pointing stream
+ * @input_phys - buffer for the absolute phys name for this sensor.
+ */
+struct rmi_2d_sensor {
+ struct rmi_2d_axis_alignment axis_align;
+ struct input_mt_pos *tracking_pos;
+ int *tracking_slots;
+ bool kernel_tracking;
+ struct rmi_2d_sensor_abs_object *objs;
+ int dmax;
+ u16 min_x;
+ u16 max_x;
+ u16 min_y;
+ u16 max_y;
+ u8 nbr_fingers;
+ u8 *data_pkt;
+ int pkt_size;
+ int attn_size;
+ bool topbuttonpad;
+ enum rmi_sensor_type sensor_type;
+ struct input_dev *input;
+ struct rmi_function *fn;
+ char input_phys[32];
+ u8 report_abs;
+ u8 report_rel;
+ u8 x_mm;
+ u8 y_mm;
+};
+
+int rmi_2d_sensor_of_probe(struct device *dev,
+ struct rmi_2d_sensor_platform_data *pdata);
+
+void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ int slot);
+
+void rmi_2d_sensor_abs_report(struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ int slot);
+
+void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y);
+
+int rmi_2d_sensor_configure_input(struct rmi_function *fn,
+ struct rmi_2d_sensor *sensor);
+#endif /* _RMI_2D_SENSOR_H */
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
new file mode 100644
index 000000000000..b368b0515c5a
--- /dev/null
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/pm.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include "rmi_bus.h"
+#include "rmi_driver.h"
+
+static int debug_flags;
+module_param(debug_flags, int, 0644);
+MODULE_PARM_DESC(debug_flags, "control debugging information");
+
+void rmi_dbg(int flags, struct device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (flags & debug_flags) {
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ dev_printk(KERN_DEBUG, dev, "%pV", &vaf);
+
+ va_end(args);
+ }
+}
+EXPORT_SYMBOL_GPL(rmi_dbg);
+
+/*
+ * RMI Physical devices
+ *
+ * Physical RMI device consists of several functions serving particular
+ * purpose. For example F11 is a 2D touch sensor while F01 is a generic
+ * function present in every RMI device.
+ */
+
+static void rmi_release_device(struct device *dev)
+{
+ struct rmi_device *rmi_dev = to_rmi_device(dev);
+
+ kfree(rmi_dev);
+}
+
+static struct device_type rmi_device_type = {
+ .name = "rmi4_sensor",
+ .release = rmi_release_device,
+};
+
+bool rmi_is_physical_device(struct device *dev)
+{
+ return dev->type == &rmi_device_type;
+}
+
+/**
+ * rmi_register_transport_device - register a transport device connection
+ * on the RMI bus. Transport drivers provide communication from the devices
+ * on a bus (such as SPI, I2C, and so on) to the RMI4 sensor.
+ *
+ * @xport: the transport device to register
+ */
+int rmi_register_transport_device(struct rmi_transport_dev *xport)
+{
+ static atomic_t transport_device_count = ATOMIC_INIT(0);
+ struct rmi_device *rmi_dev;
+ int error;
+
+ rmi_dev = kzalloc(sizeof(struct rmi_device), GFP_KERNEL);
+ if (!rmi_dev)
+ return -ENOMEM;
+
+ device_initialize(&rmi_dev->dev);
+
+ rmi_dev->xport = xport;
+ rmi_dev->number = atomic_inc_return(&transport_device_count) - 1;
+
+ dev_set_name(&rmi_dev->dev, "rmi4-%02d", rmi_dev->number);
+
+ rmi_dev->dev.bus = &rmi_bus_type;
+ rmi_dev->dev.type = &rmi_device_type;
+
+ xport->rmi_dev = rmi_dev;
+
+ error = device_add(&rmi_dev->dev);
+ if (error)
+ goto err_put_device;
+
+ rmi_dbg(RMI_DEBUG_CORE, xport->dev,
+ "%s: Registered %s as %s.\n", __func__,
+ dev_name(rmi_dev->xport->dev), dev_name(&rmi_dev->dev));
+
+ return 0;
+
+err_put_device:
+ put_device(&rmi_dev->dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(rmi_register_transport_device);
+
+/**
+ * rmi_unregister_transport_device - unregister a transport device connection
+ * @xport: the transport driver to unregister
+ *
+ */
+void rmi_unregister_transport_device(struct rmi_transport_dev *xport)
+{
+ struct rmi_device *rmi_dev = xport->rmi_dev;
+
+ device_del(&rmi_dev->dev);
+ put_device(&rmi_dev->dev);
+}
+EXPORT_SYMBOL(rmi_unregister_transport_device);
+
+
+/* Function specific stuff */
+
+static void rmi_release_function(struct device *dev)
+{
+ struct rmi_function *fn = to_rmi_function(dev);
+
+ kfree(fn);
+}
+
+static struct device_type rmi_function_type = {
+ .name = "rmi4_function",
+ .release = rmi_release_function,
+};
+
+bool rmi_is_function_device(struct device *dev)
+{
+ return dev->type == &rmi_function_type;
+}
+
+static int rmi_function_match(struct device *dev, struct device_driver *drv)
+{
+ struct rmi_function_handler *handler = to_rmi_function_handler(drv);
+ struct rmi_function *fn = to_rmi_function(dev);
+
+ return fn->fd.function_number == handler->func;
+}
+
+#ifdef CONFIG_OF
+static void rmi_function_of_probe(struct rmi_function *fn)
+{
+ char of_name[9];
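+	/* "rmi4-fXX" is eight characters plus the terminating NUL */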
+
+ snprintf(of_name, sizeof(of_name), "rmi4-f%02x",
+ fn->fd.function_number);
+ fn->dev.of_node = of_find_node_by_name(
+ fn->rmi_dev->xport->dev->of_node, of_name);
+}
+#else
+static inline void rmi_function_of_probe(struct rmi_function *fn)
+{}
+#endif
+
+static int rmi_function_probe(struct device *dev)
+{
+ struct rmi_function *fn = to_rmi_function(dev);
+ struct rmi_function_handler *handler =
+ to_rmi_function_handler(dev->driver);
+ int error;
+
+ rmi_function_of_probe(fn);
+
+ if (handler->probe) {
+ error = handler->probe(fn);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_function_remove(struct device *dev)
+{
+ struct rmi_function *fn = to_rmi_function(dev);
+ struct rmi_function_handler *handler =
+ to_rmi_function_handler(dev->driver);
+
+ if (handler->remove)
+ handler->remove(fn);
+
+ return 0;
+}
+
+int rmi_register_function(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error;
+
+ device_initialize(&fn->dev);
+
+ dev_set_name(&fn->dev, "%s.fn%02x",
+ dev_name(&rmi_dev->dev), fn->fd.function_number);
+
+ fn->dev.parent = &rmi_dev->dev;
+ fn->dev.type = &rmi_function_type;
+ fn->dev.bus = &rmi_bus_type;
+
+ error = device_add(&fn->dev);
+ if (error) {
+ dev_err(&rmi_dev->dev,
+ "Failed device_register function device %s\n",
+ dev_name(&fn->dev));
+ goto err_put_device;
+ }
+
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Registered F%02X.\n",
+ fn->fd.function_number);
+
+ return 0;
+
+err_put_device:
+ put_device(&fn->dev);
+ return error;
+}
+
+void rmi_unregister_function(struct rmi_function *fn)
+{
+ device_del(&fn->dev);
+
+ if (fn->dev.of_node)
+ of_node_put(fn->dev.of_node);
+
+ put_device(&fn->dev);
+}
+
+/**
+ * rmi_register_function_handler - register a handler for an RMI function
+ * @handler: RMI handler that should be registered.
+ * @owner: pointer to module that implements the handler
+ * @mod_name: name of the module implementing the handler
+ *
+ * This function performs additional setup of RMI function handler and
+ * registers it with the RMI core so that it can be bound to
+ * RMI function devices.
+ */
+int __rmi_register_function_handler(struct rmi_function_handler *handler,
+ struct module *owner,
+ const char *mod_name)
+{
+ struct device_driver *driver = &handler->driver;
+ int error;
+
+ driver->bus = &rmi_bus_type;
+ driver->owner = owner;
+ driver->mod_name = mod_name;
+ driver->probe = rmi_function_probe;
+ driver->remove = rmi_function_remove;
+
+ error = driver_register(&handler->driver);
+ if (error) {
+ pr_err("driver_register() failed for %s, error: %d\n",
+ handler->driver.name, error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__rmi_register_function_handler);
+
+/**
+ * rmi_unregister_function_handler - unregister given RMI function handler
+ * @handler: RMI handler that should be unregistered.
+ *
+ * This function unregisters given function handler from RMI core which
+ * causes it to be unbound from the function devices.
+ */
+void rmi_unregister_function_handler(struct rmi_function_handler *handler)
+{
+ driver_unregister(&handler->driver);
+}
+EXPORT_SYMBOL_GPL(rmi_unregister_function_handler);
+
+/* Bus specific stuff */
+
+static int rmi_bus_match(struct device *dev, struct device_driver *drv)
+{
+ bool physical = rmi_is_physical_device(dev);
+
+ /* First see if types are not compatible */
+ if (physical != rmi_is_physical_driver(drv))
+ return 0;
+
+ return physical || rmi_function_match(dev, drv);
+}
+
+struct bus_type rmi_bus_type = {
+ .match = rmi_bus_match,
+ .name = "rmi4",
+};
+
+static struct rmi_function_handler *fn_handlers[] = {
+ &rmi_f01_handler,
+#ifdef CONFIG_RMI4_F11
+ &rmi_f11_handler,
+#endif
+#ifdef CONFIG_RMI4_F12
+ &rmi_f12_handler,
+#endif
+#ifdef CONFIG_RMI4_F30
+ &rmi_f30_handler,
+#endif
+};
+
+static void __rmi_unregister_function_handlers(int start_idx)
+{
+ int i;
+
+ for (i = start_idx; i >= 0; i--)
+ rmi_unregister_function_handler(fn_handlers[i]);
+}
+
+static void rmi_unregister_function_handlers(void)
+{
+ __rmi_unregister_function_handlers(ARRAY_SIZE(fn_handlers) - 1);
+}
+
+static int rmi_register_function_handlers(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fn_handlers); i++) {
+ ret = rmi_register_function_handler(fn_handlers[i]);
+ if (ret) {
+ pr_err("%s: error registering the RMI F%02x handler: %d\n",
+ __func__, fn_handlers[i]->func, ret);
+ goto err_unregister_function_handlers;
+ }
+ }
+
+ return 0;
+
+err_unregister_function_handlers:
+ __rmi_unregister_function_handlers(i - 1);
+ return ret;
+}
+
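+/*
+ * Read a u32 property from the device tree node. Only a missing
+ * (-EINVAL) mandatory property is treated as an error; otherwise *result
+ * is set to the value read (or to 0) and 0 is returned.
+ */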
+int rmi_of_property_read_u32(struct device *dev, u32 *result,
+ const char *prop, bool optional)
+{
+ int retval;
+ u32 val = 0;
+
+ retval = of_property_read_u32(dev->of_node, prop, &val);
+ if (retval && (!optional && retval == -EINVAL)) {
+ dev_err(dev, "Failed to get %s value: %d\n",
+ prop, retval);
+ return retval;
+ }
+ *result = val;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rmi_of_property_read_u32);
+
+static int __init rmi_bus_init(void)
+{
+ int error;
+
+ error = bus_register(&rmi_bus_type);
+ if (error) {
+ pr_err("%s: error registering the RMI bus: %d\n",
+ __func__, error);
+ return error;
+ }
+
+ error = rmi_register_function_handlers();
+ if (error)
+ goto err_unregister_bus;
+
+ error = rmi_register_physical_driver();
+ if (error) {
+ pr_err("%s: error registering the RMI physical driver: %d\n",
+ __func__, error);
+ goto err_unregister_bus;
+ }
+
+ return 0;
+
+err_unregister_bus:
+ bus_unregister(&rmi_bus_type);
+ return error;
+}
+module_init(rmi_bus_init);
+
+static void __exit rmi_bus_exit(void)
+{
+ /*
+ * We should only ever get here if all drivers are unloaded, so
+ * all we have to do at this point is unregister ourselves.
+ */
+
+ rmi_unregister_physical_driver();
+ rmi_unregister_function_handlers();
+ bus_unregister(&rmi_bus_type);
+}
+module_exit(rmi_bus_exit);
+
+MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com");
+MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com");
+MODULE_DESCRIPTION("RMI bus");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RMI_DRIVER_VERSION);
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
new file mode 100644
index 000000000000..899579830536
--- /dev/null
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_BUS_H
+#define _RMI_BUS_H
+
+#include <linux/rmi.h>
+
+struct rmi_device;
+
+/**
+ * struct rmi_function - represents the implementation of an RMI4
+ * function for a particular device (basically, a driver for that RMI4 function)
+ *
+ * @fd: The function descriptor of the RMI function
+ * @rmi_dev: Pointer to the RMI device associated with this function container
+ * @dev: The device associated with this particular function.
+ *
+ * @num_of_irqs: The number of irqs needed by this function
+ * @irq_pos: The position in the irq bitfield this function holds
+ * @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN
+ * interrupt handling.
+ *
+ * @node: entry in device's list of functions
+ */
+struct rmi_function {
+ struct rmi_function_descriptor fd;
+ struct rmi_device *rmi_dev;
+ struct device dev;
+ struct list_head node;
+
+ unsigned int num_of_irqs;
+ unsigned int irq_pos;
+ unsigned long irq_mask[];
+};
+
+#define to_rmi_function(d) container_of(d, struct rmi_function, dev)
+
+bool rmi_is_function_device(struct device *dev);
+
+int __must_check rmi_register_function(struct rmi_function *);
+void rmi_unregister_function(struct rmi_function *);
+
+/**
+ * struct rmi_function_handler - driver routines for a particular RMI function.
+ *
+ * @func: The RMI function number
+ * @reset: Called when a reset of the touch sensor is detected. The routine
+ * should perform any out-of-the-ordinary reset handling that might be
+ * necessary. Restoring of touch sensor configuration registers should be
+ * handled in the config() callback, below.
+ * @config: Called when the function container is first initialized, and
+ * after a reset is detected. This routine should write any necessary
+ * configuration settings to the device.
+ * @attention: Called when the IRQ(s) for the function are set by the touch
+ * sensor.
+ * @suspend: Should perform any required operations to suspend the particular
+ * function.
+ * @resume: Should perform any required operations to resume the particular
+ * function.
+ *
+ * All callbacks are expected to return 0 on success, error code on failure.
+ */
+struct rmi_function_handler {
+ struct device_driver driver;
+
+ u8 func;
+
+ int (*probe)(struct rmi_function *fn);
+ void (*remove)(struct rmi_function *fn);
+ int (*config)(struct rmi_function *fn);
+ int (*reset)(struct rmi_function *fn);
+ int (*attention)(struct rmi_function *fn, unsigned long *irq_bits);
+ int (*suspend)(struct rmi_function *fn);
+ int (*resume)(struct rmi_function *fn);
+};
+
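+/*
+ * Illustrative sketch (not part of the original patch): a minimal handler
+ * for a hypothetical function F99 might be declared as:
+ *
+ *	static struct rmi_function_handler rmi_f99_handler = {
+ *		.driver	= { .name = "rmi4_f99" },
+ *		.func	= 0x99,
+ *		.probe	= rmi_f99_probe,
+ *	};
+ *
+ * and registered with rmi_register_function_handler(&rmi_f99_handler).
+ */
+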
+#define to_rmi_function_handler(d) \
+ container_of(d, struct rmi_function_handler, driver)
+
+int __must_check __rmi_register_function_handler(struct rmi_function_handler *,
+ struct module *, const char *);
+#define rmi_register_function_handler(handler) \
+ __rmi_register_function_handler(handler, THIS_MODULE, KBUILD_MODNAME)
+
+void rmi_unregister_function_handler(struct rmi_function_handler *);
+
+#define to_rmi_driver(d) \
+ container_of(d, struct rmi_driver, driver)
+
+#define to_rmi_device(d) container_of(d, struct rmi_device, dev)
+
+static inline struct rmi_device_platform_data *
+rmi_get_platform_data(struct rmi_device *d)
+{
+ return &d->xport->pdata;
+}
+
+bool rmi_is_physical_device(struct device *dev);
+
+/**
+ * rmi_read - read a single byte
+ * @d: Pointer to an RMI device
+ * @addr: The address to read from
+ * @buf: The read buffer
+ *
+ * Reads a single byte of data using the underlying transport protocol
+ * into the memory pointed to by @buf. It returns zero on success or a
+ * negative error code.
+ */
+static inline int rmi_read(struct rmi_device *d, u16 addr, u8 *buf)
+{
+ return d->xport->ops->read_block(d->xport, addr, buf, 1);
+}
+
+/**
+ * rmi_read_block - read a block of bytes
+ * @d: Pointer to an RMI device
+ * @addr: The start address to read from
+ * @buf: The read buffer
+ * @len: Length of the read buffer
+ *
+ * Reads a block of byte data using the underlying transport protocol
+ * into the memory pointed to by @buf. It returns zero on success or a
+ * negative error code.
+ */
+static inline int rmi_read_block(struct rmi_device *d, u16 addr,
+ void *buf, size_t len)
+{
+ return d->xport->ops->read_block(d->xport, addr, buf, len);
+}
+
+/**
+ * rmi_write - write a single byte
+ * @d: Pointer to an RMI device
+ * @addr: The address to write to
+ * @data: The data to write
+ *
+ * Writes a single byte using the underlying transport protocol. It
+ * returns zero on success or a negative error code.
+ */
+static inline int rmi_write(struct rmi_device *d, u16 addr, u8 data)
+{
+ return d->xport->ops->write_block(d->xport, addr, &data, 1);
+}
+
+/**
+ * rmi_write_block - write a block of bytes
+ * @d: Pointer to an RMI device
+ * @addr: The start address to write to
+ * @buf: The write buffer
+ * @len: Length of the write buffer
+ *
+ * Writes a block of byte data from @buf using the underlying transport
+ * protocol. It returns zero on success or a negative error code.
+ */
+static inline int rmi_write_block(struct rmi_device *d, u16 addr,
+ const void *buf, size_t len)
+{
+ return d->xport->ops->write_block(d->xport, addr, buf, len);
+}
+
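+/*
+ * Illustrative usage (not from the original patch), with status_addr
+ * standing in for a real register address:
+ *
+ *	u8 status;
+ *	int err = rmi_read(rmi_dev, status_addr, &status);
+ *	if (!err)
+ *		err = rmi_write(rmi_dev, status_addr, status);
+ */
+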
+int rmi_for_each_dev(void *data, int (*func)(struct device *dev, void *data));
+
+extern struct bus_type rmi_bus_type;
+
+int rmi_of_property_read_u32(struct device *dev, u32 *result,
+ const char *prop, bool optional);
+
+#define RMI_DEBUG_CORE BIT(0)
+#define RMI_DEBUG_XPORT BIT(1)
+#define RMI_DEBUG_FN BIT(2)
+#define RMI_DEBUG_2D_SENSOR BIT(3)
+
+void rmi_dbg(int flags, struct device *dev, const char *fmt, ...);
+#endif
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
new file mode 100644
index 000000000000..faa295ec4f31
--- /dev/null
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -0,0 +1,1054 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This driver provides the core support for a single RMI4-based device.
+ *
+ * The RMI4 specification can be found here (URL split for line length):
+ *
+ * http://www.synaptics.com/sites/default/files/
+ * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/kconfig.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <uapi/linux/input.h>
+#include <linux/rmi.h>
+#include "rmi_bus.h"
+#include "rmi_driver.h"
+
+#define HAS_NONSTANDARD_PDT_MASK 0x40
+#define RMI4_MAX_PAGE 0xff
+#define RMI4_PAGE_SIZE 0x100
+#define RMI4_PAGE_MASK 0xFF00
+
+#define RMI_DEVICE_RESET_CMD 0x01
+#define DEFAULT_RESET_DELAY_MS 100
+
+static void rmi_free_function_list(struct rmi_device *rmi_dev)
+{
+ struct rmi_function *fn, *tmp;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+
+ data->f01_container = NULL;
+
+ /* Iterate in reverse order so that F01 is removed last */
+ list_for_each_entry_safe_reverse(fn, tmp,
+ &data->function_list, node) {
+ list_del(&fn->node);
+ rmi_unregister_function(fn);
+ }
+}
+
+static int reset_one_function(struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+ int retval = 0;
+
+ if (!fn || !fn->dev.driver)
+ return 0;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->reset) {
+ retval = fh->reset(fn);
+ if (retval < 0)
+ dev_err(&fn->dev, "Reset failed with code %d.\n",
+ retval);
+ }
+
+ return retval;
+}
+
+static int configure_one_function(struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+ int retval = 0;
+
+ if (!fn || !fn->dev.driver)
+ return 0;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->config) {
+ retval = fh->config(fn);
+ if (retval < 0)
+ dev_err(&fn->dev, "Config failed with code %d.\n",
+ retval);
+ }
+
+ return retval;
+}
+
+static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_function *entry;
+ int retval;
+
+ list_for_each_entry(entry, &data->function_list, node) {
+ retval = reset_one_function(entry);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_function *entry;
+ int retval;
+
+ list_for_each_entry(entry, &data->function_list, node) {
+ retval = configure_one_function(entry);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static void process_one_interrupt(struct rmi_driver_data *data,
+ struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+
+ if (!fn || !fn->dev.driver)
+ return;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->attention) {
+ bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
+ data->irq_count);
+ if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
+ fh->attention(fn, data->fn_irq_bits);
+ }
+}
+
+int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct device *dev = &rmi_dev->dev;
+ struct rmi_function *entry;
+ int error;
+
+ if (!data)
+ return 0;
+
+ if (!rmi_dev->xport->attn_data) {
+ error = rmi_read_block(rmi_dev,
+ data->f01_container->fd.data_base_addr + 1,
+ data->irq_status, data->num_of_irq_regs);
+ if (error < 0) {
+ dev_err(dev, "Failed to read irqs, code=%d\n", error);
+ return error;
+ }
+ }
+
+ mutex_lock(&data->irq_mutex);
+ bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+ data->irq_count);
+ /*
+ * At this point, irq_status has all bits that are set in the
+ * interrupt status register and are enabled.
+ */
+ mutex_unlock(&data->irq_mutex);
+
+ /*
+ * It would be nice to be able to use irq_chip to handle these
+ * nested IRQs. Unfortunately, most of the current customers for
+ * this driver are using older kernels (3.0.x) that don't support
+ * the features required for that. Once they've shifted to more
+ * recent kernels (say, 3.3 and higher), this should be switched to
+ * use irq_chip.
+ */
+ list_for_each_entry(entry, &data->function_list, node)
+ process_one_interrupt(data, entry);
+
+ if (data->input)
+ input_sync(data->input);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rmi_process_interrupt_requests);
+
+static int suspend_one_function(struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+ int retval = 0;
+
+ if (!fn || !fn->dev.driver)
+ return 0;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->suspend) {
+ retval = fh->suspend(fn);
+ if (retval < 0)
+ dev_err(&fn->dev, "Suspend failed with code %d.\n",
+ retval);
+ }
+
+ return retval;
+}
+
+static int rmi_suspend_functions(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_function *entry;
+ int retval;
+
+ list_for_each_entry(entry, &data->function_list, node) {
+ retval = suspend_one_function(entry);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int resume_one_function(struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+ int retval = 0;
+
+ if (!fn || !fn->dev.driver)
+ return 0;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->resume) {
+ retval = fh->resume(fn);
+ if (retval < 0)
+ dev_err(&fn->dev, "Resume failed with code %d.\n",
+ retval);
+ }
+
+ return retval;
+}
+
+static int rmi_resume_functions(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_function *entry;
+ int retval;
+
+ list_for_each_entry(entry, &data->function_list, node) {
+ retval = resume_one_function(entry);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int enable_sensor(struct rmi_device *rmi_dev)
+{
+ int retval = 0;
+
+ retval = rmi_driver_process_config_requests(rmi_dev);
+ if (retval < 0)
+ return retval;
+
+ return rmi_process_interrupt_requests(rmi_dev);
+}
+
+/**
+ * rmi_driver_set_input_params - set input device id and other data.
+ *
+ * @rmi_dev: Pointer to an RMI device
+ * @input: Pointer to input device
+ */
+static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
+ struct input_dev *input)
+{
+ input->name = SYNAPTICS_INPUT_DEVICE_NAME;
+ input->id.vendor = SYNAPTICS_VENDOR_ID;
+ input->id.bustype = BUS_RMI;
+ return 0;
+}
+
+static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
+ struct input_dev *input)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ char *device_name = rmi_f01_get_product_ID(data->f01_container);
+ char *name;
+
+ name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
+ "Synaptics %s", device_name);
+ if (!name)
+ return;
+
+ input->name = name;
+}
+
+static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
+ unsigned long *mask)
+{
+ int error = 0;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct device *dev = &rmi_dev->dev;
+
+ mutex_lock(&data->irq_mutex);
+ bitmap_or(data->new_irq_mask,
+ data->current_irq_mask, mask, data->irq_count);
+
+ error = rmi_write_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->new_irq_mask, data->num_of_irq_regs);
+ if (error < 0) {
+ dev_err(dev, "%s: Failed to change enabled interrupts!",
+ __func__);
+ goto error_unlock;
+ }
+ bitmap_copy(data->current_irq_mask, data->new_irq_mask,
+ data->num_of_irq_regs);
+
+error_unlock:
+ mutex_unlock(&data->irq_mutex);
+ return error;
+}
+
+static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
+ unsigned long *mask)
+{
+ int error = 0;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct device *dev = &rmi_dev->dev;
+
+ mutex_lock(&data->irq_mutex);
+ bitmap_andnot(data->new_irq_mask,
+ data->current_irq_mask, mask, data->irq_count);
+
+ error = rmi_write_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->new_irq_mask, data->num_of_irq_regs);
+ if (error < 0) {
+ dev_err(dev, "%s: Failed to change enabled interrupts!",
+ __func__);
+ goto error_unlock;
+ }
+ bitmap_copy(data->current_irq_mask, data->new_irq_mask,
+ data->num_of_irq_regs);
+
+error_unlock:
+ mutex_unlock(&data->irq_mutex);
+ return error;
+}
+
+static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int error;
+
+ /*
+ * Can get called before the driver is fully ready to deal with
+ * this situation.
+ */
+ if (!data || !data->f01_container) {
+ dev_warn(&rmi_dev->dev,
+ "Not ready to handle reset yet!\n");
+ return 0;
+ }
+
+ error = rmi_read_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->current_irq_mask, data->num_of_irq_regs);
+ if (error < 0) {
+ dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
+ __func__);
+ return error;
+ }
+
+ error = rmi_driver_process_reset_requests(rmi_dev);
+ if (error < 0)
+ return error;
+
+ error = rmi_driver_process_config_requests(rmi_dev);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
+ u16 pdt_address)
+{
+ u8 buf[RMI_PDT_ENTRY_SIZE];
+ int error;
+
+ error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
+ if (error) {
+ dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
+ pdt_address, error);
+ return error;
+ }
+
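+ /*
+ * A PDT entry is six bytes: the query, command, control and data
+ * base addresses, a byte combining the interrupt source count and
+ * the function version, and finally the function number.
+ */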
+ entry->page_start = pdt_address & RMI4_PAGE_MASK;
+ entry->query_base_addr = buf[0];
+ entry->command_base_addr = buf[1];
+ entry->control_base_addr = buf[2];
+ entry->data_base_addr = buf[3];
+ entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
+ entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
+ entry->function_number = buf[5];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rmi_read_pdt_entry);
+
+static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
+ struct rmi_function_descriptor *fd)
+{
+ fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
+ fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
+ fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
+ fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
+ fd->function_number = pdt->function_number;
+ fd->interrupt_source_count = pdt->interrupt_source_count;
+ fd->function_version = pdt->function_version;
+}
+
+#define RMI_SCAN_CONTINUE 0
+#define RMI_SCAN_DONE 1
+
+static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
+ int page,
+ void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev,
+ void *ctx,
+ const struct pdt_entry *entry))
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct pdt_entry pdt_entry;
+ u16 page_start = RMI4_PAGE_SIZE * page;
+ u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
+ u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
+ u16 addr;
+ int error;
+ int retval;
+
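+ /*
+ * The PDT is scanned downward within each page: e.g. (illustrative)
+ * page 1 is scanned from 0x01e9 towards 0x0105 in six-byte steps.
+ */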
+ for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
+ error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
+ if (error)
+ return error;
+
+ if (RMI4_END_OF_PDT(pdt_entry.function_number))
+ break;
+
+ retval = callback(rmi_dev, ctx, &pdt_entry);
+ if (retval != RMI_SCAN_CONTINUE)
+ return retval;
+ }
+
+ return (data->f01_bootloader_mode || addr == pdt_start) ?
+ RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
+}
+
+static int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev,
+ void *ctx,
+ const struct pdt_entry *entry))
+{
+ int page;
+ int retval = RMI_SCAN_DONE;
+
+ for (page = 0; page <= RMI4_MAX_PAGE; page++) {
+ retval = rmi_scan_pdt_page(rmi_dev, page, ctx, callback);
+ if (retval != RMI_SCAN_CONTINUE)
+ break;
+ }
+
+ return retval < 0 ? retval : 0;
+}
+
+int rmi_read_register_desc(struct rmi_device *d, u16 addr,
+ struct rmi_register_descriptor *rdesc)
+{
+ int ret;
+ u8 size_presence_reg;
+ u8 buf[35];
+ int presense_offset = 1;
+ u8 *struct_buf;
+ int reg;
+ int offset = 0;
+ int map_offset = 0;
+ int i;
+ int b;
+
+ /*
+ * The first register of the register descriptor is the size of
+ * the register descriptor's presence register.
+ */
+ ret = rmi_read(d, addr, &size_presence_reg);
+ if (ret)
+ return ret;
+ ++addr;
+
+ /* size_presence_reg is a u8, so only the upper bound can be exceeded */
+ if (size_presence_reg > 35)
+ return -EIO;
+
+ memset(buf, 0, sizeof(buf));
+
+ /*
+ * The presence register contains the size of the register structure
+ * and a bitmap identifying which packet registers are present for
+ * this particular register type (i.e. query, control, or data).
+ */
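+ /*
+ * For example (illustrative): a presence buffer of { 0x05, 0x03 }
+ * encodes struct_size = 5 with packet registers 0 and 1 present.
+ */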
+ ret = rmi_read_block(d, addr, buf, size_presence_reg);
+ if (ret)
+ return ret;
+ ++addr;
+
+ if (buf[0] == 0) {
+ presense_offset = 3;
+ rdesc->struct_size = buf[1] | (buf[2] << 8);
+ } else {
+ rdesc->struct_size = buf[0];
+ }
+
+ for (i = presense_offset; i < size_presence_reg; i++) {
+ for (b = 0; b < 8; b++) {
+ if (buf[i] & (0x1 << b))
+ bitmap_set(rdesc->presense_map, map_offset, 1);
+ ++map_offset;
+ }
+ }
+
+ rdesc->num_registers = bitmap_weight(rdesc->presense_map,
+ RMI_REG_DESC_PRESENSE_BITS);
+
+ rdesc->registers = devm_kzalloc(&d->dev, rdesc->num_registers *
+ sizeof(struct rmi_register_desc_item),
+ GFP_KERNEL);
+ if (!rdesc->registers)
+ return -ENOMEM;
+
+ /*
+ * Allocate a temporary buffer to hold the register structure.
+ * devm_kzalloc is not used here because the buffer does not need
+ * to be retained after this function exits.
+ */
+ struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
+ if (!struct_buf)
+ return -ENOMEM;
+
+ /*
+ * The register structure contains information about every packet
+ * register of this type. This includes the size of the packet
+ * register and a bitmap of all subpackets contained in the packet
+ * register.
+ */
+ ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
+ if (ret)
+ goto free_struct_buff;
+
+ reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
+ map_offset = 0;
+ for (i = 0; i < rdesc->num_registers; i++) {
+ struct rmi_register_desc_item *item = &rdesc->registers[i];
+ int reg_size = struct_buf[offset];
+
+ ++offset;
+ if (reg_size == 0) {
+ reg_size = struct_buf[offset] |
+ (struct_buf[offset + 1] << 8);
+ offset += 2;
+ }
+
+ if (reg_size == 0) {
+ reg_size = struct_buf[offset] |
+ (struct_buf[offset + 1] << 8) |
+ (struct_buf[offset + 2] << 16) |
+ (struct_buf[offset + 3] << 24);
+ offset += 4;
+ }
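+ /*
+ * e.g. (illustrative): a size byte of 0x00 followed by
+ * 0x10 0x00 escapes to the two-byte form and encodes
+ * reg_size = 16; a second zero escapes to the four-byte form.
+ */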
+
+ item->reg = reg;
+ item->reg_size = reg_size;
+
+ do {
+ for (b = 0; b < 7; b++) {
+ if (struct_buf[offset] & (0x1 << b))
+ bitmap_set(item->subpacket_map,
+ map_offset, 1);
+ ++map_offset;
+ }
+ } while (struct_buf[offset++] & 0x80);
+
+ item->num_subpackets = bitmap_weight(item->subpacket_map,
+ RMI_REG_DESC_SUBPACKET_BITS);
+
+ rmi_dbg(RMI_DEBUG_CORE, &d->dev,
+ "%s: reg: %d reg size: %ld subpackets: %d\n", __func__,
+ item->reg, item->reg_size, item->num_subpackets);
+
+ reg = find_next_bit(rdesc->presense_map,
+ RMI_REG_DESC_PRESENSE_BITS, reg + 1);
+ }
+
+free_struct_buff:
+ kfree(struct_buf);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rmi_read_register_desc);
+
+const struct rmi_register_desc_item *rmi_get_register_desc_item(
+ struct rmi_register_descriptor *rdesc, u16 reg)
+{
+ const struct rmi_register_desc_item *item;
+ int i;
+
+ for (i = 0; i < rdesc->num_registers; i++) {
+ item = &rdesc->registers[i];
+ if (item->reg == reg)
+ return item;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rmi_get_register_desc_item);
+
+size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
+{
+ const struct rmi_register_desc_item *item;
+ int i;
+ size_t size = 0;
+
+ for (i = 0; i < rdesc->num_registers; i++) {
+ item = &rdesc->registers[i];
+ size += item->reg_size;
+ }
+ return size;
+}
+EXPORT_SYMBOL_GPL(rmi_register_desc_calc_size);
+
+/* Compute the register offset relative to the base address */
+int rmi_register_desc_calc_reg_offset(
+ struct rmi_register_descriptor *rdesc, u16 reg)
+{
+ const struct rmi_register_desc_item *item;
+ int offset = 0;
+ int i;
+
+ for (i = 0; i < rdesc->num_registers; i++) {
+ item = &rdesc->registers[i];
+ if (item->reg == reg)
+ return offset;
+ ++offset;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(rmi_register_desc_calc_reg_offset);
+
+bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
+ u8 subpacket)
+{
+ return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
+ subpacket) == subpacket;
+}
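+
+/*
+ * Illustrative usage (not from the original patch): a function driver
+ * would typically look up a packet register by number and then test for
+ * a subpacket, e.g.:
+ *
+ *	const struct rmi_register_desc_item *item =
+ *		rmi_get_register_desc_item(&rdesc, 8);
+ *	if (item && rmi_register_desc_has_subpacket(item, 0))
+ *		... subpacket 0 of packet register 8 is present ...
+ */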
+
+/* Indicates that flash programming is enabled (bootloader mode). */
+#define RMI_F01_STATUS_BOOTLOADER(status) (!!((status) & 0x40))
+
+/*
+ * Given the PDT entry for F01, read the device status register to determine
+ * whether the device is stuck in bootloader mode.
+ */
+static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
+ const struct pdt_entry *pdt)
+{
+ int error;
+ u8 device_status;
+
+ error = rmi_read(rmi_dev, pdt->data_base_addr + pdt->page_start,
+ &device_status);
+ if (error) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read device status: %d.\n", error);
+ return error;
+ }
+
+ return RMI_F01_STATUS_BOOTLOADER(device_status);
+}
+
+static int rmi_count_irqs(struct rmi_device *rmi_dev,
+ void *ctx, const struct pdt_entry *pdt)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int *irq_count = ctx;
+
+ *irq_count += pdt->interrupt_source_count;
+ if (pdt->function_number == 0x01) {
+ data->f01_bootloader_mode =
+ rmi_check_bootloader_mode(rmi_dev, pdt);
+ if (data->f01_bootloader_mode)
+ dev_warn(&rmi_dev->dev,
+ "WARNING: RMI4 device is in bootloader mode!\n");
+ }
+
+ return RMI_SCAN_CONTINUE;
+}
+
+static int rmi_initial_reset(struct rmi_device *rmi_dev,
+ void *ctx, const struct pdt_entry *pdt)
+{
+ int error;
+
+ if (pdt->function_number == 0x01) {
+ u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
+ u8 cmd_buf = RMI_DEVICE_RESET_CMD;
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(rmi_dev);
+
+ if (rmi_dev->xport->ops->reset) {
+ error = rmi_dev->xport->ops->reset(rmi_dev->xport,
+ cmd_addr);
+ if (error)
+ return error;
+
+ return RMI_SCAN_DONE;
+ }
+
+ error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
+ if (error) {
+ dev_err(&rmi_dev->dev,
+ "Initial reset failed. Code = %d.\n", error);
+ return error;
+ }
+
+ mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);
+
+ return RMI_SCAN_DONE;
+ }
+
+ /* F01 should always be on page 0. If we don't find it there, fail. */
+ return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
+}
+
+static int rmi_create_function(struct rmi_device *rmi_dev,
+ void *ctx, const struct pdt_entry *pdt)
+{
+ struct device *dev = &rmi_dev->dev;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int *current_irq_count = ctx;
+ struct rmi_function *fn;
+ int i;
+ int error;
+
+ rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
+ pdt->function_number);
+
+ fn = kzalloc(sizeof(struct rmi_function) +
+ BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!fn) {
+ dev_err(dev, "Failed to allocate memory for F%02X\n",
+ pdt->function_number);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&fn->node);
+ rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);
+
+ fn->rmi_dev = rmi_dev;
+
+ fn->num_of_irqs = pdt->interrupt_source_count;
+ fn->irq_pos = *current_irq_count;
+ *current_irq_count += fn->num_of_irqs;
+
+ for (i = 0; i < fn->num_of_irqs; i++)
+ set_bit(fn->irq_pos + i, fn->irq_mask);
+
+ error = rmi_register_function(fn);
+ if (error)
+ goto err_put_fn;
+
+ if (pdt->function_number == 0x01)
+ data->f01_container = fn;
+
+ list_add_tail(&fn->node, &data->function_list);
+
+ return RMI_SCAN_CONTINUE;
+
+err_put_fn:
+ put_device(&fn->dev);
+ return error;
+}
+
+int rmi_driver_suspend(struct rmi_device *rmi_dev)
+{
+ int retval = 0;
+
+ retval = rmi_suspend_functions(rmi_dev);
+ if (retval)
+ dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
+ retval);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(rmi_driver_suspend);
+
+int rmi_driver_resume(struct rmi_device *rmi_dev)
+{
+ int retval;
+
+ retval = rmi_resume_functions(rmi_dev);
+ if (retval)
+ dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
+ retval);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(rmi_driver_resume);
+
+static int rmi_driver_remove(struct device *dev)
+{
+ struct rmi_device *rmi_dev = to_rmi_device(dev);
+
+ rmi_free_function_list(rmi_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int rmi_driver_of_probe(struct device *dev,
+ struct rmi_device_platform_data *pdata)
+{
+ int retval;
+
+ retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
+ "syna,reset-delay-ms", 1);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+#else
+static inline int rmi_driver_of_probe(struct device *dev,
+ struct rmi_device_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
+static int rmi_driver_probe(struct device *dev)
+{
+ struct rmi_driver *rmi_driver;
+ struct rmi_driver_data *data;
+ struct rmi_device_platform_data *pdata;
+ struct rmi_device *rmi_dev;
+ size_t size;
+ void *irq_memory;
+ int irq_count;
+ int retval;
+
+ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
+ __func__);
+
+ if (!rmi_is_physical_device(dev)) {
+ rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
+ return -ENODEV;
+ }
+
+ rmi_dev = to_rmi_device(dev);
+ rmi_driver = to_rmi_driver(dev->driver);
+ rmi_dev->driver = rmi_driver;
+
+ pdata = rmi_get_platform_data(rmi_dev);
+
+ if (rmi_dev->xport->dev->of_node) {
+ retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
+ if (retval)
+ return retval;
+ }
+
+ data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->function_list);
+ data->rmi_dev = rmi_dev;
+ dev_set_drvdata(&rmi_dev->dev, data);
+
+ /*
+ * Right before a warm boot, the sensor might be in some unusual state,
+ * such as F54 diagnostics, or F34 bootloader mode after a firmware
+ * or configuration update. In order to clear the sensor to a known
+ * state and/or apply any updates, we issue an initial reset to clear any
+ * previous settings and force it into normal operation.
+ *
+ * We have to do this before actually building the PDT because
+ * the reflash updates (if any) might cause various registers to move
+ * around.
+ *
+ * For a number of reasons, this initial reset may fail to return
+ * within the specified time, but we'll still be able to bring up the
+ * driver normally after that failure. This occurs most commonly in
+ * a cold boot situation (where the firmware takes longer to come up
+ * than from a warm boot) and the reset_delay_ms in the platform data
+ * has been set too short to accommodate that. Since the sensor will
+ * eventually come up and be usable, we don't want to just fail here
+ * and leave the customer's device unusable. So we warn them, and
+ * continue processing.
+ */
+ retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
+ if (retval < 0)
+ dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");
+
+ retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
+ if (retval < 0) {
+ /*
+ * we'll print out a warning and continue since
+ * failure to get the PDT properties is not a cause to fail
+ */
+ dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
+ PDT_PROPERTIES_LOCATION, retval);
+ }
+
+ /*
+ * We need to count the IRQs and allocate their storage before scanning
+ * the PDT and creating the function entries, because adding a new
+ * function can trigger events that result in the IRQ related storage
+ * being accessed.
+ */
+ rmi_dbg(RMI_DEBUG_CORE, dev, "Counting IRQs.\n");
+ irq_count = 0;
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
+ if (retval < 0) {
+ dev_err(dev, "IRQ counting failed with code %d.\n", retval);
+ goto err;
+ }
+ data->irq_count = irq_count;
+ data->num_of_irq_regs = (data->irq_count + 7) / 8;
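+ /*
+ * Round up to whole 8-bit registers: e.g. 13 IRQ bits need
+ * (13 + 7) / 8 = 2 interrupt status/mask registers.
+ */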
+
+ mutex_init(&data->irq_mutex);
+
+ size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
+ irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
+ if (!irq_memory) {
+ dev_err(dev, "Failed to allocate memory for irq masks.\n");
+ retval = -ENOMEM;
+ goto err;
+ }
+
+ data->irq_status = irq_memory + size * 0;
+ data->fn_irq_bits = irq_memory + size * 1;
+ data->current_irq_mask = irq_memory + size * 2;
+ data->new_irq_mask = irq_memory + size * 3;
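+ /*
+ * The single allocation above is sliced into four equally sized
+ * bitmaps: the latched interrupt status, per-function scratch bits,
+ * and the current and to-be-written interrupt enable masks.
+ */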
+
+ if (rmi_dev->xport->input) {
+ /*
+ * The transport driver already has an input device.
+ * In some cases it is preferable to reuse the transport
+ * device's input device instead of creating a new one
+ * here. One example is HID touchpads, which report
+ * "pass-through" button events that are not visible in
+ * the RMI registers.
+ */
+ data->input = rmi_dev->xport->input;
+ } else {
+ data->input = devm_input_allocate_device(dev);
+ if (!data->input) {
+ dev_err(dev, "%s: Failed to allocate input device.\n",
+ __func__);
+ retval = -ENOMEM;
+ goto err_destroy_functions;
+ }
+ rmi_driver_set_input_params(rmi_dev, data->input);
+ data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
+ "%s/input0", dev_name(dev));
+ }
+
+ irq_count = 0;
+ rmi_dbg(RMI_DEBUG_CORE, dev, "Creating functions.");
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
+ if (retval < 0) {
+ dev_err(dev, "Function creation failed with code %d.\n",
+ retval);
+ goto err_destroy_functions;
+ }
+
+ if (!data->f01_container) {
+ dev_err(dev, "Missing F01 container!\n");
+ retval = -EINVAL;
+ goto err_destroy_functions;
+ }
+
+ retval = rmi_read_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->current_irq_mask, data->num_of_irq_regs);
+ if (retval < 0) {
+ dev_err(dev, "%s: Failed to read current IRQ mask.\n",
+ __func__);
+ goto err_destroy_functions;
+ }
+
+ if (data->input) {
+ rmi_driver_set_input_name(rmi_dev, data->input);
+ if (!rmi_dev->xport->input) {
+ retval = input_register_device(data->input);
+ if (retval) {
+ dev_err(dev, "%s: Failed to register input device.\n",
+ __func__);
+ goto err_destroy_functions;
+ }
+ }
+ }
+
+ if (data->f01_container->dev.driver)
+ /* Driver already bound, so enable ATTN now. */
+ return enable_sensor(rmi_dev);
+
+ return 0;
+
+err_destroy_functions:
+ rmi_free_function_list(rmi_dev);
+err:
+ return retval < 0 ? retval : 0;
+}
+
+static struct rmi_driver rmi_physical_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rmi4_physical",
+ .bus = &rmi_bus_type,
+ .probe = rmi_driver_probe,
+ .remove = rmi_driver_remove,
+ },
+ .reset_handler = rmi_driver_reset_handler,
+ .clear_irq_bits = rmi_driver_clear_irq_bits,
+ .set_irq_bits = rmi_driver_set_irq_bits,
+ .set_input_params = rmi_driver_set_input_params,
+};
+
+bool rmi_is_physical_driver(struct device_driver *drv)
+{
+ return drv == &rmi_physical_driver.driver;
+}
+
+int __init rmi_register_physical_driver(void)
+{
+ int error;
+
+ error = driver_register(&rmi_physical_driver.driver);
+ if (error) {
+ pr_err("%s: driver register failed, code=%d.\n", __func__,
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+void __exit rmi_unregister_physical_driver(void)
+{
+ driver_unregister(&rmi_physical_driver.driver);
+}
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
new file mode 100644
index 000000000000..6e140fa3cce1
--- /dev/null
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_DRIVER_H
+#define _RMI_DRIVER_H
+
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/input.h>
+#include "rmi_bus.h"
+
+#define RMI_DRIVER_VERSION "2.0"
+
+#define SYNAPTICS_INPUT_DEVICE_NAME "Synaptics RMI4 Touch Sensor"
+#define SYNAPTICS_VENDOR_ID 0x06cb
+
+#define GROUP(_attrs) { \
+ .attrs = _attrs, \
+}
+
+#define PDT_PROPERTIES_LOCATION 0x00EF
+#define BSR_LOCATION 0x00FE
+
+#define RMI_PDT_PROPS_HAS_BSR 0x02
+
+#define NAME_BUFFER_SIZE 256
+
+#define RMI_PDT_ENTRY_SIZE 6
+#define RMI_PDT_FUNCTION_VERSION_MASK 0x60
+#define RMI_PDT_INT_SOURCE_COUNT_MASK 0x07
+
+#define PDT_START_SCAN_LOCATION 0x00e9
+#define PDT_END_SCAN_LOCATION 0x0005
+#define RMI4_END_OF_PDT(id) ((id) == 0x00 || (id) == 0xff)
+
+struct pdt_entry {
+ u16 page_start;
+ u8 query_base_addr;
+ u8 command_base_addr;
+ u8 control_base_addr;
+ u8 data_base_addr;
+ u8 interrupt_source_count;
+ u8 function_version;
+ u8 function_number;
+};
+
+int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
+ u16 pdt_address);
+
+#define RMI_REG_DESC_PRESENSE_BITS (32 * BITS_PER_BYTE)
+#define RMI_REG_DESC_SUBPACKET_BITS (37 * BITS_PER_BYTE)
+
+/* describes a single packet register */
+struct rmi_register_desc_item {
+ u16 reg;
+ unsigned long reg_size;
+ u8 num_subpackets;
+ unsigned long subpacket_map[BITS_TO_LONGS(
+ RMI_REG_DESC_SUBPACKET_BITS)];
+};
+
+/*
+ * describes the packet registers for a particular type
+ * (ie query, control, data)
+ */
+struct rmi_register_descriptor {
+ unsigned long struct_size;
+ unsigned long presense_map[BITS_TO_LONGS(RMI_REG_DESC_PRESENSE_BITS)];
+ u8 num_registers;
+ struct rmi_register_desc_item *registers;
+};
+
+int rmi_read_register_desc(struct rmi_device *d, u16 addr,
+ struct rmi_register_descriptor *rdesc);
+const struct rmi_register_desc_item *rmi_get_register_desc_item(
+ struct rmi_register_descriptor *rdesc, u16 reg);
+
+/*
+ * Calculate the total size of all of the registers described in the
+ * descriptor.
+ */
+size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc);
+int rmi_register_desc_calc_reg_offset(
+ struct rmi_register_descriptor *rdesc, u16 reg);
+bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
+ u8 subpacket);
+
+bool rmi_is_physical_driver(struct device_driver *);
+int rmi_register_physical_driver(void);
+void rmi_unregister_physical_driver(void);
+
+char *rmi_f01_get_product_ID(struct rmi_function *fn);
+
+extern struct rmi_function_handler rmi_f01_handler;
+extern struct rmi_function_handler rmi_f11_handler;
+extern struct rmi_function_handler rmi_f12_handler;
+extern struct rmi_function_handler rmi_f30_handler;
+#endif
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
new file mode 100644
index 000000000000..eb362bc71a4c
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kconfig.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include "rmi_driver.h"
+
+#define RMI_PRODUCT_ID_LENGTH 10
+#define RMI_PRODUCT_INFO_LENGTH 2
+
+#define RMI_DATE_CODE_LENGTH 3
+
+#define PRODUCT_ID_OFFSET 0x10
+#define PRODUCT_INFO_OFFSET 0x1E
+
+
+/* Force a firmware reset of the sensor */
+#define RMI_F01_CMD_DEVICE_RESET 1
+
+/* Various F01_RMI_QueryX bits */
+
+#define RMI_F01_QRY1_CUSTOM_MAP BIT(0)
+#define RMI_F01_QRY1_NON_COMPLIANT BIT(1)
+#define RMI_F01_QRY1_HAS_LTS BIT(2)
+#define RMI_F01_QRY1_HAS_SENSOR_ID BIT(3)
+#define RMI_F01_QRY1_HAS_CHARGER_INP BIT(4)
+#define RMI_F01_QRY1_HAS_ADJ_DOZE BIT(5)
+#define RMI_F01_QRY1_HAS_ADJ_DOZE_HOFF BIT(6)
+#define RMI_F01_QRY1_HAS_QUERY42 BIT(7)
+
+#define RMI_F01_QRY5_YEAR_MASK 0x1f
+#define RMI_F01_QRY6_MONTH_MASK 0x0f
+#define RMI_F01_QRY7_DAY_MASK 0x1f
+
+#define RMI_F01_QRY2_PRODINFO_MASK 0x7f
+
+#define RMI_F01_BASIC_QUERY_LEN 21 /* From Query 00 through 20 */
+
+struct f01_basic_properties {
+ u8 manufacturer_id;
+ bool has_lts;
+ bool has_adjustable_doze;
+ bool has_adjustable_doze_holdoff;
+ char dom[11]; /* YYYY/MM/DD + '\0' */
+ u8 product_id[RMI_PRODUCT_ID_LENGTH + 1];
+ u16 productinfo;
+ u32 firmware_id;
+};
+
+/* F01 device status bits */
+
+/* Most recent device status event */
+#define RMI_F01_STATUS_CODE(status) ((status) & 0x0f)
+/* The device has lost its configuration for some reason. */
+#define RMI_F01_STATUS_UNCONFIGURED(status) (!!((status) & 0x80))
+
+/* Control register bits */
+
+/*
+ * Sleep mode controls power management on the device and affects all
+ * functions of the device.
+ */
+#define RMI_F01_CTRL0_SLEEP_MODE_MASK 0x03
+
+#define RMI_SLEEP_MODE_NORMAL 0x00
+#define RMI_SLEEP_MODE_SENSOR_SLEEP 0x01
+#define RMI_SLEEP_MODE_RESERVED0 0x02
+#define RMI_SLEEP_MODE_RESERVED1 0x03
+
+/*
+ * This bit disables whatever sleep mode may be selected by the sleep_mode
+ * field and forces the device to run at full power without sleeping.
+ */
+#define RMI_F01_CRTL0_NOSLEEP_BIT BIT(2)
+
+/*
+ * When this bit is set, the touch controller employs a noise-filtering
+ * algorithm designed for use with a connected battery charger.
+ */
+#define RMI_F01_CRTL0_CHARGER_BIT BIT(5)
+
+/*
+ * Sets the report rate for the device. The effect of this setting is
+ * highly product dependent. Check the spec sheet for your particular
+ * touch sensor.
+ */
+#define RMI_F01_CRTL0_REPORTRATE_BIT BIT(6)
+
+/*
+ * Written by the host as an indicator that the device has been
+ * successfully configured.
+ */
+#define RMI_F01_CRTL0_CONFIGURED_BIT BIT(7)
+
+/**
+ * @ctrl0 - see the bit definitions above.
+ * @doze_interval - controls the interval between checks for finger presence
+ * when the touch sensor is in doze mode, in units of 10ms.
+ * @wakeup_threshold - controls the capacitance threshold at which the touch
+ * sensor will decide to wake up from that low power state.
+ * @doze_holdoff - controls how long the touch sensor waits after the last
+ * finger lifts before entering the doze state, in units of 100ms.
+ */
+struct f01_device_control {
+ u8 ctrl0;
+ u8 doze_interval;
+ u8 wakeup_threshold;
+ u8 doze_holdoff;
+};
+
+struct f01_data {
+ struct f01_basic_properties properties;
+ struct f01_device_control device_control;
+
+ u16 doze_interval_addr;
+ u16 wakeup_threshold_addr;
+ u16 doze_holdoff_addr;
+
+ bool suspended;
+ bool old_nosleep;
+
+ unsigned int num_of_irq_regs;
+};
+
+static int rmi_f01_read_properties(struct rmi_device *rmi_dev,
+ u16 query_base_addr,
+ struct f01_basic_properties *props)
+{
+ u8 queries[RMI_F01_BASIC_QUERY_LEN];
+ int ret;
+ int query_offset = query_base_addr;
+ bool has_ds4_queries = false;
+ bool has_query42 = false;
+ bool has_sensor_id = false;
+ bool has_package_id_query = false;
+ bool has_build_id_query = false;
+ u16 prod_info_addr;
+ u8 ds4_query_len;
+
+ ret = rmi_read_block(rmi_dev, query_offset,
+ queries, RMI_F01_BASIC_QUERY_LEN);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read device query registers: %d\n", ret);
+ return ret;
+ }
+
+ prod_info_addr = query_offset + 17;
+ query_offset += RMI_F01_BASIC_QUERY_LEN;
+
+ /* Now parse what we got */
+ props->manufacturer_id = queries[0];
+
+ props->has_lts = queries[1] & RMI_F01_QRY1_HAS_LTS;
+ props->has_adjustable_doze =
+ queries[1] & RMI_F01_QRY1_HAS_ADJ_DOZE;
+ props->has_adjustable_doze_holdoff =
+ queries[1] & RMI_F01_QRY1_HAS_ADJ_DOZE_HOFF;
+ has_query42 = queries[1] & RMI_F01_QRY1_HAS_QUERY42;
+ has_sensor_id = queries[1] & RMI_F01_QRY1_HAS_SENSOR_ID;
+
+ snprintf(props->dom, sizeof(props->dom), "20%02d/%02d/%02d",
+ queries[5] & RMI_F01_QRY5_YEAR_MASK,
+ queries[6] & RMI_F01_QRY6_MONTH_MASK,
+ queries[7] & RMI_F01_QRY7_DAY_MASK);
+
+ memcpy(props->product_id, &queries[11],
+ RMI_PRODUCT_ID_LENGTH);
+ props->product_id[RMI_PRODUCT_ID_LENGTH] = '\0';
+
+ props->productinfo =
+ ((queries[2] & RMI_F01_QRY2_PRODINFO_MASK) << 7) |
+ (queries[3] & RMI_F01_QRY2_PRODINFO_MASK);
+
+ if (has_sensor_id)
+ query_offset++;
+
+ if (has_query42) {
+ ret = rmi_read(rmi_dev, query_offset, queries);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read query 42 register: %d\n", ret);
+ return ret;
+ }
+
+ has_ds4_queries = !!(queries[0] & BIT(0));
+ query_offset++;
+ }
+
+ if (has_ds4_queries) {
+ ret = rmi_read(rmi_dev, query_offset, &ds4_query_len);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read DS4 queries length: %d\n", ret);
+ return ret;
+ }
+ query_offset++;
+
+ if (ds4_query_len > 0) {
+ ret = rmi_read(rmi_dev, query_offset, queries);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read DS4 queries: %d\n",
+ ret);
+ return ret;
+ }
+
+ has_package_id_query = !!(queries[0] & BIT(0));
+ has_build_id_query = !!(queries[0] & BIT(1));
+ }
+
+ if (has_package_id_query)
+ prod_info_addr++;
+
+ if (has_build_id_query) {
+ ret = rmi_read_block(rmi_dev, prod_info_addr, queries,
+ 3);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read product info: %d\n",
+ ret);
+ return ret;
+ }
+
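+ /*
+ * The build ID is a 24-bit little-endian value,
+ * e.g. (illustrative) bytes 0x12 0x34 0x56 decode
+ * to a firmware_id of 0x563412.
+ */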
+ props->firmware_id = queries[1] << 8 | queries[0];
+ props->firmware_id += queries[2] * 65536;
+ }
+ }
+
+ return 0;
+}
+
+char *rmi_f01_get_product_ID(struct rmi_function *fn)
+{
+ struct f01_data *f01 = dev_get_drvdata(&fn->dev);
+
+ return f01->properties.product_id;
+}
+
+#ifdef CONFIG_OF
+static int rmi_f01_of_probe(struct device *dev,
+ struct rmi_device_platform_data *pdata)
+{
+ int retval;
+ u32 val;
+
+ retval = rmi_of_property_read_u32(dev,
+ (u32 *)&pdata->power_management.nosleep,
+ "syna,nosleep-mode", 1);
+ if (retval)
+ return retval;
+
+ retval = rmi_of_property_read_u32(dev, &val,
+ "syna,wakeup-threshold", 1);
+ if (retval)
+ return retval;
+
+ pdata->power_management.wakeup_threshold = val;
+
+ retval = rmi_of_property_read_u32(dev, &val,
+ "syna,doze-holdoff-ms", 1);
+ if (retval)
+ return retval;
+
+ pdata->power_management.doze_holdoff = val * 100;
+
+ retval = rmi_of_property_read_u32(dev, &val,
+ "syna,doze-interval-ms", 1);
+ if (retval)
+ return retval;
+
+ pdata->power_management.doze_interval = val / 10;
+
+ return 0;
+}
+#else
+static inline int rmi_f01_of_probe(struct device *dev,
+ struct rmi_device_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
+static int rmi_f01_probe(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *driver_data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct f01_data *f01;
+ int error;
+ u16 ctrl_base_addr = fn->fd.control_base_addr;
+ u8 device_status;
+ u8 temp;
+
+ if (fn->dev.of_node) {
+ error = rmi_f01_of_probe(&fn->dev, pdata);
+ if (error)
+ return error;
+ }
+
+ f01 = devm_kzalloc(&fn->dev, sizeof(struct f01_data), GFP_KERNEL);
+ if (!f01)
+ return -ENOMEM;
+
+ f01->num_of_irq_regs = driver_data->num_of_irq_regs;
+
+ /*
+ * Set the configured bit and (optionally) other important stuff
+ * in the device control register.
+ */
+
+ error = rmi_read(rmi_dev, fn->fd.control_base_addr,
+ &f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev, "Failed to read F01 control: %d\n", error);
+ return error;
+ }
+
+ switch (pdata->power_management.nosleep) {
+ case RMI_F01_NOSLEEP_DEFAULT:
+ break;
+ case RMI_F01_NOSLEEP_OFF:
+ f01->device_control.ctrl0 &= ~RMI_F01_CRTL0_NOSLEEP_BIT;
+ break;
+ case RMI_F01_NOSLEEP_ON:
+ f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT;
+ break;
+ }
+
+ /*
+ * Sleep mode might be set as a hangover from a system crash or
+ * reboot without power cycle. If so, clear it so the sensor
+ * is certain to function.
+ */
+ if ((f01->device_control.ctrl0 & RMI_F01_CTRL0_SLEEP_MODE_MASK) !=
+ RMI_SLEEP_MODE_NORMAL) {
+ dev_warn(&fn->dev,
+ "WARNING: Non-zero sleep mode found. Clearing...\n");
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
+ }
+
+ f01->device_control.ctrl0 |= RMI_F01_CRTL0_CONFIGURED_BIT;
+
+ error = rmi_write(rmi_dev, fn->fd.control_base_addr,
+ f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev, "Failed to write F01 control: %d\n", error);
+ return error;
+ }
+
+ /* Dummy read in order to clear irqs */
+ error = rmi_read(rmi_dev, fn->fd.data_base_addr + 1, &temp);
+ if (error < 0) {
+ dev_err(&fn->dev, "Failed to read Interrupt Status.\n");
+ return error;
+ }
+
+ error = rmi_f01_read_properties(rmi_dev, fn->fd.query_base_addr,
+ &f01->properties);
+ if (error < 0) {
+ dev_err(&fn->dev, "Failed to read F01 properties.\n");
+ return error;
+ }
+
+ dev_info(&fn->dev, "found RMI device, manufacturer: %s, product: %s, fw id: %d\n",
+ f01->properties.manufacturer_id == 1 ? "Synaptics" : "unknown",
+ f01->properties.product_id, f01->properties.firmware_id);
+
+ /* Advance to interrupt control registers, then skip over them. */
+ ctrl_base_addr++;
+ ctrl_base_addr += f01->num_of_irq_regs;
+
+ /* read control register */
+ if (f01->properties.has_adjustable_doze) {
+ f01->doze_interval_addr = ctrl_base_addr;
+ ctrl_base_addr++;
+
+ if (pdata->power_management.doze_interval) {
+ f01->device_control.doze_interval =
+ pdata->power_management.doze_interval;
+ error = rmi_write(rmi_dev, f01->doze_interval_addr,
+ f01->device_control.doze_interval);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to configure F01 doze interval register: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ error = rmi_read(rmi_dev, f01->doze_interval_addr,
+ &f01->device_control.doze_interval);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to read F01 doze interval register: %d\n",
+ error);
+ return error;
+ }
+ }
+
+ f01->wakeup_threshold_addr = ctrl_base_addr;
+ ctrl_base_addr++;
+
+ if (pdata->power_management.wakeup_threshold) {
+ f01->device_control.wakeup_threshold =
+ pdata->power_management.wakeup_threshold;
+ error = rmi_write(rmi_dev, f01->wakeup_threshold_addr,
+ f01->device_control.wakeup_threshold);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to configure F01 wakeup threshold register: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ error = rmi_read(rmi_dev, f01->wakeup_threshold_addr,
+ &f01->device_control.wakeup_threshold);
+ if (error < 0) {
+ dev_err(&fn->dev,
+ "Failed to read F01 wakeup threshold register: %d\n",
+ error);
+ return error;
+ }
+ }
+ }
+
+ if (f01->properties.has_lts)
+ ctrl_base_addr++;
+
+ if (f01->properties.has_adjustable_doze_holdoff) {
+ f01->doze_holdoff_addr = ctrl_base_addr;
+ ctrl_base_addr++;
+
+ if (pdata->power_management.doze_holdoff) {
+ f01->device_control.doze_holdoff =
+ pdata->power_management.doze_holdoff;
+ error = rmi_write(rmi_dev, f01->doze_holdoff_addr,
+ f01->device_control.doze_holdoff);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to configure F01 doze holdoff register: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ error = rmi_read(rmi_dev, f01->doze_holdoff_addr,
+ &f01->device_control.doze_holdoff);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to read F01 doze holdoff register: %d\n",
+ error);
+ return error;
+ }
+ }
+ }
+
+ error = rmi_read(rmi_dev, fn->fd.data_base_addr, &device_status);
+ if (error < 0) {
+ dev_err(&fn->dev,
+ "Failed to read device status: %d\n", error);
+ return error;
+ }
+
+ if (RMI_F01_STATUS_UNCONFIGURED(device_status)) {
+ dev_err(&fn->dev,
+ "Device was reset during configuration process, status: %#02x!\n",
+ RMI_F01_STATUS_CODE(device_status));
+ return -EINVAL;
+ }
+
+ dev_set_drvdata(&fn->dev, f01);
+
+ return 0;
+}
+
+static int rmi_f01_config(struct rmi_function *fn)
+{
+ struct f01_data *f01 = dev_get_drvdata(&fn->dev);
+ int error;
+
+ error = rmi_write(fn->rmi_dev, fn->fd.control_base_addr,
+ f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to write device_control register: %d\n", error);
+ return error;
+ }
+
+ if (f01->properties.has_adjustable_doze) {
+ error = rmi_write(fn->rmi_dev, f01->doze_interval_addr,
+ f01->device_control.doze_interval);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to write doze interval: %d\n", error);
+ return error;
+ }
+
+ error = rmi_write_block(fn->rmi_dev,
+ f01->wakeup_threshold_addr,
+ &f01->device_control.wakeup_threshold,
+ sizeof(u8));
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to write wakeup threshold: %d\n",
+ error);
+ return error;
+ }
+ }
+
+ if (f01->properties.has_adjustable_doze_holdoff) {
+ error = rmi_write(fn->rmi_dev, f01->doze_holdoff_addr,
+ f01->device_control.doze_holdoff);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to write doze holdoff: %d\n", error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int rmi_f01_suspend(struct rmi_function *fn)
+{
+ struct f01_data *f01 = dev_get_drvdata(&fn->dev);
+ int error;
+
+ f01->old_nosleep =
+ f01->device_control.ctrl0 & RMI_F01_CRTL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 &= ~RMI_F01_CRTL0_NOSLEEP_BIT;
+
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
+ if (device_may_wakeup(fn->rmi_dev->xport->dev))
+ f01->device_control.ctrl0 |= RMI_SLEEP_MODE_RESERVED1;
+ else
+ f01->device_control.ctrl0 |= RMI_SLEEP_MODE_SENSOR_SLEEP;
+
+ error = rmi_write(fn->rmi_dev, fn->fd.control_base_addr,
+ f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev, "Failed to write sleep mode: %d.\n", error);
+ if (f01->old_nosleep)
+ f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
+ f01->device_control.ctrl0 |= RMI_SLEEP_MODE_NORMAL;
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f01_resume(struct rmi_function *fn)
+{
+ struct f01_data *f01 = dev_get_drvdata(&fn->dev);
+ int error;
+
+ if (f01->old_nosleep)
+ f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT;
+
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
+ f01->device_control.ctrl0 |= RMI_SLEEP_MODE_NORMAL;
+
+ error = rmi_write(fn->rmi_dev, fn->fd.control_base_addr,
+ f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to restore normal operation: %d.\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f01_attention(struct rmi_function *fn,
+ unsigned long *irq_bits)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error;
+ u8 device_status;
+
+ error = rmi_read(rmi_dev, fn->fd.data_base_addr, &device_status);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to read device status: %d.\n", error);
+ return error;
+ }
+
+ if (RMI_F01_STATUS_UNCONFIGURED(device_status)) {
+ dev_warn(&fn->dev, "Device reset detected.\n");
+ error = rmi_dev->driver->reset_handler(rmi_dev);
+ if (error) {
+ dev_err(&fn->dev, "Device reset failed: %d\n", error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f01_handler = {
+ .driver = {
+ .name = "rmi4_f01",
+ /*
+ * Do not allow user unbinding F01 as it is critical
+ * function.
+ */
+ .suppress_bind_attrs = true,
+ },
+ .func = 0x01,
+ .probe = rmi_f01_probe,
+ .config = rmi_f01_config,
+ .attention = rmi_f01_attention,
+ .suspend = rmi_f01_suspend,
+ .resume = rmi_f01_resume,
+};
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
new file mode 100644
index 000000000000..ec8a10d53288
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -0,0 +1,1317 @@
+/*
+ * Copyright (c) 2011-2015 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/kconfig.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include "rmi_driver.h"
+#include "rmi_2d_sensor.h"
+
+#define F11_MAX_NUM_OF_FINGERS 10
+#define F11_MAX_NUM_OF_TOUCH_SHAPES 16
+
+#define FINGER_STATE_MASK 0x03
+
+#define F11_CTRL_SENSOR_MAX_X_POS_OFFSET 6
+#define F11_CTRL_SENSOR_MAX_Y_POS_OFFSET 8
+
+#define DEFAULT_XY_MAX 9999
+#define DEFAULT_MAX_ABS_MT_PRESSURE 255
+#define DEFAULT_MAX_ABS_MT_TOUCH 15
+#define DEFAULT_MAX_ABS_MT_ORIENTATION 1
+#define DEFAULT_MIN_ABS_MT_TRACKING_ID 1
+#define DEFAULT_MAX_ABS_MT_TRACKING_ID 10
+
+/**
+ * A note about the RMI4 F11 register structure:
+ *
+ * The properties for a given sensor are described by its query registers.
+ * The number of query registers and the layout of their contents are
+ * described by the F11 device queries as well as the sensor query
+ * information.
+ *
+ * Similarly, each sensor has control registers that govern its behavior.
+ * The size and layout of the control registers for a given sensor can be
+ * determined by parsing that sensor's query registers.
+ *
+ * Likewise, each sensor has data registers where it reports its touch data
+ * and other information. The size and layout of a sensor's data registers
+ * must be determined by parsing its query registers.
+ *
+ * The short story is that we need to read and parse a lot of query
+ * registers in order to determine the attributes of a sensor. Then
+ * we need to use that data to compute the size of the control and data
+ * registers for the sensor.
+ *
+ * The end result is that we have a number of structs that aren't used to
+ * directly generate the input events, but their size, location and contents
+ * are critical to determining where the data we are interested in lives.
+ *
+ * At this time, the driver does not yet comprehend all possible F11
+ * configuration options, but it should be sufficient to cover 99% of RMI4 F11
+ * devices currently in the field.
+ */
+
+/* maximum ABS_MT_POSITION displacement (in mm) */
+#define DMAX 10
+
+/**
+ * @rezero - writing this to the F11 command register will cause the sensor to
+ * calibrate to the current capacitive state.
+ */
+#define RMI_F11_REZERO 0x01
+
+#define RMI_F11_HAS_QUERY9 (1 << 3)
+#define RMI_F11_HAS_QUERY11 (1 << 4)
+#define RMI_F11_HAS_QUERY12 (1 << 5)
+#define RMI_F11_HAS_QUERY27 (1 << 6)
+#define RMI_F11_HAS_QUERY28 (1 << 7)
+
+/** Defs for Query 1 */
+
+#define RMI_F11_NR_FINGERS_MASK 0x07
+#define RMI_F11_HAS_REL (1 << 3)
+#define RMI_F11_HAS_ABS (1 << 4)
+#define RMI_F11_HAS_GESTURES (1 << 5)
+#define RMI_F11_HAS_SENSITIVITY_ADJ (1 << 6)
+#define RMI_F11_CONFIGURABLE (1 << 7)
+
+/** Defs for Query 2, 3, and 4. */
+#define RMI_F11_NR_ELECTRODES_MASK 0x7F
+
+/** Defs for Query 5 */
+
+#define RMI_F11_ABS_DATA_SIZE_MASK 0x03
+#define RMI_F11_HAS_ANCHORED_FINGER (1 << 2)
+#define RMI_F11_HAS_ADJ_HYST (1 << 3)
+#define RMI_F11_HAS_DRIBBLE (1 << 4)
+#define RMI_F11_HAS_BENDING_CORRECTION (1 << 5)
+#define RMI_F11_HAS_LARGE_OBJECT_SUPPRESSION (1 << 6)
+#define RMI_F11_HAS_JITTER_FILTER (1 << 7)
+
+/** Defs for Query 7 */
+#define RMI_F11_HAS_SINGLE_TAP (1 << 0)
+#define RMI_F11_HAS_TAP_AND_HOLD (1 << 1)
+#define RMI_F11_HAS_DOUBLE_TAP (1 << 2)
+#define RMI_F11_HAS_EARLY_TAP (1 << 3)
+#define RMI_F11_HAS_FLICK (1 << 4)
+#define RMI_F11_HAS_PRESS (1 << 5)
+#define RMI_F11_HAS_PINCH (1 << 6)
+#define RMI_F11_HAS_CHIRAL (1 << 7)
+
+/** Defs for Query 8 */
+#define RMI_F11_HAS_PALM_DET (1 << 0)
+#define RMI_F11_HAS_ROTATE (1 << 1)
+#define RMI_F11_HAS_TOUCH_SHAPES (1 << 2)
+#define RMI_F11_HAS_SCROLL_ZONES (1 << 3)
+#define RMI_F11_HAS_INDIVIDUAL_SCROLL_ZONES (1 << 4)
+#define RMI_F11_HAS_MF_SCROLL (1 << 5)
+#define RMI_F11_HAS_MF_EDGE_MOTION (1 << 6)
+#define RMI_F11_HAS_MF_SCROLL_INERTIA (1 << 7)
+
+/** Defs for Query 9. */
+#define RMI_F11_HAS_PEN (1 << 0)
+#define RMI_F11_HAS_PROXIMITY (1 << 1)
+#define RMI_F11_HAS_PALM_DET_SENSITIVITY (1 << 2)
+#define RMI_F11_HAS_SUPPRESS_ON_PALM_DETECT (1 << 3)
+#define RMI_F11_HAS_TWO_PEN_THRESHOLDS (1 << 4)
+#define RMI_F11_HAS_CONTACT_GEOMETRY (1 << 5)
+#define RMI_F11_HAS_PEN_HOVER_DISCRIMINATION (1 << 6)
+#define RMI_F11_HAS_PEN_FILTERS (1 << 7)
+
+/** Defs for Query 10. */
+#define RMI_F11_NR_TOUCH_SHAPES_MASK 0x1F
+
+/** Defs for Query 11 */
+
+#define RMI_F11_HAS_Z_TUNING (1 << 0)
+#define RMI_F11_HAS_ALGORITHM_SELECTION (1 << 1)
+#define RMI_F11_HAS_W_TUNING (1 << 2)
+#define RMI_F11_HAS_PITCH_INFO (1 << 3)
+#define RMI_F11_HAS_FINGER_SIZE (1 << 4)
+#define RMI_F11_HAS_SEGMENTATION_AGGRESSIVENESS (1 << 5)
+#define RMI_F11_HAS_XY_CLIP (1 << 6)
+#define RMI_F11_HAS_DRUMMING_FILTER (1 << 7)
+
+/** Defs for Query 12. */
+
+#define RMI_F11_HAS_GAPLESS_FINGER (1 << 0)
+#define RMI_F11_HAS_GAPLESS_FINGER_TUNING (1 << 1)
+#define RMI_F11_HAS_8BIT_W (1 << 2)
+#define RMI_F11_HAS_ADJUSTABLE_MAPPING (1 << 3)
+#define RMI_F11_HAS_INFO2 (1 << 4)
+#define RMI_F11_HAS_PHYSICAL_PROPS (1 << 5)
+#define RMI_F11_HAS_FINGER_LIMIT (1 << 6)
+#define RMI_F11_HAS_LINEAR_COEFF (1 << 7)
+
+/** Defs for Query 13. */
+
+#define RMI_F11_JITTER_WINDOW_MASK 0x1F
+#define RMI_F11_JITTER_FILTER_MASK 0x60
+#define RMI_F11_JITTER_FILTER_SHIFT 5
+
+/** Defs for Query 14. */
+#define RMI_F11_LIGHT_CONTROL_MASK 0x03
+#define RMI_F11_IS_CLEAR (1 << 2)
+#define RMI_F11_CLICKPAD_PROPS_MASK 0x18
+#define RMI_F11_CLICKPAD_PROPS_SHIFT 3
+#define RMI_F11_MOUSE_BUTTONS_MASK 0x60
+#define RMI_F11_MOUSE_BUTTONS_SHIFT 5
+#define RMI_F11_HAS_ADVANCED_GESTURES (1 << 7)
+
+#define RMI_F11_QUERY_SIZE 4
+#define RMI_F11_QUERY_GESTURE_SIZE 2
+
+#define F11_LIGHT_CTL_NONE 0x00
+#define F11_LUXPAD 0x01
+#define F11_DUAL_MODE 0x02
+
+#define F11_NOT_CLICKPAD 0x00
+#define F11_HINGED_CLICKPAD 0x01
+#define F11_UNIFORM_CLICKPAD 0x02
+
+/**
+ * Query registers 1 through 4 are always present.
+ *
+ * @nr_fingers - describes the maximum number of fingers the 2-D sensor
+ * supports.
+ * @has_rel - the sensor supports relative motion reporting.
+ * @has_abs - the sensor supports absolute position reporting.
+ * @has_gestures - the sensor supports gesture reporting.
+ * @has_sensitivity_adjust - the sensor supports a global sensitivity
+ * adjustment.
+ * @configurable - the sensor supports various configuration options.
+ * @num_of_x_electrodes - the maximum number of electrodes the 2-D sensor
+ * supports on the X axis.
+ * @num_of_y_electrodes - the maximum number of electrodes the 2-D sensor
+ * supports on the Y axis.
+ * @max_electrodes - the total number of X and Y electrodes that may be
+ * configured.
+ *
+ * Query 5 is present if the has_abs bit is set.
+ *
+ * @abs_data_size - describes the format of data reported by the absolute
+ * data source. Only one format (the kind used here) is supported at this
+ * time.
+ * @has_anchored_finger - the sensor supports the high-precision second
+ * finger tracking provided by the manual tracking and motion sensitivity
+ * options.
+ * @has_adj_hyst - the sensor supports adjusting the hysteresis between the
+ * finger release threshold and the touch threshold.
+ * @has_dribble - the sensor supports the generation of dribble interrupts,
+ * which may be enabled or disabled with the dribble control bit.
+ * @has_bending_correction - Bending related data registers 28 and 36, and
+ * control register 52..57 are present.
+ * @has_large_object_suppression - control register 58 and data register 28
+ * exist.
+ * @has_jitter_filter - query 13 and control 73..76 exist.
+ *
+ * Gesture information queries 7 and 8 are present if has_gestures bit is set.
+ *
+ * @has_single_tap - a basic single-tap gesture is supported.
+ * @has_tap_n_hold - tap-and-hold gesture is supported.
+ * @has_double_tap - double-tap gesture is supported.
+ * @has_early_tap - early tap is supported and reported as soon as the finger
+ * lifts for any tap event that could be interpreted as either a single tap
+ * or as the first tap of a double-tap or tap-and-hold gesture.
+ * @has_flick - flick detection is supported.
+ * @has_press - press gesture reporting is supported.
+ * @has_pinch - pinch gesture detection is supported.
+ * @has_palm_det - the 2-D sensor notifies the host whenever a large conductive
+ * object such as a palm or a cheek touches the 2-D sensor.
+ * @has_rotate - rotation gesture detection is supported.
+ * @has_touch_shapes - TouchShapes are supported. A TouchShape is a fixed
+ * rectangular area on the sensor that behaves like a capacitive button.
+ * @has_scroll_zones - scrolling areas near the sensor edges are supported.
+ * @has_individual_scroll_zones - if 1, then 4 scroll zones are supported;
+ * if 0, then only two are supported.
+ * @has_mf_scroll - the multifinger_scrolling bit will be set when
+ * more than one finger is involved in a scrolling action.
+ *
+ * Convenience for checking bytes in the gesture info registers. This is done
+ * often enough that we put it here to declutter the conditionals
+ *
+ * @query7_nonzero - true if any of the query 7 bits are set
+ * @query8_nonzero - true if any of the query 8 bits are set
+ *
+ * Query 9 is present if the has_query9 bit is set.
+ *
+ * @has_pen - detection of a stylus is supported and registers F11_2D_Ctrl20
+ * and F11_2D_Ctrl21 exist.
+ * @has_proximity - detection of fingers near the sensor is supported and
+ * registers F11_2D_Ctrl22 through F11_2D_Ctrl26 exist.
+ * @has_palm_det_sensitivity - the sensor supports the palm detect sensitivity
+ * feature and register F11_2D_Ctrl27 exists.
+ * @has_two_pen_thresholds - if has_pen is also set, then F11_2D_Ctrl35 exists.
+ * @has_contact_geometry - the sensor supports the use of contact geometry to
+ * map absolute X and Y target positions and registers F11_2D_Data18
+ * through F11_2D_Data27 exist.
+ *
+ * Touch shape info (query 10) is present if has_touch_shapes is set.
+ *
+ * @nr_touch_shapes - the total number of touch shapes supported.
+ *
+ * Query 11 is present if the has_query11 bit is set in query 0.
+ *
+ * @has_z_tuning - if set, the sensor supports Z tuning and registers
+ * F11_2D_Ctrl29 through F11_2D_Ctrl33 exist.
+ * @has_algorithm_selection - controls choice of noise suppression algorithm
+ * @has_w_tuning - the sensor supports Wx and Wy scaling and registers
+ * F11_2D_Ctrl36 through F11_2D_Ctrl39 exist.
+ * @has_pitch_info - the X and Y pitches of the sensor electrodes can be
+ * configured and registers F11_2D_Ctrl40 and F11_2D_Ctrl41 exist.
+ * @has_finger_size - the default finger width settings for the
+ * sensor can be configured and registers F11_2D_Ctrl42 through F11_2D_Ctrl44
+ * exist.
+ * @has_segmentation_aggressiveness - the sensor's ability to distinguish
+ * multiple objects close together can be configured and register F11_2D_Ctrl45
+ * exists.
+ * @has_XY_clip - the inactive outside borders of the sensor can be
+ * configured and registers F11_2D_Ctrl46 through F11_2D_Ctrl49 exist.
+ * @has_drumming_filter - the sensor can be configured to distinguish
+ * between a fast flick and a quick drumming movement and registers
+ * F11_2D_Ctrl50 and F11_2D_Ctrl51 exist.
+ *
+ * Query 12 is present if the has_query12 bit is set.
+ *
+ * @has_gapless_finger - control registers relating to gapless finger are
+ * present.
+ * @has_gapless_finger_tuning - additional control and data registers relating
+ * to gapless finger are present.
+ * @has_8bit_w - larger W value reporting is supported.
+ * @has_adjustable_mapping - TBD
+ * @has_info2 - the general info query14 is present
+ * @has_physical_props - additional queries describing the physical properties
+ * of the sensor are present.
+ * @has_finger_limit - indicates that F11 Ctrl 80 exists.
+ * @has_linear_coeff - indicates that F11 Ctrl 81 exists.
+ *
+ * Query 13 is present if Query 5's has_jitter_filter bit is set.
+ * @jitter_window_size - used by Design Studio 4.
+ * @jitter_filter_type - used by Design Studio 4.
+ *
+ * Query 14 is present if query 12's has_info2 flag is set.
+ *
+ * @light_control - Indicates what light/led control features are present, if
+ * any.
+ * @is_clear - if set, this is a clear sensor (indicating direct pointing
+ * application), otherwise it's opaque (indicating indirect pointing).
+ * @clickpad_props - specifies if this is a clickpad, and if so what sort of
+ * mechanism it uses
+ * @mouse_buttons - specifies the number of mouse buttons present (if any).
+ * @has_advanced_gestures - advanced driver gestures are supported.
+ */
+struct f11_2d_sensor_queries {
+ /* query1 */
+ u8 nr_fingers;
+ bool has_rel;
+ bool has_abs;
+ bool has_gestures;
+ bool has_sensitivity_adjust;
+ bool configurable;
+
+ /* query2 */
+ u8 nr_x_electrodes;
+
+ /* query3 */
+ u8 nr_y_electrodes;
+
+ /* query4 */
+ u8 max_electrodes;
+
+ /* query5 */
+ u8 abs_data_size;
+ bool has_anchored_finger;
+ bool has_adj_hyst;
+ bool has_dribble;
+ bool has_bending_correction;
+ bool has_large_object_suppression;
+ bool has_jitter_filter;
+
+ u8 f11_2d_query6;
+
+ /* query 7 */
+ bool has_single_tap;
+ bool has_tap_n_hold;
+ bool has_double_tap;
+ bool has_early_tap;
+ bool has_flick;
+ bool has_press;
+ bool has_pinch;
+ bool has_chiral;
+
+ bool query7_nonzero;
+
+ /* query 8 */
+ bool has_palm_det;
+ bool has_rotate;
+ bool has_touch_shapes;
+ bool has_scroll_zones;
+ bool has_individual_scroll_zones;
+ bool has_mf_scroll;
+ bool has_mf_edge_motion;
+ bool has_mf_scroll_inertia;
+
+ bool query8_nonzero;
+
+ /* Query 9 */
+ bool has_pen;
+ bool has_proximity;
+ bool has_palm_det_sensitivity;
+ bool has_suppress_on_palm_detect;
+ bool has_two_pen_thresholds;
+ bool has_contact_geometry;
+ bool has_pen_hover_discrimination;
+ bool has_pen_filters;
+
+ /* Query 10 */
+ u8 nr_touch_shapes;
+
+ /* Query 11. */
+ bool has_z_tuning;
+ bool has_algorithm_selection;
+ bool has_w_tuning;
+ bool has_pitch_info;
+ bool has_finger_size;
+ bool has_segmentation_aggressiveness;
+ bool has_XY_clip;
+ bool has_drumming_filter;
+
+ /* Query 12 */
+ bool has_gapless_finger;
+ bool has_gapless_finger_tuning;
+ bool has_8bit_w;
+ bool has_adjustable_mapping;
+ bool has_info2;
+ bool has_physical_props;
+ bool has_finger_limit;
+ bool has_linear_coeff_2;
+
+ /* Query 13 */
+ u8 jitter_window_size;
+ u8 jitter_filter_type;
+
+ /* Query 14 */
+ u8 light_control;
+ bool is_clear;
+ u8 clickpad_props;
+ u8 mouse_buttons;
+ bool has_advanced_gestures;
+
+ /* Query 15 - 18 */
+ u16 x_sensor_size_mm;
+ u16 y_sensor_size_mm;
+};
+
+/* Defs for Ctrl0. */
+#define RMI_F11_REPORT_MODE_MASK 0x07
+#define RMI_F11_ABS_POS_FILT (1 << 3)
+#define RMI_F11_REL_POS_FILT (1 << 4)
+#define RMI_F11_REL_BALLISTICS (1 << 5)
+#define RMI_F11_DRIBBLE (1 << 6)
+#define RMI_F11_REPORT_BEYOND_CLIP (1 << 7)
+
+/* Defs for Ctrl1. */
+#define RMI_F11_PALM_DETECT_THRESH_MASK 0x0F
+#define RMI_F11_MOTION_SENSITIVITY_MASK 0x30
+#define RMI_F11_MANUAL_TRACKING (1 << 6)
+#define RMI_F11_MANUAL_TRACKED_FINGER (1 << 7)
+
+#define RMI_F11_DELTA_X_THRESHOLD 2
+#define RMI_F11_DELTA_Y_THRESHOLD 3
+
+#define RMI_F11_CTRL_REG_COUNT 12
+
+struct f11_2d_ctrl {
+ u8 ctrl0_11[RMI_F11_CTRL_REG_COUNT];
+ u16 ctrl0_11_address;
+};
+
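+/*
+ * Each absolute position record is 5 bytes: X MSBs, Y MSBs, the packed
+ * X/Y LSB nibbles, the packed Wx/Wy nibbles, and Z. Each relative
+ * record is a signed delta X/delta Y pair.
+ */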
+#define RMI_F11_ABS_BYTES 5
+#define RMI_F11_REL_BYTES 2
+
+/* Defs for Data 8 */
+
+#define RMI_F11_SINGLE_TAP (1 << 0)
+#define RMI_F11_TAP_AND_HOLD (1 << 1)
+#define RMI_F11_DOUBLE_TAP (1 << 2)
+#define RMI_F11_EARLY_TAP (1 << 3)
+#define RMI_F11_FLICK (1 << 4)
+#define RMI_F11_PRESS (1 << 5)
+#define RMI_F11_PINCH (1 << 6)
+
+/* Defs for Data 9 */
+
+#define RMI_F11_PALM_DETECT (1 << 0)
+#define RMI_F11_ROTATE (1 << 1)
+#define RMI_F11_SHAPE (1 << 2)
+#define RMI_F11_SCROLLZONE (1 << 3)
+#define RMI_F11_GESTURE_FINGER_COUNT_MASK 0x70
+
+/** Handy pointers into our data buffer.
+ *
+ * @f_state - start of finger state registers.
+ * @abs_pos - start of absolute position registers (if present).
+ * @rel_pos - start of relative data registers (if present).
+ * @gest_1 - gesture flags (if present).
+ * @gest_2 - gesture flags & finger count (if present).
+ * @pinch - pinch motion register (if present).
+ * @flick - flick distance X & Y, flick time (if present).
+ * @rotate - rotate motion and finger separation.
+ * @multi_scroll - chiral deltas for X and Y (if present).
+ * @scroll_zones - scroll deltas for 4 regions (if present).
+ */
+struct f11_2d_data {
+ u8 *f_state;
+ u8 *abs_pos;
+ s8 *rel_pos;
+ u8 *gest_1;
+ u8 *gest_2;
+ s8 *pinch;
+ u8 *flick;
+ u8 *rotate;
+ u8 *shapes;
+ s8 *multi_scroll;
+ s8 *scroll_zones;
+};
+
+/** Data pertaining to F11 in general. For per-sensor data, see struct
+ * f11_2d_sensor.
+ *
+ * @dev_query - F11 device specific query registers.
+ * @dev_controls - F11 device specific control registers.
+ * @dev_controls_mutex - lock for the control registers.
+ * @rezero_wait_ms - if nonzero, upon resume we will wait this many
+ * milliseconds before rezeroing the sensor(s). This is useful in systems with
+ * poor electrical behavior on resume, where the initial calibration of the
+ * sensor(s) coming out of sleep state may be bogus.
+ * @sensors - per sensor data structures.
+ */
+struct f11_data {
+ bool has_query9;
+ bool has_query11;
+ bool has_query12;
+ bool has_query27;
+ bool has_query28;
+ bool has_acm;
+ struct f11_2d_ctrl dev_controls;
+ struct mutex dev_controls_mutex;
+ u16 rezero_wait_ms;
+ struct rmi_2d_sensor sensor;
+ struct f11_2d_sensor_queries sens_query;
+ struct f11_2d_data data;
+ struct rmi_2d_sensor_platform_data sensor_pdata;
+ unsigned long *abs_mask;
+ unsigned long *rel_mask;
+ unsigned long *result_bits;
+};
+
+enum f11_finger_state {
+ F11_NO_FINGER = 0x00,
+ F11_PRESENT = 0x01,
+ F11_INACCURATE = 0x02,
+ F11_RESERVED = 0x03
+};
+
+static void rmi_f11_rel_pos_report(struct f11_data *f11, u8 n_finger)
+{
+ struct rmi_2d_sensor *sensor = &f11->sensor;
+ struct f11_2d_data *data = &f11->data;
+ s8 x, y;
+
+ x = data->rel_pos[n_finger * 2];
+ y = data->rel_pos[n_finger * 2 + 1];
+
+ rmi_2d_sensor_rel_report(sensor, x, y);
+}
+
+static void rmi_f11_abs_pos_process(struct f11_data *f11,
+ struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ enum f11_finger_state finger_state,
+ u8 n_finger)
+{
+ struct f11_2d_data *data = &f11->data;
+ u8 *pos_data = &data->abs_pos[n_finger * RMI_F11_ABS_BYTES];
+ int tool_type = MT_TOOL_FINGER;
+
+ switch (finger_state) {
+ case F11_PRESENT:
+ obj->type = RMI_2D_OBJECT_FINGER;
+ break;
+ default:
+ obj->type = RMI_2D_OBJECT_NONE;
+ }
+
+ obj->mt_tool = tool_type;
+ obj->x = (pos_data[0] << 4) | (pos_data[2] & 0x0F);
+ obj->y = (pos_data[1] << 4) | (pos_data[2] >> 4);
+ obj->z = pos_data[4];
+ obj->wx = pos_data[3] & 0x0f;
+ obj->wy = pos_data[3] >> 4;
+
+ rmi_2d_sensor_abs_process(sensor, obj, n_finger);
+}
+
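+/*
+ * Finger states are packed two bits per finger, four fingers per
+ * f_state byte. For example, f_state[0] == 0x05 reports fingers 0 and 1
+ * as F11_PRESENT and fingers 2 and 3 as F11_NO_FINGER.
+ */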
+static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
+{
+ return (f_state[n_finger / 4] >> (2 * (n_finger % 4))) &
+ FINGER_STATE_MASK;
+}
+
+static void rmi_f11_finger_handler(struct f11_data *f11,
+ struct rmi_2d_sensor *sensor,
+ unsigned long *irq_bits, int num_irq_regs)
+{
+ const u8 *f_state = f11->data.f_state;
+ u8 finger_state;
+ u8 i;
+
+ int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
+ num_irq_regs * 8);
+ int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
+ num_irq_regs * 8);
+
+ for (i = 0; i < sensor->nbr_fingers; i++) {
+		/* A single f_state register holds the state of up to 4 fingers */
+ finger_state = rmi_f11_parse_finger_state(f_state, i);
+ if (finger_state == F11_RESERVED) {
+ pr_err("Invalid finger state[%d]: 0x%02x", i,
+ finger_state);
+ continue;
+ }
+
+ if (abs_bits)
+ rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
+ finger_state, i);
+
+ if (rel_bits)
+ rmi_f11_rel_pos_report(f11, i);
+ }
+
+ if (abs_bits) {
+ /*
+ * the absolute part is made in 2 parts to allow the kernel
+ * tracking to take place.
+ */
+ if (sensor->kernel_tracking)
+ input_mt_assign_slots(sensor->input,
+ sensor->tracking_slots,
+ sensor->tracking_pos,
+ sensor->nbr_fingers,
+ sensor->dmax);
+
+ for (i = 0; i < sensor->nbr_fingers; i++) {
+ finger_state = rmi_f11_parse_finger_state(f_state, i);
+ if (finger_state == F11_RESERVED)
+ /* no need to send twice the error */
+ continue;
+
+ rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
+ }
+
+ input_mt_sync_frame(sensor->input);
+ }
+}
+
+static int f11_2d_construct_data(struct f11_data *f11)
+{
+ struct rmi_2d_sensor *sensor = &f11->sensor;
+ struct f11_2d_sensor_queries *query = &f11->sens_query;
+ struct f11_2d_data *data = &f11->data;
+ int i;
+
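+	/* Query values 0-4 encode 1-5 fingers; the value 5 means 10 */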
+ sensor->nbr_fingers = (query->nr_fingers == 5 ? 10 :
+ query->nr_fingers + 1);
+
+ sensor->pkt_size = DIV_ROUND_UP(sensor->nbr_fingers, 4);
+
+ if (query->has_abs) {
+ sensor->pkt_size += (sensor->nbr_fingers * 5);
+ sensor->attn_size = sensor->pkt_size;
+ }
+
+ if (query->has_rel)
+ sensor->pkt_size += (sensor->nbr_fingers * 2);
+
+ /* Check if F11_2D_Query7 is non-zero */
+ if (query->query7_nonzero)
+ sensor->pkt_size += sizeof(u8);
+
+ /* Check if F11_2D_Query7 or F11_2D_Query8 is non-zero */
+ if (query->query7_nonzero || query->query8_nonzero)
+ sensor->pkt_size += sizeof(u8);
+
+ if (query->has_pinch || query->has_flick || query->has_rotate) {
+ sensor->pkt_size += 3;
+ if (!query->has_flick)
+ sensor->pkt_size--;
+ if (!query->has_rotate)
+ sensor->pkt_size--;
+ }
+
+ if (query->has_touch_shapes)
+ sensor->pkt_size +=
+ DIV_ROUND_UP(query->nr_touch_shapes + 1, 8);
+
+ sensor->data_pkt = devm_kzalloc(&sensor->fn->dev, sensor->pkt_size,
+ GFP_KERNEL);
+ if (!sensor->data_pkt)
+ return -ENOMEM;
+
+ data->f_state = sensor->data_pkt;
+ i = DIV_ROUND_UP(sensor->nbr_fingers, 4);
+
+ if (query->has_abs) {
+ data->abs_pos = &sensor->data_pkt[i];
+ i += (sensor->nbr_fingers * RMI_F11_ABS_BYTES);
+ }
+
+ if (query->has_rel) {
+ data->rel_pos = &sensor->data_pkt[i];
+ i += (sensor->nbr_fingers * RMI_F11_REL_BYTES);
+ }
+
+ if (query->query7_nonzero) {
+ data->gest_1 = &sensor->data_pkt[i];
+ i++;
+ }
+
+ if (query->query7_nonzero || query->query8_nonzero) {
+ data->gest_2 = &sensor->data_pkt[i];
+ i++;
+ }
+
+ if (query->has_pinch) {
+ data->pinch = &sensor->data_pkt[i];
+ i++;
+ }
+
+ if (query->has_flick) {
+ if (query->has_pinch) {
+ data->flick = data->pinch;
+ i += 2;
+ } else {
+ data->flick = &sensor->data_pkt[i];
+ i += 3;
+ }
+ }
+
+ if (query->has_rotate) {
+ if (query->has_flick) {
+ data->rotate = data->flick + 1;
+ } else {
+ data->rotate = &sensor->data_pkt[i];
+ i += 2;
+ }
+ }
+
+ if (query->has_touch_shapes)
+ data->shapes = &sensor->data_pkt[i];
+
+ return 0;
+}
+
+static int f11_read_control_regs(struct rmi_function *fn,
+		struct f11_2d_ctrl *ctrl, u16 ctrl_base_addr)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error = 0;
+
+ ctrl->ctrl0_11_address = ctrl_base_addr;
+ error = rmi_read_block(rmi_dev, ctrl_base_addr, ctrl->ctrl0_11,
+ RMI_F11_CTRL_REG_COUNT);
+ if (error < 0) {
+ dev_err(&fn->dev, "Failed to read ctrl0, code: %d.\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int f11_write_control_regs(struct rmi_function *fn,
+ struct f11_2d_sensor_queries *query,
+ struct f11_2d_ctrl *ctrl,
+ u16 ctrl_base_addr)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error;
+
+ error = rmi_write_block(rmi_dev, ctrl_base_addr, ctrl->ctrl0_11,
+ RMI_F11_CTRL_REG_COUNT);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static int rmi_f11_get_query_parameters(struct rmi_device *rmi_dev,
+ struct f11_data *f11,
+ struct f11_2d_sensor_queries *sensor_query,
+ u16 query_base_addr)
+{
+ int query_size;
+ int rc;
+ u8 query_buf[RMI_F11_QUERY_SIZE];
+ bool has_query36 = false;
+
+ rc = rmi_read_block(rmi_dev, query_base_addr, query_buf,
+ RMI_F11_QUERY_SIZE);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->nr_fingers = query_buf[0] & RMI_F11_NR_FINGERS_MASK;
+ sensor_query->has_rel = !!(query_buf[0] & RMI_F11_HAS_REL);
+ sensor_query->has_abs = !!(query_buf[0] & RMI_F11_HAS_ABS);
+ sensor_query->has_gestures = !!(query_buf[0] & RMI_F11_HAS_GESTURES);
+ sensor_query->has_sensitivity_adjust =
+ !!(query_buf[0] & RMI_F11_HAS_SENSITIVITY_ADJ);
+ sensor_query->configurable = !!(query_buf[0] & RMI_F11_CONFIGURABLE);
+
+ sensor_query->nr_x_electrodes =
+ query_buf[1] & RMI_F11_NR_ELECTRODES_MASK;
+ sensor_query->nr_y_electrodes =
+ query_buf[2] & RMI_F11_NR_ELECTRODES_MASK;
+ sensor_query->max_electrodes =
+ query_buf[3] & RMI_F11_NR_ELECTRODES_MASK;
+
+ query_size = RMI_F11_QUERY_SIZE;
+
+ if (sensor_query->has_abs) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->abs_data_size =
+ query_buf[0] & RMI_F11_ABS_DATA_SIZE_MASK;
+ sensor_query->has_anchored_finger =
+ !!(query_buf[0] & RMI_F11_HAS_ANCHORED_FINGER);
+ sensor_query->has_adj_hyst =
+ !!(query_buf[0] & RMI_F11_HAS_ADJ_HYST);
+ sensor_query->has_dribble =
+ !!(query_buf[0] & RMI_F11_HAS_DRIBBLE);
+ sensor_query->has_bending_correction =
+ !!(query_buf[0] & RMI_F11_HAS_BENDING_CORRECTION);
+ sensor_query->has_large_object_suppression =
+ !!(query_buf[0] & RMI_F11_HAS_LARGE_OBJECT_SUPPRESSION);
+ sensor_query->has_jitter_filter =
+ !!(query_buf[0] & RMI_F11_HAS_JITTER_FILTER);
+ query_size++;
+ }
+
+ if (sensor_query->has_rel) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size,
+ &sensor_query->f11_2d_query6);
+ if (rc < 0)
+ return rc;
+ query_size++;
+ }
+
+ if (sensor_query->has_gestures) {
+ rc = rmi_read_block(rmi_dev, query_base_addr + query_size,
+ query_buf, RMI_F11_QUERY_GESTURE_SIZE);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->has_single_tap =
+ !!(query_buf[0] & RMI_F11_HAS_SINGLE_TAP);
+ sensor_query->has_tap_n_hold =
+ !!(query_buf[0] & RMI_F11_HAS_TAP_AND_HOLD);
+ sensor_query->has_double_tap =
+ !!(query_buf[0] & RMI_F11_HAS_DOUBLE_TAP);
+ sensor_query->has_early_tap =
+ !!(query_buf[0] & RMI_F11_HAS_EARLY_TAP);
+ sensor_query->has_flick =
+ !!(query_buf[0] & RMI_F11_HAS_FLICK);
+ sensor_query->has_press =
+ !!(query_buf[0] & RMI_F11_HAS_PRESS);
+ sensor_query->has_pinch =
+ !!(query_buf[0] & RMI_F11_HAS_PINCH);
+ sensor_query->has_chiral =
+ !!(query_buf[0] & RMI_F11_HAS_CHIRAL);
+
+ /* query 8 */
+ sensor_query->has_palm_det =
+ !!(query_buf[1] & RMI_F11_HAS_PALM_DET);
+ sensor_query->has_rotate =
+ !!(query_buf[1] & RMI_F11_HAS_ROTATE);
+ sensor_query->has_touch_shapes =
+ !!(query_buf[1] & RMI_F11_HAS_TOUCH_SHAPES);
+ sensor_query->has_scroll_zones =
+ !!(query_buf[1] & RMI_F11_HAS_SCROLL_ZONES);
+ sensor_query->has_individual_scroll_zones =
+ !!(query_buf[1] & RMI_F11_HAS_INDIVIDUAL_SCROLL_ZONES);
+ sensor_query->has_mf_scroll =
+ !!(query_buf[1] & RMI_F11_HAS_MF_SCROLL);
+ sensor_query->has_mf_edge_motion =
+ !!(query_buf[1] & RMI_F11_HAS_MF_EDGE_MOTION);
+ sensor_query->has_mf_scroll_inertia =
+ !!(query_buf[1] & RMI_F11_HAS_MF_SCROLL_INERTIA);
+
+ sensor_query->query7_nonzero = !!(query_buf[0]);
+ sensor_query->query8_nonzero = !!(query_buf[1]);
+
+ query_size += 2;
+ }
+
+ if (f11->has_query9) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->has_pen =
+ !!(query_buf[0] & RMI_F11_HAS_PEN);
+ sensor_query->has_proximity =
+ !!(query_buf[0] & RMI_F11_HAS_PROXIMITY);
+ sensor_query->has_palm_det_sensitivity =
+ !!(query_buf[0] & RMI_F11_HAS_PALM_DET_SENSITIVITY);
+ sensor_query->has_suppress_on_palm_detect =
+ !!(query_buf[0] & RMI_F11_HAS_SUPPRESS_ON_PALM_DETECT);
+ sensor_query->has_two_pen_thresholds =
+ !!(query_buf[0] & RMI_F11_HAS_TWO_PEN_THRESHOLDS);
+ sensor_query->has_contact_geometry =
+ !!(query_buf[0] & RMI_F11_HAS_CONTACT_GEOMETRY);
+ sensor_query->has_pen_hover_discrimination =
+ !!(query_buf[0] & RMI_F11_HAS_PEN_HOVER_DISCRIMINATION);
+ sensor_query->has_pen_filters =
+ !!(query_buf[0] & RMI_F11_HAS_PEN_FILTERS);
+
+ query_size++;
+ }
+
+ if (sensor_query->has_touch_shapes) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->nr_touch_shapes = query_buf[0] &
+ RMI_F11_NR_TOUCH_SHAPES_MASK;
+
+ query_size++;
+ }
+
+ if (f11->has_query11) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->has_z_tuning =
+ !!(query_buf[0] & RMI_F11_HAS_Z_TUNING);
+ sensor_query->has_algorithm_selection =
+ !!(query_buf[0] & RMI_F11_HAS_ALGORITHM_SELECTION);
+ sensor_query->has_w_tuning =
+ !!(query_buf[0] & RMI_F11_HAS_W_TUNING);
+ sensor_query->has_pitch_info =
+ !!(query_buf[0] & RMI_F11_HAS_PITCH_INFO);
+ sensor_query->has_finger_size =
+ !!(query_buf[0] & RMI_F11_HAS_FINGER_SIZE);
+ sensor_query->has_segmentation_aggressiveness =
+ !!(query_buf[0] &
+ RMI_F11_HAS_SEGMENTATION_AGGRESSIVENESS);
+ sensor_query->has_XY_clip =
+ !!(query_buf[0] & RMI_F11_HAS_XY_CLIP);
+ sensor_query->has_drumming_filter =
+ !!(query_buf[0] & RMI_F11_HAS_DRUMMING_FILTER);
+
+ query_size++;
+ }
+
+ if (f11->has_query12) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->has_gapless_finger =
+ !!(query_buf[0] & RMI_F11_HAS_GAPLESS_FINGER);
+ sensor_query->has_gapless_finger_tuning =
+ !!(query_buf[0] & RMI_F11_HAS_GAPLESS_FINGER_TUNING);
+ sensor_query->has_8bit_w =
+ !!(query_buf[0] & RMI_F11_HAS_8BIT_W);
+ sensor_query->has_adjustable_mapping =
+ !!(query_buf[0] & RMI_F11_HAS_ADJUSTABLE_MAPPING);
+ sensor_query->has_info2 =
+ !!(query_buf[0] & RMI_F11_HAS_INFO2);
+ sensor_query->has_physical_props =
+ !!(query_buf[0] & RMI_F11_HAS_PHYSICAL_PROPS);
+ sensor_query->has_finger_limit =
+ !!(query_buf[0] & RMI_F11_HAS_FINGER_LIMIT);
+ sensor_query->has_linear_coeff_2 =
+ !!(query_buf[0] & RMI_F11_HAS_LINEAR_COEFF);
+
+ query_size++;
+ }
+
+ if (sensor_query->has_jitter_filter) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->jitter_window_size = query_buf[0] &
+ RMI_F11_JITTER_WINDOW_MASK;
+ sensor_query->jitter_filter_type = (query_buf[0] &
+ RMI_F11_JITTER_FILTER_MASK) >>
+ RMI_F11_JITTER_FILTER_SHIFT;
+
+ query_size++;
+ }
+
+ if (sensor_query->has_info2) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->light_control =
+ query_buf[0] & RMI_F11_LIGHT_CONTROL_MASK;
+ sensor_query->is_clear =
+ !!(query_buf[0] & RMI_F11_IS_CLEAR);
+ sensor_query->clickpad_props =
+ (query_buf[0] & RMI_F11_CLICKPAD_PROPS_MASK) >>
+ RMI_F11_CLICKPAD_PROPS_SHIFT;
+ sensor_query->mouse_buttons =
+ (query_buf[0] & RMI_F11_MOUSE_BUTTONS_MASK) >>
+ RMI_F11_MOUSE_BUTTONS_SHIFT;
+ sensor_query->has_advanced_gestures =
+ !!(query_buf[0] & RMI_F11_HAS_ADVANCED_GESTURES);
+
+ query_size++;
+ }
+
+ if (sensor_query->has_physical_props) {
+ rc = rmi_read_block(rmi_dev, query_base_addr
+ + query_size, query_buf, 4);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->x_sensor_size_mm =
+ (query_buf[0] | (query_buf[1] << 8)) / 10;
+ sensor_query->y_sensor_size_mm =
+ (query_buf[2] | (query_buf[3] << 8)) / 10;
+
+ /*
+ * query 15 - 18 contain the size of the sensor
+ * and query 19 - 26 contain bezel dimensions
+ */
+ query_size += 12;
+ }
+
+ if (f11->has_query27)
+ ++query_size;
+
+ if (f11->has_query28) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size,
+ query_buf);
+ if (rc < 0)
+ return rc;
+
+ has_query36 = !!(query_buf[0] & BIT(6));
+ }
+
+ if (has_query36) {
+ query_size += 2;
+ rc = rmi_read(rmi_dev, query_base_addr + query_size,
+ query_buf);
+ if (rc < 0)
+ return rc;
+
+ if (!!(query_buf[0] & BIT(5)))
+ f11->has_acm = true;
+ }
+
+ return query_size;
+}
+
+static int rmi_f11_initialize(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct f11_data *f11;
+ struct f11_2d_ctrl *ctrl;
+ u8 query_offset;
+ u16 query_base_addr;
+ u16 control_base_addr;
+ u16 max_x_pos, max_y_pos;
+ int rc;
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_2d_sensor *sensor;
+ u8 buf;
+ int mask_size;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Initializing F11 values.\n");
+
+ mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
+
+	/*
+	 * Allocate the instance data plus room for the three IRQ bitmasks
+	 * that follow it.
+	 */
+ f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 3,
+ GFP_KERNEL);
+ if (!f11)
+ return -ENOMEM;
+
+ if (fn->dev.of_node) {
+ rc = rmi_2d_sensor_of_probe(&fn->dev, &f11->sensor_pdata);
+ if (rc)
+ return rc;
+ } else if (pdata->sensor_pdata) {
+ f11->sensor_pdata = *pdata->sensor_pdata;
+ }
+
+ f11->rezero_wait_ms = f11->sensor_pdata.rezero_wait;
+
+ f11->abs_mask = (unsigned long *)((char *)f11
+ + sizeof(struct f11_data));
+ f11->rel_mask = (unsigned long *)((char *)f11
+ + sizeof(struct f11_data) + mask_size);
+ f11->result_bits = (unsigned long *)((char *)f11
+ + sizeof(struct f11_data) + mask_size * 2);
+
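+	/*
+	 * F11 reports absolute data on the function's first interrupt bit;
+	 * relative data, when present, uses the following bit.
+	 */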
+ set_bit(fn->irq_pos, f11->abs_mask);
+ set_bit(fn->irq_pos + 1, f11->rel_mask);
+
+ query_base_addr = fn->fd.query_base_addr;
+ control_base_addr = fn->fd.control_base_addr;
+
+ rc = rmi_read(rmi_dev, query_base_addr, &buf);
+ if (rc < 0)
+ return rc;
+
+ f11->has_query9 = !!(buf & RMI_F11_HAS_QUERY9);
+ f11->has_query11 = !!(buf & RMI_F11_HAS_QUERY11);
+ f11->has_query12 = !!(buf & RMI_F11_HAS_QUERY12);
+ f11->has_query27 = !!(buf & RMI_F11_HAS_QUERY27);
+ f11->has_query28 = !!(buf & RMI_F11_HAS_QUERY28);
+
+ query_offset = (query_base_addr + 1);
+ sensor = &f11->sensor;
+ sensor->fn = fn;
+
+ rc = rmi_f11_get_query_parameters(rmi_dev, f11,
+ &f11->sens_query, query_offset);
+ if (rc < 0)
+ return rc;
+ query_offset += rc;
+
+ rc = f11_read_control_regs(fn, &f11->dev_controls,
+ control_base_addr);
+ if (rc < 0) {
+ dev_err(&fn->dev,
+ "Failed to read F11 control params.\n");
+ return rc;
+ }
+
+ if (f11->sens_query.has_info2) {
+ if (f11->sens_query.is_clear)
+ f11->sensor.sensor_type = rmi_sensor_touchscreen;
+ else
+ f11->sensor.sensor_type = rmi_sensor_touchpad;
+ }
+
+ sensor->report_abs = f11->sens_query.has_abs;
+
+ sensor->axis_align =
+ f11->sensor_pdata.axis_align;
+
+ sensor->topbuttonpad = f11->sensor_pdata.topbuttonpad;
+ sensor->kernel_tracking = f11->sensor_pdata.kernel_tracking;
+ sensor->dmax = f11->sensor_pdata.dmax;
+
+ if (f11->sens_query.has_physical_props) {
+ sensor->x_mm = f11->sens_query.x_sensor_size_mm;
+ sensor->y_mm = f11->sens_query.y_sensor_size_mm;
+ } else {
+ sensor->x_mm = f11->sensor_pdata.x_mm;
+ sensor->y_mm = f11->sensor_pdata.y_mm;
+ }
+
+ if (sensor->sensor_type == rmi_sensor_default)
+ sensor->sensor_type =
+ f11->sensor_pdata.sensor_type;
+
+ sensor->report_abs = sensor->report_abs
+ && !(f11->sensor_pdata.disable_report_mask
+ & RMI_F11_DISABLE_ABS_REPORT);
+
+ if (!sensor->report_abs)
+ /*
+		 * If the device doesn't have abs or if it has been disabled,
+		 * fall back to reporting rel data.
+ */
+ sensor->report_rel = f11->sens_query.has_rel;
+
+ rc = rmi_read_block(rmi_dev,
+ control_base_addr + F11_CTRL_SENSOR_MAX_X_POS_OFFSET,
+ (u8 *)&max_x_pos, sizeof(max_x_pos));
+ if (rc < 0)
+ return rc;
+
+ rc = rmi_read_block(rmi_dev,
+ control_base_addr + F11_CTRL_SENSOR_MAX_Y_POS_OFFSET,
+ (u8 *)&max_y_pos, sizeof(max_y_pos));
+ if (rc < 0)
+ return rc;
+
+ sensor->max_x = max_x_pos;
+ sensor->max_y = max_y_pos;
+
+ rc = f11_2d_construct_data(f11);
+ if (rc < 0)
+ return rc;
+
+ if (f11->has_acm)
+ f11->sensor.attn_size += f11->sensor.nbr_fingers * 2;
+
+ /* allocate the in-kernel tracking buffers */
+ sensor->tracking_pos = devm_kzalloc(&fn->dev,
+ sizeof(struct input_mt_pos) * sensor->nbr_fingers,
+ GFP_KERNEL);
+ sensor->tracking_slots = devm_kzalloc(&fn->dev,
+ sizeof(int) * sensor->nbr_fingers, GFP_KERNEL);
+ sensor->objs = devm_kzalloc(&fn->dev,
+ sizeof(struct rmi_2d_sensor_abs_object)
+ * sensor->nbr_fingers, GFP_KERNEL);
+ if (!sensor->tracking_pos || !sensor->tracking_slots || !sensor->objs)
+ return -ENOMEM;
+
+ ctrl = &f11->dev_controls;
+ if (sensor->axis_align.delta_x_threshold)
+ ctrl->ctrl0_11[RMI_F11_DELTA_X_THRESHOLD] =
+ sensor->axis_align.delta_x_threshold;
+
+ if (sensor->axis_align.delta_y_threshold)
+ ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD] =
+ sensor->axis_align.delta_y_threshold;
+
+ if (f11->sens_query.has_dribble)
+ ctrl->ctrl0_11[0] = ctrl->ctrl0_11[0] & ~BIT(6);
+
+ if (f11->sens_query.has_palm_det)
+ ctrl->ctrl0_11[11] = ctrl->ctrl0_11[11] & ~BIT(0);
+
+ rc = f11_write_control_regs(fn, &f11->sens_query,
+			&f11->dev_controls, fn->fd.control_base_addr);
+ if (rc)
+ dev_warn(&fn->dev, "Failed to write control registers\n");
+
+ mutex_init(&f11->dev_controls_mutex);
+
+ dev_set_drvdata(&fn->dev, f11);
+
+ return 0;
+}
+
+static int rmi_f11_config(struct rmi_function *fn)
+{
+ struct f11_data *f11 = dev_get_drvdata(&fn->dev);
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+ struct rmi_2d_sensor *sensor = &f11->sensor;
+ int rc;
+
+ if (!sensor->report_abs)
+ drv->clear_irq_bits(fn->rmi_dev, f11->abs_mask);
+ else
+ drv->set_irq_bits(fn->rmi_dev, f11->abs_mask);
+
+ if (!sensor->report_rel)
+ drv->clear_irq_bits(fn->rmi_dev, f11->rel_mask);
+ else
+ drv->set_irq_bits(fn->rmi_dev, f11->rel_mask);
+
+ rc = f11_write_control_regs(fn, &f11->sens_query,
+			&f11->dev_controls, fn->fd.control_base_addr);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct f11_data *f11 = dev_get_drvdata(&fn->dev);
+ u16 data_base_addr = fn->fd.data_base_addr;
+ u16 data_base_addr_offset = 0;
+ int error;
+
+ if (rmi_dev->xport->attn_data) {
+ memcpy(f11->sensor.data_pkt, rmi_dev->xport->attn_data,
+ f11->sensor.attn_size);
+ rmi_dev->xport->attn_data += f11->sensor.attn_size;
+ rmi_dev->xport->attn_size -= f11->sensor.attn_size;
+ } else {
+ error = rmi_read_block(rmi_dev,
+ data_base_addr + data_base_addr_offset,
+ f11->sensor.data_pkt,
+ f11->sensor.pkt_size);
+ if (error < 0)
+ return error;
+ }
+
+ rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
+ drvdata->num_of_irq_regs);
+ data_base_addr_offset += f11->sensor.pkt_size;
+
+ return 0;
+}
+
+static int rmi_f11_resume(struct rmi_function *fn)
+{
+ struct f11_data *f11 = dev_get_drvdata(&fn->dev);
+ int error;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Resuming...\n");
+ if (!f11->rezero_wait_ms)
+ return 0;
+
+ mdelay(f11->rezero_wait_ms);
+
+ error = rmi_write(fn->rmi_dev, fn->fd.command_base_addr,
+ RMI_F11_REZERO);
+ if (error) {
+ dev_err(&fn->dev,
+ "%s: failed to issue rezero command, error = %d.",
+ __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f11_probe(struct rmi_function *fn)
+{
+ int error;
+ struct f11_data *f11;
+
+ error = rmi_f11_initialize(fn);
+ if (error)
+ return error;
+
+ f11 = dev_get_drvdata(&fn->dev);
+ error = rmi_2d_sensor_configure_input(fn, &f11->sensor);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f11_handler = {
+ .driver = {
+ .name = "rmi4_f11",
+ },
+ .func = 0x11,
+ .probe = rmi_f11_probe,
+ .config = rmi_f11_config,
+ .attention = rmi_f11_attention,
+ .resume = rmi_f11_resume,
+};
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
new file mode 100644
index 000000000000..8dd3fb5e1f94
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2012-2016 Synaptics Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/rmi.h>
+#include "rmi_driver.h"
+#include "rmi_2d_sensor.h"
+
+enum rmi_f12_object_type {
+ RMI_F12_OBJECT_NONE = 0x00,
+ RMI_F12_OBJECT_FINGER = 0x01,
+ RMI_F12_OBJECT_STYLUS = 0x02,
+ RMI_F12_OBJECT_PALM = 0x03,
+ RMI_F12_OBJECT_UNCLASSIFIED = 0x04,
+ RMI_F12_OBJECT_GLOVED_FINGER = 0x06,
+ RMI_F12_OBJECT_NARROW_OBJECT = 0x07,
+ RMI_F12_OBJECT_HAND_EDGE = 0x08,
+ RMI_F12_OBJECT_COVER = 0x0A,
+ RMI_F12_OBJECT_STYLUS_2 = 0x0B,
+ RMI_F12_OBJECT_ERASER = 0x0C,
+ RMI_F12_OBJECT_SMALL_OBJECT = 0x0D,
+};
+
+struct f12_data {
+ struct rmi_function *fn;
+ struct rmi_2d_sensor sensor;
+ struct rmi_2d_sensor_platform_data sensor_pdata;
+
+ u16 data_addr;
+
+ struct rmi_register_descriptor query_reg_desc;
+ struct rmi_register_descriptor control_reg_desc;
+ struct rmi_register_descriptor data_reg_desc;
+
+ /* F12 Data1 describes sensed objects */
+ const struct rmi_register_desc_item *data1;
+ u16 data1_offset;
+
+ /* F12 Data5 describes finger ACM */
+ const struct rmi_register_desc_item *data5;
+ u16 data5_offset;
+
+	/* F12 Data6 describes Pen */
+ const struct rmi_register_desc_item *data6;
+ u16 data6_offset;
+
+ /* F12 Data9 reports relative data */
+ const struct rmi_register_desc_item *data9;
+ u16 data9_offset;
+
+ const struct rmi_register_desc_item *data15;
+ u16 data15_offset;
+};
+
+static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
+{
+ const struct rmi_register_desc_item *item;
+ struct rmi_2d_sensor *sensor = &f12->sensor;
+ struct rmi_function *fn = sensor->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int ret;
+ int offset;
+ u8 buf[14];
+ int pitch_x = 0;
+ int pitch_y = 0;
+ int rx_receivers = 0;
+ int tx_receivers = 0;
+ int sensor_flags = 0;
+
+ item = rmi_get_register_desc_item(&f12->control_reg_desc, 8);
+ if (!item) {
+ dev_err(&fn->dev,
+ "F12 does not have the sensor tuning control register\n");
+ return -ENODEV;
+ }
+
+ offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
+
+ if (item->reg_size > 14) {
+		dev_err(&fn->dev, "F12 control8 should be no bigger than 14 bytes, not: %ld\n",
+ item->reg_size);
+ return -ENODEV;
+ }
+
+ ret = rmi_read_block(rmi_dev, fn->fd.control_base_addr + offset, buf,
+ item->reg_size);
+ if (ret)
+ return ret;
+
+ offset = 0;
+ if (rmi_register_desc_has_subpacket(item, 0)) {
+ sensor->max_x = (buf[offset + 1] << 8) | buf[offset];
+ sensor->max_y = (buf[offset + 3] << 8) | buf[offset + 2];
+ offset += 4;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: max_x: %d max_y: %d\n", __func__,
+ sensor->max_x, sensor->max_y);
+
+ if (rmi_register_desc_has_subpacket(item, 1)) {
+ pitch_x = (buf[offset + 1] << 8) | buf[offset];
+ pitch_y = (buf[offset + 3] << 8) | buf[offset + 2];
+ offset += 4;
+ }
+
+ if (rmi_register_desc_has_subpacket(item, 2)) {
+ sensor->axis_align.clip_x_low = buf[offset];
+ sensor->axis_align.clip_x_high = sensor->max_x
+ - buf[offset + 1];
+ sensor->axis_align.clip_y_low = buf[offset + 2];
+ sensor->axis_align.clip_y_high = sensor->max_y
+ - buf[offset + 3];
+ offset += 4;
+ }
+
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x low: %d x high: %d y low: %d y high: %d\n",
+		__func__, sensor->axis_align.clip_x_low,
+		sensor->axis_align.clip_x_high,
+		sensor->axis_align.clip_y_low,
+		sensor->axis_align.clip_y_high);
+
+ if (rmi_register_desc_has_subpacket(item, 3)) {
+ rx_receivers = buf[offset];
+ tx_receivers = buf[offset + 1];
+ offset += 2;
+ }
+
+ if (rmi_register_desc_has_subpacket(item, 4)) {
+ sensor_flags = buf[offset];
+ offset += 1;
+ }
+
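+	/*
+	 * The pitch values appear to carry 12 fractional bits, so the
+	 * pitch * electrode-count products are shifted back down to whole
+	 * millimeters.
+	 */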
+ sensor->x_mm = (pitch_x * rx_receivers) >> 12;
+ sensor->y_mm = (pitch_y * tx_receivers) >> 12;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x_mm: %d y_mm: %d\n", __func__,
+ sensor->x_mm, sensor->y_mm);
+
+ return 0;
+}
+
+static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
+{
+ int i;
+ struct rmi_2d_sensor *sensor = &f12->sensor;
+
+ for (i = 0; i < f12->data1->num_subpackets; i++) {
+ struct rmi_2d_sensor_abs_object *obj = &sensor->objs[i];
+
+ obj->type = RMI_2D_OBJECT_NONE;
+ obj->mt_tool = MT_TOOL_FINGER;
+
+ switch (data1[0]) {
+ case RMI_F12_OBJECT_FINGER:
+ obj->type = RMI_2D_OBJECT_FINGER;
+ break;
+ case RMI_F12_OBJECT_STYLUS:
+ obj->type = RMI_2D_OBJECT_STYLUS;
+ obj->mt_tool = MT_TOOL_PEN;
+ break;
+ case RMI_F12_OBJECT_PALM:
+ obj->type = RMI_2D_OBJECT_PALM;
+ obj->mt_tool = MT_TOOL_PALM;
+ break;
+ case RMI_F12_OBJECT_UNCLASSIFIED:
+ obj->type = RMI_2D_OBJECT_UNCLASSIFIED;
+ break;
+ }
+
+ obj->x = (data1[2] << 8) | data1[1];
+ obj->y = (data1[4] << 8) | data1[3];
+ obj->z = data1[5];
+ obj->wx = data1[6];
+ obj->wy = data1[7];
+
+ rmi_2d_sensor_abs_process(sensor, obj, i);
+
+ data1 += 8;
+ }
+
+ if (sensor->kernel_tracking)
+ input_mt_assign_slots(sensor->input,
+ sensor->tracking_slots,
+ sensor->tracking_pos,
+ sensor->nbr_fingers,
+ sensor->dmax);
+
+ for (i = 0; i < sensor->nbr_fingers; i++)
+ rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
+}
+
+static int rmi_f12_attention(struct rmi_function *fn,
+ unsigned long *irq_nr_regs)
+{
+ int retval;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+ struct rmi_2d_sensor *sensor = &f12->sensor;
+
+ if (rmi_dev->xport->attn_data) {
+ memcpy(sensor->data_pkt, rmi_dev->xport->attn_data,
+ sensor->attn_size);
+ rmi_dev->xport->attn_data += sensor->attn_size;
+ rmi_dev->xport->attn_size -= sensor->attn_size;
+ } else {
+ retval = rmi_read_block(rmi_dev, f12->data_addr,
+ sensor->data_pkt, sensor->pkt_size);
+ if (retval < 0) {
+ dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
+ retval);
+ return retval;
+ }
+ }
+
+ if (f12->data1)
+ rmi_f12_process_objects(f12,
+ &sensor->data_pkt[f12->data1_offset]);
+
+ input_mt_sync_frame(sensor->input);
+
+ return 0;
+}
+
+static int rmi_f12_config(struct rmi_function *fn)
+{
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f12_probe(struct rmi_function *fn)
+{
+ struct f12_data *f12;
+ int ret;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+	u8 buf;
+ u16 query_addr = fn->fd.query_base_addr;
+ const struct rmi_register_desc_item *item;
+ struct rmi_2d_sensor *sensor;
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_transport_dev *xport = rmi_dev->xport;
+ u16 data_offset = 0;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
+
+ ret = rmi_read(fn->rmi_dev, query_addr, &buf);
+ if (ret < 0) {
+ dev_err(&fn->dev, "Failed to read general info register: %d\n",
+ ret);
+ return -ENODEV;
+ }
+ ++query_addr;
+
+ if (!(buf & 0x1)) {
+ dev_err(&fn->dev,
+ "Behavior of F12 without register descriptors is undefined.\n");
+ return -ENODEV;
+ }
+
+ f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
+ if (!f12)
+ return -ENOMEM;
+
+ if (fn->dev.of_node) {
+ ret = rmi_2d_sensor_of_probe(&fn->dev, &f12->sensor_pdata);
+ if (ret)
+ return ret;
+ } else if (pdata->sensor_pdata) {
+ f12->sensor_pdata = *pdata->sensor_pdata;
+ }
+
+ ret = rmi_read_register_desc(rmi_dev, query_addr,
+ &f12->query_reg_desc);
+ if (ret) {
+ dev_err(&fn->dev,
+ "Failed to read the Query Register Descriptor: %d\n",
+ ret);
+ return ret;
+ }
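+	/* Each register descriptor appears to occupy three query registers */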
+ query_addr += 3;
+
+ ret = rmi_read_register_desc(rmi_dev, query_addr,
+ &f12->control_reg_desc);
+ if (ret) {
+ dev_err(&fn->dev,
+ "Failed to read the Control Register Descriptor: %d\n",
+ ret);
+ return ret;
+ }
+ query_addr += 3;
+
+ ret = rmi_read_register_desc(rmi_dev, query_addr,
+ &f12->data_reg_desc);
+ if (ret) {
+ dev_err(&fn->dev,
+ "Failed to read the Data Register Descriptor: %d\n",
+ ret);
+ return ret;
+ }
+ query_addr += 3;
+
+ sensor = &f12->sensor;
+ sensor->fn = fn;
+ f12->data_addr = fn->fd.data_base_addr;
+ sensor->pkt_size = rmi_register_desc_calc_size(&f12->data_reg_desc);
+
+ sensor->axis_align =
+ f12->sensor_pdata.axis_align;
+
+ sensor->x_mm = f12->sensor_pdata.x_mm;
+ sensor->y_mm = f12->sensor_pdata.y_mm;
+
+ if (sensor->sensor_type == rmi_sensor_default)
+ sensor->sensor_type =
+ f12->sensor_pdata.sensor_type;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: data packet size: %d\n", __func__,
+ sensor->pkt_size);
+ sensor->data_pkt = devm_kzalloc(&fn->dev, sensor->pkt_size, GFP_KERNEL);
+ if (!sensor->data_pkt)
+ return -ENOMEM;
+
+ dev_set_drvdata(&fn->dev, f12);
+
+ ret = rmi_f12_read_sensor_tuning(f12);
+ if (ret)
+ return ret;
+
+	/*
+	 * Figure out what data is contained in the data registers. HID devices
+	 * may have registers defined, but their data is not reported in the
+	 * HID attention report. For registers that are not part of the HID
+	 * attention report, only account for their size when the data is read
+	 * directly from the device rather than taken from an attention report.
+	 */
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 0);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 1);
+ if (item) {
+ f12->data1 = item;
+ f12->data1_offset = data_offset;
+ data_offset += item->reg_size;
+ sensor->nbr_fingers = item->num_subpackets;
+ sensor->report_abs = 1;
+ sensor->attn_size += item->reg_size;
+ }
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 2);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 3);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 4);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 5);
+ if (item) {
+ f12->data5 = item;
+ f12->data5_offset = data_offset;
+ data_offset += item->reg_size;
+ sensor->attn_size += item->reg_size;
+ }
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 6);
+ if (item && !xport->attn_data) {
+ f12->data6 = item;
+ f12->data6_offset = data_offset;
+ data_offset += item->reg_size;
+ }
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 7);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 8);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 9);
+ if (item && !xport->attn_data) {
+ f12->data9 = item;
+ f12->data9_offset = data_offset;
+ data_offset += item->reg_size;
+ if (!sensor->report_abs)
+ sensor->report_rel = 1;
+ }
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 10);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 11);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 12);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 13);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 14);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 15);
+ if (item && !xport->attn_data) {
+ f12->data15 = item;
+ f12->data15_offset = data_offset;
+ data_offset += item->reg_size;
+ }
+
+ /* allocate the in-kernel tracking buffers */
+ sensor->tracking_pos = devm_kzalloc(&fn->dev,
+ sizeof(struct input_mt_pos) * sensor->nbr_fingers,
+ GFP_KERNEL);
+ sensor->tracking_slots = devm_kzalloc(&fn->dev,
+ sizeof(int) * sensor->nbr_fingers, GFP_KERNEL);
+ sensor->objs = devm_kzalloc(&fn->dev,
+ sizeof(struct rmi_2d_sensor_abs_object)
+ * sensor->nbr_fingers, GFP_KERNEL);
+ if (!sensor->tracking_pos || !sensor->tracking_slots || !sensor->objs)
+ return -ENOMEM;
+
+ ret = rmi_2d_sensor_configure_input(fn, sensor);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f12_handler = {
+ .driver = {
+ .name = "rmi4_f12",
+ },
+ .func = 0x12,
+ .probe = rmi_f12_probe,
+ .config = rmi_f12_config,
+ .attention = rmi_f12_attention,
+};
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
new file mode 100644
index 000000000000..760aff1bc420
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2012-2016 Synaptics Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define RMI_F30_QUERY_SIZE 2
+
+/* Defs for Query 0 */
+#define RMI_F30_EXTENDED_PATTERNS 0x01
+#define RMI_F30_HAS_MAPPABLE_BUTTONS (1 << 1)
+#define RMI_F30_HAS_LED (1 << 2)
+#define RMI_F30_HAS_GPIO (1 << 3)
+#define RMI_F30_HAS_HAPTIC (1 << 4)
+#define RMI_F30_HAS_GPIO_DRV_CTL (1 << 5)
+#define RMI_F30_HAS_MECH_MOUSE_BTNS (1 << 6)
+
+/* Defs for Query 1 */
+#define RMI_F30_GPIO_LED_COUNT 0x1F
+
+/* Defs for Control Registers */
+#define RMI_F30_CTRL_1_GPIO_DEBOUNCE 0x01
+#define RMI_F30_CTRL_1_HALT (1 << 4)
+#define RMI_F30_CTRL_1_HALTED (1 << 5)
+#define RMI_F30_CTRL_10_NUM_MECH_MOUSE_BTNS 0x03
+
+struct rmi_f30_ctrl_data {
+ int address;
+ int length;
+ u8 *regs;
+};
+
+#define RMI_F30_CTRL_MAX_REGS 32
+#define RMI_F30_CTRL_MAX_BYTES ((RMI_F30_CTRL_MAX_REGS + 7) >> 3)
+#define RMI_F30_CTRL_MAX_REG_BLOCKS 11
+
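+/*
+ * Worst case size of the control register block: ctrl0, ctrl2, ctrl3,
+ * ctrl4 and ctrl8 use one bit per GPIO/LED, ctrl5 is up to 6 bytes,
+ * ctrl6 and ctrl7 use one byte per GPIO/LED, and ctrl1, ctrl9 and
+ * ctrl10 are single bytes.
+ */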
+#define RMI_F30_CTRL_REGS_MAX_SIZE (RMI_F30_CTRL_MAX_BYTES \
+ + 1 \
+ + RMI_F30_CTRL_MAX_BYTES \
+ + RMI_F30_CTRL_MAX_BYTES \
+ + RMI_F30_CTRL_MAX_BYTES \
+ + 6 \
+ + RMI_F30_CTRL_MAX_REGS \
+ + RMI_F30_CTRL_MAX_REGS \
+ + RMI_F30_CTRL_MAX_BYTES \
+ + 1 \
+ + 1)
+
+struct f30_data {
+ /* Query Data */
+ bool has_extended_pattern;
+ bool has_mappable_buttons;
+ bool has_led;
+ bool has_gpio;
+ bool has_haptic;
+ bool has_gpio_driver_control;
+ bool has_mech_mouse_btns;
+ u8 gpioled_count;
+
+ u8 register_count;
+
+ /* Control Register Data */
+ struct rmi_f30_ctrl_data ctrl[RMI_F30_CTRL_MAX_REG_BLOCKS];
+ u8 ctrl_regs[RMI_F30_CTRL_REGS_MAX_SIZE];
+ u32 ctrl_regs_size;
+
+ u8 data_regs[RMI_F30_CTRL_MAX_BYTES];
+ u16 *gpioled_key_map;
+
+ struct input_dev *input;
+};
+
+static int rmi_f30_read_control_parameters(struct rmi_function *fn,
+ struct f30_data *f30)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error = 0;
+
+ error = rmi_read_block(rmi_dev, fn->fd.control_base_addr,
+ f30->ctrl_regs, f30->ctrl_regs_size);
+ if (error) {
+ dev_err(&rmi_dev->dev, "%s : Could not read control registers at 0x%x error (%d)\n",
+ __func__, fn->fd.control_base_addr, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct f30_data *f30 = dev_get_drvdata(&fn->dev);
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int retval;
+ int gpiled = 0;
+ int value = 0;
+ int i;
+ int reg_num;
+
+ if (!f30->input)
+ return 0;
+
+ /* Read the gpi led data. */
+ if (rmi_dev->xport->attn_data) {
+ memcpy(f30->data_regs, rmi_dev->xport->attn_data,
+ f30->register_count);
+ rmi_dev->xport->attn_data += f30->register_count;
+ rmi_dev->xport->attn_size -= f30->register_count;
+ } else {
+ retval = rmi_read_block(rmi_dev, fn->fd.data_base_addr,
+ f30->data_regs, f30->register_count);
+
+ if (retval) {
+ dev_err(&fn->dev, "%s: Failed to read F30 data registers.\n",
+ __func__);
+ return retval;
+ }
+ }
+
+ for (reg_num = 0; reg_num < f30->register_count; ++reg_num) {
+ for (i = 0; gpiled < f30->gpioled_count && i < 8; ++i,
+ ++gpiled) {
+ if (f30->gpioled_key_map[gpiled] != 0) {
+ /* buttons have pull up resistors */
+ value = (((f30->data_regs[reg_num] >> i) & 0x01)
+ == 0);
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev,
+ "%s: call input report key (0x%04x) value (0x%02x)",
+ __func__,
+ f30->gpioled_key_map[gpiled], value);
+ input_report_key(f30->input,
+ f30->gpioled_key_map[gpiled],
+ value);
+ }
+
+ }
+ }
+
+ return 0;
+}
+
+static int rmi_f30_register_device(struct rmi_function *fn)
+{
+ int i;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ struct f30_data *f30 = dev_get_drvdata(&fn->dev);
+ struct input_dev *input_dev;
+ int button_count = 0;
+
+ input_dev = drv_data->input;
+ if (!input_dev) {
+ dev_info(&fn->dev, "F30: no input device found, ignoring.\n");
+ return -EINVAL;
+ }
+
+ f30->input = input_dev;
+
+ set_bit(EV_KEY, input_dev->evbit);
+
+ input_dev->keycode = f30->gpioled_key_map;
+ input_dev->keycodesize = sizeof(u16);
+ input_dev->keycodemax = f30->gpioled_count;
+
+ for (i = 0; i < f30->gpioled_count; i++) {
+ if (f30->gpioled_key_map[i] != 0) {
+ input_set_capability(input_dev, EV_KEY,
+ f30->gpioled_key_map[i]);
+ button_count++;
+ }
+ }
+
+ if (button_count == 1)
+ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
+ return 0;
+}
+
+static int rmi_f30_config(struct rmi_function *fn)
+{
+ struct f30_data *f30 = dev_get_drvdata(&fn->dev);
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(fn->rmi_dev);
+ int error;
+
+ if (pdata->f30_data && pdata->f30_data->disable) {
+ drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
+ } else {
+ /* Write Control Register values back to device */
+ error = rmi_write_block(fn->rmi_dev, fn->fd.control_base_addr,
+ f30->ctrl_regs, f30->ctrl_regs_size);
+ if (error) {
+ dev_err(&fn->rmi_dev->dev,
+ "%s : Could not write control registers at 0x%x error (%d)\n",
+ __func__, fn->fd.control_base_addr, error);
+ return error;
+ }
+
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ }
+ return 0;
+}
+
+static inline void rmi_f30_set_ctrl_data(struct rmi_f30_ctrl_data *ctrl,
+ int *ctrl_addr, int len, u8 **reg)
+{
+ ctrl->address = *ctrl_addr;
+ ctrl->length = len;
+ ctrl->regs = *reg;
+ *ctrl_addr += len;
+ *reg += len;
+}
+
+static inline bool rmi_f30_is_valid_button(int button,
+ struct rmi_f30_ctrl_data *ctrl)
+{
+ int byte_position = button >> 3;
+ int bit_position = button & 0x07;
+
+ /*
+ * ctrl2 -> dir == 0 -> input mode
+ * ctrl3 -> data == 1 -> actual button
+ */
+ return !(ctrl[2].regs[byte_position] & BIT(bit_position)) &&
+ (ctrl[3].regs[byte_position] & BIT(bit_position));
+}
+
+static inline int rmi_f30_initialize(struct rmi_function *fn)
+{
+ struct f30_data *f30;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ const struct rmi_device_platform_data *pdata;
+ int retval = 0;
+ int control_address;
+ int i;
+ int button;
+ u8 buf[RMI_F30_QUERY_SIZE];
+ u8 *ctrl_reg;
+ u8 *map_memory;
+
+ f30 = devm_kzalloc(&fn->dev, sizeof(struct f30_data),
+ GFP_KERNEL);
+ if (!f30)
+ return -ENOMEM;
+
+ dev_set_drvdata(&fn->dev, f30);
+
+ retval = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, buf,
+ RMI_F30_QUERY_SIZE);
+
+ if (retval) {
+ dev_err(&fn->dev, "Failed to read query register.\n");
+ return retval;
+ }
+
+ f30->has_extended_pattern = buf[0] & RMI_F30_EXTENDED_PATTERNS;
+ f30->has_mappable_buttons = buf[0] & RMI_F30_HAS_MAPPABLE_BUTTONS;
+ f30->has_led = buf[0] & RMI_F30_HAS_LED;
+ f30->has_gpio = buf[0] & RMI_F30_HAS_GPIO;
+ f30->has_haptic = buf[0] & RMI_F30_HAS_HAPTIC;
+ f30->has_gpio_driver_control = buf[0] & RMI_F30_HAS_GPIO_DRV_CTL;
+ f30->has_mech_mouse_btns = buf[0] & RMI_F30_HAS_MECH_MOUSE_BTNS;
+ f30->gpioled_count = buf[1] & RMI_F30_GPIO_LED_COUNT;
+
+ f30->register_count = (f30->gpioled_count + 7) >> 3;
+
+ control_address = fn->fd.control_base_addr;
+ ctrl_reg = f30->ctrl_regs;
+
+ if (f30->has_gpio && f30->has_led)
+ rmi_f30_set_ctrl_data(&f30->ctrl[0], &control_address,
+ f30->register_count, &ctrl_reg);
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[1], &control_address, sizeof(u8),
+ &ctrl_reg);
+
+ if (f30->has_gpio) {
+ rmi_f30_set_ctrl_data(&f30->ctrl[2], &control_address,
+ f30->register_count, &ctrl_reg);
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[3], &control_address,
+ f30->register_count, &ctrl_reg);
+ }
+
+ if (f30->has_led) {
+ int ctrl5_len;
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[4], &control_address,
+ f30->register_count, &ctrl_reg);
+
+ if (f30->has_extended_pattern)
+ ctrl5_len = 6;
+ else
+ ctrl5_len = 2;
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[5], &control_address,
+ ctrl5_len, &ctrl_reg);
+ }
+
+ if (f30->has_led || f30->has_gpio_driver_control) {
+ /* control 6 uses a byte per gpio/led */
+ rmi_f30_set_ctrl_data(&f30->ctrl[6], &control_address,
+ f30->gpioled_count, &ctrl_reg);
+ }
+
+ if (f30->has_mappable_buttons) {
+ /* control 7 uses a byte per gpio/led */
+ rmi_f30_set_ctrl_data(&f30->ctrl[7], &control_address,
+ f30->gpioled_count, &ctrl_reg);
+ }
+
+ if (f30->has_haptic) {
+ rmi_f30_set_ctrl_data(&f30->ctrl[8], &control_address,
+ f30->register_count, &ctrl_reg);
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[9], &control_address,
+ sizeof(u8), &ctrl_reg);
+ }
+
+ if (f30->has_mech_mouse_btns)
+ rmi_f30_set_ctrl_data(&f30->ctrl[10], &control_address,
+ sizeof(u8), &ctrl_reg);
+
+ f30->ctrl_regs_size = ctrl_reg - f30->ctrl_regs
+ ?: RMI_F30_CTRL_REGS_MAX_SIZE;
+
+ retval = rmi_f30_read_control_parameters(fn, f30);
+ if (retval < 0) {
+ dev_err(&fn->dev,
+ "Failed to initialize F19 control params.\n");
+ return retval;
+ }
+
+ map_memory = devm_kzalloc(&fn->dev,
+ (f30->gpioled_count * (sizeof(u16))),
+ GFP_KERNEL);
+ if (!map_memory) {
+ dev_err(&fn->dev, "Failed to allocate gpioled map memory.\n");
+ return -ENOMEM;
+ }
+
+ f30->gpioled_key_map = (u16 *)map_memory;
+
+ pdata = rmi_get_platform_data(rmi_dev);
+ if (pdata && f30->has_gpio) {
+ button = BTN_LEFT;
+ for (i = 0; i < f30->gpioled_count; i++) {
+ if (rmi_f30_is_valid_button(i, f30->ctrl)) {
+ f30->gpioled_key_map[i] = button++;
+
+ /*
+ * buttonpad might be given by
+ * f30->has_mech_mouse_btns, but I am
+ * not sure, so use only the pdata info
+ */
+ if (pdata->f30_data &&
+ pdata->f30_data->buttonpad)
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int rmi_f30_probe(struct rmi_function *fn)
+{
+ int rc;
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(fn->rmi_dev);
+
+ if (pdata->f30_data && pdata->f30_data->disable)
+ return 0;
+
+ rc = rmi_f30_initialize(fn);
+ if (rc < 0)
+ goto error_exit;
+
+ rc = rmi_f30_register_device(fn);
+ if (rc < 0)
+ goto error_exit;
+
+ return 0;
+
+error_exit:
+ return rc;
+
+}
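+
+/*
+ * Illustrative sketch only (hypothetical, simplified): the RMI core binds a
+ * handler such as the one below to a discovered function by comparing
+ * function numbers, roughly like this.
+ */
+static inline bool rmi_f30_owns(u8 pdt_function_number)
+{
+	return pdt_function_number == 0x30;
+}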
+
+struct rmi_function_handler rmi_f30_handler = {
+ .driver = {
+ .name = "rmi4_f30",
+ },
+ .func = 0x30,
+ .probe = rmi_f30_probe,
+ .config = rmi_f30_config,
+ .attention = rmi_f30_attention,
+};
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
new file mode 100644
index 000000000000..a96a326b53bd
--- /dev/null
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/rmi.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include "rmi_driver.h"
+
+#define BUFFER_SIZE_INCREMENT 32
+
+/**
+ * struct rmi_i2c_xport - stores information for i2c communication
+ *
+ * @xport: The transport interface structure
+ * @client: The I2C client device the transport is bound to
+ *
+ * @page_mutex: Locks current page to avoid changing pages in unexpected ways.
+ * @page: Keeps track of the current virtual page
+ *
+ * @irq: The IRQ that signals attention from the sensor
+ *
+ * @tx_buf: Buffer used for transmitting data to the sensor over i2c.
+ * @tx_buf_size: Size of the buffer
+ */
+struct rmi_i2c_xport {
+ struct rmi_transport_dev xport;
+ struct i2c_client *client;
+
+ struct mutex page_mutex;
+ int page;
+
+ int irq;
+
+ u8 *tx_buf;
+ size_t tx_buf_size;
+};
+
+#define RMI_PAGE_SELECT_REGISTER 0xff
+#define RMI_I2C_PAGE(addr) (((addr) >> 8) & 0xff)
+
+/*
+ * rmi_set_page - Set RMI page
+ * @rmi_i2c: The pointer to the rmi_i2c_xport struct
+ * @page: The new page address.
+ *
+ * RMI devices have 16-bit addressing, but some of the transport
+ * implementations (like SMBus) only have 8-bit addressing. So RMI implements
+ * a page-select register at 0xff of every page, which lets us reliably
+ * address any register within a 256-register page.
+ *
+ * The page_mutex lock must be held when this function is entered.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int rmi_set_page(struct rmi_i2c_xport *rmi_i2c, u8 page)
+{
+ struct i2c_client *client = rmi_i2c->client;
+ u8 txbuf[2] = {RMI_PAGE_SELECT_REGISTER, page};
+ int retval;
+
+ retval = i2c_master_send(client, txbuf, sizeof(txbuf));
+ if (retval != sizeof(txbuf)) {
+ dev_err(&client->dev,
+ "%s: set page failed: %d.", __func__, retval);
+ return (retval < 0) ? retval : -EIO;
+ }
+
+ rmi_i2c->page = page;
+ return 0;
+}
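+
+/*
+ * Illustrative sketch only, not used by the driver: a 16-bit RMI address
+ * splits into the page (its high byte, selected through register 0xff
+ * above) and an 8-bit offset within that page, so e.g. address 0x0123
+ * becomes page 0x01, offset 0x23.
+ */
+static inline u8 rmi_i2c_addr_offset(u16 addr)
+{
+	return addr & 0xff;	/* the page is RMI_I2C_PAGE(addr) */
+}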
+
+static int rmi_i2c_write_block(struct rmi_transport_dev *xport, u16 addr,
+ const void *buf, size_t len)
+{
+ struct rmi_i2c_xport *rmi_i2c =
+ container_of(xport, struct rmi_i2c_xport, xport);
+ struct i2c_client *client = rmi_i2c->client;
+ size_t tx_size = len + 1;
+ int retval;
+
+ mutex_lock(&rmi_i2c->page_mutex);
+
+ if (!rmi_i2c->tx_buf || rmi_i2c->tx_buf_size < tx_size) {
+ if (rmi_i2c->tx_buf)
+ devm_kfree(&client->dev, rmi_i2c->tx_buf);
+ rmi_i2c->tx_buf_size = tx_size + BUFFER_SIZE_INCREMENT;
+ rmi_i2c->tx_buf = devm_kzalloc(&client->dev,
+ rmi_i2c->tx_buf_size,
+ GFP_KERNEL);
+ if (!rmi_i2c->tx_buf) {
+ rmi_i2c->tx_buf_size = 0;
+ retval = -ENOMEM;
+ goto exit;
+ }
+ }
+
+ rmi_i2c->tx_buf[0] = addr & 0xff;
+ memcpy(rmi_i2c->tx_buf + 1, buf, len);
+
+ if (RMI_I2C_PAGE(addr) != rmi_i2c->page) {
+ retval = rmi_set_page(rmi_i2c, RMI_I2C_PAGE(addr));
+ if (retval)
+ goto exit;
+ }
+
+ retval = i2c_master_send(client, rmi_i2c->tx_buf, tx_size);
+ if (retval == tx_size)
+ retval = 0;
+ else if (retval >= 0)
+ retval = -EIO;
+
+exit:
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev,
+ "write %zd bytes at %#06x: %d (%*ph)\n",
+ len, addr, retval, (int)len, buf);
+
+ mutex_unlock(&rmi_i2c->page_mutex);
+ return retval;
+}
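+
+/*
+ * Illustrative sketch only (hypothetical helper): the lazy tx_buf growth
+ * from rmi_i2c_write_block() in one place -- reallocate only when a request
+ * does not fit, with BUFFER_SIZE_INCREMENT of slack so slightly larger
+ * requests do not reallocate again.
+ */
+static int rmi_i2c_ensure_tx_buf(struct rmi_i2c_xport *rmi_i2c, size_t need)
+{
+	struct device *dev = &rmi_i2c->client->dev;
+
+	if (rmi_i2c->tx_buf && rmi_i2c->tx_buf_size >= need)
+		return 0;
+
+	if (rmi_i2c->tx_buf)
+		devm_kfree(dev, rmi_i2c->tx_buf);
+
+	rmi_i2c->tx_buf_size = need + BUFFER_SIZE_INCREMENT;
+	rmi_i2c->tx_buf = devm_kzalloc(dev, rmi_i2c->tx_buf_size, GFP_KERNEL);
+	if (!rmi_i2c->tx_buf) {
+		rmi_i2c->tx_buf_size = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}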
+
+static int rmi_i2c_read_block(struct rmi_transport_dev *xport, u16 addr,
+ void *buf, size_t len)
+{
+ struct rmi_i2c_xport *rmi_i2c =
+ container_of(xport, struct rmi_i2c_xport, xport);
+ struct i2c_client *client = rmi_i2c->client;
+ u8 addr_offset = addr & 0xff;
+ int retval;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .len = sizeof(addr_offset),
+ .buf = &addr_offset,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ },
+ };
+
+ mutex_lock(&rmi_i2c->page_mutex);
+
+ if (RMI_I2C_PAGE(addr) != rmi_i2c->page) {
+ retval = rmi_set_page(rmi_i2c, RMI_I2C_PAGE(addr));
+ if (retval)
+ goto exit;
+ }
+
+ retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (retval == ARRAY_SIZE(msgs))
+ retval = 0; /* success */
+ else if (retval >= 0)
+ retval = -EIO;
+
+exit:
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev,
+ "read %zd bytes at %#06x: %d (%*ph)\n",
+ len, addr, retval, (int)len, buf);
+
+ mutex_unlock(&rmi_i2c->page_mutex);
+ return retval;
+}
+
+static const struct rmi_transport_ops rmi_i2c_ops = {
+ .write_block = rmi_i2c_write_block,
+ .read_block = rmi_i2c_read_block,
+};
+
+static irqreturn_t rmi_i2c_irq(int irq, void *dev_id)
+{
+ struct rmi_i2c_xport *rmi_i2c = dev_id;
+ struct rmi_device *rmi_dev = rmi_i2c->xport.rmi_dev;
+ int ret;
+
+ ret = rmi_process_interrupt_requests(rmi_dev);
+ if (ret)
+ rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
+ "Failed to process interrupt request: %d\n", ret);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_i2c_init_irq(struct i2c_client *client)
+{
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_i2c->irq));
+ int ret;
+
+ if (!irq_flags)
+ irq_flags = IRQF_TRIGGER_LOW;
+
+ ret = devm_request_threaded_irq(&client->dev, rmi_i2c->irq, NULL,
+ rmi_i2c_irq, irq_flags | IRQF_ONESHOT, client->name,
+ rmi_i2c);
+ if (ret < 0) {
+ dev_warn(&client->dev, "Failed to register interrupt %d\n",
+ rmi_i2c->irq);
+
+ return ret;
+ }
+
+ return 0;
+}
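+
+/*
+ * Illustrative sketch only: irqd_get_trigger_type() returns 0
+ * (IRQ_TYPE_NONE) when the firmware did not describe a trigger, which is
+ * exactly what the level-low fallback above keys on.
+ */
+static inline int rmi_i2c_trigger_or_default(int described)
+{
+	return described ?: IRQF_TRIGGER_LOW;
+}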
+
+#ifdef CONFIG_OF
+static const struct of_device_id rmi_i2c_of_match[] = {
+ { .compatible = "syna,rmi4-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rmi_i2c_of_match);
+#endif
+
+static int rmi_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct rmi_device_platform_data *pdata;
+ struct rmi_device_platform_data *client_pdata =
+ dev_get_platdata(&client->dev);
+ struct rmi_i2c_xport *rmi_i2c;
+ int retval;
+
+ rmi_i2c = devm_kzalloc(&client->dev, sizeof(struct rmi_i2c_xport),
+ GFP_KERNEL);
+ if (!rmi_i2c)
+ return -ENOMEM;
+
+ pdata = &rmi_i2c->xport.pdata;
+
+ if (!client->dev.of_node && client_pdata)
+ *pdata = *client_pdata;
+
+ if (client->irq > 0)
+ rmi_i2c->irq = client->irq;
+
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Probing %s.\n",
+ dev_name(&client->dev));
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev,
+ "adapter does not support required functionality.\n");
+ return -ENODEV;
+ }
+
+ rmi_i2c->client = client;
+ mutex_init(&rmi_i2c->page_mutex);
+
+ rmi_i2c->xport.dev = &client->dev;
+ rmi_i2c->xport.proto_name = "i2c";
+ rmi_i2c->xport.ops = &rmi_i2c_ops;
+
+ i2c_set_clientdata(client, rmi_i2c);
+
+ /*
+ * Setting the page to zero will (a) make sure the PSR is in a
+ * known state, and (b) make sure we can talk to the device.
+ */
+ retval = rmi_set_page(rmi_i2c, 0);
+ if (retval) {
+ dev_err(&client->dev, "Failed to set page select to 0.\n");
+ return retval;
+ }
+
+ retval = rmi_register_transport_device(&rmi_i2c->xport);
+ if (retval) {
+ dev_err(&client->dev, "Failed to register transport driver at 0x%.2X.\n",
+ client->addr);
+ return retval;
+ }
+
+ retval = rmi_i2c_init_irq(client);
+ if (retval < 0)
+ return retval;
+
+ dev_info(&client->dev, "registered rmi i2c driver at %#04x.\n",
+ client->addr);
+ return 0;
+}
+
+static int rmi_i2c_remove(struct i2c_client *client)
+{
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+
+ rmi_unregister_transport_device(&rmi_i2c->xport);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rmi_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ disable_irq(rmi_i2c->irq);
+ if (device_may_wakeup(&client->dev)) {
+ ret = enable_irq_wake(rmi_i2c->irq);
+		if (ret)
+ dev_warn(dev, "Failed to enable irq for wake: %d\n",
+ ret);
+ }
+ return ret;
+}
+
+static int rmi_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int ret;
+
+ enable_irq(rmi_i2c->irq);
+ if (device_may_wakeup(&client->dev)) {
+ ret = disable_irq_wake(rmi_i2c->irq);
+		if (ret)
+ dev_warn(dev, "Failed to disable irq for wake: %d\n",
+ ret);
+ }
+
+ ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int rmi_i2c_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ disable_irq(rmi_i2c->irq);
+
+ return 0;
+}
+
+static int rmi_i2c_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int ret;
+
+ enable_irq(rmi_i2c->irq);
+
+ ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops rmi_i2c_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(rmi_i2c_suspend, rmi_i2c_resume)
+ SET_RUNTIME_PM_OPS(rmi_i2c_runtime_suspend, rmi_i2c_runtime_resume,
+ NULL)
+};
+
+static const struct i2c_device_id rmi_id[] = {
+ { "rmi4_i2c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rmi_id);
+
+static struct i2c_driver rmi_i2c_driver = {
+ .driver = {
+ .name = "rmi4_i2c",
+ .pm = &rmi_i2c_pm,
+ .of_match_table = of_match_ptr(rmi_i2c_of_match),
+ },
+ .id_table = rmi_id,
+ .probe = rmi_i2c_probe,
+ .remove = rmi_i2c_remove,
+};
+
+module_i2c_driver(rmi_i2c_driver);
+
+MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
+MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
+MODULE_DESCRIPTION("RMI I2C driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RMI_DRIVER_VERSION);
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
new file mode 100644
index 000000000000..55bd1b34970c
--- /dev/null
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include "rmi_driver.h"
+
+#define RMI_SPI_DEFAULT_XFER_BUF_SIZE 64
+
+#define RMI_PAGE_SELECT_REGISTER 0x00FF
+#define RMI_SPI_PAGE(addr) (((addr) >> 8) & 0x80)
+#define RMI_SPI_XFER_SIZE_LIMIT 255
+
+#define BUFFER_SIZE_INCREMENT 32
+
+enum rmi_spi_op {
+ RMI_SPI_WRITE = 0,
+ RMI_SPI_READ,
+ RMI_SPI_V2_READ_UNIFIED,
+ RMI_SPI_V2_READ_SPLIT,
+ RMI_SPI_V2_WRITE,
+};
+
+struct rmi_spi_cmd {
+ enum rmi_spi_op op;
+ u16 addr;
+};
+
+struct rmi_spi_xport {
+ struct rmi_transport_dev xport;
+ struct spi_device *spi;
+
+ struct mutex page_mutex;
+ int page;
+
+ int irq;
+
+ u8 *rx_buf;
+ u8 *tx_buf;
+ int xfer_buf_size;
+
+ struct spi_transfer *rx_xfers;
+ struct spi_transfer *tx_xfers;
+ int rx_xfer_count;
+ int tx_xfer_count;
+};
+
+static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
+{
+ struct spi_device *spi = rmi_spi->spi;
+ int buf_size = rmi_spi->xfer_buf_size
+ ? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
+ struct spi_transfer *xfer_buf;
+ void *buf;
+ void *tmp;
+
+ while (buf_size < len)
+ buf_size *= 2;
+
+ if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
+ buf_size = RMI_SPI_XFER_SIZE_LIMIT;
+
+ tmp = rmi_spi->rx_buf;
+ buf = devm_kzalloc(&spi->dev, buf_size * 2,
+ GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+
+ rmi_spi->rx_buf = buf;
+ rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
+ rmi_spi->xfer_buf_size = buf_size;
+
+ if (tmp)
+ devm_kfree(&spi->dev, tmp);
+
+ if (rmi_spi->xport.pdata.spi_data.read_delay_us)
+ rmi_spi->rx_xfer_count = buf_size;
+ else
+ rmi_spi->rx_xfer_count = 1;
+
+ if (rmi_spi->xport.pdata.spi_data.write_delay_us)
+ rmi_spi->tx_xfer_count = buf_size;
+ else
+ rmi_spi->tx_xfer_count = 1;
+
+ /*
+ * Allocate a pool of spi_transfer buffers for devices which need
+ * per byte delays.
+ */
+ tmp = rmi_spi->rx_xfers;
+ xfer_buf = devm_kzalloc(&spi->dev,
+ (rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
+ * sizeof(struct spi_transfer), GFP_KERNEL);
+ if (!xfer_buf)
+ return -ENOMEM;
+
+ rmi_spi->rx_xfers = xfer_buf;
+ rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];
+
+ if (tmp)
+ devm_kfree(&spi->dev, tmp);
+
+ return 0;
+}
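+
+/*
+ * Illustrative sketch only, not used by the driver: the sizing policy of
+ * rmi_spi_manage_pools() in isolation -- double from the current (or
+ * default) size until the request fits, then clamp to the single-transfer
+ * limit; e.g. a 100 byte request against the 64 byte default yields 128.
+ */
+static inline int rmi_spi_next_buf_size(int cur, int len)
+{
+	int size = cur ?: RMI_SPI_DEFAULT_XFER_BUF_SIZE;
+
+	while (size < len)
+		size *= 2;
+
+	return min(size, RMI_SPI_XFER_SIZE_LIMIT);
+}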
+
+static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
+ const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
+ int tx_len, u8 *rx_buf, int rx_len)
+{
+ struct spi_device *spi = rmi_spi->spi;
+ struct rmi_device_platform_data_spi *spi_data =
+ &rmi_spi->xport.pdata.spi_data;
+ struct spi_message msg;
+ struct spi_transfer *xfer;
+ int ret = 0;
+ int len;
+ int cmd_len = 0;
+ int total_tx_len;
+ int i;
+ u16 addr = cmd->addr;
+
+ spi_message_init(&msg);
+
+ switch (cmd->op) {
+ case RMI_SPI_WRITE:
+ case RMI_SPI_READ:
+ cmd_len += 2;
+ break;
+ case RMI_SPI_V2_READ_UNIFIED:
+ case RMI_SPI_V2_READ_SPLIT:
+ case RMI_SPI_V2_WRITE:
+ cmd_len += 4;
+ break;
+ }
+
+ total_tx_len = cmd_len + tx_len;
+ len = max(total_tx_len, rx_len);
+
+ if (len > RMI_SPI_XFER_SIZE_LIMIT)
+ return -EINVAL;
+
+	if (rmi_spi->xfer_buf_size < len) {
+		ret = rmi_spi_manage_pools(rmi_spi, len);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (addr == 0) {
+		/*
+		 * SPI needs an address. Use 0x7FF if we want to keep
+		 * reading from the last position of the register pointer.
+		 */
+		addr = 0x7FF;
+	}
+
+ switch (cmd->op) {
+ case RMI_SPI_WRITE:
+ rmi_spi->tx_buf[0] = (addr >> 8);
+ rmi_spi->tx_buf[1] = addr & 0xFF;
+ break;
+ case RMI_SPI_READ:
+ rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
+ rmi_spi->tx_buf[1] = addr & 0xFF;
+ break;
+ case RMI_SPI_V2_READ_UNIFIED:
+ break;
+ case RMI_SPI_V2_READ_SPLIT:
+ break;
+ case RMI_SPI_V2_WRITE:
+ rmi_spi->tx_buf[0] = 0x40;
+ rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
+ rmi_spi->tx_buf[2] = addr & 0xFF;
+ rmi_spi->tx_buf[3] = tx_len;
+ break;
+ }
+
+ if (tx_buf)
+ memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);
+
+ if (rmi_spi->tx_xfer_count > 1) {
+ for (i = 0; i < total_tx_len; i++) {
+ xfer = &rmi_spi->tx_xfers[i];
+ memset(xfer, 0, sizeof(struct spi_transfer));
+ xfer->tx_buf = &rmi_spi->tx_buf[i];
+ xfer->len = 1;
+ xfer->delay_usecs = spi_data->write_delay_us;
+ spi_message_add_tail(xfer, &msg);
+ }
+ } else {
+ xfer = rmi_spi->tx_xfers;
+ memset(xfer, 0, sizeof(struct spi_transfer));
+ xfer->tx_buf = rmi_spi->tx_buf;
+ xfer->len = total_tx_len;
+ spi_message_add_tail(xfer, &msg);
+ }
+
+ rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
+ __func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
+ total_tx_len, total_tx_len, rmi_spi->tx_buf);
+
+ if (rx_buf) {
+ if (rmi_spi->rx_xfer_count > 1) {
+ for (i = 0; i < rx_len; i++) {
+ xfer = &rmi_spi->rx_xfers[i];
+ memset(xfer, 0, sizeof(struct spi_transfer));
+ xfer->rx_buf = &rmi_spi->rx_buf[i];
+ xfer->len = 1;
+ xfer->delay_usecs = spi_data->read_delay_us;
+ spi_message_add_tail(xfer, &msg);
+ }
+ } else {
+ xfer = rmi_spi->rx_xfers;
+ memset(xfer, 0, sizeof(struct spi_transfer));
+ xfer->rx_buf = rmi_spi->rx_buf;
+ xfer->len = rx_len;
+ spi_message_add_tail(xfer, &msg);
+ }
+ }
+
+ ret = spi_sync(spi, &msg);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
+ return ret;
+ }
+
+ if (rx_buf) {
+ memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
+ rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
+ __func__, rx_len, rx_len, rx_buf);
+ }
+
+ return 0;
+}
+
+/*
+ * rmi_set_page - Set RMI page
+ * @rmi_spi: The pointer to the rmi_spi_xport struct
+ * @page: The new page address.
+ *
+ * RMI devices have 16-bit addressing, but some of the transport
+ * implementations (like SMBus) only have 8-bit addressing. So RMI implements
+ * a page-select register at 0xff of every page, which lets us reliably
+ * address any register within a 256-register page.
+ *
+ * The page_mutex lock must be held when this function is entered.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
+{
+ struct rmi_spi_cmd cmd;
+ int ret;
+
+ cmd.op = RMI_SPI_WRITE;
+ cmd.addr = RMI_PAGE_SELECT_REGISTER;
+
+ ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);
+
+	if (!ret)
+ rmi_spi->page = page;
+
+ return ret;
+}
+
+static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
+ const void *buf, size_t len)
+{
+ struct rmi_spi_xport *rmi_spi =
+ container_of(xport, struct rmi_spi_xport, xport);
+ struct rmi_spi_cmd cmd;
+ int ret;
+
+ mutex_lock(&rmi_spi->page_mutex);
+
+ if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
+ ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
+ if (ret)
+ goto exit;
+ }
+
+ cmd.op = RMI_SPI_WRITE;
+ cmd.addr = addr;
+
+ ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);
+
+exit:
+ mutex_unlock(&rmi_spi->page_mutex);
+ return ret;
+}
+
+static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
+ void *buf, size_t len)
+{
+ struct rmi_spi_xport *rmi_spi =
+ container_of(xport, struct rmi_spi_xport, xport);
+ struct rmi_spi_cmd cmd;
+ int ret;
+
+ mutex_lock(&rmi_spi->page_mutex);
+
+ if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
+ ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
+ if (ret)
+ goto exit;
+ }
+
+ cmd.op = RMI_SPI_READ;
+ cmd.addr = addr;
+
+ ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);
+
+exit:
+ mutex_unlock(&rmi_spi->page_mutex);
+ return ret;
+}
+
+static const struct rmi_transport_ops rmi_spi_ops = {
+ .write_block = rmi_spi_write_block,
+ .read_block = rmi_spi_read_block,
+};
+
+static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
+{
+ struct rmi_spi_xport *rmi_spi = dev_id;
+ struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
+ int ret;
+
+ ret = rmi_process_interrupt_requests(rmi_dev);
+ if (ret)
+ rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
+ "Failed to process interrupt request: %d\n", ret);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_spi_init_irq(struct spi_device *spi)
+{
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq));
+ int ret;
+
+ if (!irq_flags)
+ irq_flags = IRQF_TRIGGER_LOW;
+
+ ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL,
+ rmi_spi_irq, irq_flags | IRQF_ONESHOT,
+ dev_name(&spi->dev), rmi_spi);
+ if (ret < 0) {
+ dev_warn(&spi->dev, "Failed to register interrupt %d\n",
+ rmi_spi->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int rmi_spi_of_probe(struct spi_device *spi,
+ struct rmi_device_platform_data *pdata)
+{
+ struct device *dev = &spi->dev;
+ int retval;
+
+ retval = rmi_of_property_read_u32(dev,
+ &pdata->spi_data.read_delay_us,
+ "spi-rx-delay-us", 1);
+ if (retval)
+ return retval;
+
+ retval = rmi_of_property_read_u32(dev,
+ &pdata->spi_data.write_delay_us,
+ "spi-tx-delay-us", 1);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
+static const struct of_device_id rmi_spi_of_match[] = {
+ { .compatible = "syna,rmi4-spi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
+#else
+static inline int rmi_spi_of_probe(struct spi_device *spi,
+ struct rmi_device_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
+static int rmi_spi_probe(struct spi_device *spi)
+{
+ struct rmi_spi_xport *rmi_spi;
+ struct rmi_device_platform_data *pdata;
+ struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
+ int retval;
+
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ return -EINVAL;
+
+ rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
+ GFP_KERNEL);
+ if (!rmi_spi)
+ return -ENOMEM;
+
+ pdata = &rmi_spi->xport.pdata;
+
+ if (spi->dev.of_node) {
+ retval = rmi_spi_of_probe(spi, pdata);
+ if (retval)
+ return retval;
+ } else if (spi_pdata) {
+ *pdata = *spi_pdata;
+ }
+
+ if (pdata->spi_data.bits_per_word)
+ spi->bits_per_word = pdata->spi_data.bits_per_word;
+
+ if (pdata->spi_data.mode)
+ spi->mode = pdata->spi_data.mode;
+
+ retval = spi_setup(spi);
+ if (retval < 0) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return retval;
+ }
+
+ if (spi->irq > 0)
+ rmi_spi->irq = spi->irq;
+
+ rmi_spi->spi = spi;
+ mutex_init(&rmi_spi->page_mutex);
+
+ rmi_spi->xport.dev = &spi->dev;
+ rmi_spi->xport.proto_name = "spi";
+ rmi_spi->xport.ops = &rmi_spi_ops;
+
+ spi_set_drvdata(spi, rmi_spi);
+
+ retval = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
+ if (retval)
+ return retval;
+
+ /*
+ * Setting the page to zero will (a) make sure the PSR is in a
+ * known state, and (b) make sure we can talk to the device.
+ */
+ retval = rmi_set_page(rmi_spi, 0);
+ if (retval) {
+ dev_err(&spi->dev, "Failed to set page select to 0.\n");
+ return retval;
+ }
+
+ retval = rmi_register_transport_device(&rmi_spi->xport);
+ if (retval) {
+ dev_err(&spi->dev, "failed to register transport.\n");
+ return retval;
+ }
+
+ retval = rmi_spi_init_irq(spi);
+ if (retval < 0)
+ return retval;
+
+ dev_info(&spi->dev, "registered RMI SPI driver\n");
+ return 0;
+}
+
+static int rmi_spi_remove(struct spi_device *spi)
+{
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+
+ rmi_unregister_transport_device(&rmi_spi->xport);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rmi_spi_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ disable_irq(rmi_spi->irq);
+ if (device_may_wakeup(&spi->dev)) {
+ ret = enable_irq_wake(rmi_spi->irq);
+		if (ret)
+ dev_warn(dev, "Failed to enable irq for wake: %d\n",
+ ret);
+ }
+ return ret;
+}
+
+static int rmi_spi_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int ret;
+
+ enable_irq(rmi_spi->irq);
+ if (device_may_wakeup(&spi->dev)) {
+ ret = disable_irq_wake(rmi_spi->irq);
+		if (ret)
+ dev_warn(dev, "Failed to disable irq for wake: %d\n",
+ ret);
+ }
+
+ ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int rmi_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ disable_irq(rmi_spi->irq);
+
+ return 0;
+}
+
+static int rmi_spi_runtime_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int ret;
+
+ enable_irq(rmi_spi->irq);
+
+ ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops rmi_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
+ SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
+ NULL)
+};
+
+static const struct spi_device_id rmi_id[] = {
+ { "rmi4_spi", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, rmi_id);
+
+static struct spi_driver rmi_spi_driver = {
+ .driver = {
+ .name = "rmi4_spi",
+ .pm = &rmi_spi_pm,
+ .of_match_table = of_match_ptr(rmi_spi_of_match),
+ },
+ .id_table = rmi_id,
+ .probe = rmi_spi_probe,
+ .remove = rmi_spi_remove,
+};
+
+module_spi_driver(rmi_spi_driver);
+
+MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
+MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
+MODULE_DESCRIPTION("RMI SPI driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RMI_DRIVER_VERSION);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 3a7f3a4a4396..7c18249d6c8e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
goto err_free_buf;
}
+ /* Sanity check that a device has an endpoint */
+ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ dev_err(&usbinterface->dev,
+ "Invalid number of endpoints\n");
+ error = -EINVAL;
+ goto err_free_urb;
+ }
+
/*
* The endpoint is always altsetting 0, we know this since we know
* this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
* HID report descriptor
*/
if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
- HID_DEVICE_TYPE, &hid_desc) != 0){
+ HID_DEVICE_TYPE, &hid_desc) != 0) {
dev_err(&usbinterface->dev,
"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
error = -EIO;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 66c62641b59a..8ecdc38fd489 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -334,7 +334,7 @@ config TOUCHSCREEN_FUJITSU
config TOUCHSCREEN_GOODIX
tristate "Goodix I2C touchscreen"
depends on I2C
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have the Goodix touchscreen (such as one
installed in Onda v975w tablets) connected to your
@@ -491,6 +491,17 @@ config TOUCHSCREEN_MMS114
To compile this driver as a module, choose M here: the
module will be called mms114.
+config TOUCHSCREEN_MELFAS_MIP4
+ tristate "MELFAS MIP4 Touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a MELFAS MIP4 Touchscreen device.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here:
+ the module will be called melfas_mip4.
+
config TOUCHSCREEN_MTOUCH
tristate "MicroTouch serial touchscreens"
select SERIO
@@ -822,6 +833,15 @@ config TOUCHSCREEN_USB_COMPOSITE
To compile this driver as a module, choose M here: the
module will be called usbtouchscreen.
+config TOUCHSCREEN_MX25
+ tristate "Freescale i.MX25 touchscreen input driver"
+ depends on MFD_MX25_TSADC
+ help
+	  Enable support for a touchscreen connected to your i.MX25.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fsl-imx25-tcq.
+
config TOUCHSCREEN_MC13783
tristate "Freescale MC13783 touchscreen input driver"
depends on MFD_MC13XXX
@@ -941,6 +961,7 @@ config TOUCHSCREEN_TOUCHIT213
config TOUCHSCREEN_TS4800
tristate "TS-4800 touchscreen"
depends on HAS_IOMEM && OF
+ depends on SOC_IMX51 || COMPILE_TEST
select MFD_SYSCON
select INPUT_POLLDEV
help
@@ -1112,7 +1133,8 @@ config TOUCHSCREEN_ZFORCE
config TOUCHSCREEN_COLIBRI_VF50
tristate "Toradex Colibri on board touchscreen driver"
- depends on GPIOLIB && IIO && VF610_ADC
+ depends on IIO && VF610_ADC
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have a Colibri VF50 and plan to use
the on-board provided 4-wire touchscreen driver.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 968ff12e3132..f42975e719e0 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -46,8 +46,10 @@ obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
obj-$(CONFIG_TOUCHSCREEN_IPROC) += bcm_iproc_tsc.o
obj-$(CONFIG_TOUCHSCREEN_LPC32XX) += lpc32xx_ts.o
obj-$(CONFIG_TOUCHSCREEN_MAX11801) += max11801_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MX25) += fsl-imx25-tcq.o
obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MELFAS_MIP4) += melfas_mip4.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
obj-$(CONFIG_TOUCHSCREEN_MMS114) += mms114.o
obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index d66962c5b1c2..58f72e0246ab 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/of.h>
#include <linux/pm.h>
#include "ad7879.h"
@@ -91,10 +92,19 @@ static const struct i2c_device_id ad7879_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ad7879_id);
+#ifdef CONFIG_OF
+static const struct of_device_id ad7879_i2c_dt_ids[] = {
+ { .compatible = "adi,ad7879-1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad7879_i2c_dt_ids);
+#endif
+
static struct i2c_driver ad7879_i2c_driver = {
.driver = {
.name = "ad7879",
.pm = &ad7879_pm_ops,
+ .of_match_table = of_match_ptr(ad7879_i2c_dt_ids),
},
.probe = ad7879_i2c_probe,
.remove = ad7879_i2c_remove,
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index 48033c2689ab..d42b6b9af191 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -10,6 +10,7 @@
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
+#include <linux/of.h>
#include "ad7879.h"
@@ -146,10 +147,19 @@ static int ad7879_spi_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id ad7879_spi_dt_ids[] = {
+ { .compatible = "adi,ad7879", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad7879_spi_dt_ids);
+#endif
+
static struct spi_driver ad7879_spi_driver = {
.driver = {
.name = "ad7879",
.pm = &ad7879_pm_ops,
+ .of_match_table = of_match_ptr(ad7879_spi_dt_ids),
},
.probe = ad7879_spi_probe,
.remove = ad7879_spi_remove,
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 16b5cc2196f2..69d299d5dd00 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -31,7 +31,8 @@
#include <linux/i2c.h>
#include <linux/gpio.h>
-#include <linux/spi/ad7879.h>
+#include <linux/input/touchscreen.h>
+#include <linux/platform_data/ad7879.h>
#include <linux/module.h>
#include "ad7879.h"
@@ -94,8 +95,8 @@
#define AD7879_TEMP_BIT (1<<1)
enum {
- AD7879_SEQ_XPOS = 0,
- AD7879_SEQ_YPOS = 1,
+ AD7879_SEQ_YPOS = 0,
+ AD7879_SEQ_XPOS = 1,
AD7879_SEQ_Z1 = 2,
AD7879_SEQ_Z2 = 3,
AD7879_NR_SENSE = 4,
@@ -126,7 +127,6 @@ struct ad7879 {
u8 pen_down_acc_interval;
u8 median;
u16 x_plate_ohms;
- u16 pressure_max;
u16 cmd_crtl1;
u16 cmd_crtl2;
u16 cmd_crtl3;
@@ -170,10 +170,10 @@ static int ad7879_report(struct ad7879 *ts)
* filter. The combination of these two techniques provides a robust
* solution, discarding the spurious noise in the signal and keeping
* only the data of interest. The size of both filters is
- * programmable. (dev.platform_data, see linux/spi/ad7879.h) Other
- * user-programmable conversion controls include variable acquisition
- * time, and first conversion delay. Up to 16 averages can be taken
- * per conversion.
+ * programmable. (dev.platform_data, see linux/platform_data/ad7879.h)
+ * Other user-programmable conversion controls include variable
+ * acquisition time, and first conversion delay. Up to 16 averages can
+ * be taken per conversion.
*/
if (likely(x && z1)) {
@@ -186,7 +186,7 @@ static int ad7879_report(struct ad7879 *ts)
* Sample found inconsistent, pressure is beyond
* the maximum. Don't report it to user space.
*/
- if (Rt > ts->pressure_max)
+ if (Rt > input_abs_get_max(input_dev, ABS_PRESSURE))
return -EINVAL;
/*
@@ -469,7 +469,7 @@ static void ad7879_gpio_remove(struct ad7879 *ts)
{
const struct ad7879_platform_data *pdata = dev_get_platdata(ts->dev);
- if (pdata->gpio_export)
+ if (pdata && pdata->gpio_export)
gpiochip_remove(&ts->gc);
}
@@ -485,6 +485,32 @@ static inline void ad7879_gpio_remove(struct ad7879 *ts)
}
#endif
+static int ad7879_parse_dt(struct device *dev, struct ad7879 *ts)
+{
+ int err;
+ u32 tmp;
+
+ err = device_property_read_u32(dev, "adi,resistance-plate-x", &tmp);
+ if (err) {
+ dev_err(dev, "failed to get resistance-plate-x property\n");
+ return err;
+ }
+ ts->x_plate_ohms = (u16)tmp;
+
+ device_property_read_u8(dev, "adi,first-conversion-delay",
+ &ts->first_conversion_delay);
+ device_property_read_u8(dev, "adi,acquisition-time",
+ &ts->acquisition_time);
+ device_property_read_u8(dev, "adi,median-filter-size", &ts->median);
+ device_property_read_u8(dev, "adi,averaging", &ts->averaging);
+ device_property_read_u8(dev, "adi,conversion-interval",
+ &ts->pen_down_acc_interval);
+
+ ts->swap_xy = device_property_read_bool(dev, "touchscreen-swapped-x-y");
+
+ return 0;
+}
+
struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
const struct ad7879_bus_ops *bops)
{
@@ -495,41 +521,44 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
u16 revid;
if (!irq) {
- dev_err(dev, "no IRQ?\n");
- err = -EINVAL;
- goto err_out;
+ dev_err(dev, "No IRQ specified\n");
+ return ERR_PTR(-EINVAL);
}
- if (!pdata) {
- dev_err(dev, "no platform data?\n");
- err = -EINVAL;
- goto err_out;
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return ERR_PTR(-ENOMEM);
+
+ if (pdata) {
+		/* Platform data uses swapped axes (backward compatibility) */
+ ts->swap_xy = !pdata->swap_xy;
+
+ ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
+
+ ts->first_conversion_delay = pdata->first_conversion_delay;
+ ts->acquisition_time = pdata->acquisition_time;
+ ts->averaging = pdata->averaging;
+ ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
+ ts->median = pdata->median;
+ } else if (dev->of_node) {
+		err = ad7879_parse_dt(dev, ts);
+		if (err)
+			return ERR_PTR(err);
+ } else {
+ dev_err(dev, "No platform data\n");
+ return ERR_PTR(-EINVAL);
}
- ts = kzalloc(sizeof(*ts), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
+ input_dev = devm_input_allocate_device(dev);
+ if (!input_dev) {
+ dev_err(dev, "Failed to allocate input device\n");
+ return ERR_PTR(-ENOMEM);
}
ts->bops = bops;
ts->dev = dev;
ts->input = input_dev;
ts->irq = irq;
- ts->swap_xy = pdata->swap_xy;
setup_timer(&ts->timer, ad7879_timer, (unsigned long) ts);
-
- ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
- ts->pressure_max = pdata->pressure_max ? : ~0;
-
- ts->first_conversion_delay = pdata->first_conversion_delay;
- ts->acquisition_time = pdata->acquisition_time;
- ts->averaging = pdata->averaging;
- ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
- ts->median = pdata->median;
-
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));
input_dev->name = "AD7879 Touchscreen";
@@ -550,21 +579,33 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
- input_set_abs_params(input_dev, ABS_X,
- pdata->x_min ? : 0,
- pdata->x_max ? : MAX_12BIT,
- 0, 0);
- input_set_abs_params(input_dev, ABS_Y,
- pdata->y_min ? : 0,
- pdata->y_max ? : MAX_12BIT,
- 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE,
- pdata->pressure_min, pdata->pressure_max, 0, 0);
+ if (pdata) {
+ input_set_abs_params(input_dev, ABS_X,
+ pdata->x_min ? : 0,
+ pdata->x_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ pdata->y_min ? : 0,
+ pdata->y_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ pdata->pressure_min,
+ pdata->pressure_max ? : ~0,
+ 0, 0);
+ } else {
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
+ touchscreen_parse_properties(input_dev, false);
+ if (!input_abs_get_max(input_dev, ABS_PRESSURE)) {
+ dev_err(dev, "Touchscreen pressure is not specified\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
err = ad7879_write(ts, AD7879_REG_CTRL2, AD7879_RESET);
if (err < 0) {
dev_err(dev, "Failed to write %s\n", input_dev->name);
- goto err_free_mem;
+ return ERR_PTR(err);
}
revid = ad7879_read(ts, AD7879_REG_REVID);
@@ -573,8 +614,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
if (input_dev->id.product != devid) {
dev_err(dev, "Failed to probe %s (%x vs %x)\n",
input_dev->name, devid, revid);
- err = -ENODEV;
- goto err_free_mem;
+ return ERR_PTR(-ENODEV);
}
ts->cmd_crtl3 = AD7879_YPLUS_BIT |
@@ -594,23 +634,25 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
AD7879_ACQ(ts->acquisition_time) |
AD7879_TMR(ts->pen_down_acc_interval);
- err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), ts);
+ err = devm_request_threaded_irq(dev, ts->irq, NULL, ad7879_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), ts);
if (err) {
- dev_err(dev, "irq %d busy?\n", ts->irq);
- goto err_free_mem;
+ dev_err(dev, "Failed to request IRQ: %d\n", err);
+ return ERR_PTR(err);
}
__ad7879_disable(ts);
err = sysfs_create_group(&dev->kobj, &ad7879_attr_group);
if (err)
- goto err_free_irq;
+ goto err_out;
- err = ad7879_gpio_add(ts, pdata);
- if (err)
- goto err_remove_attr;
+ if (pdata) {
+ err = ad7879_gpio_add(ts, pdata);
+ if (err)
+ goto err_remove_attr;
+ }
err = input_register_device(input_dev);
if (err)
@@ -622,11 +664,6 @@ err_remove_gpio:
ad7879_gpio_remove(ts);
err_remove_attr:
sysfs_remove_group(&dev->kobj, &ad7879_attr_group);
-err_free_irq:
- free_irq(ts->irq, ts);
-err_free_mem:
- input_free_device(input_dev);
- kfree(ts);
err_out:
return ERR_PTR(err);
}
@@ -636,9 +673,6 @@ void ad7879_remove(struct ad7879 *ts)
{
ad7879_gpio_remove(ts);
sysfs_remove_group(&ts->dev->kobj, &ad7879_attr_group);
- free_irq(ts->irq, ts);
- input_unregister_device(ts->input);
- kfree(ts);
}
EXPORT_SYMBOL(ad7879_remove);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2160512e861a..5af7907d0af4 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1093,6 +1093,19 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
return 0;
}
+static int mxt_acquire_irq(struct mxt_data *data)
+{
+ int error;
+
+ enable_irq(data->irq);
+
+ error = mxt_process_messages_until_invalid(data);
+ if (error)
+ return error;
+
+ return 0;
+}
+
static int mxt_soft_reset(struct mxt_data *data)
{
struct device *dev = &data->client->dev;
@@ -1111,7 +1124,7 @@ static int mxt_soft_reset(struct mxt_data *data)
/* Ignore CHG line for 100ms after reset */
msleep(100);
- enable_irq(data->irq);
+ mxt_acquire_irq(data);
ret = mxt_wait_for_completion(data, &data->reset_completion,
MXT_RESET_TIMEOUT);
@@ -1466,19 +1479,6 @@ release_mem:
return ret;
}
-static int mxt_acquire_irq(struct mxt_data *data)
-{
- int error;
-
- enable_irq(data->irq);
-
- error = mxt_process_messages_until_invalid(data);
- if (error)
- return error;
-
- return 0;
-}
-
static int mxt_get_info(struct mxt_data *data)
{
struct i2c_client *client = data->client;
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 5b74e8b84e79..91cda8f8119d 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -30,9 +30,12 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/property.h>
+#include <linux/gpio/consumer.h>
#include "cyttsp_core.h"
@@ -57,6 +60,7 @@
#define CY_DELAY_DFLT 20 /* ms */
#define CY_DELAY_MAX 500
#define CY_ACT_DIST_DFLT 0xF8
+#define CY_ACT_DIST_MASK 0x0F
#define CY_HNDSHK_BIT 0x80
/* device mode bits */
#define CY_OPERATE_MODE 0x00
@@ -120,7 +124,7 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
static int cyttsp_handshake(struct cyttsp *ts)
{
- if (ts->pdata->use_hndshk)
+ if (ts->use_hndshk)
return ttsp_send_command(ts,
ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
@@ -142,9 +146,9 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
u8 bl_cmd[sizeof(bl_command)];
memcpy(bl_cmd, bl_command, sizeof(bl_command));
- if (ts->pdata->bl_keys)
+ if (ts->bl_keys)
memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
- ts->pdata->bl_keys, CY_NUM_BL_KEYS);
+ ts->bl_keys, CY_NUM_BL_KEYS);
error = ttsp_write_block_data(ts, CY_REG_BASE,
sizeof(bl_cmd), bl_cmd);
@@ -217,14 +221,14 @@ static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
{
int retval = 0;
- if (ts->pdata->act_intrvl != CY_ACT_INTRVL_DFLT ||
- ts->pdata->tch_tmout != CY_TCH_TMOUT_DFLT ||
- ts->pdata->lp_intrvl != CY_LP_INTRVL_DFLT) {
+ if (ts->act_intrvl != CY_ACT_INTRVL_DFLT ||
+ ts->tch_tmout != CY_TCH_TMOUT_DFLT ||
+ ts->lp_intrvl != CY_LP_INTRVL_DFLT) {
u8 intrvl_ray[] = {
- ts->pdata->act_intrvl,
- ts->pdata->tch_tmout,
- ts->pdata->lp_intrvl
+ ts->act_intrvl,
+ ts->tch_tmout,
+ ts->lp_intrvl
};
/* set intrvl registers */
@@ -236,6 +240,16 @@ static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
return retval;
}
+static void cyttsp_hard_reset(struct cyttsp *ts)
+{
+ if (ts->reset_gpio) {
+ gpiod_set_value_cansleep(ts->reset_gpio, 1);
+ msleep(CY_DELAY_DFLT);
+ gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ msleep(CY_DELAY_DFLT);
+ }
+}
+
static int cyttsp_soft_reset(struct cyttsp *ts)
{
unsigned long timeout;
@@ -263,7 +277,7 @@ out:
static int cyttsp_act_dist_setup(struct cyttsp *ts)
{
- u8 act_dist_setup = ts->pdata->act_dist;
+ u8 act_dist_setup = ts->act_dist;
/* Init gesture; active distance setup */
return ttsp_write_block_data(ts, CY_REG_ACT_DIST,
@@ -528,45 +542,110 @@ static void cyttsp_close(struct input_dev *dev)
cyttsp_disable(ts);
}
+static int cyttsp_parse_properties(struct cyttsp *ts)
+{
+ struct device *dev = ts->dev;
+ u32 dt_value;
+ int ret;
+
+ ts->bl_keys = devm_kzalloc(dev, CY_NUM_BL_KEYS, GFP_KERNEL);
+ if (!ts->bl_keys)
+ return -ENOMEM;
+
+ /* Set some default values */
+ ts->use_hndshk = false;
+ ts->act_dist = CY_ACT_DIST_DFLT;
+ ts->act_intrvl = CY_ACT_INTRVL_DFLT;
+ ts->tch_tmout = CY_TCH_TMOUT_DFLT;
+ ts->lp_intrvl = CY_LP_INTRVL_DFLT;
+
+ ret = device_property_read_u8_array(dev, "bootloader-key",
+ ts->bl_keys, CY_NUM_BL_KEYS);
+ if (ret) {
+ dev_err(dev,
+ "bootloader-key property could not be retrieved\n");
+ return ret;
+ }
+
+ ts->use_hndshk = device_property_present(dev, "use-handshake");
+
+ if (!device_property_read_u32(dev, "active-distance", &dt_value)) {
+ if (dt_value > 15) {
+ dev_err(dev, "active-distance (%u) must be [0-15]\n",
+ dt_value);
+ return -EINVAL;
+ }
+ ts->act_dist &= ~CY_ACT_DIST_MASK;
+ ts->act_dist |= dt_value;
+ }
+
+ if (!device_property_read_u32(dev, "active-interval-ms", &dt_value)) {
+ if (dt_value > 255) {
+ dev_err(dev, "active-interval-ms (%u) must be [0-255]\n",
+ dt_value);
+ return -EINVAL;
+ }
+ ts->act_intrvl = dt_value;
+ }
+
+ if (!device_property_read_u32(dev, "lowpower-interval-ms", &dt_value)) {
+ if (dt_value > 2550) {
+ dev_err(dev, "lowpower-interval-ms (%u) must be [0-2550]\n",
+ dt_value);
+ return -EINVAL;
+ }
+ /* Register value is expressed in 0.01s / bit */
+ ts->lp_intrvl = dt_value / 10;
+ }
+
+ if (!device_property_read_u32(dev, "touch-timeout-ms", &dt_value)) {
+ if (dt_value > 2550) {
+ dev_err(dev, "touch-timeout-ms (%u) must be [0-2550]\n",
+ dt_value);
+ return -EINVAL;
+ }
+ /* Register value is expressed in 0.01s / bit */
+ ts->tch_tmout = dt_value / 10;
+ }
+
+ return 0;
+}
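+
+/*
+ * Illustrative sketch only (hypothetical helper): the interval registers
+ * are expressed in units of 0.01 s per bit, hence the divide by 10 above;
+ * e.g. a 1500 ms touch timeout becomes a register value of 150.
+ */
+static inline u8 cyttsp_ms_to_reg(u32 ms)
+{
+	return ms / 10;
+}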
+
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size)
{
- const struct cyttsp_platform_data *pdata = dev_get_platdata(dev);
struct cyttsp *ts;
struct input_dev *input_dev;
int error;
- if (!pdata || !pdata->name || irq <= 0) {
- error = -EINVAL;
- goto err_out;
- }
+ ts = devm_kzalloc(dev, sizeof(*ts) + xfer_buf_size, GFP_KERNEL);
+ if (!ts)
+ return ERR_PTR(-ENOMEM);
- ts = kzalloc(sizeof(*ts) + xfer_buf_size, GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ input_dev = devm_input_allocate_device(dev);
+ if (!input_dev)
+ return ERR_PTR(-ENOMEM);
ts->dev = dev;
ts->input = input_dev;
- ts->pdata = dev_get_platdata(dev);
ts->bus_ops = bus_ops;
ts->irq = irq;
+ ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+ dev_err(dev, "Failed to request reset gpio, error %d\n", error);
+ return ERR_PTR(error);
+ }
+
+ error = cyttsp_parse_properties(ts);
+ if (error)
+ return ERR_PTR(error);
+
init_completion(&ts->bl_ready);
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));
- if (pdata->init) {
- error = pdata->init();
- if (error) {
- dev_err(ts->dev, "platform init failed, err: %d\n",
- error);
- goto err_free_mem;
- }
- }
-
- input_dev->name = pdata->name;
+ input_dev->name = "Cypress TTSP TouchScreen";
input_dev->phys = ts->phys;
input_dev->id.bustype = bus_ops->bustype;
input_dev->dev.parent = ts->dev;
@@ -576,63 +655,44 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
input_set_drvdata(input_dev, ts);
- __set_bit(EV_ABS, input_dev->evbit);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, pdata->maxx, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, pdata->maxy, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
- 0, CY_MAXZ, 0, 0);
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+ touchscreen_parse_properties(input_dev, true);
- input_mt_init_slots(input_dev, CY_MAX_ID, 0);
+ error = input_mt_init_slots(input_dev, CY_MAX_ID, 0);
+ if (error) {
+ dev_err(dev, "Unable to init MT slots.\n");
+ return ERR_PTR(error);
+ }
- error = request_threaded_irq(ts->irq, NULL, cyttsp_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- pdata->name, ts);
+ error = devm_request_threaded_irq(dev, ts->irq, NULL, cyttsp_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "cyttsp", ts);
if (error) {
dev_err(ts->dev, "failed to request IRQ %d, err: %d\n",
ts->irq, error);
- goto err_platform_exit;
+ return ERR_PTR(error);
}
disable_irq(ts->irq);
+ cyttsp_hard_reset(ts);
+
error = cyttsp_power_on(ts);
if (error)
- goto err_free_irq;
+ return ERR_PTR(error);
error = input_register_device(input_dev);
if (error) {
dev_err(ts->dev, "failed to register input device: %d\n",
error);
- goto err_free_irq;
+ return ERR_PTR(error);
}
return ts;
-
-err_free_irq:
- free_irq(ts->irq, ts);
-err_platform_exit:
- if (pdata->exit)
- pdata->exit();
-err_free_mem:
- input_free_device(input_dev);
- kfree(ts);
-err_out:
- return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(cyttsp_probe);
-void cyttsp_remove(struct cyttsp *ts)
-{
- free_irq(ts->irq, ts);
- input_unregister_device(ts->input);
- if (ts->pdata->exit)
- ts->pdata->exit();
- kfree(ts);
-}
-EXPORT_SYMBOL_GPL(cyttsp_remove);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver core");
MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 07074110a902..7835e2bacf5a 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -129,7 +129,6 @@ struct cyttsp {
int irq;
struct input_dev *input;
char phys[32];
- const struct cyttsp_platform_data *pdata;
const struct cyttsp_bus_ops *bus_ops;
struct cyttsp_bootloader_data bl_data;
struct cyttsp_sysinfo_data sysinfo_data;
@@ -138,12 +137,19 @@ struct cyttsp {
enum cyttsp_state state;
bool suspended;
+ struct gpio_desc *reset_gpio;
+ bool use_hndshk;
+ u8 act_dist;
+ u8 act_intrvl;
+ u8 tch_tmout;
+ u8 lp_intrvl;
+ u8 *bl_keys;
+
u8 xfer_buf[] ____cacheline_aligned;
};
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size);
-void cyttsp_remove(struct cyttsp *ts);
int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
u8 length, const void *values);
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index eee51b3f2e3f..1edfdba96ede 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -56,15 +56,6 @@ static int cyttsp_i2c_probe(struct i2c_client *client,
return 0;
}
-static int cyttsp_i2c_remove(struct i2c_client *client)
-{
- struct cyttsp *ts = i2c_get_clientdata(client);
-
- cyttsp_remove(ts);
-
- return 0;
-}
-
static const struct i2c_device_id cyttsp_i2c_id[] = {
{ CY_I2C_NAME, 0 },
{ }
@@ -77,7 +68,6 @@ static struct i2c_driver cyttsp_i2c_driver = {
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_i2c_probe,
- .remove = cyttsp_i2c_remove,
.id_table = cyttsp_i2c_id,
};
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
index bbeeb2488b57..3c9d18b1b6ef 100644
--- a/drivers/input/touchscreen/cyttsp_spi.c
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -170,22 +170,12 @@ static int cyttsp_spi_probe(struct spi_device *spi)
return 0;
}
-static int cyttsp_spi_remove(struct spi_device *spi)
-{
- struct cyttsp *ts = spi_get_drvdata(spi);
-
- cyttsp_remove(ts);
-
- return 0;
-}
-
static struct spi_driver cyttsp_spi_driver = {
.driver = {
.name = CY_SPI_NAME,
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_spi_probe,
- .remove = cyttsp_spi_remove,
};
module_spi_driver(cyttsp_spi_driver);
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
new file mode 100644
index 000000000000..fe9877a6af9e
--- /dev/null
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Based on driver from 2011:
+ * Juergen Beisert, Pengutronix <kernel@pengutronix.de>
+ *
+ * This is the driver for the imx25 TCQ (Touchscreen Conversion Queue)
+ * connected to the imx25 ADC.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/imx25-tsadc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static const char mx25_tcq_name[] = "mx25-tcq";
+
+enum mx25_tcq_mode {
+ MX25_TS_4WIRE,
+};
+
+struct mx25_tcq_priv {
+ struct regmap *regs;
+ struct regmap *core_regs;
+ struct input_dev *idev;
+ enum mx25_tcq_mode mode;
+ unsigned int pen_threshold;
+ unsigned int sample_count;
+ unsigned int expected_samples;
+ unsigned int pen_debounce;
+ unsigned int settling_time;
+ struct clk *clk;
+ int irq;
+ struct device *dev;
+};
+
+static struct regmap_config mx25_tcq_regconfig = {
+ .fast_io = true,
+ .max_register = 0x5c,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static const struct of_device_id mx25_tcq_ids[] = {
+ { .compatible = "fsl,imx25-tcq", },
+ { /* Sentinel */ }
+};
+
+#define TSC_4WIRE_PRE_INDEX 0
+#define TSC_4WIRE_X_INDEX 1
+#define TSC_4WIRE_Y_INDEX 2
+#define TSC_4WIRE_POST_INDEX 3
+#define TSC_4WIRE_LEAVE 4
+
+#define MX25_TSC_DEF_THRESHOLD 80
+#define TSC_MAX_SAMPLES 16
+
+#define MX25_TSC_REPEAT_WAIT 14
+
+enum mx25_adc_configurations {
+ MX25_CFG_PRECHARGE = 0,
+ MX25_CFG_TOUCH_DETECT,
+ MX25_CFG_X_MEASUREMENT,
+ MX25_CFG_Y_MEASUREMENT,
+};
+
+#define MX25_PRECHARGE_VALUE (\
+ MX25_ADCQ_CFG_YPLL_OFF | \
+ MX25_ADCQ_CFG_XNUR_OFF | \
+ MX25_ADCQ_CFG_XPUL_HIGH | \
+ MX25_ADCQ_CFG_REFP_INT | \
+ MX25_ADCQ_CFG_IN_XP | \
+ MX25_ADCQ_CFG_REFN_NGND2 | \
+ MX25_ADCQ_CFG_IGS)
+
+#define MX25_TOUCH_DETECT_VALUE (\
+ MX25_ADCQ_CFG_YNLR | \
+ MX25_ADCQ_CFG_YPLL_OFF | \
+ MX25_ADCQ_CFG_XNUR_OFF | \
+ MX25_ADCQ_CFG_XPUL_OFF | \
+ MX25_ADCQ_CFG_REFP_INT | \
+ MX25_ADCQ_CFG_IN_XP | \
+ MX25_ADCQ_CFG_REFN_NGND2 | \
+ MX25_ADCQ_CFG_PENIACK)
+
+static void imx25_setup_queue_cfgs(struct mx25_tcq_priv *priv,
+ unsigned int settling_cnt)
+{
+ u32 precharge_cfg =
+ MX25_PRECHARGE_VALUE |
+ MX25_ADCQ_CFG_SETTLING_TIME(settling_cnt);
+ u32 touch_detect_cfg =
+ MX25_TOUCH_DETECT_VALUE |
+ MX25_ADCQ_CFG_NOS(1) |
+ MX25_ADCQ_CFG_SETTLING_TIME(settling_cnt);
+
+ regmap_write(priv->core_regs, MX25_TSC_TICR, precharge_cfg);
+
+ /* PRECHARGE */
+ regmap_write(priv->regs, MX25_ADCQ_CFG(MX25_CFG_PRECHARGE),
+ precharge_cfg);
+
+ /* TOUCH_DETECT */
+ regmap_write(priv->regs, MX25_ADCQ_CFG(MX25_CFG_TOUCH_DETECT),
+ touch_detect_cfg);
+
+ /* X Measurement */
+ regmap_write(priv->regs, MX25_ADCQ_CFG(MX25_CFG_X_MEASUREMENT),
+ MX25_ADCQ_CFG_YPLL_OFF |
+ MX25_ADCQ_CFG_XNUR_LOW |
+ MX25_ADCQ_CFG_XPUL_HIGH |
+ MX25_ADCQ_CFG_REFP_XP |
+ MX25_ADCQ_CFG_IN_YP |
+ MX25_ADCQ_CFG_REFN_XN |
+ MX25_ADCQ_CFG_NOS(priv->sample_count) |
+ MX25_ADCQ_CFG_SETTLING_TIME(settling_cnt));
+
+ /* Y Measurement */
+ regmap_write(priv->regs, MX25_ADCQ_CFG(MX25_CFG_Y_MEASUREMENT),
+ MX25_ADCQ_CFG_YNLR |
+ MX25_ADCQ_CFG_YPLL_HIGH |
+ MX25_ADCQ_CFG_XNUR_OFF |
+ MX25_ADCQ_CFG_XPUL_OFF |
+ MX25_ADCQ_CFG_REFP_YP |
+ MX25_ADCQ_CFG_IN_XP |
+ MX25_ADCQ_CFG_REFN_YN |
+ MX25_ADCQ_CFG_NOS(priv->sample_count) |
+ MX25_ADCQ_CFG_SETTLING_TIME(settling_cnt));
+
+ /* Enable the touch detection right now */
+ regmap_write(priv->core_regs, MX25_TSC_TICR, touch_detect_cfg |
+ MX25_ADCQ_CFG_IGS);
+}
+
+static int imx25_setup_queue_4wire(struct mx25_tcq_priv *priv,
+				   unsigned int settling_cnt, int *items)
+{
+ imx25_setup_queue_cfgs(priv, settling_cnt);
+
+ /* Setup the conversion queue */
+ regmap_write(priv->regs, MX25_ADCQ_ITEM_7_0,
+ MX25_ADCQ_ITEM(0, MX25_CFG_PRECHARGE) |
+ MX25_ADCQ_ITEM(1, MX25_CFG_TOUCH_DETECT) |
+ MX25_ADCQ_ITEM(2, MX25_CFG_X_MEASUREMENT) |
+ MX25_ADCQ_ITEM(3, MX25_CFG_Y_MEASUREMENT) |
+ MX25_ADCQ_ITEM(4, MX25_CFG_PRECHARGE) |
+ MX25_ADCQ_ITEM(5, MX25_CFG_TOUCH_DETECT));
+
+ /*
+	 * We measure X and Y with 'sample_count' samples each and run the
+	 * touch detection twice, with one sample each time.
+ */
+ priv->expected_samples = priv->sample_count * 2 + 2;
+ *items = 6;
+
+ return 0;
+}
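+
+/*
+ * Illustrative sketch only, not used by the driver: one pass of the queue
+ * set up above yields sample_count samples for X, sample_count for Y and
+ * one sample for each of the two touch detections; e.g. sample_count = 4
+ * gives 10 samples per pass.
+ */
+static inline unsigned int mx25_tcq_samples_per_pass(unsigned int sample_count)
+{
+	return sample_count * 2 + 2;
+}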
+
+static void mx25_tcq_disable_touch_irq(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_PDMSK,
+ MX25_ADCQ_CR_PDMSK);
+}
+
+static void mx25_tcq_enable_touch_irq(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_PDMSK, 0);
+}
+
+static void mx25_tcq_disable_fifo_irq(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_FDRY_IRQ,
+ MX25_ADCQ_MR_FDRY_IRQ);
+}
+
+static void mx25_tcq_enable_fifo_irq(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_FDRY_IRQ, 0);
+}
+
+static void mx25_tcq_force_queue_start(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_FQS,
+ MX25_ADCQ_CR_FQS);
+}
+
+static void mx25_tcq_force_queue_stop(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_FQS, 0);
+}
+
+static void mx25_tcq_fifo_reset(struct mx25_tcq_priv *priv)
+{
+ u32 tcqcr;
+
+ regmap_read(priv->regs, MX25_ADCQ_CR, &tcqcr);
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FRST,
+ MX25_ADCQ_CR_FRST);
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FRST, 0);
+ regmap_write(priv->regs, MX25_ADCQ_CR, tcqcr);
+}
+
+static void mx25_tcq_re_enable_touch_detection(struct mx25_tcq_priv *priv)
+{
+ /* stop the queue from looping */
+ mx25_tcq_force_queue_stop(priv);
+
+ /* for a clean touch detection, preload the X plate */
+ regmap_write(priv->core_regs, MX25_TSC_TICR, MX25_PRECHARGE_VALUE);
+
+ /* waste some time now to pre-load the X plate to high voltage */
+ mx25_tcq_fifo_reset(priv);
+
+ /* re-enable the detection right now */
+ regmap_write(priv->core_regs, MX25_TSC_TICR,
+ MX25_TOUCH_DETECT_VALUE | MX25_ADCQ_CFG_IGS);
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_SR, MX25_ADCQ_SR_PD,
+ MX25_ADCQ_SR_PD);
+
+ /* enable the pen down event to be a source for the interrupt */
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_PD_IRQ, 0);
+
+ /* let's fire the next IRQ if someone touches the touchscreen */
+ mx25_tcq_enable_touch_irq(priv);
+}
+
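+/*
+ * Each FIFO word carries the originating queue item in its ID field:
+ * items 1 and 5 are the touch-detect measurements taken before and after
+ * the X (item 2) and Y (item 3) position samples.
+ */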
+static void mx25_tcq_create_event_for_4wire(struct mx25_tcq_priv *priv,
+ u32 *sample_buf,
+ unsigned int samples)
+{
+ unsigned int x_pos = 0;
+ unsigned int y_pos = 0;
+ unsigned int touch_pre = 0;
+ unsigned int touch_post = 0;
+ unsigned int i;
+
+ for (i = 0; i < samples; i++) {
+ unsigned int index = MX25_ADCQ_FIFO_ID(sample_buf[i]);
+ unsigned int val = MX25_ADCQ_FIFO_DATA(sample_buf[i]);
+
+ switch (index) {
+ case 1:
+ touch_pre = val;
+ break;
+ case 2:
+ x_pos = val;
+ break;
+ case 3:
+ y_pos = val;
+ break;
+ case 5:
+ touch_post = val;
+ break;
+ default:
+ dev_dbg(priv->dev, "Dropped samples because of invalid index %d\n",
+ index);
+ return;
+ }
+ }
+
+ if (samples != 0) {
+ /*
+ * only if both touch measures are below a threshold,
+ * the position is valid
+ */
+ if (touch_pre < priv->pen_threshold &&
+ touch_post < priv->pen_threshold) {
+ /* valid samples, generate a report */
+ x_pos /= priv->sample_count;
+ y_pos /= priv->sample_count;
+ input_report_abs(priv->idev, ABS_X, x_pos);
+ input_report_abs(priv->idev, ABS_Y, y_pos);
+ input_report_key(priv->idev, BTN_TOUCH, 1);
+ input_sync(priv->idev);
+
+ /* get next sample */
+ mx25_tcq_enable_fifo_irq(priv);
+ } else if (touch_pre >= priv->pen_threshold &&
+ touch_post >= priv->pen_threshold) {
+ /*
+ * if both samples are invalid,
+ * generate a release report
+ */
+ input_report_key(priv->idev, BTN_TOUCH, 0);
+ input_sync(priv->idev);
+ mx25_tcq_re_enable_touch_detection(priv);
+ } else {
+ /*
+ * if only one of the two touch measurements is
+ * below the threshold, some bouncing is still
+ * going on. Take additional samples in this
+ * case to be sure
+ */
+ mx25_tcq_enable_fifo_irq(priv);
+ }
+ }
+}
+
+static irqreturn_t mx25_tcq_irq_thread(int irq, void *dev_id)
+{
+ struct mx25_tcq_priv *priv = dev_id;
+ u32 sample_buf[TSC_MAX_SAMPLES];
+ unsigned int samples;
+ u32 stats;
+ unsigned int i;
+
+ /*
+ * Check how many samples are available. We always have to read a
+ * multiple of sample_count samples from the FIFO. Otherwise we
+ * would mix samples of different touch events.
+ */
+ regmap_read(priv->regs, MX25_ADCQ_SR, &stats);
+ samples = MX25_ADCQ_SR_FDN(stats);
+ samples -= samples % priv->sample_count;
+
+ if (!samples)
+ return IRQ_HANDLED;
+
+ for (i = 0; i != samples; ++i)
+ regmap_read(priv->regs, MX25_ADCQ_FIFO, &sample_buf[i]);
+
+ mx25_tcq_create_event_for_4wire(priv, sample_buf, samples);
+
+ return IRQ_HANDLED;
+}
+
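+/*
+ * Hard IRQ handler: restart touch detection on FIFO errors, start a
+ * measurement run on pen-down and defer FIFO draining to the threaded
+ * handler, acknowledging the handled status bits on the way out.
+ */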
+static irqreturn_t mx25_tcq_irq(int irq, void *dev_id)
+{
+ struct mx25_tcq_priv *priv = dev_id;
+ u32 stat;
+ int ret = IRQ_HANDLED;
+
+ regmap_read(priv->regs, MX25_ADCQ_SR, &stat);
+
+ if (stat & (MX25_ADCQ_SR_FRR | MX25_ADCQ_SR_FUR | MX25_ADCQ_SR_FOR))
+ mx25_tcq_re_enable_touch_detection(priv);
+
+ if (stat & MX25_ADCQ_SR_PD) {
+ mx25_tcq_disable_touch_irq(priv);
+ mx25_tcq_force_queue_start(priv);
+ mx25_tcq_enable_fifo_irq(priv);
+ }
+
+ if (stat & MX25_ADCQ_SR_FDRY) {
+ mx25_tcq_disable_fifo_irq(priv);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_SR, MX25_ADCQ_SR_FRR |
+ MX25_ADCQ_SR_FUR | MX25_ADCQ_SR_FOR |
+ MX25_ADCQ_SR_PD,
+ MX25_ADCQ_SR_FRR | MX25_ADCQ_SR_FUR |
+ MX25_ADCQ_SR_FOR | MX25_ADCQ_SR_PD);
+
+ return ret;
+}
+
+/* configure the state machine for a 4-wire touchscreen */
+static int mx25_tcq_init(struct mx25_tcq_priv *priv)
+{
+ u32 tgcr;
+ unsigned int ipg_div;
+ unsigned int adc_period;
+ unsigned int debounce_cnt;
+ unsigned int settling_cnt;
+ int itemct;
+ int error;
+
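+ /*
+ * The ADC runs at ipg / (2 * ipg_div); derive its period in
+ * nanoseconds (the +2 / +1 terms bias the result up and keep the
+ * divisor non-zero) so the ns-based debounce and settling times
+ * from the device tree can be turned into counter values below.
+ */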
+ regmap_read(priv->core_regs, MX25_TSC_TGCR, &tgcr);
+ ipg_div = max_t(unsigned int, 4, MX25_TGCR_GET_ADCCLK(tgcr));
+ adc_period = USEC_PER_SEC * ipg_div * 2 + 2;
+ adc_period /= clk_get_rate(priv->clk) / 1000 + 1;
+ debounce_cnt = DIV_ROUND_UP(priv->pen_debounce, adc_period * 8) - 1;
+ settling_cnt = DIV_ROUND_UP(priv->settling_time, adc_period * 8) - 1;
+
+ /* Reset */
+ regmap_write(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_QRST | MX25_ADCQ_CR_FRST);
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_QRST | MX25_ADCQ_CR_FRST, 0);
+
+ /* up to 128 * 8 ADC clocks are possible */
+ if (debounce_cnt > 127)
+ debounce_cnt = 127;
+
+ /* up to 255 * 8 ADC clocks are possible */
+ if (settling_cnt > 255)
+ settling_cnt = 255;
+
+ error = imx25_setup_queue_4wire(priv, settling_cnt, &itemct);
+ if (error)
+ return error;
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_LITEMID_MASK | MX25_ADCQ_CR_WMRK_MASK,
+ MX25_ADCQ_CR_LITEMID(itemct - 1) |
+ MX25_ADCQ_CR_WMRK(priv->expected_samples - 1));
+
+ /* setup debounce count */
+ regmap_update_bits(priv->core_regs, MX25_TSC_TGCR,
+ MX25_TGCR_PDBTIME_MASK,
+ MX25_TGCR_PDBTIME(debounce_cnt));
+
+ /* enable debounce */
+ regmap_update_bits(priv->core_regs, MX25_TSC_TGCR, MX25_TGCR_PDBEN,
+ MX25_TGCR_PDBEN);
+ regmap_update_bits(priv->core_regs, MX25_TSC_TGCR, MX25_TGCR_PDEN,
+ MX25_TGCR_PDEN);
+
+ /* enable the engine on demand */
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_QSM_MASK,
+ MX25_ADCQ_CR_QSM_FQS);
+
+ /* Enable repeat and repeat wait */
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_RPT | MX25_ADCQ_CR_RWAIT_MASK,
+ MX25_ADCQ_CR_RPT |
+ MX25_ADCQ_CR_RWAIT(MX25_TSC_REPEAT_WAIT));
+
+ return 0;
+}
+
+static int mx25_tcq_parse_dt(struct platform_device *pdev,
+ struct mx25_tcq_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 wires;
+ int error;
+
+ /* Setup defaults */
+ priv->pen_threshold = 500;
+ priv->sample_count = 3;
+ priv->pen_debounce = 1000000;
+ priv->settling_time = 250000;
+
+ error = of_property_read_u32(np, "fsl,wires", &wires);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to find fsl,wires properties\n");
+ return error;
+ }
+
+ if (wires == 4) {
+ priv->mode = MX25_TS_4WIRE;
+ } else {
+ dev_err(&pdev->dev, "%u-wire mode not supported\n", wires);
+ return -EINVAL;
+ }
+
+ /* These are optional, we don't care about the return values */
+ of_property_read_u32(np, "fsl,pen-threshold", &priv->pen_threshold);
+ of_property_read_u32(np, "fsl,settling-time-ns", &priv->settling_time);
+ of_property_read_u32(np, "fsl,pen-debounce-ns", &priv->pen_debounce);
+
+ return 0;
+}
+
+static int mx25_tcq_open(struct input_dev *idev)
+{
+ struct device *dev = &idev->dev;
+ struct mx25_tcq_priv *priv = dev_get_drvdata(dev);
+ int error;
+
+ error = clk_prepare_enable(priv->clk);
+ if (error) {
+ dev_err(dev, "Failed to enable ipg clock\n");
+ return error;
+ }
+
+ error = mx25_tcq_init(priv);
+ if (error) {
+ dev_err(dev, "Failed to init tcq\n");
+ clk_disable_unprepare(priv->clk);
+ return error;
+ }
+
+ mx25_tcq_re_enable_touch_detection(priv);
+
+ return 0;
+}
+
+static void mx25_tcq_close(struct input_dev *idev)
+{
+ struct mx25_tcq_priv *priv = input_get_drvdata(idev);
+
+ mx25_tcq_force_queue_stop(priv);
+ mx25_tcq_disable_touch_irq(priv);
+ mx25_tcq_disable_fifo_irq(priv);
+ clk_disable_unprepare(priv->clk);
+}
+
+static int mx25_tcq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct input_dev *idev;
+ struct mx25_tcq_priv *priv;
+ struct mx25_tsadc *tsadc = dev_get_drvdata(pdev->dev.parent);
+ struct resource *res;
+ void __iomem *mem;
+ int error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ error = mx25_tcq_parse_dt(pdev, priv);
+ if (error)
+ return error;
+
+ priv->regs = devm_regmap_init_mmio(dev, mem, &mx25_tcq_regconfig);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ return PTR_ERR(priv->regs);
+ }
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq <= 0) {
+ dev_err(dev, "Failed to get IRQ\n");
+ return priv->irq ? priv->irq : -ENXIO;
+ }
+
+ idev = devm_input_allocate_device(dev);
+ if (!idev) {
+ dev_err(dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ idev->name = mx25_tcq_name;
+ input_set_capability(idev, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(idev, ABS_X, 0, 0xfff, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, 0xfff, 0, 0);
+
+ idev->id.bustype = BUS_HOST;
+ idev->open = mx25_tcq_open;
+ idev->close = mx25_tcq_close;
+
+ priv->idev = idev;
+ input_set_drvdata(idev, priv);
+
+ priv->core_regs = tsadc->regs;
+ if (!priv->core_regs)
+ return -EINVAL;
+
+ priv->clk = tsadc->clk;
+ if (!priv->clk)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, priv);
+
+ error = devm_request_threaded_irq(dev, priv->irq, mx25_tcq_irq,
+ mx25_tcq_irq_thread, 0, pdev->name,
+ priv);
+ if (error) {
+ dev_err(dev, "Failed requesting IRQ\n");
+ return error;
+ }
+
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(dev, "Failed to register input device\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static struct platform_driver mx25_tcq_driver = {
+ .driver = {
+ .name = "mx25-tcq",
+ .of_match_table = mx25_tcq_ids,
+ },
+ .probe = mx25_tcq_probe,
+};
+module_platform_driver(mx25_tcq_driver);
+
+MODULE_DESCRIPTION("TS input driver for Freescale mx25");
+MODULE_AUTHOR("Markus Pargmann <mpa@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
new file mode 100644
index 000000000000..fb5fb9140ca9
--- /dev/null
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -0,0 +1,1543 @@
+/*
+ * MELFAS MIP4 Touchscreen
+ *
+ * Copyright (C) 2016 MELFAS Inc.
+ *
+ * Author : Sangwon Jee <jeesw@melfas.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define MIP4_DEVICE_NAME "mip4_ts"
+
+/*****************************************************************
+ * Protocol
+ * Version : MIP 4.0 Rev 4.6
+ *****************************************************************/
+
+/* Address */
+#define MIP4_R0_BOOT 0x00
+#define MIP4_R1_BOOT_MODE 0x01
+#define MIP4_R1_BOOT_BUF_ADDR 0x10
+#define MIP4_R1_BOOT_STATUS 0x20
+#define MIP4_R1_BOOT_CMD 0x30
+#define MIP4_R1_BOOT_TARGET_ADDR 0x40
+#define MIP4_R1_BOOT_SIZE 0x44
+
+#define MIP4_R0_INFO 0x01
+#define MIP4_R1_INFO_PRODUCT_NAME 0x00
+#define MIP4_R1_INFO_RESOLUTION_X 0x10
+#define MIP4_R1_INFO_RESOLUTION_Y 0x12
+#define MIP4_R1_INFO_NODE_NUM_X 0x14
+#define MIP4_R1_INFO_NODE_NUM_Y 0x15
+#define MIP4_R1_INFO_KEY_NUM 0x16
+#define MIP4_R1_INFO_PRESSURE_NUM 0x17
+#define MIP4_R1_INFO_LENGTH_X 0x18
+#define MIP4_R1_INFO_LENGTH_Y 0x1A
+#define MIP4_R1_INFO_PPM_X 0x1C
+#define MIP4_R1_INFO_PPM_Y 0x1D
+#define MIP4_R1_INFO_VERSION_BOOT 0x20
+#define MIP4_R1_INFO_VERSION_CORE 0x22
+#define MIP4_R1_INFO_VERSION_APP 0x24
+#define MIP4_R1_INFO_VERSION_PARAM 0x26
+#define MIP4_R1_INFO_SECT_BOOT_START 0x30
+#define MIP4_R1_INFO_SECT_BOOT_END 0x31
+#define MIP4_R1_INFO_SECT_CORE_START 0x32
+#define MIP4_R1_INFO_SECT_CORE_END 0x33
+#define MIP4_R1_INFO_SECT_APP_START 0x34
+#define MIP4_R1_INFO_SECT_APP_END 0x35
+#define MIP4_R1_INFO_SECT_PARAM_START 0x36
+#define MIP4_R1_INFO_SECT_PARAM_END 0x37
+#define MIP4_R1_INFO_BUILD_DATE 0x40
+#define MIP4_R1_INFO_BUILD_TIME 0x44
+#define MIP4_R1_INFO_CHECKSUM_PRECALC 0x48
+#define MIP4_R1_INFO_CHECKSUM_REALTIME 0x4A
+#define MIP4_R1_INFO_PROTOCOL_NAME 0x50
+#define MIP4_R1_INFO_PROTOCOL_VERSION 0x58
+#define MIP4_R1_INFO_IC_ID 0x70
+#define MIP4_R1_INFO_IC_NAME 0x71
+#define MIP4_R1_INFO_IC_VENDOR_ID 0x75
+#define MIP4_R1_INFO_IC_HW_CATEGORY 0x77
+#define MIP4_R1_INFO_CONTACT_THD_SCR 0x78
+#define MIP4_R1_INFO_CONTACT_THD_KEY 0x7A
+
+#define MIP4_R0_EVENT 0x02
+#define MIP4_R1_EVENT_SUPPORTED_FUNC 0x00
+#define MIP4_R1_EVENT_FORMAT 0x04
+#define MIP4_R1_EVENT_SIZE 0x06
+#define MIP4_R1_EVENT_PACKET_INFO 0x10
+#define MIP4_R1_EVENT_PACKET_DATA 0x11
+
+#define MIP4_R0_CTRL 0x06
+#define MIP4_R1_CTRL_READY_STATUS 0x00
+#define MIP4_R1_CTRL_EVENT_READY 0x01
+#define MIP4_R1_CTRL_MODE 0x10
+#define MIP4_R1_CTRL_EVENT_TRIGGER_TYPE 0x11
+#define MIP4_R1_CTRL_RECALIBRATE 0x12
+#define MIP4_R1_CTRL_POWER_STATE 0x13
+#define MIP4_R1_CTRL_GESTURE_TYPE 0x14
+#define MIP4_R1_CTRL_DISABLE_ESD_ALERT 0x18
+#define MIP4_R1_CTRL_CHARGER_MODE 0x19
+#define MIP4_R1_CTRL_HIGH_SENS_MODE 0x1A
+#define MIP4_R1_CTRL_WINDOW_MODE 0x1B
+#define MIP4_R1_CTRL_PALM_REJECTION 0x1C
+#define MIP4_R1_CTRL_EDGE_CORRECTION 0x1D
+#define MIP4_R1_CTRL_ENTER_GLOVE_MODE 0x1E
+#define MIP4_R1_CTRL_I2C_ON_LPM 0x1F
+#define MIP4_R1_CTRL_GESTURE_DEBUG 0x20
+#define MIP4_R1_CTRL_PALM_EVENT 0x22
+#define MIP4_R1_CTRL_PROXIMITY_SENSING 0x23
+
+/* Value */
+#define MIP4_BOOT_MODE_BOOT 0x01
+#define MIP4_BOOT_MODE_APP 0x02
+
+#define MIP4_BOOT_STATUS_BUSY 0x05
+#define MIP4_BOOT_STATUS_ERROR 0x0E
+#define MIP4_BOOT_STATUS_DONE 0xA0
+
+#define MIP4_BOOT_CMD_MASS_ERASE 0x15
+#define MIP4_BOOT_CMD_PROGRAM 0x54
+#define MIP4_BOOT_CMD_ERASE 0x8F
+#define MIP4_BOOT_CMD_WRITE 0xA5
+#define MIP4_BOOT_CMD_READ 0xC2
+
+#define MIP4_EVENT_INPUT_TYPE_KEY 0
+#define MIP4_EVENT_INPUT_TYPE_SCREEN 1
+#define MIP4_EVENT_INPUT_TYPE_PROXIMITY 2
+
+#define I2C_RETRY_COUNT 3 /* at least 2 */
+
+#define MIP4_BUF_SIZE 128
+#define MIP4_MAX_FINGERS 10
+#define MIP4_MAX_KEYS 4
+
+#define MIP4_TOUCH_MAJOR_MIN 0
+#define MIP4_TOUCH_MAJOR_MAX 255
+#define MIP4_TOUCH_MINOR_MIN 0
+#define MIP4_TOUCH_MINOR_MAX 255
+#define MIP4_PRESSURE_MIN 0
+#define MIP4_PRESSURE_MAX 255
+
+#define MIP4_FW_NAME "melfas_mip4.fw"
+#define MIP4_FW_UPDATE_DEBUG 0 /* 0 (default) or 1 */
+
+struct mip4_fw_version {
+ u16 boot;
+ u16 core;
+ u16 app;
+ u16 param;
+};
+
+struct mip4_ts {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct gpio_desc *gpio_ce;
+
+ char phys[32];
+ char product_name[16];
+
+ unsigned int max_x;
+ unsigned int max_y;
+ u8 node_x;
+ u8 node_y;
+ u8 node_key;
+ unsigned int ppm_x;
+ unsigned int ppm_y;
+
+ struct mip4_fw_version fw_version;
+
+ unsigned int event_size;
+ unsigned int event_format;
+
+ unsigned int key_num;
+ unsigned short key_code[MIP4_MAX_KEYS];
+
+ bool wake_irq_enabled;
+
+ u8 buf[MIP4_BUF_SIZE];
+};
+
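+/*
+ * Register read: write the two-byte register address, then read the
+ * reply in one combined transfer, retrying up to I2C_RETRY_COUNT times.
+ */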
+static int mip4_i2c_xfer(struct mip4_ts *ts,
+ char *write_buf, unsigned int write_len,
+ char *read_buf, unsigned int read_len)
+{
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = write_buf,
+ .len = write_len,
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .buf = read_buf,
+ .len = read_len,
+ },
+ };
+ int retry = I2C_RETRY_COUNT;
+ int res;
+ int error;
+
+ do {
+ res = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (res == ARRAY_SIZE(msg))
+ return 0;
+
+ error = res < 0 ? res : -EIO;
+ dev_err(&ts->client->dev,
+ "%s - i2c_transfer failed: %d (%d)\n",
+ __func__, error, res);
+ } while (--retry);
+
+ return error;
+}
+
+static void mip4_parse_fw_version(const u8 *buf, struct mip4_fw_version *v)
+{
+ v->boot = get_unaligned_le16(buf + 0);
+ v->core = get_unaligned_le16(buf + 2);
+ v->app = get_unaligned_le16(buf + 4);
+ v->param = get_unaligned_le16(buf + 6);
+}
+
+/*
+ * Read chip firmware version
+ */
+static int mip4_get_fw_version(struct mip4_ts *ts)
+{
+ u8 cmd[] = { MIP4_R0_INFO, MIP4_R1_INFO_VERSION_BOOT };
+ u8 buf[sizeof(ts->fw_version)];
+ int error;
+
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), buf, sizeof(buf));
+ if (error) {
+ memset(&ts->fw_version, 0xff, sizeof(ts->fw_version));
+ return error;
+ }
+
+ mip4_parse_fw_version(buf, &ts->fw_version);
+
+ return 0;
+}
+
+/*
+ * Fetch device characteristics
+ */
+static int mip4_query_device(struct mip4_ts *ts)
+{
+ int error;
+ u8 cmd[2];
+ u8 buf[14];
+
+ /* Product name */
+ cmd[0] = MIP4_R0_INFO;
+ cmd[1] = MIP4_R1_INFO_PRODUCT_NAME;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd),
+ ts->product_name, sizeof(ts->product_name));
+ if (error)
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve product name: %d\n", error);
+ else
+ dev_dbg(&ts->client->dev, "product name: %.*s\n",
+ (int)sizeof(ts->product_name), ts->product_name);
+
+ /* Firmware version */
+ error = mip4_get_fw_version(ts);
+ if (error)
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve FW version: %d\n", error);
+ else
+ dev_dbg(&ts->client->dev, "F/W Version: %04X %04X %04X %04X\n",
+ ts->fw_version.boot, ts->fw_version.core,
+ ts->fw_version.app, ts->fw_version.param);
+
+ /* Resolution */
+ cmd[0] = MIP4_R0_INFO;
+ cmd[1] = MIP4_R1_INFO_RESOLUTION_X;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), buf, 14);
+ if (error) {
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve touchscreen parameters: %d\n",
+ error);
+ } else {
+ ts->max_x = get_unaligned_le16(&buf[0]);
+ ts->max_y = get_unaligned_le16(&buf[2]);
+ dev_dbg(&ts->client->dev, "max_x: %d, max_y: %d\n",
+ ts->max_x, ts->max_y);
+
+ ts->node_x = buf[4];
+ ts->node_y = buf[5];
+ ts->node_key = buf[6];
+ dev_dbg(&ts->client->dev,
+ "node_x: %d, node_y: %d, node_key: %d\n",
+ ts->node_x, ts->node_y, ts->node_key);
+
+ ts->ppm_x = buf[12];
+ ts->ppm_y = buf[13];
+ dev_dbg(&ts->client->dev, "ppm_x: %d, ppm_y: %d\n",
+ ts->ppm_x, ts->ppm_y);
+
+ /* Key ts */
+ if (ts->node_key > 0)
+ ts->key_num = ts->node_key;
+ }
+
+ /* Protocol */
+ cmd[0] = MIP4_R0_EVENT;
+ cmd[1] = MIP4_R1_EVENT_SUPPORTED_FUNC;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), buf, 7);
+ if (error) {
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve device type: %d\n", error);
+ ts->event_format = 0xff;
+ } else {
+ ts->event_format = get_unaligned_le16(&buf[4]);
+ ts->event_size = buf[6];
+ dev_dbg(&ts->client->dev, "event_format: %d, event_size: %d\n",
+ ts->event_format, ts->event_size);
+
+ if (ts->event_format == 2 || ts->event_format > 3)
+ dev_warn(&ts->client->dev,
+ "Unknown event format %d\n", ts->event_format);
+ }
+
+ return 0;
+}
+
+static int mip4_power_on(struct mip4_ts *ts)
+{
+ if (ts->gpio_ce) {
+ gpiod_set_value_cansleep(ts->gpio_ce, 1);
+
+ /* Booting delay: 200~300 ms */
+ usleep_range(200 * 1000, 300 * 1000);
+ }
+
+ return 0;
+}
+
+static void mip4_power_off(struct mip4_ts *ts)
+{
+ if (ts->gpio_ce)
+ gpiod_set_value_cansleep(ts->gpio_ce, 0);
+}
+
+/*
+ * Clear touch input event status
+ */
+static void mip4_clear_input(struct mip4_ts *ts)
+{
+ int i;
+
+ /* Screen */
+ for (i = 0; i < MIP4_MAX_FINGERS; i++) {
+ input_mt_slot(ts->input, i);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+ }
+
+ /* Keys */
+ for (i = 0; i < ts->key_num; i++)
+ input_report_key(ts->input, ts->key_code[i], 0);
+
+ input_sync(ts->input);
+}
+
+static int mip4_enable(struct mip4_ts *ts)
+{
+ int error;
+
+ error = mip4_power_on(ts);
+ if (error)
+ return error;
+
+ enable_irq(ts->client->irq);
+
+ return 0;
+}
+
+static void mip4_disable(struct mip4_ts *ts)
+{
+ disable_irq(ts->client->irq);
+
+ mip4_power_off(ts);
+
+ mip4_clear_input(ts);
+}
+
+/*****************************************************************
+ * Input handling
+ *****************************************************************/
+
+static void mip4_report_keys(struct mip4_ts *ts, u8 *packet)
+{
+ u8 key;
+ bool down;
+
+ switch (ts->event_format) {
+ case 0:
+ case 1:
+ key = packet[0] & 0x0F;
+ down = packet[0] & 0x80;
+ break;
+
+ case 3:
+ default:
+ key = packet[0] & 0x0F;
+ down = packet[1] & 0x01;
+ break;
+ }
+
+ /* Report key event */
+ if (key >= 1 && key <= ts->key_num) {
+ unsigned short keycode = ts->key_code[key - 1];
+
+ dev_dbg(&ts->client->dev,
+ "Key - ID: %d, keycode: %d, state: %d\n",
+ key, keycode, down);
+
+ input_event(ts->input, EV_MSC, MSC_SCAN, keycode);
+ input_report_key(ts->input, keycode, down);
+
+ } else {
+ dev_err(&ts->client->dev, "Unknown key: %d\n", key);
+ }
+}
+
+static void mip4_report_touch(struct mip4_ts *ts, u8 *packet)
+{
+ int id;
+ bool hover;
+ bool palm;
+ bool state;
+ u16 x, y;
+ u8 pressure_stage = 0;
+ u8 pressure;
+ u8 size;
+ u8 touch_major;
+ u8 touch_minor;
+
+ switch (ts->event_format) {
+ case 0:
+ case 1:
+ /* Touch only */
+ state = packet[0] & BIT(7);
+ hover = packet[0] & BIT(5);
+ palm = packet[0] & BIT(4);
+ id = (packet[0] & 0x0F) - 1;
+ x = ((packet[1] & 0x0F) << 8) | packet[2];
+ y = (((packet[1] >> 4) & 0x0F) << 8) |
+ packet[3];
+ pressure = packet[4];
+ size = packet[5];
+ if (ts->event_format == 0) {
+ touch_major = packet[5];
+ touch_minor = packet[5];
+ } else {
+ touch_major = packet[6];
+ touch_minor = packet[7];
+ }
+ break;
+
+ case 3:
+ default:
+ /* Touch + Force(Pressure) */
+ id = (packet[0] & 0x0F) - 1;
+ hover = packet[1] & BIT(2);
+ palm = packet[1] & BIT(1);
+ state = packet[1] & BIT(0);
+ x = ((packet[2] & 0x0F) << 8) | packet[3];
+ y = (((packet[2] >> 4) & 0x0F) << 8) |
+ packet[4];
+ size = packet[6];
+ pressure_stage = (packet[7] & 0xF0) >> 4;
+ pressure = ((packet[7] & 0x0F) << 8) |
+ packet[8];
+ touch_major = packet[9];
+ touch_minor = packet[10];
+ break;
+ }
+
+ dev_dbg(&ts->client->dev,
+ "Screen - Slot: %d State: %d X: %04d Y: %04d Z: %d\n",
+ id, state, x, y, pressure);
+
+ if (unlikely(id < 0 || id >= MIP4_MAX_FINGERS)) {
+ dev_err(&ts->client->dev, "Screen - invalid slot ID: %d\n", id);
+ } else if (state) {
+ /* Press or Move event */
+ input_mt_slot(ts->input, id);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true);
+ input_report_abs(ts->input, ABS_MT_POSITION_X, x);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y, y);
+ input_report_abs(ts->input, ABS_MT_PRESSURE, pressure);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, touch_major);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MINOR, touch_minor);
+ } else {
+ /* Release event */
+ input_mt_slot(ts->input, id);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+ }
+
+ input_mt_sync_frame(ts->input);
+}
+
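+/*
+ * Dispatch one event packet; the packet type is encoded in bit 6 of the
+ * first byte for event formats 0/1 and in its upper nibble for format 3.
+ */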
+static int mip4_handle_packet(struct mip4_ts *ts, u8 *packet)
+{
+ u8 type;
+
+ switch (ts->event_format) {
+ case 0:
+ case 1:
+ type = (packet[0] & 0x40) >> 6;
+ break;
+
+ case 3:
+ type = (packet[0] & 0xF0) >> 4;
+ break;
+
+ default:
+ /* Should not happen unless we have corrupted firmware */
+ return -EINVAL;
+ }
+
+ dev_dbg(&ts->client->dev, "Type: %d\n", type);
+
+ /* Report input event */
+ switch (type) {
+ case MIP4_EVENT_INPUT_TYPE_KEY:
+ mip4_report_keys(ts, packet);
+ break;
+
+ case MIP4_EVENT_INPUT_TYPE_SCREEN:
+ mip4_report_touch(ts, packet);
+ break;
+
+ default:
+ dev_err(&ts->client->dev, "Unknown event type: %d\n", type);
+ break;
+ }
+
+ return 0;
+}
+
+static irqreturn_t mip4_interrupt(int irq, void *dev_id)
+{
+ struct mip4_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+ unsigned int i;
+ int error;
+ u8 cmd[2];
+ u8 size;
+ bool alert;
+
+ /* Read packet info */
+ cmd[0] = MIP4_R0_EVENT;
+ cmd[1] = MIP4_R1_EVENT_PACKET_INFO;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), ts->buf, 1);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read packet info: %d\n", error);
+ goto out;
+ }
+
+ size = ts->buf[0] & 0x7F;
+ alert = ts->buf[0] & BIT(7);
+ dev_dbg(&client->dev, "packet size: %d, alert: %d\n", size, alert);
+
+ /* Check size */
+ if (!size) {
+ dev_err(&client->dev, "Empty packet\n");
+ goto out;
+ }
+
+ /* Read packet data */
+ cmd[0] = MIP4_R0_EVENT;
+ cmd[1] = MIP4_R1_EVENT_PACKET_DATA;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), ts->buf, size);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read packet data: %d\n", error);
+ goto out;
+ }
+
+ if (alert) {
+ dev_dbg(&client->dev, "Alert: %d\n", ts->buf[0]);
+ } else {
+ for (i = 0; i < size; i += ts->event_size) {
+ error = mip4_handle_packet(ts, &ts->buf[i]);
+ if (error)
+ break;
+ }
+
+ input_sync(ts->input);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int mip4_input_open(struct input_dev *dev)
+{
+ struct mip4_ts *ts = input_get_drvdata(dev);
+
+ return mip4_enable(ts);
+}
+
+static void mip4_input_close(struct input_dev *dev)
+{
+ struct mip4_ts *ts = input_get_drvdata(dev);
+
+ mip4_disable(ts);
+}
+
+/*****************************************************************
+ * Firmware update
+ *****************************************************************/
+
+/* Firmware Info */
+#define MIP4_BL_PAGE_SIZE 512 /* 512 */
+#define MIP4_BL_PACKET_SIZE 512 /* 512, 256, 128, 64, ... */
+
+/*
+ * Firmware binary tail info
+ */
+
+struct mip4_bin_tail {
+ u8 tail_mark[4];
+ u8 chip_name[4];
+
+ __le32 bin_start_addr;
+ __le32 bin_length;
+
+ __le16 ver_boot;
+ __le16 ver_core;
+ __le16 ver_app;
+ __le16 ver_param;
+
+ u8 boot_start;
+ u8 boot_end;
+ u8 core_start;
+ u8 core_end;
+ u8 app_start;
+ u8 app_end;
+ u8 param_start;
+ u8 param_end;
+
+ u8 checksum_type;
+ u8 hw_category;
+
+ __le16 param_id;
+ __le32 param_length;
+ __le32 build_date;
+ __le32 build_time;
+
+ __le32 reserved1;
+ __le32 reserved2;
+ __le16 reserved3;
+ __le16 tail_size;
+ __le32 crc;
+} __packed;
+
+#define MIP4_BIN_TAIL_MARK "MBT\001"
+#define MIP4_BIN_TAIL_SIZE (sizeof(struct mip4_bin_tail))
+
+/*
+ * Bootloader - Read status
+ */
+static int mip4_bl_read_status(struct mip4_ts *ts)
+{
+ u8 cmd[] = { MIP4_R0_BOOT, MIP4_R1_BOOT_STATUS };
+ u8 result;
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = cmd,
+ .len = sizeof(cmd),
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .buf = &result,
+ .len = sizeof(result),
+ },
+ };
+ int ret;
+ int error;
+ int retry = 1000;
+
+ do {
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to read bootloader status: %d\n",
+ error);
+ return error;
+ }
+
+ switch (result) {
+ case MIP4_BOOT_STATUS_DONE:
+ dev_dbg(&ts->client->dev, "%s - done\n", __func__);
+ return 0;
+
+ case MIP4_BOOT_STATUS_ERROR:
+ dev_err(&ts->client->dev, "Bootloader failure\n");
+ return -EIO;
+
+ case MIP4_BOOT_STATUS_BUSY:
+ dev_dbg(&ts->client->dev, "%s - Busy\n", __func__);
+ error = -EBUSY;
+ break;
+
+ default:
+ dev_err(&ts->client->dev,
+ "Unexpected bootloader status: %#02x\n",
+ result);
+ error = -EINVAL;
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ } while (--retry);
+
+ return error;
+}
+
+/*
+ * Bootloader - Change mode
+ */
+static int mip4_bl_change_mode(struct mip4_ts *ts, u8 mode)
+{
+ u8 mode_chg_cmd[] = { MIP4_R0_BOOT, MIP4_R1_BOOT_MODE, mode };
+ u8 mode_read_cmd[] = { MIP4_R0_BOOT, MIP4_R1_BOOT_MODE };
+ u8 result;
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = mode_read_cmd,
+ .len = sizeof(mode_read_cmd),
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .buf = &result,
+ .len = sizeof(result),
+ },
+ };
+ int retry = 10;
+ int ret;
+ int error;
+
+ do {
+ /* Send mode change command */
+ ret = i2c_master_send(ts->client,
+ mode_chg_cmd, sizeof(mode_chg_cmd));
+ if (ret != sizeof(mode_chg_cmd)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send %d mode change: %d (%d)\n",
+ mode, error, ret);
+ return error;
+ }
+
+ dev_dbg(&ts->client->dev,
+ "Sent mode change request (mode: %d)\n", mode);
+
+ /* Wait */
+ msleep(1000);
+
+ /* Verify target mode */
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to read device mode: %d\n", error);
+ return error;
+ }
+
+ dev_dbg(&ts->client->dev,
+ "Current device mode: %d, want: %d\n", result, mode);
+
+ if (result == mode)
+ return 0;
+
+ } while (--retry);
+
+ return -EIO;
+}
+
+/*
+ * Bootloader - Start bootloader mode
+ */
+static int mip4_bl_enter(struct mip4_ts *ts)
+{
+ return mip4_bl_change_mode(ts, MIP4_BOOT_MODE_BOOT);
+}
+
+/*
+ * Bootloader - Exit bootloader mode
+ */
+static int mip4_bl_exit(struct mip4_ts *ts)
+{
+ return mip4_bl_change_mode(ts, MIP4_BOOT_MODE_APP);
+}
+
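+/*
+ * Bootloader - Read the data buffer address
+ */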
+static int mip4_bl_get_address(struct mip4_ts *ts, u16 *buf_addr)
+{
+ u8 cmd[] = { MIP4_R0_BOOT, MIP4_R1_BOOT_BUF_ADDR };
+ u8 result[sizeof(u16)];
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = cmd,
+ .len = sizeof(cmd),
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .buf = result,
+ .len = sizeof(result),
+ },
+ };
+ int ret;
+ int error;
+
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to retrieve bootloader buffer address: %d\n",
+ error);
+ return error;
+ }
+
+ *buf_addr = get_unaligned_le16(result);
+ dev_dbg(&ts->client->dev,
+ "Bootloader buffer address %#04x\n", *buf_addr);
+
+ return 0;
+}
+
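+/*
+ * Bootloader - Program one page, written in MIP4_BL_PACKET_SIZE chunks
+ */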
+static int mip4_bl_program_page(struct mip4_ts *ts, int offset,
+ const u8 *data, int length, u16 buf_addr)
+{
+ u8 cmd[6];
+ u8 *data_buf;
+ u16 buf_offset;
+ int ret;
+ int error;
+
+ dev_dbg(&ts->client->dev, "Writing page @%#06x (%d)\n",
+ offset, length);
+
+ if (length > MIP4_BL_PAGE_SIZE || length % MIP4_BL_PACKET_SIZE) {
+ dev_err(&ts->client->dev,
+ "Invalid page length: %d\n", length);
+ return -EINVAL;
+ }
+
+ data_buf = kmalloc(2 + MIP4_BL_PACKET_SIZE, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ /* Addr */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_TARGET_ADDR;
+ put_unaligned_le32(offset, &cmd[2]);
+ ret = i2c_master_send(ts->client, cmd, 6);
+ if (ret != 6) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send write page address: %d\n", error);
+ goto out;
+ }
+
+ /* Size */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_SIZE;
+ put_unaligned_le32(length, &cmd[2]);
+ ret = i2c_master_send(ts->client, cmd, 6);
+ if (ret != 6) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send write page size: %d\n", error);
+ goto out;
+ }
+
+ /* Data */
+ for (buf_offset = 0;
+ buf_offset < length;
+ buf_offset += MIP4_BL_PACKET_SIZE) {
+ dev_dbg(&ts->client->dev,
+ "writing chunk at %#04x (size %d)\n",
+ buf_offset, MIP4_BL_PACKET_SIZE);
+ put_unaligned_be16(buf_addr + buf_offset, data_buf);
+ memcpy(&data_buf[2], &data[buf_offset], MIP4_BL_PACKET_SIZE);
+ ret = i2c_master_send(ts->client,
+ data_buf, 2 + MIP4_BL_PACKET_SIZE);
+ if (ret != 2 + MIP4_BL_PACKET_SIZE) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to read chunk at %#04x (size %d): %d\n",
+ buf_offset, MIP4_BL_PACKET_SIZE, error);
+ goto out;
+ }
+ }
+
+ /* Command */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_CMD;
+ cmd[2] = MIP4_BOOT_CMD_PROGRAM;
+ ret = i2c_master_send(ts->client, cmd, 3);
+ if (ret != 3) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send 'write' command: %d\n", error);
+ goto out;
+ }
+
+ /* Status */
+ error = mip4_bl_read_status(ts);
+
+out:
+ kfree(data_buf);
+ return error ? error : 0;
+}
+
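+/*
+ * Bootloader - Read back and verify one page
+ */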
+static int mip4_bl_verify_page(struct mip4_ts *ts, int offset,
+ const u8 *data, int length, int buf_addr)
+{
+ u8 cmd[8];
+ u8 *read_buf;
+ int buf_offset;
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = cmd,
+ .len = 2,
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .len = MIP4_BL_PACKET_SIZE,
+ },
+ };
+ int ret;
+ int error;
+
+ dev_dbg(&ts->client->dev, "Validating page @%#06x (%d)\n",
+ offset, length);
+
+ /* Addr */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_TARGET_ADDR;
+ put_unaligned_le32(offset, &cmd[2]);
+ ret = i2c_master_send(ts->client, cmd, 6);
+ if (ret != 6) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send read page address: %d\n", error);
+ return error;
+ }
+
+ /* Size */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_SIZE;
+ put_unaligned_le32(length, &cmd[2]);
+ ret = i2c_master_send(ts->client, cmd, 6);
+ if (ret != 6) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send read page size: %d\n", error);
+ return error;
+ }
+
+ /* Command */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_CMD;
+ cmd[2] = MIP4_BOOT_CMD_READ;
+ ret = i2c_master_send(ts->client, cmd, 3);
+ if (ret != 3) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send 'read' command: %d\n", error);
+ return error;
+ }
+
+ /* Status */
+ error = mip4_bl_read_status(ts);
+ if (error)
+ return error;
+
+ /* Read */
+ msg[1].buf = read_buf = kmalloc(MIP4_BL_PACKET_SIZE, GFP_KERNEL);
+ if (!read_buf)
+ return -ENOMEM;
+
+ for (buf_offset = 0;
+ buf_offset < length;
+ buf_offset += MIP4_BL_PACKET_SIZE) {
+ dev_dbg(&ts->client->dev,
+ "reading chunk at %#04x (size %d)\n",
+ buf_offset, MIP4_BL_PACKET_SIZE);
+ put_unaligned_be16(buf_addr + buf_offset, cmd);
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to read chunk at %#04x (size %d): %d\n",
+ buf_offset, MIP4_BL_PACKET_SIZE, error);
+ break;
+ }
+
+ if (memcmp(&data[buf_offset], read_buf, MIP4_BL_PACKET_SIZE)) {
+ dev_err(&ts->client->dev,
+ "Failed to validate chunk at %#04x (size %d)\n",
+ buf_offset, MIP4_BL_PACKET_SIZE);
+#if MIP4_FW_UPDATE_DEBUG
+ print_hex_dump(KERN_DEBUG,
+ MIP4_DEVICE_NAME " F/W File: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ data + buf_offset, MIP4_BL_PACKET_SIZE,
+ false);
+ print_hex_dump(KERN_DEBUG,
+ MIP4_DEVICE_NAME " F/W Chip: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ read_buf, MIP4_BL_PACKET_SIZE, false);
+#endif
+ error = -EINVAL;
+ break;
+ }
+ }
+
+ kfree(read_buf);
+ return error ? error : 0;
+}
+
+/*
+ * Flash chip firmware
+ */
+static int mip4_flash_fw(struct mip4_ts *ts,
+ const u8 *fw_data, u32 fw_size, u32 fw_offset)
+{
+ struct i2c_client *client = ts->client;
+ int offset;
+ u16 buf_addr;
+ int error, error2;
+
+ /* Enter bootloader mode */
+ dev_dbg(&client->dev, "Entering bootloader mode\n");
+
+ error = mip4_bl_enter(ts);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enter bootloader mode: %d\n",
+ error);
+ return error;
+ }
+
+ /* Read info */
+ error = mip4_bl_get_address(ts, &buf_addr);
+ if (error)
+ goto exit_bl;
+
+ /* Program & Verify */
+ dev_dbg(&client->dev,
+ "Program & Verify, page size: %d, packet size: %d\n",
+ MIP4_BL_PAGE_SIZE, MIP4_BL_PACKET_SIZE);
+
+ for (offset = fw_offset;
+ offset < fw_offset + fw_size;
+ offset += MIP4_BL_PAGE_SIZE) {
+ /* Program */
+ error = mip4_bl_program_page(ts, offset, fw_data + offset,
+ MIP4_BL_PAGE_SIZE, buf_addr);
+ if (error)
+ break;
+
+ /* Verify */
+ error = mip4_bl_verify_page(ts, offset, fw_data + offset,
+ MIP4_BL_PAGE_SIZE, buf_addr);
+ if (error)
+ break;
+ }
+
+exit_bl:
+ /* Exit bootloader mode */
+ dev_dbg(&client->dev, "Exiting bootloader mode\n");
+
+ error2 = mip4_bl_exit(ts);
+ if (error2) {
+ dev_err(&client->dev,
+ "Failed to exit bootloader mode: %d\n", error2);
+ if (!error)
+ error = error2;
+ }
+
+ /* Reset chip */
+ mip4_power_off(ts);
+ mip4_power_on(ts);
+
+ mip4_query_device(ts);
+
+ /* Refresh device parameters */
+ input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, ts->max_x, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->max_y, 0, 0);
+ input_set_abs_params(ts->input, ABS_X, 0, ts->max_x, 0, 0);
+ input_set_abs_params(ts->input, ABS_Y, 0, ts->max_y, 0, 0);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->ppm_x);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->ppm_y);
+ input_abs_set_res(ts->input, ABS_X, ts->ppm_x);
+ input_abs_set_res(ts->input, ABS_Y, ts->ppm_y);
+
+ return error ? error : 0;
+}
+
+static int mip4_parse_firmware(struct mip4_ts *ts, const struct firmware *fw,
+ u32 *fw_offset_start, u32 *fw_size,
+ const struct mip4_bin_tail **pfw_info)
+{
+ const struct mip4_bin_tail *fw_info;
+ struct mip4_fw_version fw_version;
+ u16 tail_size;
+
+ if (fw->size < MIP4_BIN_TAIL_SIZE) {
+ dev_err(&ts->client->dev,
+ "Invalid firmware, size mismatch (tail %zd vs %zd)\n",
+ MIP4_BIN_TAIL_SIZE, fw->size);
+ return -EINVAL;
+ }
+
+ fw_info = (const void *)&fw->data[fw->size - MIP4_BIN_TAIL_SIZE];
+
+#if MIP4_FW_UPDATE_DEBUG
+ print_hex_dump(KERN_ERR, MIP4_DEVICE_NAME " Bin Info: ",
+ DUMP_PREFIX_OFFSET, 16, 1, fw_info, MIP4_BIN_TAIL_SIZE, false);
+#endif
+
+ tail_size = get_unaligned_le16(&fw_info->tail_size);
+ if (tail_size != MIP4_BIN_TAIL_SIZE) {
+ dev_err(&ts->client->dev,
+ "wrong tail size: %d (expected %zd)\n",
+ tail_size, MIP4_BIN_TAIL_SIZE);
+ return -EINVAL;
+ }
+
+ /* Check bin format */
+ if (memcmp(fw_info->tail_mark, MIP4_BIN_TAIL_MARK,
+ sizeof(fw_info->tail_mark))) {
+ dev_err(&ts->client->dev,
+ "unable to locate tail marker (%*ph vs %*ph)\n",
+ (int)sizeof(fw_info->tail_mark), fw_info->tail_mark,
+ (int)sizeof(fw_info->tail_mark), MIP4_BIN_TAIL_MARK);
+ return -EINVAL;
+ }
+
+ *fw_offset_start = get_unaligned_le32(&fw_info->bin_start_addr);
+ *fw_size = get_unaligned_le32(&fw_info->bin_length);
+
+ dev_dbg(&ts->client->dev,
+ "F/W Data offset: %#08x, size: %d\n",
+ *fw_offset_start, *fw_size);
+
+ if (*fw_size % MIP4_BL_PAGE_SIZE) {
+ dev_err(&ts->client->dev,
+ "encoded fw length %d is not multiple of pages (%d)\n",
+ *fw_size, MIP4_BL_PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ if (fw->size != *fw_offset_start + *fw_size) {
+ dev_err(&ts->client->dev,
+ "Wrong firmware size, expected %d bytes, got %zd\n",
+ *fw_offset_start + *fw_size, fw->size);
+ return -EINVAL;
+ }
+
+ mip4_parse_fw_version((const u8 *)&fw_info->ver_boot, &fw_version);
+
+ dev_dbg(&ts->client->dev,
+ "F/W file version %04X %04X %04X %04X\n",
+ fw_version.boot, fw_version.core,
+ fw_version.app, fw_version.param);
+
+ dev_dbg(&ts->client->dev, "F/W chip version: %04X %04X %04X %04X\n",
+ ts->fw_version.boot, ts->fw_version.core,
+ ts->fw_version.app, ts->fw_version.param);
+
+ /* Check F/W type */
+ if (fw_version.boot != 0xEEEE && fw_version.boot != 0xFFFF &&
+ fw_version.core == 0xEEEE &&
+ fw_version.app == 0xEEEE &&
+ fw_version.param == 0xEEEE) {
+ dev_dbg(&ts->client->dev, "F/W type: Bootloader\n");
+ } else if (fw_version.boot == 0xEEEE &&
+ fw_version.core != 0xEEEE && fw_version.core != 0xFFFF &&
+ fw_version.app != 0xEEEE && fw_version.app != 0xFFFF &&
+ fw_version.param != 0xEEEE && fw_version.param != 0xFFFF) {
+ dev_dbg(&ts->client->dev, "F/W type: Main\n");
+ } else {
+ dev_err(&ts->client->dev, "Wrong firmware type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
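+/*
+ * Parse the firmware image and flash it, retrying up to 3 times on error
+ */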
+static int mip4_execute_fw_update(struct mip4_ts *ts, const struct firmware *fw)
+{
+ const struct mip4_bin_tail *fw_info;
+ u32 fw_start_offset;
+ u32 fw_size;
+ int retries = 3;
+ int error;
+
+ error = mip4_parse_firmware(ts, fw,
+ &fw_start_offset, &fw_size, &fw_info);
+ if (error)
+ return error;
+
+ if (ts->input->users) {
+ disable_irq(ts->client->irq);
+ } else {
+ error = mip4_power_on(ts);
+ if (error)
+ return error;
+ }
+
+ /* Update firmware */
+ do {
+ error = mip4_flash_fw(ts, fw->data, fw_size, fw_start_offset);
+ if (!error)
+ break;
+ } while (--retries);
+
+ if (error)
+ dev_err(&ts->client->dev,
+ "Failed to flash firmware: %d\n", error);
+
+ /* Enable IRQ */
+ if (ts->input->users)
+ enable_irq(ts->client->irq);
+ else
+ mip4_power_off(ts);
+
+ return error ? error : 0;
+}
+
+static ssize_t mip4_sysfs_fw_update(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ const struct firmware *fw;
+ int error;
+
+ error = request_firmware(&fw, MIP4_FW_NAME, dev);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "Failed to retrieve firmware %s: %d\n",
+ MIP4_FW_NAME, error);
+ return error;
+ }
+
+ /*
+ * Take the input mutex to prevent racing with ourselves, with
+ * userspace opening and closing the device, and with
+ * suspend/resume transitions.
+ */
+ mutex_lock(&ts->input->mutex);
+
+ error = mip4_execute_fw_update(ts, fw);
+
+ mutex_unlock(&ts->input->mutex);
+
+ release_firmware(fw);
+
+ if (error) {
+ dev_err(&ts->client->dev,
+ "Firmware update failed: %d\n", error);
+ return error;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, mip4_sysfs_fw_update);
+
+static ssize_t mip4_sysfs_read_fw_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ size_t count;
+
+ /* Take lock to prevent racing with firmware update */
+ mutex_lock(&ts->input->mutex);
+
+ count = snprintf(buf, PAGE_SIZE, "%04X %04X %04X %04X\n",
+ ts->fw_version.boot, ts->fw_version.core,
+ ts->fw_version.app, ts->fw_version.param);
+
+ mutex_unlock(&ts->input->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, mip4_sysfs_read_fw_version, NULL);
+
+static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ size_t count;
+
+ /* Take lock to prevent racing with firmware update */
+ mutex_lock(&ts->input->mutex);
+
+ /*
+ * product_name shows the name or version of the hardware
+ * paired with the current firmware in the chip.
+ */
+ count = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ (int)sizeof(ts->product_name), ts->product_name);
+
+ mutex_unlock(&ts->input->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL);
+
+static struct attribute *mip4_attrs[] = {
+ &dev_attr_fw_version.attr,
+ &dev_attr_hw_version.attr,
+ &dev_attr_update_fw.attr,
+ NULL,
+};
+
+static const struct attribute_group mip4_attr_group = {
+ .attrs = mip4_attrs,
+};
+
+static void mip4_sysfs_remove(void *_data)
+{
+ struct mip4_ts *ts = _data;
+
+ sysfs_remove_group(&ts->client->dev.kobj, &mip4_attr_group);
+}
+
+static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct mip4_ts *ts;
+ struct input_dev *input;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "Not supported I2C adapter\n");
+ return -ENXIO;
+ }
+
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
+
+ ts->client = client;
+ ts->input = input;
+
+ snprintf(ts->phys, sizeof(ts->phys),
+ "%s/input0", dev_name(&client->dev));
+
+ ts->gpio_ce = devm_gpiod_get_optional(&client->dev,
+ "ce", GPIOD_OUT_LOW);
+ if (IS_ERR(ts->gpio_ce)) {
+ error = PTR_ERR(ts->gpio_ce);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get gpio: %d\n", error);
+ return error;
+ }
+
+ error = mip4_power_on(ts);
+ if (error)
+ return error;
+ error = mip4_query_device(ts);
+ mip4_power_off(ts);
+ if (error)
+ return error;
+
+ input->name = "MELFAS MIP4 Touchscreen";
+ input->phys = ts->phys;
+
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x13c5;
+
+ input->open = mip4_input_open;
+ input->close = mip4_input_close;
+
+ input_set_drvdata(input, ts);
+
+ input->keycode = ts->key_code;
+ input->keycodesize = sizeof(*ts->key_code);
+ input->keycodemax = ts->key_num;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, ts->max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, ts->max_y, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE,
+ MIP4_PRESSURE_MIN, MIP4_PRESSURE_MAX, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,
+ MIP4_TOUCH_MAJOR_MIN, MIP4_TOUCH_MAJOR_MAX, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR,
+ MIP4_TOUCH_MINOR_MIN, MIP4_TOUCH_MINOR_MAX, 0, 0);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->ppm_x);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->ppm_y);
+
+ error = input_mt_init_slots(input, MIP4_MAX_FINGERS, INPUT_MT_DIRECT);
+ if (error)
+ return error;
+
+ i2c_set_clientdata(client, ts);
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, mip4_interrupt,
+ IRQF_ONESHOT, MIP4_DEVICE_NAME, ts);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to request interrupt %d: %d\n",
+ client->irq, error);
+ return error;
+ }
+
+ disable_irq(client->irq);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ error = sysfs_create_group(&client->dev.kobj, &mip4_attr_group);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to create sysfs attribute group: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev, mip4_sysfs_remove, ts);
+ if (error) {
+ mip4_sysfs_remove(ts);
+ dev_err(&client->dev,
+ "Failed to install sysfs remoce action: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mip4_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+
+ mutex_lock(&input->mutex);
+
+ if (device_may_wakeup(dev))
+ ts->wake_irq_enabled = enable_irq_wake(client->irq) == 0;
+ else if (input->users)
+ mip4_disable(ts);
+
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused mip4_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+
+ mutex_lock(&input->mutex);
+
+ if (ts->wake_irq_enabled)
+ disable_irq_wake(client->irq);
+ else if (input->users)
+ mip4_enable(ts);
+
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mip4_pm_ops, mip4_suspend, mip4_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id mip4_of_match[] = {
+ { .compatible = "melfas,"MIP4_DEVICE_NAME, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mip4_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id mip4_acpi_match[] = {
+ { "MLFS0000", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, mip4_acpi_match);
+#endif
+
+static const struct i2c_device_id mip4_i2c_ids[] = {
+ { MIP4_DEVICE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mip4_i2c_ids);
+
+static struct i2c_driver mip4_driver = {
+ .id_table = mip4_i2c_ids,
+ .probe = mip4_probe,
+ .driver = {
+ .name = MIP4_DEVICE_NAME,
+ .of_match_table = of_match_ptr(mip4_of_match),
+ .acpi_match_table = ACPI_PTR(mip4_acpi_match),
+ .pm = &mip4_pm_ops,
+ },
+};
+module_i2c_driver(mip4_driver);
+
+MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen");
+MODULE_VERSION("2016.03.12");
+MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index e414d43e5159..2a78e27b4495 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -63,6 +63,37 @@
#define STMPE_TS_NAME "stmpe-ts"
#define XY_MASK 0xfff
+/**
+ * struct stmpe_touch - stmpe811 touch screen controller state
+ * @stmpe: pointer back to STMPE MFD container
+ * @idev: registered input device
+ * @work: a work item used to scan the device
+ * @dev: a pointer back to the MFD cell struct device*
+ * @sample_time: ADC conversion time in number of clocks.
+ * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks,
+ * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks),
+ * recommended is 4.
+ * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC)
+ * @ref_sel: ADC reference source
+ * (0 -> internal reference, 1 -> external reference)
+ * @adc_freq: ADC Clock speed
+ * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 or 3 -> 6.5 MHz)
+ * @ave_ctrl: Sample average control
+ * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
+ * @touch_det_delay: Touch detect interrupt delay
+ * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us,
+ * 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms)
+ * recommended is 3
+ * @settling: Panel driver settling time
+ * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms,
+ * 4 -> 5 ms, 5 -> 10 ms, 6 -> 50 ms, 7 -> 100 ms)
+ * recommended is 2
+ * @fraction_z: Length of the fractional part in z
+ * (fraction_z ([0..7]) = Count of the fractional part)
+ * recommended is 7
+ * @i_drive: current limit value of the touchscreen drivers
+ * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max)
+ */
struct stmpe_touch {
struct stmpe *stmpe;
struct input_dev *idev;
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index b6c4d03de340..880c40b23f66 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -197,28 +197,34 @@ static int sur40_command(struct sur40_state *dev,
static int sur40_init(struct sur40_state *dev)
{
int result;
- u8 buffer[24];
+ u8 *buffer;
+
+ buffer = kmalloc(24, GFP_KERNEL);
+ if (!buffer) {
+ result = -ENOMEM;
+ goto error;
+ }
/* stupidly replay the original MS driver init sequence */
result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_GET_VERSION, 0x01, buffer, 12);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12);
@@ -226,7 +232,8 @@ static int sur40_init(struct sur40_state *dev)
* Discard the result buffer - no known data inside except
* some version strings, maybe extract these sometime...
*/
-
+error:
+ kfree(buffer);
return result;
}
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 515c20a6e10f..73861ad22df4 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -848,7 +848,7 @@ static int wdt87xx_do_update_firmware(struct i2c_client *client,
error = wdt87xx_get_sysparam(client, &wdt->param);
if (error)
dev_err(&client->dev,
- "failed to refresh system paramaters: %d\n", error);
+ "failed to refresh system parameters: %d\n", error);
out:
enable_irq(client->irq);
mutex_unlock(&wdt->fw_mutex);
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 9bbadaaf6bc3..7b3845aa5983 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
point.coord_x = point.coord_y = 0;
}
- point.state = payload[9 * i + 5] & 0x03;
- point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+ point.state = payload[9 * i + 5] & 0x0f;
+ point.id = (payload[9 * i + 5] & 0xf0) >> 4;
/* determine touch major, minor and orientation */
point.area_major = max(payload[9 * i + 6],
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index a1e75cba18e0..dd1dc39f84ff 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -39,6 +39,25 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
If unsure, say N here.
+config IOMMU_IO_PGTABLE_ARMV7S
+ bool "ARMv7/v8 Short Descriptor Format"
+ select IOMMU_IO_PGTABLE
+ depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
+ help
+ Enable support for the ARM Short-descriptor pagetable format.
+ This supports 32-bit virtual and physical addresses mapped using
+ 2-level tables with 4KB pages/1MB sections, and contiguous entries
+ for 64KB pages/16MB supersections if indicated by the IOMMU driver.
+
+config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
+ bool "ARMv7s selftests"
+ depends on IOMMU_IO_PGTABLE_ARMV7S
+ help
+ Enable self-tests for the ARMv7s page table allocator. This performs
+ a series of page-table consistency checks during boot.
+
+ If unsure, say N here.
+
endmenu
config IOMMU_IOVA
@@ -51,9 +70,9 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
bool
- depends on NEED_SG_DMA_LENGTH
select IOMMU_API
select IOMMU_IOVA
+ select NEED_SG_DMA_LENGTH
config FSL_PAMU
bool "Freescale IOMMU support"
@@ -243,7 +262,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && ARM && MMU
+ depends on ARCH_EXYNOS && MMU
select IOMMU_API
select ARM_DMA_USE_IOMMU
help
@@ -266,7 +285,7 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
depends on ARM_LPAE
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
@@ -318,4 +337,21 @@ config S390_IOMMU
help
Support for the IOMMU API for s390 PCI devices.
+config MTK_IOMMU
+ bool "MTK IOMMU Support"
+ depends on ARM || ARM64
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select ARM_DMA_USE_IOMMU
+ select IOMMU_API
+ select IOMMU_DMA
+ select IOMMU_IO_PGTABLE_ARMV7S
+ select MEMORY
+ select MTK_SMI
+ help
+ Support for the M4U on certain MediaTek SoCs. The M4U is the
+ Multimedia Memory Management Unit. This option enables remapping
+ of DMA memory accesses for the multimedia subsystem.
+
+ If unsure, say N here.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 42fc0c25cf1a..c6edb31bf8c6 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
@@ -16,6 +17,7 @@ obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
+obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 374c129219ef..5efadad4615b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -92,6 +92,7 @@ struct iommu_dev_data {
struct list_head dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
u16 devid; /* PCI Device ID */
+ u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */
struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
+static inline u16 get_device_id(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ out_unlock:
return dev_data;
}
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ *(u16 *)data = alias;
+ return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 devid, ivrs_alias, pci_alias;
+
+ devid = get_device_id(dev);
+ ivrs_alias = amd_iommu_alias_table[devid];
+ pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+ if (ivrs_alias == pci_alias)
+ return ivrs_alias;
+
+ /*
+ * DMA alias showdown
+ *
+ * The IVRS is fairly reliable in telling us about aliases, but it
+ * can't know about every screwy device. If we don't have an IVRS
+ * reported alias, use the PCI reported alias. In that case we may
+ * still need to initialize the rlookup and dev_table entries if the
+ * alias is to a non-existent device.
+ */
+ if (ivrs_alias == devid) {
+ if (!amd_iommu_rlookup_table[pci_alias]) {
+ amd_iommu_rlookup_table[pci_alias] =
+ amd_iommu_rlookup_table[devid];
+ memcpy(amd_iommu_dev_table[pci_alias].data,
+ amd_iommu_dev_table[devid].data,
+ sizeof(amd_iommu_dev_table[pci_alias].data));
+ }
+
+ return pci_alias;
+ }
+
+ pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+ "for device %s[%04x:%04x], kernel reported alias "
+ "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+ PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+ PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+ PCI_FUNC(pci_alias));
+
+ /*
+ * If we don't have a PCI DMA alias and the IVRS alias is on the same
+ * bus, then the IVRS table may know about a quirk that we don't.
+ */
+ if (pci_alias == devid &&
+ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ pdev->dma_alias_devfn = ivrs_alias & 0xff;
+ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+ dev_name(dev));
+ }
+
+ return ivrs_alias;
+}
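For reference, the IDs being juggled above are plain 16-bit bus/devfn encodings, so the log formats follow directly from a few shifts and masks. A minimal user-space sketch (the PCI_* macros are re-declared locally to mirror their kernel definitions; the values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PCI_DEVID(bus, devfn)  ((((uint16_t)(bus)) << 8) | (devfn))
#define PCI_BUS_NUM(x)         (((x) >> 8) & 0xff)
#define PCI_SLOT(devfn)        (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)        ((devfn) & 0x07)

int main(void)
{
	uint16_t devid = PCI_DEVID(0x03, 0x28);  /* bus 03, slot 05, fn 0 */

	/* Prints "devid 0328 -> 03:05.0", the same %02x:%02x.%d format
	 * used by the "Using IVRS reported alias" message above. */
	printf("devid %04x -> %02x:%02x.%d\n", devid,
	       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
	return 0;
}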
+
static struct iommu_dev_data *find_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
return dev_data;
}
-static inline u16 get_device_id(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
-
- return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
static struct iommu_dev_data *get_dev_data(struct device *dev)
{
return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
+ dev_data->alias = get_alias(dev);
+
if (pci_iommuv2_capable(pdev)) {
struct amd_iommu *iommu;
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
u16 devid, alias;
devid = get_device_id(dev);
- alias = amd_iommu_alias_table[devid];
+ alias = get_alias(dev);
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
ret = iommu_flush_dte(iommu, dev_data->devid);
if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
return;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = amd_iommu_alias_table[dev_data->devid];
+ alias = dev_data->alias;
/* decrease reference counters */
dev_data->domain->dev_iommu[iommu->index] -= 1;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index c865737326e1..56999d2fac07 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -526,6 +526,7 @@ static void do_fault(struct work_struct *work)
flags |= FAULT_FLAG_USER;
if (fault->flags & PPR_FAULT_WRITE)
flags |= FAULT_FLAG_WRITE;
+ flags |= FAULT_FLAG_REMOTE;
down_read(&mm->mmap_sem);
vma = find_extend_vma(mm, address);
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 20875341c865..4ff73ff64e49 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -21,6 +21,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
@@ -1396,7 +1397,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED)
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
return NULL;
/*
@@ -1408,6 +1409,12 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
if (!smmu_domain)
return NULL;
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&smmu_domain->domain)) {
+ kfree(smmu_domain);
+ return NULL;
+ }
+
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->pgtbl_lock);
return &smmu_domain->domain;
@@ -1436,6 +1443,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ iommu_put_dma_cookie(domain);
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
/* Free the CD and ASID, if we allocated them */
@@ -1630,6 +1638,17 @@ static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
return 0;
}
+static void arm_smmu_detach_dev(struct device *dev)
+{
+ struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
+
+ smmu_group->ste.bypass = true;
+ if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
+ dev_warn(dev, "failed to install bypass STE\n");
+
+ smmu_group->domain = NULL;
+}
+
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
@@ -1642,7 +1661,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Already attached to a different domain? */
if (smmu_group->domain && smmu_group->domain != smmu_domain)
- return -EEXIST;
+ arm_smmu_detach_dev(dev);
smmu = smmu_group->smmu;
mutex_lock(&smmu_domain->init_mutex);
@@ -1668,7 +1687,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock;
smmu_group->domain = smmu_domain;
- smmu_group->ste.bypass = false;
+
+ /*
+ * FIXME: This should always be "false" once we have IOMMU-backed
+ * DMA ops for all devices behind the SMMU.
+ */
+ smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA;
ret = arm_smmu_install_ste_for_group(smmu_group);
if (IS_ERR_VALUE(ret))
@@ -1679,25 +1703,6 @@ out_unlock:
return ret;
}
-static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
-{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
-
- BUG_ON(!smmu_domain);
- BUG_ON(!smmu_group);
-
- mutex_lock(&smmu_domain->init_mutex);
- BUG_ON(smmu_group->domain != smmu_domain);
-
- smmu_group->ste.bypass = true;
- if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
- dev_warn(dev, "failed to install bypass STE\n");
-
- smmu_group->domain = NULL;
- mutex_unlock(&smmu_domain->init_mutex);
-}
-
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
@@ -1935,7 +1940,6 @@ static struct iommu_ops arm_smmu_ops = {
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
- .detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.iova_to_phys = arm_smmu_iova_to_phys,
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 59ee4b8a3236..7c39ac4b9c53 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -29,6 +29,7 @@
#define pr_fmt(fmt) "arm-smmu: " fmt
#include <linux/delay.h>
+#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -167,6 +168,9 @@
#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
+#define S2CR_PRIVCFG_SHIFT 24
+#define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)
+
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
@@ -257,9 +261,13 @@
#define FSYNR0_WNR (1 << 4)
static int force_stage;
-module_param_named(force_stage, force_stage, int, S_IRUGO);
+module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
+static bool disable_bypass;
+module_param(disable_bypass, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_bypass,
+ "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum arm_smmu_arch_version {
ARM_SMMU_V1 = 1,
@@ -818,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
+ /* We're bypassing these SIDs, so don't allocate an actual context */
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ smmu_domain->smmu = smmu;
+ goto out_unlock;
+ }
+
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -940,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
void __iomem *cb_base;
int irq;
- if (!smmu)
+ if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
return;
/*
@@ -963,7 +977,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED)
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
return NULL;
/*
* Allocate the domain and initialise some of its data structures.
@@ -974,6 +988,12 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
if (!smmu_domain)
return NULL;
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&smmu_domain->domain)) {
+ kfree(smmu_domain);
+ return NULL;
+ }
+
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->pgtbl_lock);
@@ -988,6 +1008,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
* Free the domain resources. We assume that all devices have
* already been detached.
*/
+ iommu_put_dma_cookie(domain);
arm_smmu_destroy_domain_context(domain);
kfree(smmu_domain);
}
@@ -1074,6 +1095,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ /*
+ * FIXME: This won't be needed once we have IOMMU-backed DMA ops
+ * for all devices behind the SMMU. Note that we need to take
+ * care configuring SMRs for devices that are both a platform_device
+ * and a PCI device (i.e. a PCI host controller).
+ */
+ if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
+ return 0;
+
/* Devices in an IOMMU group may already be configured */
ret = arm_smmu_master_configure_smrs(smmu, cfg);
if (ret)
@@ -1083,7 +1113,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
u32 idx, s2cr;
idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
- s2cr = S2CR_TYPE_TRANS |
+ s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
(smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
@@ -1108,14 +1138,24 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
*/
for (i = 0; i < cfg->num_streamids; ++i) {
u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+ u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
- writel_relaxed(S2CR_TYPE_BYPASS,
- gr0_base + ARM_SMMU_GR0_S2CR(idx));
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
arm_smmu_master_free_smrs(smmu, cfg);
}
+static void arm_smmu_detach_dev(struct device *dev,
+ struct arm_smmu_master_cfg *cfg)
+{
+ struct iommu_domain *domain = dev->archdata.iommu;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ dev->archdata.iommu = NULL;
+ arm_smmu_domain_remove_master(smmu_domain, cfg);
+}
+
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
@@ -1129,11 +1169,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -ENXIO;
}
- if (dev->archdata.iommu) {
- dev_err(dev, "already attached to IOMMU domain\n");
- return -EEXIST;
- }
-
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
if (IS_ERR_VALUE(ret))
@@ -1155,25 +1190,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (!cfg)
return -ENODEV;
+ /* Detach the dev from its current domain */
+ if (dev->archdata.iommu)
+ arm_smmu_detach_dev(dev, cfg);
+
ret = arm_smmu_domain_add_master(smmu_domain, cfg);
if (!ret)
dev->archdata.iommu = domain;
return ret;
}
-static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
-{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_master_cfg *cfg;
-
- cfg = find_smmu_master_cfg(dev);
- if (!cfg)
- return;
-
- dev->archdata.iommu = NULL;
- arm_smmu_domain_remove_master(smmu_domain, cfg);
-}
-
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
@@ -1449,7 +1475,6 @@ static struct iommu_ops arm_smmu_ops = {
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
- .detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
@@ -1473,11 +1498,11 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
- /* Mark all SMRn as invalid and all S2CRn as bypass */
+ /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
+ reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
for (i = 0; i < smmu->num_mapping_groups; ++i) {
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
- writel_relaxed(S2CR_TYPE_BYPASS,
- gr0_base + ARM_SMMU_GR0_S2CR(i));
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
}
/* Make sure all context banks are disabled and clear CB_FSR */
@@ -1499,8 +1524,12 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Disable TLB broadcasting. */
reg |= (sCR0_VMIDPNE | sCR0_PTM);
- /* Enable client access, but bypass when no mapping is found */
- reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+ /* Enable client access, handling unmatched streams as appropriate */
+ reg &= ~sCR0_CLIENTPD;
+ if (disable_bypass)
+ reg |= sCR0_USFCFG;
+ else
+ reg &= ~sCR0_USFCFG;
/* Disable forced broadcasting */
reg &= ~sCR0_FB;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 72d6182666cb..58f2fe687a24 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int s_length = sg_dma_len(s);
unsigned int s_dma_len = s->length;
- s->offset = s_offset;
+ s->offset += s_offset;
s->length = s_length;
sg_dma_address(s) = dma_addr + s_offset;
dma_addr += s_dma_len;
@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
for_each_sg(sg, s, nents, i) {
if (sg_dma_address(s) != DMA_ERROR_CODE)
- s->offset = sg_dma_address(s);
+ s->offset += sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
sg_dma_address(s) = DMA_ERROR_CODE;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 97c41b8ab5d9..5ecc86cb74c8 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1,6 +1,5 @@
-/* linux/drivers/iommu/exynos_iommu.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
@@ -25,10 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-
-#include <asm/cacheflush.h>
-#include <asm/dma-iommu.h>
-#include <asm/pgtable.h>
+#include <linux/dma-iommu.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
@@ -58,17 +54,25 @@ typedef u32 sysmmu_pte_t;
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
-static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
-{
- return iova & (size - 1);
-}
-
-#define section_phys(sent) (*(sent) & SECT_MASK)
-#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
-#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
-#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
-#define spage_phys(pent) (*(pent) & SPAGE_MASK)
-#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)
+/*
+ * v1.x - v3.x SYSMMUs support 32-bit physical and 32-bit virtual address
+ * spaces; v5.0 introduced support for a 36-bit physical address space by
+ * shifting all page entry values by 4 bits.
+ * All SYSMMU controllers in a system support address spaces of the same
+ * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to
+ * the proper value (0 or 4).
+ */
+static short PG_ENT_SHIFT = -1;
+#define SYSMMU_PG_ENT_SHIFT 0
+#define SYSMMU_V5_PG_ENT_SHIFT 4
+
+#define sect_to_phys(ent) (((phys_addr_t)(ent)) << PG_ENT_SHIFT)
+#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
+#define section_offs(iova) ((iova) & (SECT_SIZE - 1))
+#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
+#define lpage_offs(iova) ((iova) & (LPAGE_SIZE - 1))
+#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
+#define spage_offs(iova) ((iova) & (SPAGE_SIZE - 1))
#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
@@ -83,16 +87,16 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
+#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
+#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
-#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
-
-#define mk_lv1ent_sect(pa) ((pa) | 2)
-#define mk_lv1ent_page(pa) ((pa) | 1)
-#define mk_lv2ent_lpage(pa) ((pa) | 1)
-#define mk_lv2ent_spage(pa) ((pa) | 2)
+#define mk_lv1ent_sect(pa) (((pa) >> PG_ENT_SHIFT) | 2)
+#define mk_lv1ent_page(pa) (((pa) >> PG_ENT_SHIFT) | 1)
+#define mk_lv2ent_lpage(pa) (((pa) >> PG_ENT_SHIFT) | 1)
+#define mk_lv2ent_spage(pa) (((pa) >> PG_ENT_SHIFT) | 2)
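To make the shifted encoding concrete, a stand-alone sketch (the macros are re-declared locally and PG_ENT_SHIFT is pinned to 4, i.e. a v5 SYSMMU; the address is illustrative):

#include <assert.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;
typedef uint32_t sysmmu_pte_t;

#define PG_ENT_SHIFT	4	/* v5 SYSMMU */
#define SECT_SIZE	(1 << 20)
#define SECT_MASK	(~(phys_addr_t)(SECT_SIZE - 1))
#define sect_to_phys(ent)  (((phys_addr_t)(ent)) << PG_ENT_SHIFT)
#define mk_lv1ent_sect(pa) ((sysmmu_pte_t)((pa) >> PG_ENT_SHIFT) | 2)

int main(void)
{
	phys_addr_t pa = 0x812300000ULL;	/* 36-bit section address */
	sysmmu_pte_t ent = mk_lv1ent_sect(pa);	/* == 0x81230002 */

	/* Decoding shifts the entry back up and masks off the type bits */
	assert((sect_to_phys(ent) & SECT_MASK) == pa);
	return 0;
}

On v1.x - v3.x hardware PG_ENT_SHIFT is 0 and both directions collapse to the old identity encoding (modulo the type bits).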
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
@@ -100,14 +104,23 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define CFG_LRU 0x1
#define CFG_QOS(n) ((n & 0xF) << 7)
-#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
+/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
+#define REG_MMU_VERSION 0x034
+
+#define MMU_MAJ_VER(val) ((val) >> 7)
+#define MMU_MIN_VER(val) ((val) & 0x7F)
+#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
+
+#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
+
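The version packing is easiest to follow with numbers plugged in (a hand-worked example, not driver code): MAKE_MMU_VER(3, 3) is (3 << 7) | 3 == 0x183; a v3.3 controller reports that value in bits [31:21] of REG_MMU_VERSION, i.e. the raw register reads 0x183 << 21 == 0x30600000, from which MMU_RAW_VER() recovers 0x183 and MMU_MAJ_VER()/MMU_MIN_VER() split it back into 3 and 3.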
+/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
@@ -119,21 +132,18 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030
-#define REG_MMU_VERSION 0x034
-
-#define MMU_MAJ_VER(val) ((val) >> 7)
-#define MMU_MIN_VER(val) ((val) & 0x7F)
-#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
-
-#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
-
-#define REG_PB0_SADDR 0x04C
-#define REG_PB0_EADDR 0x050
-#define REG_PB1_SADDR 0x054
-#define REG_PB1_EADDR 0x058
+/* v5.x registers */
+#define REG_V5_PT_BASE_PFN 0x00C
+#define REG_V5_MMU_FLUSH_ALL 0x010
+#define REG_V5_MMU_FLUSH_ENTRY 0x014
+#define REG_V5_INT_STATUS 0x060
+#define REG_V5_INT_CLEAR 0x064
+#define REG_V5_FAULT_AR_VA 0x070
+#define REG_V5_FAULT_AW_VA 0x080
#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
+static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
@@ -149,40 +159,38 @@ static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
lv2table_base(sent)) + lv2ent_offset(iova);
}
-enum exynos_sysmmu_inttype {
- SYSMMU_PAGEFAULT,
- SYSMMU_AR_MULTIHIT,
- SYSMMU_AW_MULTIHIT,
- SYSMMU_BUSERROR,
- SYSMMU_AR_SECURITY,
- SYSMMU_AR_ACCESS,
- SYSMMU_AW_SECURITY,
- SYSMMU_AW_PROTECTION, /* 7 */
- SYSMMU_FAULT_UNKNOWN,
- SYSMMU_FAULTS_NUM
+/*
+ * IOMMU fault information: one table entry per fault type
+ */
+struct sysmmu_fault_info {
+ unsigned int bit; /* bit number in STATUS register */
+ unsigned short addr_reg; /* register to read VA fault address */
+ const char *name; /* human readable fault name */
+ unsigned int type; /* fault type for report_iommu_fault */
};
-static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
- REG_PAGE_FAULT_ADDR,
- REG_AR_FAULT_ADDR,
- REG_AW_FAULT_ADDR,
- REG_DEFAULT_SLAVE_ADDR,
- REG_AR_FAULT_ADDR,
- REG_AR_FAULT_ADDR,
- REG_AW_FAULT_ADDR,
- REG_AW_FAULT_ADDR
+static const struct sysmmu_fault_info sysmmu_faults[] = {
+ { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
+ { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
+ { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
+ { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
+ { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
+ { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
+ { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
+ { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};
-static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
- "PAGE FAULT",
- "AR MULTI-HIT FAULT",
- "AW MULTI-HIT FAULT",
- "BUS ERROR",
- "AR SECURITY PROTECTION FAULT",
- "AR ACCESS PROTECTION FAULT",
- "AW SECURITY PROTECTION FAULT",
- "AW ACCESS PROTECTION FAULT",
- "UNKNOWN FAULT"
+static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
+ { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
+ { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
+ { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
+ { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
+ { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
+ { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
+ { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
+ { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
+ { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
+ { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};
/*
@@ -193,6 +201,7 @@ static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
*/
struct exynos_iommu_owner {
struct list_head controllers; /* list of sysmmu_drvdata.owner_node */
+ struct iommu_domain *domain; /* domain this device is attached */
};
/*
@@ -221,6 +230,8 @@ struct sysmmu_drvdata {
struct device *master; /* master device (owner) */
void __iomem *sfrbase; /* our registers */
struct clk *clk; /* SYSMMU's clock */
+ struct clk *aclk; /* SYSMMU's aclk clock */
+ struct clk *pclk; /* SYSMMU's pclk clock */
struct clk *clk_master; /* master's device clock */
int activations; /* number of calls to sysmmu_enable */
spinlock_t lock; /* lock for modifying state */
@@ -255,70 +266,101 @@ static bool is_sysmmu_active(struct sysmmu_drvdata *data)
return data->activations > 0;
}
-static void sysmmu_unblock(void __iomem *sfrbase)
+static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
- __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
+ writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}
-static bool sysmmu_block(void __iomem *sfrbase)
+static bool sysmmu_block(struct sysmmu_drvdata *data)
{
int i = 120;
- __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
- while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
+ writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
+ while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
--i;
- if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
- sysmmu_unblock(sfrbase);
+ if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
+ sysmmu_unblock(data);
return false;
}
return true;
}
-static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
+static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
- __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
+ if (MMU_MAJ_VER(data->version) < 5)
+ writel(0x1, data->sfrbase + REG_MMU_FLUSH);
+ else
+ writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}
-static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
+static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
sysmmu_iova_t iova, unsigned int num_inv)
{
unsigned int i;
for (i = 0; i < num_inv; i++) {
- __raw_writel((iova & SPAGE_MASK) | 1,
- sfrbase + REG_MMU_FLUSH_ENTRY);
+ if (MMU_MAJ_VER(data->version) < 5)
+ writel((iova & SPAGE_MASK) | 1,
+ data->sfrbase + REG_MMU_FLUSH_ENTRY);
+ else
+ writel((iova & SPAGE_MASK) | 1,
+ data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
iova += SPAGE_SIZE;
}
}
-static void __sysmmu_set_ptbase(void __iomem *sfrbase,
- phys_addr_t pgd)
+static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
- __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
+ if (MMU_MAJ_VER(data->version) < 5)
+ writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
+ else
+ writel(pgd >> PAGE_SHIFT,
+ data->sfrbase + REG_V5_PT_BASE_PFN);
- __sysmmu_tlb_invalidate(sfrbase);
+ __sysmmu_tlb_invalidate(data);
}
-static void show_fault_information(const char *name,
- enum exynos_sysmmu_inttype itype,
- phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
+static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
- sysmmu_pte_t *ent;
+ u32 ver;
+
+ clk_enable(data->clk_master);
+ clk_enable(data->clk);
+ clk_enable(data->pclk);
+ clk_enable(data->aclk);
+
+ ver = readl(data->sfrbase + REG_MMU_VERSION);
- if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
- itype = SYSMMU_FAULT_UNKNOWN;
+ /* controllers on some SoCs don't report a proper version */
+ if (ver == 0x80000001u)
+ data->version = MAKE_MMU_VER(1, 0);
+ else
+ data->version = MMU_RAW_VER(ver);
+
+ dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
+ MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
- pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
- sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);
+ clk_disable(data->aclk);
+ clk_disable(data->pclk);
+ clk_disable(data->clk);
+ clk_disable(data->clk_master);
+}
- ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
- pr_err("\tLv1 entry: %#x\n", *ent);
+static void show_fault_information(struct sysmmu_drvdata *data,
+ const struct sysmmu_fault_info *finfo,
+ sysmmu_iova_t fault_addr)
+{
+ sysmmu_pte_t *ent;
+ dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
+ finfo->name, fault_addr, &data->pgtable);
+ ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
+ dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
if (lv1ent_page(ent)) {
ent = page_entry(ent, fault_addr);
- pr_err("\t Lv2 entry: %#x\n", *ent);
+ dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
}
}
@@ -326,49 +368,52 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
/* The SYSMMU is in a blocked state when an interrupt occurs. */
struct sysmmu_drvdata *data = dev_id;
- enum exynos_sysmmu_inttype itype;
- sysmmu_iova_t addr = -1;
+ const struct sysmmu_fault_info *finfo;
+ unsigned int i, n, itype;
+ sysmmu_iova_t fault_addr = -1;
+ unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
WARN_ON(!is_sysmmu_active(data));
+ if (MMU_MAJ_VER(data->version) < 5) {
+ reg_status = REG_INT_STATUS;
+ reg_clear = REG_INT_CLEAR;
+ finfo = sysmmu_faults;
+ n = ARRAY_SIZE(sysmmu_faults);
+ } else {
+ reg_status = REG_V5_INT_STATUS;
+ reg_clear = REG_V5_INT_CLEAR;
+ finfo = sysmmu_v5_faults;
+ n = ARRAY_SIZE(sysmmu_v5_faults);
+ }
+
spin_lock(&data->lock);
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
- itype = (enum exynos_sysmmu_inttype)
- __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
- if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
- itype = SYSMMU_FAULT_UNKNOWN;
- else
- addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);
+ itype = __ffs(readl(data->sfrbase + reg_status));
+ for (i = 0; i < n; i++, finfo++)
+ if (finfo->bit == itype)
+ break;
+ /* unknown/unsupported fault */
+ BUG_ON(i == n);
- if (itype == SYSMMU_FAULT_UNKNOWN) {
- pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
- __func__, dev_name(data->sysmmu));
- pr_err("%s: Please check if IRQ is correctly configured.\n",
- __func__);
- BUG();
- } else {
- unsigned int base =
- __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
- show_fault_information(dev_name(data->sysmmu),
- itype, base, addr);
- if (data->domain)
- ret = report_iommu_fault(&data->domain->domain,
- data->master, addr, itype);
- }
+ /* print debug message */
+ fault_addr = readl(data->sfrbase + finfo->addr_reg);
+ show_fault_information(data, finfo, fault_addr);
+ if (data->domain)
+ ret = report_iommu_fault(&data->domain->domain,
+ data->master, fault_addr, finfo->type);
/* the fault was not recovered by the fault handler */
BUG_ON(ret != 0);
- __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
+ writel(1 << itype, data->sfrbase + reg_clear);
- sysmmu_unblock(data->sfrbase);
+ sysmmu_unblock(data);
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
spin_unlock(&data->lock);
@@ -377,15 +422,15 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
- __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
- __raw_writel(0, data->sfrbase + REG_MMU_CFG);
+ writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
+ writel(0, data->sfrbase + REG_MMU_CFG);
+ clk_disable(data->aclk);
+ clk_disable(data->pclk);
clk_disable(data->clk);
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
}
static bool __sysmmu_disable(struct sysmmu_drvdata *data)
@@ -416,42 +461,34 @@ static bool __sysmmu_disable(struct sysmmu_drvdata *data)
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
- unsigned int cfg = CFG_LRU | CFG_QOS(15);
- unsigned int ver;
-
- ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
- if (MMU_MAJ_VER(ver) == 3) {
- if (MMU_MIN_VER(ver) >= 2) {
- cfg |= CFG_FLPDCACHE;
- if (MMU_MIN_VER(ver) == 3) {
- cfg |= CFG_ACGEN;
- cfg &= ~CFG_LRU;
- } else {
- cfg |= CFG_SYSSEL;
- }
- }
- }
+ unsigned int cfg;
- __raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
- data->version = ver;
+ if (data->version <= MAKE_MMU_VER(3, 1))
+ cfg = CFG_LRU | CFG_QOS(15);
+ else if (data->version <= MAKE_MMU_VER(3, 2))
+ cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
+ else
+ cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
+
+ writel(cfg, data->sfrbase + REG_MMU_CFG);
}
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
clk_enable(data->clk);
+ clk_enable(data->pclk);
+ clk_enable(data->aclk);
- __raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
+ writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
__sysmmu_init_config(data);
- __sysmmu_set_ptbase(data->sfrbase, data->pgtable);
+ __sysmmu_set_ptbase(data, data->pgtable);
- __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+ writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
}
static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
@@ -482,28 +519,21 @@ static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
return ret;
}
-static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
- sysmmu_iova_t iova)
-{
- if (data->version == MAKE_MMU_VER(3, 3))
- __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
-}
-
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
sysmmu_iova_t iova)
{
unsigned long flags;
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
spin_lock_irqsave(&data->lock, flags);
- if (is_sysmmu_active(data))
- __sysmmu_tlb_invalidate_flpdcache(data, iova);
+ if (is_sysmmu_active(data)) {
+ if (data->version >= MAKE_MMU_VER(3, 3))
+ __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ }
spin_unlock_irqrestore(&data->lock, flags);
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -515,8 +545,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
if (is_sysmmu_active(data)) {
unsigned int num_inv = 1;
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
/*
* L2TLB invalidation required
@@ -531,13 +560,11 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
if (MMU_MAJ_VER(data->version) == 2)
num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
- if (sysmmu_block(data->sfrbase)) {
- __sysmmu_tlb_invalidate_entry(
- data->sfrbase, iova, num_inv);
- sysmmu_unblock(data->sfrbase);
+ if (sysmmu_block(data)) {
+ __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
+ sysmmu_unblock(data);
}
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
} else {
dev_dbg(data->master,
"disabled. Skipping TLB invalidation @ %#x\n", iova);
@@ -575,25 +602,52 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
}
data->clk = devm_clk_get(dev, "sysmmu");
- if (IS_ERR(data->clk)) {
- dev_err(dev, "Failed to get clock!\n");
- return PTR_ERR(data->clk);
- } else {
+ if (!IS_ERR(data->clk)) {
ret = clk_prepare(data->clk);
if (ret) {
dev_err(dev, "Failed to prepare clk\n");
return ret;
}
+ } else {
+ data->clk = NULL;
+ }
+
+ data->aclk = devm_clk_get(dev, "aclk");
+ if (!IS_ERR(data->aclk)) {
+ ret = clk_prepare(data->aclk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare aclk\n");
+ return ret;
+ }
+ } else {
+ data->aclk = NULL;
+ }
+
+ data->pclk = devm_clk_get(dev, "pclk");
+ if (!IS_ERR(data->pclk)) {
+ ret = clk_prepare(data->pclk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare pclk\n");
+ return ret;
+ }
+ } else {
+ data->pclk = NULL;
+ }
+
+ if (!data->clk && (!data->aclk || !data->pclk)) {
+ dev_err(dev, "Failed to get device clock(s)!\n");
+ return -ENOSYS;
}
data->clk_master = devm_clk_get(dev, "master");
if (!IS_ERR(data->clk_master)) {
ret = clk_prepare(data->clk_master);
if (ret) {
- clk_unprepare(data->clk);
dev_err(dev, "Failed to prepare master's clk\n");
return ret;
}
+ } else {
+ data->clk_master = NULL;
}
data->sysmmu = dev;
@@ -601,6 +655,14 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ __sysmmu_get_version(data);
+ if (PG_ENT_SHIFT < 0) {
+ if (MMU_MAJ_VER(data->version) < 5)
+ PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
+ else
+ PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
+ }
+
pm_runtime_enable(dev);
return 0;
@@ -650,28 +712,38 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
}
};
-static inline void pgtable_flush(void *vastart, void *vaend)
+static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
- dmac_flush_range(vastart, vaend);
- outer_flush_range(virt_to_phys(vastart),
- virt_to_phys(vaend));
+ dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
+ DMA_TO_DEVICE);
+ *ent = val;
+ dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
+ DMA_TO_DEVICE);
}
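(The sync_for_cpu/sync_for_device pair hands ownership of the cache line to the CPU for the store and back to the device afterwards; this is the DMA-API-portable replacement for the dmac_flush_range()/outer_flush_range() calls removed from pgtable_flush() above.)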
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
struct exynos_iommu_domain *domain;
+ dma_addr_t handle;
int i;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
+ /* Check if correct PTE offsets are initialized */
+ BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
+ if (type == IOMMU_DOMAIN_DMA) {
+ if (iommu_get_dma_cookie(&domain->domain) != 0)
+ goto err_pgtable;
+ } else if (type != IOMMU_DOMAIN_UNMANAGED) {
+ goto err_pgtable;
+ }
+
domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
if (!domain->pgtable)
- goto err_pgtable;
+ goto err_dma_cookie;
domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!domain->lv2entcnt)
@@ -689,7 +761,10 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
domain->pgtable[i + 7] = ZERO_LV2LINK;
}
- pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);
+ handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
+ DMA_TO_DEVICE);
+ /* For mapping page table entries we rely on dma == phys */
+ BUG_ON(handle != virt_to_phys(domain->pgtable));
spin_lock_init(&domain->lock);
spin_lock_init(&domain->pgtablelock);
@@ -703,6 +778,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
err_counter:
free_pages((unsigned long)domain->pgtable, 2);
+err_dma_cookie:
+ if (type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&domain->domain);
err_pgtable:
kfree(domain);
return NULL;
@@ -727,16 +805,62 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
spin_unlock_irqrestore(&domain->lock, flags);
+ if (iommu_domain->type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(iommu_domain);
+
+ dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
+ DMA_TO_DEVICE);
+
for (i = 0; i < NUM_LV1ENTRIES; i++)
- if (lv1ent_page(domain->pgtable + i))
+ if (lv1ent_page(domain->pgtable + i)) {
+ phys_addr_t base = lv2table_base(domain->pgtable + i);
+
+ dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
+ DMA_TO_DEVICE);
kmem_cache_free(lv2table_kmem_cache,
- phys_to_virt(lv2table_base(domain->pgtable + i)));
+ phys_to_virt(base));
+ }
free_pages((unsigned long)domain->pgtable, 2);
free_pages((unsigned long)domain->lv2entcnt, 1);
kfree(domain);
}
+static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
+ struct device *dev)
+{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+ phys_addr_t pagetable = virt_to_phys(domain->pgtable);
+ struct sysmmu_drvdata *data, *next;
+ unsigned long flags;
+ bool found = false;
+
+ if (!has_sysmmu(dev) || owner->domain != iommu_domain)
+ return;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
+ if (data->master == dev) {
+ if (__sysmmu_disable(data)) {
+ data->master = NULL;
+ list_del_init(&data->domain_node);
+ }
+ pm_runtime_put(data->sysmmu);
+ found = true;
+ }
+ }
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ owner->domain = NULL;
+
+ if (found)
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
+ __func__, &pagetable);
+ else
+ dev_err(dev, "%s: No IOMMU is attached\n", __func__);
+}
+
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
@@ -750,6 +874,9 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
if (!has_sysmmu(dev))
return -ENODEV;
+ if (owner->domain)
+ exynos_iommu_detach_device(owner->domain, dev);
+
list_for_each_entry(data, &owner->controllers, owner_node) {
pm_runtime_get_sync(data->sysmmu);
ret = __sysmmu_enable(data, pagetable, domain);
@@ -768,44 +895,13 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
return ret;
}
+ owner->domain = iommu_domain;
dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
__func__, &pagetable, (ret == 0) ? "" : ", again");
return ret;
}
-static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
- struct device *dev)
-{
- struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
- phys_addr_t pagetable = virt_to_phys(domain->pgtable);
- struct sysmmu_drvdata *data, *next;
- unsigned long flags;
- bool found = false;
-
- if (!has_sysmmu(dev))
- return;
-
- spin_lock_irqsave(&domain->lock, flags);
- list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
- if (data->master == dev) {
- if (__sysmmu_disable(data)) {
- data->master = NULL;
- list_del_init(&data->domain_node);
- }
- pm_runtime_put(data->sysmmu);
- found = true;
- }
- }
- spin_unlock_irqrestore(&domain->lock, flags);
-
- if (found)
- dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
- __func__, &pagetable);
- else
- dev_err(dev, "%s: No IOMMU is attached\n", __func__);
-}
-
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
@@ -819,15 +915,14 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
bool need_flush_flpd_cache = lv1ent_zero(sent);
pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
- BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
+ BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
if (!pent)
return ERR_PTR(-ENOMEM);
- *sent = mk_lv1ent_page(virt_to_phys(pent));
+ update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
kmemleak_ignore(pent);
*pgcounter = NUM_LV2ENTRIES;
- pgtable_flush(pent, pent + NUM_LV2ENTRIES);
- pgtable_flush(sent, sent + 1);
+ dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
/*
* If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
@@ -880,9 +975,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
*pgcnt = 0;
}
- *sent = mk_lv1ent_sect(paddr);
-
- pgtable_flush(sent, sent + 1);
+ update_pte(sent, mk_lv1ent_sect(paddr));
spin_lock(&domain->lock);
if (lv1ent_page_zero(sent)) {
@@ -906,12 +999,15 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
- *pent = mk_lv2ent_spage(paddr);
- pgtable_flush(pent, pent + 1);
+ update_pte(pent, mk_lv2ent_spage(paddr));
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
+ dma_addr_t pent_base = virt_to_phys(pent);
+ dma_sync_single_for_cpu(dma_dev, pent_base,
+ sizeof(*pent) * SPAGES_PER_LPAGE,
+ DMA_TO_DEVICE);
for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
if (WARN_ON(!lv2ent_fault(pent))) {
if (i > 0)
@@ -921,7 +1017,9 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*pent = mk_lv2ent_lpage(paddr);
}
- pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+ dma_sync_single_for_device(dma_dev, pent_base,
+ sizeof(*pent) * SPAGES_PER_LPAGE,
+ DMA_TO_DEVICE);
*pgcnt -= SPAGES_PER_LPAGE;
}
@@ -1031,8 +1129,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
/* workaround for h/w bug in System MMU v3.3 */
- *ent = ZERO_LV2LINK;
- pgtable_flush(ent, ent + 1);
+ update_pte(ent, ZERO_LV2LINK);
size = SECT_SIZE;
goto done;
}
@@ -1053,9 +1150,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
if (lv2ent_small(ent)) {
- *ent = 0;
+ update_pte(ent, 0);
size = SPAGE_SIZE;
- pgtable_flush(ent, ent + 1);
domain->lv2entcnt[lv1ent_offset(iova)] += 1;
goto done;
}
@@ -1066,9 +1162,13 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
goto err;
}
+ dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
+ sizeof(*ent) * SPAGES_PER_LPAGE,
+ DMA_TO_DEVICE);
memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
- pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
-
+ dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
+ sizeof(*ent) * SPAGES_PER_LPAGE,
+ DMA_TO_DEVICE);
size = LPAGE_SIZE;
domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
@@ -1114,28 +1214,32 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
return phys;
}
+static struct iommu_group *get_device_iommu_group(struct device *dev)
+{
+ struct iommu_group *group;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ group = iommu_group_alloc();
+
+ return group;
+}
+
static int exynos_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
- int ret;
if (!has_sysmmu(dev))
return -ENODEV;
- group = iommu_group_get(dev);
+ group = iommu_group_get_for_dev(dev);
- if (!group) {
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
- return PTR_ERR(group);
- }
- }
+ if (IS_ERR(group))
+ return PTR_ERR(group);
- ret = iommu_group_add_device(group, dev);
iommu_group_put(group);
- return ret;
+ return 0;
}
static void exynos_iommu_remove_device(struct device *dev)
@@ -1182,6 +1286,7 @@ static struct iommu_ops exynos_iommu_ops = {
.unmap = exynos_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = exynos_iommu_iova_to_phys,
+ .device_group = get_device_iommu_group,
.add_device = exynos_iommu_add_device,
.remove_device = exynos_iommu_remove_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
@@ -1245,6 +1350,13 @@ static int __init exynos_iommu_of_setup(struct device_node *np)
if (IS_ERR(pdev))
return PTR_ERR(pdev);
+ /*
+ * Use the first registered sysmmu device for performing
+ * DMA mapping operations on IOMMU page tables (CPU cache flush).
+ */
+ if (!dma_dev)
+ dma_dev = &pdev->dev;
+
of_iommu_set_ops(np, &exynos_iommu_ops);
return 0;
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a2e1b7f14df2..e1852e845d21 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2458,7 +2458,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
}
/* register PCI DMA alias device */
- if (req_id != dma_alias && dev_is_pci(dev)) {
+ if (dev_is_pci(dev) && req_id != dma_alias) {
tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
dma_alias & 0xff, NULL, domain);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
new file mode 100644
index 000000000000..9488e3c97bcb
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -0,0 +1,846 @@
+/*
+ * CPU-agnostic ARM page table allocator.
+ *
+ * ARMv7 Short-descriptor format, supporting
+ * - Basic memory attributes
+ * - Simplified access permissions (AP[2:1] model)
+ * - Backwards-compatible TEX remap
+ * - Large pages/supersections (if indicated by the caller)
+ *
+ * Not supporting:
+ * - Legacy access permissions (AP[2:0] model)
+ *
+ * Almost certainly never supporting:
+ * - PXN
+ * - Domains
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014-2015 ARM Limited
+ * Copyright (c) 2014-2015 MediaTek Inc.
+ */
+
+#define pr_fmt(fmt) "arm-v7s io-pgtable: " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/kmemleak.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+
+#include "io-pgtable.h"
+
+/* Struct accessors */
+#define io_pgtable_to_data(x) \
+ container_of((x), struct arm_v7s_io_pgtable, iop)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+/*
+ * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
+ * and 12 bits in a page. With some carefully-chosen coefficients we can
+ * hide the ugly inconsistencies behind these macros and at least let the
+ * rest of the code pretend to be somewhat sane.
+ */
+#define ARM_V7S_ADDR_BITS 32
+#define _ARM_V7S_LVL_BITS(lvl) (16 - (lvl) * 4)
+#define ARM_V7S_LVL_SHIFT(lvl) (ARM_V7S_ADDR_BITS - (4 + 8 * (lvl)))
+#define ARM_V7S_TABLE_SHIFT 10
+
+#define ARM_V7S_PTES_PER_LVL(lvl) (1 << _ARM_V7S_LVL_BITS(lvl))
+#define ARM_V7S_TABLE_SIZE(lvl) \
+ (ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte))
+
+#define ARM_V7S_BLOCK_SIZE(lvl) (1UL << ARM_V7S_LVL_SHIFT(lvl))
+#define ARM_V7S_LVL_MASK(lvl) ((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
+#define ARM_V7S_TABLE_MASK ((u32)(~0U << ARM_V7S_TABLE_SHIFT))
+#define _ARM_V7S_IDX_MASK(lvl) (ARM_V7S_PTES_PER_LVL(lvl) - 1)
+#define ARM_V7S_LVL_IDX(addr, lvl) ({ \
+ int _l = lvl; \
+ ((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \
+})
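As a concrete check of those coefficients, a stand-alone sketch (macros re-declared from the block above; the IOVA is illustrative):

#include <assert.h>

#define ARM_V7S_ADDR_BITS	32
#define _ARM_V7S_LVL_BITS(lvl)	(16 - (lvl) * 4)
#define ARM_V7S_LVL_SHIFT(lvl)	(ARM_V7S_ADDR_BITS - (4 + 8 * (lvl)))
#define ARM_V7S_PTES_PER_LVL(l)	(1 << _ARM_V7S_LVL_BITS(l))
#define _ARM_V7S_IDX_MASK(lvl)	(ARM_V7S_PTES_PER_LVL(lvl) - 1)
#define ARM_V7S_LVL_IDX(addr, lvl) ({ \
	int _l = lvl; \
	((unsigned int)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \
})

int main(void)
{
	unsigned long iova = 0x12345678;

	/* 12 bits of level-1 index, 8 of level-2 index, 12 of page offset */
	assert(ARM_V7S_LVL_IDX(iova, 1) == 0x123);	/* iova[31:20] */
	assert(ARM_V7S_LVL_IDX(iova, 2) == 0x45);	/* iova[19:12] */
	return 0;
}

(The statement-expression form of ARM_V7S_LVL_IDX is a GCC extension, as in the original.)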
+
+/*
+ * Large page/supersection entries are effectively a block of 16 page/section
+ * entries, along the lines of the LPAE contiguous hint, but all with the
+ * same output address. For want of a better common name we'll call them
+ * "contiguous" versions of their respective page/section entries here, but
+ * noting the distinction (WRT TLB maintenance) that they represent *one*
+ * entry repeated 16 times, not 16 separate entries (as in the LPAE case).
+ */
+#define ARM_V7S_CONT_PAGES 16
+
+/* PTE type bits: these are all mixed up with XN/PXN bits in most cases */
+#define ARM_V7S_PTE_TYPE_TABLE 0x1
+#define ARM_V7S_PTE_TYPE_PAGE 0x2
+#define ARM_V7S_PTE_TYPE_CONT_PAGE 0x1
+
+#define ARM_V7S_PTE_IS_VALID(pte) (((pte) & 0x3) != 0)
+#define ARM_V7S_PTE_IS_TABLE(pte, lvl) ((lvl) == 1 && ((pte) & ARM_V7S_PTE_TYPE_TABLE))
+
+/* Page table bits */
+#define ARM_V7S_ATTR_XN(lvl) BIT(4 * (2 - (lvl)))
+#define ARM_V7S_ATTR_B BIT(2)
+#define ARM_V7S_ATTR_C BIT(3)
+#define ARM_V7S_ATTR_NS_TABLE BIT(3)
+#define ARM_V7S_ATTR_NS_SECTION BIT(19)
+
+#define ARM_V7S_CONT_SECTION BIT(18)
+#define ARM_V7S_CONT_PAGE_XN_SHIFT 15
+
+/*
+ * The attribute bits are consistently ordered*, but occupy bits [17:10] of
+ * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual
+ * fields relative to that 8-bit block, plus a total shift relative to the PTE.
+ */
+#define ARM_V7S_ATTR_SHIFT(lvl) (16 - (lvl) * 6)
+
+#define ARM_V7S_ATTR_MASK 0xff
+#define ARM_V7S_ATTR_AP0 BIT(0)
+#define ARM_V7S_ATTR_AP1 BIT(1)
+#define ARM_V7S_ATTR_AP2 BIT(5)
+#define ARM_V7S_ATTR_S BIT(6)
+#define ARM_V7S_ATTR_NG BIT(7)
+#define ARM_V7S_TEX_SHIFT 2
+#define ARM_V7S_TEX_MASK 0x7
+#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
+
+/* *well, except for TEX on level 2 large pages, of course :( */
+#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
+#define ARM_V7S_CONT_PAGE_TEX_MASK (ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)
+
+/* Simplified access permissions */
+#define ARM_V7S_PTE_AF ARM_V7S_ATTR_AP0
+#define ARM_V7S_PTE_AP_UNPRIV ARM_V7S_ATTR_AP1
+#define ARM_V7S_PTE_AP_RDONLY ARM_V7S_ATTR_AP2
+
+/* Register bits */
+#define ARM_V7S_RGN_NC 0
+#define ARM_V7S_RGN_WBWA 1
+#define ARM_V7S_RGN_WT 2
+#define ARM_V7S_RGN_WB 3
+
+#define ARM_V7S_PRRR_TYPE_DEVICE 1
+#define ARM_V7S_PRRR_TYPE_NORMAL 2
+#define ARM_V7S_PRRR_TR(n, type) (((type) & 0x3) << ((n) * 2))
+#define ARM_V7S_PRRR_DS0 BIT(16)
+#define ARM_V7S_PRRR_DS1 BIT(17)
+#define ARM_V7S_PRRR_NS0 BIT(18)
+#define ARM_V7S_PRRR_NS1 BIT(19)
+#define ARM_V7S_PRRR_NOS(n) BIT((n) + 24)
+
+#define ARM_V7S_NMRR_IR(n, attr) (((attr) & 0x3) << ((n) * 2))
+#define ARM_V7S_NMRR_OR(n, attr) (((attr) & 0x3) << ((n) * 2 + 16))
+
+#define ARM_V7S_TTBR_S BIT(1)
+#define ARM_V7S_TTBR_NOS BIT(5)
+#define ARM_V7S_TTBR_ORGN_ATTR(attr) (((attr) & 0x3) << 3)
+#define ARM_V7S_TTBR_IRGN_ATTR(attr) \
+ ((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
+
+#define ARM_V7S_TCR_PD1 BIT(5)
+
+typedef u32 arm_v7s_iopte;
+
+static bool selftest_running;
+
+struct arm_v7s_io_pgtable {
+ struct io_pgtable iop;
+
+ arm_v7s_iopte *pgd;
+ struct kmem_cache *l2_tables;
+};
+
+static dma_addr_t __arm_v7s_dma_addr(void *pages)
+{
+ return (dma_addr_t)virt_to_phys(pages);
+}
+
+static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl)
+{
+ if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
+ pte &= ARM_V7S_TABLE_MASK;
+ else
+ pte &= ARM_V7S_LVL_MASK(lvl);
+ return phys_to_virt(pte);
+}
+
+static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
+ struct arm_v7s_io_pgtable *data)
+{
+ struct device *dev = data->iop.cfg.iommu_dev;
+ dma_addr_t dma;
+ size_t size = ARM_V7S_TABLE_SIZE(lvl);
+ void *table = NULL;
+
+ if (lvl == 1)
+ table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+ else if (lvl == 2)
+ table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+ if (table && !selftest_running) {
+ dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto out_free;
+ /*
+ * We depend on the IOMMU being able to work with any physical
+ * address directly, so if the DMA layer suggests otherwise by
+ * translating or truncating them, that bodes very badly...
+ */
+ if (dma != virt_to_phys(table))
+ goto out_unmap;
+ }
+ kmemleak_ignore(table);
+ return table;
+
+out_unmap:
+ dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
+ dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+out_free:
+ if (lvl == 1)
+ free_pages((unsigned long)table, get_order(size));
+ else
+ kmem_cache_free(data->l2_tables, table);
+ return NULL;
+}
+
+static void __arm_v7s_free_table(void *table, int lvl,
+ struct arm_v7s_io_pgtable *data)
+{
+ struct device *dev = data->iop.cfg.iommu_dev;
+ size_t size = ARM_V7S_TABLE_SIZE(lvl);
+
+ if (!selftest_running)
+ dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
+ DMA_TO_DEVICE);
+ if (lvl == 1)
+ free_pages((unsigned long)table, get_order(size));
+ else
+ kmem_cache_free(data->l2_tables, table);
+}
+
+static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
+ struct io_pgtable_cfg *cfg)
+{
+ if (selftest_running)
+ return;
+
+ dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
+ num_entries * sizeof(*ptep), DMA_TO_DEVICE);
+}
+static void __arm_v7s_set_pte(arm_v7s_iopte *ptep, arm_v7s_iopte pte,
+ int num_entries, struct io_pgtable_cfg *cfg)
+{
+ int i;
+
+ for (i = 0; i < num_entries; i++)
+ ptep[i] = pte;
+
+ __arm_v7s_pte_sync(ptep, num_entries, cfg);
+}
+
+static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
+ struct io_pgtable_cfg *cfg)
+{
+ bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
+ arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S |
+ ARM_V7S_ATTR_TEX(1);
+
+ if (ap) {
+ pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
+ if (!(prot & IOMMU_WRITE))
+ pte |= ARM_V7S_PTE_AP_RDONLY;
+ }
+ pte <<= ARM_V7S_ATTR_SHIFT(lvl);
+
+ if ((prot & IOMMU_NOEXEC) && ap)
+ pte |= ARM_V7S_ATTR_XN(lvl);
+ if (prot & IOMMU_CACHE)
+ pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;
+
+ return pte;
+}
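Plugging a typical request into the helper above makes the shifting easier to follow (a hand-worked example, not additional driver code):

/*
 * prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, lvl = 2, no quirks:
 *
 *   attrs = NG | S | TEX(1)                 = 0x80 | 0x40 | 0x04 = 0xc4
 *   attrs |= AF | AP_UNPRIV  (writable, so no AP_RDONLY)         = 0xc7
 *   pte    = attrs << ARM_V7S_ATTR_SHIFT(2)                      = 0xc70
 *   pte   |= B | C           (IOMMU_CACHE)                       = 0xc7c
 *
 * arm_v7s_init_pte() later ORs in the type bits and the physical address.
 */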
+
+static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
+{
+ int prot = IOMMU_READ;
+
+ /* AP_RDONLY (AP2) set means the mapping is read-only */
+ if (!(pte & (ARM_V7S_PTE_AP_RDONLY << ARM_V7S_ATTR_SHIFT(lvl))))
+ prot |= IOMMU_WRITE;
+ if (pte & ARM_V7S_ATTR_C)
+ prot |= IOMMU_CACHE;
+
+ return prot;
+}
+
+static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
+{
+ if (lvl == 1) {
+ pte |= ARM_V7S_CONT_SECTION;
+ } else if (lvl == 2) {
+ arm_v7s_iopte xn = pte & ARM_V7S_ATTR_XN(lvl);
+ arm_v7s_iopte tex = pte & ARM_V7S_CONT_PAGE_TEX_MASK;
+
+ pte ^= xn | tex | ARM_V7S_PTE_TYPE_PAGE;
+ pte |= (xn << ARM_V7S_CONT_PAGE_XN_SHIFT) |
+ (tex << ARM_V7S_CONT_PAGE_TEX_SHIFT) |
+ ARM_V7S_PTE_TYPE_CONT_PAGE;
+ }
+ return pte;
+}
+
+static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
+{
+ if (lvl == 1) {
+ pte &= ~ARM_V7S_CONT_SECTION;
+ } else if (lvl == 2) {
+ arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
+ arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
+ ARM_V7S_CONT_PAGE_TEX_SHIFT);
+
+ pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
+ pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
+ (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
+ ARM_V7S_PTE_TYPE_PAGE;
+ }
+ return pte;
+}
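The two conversions are exact inverses: at level 2 the XOR clears XN, TEX and the type bit in their small-page positions (bit 0, bits [8:6], bit 1) and the OR re-plants them in the large-page positions (bit 15, bits [14:12], bit 0), and vice versa, so arm_v7s_cont_to_pte(arm_v7s_pte_to_cont(pte, 2), 2) == pte for any level-2 page entry.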
+
+static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
+{
+ if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl))
+ return pte & ARM_V7S_CONT_SECTION;
+ else if (lvl == 2)
+ return !(pte & ARM_V7S_PTE_TYPE_PAGE);
+ return false;
+}
+
+static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
+ size_t, int, arm_v7s_iopte *);
+
+static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr, int prot,
+ int lvl, int num_entries, arm_v7s_iopte *ptep)
+{
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ arm_v7s_iopte pte = arm_v7s_prot_to_pte(prot, lvl, cfg);
+ int i;
+
+ for (i = 0; i < num_entries; i++)
+ if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
+ /*
+ * We need to unmap and free the old table before
+ * overwriting it with a block entry.
+ */
+ arm_v7s_iopte *tblp;
+ size_t sz = ARM_V7S_BLOCK_SIZE(lvl);
+
+ tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
+ if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
+ sz, lvl, tblp) != sz))
+ return -EINVAL;
+ } else if (ptep[i]) {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
+ }
+
+ pte |= ARM_V7S_PTE_TYPE_PAGE;
+ if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
+ pte |= ARM_V7S_ATTR_NS_SECTION;
+
+ if (num_entries > 1)
+ pte = arm_v7s_pte_to_cont(pte, lvl);
+
+ pte |= paddr & ARM_V7S_LVL_MASK(lvl);
+
+ __arm_v7s_set_pte(ptep, pte, num_entries, cfg);
+ return 0;
+}
+
+static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot,
+ int lvl, arm_v7s_iopte *ptep)
+{
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ arm_v7s_iopte pte, *cptep;
+ int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
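+ /* Zero if the size is smaller than a block at this level; ARM_V7S_CONT_PAGES for a contiguous run */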
+
+ /* Find our entry at the current level */
+ ptep += ARM_V7S_LVL_IDX(iova, lvl);
+
+ /* If we can install a leaf entry at this level, then do so */
+ if (num_entries)
+ return arm_v7s_init_pte(data, iova, paddr, prot,
+ lvl, num_entries, ptep);
+
+ /* We can't allocate tables at the final level */
+ if (WARN_ON(lvl == 2))
+ return -EINVAL;
+
+ /* Grab a pointer to the next level */
+ pte = *ptep;
+ if (!pte) {
+ cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
+ if (!cptep)
+ return -ENOMEM;
+
+ pte = virt_to_phys(cptep) | ARM_V7S_PTE_TYPE_TABLE;
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_V7S_ATTR_NS_TABLE;
+
+ __arm_v7s_set_pte(ptep, pte, 1, cfg);
+ } else {
+ cptep = iopte_deref(pte, lvl);
+ }
+
+ /* Rinse, repeat */
+ return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+}
+
+static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable *iop = &data->iop;
+ int ret;
+
+ /* If no access, then nothing to do */
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
+ ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
+ /*
+ * Synchronise all PTE updates for the new mapping before there's
+ * a chance for anything to kick off a table walk for the new iova.
+ */
+ if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
+ io_pgtable_tlb_add_flush(iop, iova, size,
+ ARM_V7S_BLOCK_SIZE(2), false);
+ io_pgtable_tlb_sync(iop);
+ } else {
+ wmb();
+ }
+
+ return ret;
+}
+
+static void arm_v7s_free_pgtable(struct io_pgtable *iop)
+{
+ struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
+ int i;
+
+ for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) {
+ arm_v7s_iopte pte = data->pgd[i];
+
+ if (ARM_V7S_PTE_IS_TABLE(pte, 1))
+ __arm_v7s_free_table(iopte_deref(pte, 1), 2, data);
+ }
+ __arm_v7s_free_table(data->pgd, 1, data);
+ kmem_cache_destroy(data->l2_tables);
+ kfree(data);
+}
+
+static void arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, int idx, int lvl,
+ arm_v7s_iopte *ptep)
+{
+ struct io_pgtable *iop = &data->iop;
+ arm_v7s_iopte pte;
+ size_t size = ARM_V7S_BLOCK_SIZE(lvl);
+ int i;
+
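+ /* Rewind to the start of the contiguous run and rewrite it as ARM_V7S_CONT_PAGES individual entries */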
+ ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
+ pte = arm_v7s_cont_to_pte(*ptep, lvl);
+ for (i = 0; i < ARM_V7S_CONT_PAGES; i++) {
+ ptep[i] = pte;
+ pte += size;
+ }
+
+ __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
+
+ size *= ARM_V7S_CONT_PAGES;
+ io_pgtable_tlb_add_flush(iop, iova, size, size, true);
+ io_pgtable_tlb_sync(iop);
+}
+
+static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, size_t size,
+ arm_v7s_iopte *ptep)
+{
+ unsigned long blk_start, blk_end, blk_size;
+ phys_addr_t blk_paddr;
+ arm_v7s_iopte table = 0;
+ int prot = arm_v7s_pte_to_prot(*ptep, 1);
+
+ blk_size = ARM_V7S_BLOCK_SIZE(1);
+ blk_start = iova & ARM_V7S_LVL_MASK(1);
+ blk_end = blk_start + ARM_V7S_BLOCK_SIZE(1);
+ blk_paddr = *ptep & ARM_V7S_LVL_MASK(1);
+
+ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
+ arm_v7s_iopte *tablep;
+
+ /* Skip the block being unmapped, leaving a hole in the new table */
+ if (blk_start == iova)
+ continue;
+
+ /* __arm_v7s_map expects a pointer to the start of the table */
+ tablep = &table - ARM_V7S_LVL_IDX(blk_start, 1);
+ if (__arm_v7s_map(data, blk_start, blk_paddr, size, prot, 1,
+ tablep) < 0) {
+ if (table) {
+ /* Free the table we allocated */
+ tablep = iopte_deref(table, 1);
+ __arm_v7s_free_table(tablep, 2, data);
+ }
+ return 0; /* Bytes unmapped */
+ }
+ }
+
+ __arm_v7s_set_pte(ptep, table, 1, &data->iop.cfg);
+ iova &= ~(blk_size - 1);
+ io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
+ return size;
+}
+
+static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_v7s_iopte *ptep)
+{
+ arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
+ struct io_pgtable *iop = &data->iop;
+ int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
+
+ /* Something went horribly wrong and we ran out of page table */
+ if (WARN_ON(lvl > 2))
+ return 0;
+
+ idx = ARM_V7S_LVL_IDX(iova, lvl);
+ ptep += idx;
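+ /* Snapshot the PTEs covering this range, bailing out if any is already invalid */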
+ do {
+ if (WARN_ON(!ARM_V7S_PTE_IS_VALID(ptep[i])))
+ return 0;
+ pte[i] = ptep[i];
+ } while (++i < num_entries);
+
+ /*
+ * If we've hit a contiguous 'large page' entry at this level, it
+ * needs splitting first, unless we're unmapping the whole lot.
+ */
+ if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl))
+ arm_v7s_split_cont(data, iova, idx, lvl, ptep);
+
+ /* If the size matches this level, we're in the right place */
+ if (num_entries) {
+ size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);
+
+ __arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);
+
+ for (i = 0; i < num_entries; i++) {
+ if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
+ /* Also flush any partial walks */
+ io_pgtable_tlb_add_flush(iop, iova, blk_size,
+ ARM_V7S_BLOCK_SIZE(lvl + 1), false);
+ io_pgtable_tlb_sync(iop);
+ ptep = iopte_deref(pte[i], lvl);
+ __arm_v7s_free_table(ptep, lvl + 1, data);
+ } else {
+ io_pgtable_tlb_add_flush(iop, iova, blk_size,
+ blk_size, true);
+ }
+ iova += blk_size;
+ }
+ return size;
+ } else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
+ /*
+ * Insert a table at the next level to map the old region,
+ * minus the part we want to unmap
+ */
+ return arm_v7s_split_blk_unmap(data, iova, size, ptep);
+ }
+
+ /* Keep on walkin' */
+ ptep = iopte_deref(pte[0], lvl);
+ return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
+}
+
+static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size)
+{
+ struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ size_t unmapped;
+
+ unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
+ if (unmapped)
+ io_pgtable_tlb_sync(&data->iop);
+
+ return unmapped;
+}
+
+static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_v7s_iopte *ptep = data->pgd, pte;
+ int lvl = 0;
+ u32 mask;
+
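+ /* Descend through the tables until we reach a non-table (leaf or invalid) entry */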
+ do {
+ pte = ptep[ARM_V7S_LVL_IDX(iova, ++lvl)];
+ ptep = iopte_deref(pte, lvl);
+ } while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
+
+ if (!ARM_V7S_PTE_IS_VALID(pte))
+ return 0;
+
+ mask = ARM_V7S_LVL_MASK(lvl);
+ if (arm_v7s_pte_is_cont(pte, lvl))
+ mask *= ARM_V7S_CONT_PAGES;
+ return (pte & mask) | (iova & ~mask);
+}
+
+static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
+ void *cookie)
+{
+ struct arm_v7s_io_pgtable *data;
+
+ if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
+ return NULL;
+
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
+ IO_PGTABLE_QUIRK_NO_PERMS |
+ IO_PGTABLE_QUIRK_TLBI_ON_MAP))
+ return NULL;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
+ ARM_V7S_TABLE_SIZE(2),
+ ARM_V7S_TABLE_SIZE(2),
+ SLAB_CACHE_DMA, NULL);
+ if (!data->l2_tables)
+ goto out_free_data;
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map = arm_v7s_map,
+ .unmap = arm_v7s_unmap,
+ .iova_to_phys = arm_v7s_iova_to_phys,
+ };
+
+ /* We have to do this early for __arm_v7s_alloc_table to work... */
+ data->iop.cfg = *cfg;
+
+ /*
+ * Unless the IOMMU driver indicates supersection support by
+ * having SZ_16M set in the initial bitmap, they won't be used.
+ */
+ cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
+
+ /* TCR: T0SZ=0, disable TTBR1 */
+ cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;
+
+ /*
+ * TEX remap: the indices used map to the closest equivalent types
+ * under the non-TEX-remap interpretation of those attribute bits,
+ * excepting various implementation-defined aspects of shareability.
+ */
+ cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
+ ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
+ ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
+ ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
+ ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
+ cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
+ ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);
+
+ /* Looking good; allocate a pgd */
+ data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
+ if (!data->pgd)
+ goto out_free_data;
+
+ /* Ensure the empty pgd is visible before any actual TTBR write */
+ wmb();
+
+ /* TTBRs */
+ cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
+ ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
+ ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA);
+ cfg->arm_v7s_cfg.ttbr[1] = 0;
+ return &data->iop;
+
+out_free_data:
+ kmem_cache_destroy(data->l2_tables);
+ kfree(data);
+ return NULL;
+}
+
+struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
+ .alloc = arm_v7s_alloc_pgtable,
+ .free = arm_v7s_free_pgtable,
+};
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_add_flush(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+ WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_sync(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static struct iommu_gather_ops dummy_tlb_ops = {
+ .tlb_flush_all = dummy_tlb_flush_all,
+ .tlb_add_flush = dummy_tlb_add_flush,
+ .tlb_sync = dummy_tlb_sync,
+};
+
+#define __FAIL(ops) ({ \
+ WARN(1, "selftest: test failed\n"); \
+ selftest_running = false; \
+ -EFAULT; \
+})
+
+static int __init arm_v7s_do_selftests(void)
+{
+ struct io_pgtable_ops *ops;
+ struct io_pgtable_cfg cfg = {
+ .tlb = &dummy_tlb_ops,
+ .oas = 32,
+ .ias = 32,
+ .quirks = IO_PGTABLE_QUIRK_ARM_NS,
+ .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+ };
+ unsigned int iova, size, iova_start;
+ unsigned int i, loopnr = 0;
+
+ selftest_running = true;
+
+ cfg_cookie = &cfg;
+
+ ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
+ if (!ops) {
+ pr_err("selftest: failed to allocate io pgtable ops\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initial sanity checks.
+ * Empty page tables shouldn't provide any translations.
+ */
+ if (ops->iova_to_phys(ops, 42))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, SZ_1G + 42))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, SZ_2G + 42))
+ return __FAIL(ops);
+
+ /*
+ * Distinct mappings of different granule sizes.
+ */
+ iova = 0;
+ i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
+ while (i != BITS_PER_LONG) {
+ size = 1UL << i;
+ if (ops->map(ops, iova, iova, size, IOMMU_READ |
+ IOMMU_WRITE |
+ IOMMU_NOEXEC |
+ IOMMU_CACHE))
+ return __FAIL(ops);
+
+ /* Overlapping mappings */
+ if (!ops->map(ops, iova, iova + size, size,
+ IOMMU_READ | IOMMU_NOEXEC))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops);
+
+ iova += SZ_16M;
+ i++;
+ i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
+ loopnr++;
+ }
+
+ /* Partial unmap */
+ i = 1;
+ size = 1UL << __ffs(cfg.pgsize_bitmap);
+ while (i < loopnr) {
+ iova_start = i * SZ_16M;
+ if (ops->unmap(ops, iova_start + size, size) != size)
+ return __FAIL(ops);
+
+ /* Remap of partial unmap */
+ if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, iova_start + size + 42)
+ != (size + 42))
+ return __FAIL(ops);
+ i++;
+ }
+
+ /* Full unmap */
+ iova = 0;
+ i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
+ while (i != BITS_PER_LONG) {
+ size = 1UL << i;
+
+ if (ops->unmap(ops, iova, size) != size)
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, iova + 42))
+ return __FAIL(ops);
+
+ /* Remap full block */
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops);
+
+ iova += SZ_16M;
+ i++;
+ i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
+ }
+
+ free_io_pgtable_ops(ops);
+
+ selftest_running = false;
+
+ pr_info("self test ok\n");
+ return 0;
+}
+subsys_initcall(arm_v7s_do_selftests);
+#endif
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 381ca5a37a7b..f433b516098a 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -446,7 +446,6 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
unsigned long blk_start, blk_end;
phys_addr_t blk_paddr;
arm_lpae_iopte table = 0;
- struct io_pgtable_cfg *cfg = &data->iop.cfg;
blk_start = iova & ~(blk_size - 1);
blk_end = blk_start + blk_size;
@@ -472,9 +471,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
}
}
- __arm_lpae_set_pte(ptep, table, cfg);
+ __arm_lpae_set_pte(ptep, table, &data->iop.cfg);
iova &= ~(blk_size - 1);
- cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
+ io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
return size;
}
@@ -483,8 +482,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte;
- const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
- void *cookie = data->iop.cookie;
+ struct io_pgtable *iop = &data->iop;
size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
/* Something went horribly wrong and we ran out of page table */
@@ -498,17 +496,17 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* If the size matches this level, we're in the right place */
if (size == blk_size) {
- __arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
+ __arm_lpae_set_pte(ptep, 0, &iop->cfg);
if (!iopte_leaf(pte, lvl)) {
/* Also flush any partial walks */
- tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
- false, cookie);
- tlb->tlb_sync(cookie);
+ io_pgtable_tlb_add_flush(iop, iova, size,
+ ARM_LPAE_GRANULE(data), false);
+ io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
} else {
- tlb->tlb_add_flush(iova, size, size, true, cookie);
+ io_pgtable_tlb_add_flush(iop, iova, size, size, true);
}
return size;
@@ -532,13 +530,12 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
{
size_t unmapped;
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- struct io_pgtable *iop = &data->iop;
arm_lpae_iopte *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
if (unmapped)
- iop->cfg.tlb->tlb_sync(iop->cookie);
+ io_pgtable_tlb_sync(&data->iop);
return unmapped;
}
@@ -662,8 +659,12 @@ static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
u64 reg;
- struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+ struct arm_lpae_io_pgtable *data;
+ if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
+ return NULL;
+
+ data = arm_lpae_alloc_pgtable(cfg);
if (!data)
return NULL;
@@ -746,8 +747,13 @@ static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
u64 reg, sl;
- struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+ struct arm_lpae_io_pgtable *data;
+
+ /* The NS quirk doesn't apply at stage 2 */
+ if (cfg->quirks)
+ return NULL;
+
+ data = arm_lpae_alloc_pgtable(cfg);
if (!data)
return NULL;
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 6f2e319d4f04..876f6a76d288 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -33,6 +33,9 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
#endif
+#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
+ [ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
+#endif
};
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
@@ -72,6 +75,6 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
return;
iop = container_of(ops, struct io_pgtable, ops);
- iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ io_pgtable_tlb_flush_all(iop);
io_pgtable_init_table[iop->fmt]->free(iop);
}
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 36673c83de58..d4f502742e3b 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -1,5 +1,6 @@
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
+#include <linux/bitops.h>
/*
* Public API for use by IOMMU drivers
@@ -9,6 +10,7 @@ enum io_pgtable_fmt {
ARM_32_LPAE_S2,
ARM_64_LPAE_S1,
ARM_64_LPAE_S2,
+ ARM_V7S,
IO_PGTABLE_NUM_FMTS,
};
@@ -45,8 +47,24 @@ struct iommu_gather_ops {
* page table walker.
*/
struct io_pgtable_cfg {
- #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
- int quirks;
+ /*
+ * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
+ * stage 1 PTEs, for hardware which insists on validating them
+ * even in non-secure state where they should normally be ignored.
+ *
+ * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
+ * IOMMU_NOEXEC flags and map everything with full access, for
+ * hardware which does not implement the permissions of a given
+ * format, and/or requires some format-specific default value.
+ *
+ * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
+ * (unmapped) entries but the hardware might do so anyway, perform
+ * TLB maintenance when mapping as well as when unmapping.
+ */
+ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
+ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
+ #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
+ unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
unsigned int oas;
@@ -65,6 +83,13 @@ struct io_pgtable_cfg {
u64 vttbr;
u64 vtcr;
} arm_lpae_s2_cfg;
+
+ struct {
+ u32 ttbr[2];
+ u32 tcr;
+ u32 nmrr;
+ u32 prrr;
+ } arm_v7s_cfg;
};
};
@@ -121,18 +146,41 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops);
* @fmt: The page table format.
* @cookie: An opaque token provided by the IOMMU driver and passed back to
* any callback routines.
+ * @tlb_sync_pending: Private flag for optimising out redundant syncs.
* @cfg: A copy of the page table configuration.
* @ops: The page table operations in use for this set of page tables.
*/
struct io_pgtable {
enum io_pgtable_fmt fmt;
void *cookie;
+ bool tlb_sync_pending;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops ops;
};
#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
+static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
+{
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ iop->tlb_sync_pending = true;
+}
+
+static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
+ unsigned long iova, size_t size, size_t granule, bool leaf)
+{
+ iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+ iop->tlb_sync_pending = true;
+}
+
+static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
+{
+ if (iop->tlb_sync_pending) {
+ iop->cfg.tlb->tlb_sync(iop->cookie);
+ iop->tlb_sync_pending = false;
+ }
+}
+
/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.
@@ -149,5 +197,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
#endif /* __IO_PGTABLE_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 0e3b0092ec92..b9df1411c894 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (!group->default_domain) {
group->default_domain = __iommu_domain_alloc(dev->bus,
IOMMU_DOMAIN_DMA);
- group->domain = group->default_domain;
+ if (!group->domain)
+ group->domain = group->default_domain;
}
ret = iommu_group_add_device(group, dev);
@@ -1314,6 +1315,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
unsigned long orig_iova = iova;
unsigned int min_pagesz;
size_t orig_size = size;
+ phys_addr_t orig_paddr = paddr;
int ret = 0;
if (unlikely(domain->ops->map == NULL ||
@@ -1358,7 +1360,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
else
- trace_map(orig_iova, paddr, orig_size);
+ trace_map(orig_iova, orig_paddr, orig_size);
return ret;
}
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
new file mode 100644
index 000000000000..929a66a81b2b
--- /dev/null
+++ b/drivers/iommu/mtk_iommu.c
@@ -0,0 +1,736 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-iommu.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/of_address.h>
+#include <linux/of_iommu.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <asm/barrier.h>
+#include <dt-bindings/memory/mt8173-larb-port.h>
+#include <soc/mediatek/smi.h>
+
+#include "io-pgtable.h"
+
+#define REG_MMU_PT_BASE_ADDR 0x000
+
+#define REG_MMU_INVALIDATE 0x020
+#define F_ALL_INVLD 0x2
+#define F_MMU_INV_RANGE 0x1
+
+#define REG_MMU_INVLD_START_A 0x024
+#define REG_MMU_INVLD_END_A 0x028
+
+#define REG_MMU_INV_SEL 0x038
+#define F_INVLD_EN0 BIT(0)
+#define F_INVLD_EN1 BIT(1)
+
+#define REG_MMU_STANDARD_AXI_MODE 0x048
+#define REG_MMU_DCM_DIS 0x050
+
+#define REG_MMU_CTRL_REG 0x110
+#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)
+#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5)
+
+#define REG_MMU_IVRP_PADDR 0x114
+#define F_MMU_IVRP_PA_SET(pa) ((pa) >> 1)
+
+#define REG_MMU_INT_CONTROL0 0x120
+#define F_L2_MULIT_HIT_EN BIT(0)
+#define F_TABLE_WALK_FAULT_INT_EN BIT(1)
+#define F_PREETCH_FIFO_OVERFLOW_INT_EN BIT(2)
+#define F_MISS_FIFO_OVERFLOW_INT_EN BIT(3)
+#define F_PREFETCH_FIFO_ERR_INT_EN BIT(5)
+#define F_MISS_FIFO_ERR_INT_EN BIT(6)
+#define F_INT_CLR_BIT BIT(12)
+
+#define REG_MMU_INT_MAIN_CONTROL 0x124
+#define F_INT_TRANSLATION_FAULT BIT(0)
+#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1)
+#define F_INT_INVALID_PA_FAULT BIT(2)
+#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3)
+#define F_INT_TLB_MISS_FAULT BIT(4)
+#define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5)
+#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6)
+
+#define REG_MMU_CPE_DONE 0x12C
+
+#define REG_MMU_FAULT_ST1 0x134
+
+#define REG_MMU_FAULT_VA 0x13c
+#define F_MMU_FAULT_VA_MSK 0xfffff000
+#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
+#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)
+
+#define REG_MMU_INVLD_PA 0x140
+#define REG_MMU_INT_ID 0x150
+#define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
+#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
+
+#define MTK_PROTECT_PA_ALIGN 128
+
+struct mtk_iommu_suspend_reg {
+ u32 standard_axi_mode;
+ u32 dcm_dis;
+ u32 ctrl_reg;
+ u32 int_control0;
+ u32 int_main_control;
+};
+
+struct mtk_iommu_client_priv {
+ struct list_head client;
+ unsigned int mtk_m4u_id;
+ struct device *m4udev;
+};
+
+struct mtk_iommu_domain {
+ spinlock_t pgtlock; /* lock for page table */
+
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops *iop;
+
+ struct iommu_domain domain;
+};
+
+struct mtk_iommu_data {
+ void __iomem *base;
+ int irq;
+ struct device *dev;
+ struct clk *bclk;
+ phys_addr_t protect_base; /* protect memory base */
+ struct mtk_iommu_suspend_reg reg;
+ struct mtk_iommu_domain *m4u_dom;
+ struct iommu_group *m4u_group;
+ struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
+};
+
+static struct iommu_ops mtk_iommu_ops;
+
+static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct mtk_iommu_domain, domain);
+}
+
+static void mtk_iommu_tlb_flush_all(void *cookie)
+{
+ struct mtk_iommu_data *data = cookie;
+
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);
+ writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
+ wmb(); /* Make sure the tlb flush all done */
+}
+
+static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
+ size_t granule, bool leaf,
+ void *cookie)
+{
+ struct mtk_iommu_data *data = cookie;
+
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);
+
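+ /* Program the range and kick off the invalidation; mtk_iommu_tlb_sync() polls for completion */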
+ writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
+ writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
+ writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+}
+
+static void mtk_iommu_tlb_sync(void *cookie)
+{
+ struct mtk_iommu_data *data = cookie;
+ int ret;
+ u32 tmp;
+
+ ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
+ tmp != 0, 10, 100000);
+ if (ret) {
+ dev_warn(data->dev,
+ "Partial TLB flush timed out, falling back to full flush\n");
+ mtk_iommu_tlb_flush_all(cookie);
+ }
+ /* Clear the CPE status */
+ writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+}
+
+static const struct iommu_gather_ops mtk_iommu_gather_ops = {
+ .tlb_flush_all = mtk_iommu_tlb_flush_all,
+ .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
+ .tlb_sync = mtk_iommu_tlb_sync,
+};
+
+static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+{
+ struct mtk_iommu_data *data = dev_id;
+ struct mtk_iommu_domain *dom = data->m4u_dom;
+ u32 int_state, regval, fault_iova, fault_pa;
+ unsigned int fault_larb, fault_port;
+ bool layer, write;
+
+ /* Read error info from registers */
+ int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
+ fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
+ layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
+ write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
+ fault_iova &= F_MMU_FAULT_VA_MSK;
+ fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
+ regval = readl_relaxed(data->base + REG_MMU_INT_ID);
+ fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
+ fault_port = F_MMU0_INT_ID_PORT_ID(regval);
+
+ if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
+ write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
+ dev_err_ratelimited(
+ data->dev,
+ "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
+ int_state, fault_iova, fault_pa, fault_larb, fault_port,
+ layer, write ? "write" : "read");
+ }
+
+ /* Interrupt clear */
+ regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
+ regval |= F_INT_CLR_BIT;
+ writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
+
+ mtk_iommu_tlb_flush_all(data);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_iommu_config(struct mtk_iommu_data *data,
+ struct device *dev, bool enable)
+{
+ struct mtk_iommu_client_priv *head, *cur, *next;
+ struct mtk_smi_larb_iommu *larb_mmu;
+ unsigned int larbid, portid;
+
+ head = dev->archdata.iommu;
+ list_for_each_entry_safe(cur, next, &head->client, client) {
+ larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
+ portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
+ larb_mmu = &data->smi_imu.larb_imu[larbid];
+
+ dev_dbg(dev, "%s iommu port: %d\n",
+ enable ? "enable" : "disable", portid);
+
+ if (enable)
+ larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
+ else
+ larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
+ }
+}
+
+static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+{
+ struct mtk_iommu_domain *dom = data->m4u_dom;
+
+ spin_lock_init(&dom->pgtlock);
+
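+ /*
+ * Quirks, per their descriptions in io-pgtable.h: the M4U has no
+ * per-page permission control and may cache invalid entries, so
+ * TLB maintenance is needed on map as well as unmap.
+ */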
+ dom->cfg = (struct io_pgtable_cfg) {
+ .quirks = IO_PGTABLE_QUIRK_ARM_NS |
+ IO_PGTABLE_QUIRK_NO_PERMS |
+ IO_PGTABLE_QUIRK_TLBI_ON_MAP,
+ .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
+ .ias = 32,
+ .oas = 32,
+ .tlb = &mtk_iommu_gather_ops,
+ .iommu_dev = data->dev,
+ };
+
+ dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
+ if (!dom->iop) {
+ dev_err(data->dev, "Failed to alloc io pgtable\n");
+ return -EINVAL;
+ }
+
+ /* Update our support page sizes bitmap */
+ mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+
+ writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
+ data->base + REG_MMU_PT_BASE_ADDR);
+ return 0;
+}
+
+static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+{
+ struct mtk_iommu_domain *dom;
+
+ if (type != IOMMU_DOMAIN_DMA)
+ return NULL;
+
+ dom = kzalloc(sizeof(*dom), GFP_KERNEL);
+ if (!dom)
+ return NULL;
+
+ if (iommu_get_dma_cookie(&dom->domain)) {
+ kfree(dom);
+ return NULL;
+ }
+
+ dom->domain.geometry.aperture_start = 0;
+ dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ dom->domain.geometry.force_aperture = true;
+
+ return &dom->domain;
+}
+
+static void mtk_iommu_domain_free(struct iommu_domain *domain)
+{
+ iommu_put_dma_cookie(domain);
+ kfree(to_mtk_domain(domain));
+}
+
+static int mtk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
+ struct mtk_iommu_data *data;
+ int ret;
+
+ if (!priv)
+ return -ENODEV;
+
+ data = dev_get_drvdata(priv->m4udev);
+ if (!data->m4u_dom) {
+ data->m4u_dom = dom;
+ ret = mtk_iommu_domain_finalise(data);
+ if (ret) {
+ data->m4u_dom = NULL;
+ return ret;
+ }
+ } else if (data->m4u_dom != dom) {
+ /* All the client devices should be in the same m4u domain */
+ dev_err(dev, "Can't attach to a different m4u iommu domain\n");
+ return -EPERM;
+ }
+
+ mtk_iommu_config(data, dev, true);
+ return 0;
+}
+
+static void mtk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
+ struct mtk_iommu_data *data;
+
+ if (!priv)
+ return;
+
+ data = dev_get_drvdata(priv->m4udev);
+ mtk_iommu_config(data, dev, false);
+}
+
+static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return ret;
+}
+
+static size_t mtk_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ size_t unmapsz;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ unmapsz = dom->iop->unmap(dom->iop, iova, size);
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return unmapsz;
+}
+
+static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ phys_addr_t pa;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ pa = dom->iop->iova_to_phys(dom->iop, iova);
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return pa;
+}
+
+static int mtk_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+
+ if (!dev->archdata.iommu) /* Not an iommu client device */
+ return -ENODEV;
+
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ iommu_group_put(group);
+ return 0;
+}
+
+static void mtk_iommu_remove_device(struct device *dev)
+{
+ struct mtk_iommu_client_priv *head, *cur, *next;
+
+ head = dev->archdata.iommu;
+ if (!head)
+ return;
+
+ list_for_each_entry_safe(cur, next, &head->client, client) {
+ list_del(&cur->client);
+ kfree(cur);
+ }
+ kfree(head);
+ dev->archdata.iommu = NULL;
+
+ iommu_group_remove_device(dev);
+}
+
+static struct iommu_group *mtk_iommu_device_group(struct device *dev)
+{
+ struct mtk_iommu_data *data;
+ struct mtk_iommu_client_priv *priv;
+
+ priv = dev->archdata.iommu;
+ if (!priv)
+ return ERR_PTR(-ENODEV);
+
+ /* All the client devices are in the same m4u iommu-group */
+ data = dev_get_drvdata(priv->m4udev);
+ if (!data->m4u_group) {
+ data->m4u_group = iommu_group_alloc();
+ if (IS_ERR(data->m4u_group))
+ dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ }
+ return data->m4u_group;
+}
+
+static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ struct mtk_iommu_client_priv *head, *priv, *next;
+ struct platform_device *m4updev;
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
+ args->args_count);
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.iommu) {
+ /* Get the m4u device */
+ m4updev = of_find_device_by_node(args->np);
+ of_node_put(args->np);
+ if (WARN_ON(!m4updev))
+ return -EINVAL;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ dev->archdata.iommu = head;
+ INIT_LIST_HEAD(&head->client);
+ head->m4udev = &m4updev->dev;
+ } else {
+ head = dev->archdata.iommu;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto err_free_mem;
+
+ priv->mtk_m4u_id = args->args[0];
+ list_add_tail(&priv->client, &head->client);
+
+ return 0;
+
+err_free_mem:
+ list_for_each_entry_safe(priv, next, &head->client, client)
+ kfree(priv);
+ kfree(head);
+ dev->archdata.iommu = NULL;
+ return -ENOMEM;
+}
+
+static struct iommu_ops mtk_iommu_ops = {
+ .domain_alloc = mtk_iommu_domain_alloc,
+ .domain_free = mtk_iommu_domain_free,
+ .attach_dev = mtk_iommu_attach_device,
+ .detach_dev = mtk_iommu_detach_device,
+ .map = mtk_iommu_map,
+ .unmap = mtk_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = mtk_iommu_iova_to_phys,
+ .add_device = mtk_iommu_add_device,
+ .remove_device = mtk_iommu_remove_device,
+ .device_group = mtk_iommu_device_group,
+ .of_xlate = mtk_iommu_of_xlate,
+ .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+};
+
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+{
+ u32 regval;
+ int ret;
+
+ ret = clk_prepare_enable(data->bclk);
+ if (ret) {
+ dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
+ return ret;
+ }
+
+ regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
+ F_MMU_TF_PROTECT_SEL(2);
+ writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
+
+ regval = F_L2_MULIT_HIT_EN |
+ F_TABLE_WALK_FAULT_INT_EN |
+ F_PREETCH_FIFO_OVERFLOW_INT_EN |
+ F_MISS_FIFO_OVERFLOW_INT_EN |
+ F_PREFETCH_FIFO_ERR_INT_EN |
+ F_MISS_FIFO_ERR_INT_EN;
+ writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
+
+ regval = F_INT_TRANSLATION_FAULT |
+ F_INT_MAIN_MULTI_HIT_FAULT |
+ F_INT_INVALID_PA_FAULT |
+ F_INT_ENTRY_REPLACEMENT_FAULT |
+ F_INT_TLB_MISS_FAULT |
+ F_INT_MISS_TRANSACTION_FIFO_FAULT |
+ F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
+ writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
+
+ writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+ data->base + REG_MMU_IVRP_PADDR);
+
+ writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
+ writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
+
+ if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
+ dev_name(data->dev), (void *)data)) {
+ writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
+ clk_disable_unprepare(data->bclk);
+ dev_err(data->dev, "Failed to request IRQ %d\n", data->irq);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int mtk_iommu_bind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ return component_bind_all(dev, &data->smi_imu);
+}
+
+static void mtk_iommu_unbind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ component_unbind_all(dev, &data->smi_imu);
+}
+
+static const struct component_master_ops mtk_iommu_com_ops = {
+ .bind = mtk_iommu_bind,
+ .unbind = mtk_iommu_unbind,
+};
+
+static int mtk_iommu_probe(struct platform_device *pdev)
+{
+ struct mtk_iommu_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct component_match *match = NULL;
+ void *protect;
+ int i, larb_nr, ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->dev = dev;
+
+ /* Protect memory: the HW accesses this region while handling a translation fault */
+ protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
+ if (!protect)
+ return -ENOMEM;
+ data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ data->bclk = devm_clk_get(dev, "bclk");
+ if (IS_ERR(data->bclk))
+ return PTR_ERR(data->bclk);
+
+ larb_nr = of_count_phandle_with_args(dev->of_node,
+ "mediatek,larbs", NULL);
+ if (larb_nr < 0)
+ return larb_nr;
+ data->smi_imu.larb_nr = larb_nr;
+
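+ /* Gather the local arbiters (larbs) this M4U serves; each becomes a component match entry */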
+ for (i = 0; i < larb_nr; i++) {
+ struct device_node *larbnode;
+ struct platform_device *plarbdev;
+
+ larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
+ if (!larbnode)
+ return -EINVAL;
+
+ if (!of_device_is_available(larbnode))
+ continue;
+
+ plarbdev = of_find_device_by_node(larbnode);
+ of_node_put(larbnode);
+ if (!plarbdev) {
+ plarbdev = of_platform_device_create(
+ larbnode, NULL,
+ platform_bus_type.dev_root);
+ if (!plarbdev)
+ return -EPROBE_DEFER;
+ }
+ data->smi_imu.larb_imu[i].dev = &plarbdev->dev;
+
+ component_match_add(dev, &match, compare_of, larbnode);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ ret = mtk_iommu_hw_init(data);
+ if (ret)
+ return ret;
+
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+
+ return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+}
+
+static int mtk_iommu_remove(struct platform_device *pdev)
+{
+ struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+
+ if (iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, NULL);
+
+ free_io_pgtable_ops(data->m4u_dom->iop);
+ clk_disable_unprepare(data->bclk);
+ devm_free_irq(&pdev->dev, data->irq, data);
+ component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+ return 0;
+}
+
+static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_suspend_reg *reg = &data->reg;
+ void __iomem *base = data->base;
+
+ reg->standard_axi_mode = readl_relaxed(base +
+ REG_MMU_STANDARD_AXI_MODE);
+ reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
+ reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
+ reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
+ reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
+ return 0;
+}
+
+static int __maybe_unused mtk_iommu_resume(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_suspend_reg *reg = &data->reg;
+ void __iomem *base = data->base;
+
+ writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
+ base + REG_MMU_PT_BASE_ADDR);
+ writel_relaxed(reg->standard_axi_mode,
+ base + REG_MMU_STANDARD_AXI_MODE);
+ writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
+ writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
+ writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
+ writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
+ writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+ base + REG_MMU_IVRP_PADDR);
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_iommu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+};
+
+static const struct of_device_id mtk_iommu_of_ids[] = {
+ { .compatible = "mediatek,mt8173-m4u", },
+ {}
+};
+
+static struct platform_driver mtk_iommu_driver = {
+ .probe = mtk_iommu_probe,
+ .remove = mtk_iommu_remove,
+ .driver = {
+ .name = "mtk-iommu",
+ .of_match_table = mtk_iommu_of_ids,
+ .pm = &mtk_iommu_pm_ops,
+ }
+};
+
+static int mtk_iommu_init_fn(struct device_node *np)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
+ if (!pdev)
+ return -ENOMEM;
+
+ ret = platform_driver_register(&mtk_iommu_driver);
+ if (ret) {
+ pr_err("%s: Failed to register driver\n", __func__);
+ return ret;
+ }
+
+ of_iommu_set_ops(np, &mtk_iommu_ops);
+ return 0;
+}
+
+IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 60ba238090d9..5fea665af99d 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -110,6 +110,7 @@ void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
if (WARN_ON(!iommu))
return;
+ of_node_get(np);
INIT_LIST_HEAD(&iommu->list);
iommu->np = np;
iommu->ops = ops;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ebf0adb8e7ea..5710a06c3049 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -86,7 +86,8 @@ struct rk_iommu_domain {
struct rk_iommu {
struct device *dev;
- void __iomem *base;
+ void __iomem **bases;
+ int num_mmu;
int irq;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
@@ -271,47 +272,70 @@ static u32 rk_iova_page_offset(dma_addr_t iova)
return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
-static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
- return readl(iommu->base + offset);
+ return readl(base + offset);
}
-static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
- writel(value, iommu->base + offset);
+ writel(value, base + offset);
}
static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
- writel(command, iommu->base + RK_MMU_COMMAND);
+ int i;
+
+ for (i = 0; i < iommu->num_mmu; i++)
+ writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}
+static void rk_iommu_base_command(void __iomem *base, u32 command)
+{
+ writel(command, base + RK_MMU_COMMAND);
+}
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
size_t size)
{
+ int i;
+
dma_addr_t iova_end = iova + size;
/*
* TODO(djkurtz): Figure out when it is more efficient to shootdown the
* entire iotlb rather than iterate over individual iovas.
*/
- for (; iova < iova_end; iova += SPAGE_SIZE)
- rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+ for (i = 0; i < iommu->num_mmu; i++)
+ for (; iova < iova_end; iova += SPAGE_SIZE)
+ rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
}
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
- return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+ bool active = true;
+ int i;
+
+ for (i = 0; i < iommu->num_mmu; i++)
+ active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+ RK_MMU_STATUS_STALL_ACTIVE);
+
+ return active;
}
static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
- return rk_iommu_read(iommu, RK_MMU_STATUS) &
- RK_MMU_STATUS_PAGING_ENABLED;
+ bool enable = true;
+ int i;
+
+ for (i = 0; i < iommu->num_mmu; i++)
+ enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+ RK_MMU_STATUS_PAGING_ENABLED);
+
+ return enable;
}
static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
if (rk_iommu_is_stall_active(iommu))
return 0;
@@ -324,15 +348,16 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
if (ret)
- dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
- rk_iommu_read(iommu, RK_MMU_STATUS));
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
return ret;
}
static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
if (!rk_iommu_is_stall_active(iommu))
return 0;
@@ -341,15 +366,16 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu)
ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
if (ret)
- dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
- rk_iommu_read(iommu, RK_MMU_STATUS));
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
return ret;
}
static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
if (rk_iommu_is_paging_enabled(iommu))
return 0;
@@ -358,15 +384,16 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu)
ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
if (ret)
- dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
- rk_iommu_read(iommu, RK_MMU_STATUS));
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
return ret;
}
static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
if (!rk_iommu_is_paging_enabled(iommu))
return 0;
@@ -375,41 +402,49 @@ static int rk_iommu_disable_paging(struct rk_iommu *iommu)
ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
if (ret)
- dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
- rk_iommu_read(iommu, RK_MMU_STATUS));
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
return ret;
}
static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
u32 dte_addr;
/*
* Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
* and verifying that upper 5 nybbles are read back.
*/
- rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
- dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
- if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
- dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
- return -EFAULT;
+ dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
+ if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+ return -EFAULT;
+ }
}
rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
- ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
- FORCE_RESET_TIMEOUT);
- if (ret)
- dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+ for (i = 0; i < iommu->num_mmu; i++) {
+ ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
+ FORCE_RESET_TIMEOUT);
+ if (ret) {
+ dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+ return ret;
+ }
+ }
- return ret;
+ return 0;
}
-static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
+ void __iomem *base = iommu->bases[index];
u32 dte_index, pte_index, page_offset;
u32 mmu_dte_addr;
phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
@@ -425,7 +460,7 @@ static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
pte_index = rk_iova_pte_index(iova);
page_offset = rk_iova_page_offset(iova);
- mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
@@ -460,51 +495,56 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
u32 status;
u32 int_status;
dma_addr_t iova;
+ irqreturn_t ret = IRQ_NONE;
+ int i;
- int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
- if (int_status == 0)
- return IRQ_NONE;
+ for (i = 0; i < iommu->num_mmu; i++) {
+ int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
+ if (int_status == 0)
+ continue;
- iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+ ret = IRQ_HANDLED;
+ iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
- if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
- int flags;
+ if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
+ int flags;
- status = rk_iommu_read(iommu, RK_MMU_STATUS);
- flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
- IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+ status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
+ flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+ IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
- dev_err(iommu->dev, "Page fault at %pad of type %s\n",
- &iova,
- (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+ dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+ &iova,
+ (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
- log_iova(iommu, iova);
+ log_iova(iommu, i, iova);
- /*
- * Report page fault to any installed handlers.
- * Ignore the return code, though, since we always zap cache
- * and clear the page fault anyway.
- */
- if (iommu->domain)
- report_iommu_fault(iommu->domain, iommu->dev, iova,
- flags);
- else
- dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+ /*
+ * Report page fault to any installed handlers.
+ * Ignore the return code, though, since we always zap cache
+ * and clear the page fault anyway.
+ */
+ if (iommu->domain)
+ report_iommu_fault(iommu->domain, iommu->dev, iova,
+ flags);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
- rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
- rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
- }
+ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
+ }
- if (int_status & RK_MMU_IRQ_BUS_ERROR)
- dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
+ if (int_status & RK_MMU_IRQ_BUS_ERROR)
+ dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
- if (int_status & ~RK_MMU_IRQ_MASK)
- dev_err(iommu->dev, "unexpected int_status: %#08x\n",
- int_status);
+ if (int_status & ~RK_MMU_IRQ_MASK)
+ dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+ int_status);
- rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
+ }
- return IRQ_HANDLED;
+ return ret;
}
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -746,7 +786,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
- int ret;
+ int ret, i;
phys_addr_t dte_addr;
/*
@@ -773,9 +813,11 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
return ret;
dte_addr = virt_to_phys(rk_domain->dt);
- rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
- rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
- rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
+ rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+ }
ret = rk_iommu_enable_paging(iommu);
if (ret)
@@ -798,6 +840,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
+ int i;
/* Allow 'virtual devices' (eg drm) to detach from domain */
iommu = rk_iommu_from_dev(dev);
@@ -811,8 +854,10 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
/* Ignore error while disabling, just keep going */
rk_iommu_enable_stall(iommu);
rk_iommu_disable_paging(iommu);
- rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
- rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+ }
rk_iommu_disable_stall(iommu);
devm_free_irq(dev, iommu->irq, iommu);
@@ -988,6 +1033,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rk_iommu *iommu;
struct resource *res;
+ int i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -995,11 +1041,21 @@ static int rk_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iommu);
iommu->dev = dev;
+ iommu->num_mmu = 0;
+ /* One slot per MEM resource; num_mmu counts those that map successfully */
+ iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * pdev->num_resources,
+ GFP_KERNEL);
+ if (!iommu->bases)
+ return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iommu->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(iommu->base))
- return PTR_ERR(iommu->base);
+ for (i = 0; i < pdev->num_resources; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iommu->bases[i]))
+ continue;
+ iommu->num_mmu++;
+ }
+ if (iommu->num_mmu == 0)
+ return PTR_ERR(iommu->bases[0]);
iommu->irq = platform_get_irq(pdev, 0);
if (iommu->irq < 0) {
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index fb50911b3940..3e124793e224 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -32,14 +32,6 @@ config ARM_GIC_V3_ITS
bool
select PCI_MSI_IRQ_DOMAIN
-config HISILICON_IRQ_MBIGEN
- bool "Support mbigen interrupt controller"
- default n
- depends on ARM_GIC_V3 && ARM_GIC_V3_ITS && GENERIC_MSI_IRQ_DOMAIN
- help
- Enable the mbigen interrupt controller used on
- Hisilicon platform.
-
config ARM_NVIC
bool
select IRQ_DOMAIN
@@ -60,6 +52,17 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
+config ARMADA_370_XP_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+
+config ALPINE_MSI
+ bool
+ depends on PCI && PCI_MSI
+ select GENERIC_IRQ_CHIP
+ select PCI_MSI_IRQ_DOMAIN
+
config ATMEL_AIC_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -78,6 +81,11 @@ config I8259
bool
select IRQ_DOMAIN
+config BCM6345_L1_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
config BCM7038_L1_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -98,6 +106,12 @@ config DW_APB_ICTL
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config HISILICON_IRQ_MBIGEN
+ bool
+ select ARM_GIC_V3
+ select ARM_GIC_V3_ITS
+ select GENERIC_MSI_IRQ_DOMAIN
+
config IMGPDC_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -151,6 +165,11 @@ config ST_IRQCHIP
help
Enables SysCfg Controlled IRQs on STi based platforms.
+config TANGO_IRQ
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+
config TB10X_IRQC
bool
select IRQ_DOMAIN
@@ -160,6 +179,7 @@ config TS4800_IRQ
tristate "TS-4800 IRQ controller"
select IRQ_DOMAIN
depends on HAS_IOMEM
+ depends on SOC_IMX51 || COMPILE_TEST
help
Support for the TS-4800 FPGA IRQ controller
@@ -193,6 +213,8 @@ config KEYSTONE_IRQ
config MIPS_GIC
bool
+ select GENERIC_IRQ_IPI
+ select IRQ_DOMAIN_HIERARCHY
select MIPS_CM
config INGENIC_IRQ
@@ -218,3 +240,7 @@ config IRQ_MXS
def_bool y if MACH_ASM9260 || ARCH_MXS
select IRQ_DOMAIN
select STMP_DEVICE
+
+config MVEBU_ODMI
+ bool
+ select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 18caacb60d58..b03cfcbbac6b 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,11 +1,13 @@
obj-$(CONFIG_IRQCHIP) += irqchip.o
+obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o
+obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
+obj-$(CONFIG_ATH79) += irq-ath79-misc.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
-obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
@@ -28,6 +30,7 @@ obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-g
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
+obj-$(CONFIG_ARMADA_370_XP_IRQ) += irq-armada-370-xp.o
obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
obj-$(CONFIG_I8259) += irq-i8259.o
@@ -40,12 +43,14 @@ obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_ST_IRQCHIP) += irq-st.o
+obj-$(CONFIG_TANGO_IRQ) += irq-tango.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
+obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o
obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o
obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
@@ -59,3 +64,4 @@ obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o
obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
+obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
new file mode 100644
index 000000000000..25384255b30f
--- /dev/null
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -0,0 +1,293 @@
+/*
+ * Annapurna Labs MSIX support services
+ *
+ * Copyright (C) 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+#include <asm-generic/msi.h>
+
+/* MSIX message address format: local GIC target */
+#define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16)
+
+struct alpine_msix_data {
+ spinlock_t msi_map_lock;
+ phys_addr_t addr;
+ u32 spi_first; /* The SPI number that MSIs start at */
+ u32 num_spis; /* The number of SPIs reserved for MSIs */
+ unsigned long *msi_map;
+};
+
+static void alpine_msix_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void alpine_msix_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip alpine_msix_irq_chip = {
+ .name = "MSIx",
+ .irq_mask = alpine_msix_mask_msi_irq,
+ .irq_unmask = alpine_msix_unmask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req)
+{
+ int first;
+
+ spin_lock(&priv->msi_map_lock);
+
+ first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0,
+ num_req, 0);
+ if (first >= priv->num_spis) {
+ spin_unlock(&priv->msi_map_lock);
+ return -ENOSPC;
+ }
+
+ bitmap_set(priv->msi_map, first, num_req);
+
+ spin_unlock(&priv->msi_map_lock);
+
+ return priv->spi_first + first;
+}
+
+static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi,
+ int num_req)
+{
+ int first = sgi - priv->spi_first;
+
+ spin_lock(&priv->msi_map_lock);
+
+ bitmap_clear(priv->msi_map, first, num_req);
+
+ spin_unlock(&priv->msi_map_lock);
+}
+
+static void alpine_msix_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct alpine_msix_data *priv = irq_data_get_irq_chip_data(data);
+ phys_addr_t msg_addr = priv->addr;
+
+ msg_addr |= (data->hwirq << 3);
+
+ msg->address_hi = upper_32_bits(msg_addr);
+ msg->address_lo = lower_32_bits(msg_addr);
+ msg->data = 0;
+}
+
+static struct msi_domain_info alpine_msix_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX,
+ .chip = &alpine_msix_irq_chip,
+};
+
+static struct irq_chip middle_irq_chip = {
+ .name = "alpine_msix_middle",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = alpine_msix_compose_msi_msg,
+};
+
+static int alpine_msix_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, int sgi)
+{
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ if (!is_of_node(domain->parent->fwnode))
+ return -EINVAL;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = sgi;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+ return 0;
+}
+
+static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct alpine_msix_data *priv = domain->host_data;
+ int sgi, err, i;
+
+ sgi = alpine_msix_allocate_sgi(priv, nr_irqs);
+ if (sgi < 0)
+ return sgi;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = alpine_msix_gic_domain_alloc(domain, virq + i, sgi + i);
+ if (err)
+ goto err_sgi;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, sgi + i,
+ &middle_irq_chip, priv);
+ }
+
+ return 0;
+
+err_sgi:
+ while (--i >= 0)
+ irq_domain_free_irqs_parent(domain, virq, i);
+ alpine_msix_free_sgi(priv, sgi, nr_irqs);
+ return err;
+}
+
+static void alpine_msix_middle_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct alpine_msix_data *priv = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ alpine_msix_free_sgi(priv, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops alpine_msix_middle_domain_ops = {
+ .alloc = alpine_msix_middle_domain_alloc,
+ .free = alpine_msix_middle_domain_free,
+};
+
+static int alpine_msix_init_domains(struct alpine_msix_data *priv,
+ struct device_node *node)
+{
+ struct irq_domain *middle_domain, *msi_domain, *gic_domain;
+ struct device_node *gic_node;
+
+ gic_node = of_irq_find_parent(node);
+ if (!gic_node) {
+ pr_err("Failed to find the GIC node\n");
+ return -ENODEV;
+ }
+
+ gic_domain = irq_find_host(gic_node);
+ if (!gic_domain) {
+ pr_err("Failed to find the GIC domain\n");
+ return -ENXIO;
+ }
+
+ middle_domain = irq_domain_add_tree(NULL,
+ &alpine_msix_middle_domain_ops,
+ priv);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSIX middle domain\n");
+ return -ENOMEM;
+ }
+
+ middle_domain->parent = gic_domain;
+
+ msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ &alpine_msix_domain_info,
+ middle_domain);
+ if (!msi_domain) {
+ pr_err("Failed to create MSI domain\n");
+ irq_domain_remove(middle_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int alpine_msix_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct alpine_msix_data *priv;
+ struct resource res;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->msi_map_lock);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("Failed to allocate resource\n");
+ goto err_priv;
+ }
+
+ /*
+ * The 20 least significant bits of addr provide direct information
+ * regarding the interrupt destination.
+ *
+ * To select the primary GIC as the target GIC, bits [18:17] must be set
+ * to 0x0. In this case, bit 16 (SPI_TARGET_CLUSTER0) must be set.
+ */
+ priv->addr = res.start & GENMASK_ULL(63, 20);
+ priv->addr |= ALPINE_MSIX_SPI_TARGET_CLUSTER0;
+
+ if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) {
+ pr_err("Unable to parse MSI base\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) {
+ pr_err("Unable to parse MSI numbers\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ priv->msi_map = kzalloc(sizeof(*priv->msi_map) * BITS_TO_LONGS(priv->num_spis),
+ GFP_KERNEL);
+ if (!priv->msi_map) {
+ ret = -ENOMEM;
+ goto err_priv;
+ }
+
+ pr_debug("Registering %d msixs, starting at %d\n",
+ priv->num_spis, priv->spi_first);
+
+ ret = alpine_msix_init_domains(priv, node);
+ if (ret)
+ goto err_map;
+
+ return 0;
+
+err_map:
+ kfree(priv->msi_map);
+err_priv:
+ kfree(priv);
+ return ret;
+}
+IRQCHIP_DECLARE(alpine_msix, "al,alpine-msix", alpine_msix_init);
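
The address math above is dense, so here is a minimal userspace sketch of the same MSI-X message encoding: the 20 LSBs of the doorbell address carry destination information, bit 16 selects the primary GIC cluster, and each hwirq gets an 8-byte slot. The register base and hwirq values are invented for illustration, not taken from real hardware.

#include <stdint.h>
#include <stdio.h>

/* Illustrative recomputation of the Alpine MSI-X message encoding.
 * All concrete values are hypothetical.
 */
int main(void)
{
	uint64_t res_start = 0xfbe00000ULL;	/* made-up MMIO base */
	uint64_t addr = res_start & ~((1ULL << 20) - 1);	/* GENMASK_ULL(63, 20) */
	uint64_t hwirq = 5;			/* offset from spi_first */

	addr |= 1 << 16;			/* SPI_TARGET_CLUSTER0 */
	addr |= hwirq << 3;			/* per-interrupt 8-byte slot */

	printf("address_hi=0x%08x address_lo=0x%08x data=0\n",
	       (uint32_t)(addr >> 32), (uint32_t)addr);
	return 0;
}
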
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 3f3a8c3d2175..e7dc6cbda2a1 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -71,6 +71,7 @@ static u32 doorbell_mask_reg;
static int parent_irq;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
+static struct irq_domain *armada_370_xp_msi_inner_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
@@ -115,127 +116,102 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
#ifdef CONFIG_PCI_MSI
-static int armada_370_xp_alloc_msi(void)
-{
- int hwirq;
+static struct irq_chip armada_370_xp_msi_irq_chip = {
+ .name = "MPIC MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
- mutex_lock(&msi_used_lock);
- hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
- if (hwirq >= PCI_MSI_DOORBELL_NR)
- hwirq = -ENOSPC;
- else
- set_bit(hwirq, msi_used);
- mutex_unlock(&msi_used_lock);
+static struct msi_domain_info armada_370_xp_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &armada_370_xp_msi_irq_chip,
+};
- return hwirq;
+static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ msg->address_lo = lower_32_bits(msi_doorbell_addr);
+ msg->address_hi = upper_32_bits(msi_doorbell_addr);
+ msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START);
}
-static void armada_370_xp_free_msi(int hwirq)
+static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- mutex_lock(&msi_used_lock);
- if (!test_bit(hwirq, msi_used))
- pr_err("trying to free unused MSI#%d\n", hwirq);
- else
- clear_bit(hwirq, msi_used);
- mutex_unlock(&msi_used_lock);
+ return -EINVAL;
}
-static int armada_370_xp_setup_msi_irq(struct msi_controller *chip,
- struct pci_dev *pdev,
- struct msi_desc *desc)
-{
- struct msi_msg msg;
- int virq, hwirq;
+static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
+ .name = "MPIC MSI",
+ .irq_compose_msi_msg = armada_370_xp_compose_msi_msg,
+ .irq_set_affinity = armada_370_xp_msi_set_affinity,
+};
- /* We support MSI, but not MSI-X */
- if (desc->msi_attrib.is_msix)
- return -EINVAL;
+static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ int hwirq, i;
- hwirq = armada_370_xp_alloc_msi();
- if (hwirq < 0)
- return hwirq;
+ mutex_lock(&msi_used_lock);
- virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
- if (!virq) {
- armada_370_xp_free_msi(hwirq);
- return -EINVAL;
+ hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
+ 0, nr_irqs, 0);
+ if (hwirq >= PCI_MSI_DOORBELL_NR) {
+ mutex_unlock(&msi_used_lock);
+ return -ENOSPC;
}
- irq_set_msi_desc(virq, desc);
-
- msg.address_lo = msi_doorbell_addr;
- msg.address_hi = 0;
- msg.data = 0xf00 | (hwirq + 16);
-
- pci_write_msi_msg(virq, &msg);
- return 0;
-}
+ bitmap_set(msi_used, hwirq, nr_irqs);
+ mutex_unlock(&msi_used_lock);
-static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip,
- unsigned int irq)
-{
- struct irq_data *d = irq_get_irq_data(irq);
- unsigned long hwirq = d->hwirq;
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &armada_370_xp_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
- irq_dispose_mapping(irq);
- armada_370_xp_free_msi(hwirq);
+ return hwirq;
}
-static struct irq_chip armada_370_xp_msi_irq_chip = {
- .name = "armada_370_xp_msi_irq",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hw)
+static void armada_370_xp_msi_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
- irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
- handle_simple_irq);
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- return 0;
+ mutex_lock(&msi_used_lock);
+ bitmap_clear(msi_used, d->hwirq, nr_irqs);
+ mutex_unlock(&msi_used_lock);
}
-static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
- .map = armada_370_xp_msi_map,
+static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
+ .alloc = armada_370_xp_msi_alloc,
+ .free = armada_370_xp_msi_free,
};
static int armada_370_xp_msi_init(struct device_node *node,
phys_addr_t main_int_phys_base)
{
- struct msi_controller *msi_chip;
u32 reg;
- int ret;
msi_doorbell_addr = main_int_phys_base +
ARMADA_370_XP_SW_TRIG_INT_OFFS;
- msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
- if (!msi_chip)
+ armada_370_xp_msi_inner_domain =
+ irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+ &armada_370_xp_msi_domain_ops, NULL);
+ if (!armada_370_xp_msi_inner_domain)
return -ENOMEM;
- msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
- msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
- msi_chip->of_node = node;
-
armada_370_xp_msi_domain =
- irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
- &armada_370_xp_msi_irq_ops,
- NULL);
+ pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ &armada_370_xp_msi_domain_info,
+ armada_370_xp_msi_inner_domain);
if (!armada_370_xp_msi_domain) {
- kfree(msi_chip);
+ irq_domain_remove(armada_370_xp_msi_inner_domain);
return -ENOMEM;
}
- ret = of_pci_msi_chip_add(msi_chip);
- if (ret < 0) {
- irq_domain_remove(armada_370_xp_msi_domain);
- kfree(msi_chip);
- return ret;
- }
-
reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
| PCI_MSI_DOORBELL_MASK;
@@ -280,7 +256,7 @@ static int armada_xp_set_affinity(struct irq_data *d,
#endif
static struct irq_chip armada_370_xp_irq_chip = {
- .name = "armada_370_xp_irq",
+ .name = "MPIC",
.irq_mask = armada_370_xp_irq_mask,
.irq_mask_ack = armada_370_xp_irq_mask,
.irq_unmask = armada_370_xp_irq_unmask,
@@ -427,12 +403,12 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
continue;
if (is_chained) {
- irq = irq_find_mapping(armada_370_xp_msi_domain,
- msinr - 16);
+ irq = irq_find_mapping(armada_370_xp_msi_inner_domain,
+ msinr - PCI_MSI_DOORBELL_START);
generic_handle_irq(irq);
} else {
- irq = msinr - 16;
- handle_domain_irq(armada_370_xp_msi_domain,
+ irq = msinr - PCI_MSI_DOORBELL_START;
+ handle_domain_irq(armada_370_xp_msi_inner_domain,
irq, regs);
}
}
@@ -604,8 +580,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
armada_370_xp_mpic_domain =
irq_domain_add_linear(node, nr_irqs,
&armada_370_xp_mpic_irq_ops, NULL);
-
BUG_ON(!armada_370_xp_mpic_domain);
+ armada_370_xp_mpic_domain->bus_token = DOMAIN_BUS_WIRED;
/* Setup for the boot CPU */
armada_xp_mpic_perf_init();
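
A minimal sketch of the new armada_370_xp_compose_msi_msg() payload encoding, assuming PCI_MSI_DOORBELL_START is 16 (matching the open-coded "+ 16" this patch removes) and using a made-up doorbell address.

#include <stdint.h>
#include <stdio.h>

#define PCI_MSI_DOORBELL_START 16	/* assumed, per the removed "+ 16" */

/* Sketch of the MPIC MSI message: data is 0xf00 plus the doorbell
 * number; the address points at the SW trigger interrupt register.
 * msi_doorbell_addr is an invented example value.
 */
int main(void)
{
	uint64_t msi_doorbell_addr = 0xd0020a04ULL;
	unsigned int hwirq = 3;
	uint32_t data = 0xf00 | (hwirq + PCI_MSI_DOORBELL_START);

	printf("address_hi=0x%08x address_lo=0x%08x data=0x%03x\n",
	       (uint32_t)(msi_doorbell_addr >> 32),
	       (uint32_t)msi_doorbell_addr, data);
	return 0;
}
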
diff --git a/drivers/irqchip/irq-ath79-cpu.c b/drivers/irqchip/irq-ath79-cpu.c
new file mode 100644
index 000000000000..befe93c5a51a
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-cpu.c
@@ -0,0 +1,97 @@
+/*
+ * Atheros AR71xx/AR724x/AR913x specific interrupt handling
+ *
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mach-ath79/ath79.h>
+
+/*
+ * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
+ * these devices typically allocate coherent DMA memory, however the
+ * DMA controller may still have some unsynchronized data in the FIFO.
+ * Issue a flush in the handlers to ensure that the driver sees
+ * the update.
+ *
+ * This array maps the interrupt lines to the DDR write buffer channels.
+ */
+
+static unsigned irq_wb_chan[8] = {
+ -1, -1, -1, -1, -1, -1, -1, -1,
+};
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long pending;
+ int irq;
+
+ pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+ if (!pending) {
+ spurious_interrupt();
+ return;
+ }
+
+ pending >>= CAUSEB_IP;
+ while (pending) {
+ irq = fls(pending) - 1;
+ if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
+ ath79_ddr_wb_flush(irq_wb_chan[irq]);
+ do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+ pending &= ~BIT(irq);
+ }
+}
+
+static int __init ar79_cpu_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ int err, i, count;
+
+ /* Fill the irq_wb_chan table */
+ count = of_count_phandle_with_args(
+ node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
+
+ for (i = 0; i < count; i++) {
+ struct of_phandle_args args;
+ u32 irq = i;
+
+ of_property_read_u32_index(
+ node, "qca,ddr-wb-channel-interrupts", i, &irq);
+ if (irq >= ARRAY_SIZE(irq_wb_chan))
+ continue;
+
+ err = of_parse_phandle_with_args(
+ node, "qca,ddr-wb-channels",
+ "#qca,ddr-wb-channel-cells",
+ i, &args);
+ if (err)
+ return err;
+
+ irq_wb_chan[irq] = args.args[0];
+ }
+
+ return mips_cpu_irq_of_init(node, parent);
+}
+IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
+ ar79_cpu_intc_of_init);
+
+void __init ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3)
+{
+ irq_wb_chan[2] = irq_wb_chan2;
+ irq_wb_chan[3] = irq_wb_chan3;
+ mips_cpu_irq_init();
+}
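
A userspace sketch of the plat_irq_dispatch() loop above: pending lines are served from highest to lowest, and a mapped DDR write buffer channel is flushed before dispatch. The pending mask and channel table are illustrative.

#include <stdio.h>

/* fls() stand-in: index of the highest set bit, 1-based, 0 if none. */
static int fls_ulong(unsigned long v)
{
	return v ? (int)(8 * sizeof(v)) - __builtin_clzl(v) : 0;
}

int main(void)
{
	unsigned long pending = 0x2c;	/* hypothetical: IP lines 2, 3, 5 */
	int irq_wb_chan[8] = { -1, -1, 3, 2, -1, -1, -1, -1 };

	while (pending) {
		int irq = fls_ulong(pending) - 1;

		if (irq < 8 && irq_wb_chan[irq] != -1)
			printf("flush DDR WB channel %d\n", irq_wb_chan[irq]);
		printf("dispatch CPU IRQ %d\n", irq);
		pending &= ~(1UL << irq);
	}
	return 0;
}
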
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
new file mode 100644
index 000000000000..aa7290784636
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -0,0 +1,189 @@
+/*
+ * Atheros AR71xx/AR724x/AR913x MISC interrupt controller
+ *
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define AR71XX_RESET_REG_MISC_INT_STATUS 0
+#define AR71XX_RESET_REG_MISC_INT_ENABLE 4
+
+#define ATH79_MISC_IRQ_COUNT 32
+
+static void ath79_misc_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ void __iomem *base = domain->host_data;
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+
+ pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+ if (!pending) {
+ spurious_interrupt();
+ chained_irq_exit(chip, desc);
+ return;
+ }
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_irq(irq_linear_revmap(domain, bit));
+ pending &= ~BIT(bit);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void ar71xx_misc_irq_unmask(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(t | BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar71xx_misc_irq_mask(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar724x_misc_irq_ack(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+ __raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+}
+
+static struct irq_chip ath79_misc_irq_chip = {
+ .name = "MISC",
+ .irq_unmask = ar71xx_misc_irq_unmask,
+ .irq_mask = ar71xx_misc_irq_mask,
+};
+
+static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static const struct irq_domain_ops misc_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = misc_map,
+};
+
+static void __init ath79_misc_intc_domain_init(
+ struct irq_domain *domain, int irq)
+{
+ void __iomem *base = domain->host_data;
+
+ /* Disable and clear all interrupts */
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+ irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
+}
+
+static int __init ath79_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ void __iomem *base;
+ int irq;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ pr_err("Failed to get MISC IRQ\n");
+ return -EINVAL;
+ }
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("Failed to get MISC IRQ registers\n");
+ return -ENOMEM;
+ }
+
+ domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT,
+ &misc_irq_domain_ops, base);
+ if (!domain) {
+ pr_err("Failed to add MISC irqdomain\n");
+ return -EINVAL;
+ }
+
+ ath79_misc_intc_domain_init(domain, irq);
+ return 0;
+}
+
+static int __init ar7100_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+ return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+ ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+ return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+ ar7240_misc_intc_of_init);
+
+void __init ath79_misc_irq_init(void __iomem *regs, int irq,
+ int irq_base, bool is_ar71xx)
+{
+ struct irq_domain *domain;
+
+ if (is_ar71xx)
+ ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+ else
+ ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+
+ domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT,
+ irq_base, 0, &misc_irq_domain_ops, regs);
+ if (!domain)
+ panic("Failed to create MISC irqdomain");
+
+ ath79_misc_intc_domain_init(domain, irq);
+}
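
The mask/unmask paths above end with a dummy read to flush the posted MMIO write; a minimal sketch of that read-modify-write-readback pattern follows, with a plain buffer standing in for the ioremapped registers.

#include <stdint.h>

#define MISC_INT_ENABLE 1		/* word index of the enable register */

static uint32_t regs[2];		/* stand-in for the ioremapped block */

/* Set or clear one enable bit, then read back to flush the write. */
static void misc_set_enable(volatile uint32_t *base, unsigned int irq,
			    int enable)
{
	uint32_t t = base[MISC_INT_ENABLE];

	base[MISC_INT_ENABLE] = enable ? (t | 1u << irq) : (t & ~(1u << irq));
	(void)base[MISC_INT_ENABLE];	/* read back: flush posted write */
}

int main(void)
{
	misc_set_enable(regs, 5, 1);	/* unmask hwirq 5 */
	misc_set_enable(regs, 5, 0);	/* mask it again */
	return regs[MISC_INT_ENABLE] != 0;
}
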
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 37199b9b2cfa..28b26c80f4cf 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -80,16 +80,10 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
return 0;
}
-int aic_common_set_priority(int priority, unsigned *val)
+void aic_common_set_priority(int priority, unsigned *val)
{
- if (priority < AT91_AIC_IRQ_MIN_PRIORITY ||
- priority > AT91_AIC_IRQ_MAX_PRIORITY)
- return -EINVAL;
-
*val &= ~AT91_AIC_PRIOR;
*val |= priority;
-
- return 0;
}
int aic_common_irq_domain_xlate(struct irq_domain *d,
@@ -193,7 +187,7 @@ void __init aic_common_rtt_irq_fixup(struct device_node *root)
}
}
-void __init aic_common_irq_fixup(const struct of_device_id *matches)
+static void __init aic_common_irq_fixup(const struct of_device_id *matches)
{
struct device_node *root = of_find_node_by_path("/");
const struct of_device_id *match;
@@ -214,7 +208,8 @@ void __init aic_common_irq_fixup(const struct of_device_id *matches)
struct irq_domain *__init aic_common_of_init(struct device_node *node,
const struct irq_domain_ops *ops,
- const char *name, int nirqs)
+ const char *name, int nirqs,
+ const struct of_device_id *matches)
{
struct irq_chip_generic *gc;
struct irq_domain *domain;
@@ -264,6 +259,7 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
}
aic_common_ext_irq_of_init(domain);
+ aic_common_irq_fixup(matches);
return domain;
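
With the range check gone, aic_common_set_priority() is a plain read-modify-write of the priority field; a minimal sketch, assuming AT91_AIC_PRIOR is the low three bits of the SMR.

#include <assert.h>

#define AT91_AIC_PRIOR 0x7u	/* assumed: 3-bit priority field mask */

/* Unconditional priority update, as the void version now does;
 * range checking is left to the xlate callers.
 */
static void set_priority(unsigned int *smr, int priority)
{
	*smr &= ~AT91_AIC_PRIOR;
	*smr |= (unsigned int)priority;
}

int main(void)
{
	unsigned int smr = 0x65;	/* hypothetical SMR contents */

	set_priority(&smr, 3);
	assert((smr & AT91_AIC_PRIOR) == 3);
	assert((smr & ~AT91_AIC_PRIOR) == 0x60);	/* other bits kept */
	return 0;
}
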
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
index 603f0a9d5411..af60376d50de 100644
--- a/drivers/irqchip/irq-atmel-aic-common.h
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -19,7 +19,7 @@
int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val);
-int aic_common_set_priority(int priority, unsigned *val);
+void aic_common_set_priority(int priority, unsigned *val);
int aic_common_irq_domain_xlate(struct irq_domain *d,
struct device_node *ctrlr,
@@ -30,12 +30,11 @@ int aic_common_irq_domain_xlate(struct irq_domain *d,
struct irq_domain *__init aic_common_of_init(struct device_node *node,
const struct irq_domain_ops *ops,
- const char *name, int nirqs);
+ const char *name, int nirqs,
+ const struct of_device_id *matches);
void __init aic_common_rtc_irq_fixup(struct device_node *root);
void __init aic_common_rtt_irq_fixup(struct device_node *root);
-void __init aic_common_irq_fixup(const struct of_device_id *matches);
-
#endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 8a0c7f288198..112e17c2768b 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -196,9 +196,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
irq_gc_lock(gc);
smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
- ret = aic_common_set_priority(intspec[2], &smr);
- if (!ret)
- irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
+ aic_common_set_priority(intspec[2], &smr);
+ irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
irq_gc_unlock(gc);
return ret;
@@ -248,12 +247,10 @@ static int __init aic_of_init(struct device_node *node,
return -EEXIST;
domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
- NR_AIC_IRQS);
+ NR_AIC_IRQS, aic_irq_fixups);
if (IS_ERR(domain))
return PTR_ERR(domain);
- aic_common_irq_fixup(aic_irq_fixups);
-
aic_domain = domain;
gc = irq_get_domain_generic_chip(domain, 0);
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 62bb840c613f..4f0d068e1abe 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -272,9 +272,8 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
irq_gc_lock(bgc);
irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
- ret = aic_common_set_priority(intspec[2], &smr);
- if (!ret)
- irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR);
+ aic_common_set_priority(intspec[2], &smr);
+ irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
irq_gc_unlock(bgc);
return ret;
@@ -312,12 +311,10 @@ static int __init aic5_of_init(struct device_node *node,
return -EEXIST;
domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
- nirqs);
+ nirqs, aic5_irq_fixups);
if (IS_ERR(domain))
return PTR_ERR(domain);
- aic_common_irq_fixup(aic5_irq_fixups);
-
aic5_domain = domain;
nchips = aic5_domain->revmap_size / 32;
for (i = 0; i < nchips; i++) {
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 963065a0d774..b6e950d4782a 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -229,7 +229,6 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu,
unsigned long secondary_startup_phys =
(unsigned long)virt_to_phys((void *)secondary_startup);
- dsb();
writel(secondary_startup_phys,
intc.base + LOCAL_MAILBOX3_SET0 + 16 * cpu);
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
new file mode 100644
index 000000000000..b844c89a9506
--- /dev/null
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -0,0 +1,364 @@
+/*
+ * Broadcom BCM6345 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Copyright 2015 Simon Arlott
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is based on the BCM7038 (which supports SMP) but with a single
+ * enable register instead of separate mask/set/clear registers.
+ *
+ * The BCM3380 has a similar mask/status register layout, but each pair
+ * of words is at separate locations (and SMP is not supported).
+ *
+ * ENABLE/STATUS words are packed next to each other for each CPU:
+ *
+ * BCM6368:
+ * 0x1000_0020: CPU0_W0_ENABLE
+ * 0x1000_0024: CPU0_W1_ENABLE
+ * 0x1000_0028: CPU0_W0_STATUS IRQs 32-63
+ * 0x1000_002c: CPU0_W1_STATUS IRQs 0-31
+ * 0x1000_0030: CPU1_W0_ENABLE
+ * 0x1000_0034: CPU1_W1_ENABLE
+ * 0x1000_0038: CPU1_W0_STATUS IRQs 32-63
+ * 0x1000_003c: CPU1_W1_STATUS IRQs 0-31
+ *
+ * BCM63168:
+ * 0x1000_0020: CPU0_W0_ENABLE
+ * 0x1000_0024: CPU0_W1_ENABLE
+ * 0x1000_0028: CPU0_W2_ENABLE
+ * 0x1000_002c: CPU0_W3_ENABLE
+ * 0x1000_0030: CPU0_W0_STATUS IRQs 96-127
+ * 0x1000_0034: CPU0_W1_STATUS IRQs 64-95
+ * 0x1000_0038: CPU0_W2_STATUS IRQs 32-63
+ * 0x1000_003c: CPU0_W3_STATUS IRQs 0-31
+ * 0x1000_0040: CPU1_W0_ENABLE
+ * 0x1000_0044: CPU1_W1_ENABLE
+ * 0x1000_0048: CPU1_W2_ENABLE
+ * 0x1000_004c: CPU1_W3_ENABLE
+ * 0x1000_0050: CPU1_W0_STATUS IRQs 96-127
+ * 0x1000_0054: CPU1_W1_STATUS IRQs 64-95
+ * 0x1000_0058: CPU1_W2_STATUS IRQs 32-63
+ * 0x1000_005c: CPU1_W3_STATUS IRQs 0-31
+ *
+ * IRQs are numbered in CPU native endian order
+ * (which is big-endian in these examples)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define IRQS_PER_WORD 32
+#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 2)
+
+struct bcm6345_l1_cpu;
+
+struct bcm6345_l1_chip {
+ raw_spinlock_t lock;
+ unsigned int n_words;
+ struct irq_domain *domain;
+ struct cpumask cpumask;
+ struct bcm6345_l1_cpu *cpus[NR_CPUS];
+};
+
+struct bcm6345_l1_cpu {
+ void __iomem *map_base;
+ unsigned int parent_irq;
+ u32 enable_cache[];
+};
+
+static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
+ unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+ return (1 * intc->n_words - word - 1) * sizeof(u32);
+#else
+ return (0 * intc->n_words + word) * sizeof(u32);
+#endif
+}
+
+static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
+ unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+ return (2 * intc->n_words - word - 1) * sizeof(u32);
+#else
+ return (1 * intc->n_words + word) * sizeof(u32);
+#endif
+}
+
+static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
+ struct irq_data *d)
+{
+ return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
+}
+
+static void bcm6345_l1_irq_handle(struct irq_desc *desc)
+{
+ struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
+ struct bcm6345_l1_cpu *cpu;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int idx;
+
+#ifdef CONFIG_SMP
+ cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+#else
+ cpu = intc->cpus[0];
+#endif
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < intc->n_words; idx++) {
+ int base = idx * IRQS_PER_WORD;
+ unsigned long pending;
+ irq_hw_number_t hwirq;
+ unsigned int irq;
+
+ pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
+ pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
+
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+ irq = irq_linear_revmap(intc->domain, base + hwirq);
+ if (irq)
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static inline void __bcm6345_l1_unmask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+ unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+ intc->cpus[cpu_idx]->enable_cache[word] |= mask;
+ __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+ intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static inline void __bcm6345_l1_mask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+ unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+ intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
+ __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+ intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static void bcm6345_l1_unmask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm6345_l1_unmask(d);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm6345_l1_mask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm6345_l1_mask(d);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static int bcm6345_l1_set_affinity(struct irq_data *d,
+ const struct cpumask *dest,
+ bool force)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+ unsigned int old_cpu = cpu_for_irq(intc, d);
+ unsigned int new_cpu;
+ struct cpumask valid;
+ unsigned long flags;
+ bool enabled;
+
+ if (!cpumask_and(&valid, &intc->cpumask, dest))
+ return -EINVAL;
+
+ new_cpu = cpumask_any_and(&valid, cpu_online_mask);
+ if (new_cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ dest = cpumask_of(new_cpu);
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ if (old_cpu != new_cpu) {
+ enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
+ if (enabled)
+ __bcm6345_l1_mask(d);
+ cpumask_copy(irq_data_get_affinity_mask(d), dest);
+ if (enabled)
+ __bcm6345_l1_unmask(d);
+ } else {
+ cpumask_copy(irq_data_get_affinity_mask(d), dest);
+ }
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static int __init bcm6345_l1_init_one(struct device_node *dn,
+ unsigned int idx,
+ struct bcm6345_l1_chip *intc)
+{
+ struct resource res;
+ resource_size_t sz;
+ struct bcm6345_l1_cpu *cpu;
+ unsigned int i, n_words;
+
+ if (of_address_to_resource(dn, idx, &res))
+ return -EINVAL;
+ sz = resource_size(&res);
+ n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+ if (!intc->n_words)
+ intc->n_words = n_words;
+ else if (intc->n_words != n_words)
+ return -EINVAL;
+
+ cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ cpu->map_base = ioremap(res.start, sz);
+ if (!cpu->map_base)
+ return -ENOMEM;
+
+ for (i = 0; i < n_words; i++) {
+ cpu->enable_cache[i] = 0;
+ __raw_writel(0, cpu->map_base + reg_enable(intc, i));
+ }
+
+ cpu->parent_irq = irq_of_parse_and_map(dn, idx);
+ if (!cpu->parent_irq) {
+ pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
+ return -EINVAL;
+ }
+ irq_set_chained_handler_and_data(cpu->parent_irq,
+ bcm6345_l1_irq_handle, intc);
+
+ return 0;
+}
+
+static struct irq_chip bcm6345_l1_irq_chip = {
+ .name = "bcm6345-l1",
+ .irq_mask = bcm6345_l1_mask,
+ .irq_unmask = bcm6345_l1_unmask,
+ .irq_set_affinity = bcm6345_l1_set_affinity,
+};
+
+static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(virq,
+ &bcm6345_l1_irq_chip, handle_percpu_irq);
+ irq_set_chip_data(virq, d->host_data);
+ return 0;
+}
+
+static const struct irq_domain_ops bcm6345_l1_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = bcm6345_l1_map,
+};
+
+static int __init bcm6345_l1_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ struct bcm6345_l1_chip *intc;
+ unsigned int idx;
+ int ret;
+
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
+
+ for_each_possible_cpu(idx) {
+ ret = bcm6345_l1_init_one(dn, idx, intc);
+ if (ret)
+ pr_err("failed to init intc L1 for cpu %d: %d\n",
+ idx, ret);
+ else
+ cpumask_set_cpu(idx, &intc->cpumask);
+ }
+
+ if (!cpumask_weight(&intc->cpumask)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ raw_spin_lock_init(&intc->lock);
+
+ intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ &bcm6345_l1_domain_ops,
+ intc);
+ if (!intc->domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
+ IRQS_PER_WORD * intc->n_words);
+ for_each_cpu(idx, &intc->cpumask) {
+ struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+ pr_info(" CPU%u at MMIO 0x%p (irq = %d)\n", idx,
+ cpu->map_base, cpu->parent_irq);
+ }
+
+ return 0;
+
+out_unmap:
+ for_each_possible_cpu(idx) {
+ struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+ if (cpu) {
+ if (cpu->map_base)
+ iounmap(cpu->map_base);
+ kfree(cpu);
+ }
+ }
+out_free:
+ kfree(intc);
+ return ret;
+}
+
+IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);
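
The endian-dependent offset math in reg_enable()/reg_status() is easy to misread; this sketch evaluates it for the BCM6368 layout from the header comment (n_words = 2, big-endian), where driver word 0 (IRQs 0-31) lands on the higher-addressed hardware word of each bank.

#include <stdio.h>

/* Offset math from reg_enable()/reg_status(), with sizeof(u32) == 4. */
static unsigned int reg_enable(unsigned int n_words, unsigned int word,
			       int big_endian)
{
	return big_endian ? (1 * n_words - word - 1) * 4
			  : (0 * n_words + word) * 4;
}

static unsigned int reg_status(unsigned int n_words, unsigned int word,
			       int big_endian)
{
	return big_endian ? (2 * n_words - word - 1) * 4
			  : (1 * n_words + word) * 4;
}

int main(void)
{
	unsigned int w;

	/* BCM6368: prints word 0 -> enable +0x04, status +0x0c and
	 * word 1 -> enable +0x00, status +0x08, matching the comment's
	 * W1_ENABLE/W1_STATUS holding IRQs 0-31 on big-endian.
	 */
	for (w = 0; w < 2; w++)
		printf("word %u: enable +0x%02x, status +0x%02x\n", w,
		       reg_enable(2, w, 1), reg_status(2, w, 1));
	return 0;
}
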
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index aa46eb280a7f..54c296401525 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -10,7 +10,8 @@
#include <linux/irqchip/arm-gic.h>
#define REALVIEW_SYS_LOCK_OFFSET 0x20
-#define REALVIEW_PB11MP_SYS_PLD_CTRL1 0x74
+#define REALVIEW_SYS_PLD_CTRL1 0x74
+#define REALVIEW_EB_REVB_SYS_PLD_CTRL1 0xD8
#define VERSATILE_LOCK_VAL 0xA05F
#define PLD_INTMODE_MASK (BIT(22)|BIT(23)|BIT(24))
#define PLD_INTMODE_LEGACY 0x0
@@ -18,26 +19,57 @@
#define PLD_INTMODE_NEW_NO_DCC BIT(23)
#define PLD_INTMODE_FIQ_ENABLE BIT(24)
+/* For some reason RealView EB Rev B moved this register */
+static const struct of_device_id syscon_pldset_of_match[] = {
+ {
+ .compatible = "arm,realview-eb11mp-revb-syscon",
+ .data = (void *)REALVIEW_EB_REVB_SYS_PLD_CTRL1,
+ },
+ {
+ .compatible = "arm,realview-eb11mp-revc-syscon",
+ .data = (void *)REALVIEW_SYS_PLD_CTRL1,
+ },
+ {
+ .compatible = "arm,realview-eb-syscon",
+ .data = (void *)REALVIEW_SYS_PLD_CTRL1,
+ },
+ {
+ .compatible = "arm,realview-pb11mp-syscon",
+ .data = (void *)REALVIEW_SYS_PLD_CTRL1,
+ },
+ {},
+};
+
static int __init
realview_gic_of_init(struct device_node *node, struct device_node *parent)
{
static struct regmap *map;
+ struct device_node *np;
+ const struct of_device_id *gic_id;
+ u32 pld1_ctrl;
+
+ np = of_find_matching_node_and_match(NULL, syscon_pldset_of_match,
+ &gic_id);
+ if (!np)
+ return -ENODEV;
+ pld1_ctrl = (u32)gic_id->data;
/* The PB11MPCore GIC needs to be configured in the syscon */
- map = syscon_regmap_lookup_by_compatible("arm,realview-pb11mp-syscon");
+ map = syscon_node_to_regmap(np);
if (!IS_ERR(map)) {
/* new irq mode with no DCC */
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
VERSATILE_LOCK_VAL);
- regmap_update_bits(map, REALVIEW_PB11MP_SYS_PLD_CTRL1,
+ regmap_update_bits(map, pld1_ctrl,
PLD_INTMODE_NEW_NO_DCC,
PLD_INTMODE_MASK);
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000);
- pr_info("TC11MP GIC: set up interrupt controller to NEW mode, no DCC\n");
+ pr_info("RealView GIC: set up interrupt controller to NEW mode, no DCC\n");
} else {
- pr_err("TC11MP GIC setup: could not find syscon\n");
- return -ENXIO;
+ pr_err("RealView GIC setup: could not find syscon\n");
+ return -ENODEV;
}
return gic_of_init(node, parent);
}
IRQCHIP_DECLARE(armtc11mp_gic, "arm,tc11mp-gic", realview_gic_of_init);
+IRQCHIP_DECLARE(armeb11mp_gic, "arm,eb11mp-gic", realview_gic_of_init);
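
The fix keys the PLD control register offset off the syscon compatible via of_device_id.data; a simplified userspace sketch of that match-data lookup follows (the structure here is a stand-in for the kernel's, and only two of the four compatibles are shown).

#include <stdio.h>
#include <string.h>

struct of_device_id { const char *compatible; unsigned long data; };

static const struct of_device_id syscon_pldset[] = {
	{ "arm,realview-eb11mp-revb-syscon", 0xD8 },	/* EB Rev B moved it */
	{ "arm,realview-pb11mp-syscon",      0x74 },	/* everyone else */
	{ NULL, 0 },
};

int main(void)
{
	const char *compat = "arm,realview-eb11mp-revb-syscon";
	const struct of_device_id *id;

	for (id = syscon_pldset; id->compatible; id++)
		if (!strcmp(id->compatible, compat))
			printf("SYS_PLD_CTRL1 at offset 0x%lX\n", id->data);
	return 0;
}
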
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index c779f83e511d..28f047c61baa 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -92,18 +92,6 @@ static struct msi_domain_info gicv2m_msi_domain_info = {
.chip = &gicv2m_msi_irq_chip,
};
-static int gicv2m_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- int ret;
-
- ret = irq_chip_set_affinity_parent(irq_data, mask, force);
- if (ret == IRQ_SET_MASK_OK)
- ret = IRQ_SET_MASK_OK_DONE;
-
- return ret;
-}
-
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
@@ -122,7 +110,7 @@ static struct irq_chip gicv2m_irq_chip = {
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
- .irq_set_affinity = gicv2m_set_affinity,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
.irq_compose_msi_msg = gicv2m_compose_msi_msg,
};
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 43dfd15c1dd2..39261798c59f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -103,7 +103,6 @@ struct its_device {
static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
-static struct device_node *gic_root_node;
static struct rdists *gic_rdists;
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
@@ -671,7 +670,7 @@ static int its_chunk_to_lpi(int chunk)
return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}
-static int its_lpi_init(u32 id_bits)
+static int __init its_lpi_init(u32 id_bits)
{
lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
@@ -1430,7 +1429,8 @@ static void its_enable_quirks(struct its_node *its)
gic_enable_quirks(iidr, its_quirks, its);
}
-static int its_probe(struct device_node *node, struct irq_domain *parent)
+static int __init its_probe(struct device_node *node,
+ struct irq_domain *parent)
{
struct resource res;
struct its_node *its;
@@ -1591,7 +1591,7 @@ static struct of_device_id its_device_id[] = {
{},
};
-int its_init(struct device_node *node, struct rdists *rdists,
+int __init its_init(struct device_node *node, struct rdists *rdists,
struct irq_domain *parent_domain)
{
struct device_node *np;
@@ -1607,8 +1607,6 @@ int its_init(struct device_node *node, struct rdists *rdists,
}
gic_rdists = rdists;
- gic_root_node = node;
-
its_alloc_lpi_tables();
its_lpi_init(rdists->id_bits);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d7be6ddc34f6..5b7d3c2129d8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -15,10 +15,12 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -38,6 +40,7 @@
struct redist_region {
void __iomem *redist_base;
phys_addr_t phys_base;
+ bool single_redist;
};
struct gic_chip_data {
@@ -434,6 +437,9 @@ static int gic_populate_rdist(void)
return 0;
}
+ if (gic_data.redist_regions[i].single_redist)
+ break;
+
if (gic_data.redist_stride) {
ptr += gic_data.redist_stride;
} else {
@@ -634,7 +640,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
else
gic_dist_wait_for_rwp();
- return IRQ_SET_MASK_OK;
+ return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity NULL
@@ -764,6 +770,15 @@ static int gic_irq_domain_translate(struct irq_domain *d,
return 0;
}
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
return -EINVAL;
}
@@ -811,17 +826,88 @@ static void gicv3_enable_quirks(void)
#endif
}
+static int __init gic_init_bases(void __iomem *dist_base,
+ struct redist_region *rdist_regs,
+ u32 nr_redist_regions,
+ u64 redist_stride,
+ struct fwnode_handle *handle)
+{
+ struct device_node *node;
+ u32 typer;
+ int gic_irqs;
+ int err;
+
+ if (!is_hyp_mode_available())
+ static_key_slow_dec(&supports_deactivate);
+
+ if (static_key_true(&supports_deactivate))
+ pr_info("GIC: Using split EOI/Deactivate mode\n");
+
+ gic_data.dist_base = dist_base;
+ gic_data.redist_regions = rdist_regs;
+ gic_data.nr_redist_regions = nr_redist_regions;
+ gic_data.redist_stride = redist_stride;
+
+ gicv3_enable_quirks();
+
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+ */
+ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
+ gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+ gic_irqs = GICD_TYPER_IRQS(typer);
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+ gic_data.irq_nr = gic_irqs;
+
+ gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
+ &gic_data);
+ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+
+ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ set_handle_irq(gic_handle_irq);
+
+ node = to_of_node(handle);
+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
+ node) /* Temp hack to prevent ITS init for ACPI */
+ its_init(node, &gic_data.rdists, gic_data.domain);
+
+ gic_smp_init();
+ gic_dist_init();
+ gic_cpu_init();
+ gic_cpu_pm_init();
+
+ return 0;
+
+out_free:
+ if (gic_data.domain)
+ irq_domain_remove(gic_data.domain);
+ free_percpu(gic_data.rdists.rdist);
+ return err;
+}
+
+static int __init gic_validate_dist_version(void __iomem *dist_base)
+{
+ u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+
+ if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
+ return -ENODEV;
+
+ return 0;
+}
+
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *dist_base;
struct redist_region *rdist_regs;
u64 redist_stride;
u32 nr_redist_regions;
- u32 typer;
- u32 reg;
- int gic_irqs;
- int err;
- int i;
+ int err, i;
dist_base = of_iomap(node, 0);
if (!dist_base) {
@@ -830,11 +916,10 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
return -ENXIO;
}
- reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
- if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
+ err = gic_validate_dist_version(dist_base);
+ if (err) {
pr_err("%s: no distributor detected, giving up\n",
node->full_name);
- err = -ENODEV;
goto out_unmap_dist;
}
@@ -865,63 +950,229 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
redist_stride = 0;
- if (!is_hyp_mode_available())
- static_key_slow_dec(&supports_deactivate);
+ err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
+ redist_stride, &node->fwnode);
+ if (!err)
+ return 0;
- if (static_key_true(&supports_deactivate))
- pr_info("GIC: Using split EOI/Deactivate mode\n");
+out_unmap_rdist:
+ for (i = 0; i < nr_redist_regions; i++)
+ if (rdist_regs[i].redist_base)
+ iounmap(rdist_regs[i].redist_base);
+ kfree(rdist_regs);
+out_unmap_dist:
+ iounmap(dist_base);
+ return err;
+}
- gic_data.dist_base = dist_base;
- gic_data.redist_regions = rdist_regs;
- gic_data.nr_redist_regions = nr_redist_regions;
- gic_data.redist_stride = redist_stride;
+IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
- gicv3_enable_quirks();
+#ifdef CONFIG_ACPI
+static void __iomem *dist_base;
+static struct redist_region *redist_regs __initdata;
+static u32 nr_redist_regions __initdata;
+static bool single_redist;
+
+static void __init
+gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
+{
+ static int count = 0;
+
+ redist_regs[count].phys_base = phys_base;
+ redist_regs[count].redist_base = redist_base;
+ redist_regs[count].single_redist = single_redist;
+ count++;
+}
+
+static int __init
+gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_redistributor *redist =
+ (struct acpi_madt_generic_redistributor *)header;
+ void __iomem *redist_base;
+
+ redist_base = ioremap(redist->base_address, redist->length);
+ if (!redist_base) {
+ pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
+ return -ENOMEM;
+ }
+
+ gic_acpi_register_redist(redist->base_address, redist_base);
+ return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+ u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
+ void __iomem *redist_base;
+
+ redist_base = ioremap(gicc->gicr_base_address, size);
+ if (!redist_base)
+ return -ENOMEM;
+
+ gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
+ return 0;
+}
+
+static int __init gic_acpi_collect_gicr_base(void)
+{
+ acpi_tbl_entry_handler redist_parser;
+ enum acpi_madt_type type;
+
+ if (single_redist) {
+ type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
+ redist_parser = gic_acpi_parse_madt_gicc;
+ } else {
+ type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
+ redist_parser = gic_acpi_parse_madt_redist;
+ }
+
+ /* Collect redistributor base addresses in GICR entries */
+ if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
+ return 0;
+
+ pr_info("No valid GICR entries exist\n");
+ return -ENODEV;
+}
+
+static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ /* Subtable presence means that redist exists; that's it */
+ return 0;
+}
+
+static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
/*
- * Find out how many interrupts are supported.
- * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+ * If the GICC is enabled and has a valid GICR base address, then it
+ * means the GICR base is presented via the GICC.
*/
- typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
- gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
- gic_irqs = GICD_TYPER_IRQS(typer);
- if (gic_irqs > 1020)
- gic_irqs = 1020;
- gic_data.irq_nr = gic_irqs;
+ if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
+ return 0;
- gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
- &gic_data);
- gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+ return -ENODEV;
+}
- if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+static int __init gic_acpi_count_gicr_regions(void)
+{
+ int count;
+
+ /*
+ * Count how many redistributor regions we have. It is not allowed
+ * to mix redistributor descriptions; GICR and GICC subtables have to be
+ * mutually exclusive.
+ */
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
+ gic_acpi_match_gicr, 0);
+ if (count > 0) {
+ single_redist = false;
+ return count;
+ }
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ gic_acpi_match_gicc, 0);
+ if (count > 0)
+ single_redist = true;
+
+ return count;
+}
+
+static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
+ struct acpi_probe_entry *ape)
+{
+ struct acpi_madt_generic_distributor *dist;
+ int count;
+
+ dist = (struct acpi_madt_generic_distributor *)header;
+ if (dist->version != ape->driver_data)
+ return false;
+
+ /* We need to do that exercise anyway, the sooner the better */
+ count = gic_acpi_count_gicr_regions();
+ if (count <= 0)
+ return false;
+
+ nr_redist_regions = count;
+ return true;
+}
+
+#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
+
+static int __init
+gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
+{
+ struct acpi_madt_generic_distributor *dist;
+ struct fwnode_handle *domain_handle;
+ int i, err;
+
+ /* Get distributor base address */
+ dist = (struct acpi_madt_generic_distributor *)header;
+ dist_base = ioremap(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE);
+ if (!dist_base) {
+ pr_err("Unable to map GICD registers\n");
+ return -ENOMEM;
+ }
+
+ err = gic_validate_dist_version(dist_base);
+ if (err) {
+ pr_err("No distributor detected at @%p, giving up", dist_base);
+ goto out_dist_unmap;
+ }
+
+ redist_regs = kzalloc(sizeof(*redist_regs) * nr_redist_regions,
+ GFP_KERNEL);
+ if (!redist_regs) {
err = -ENOMEM;
- goto out_free;
+ goto out_dist_unmap;
}
- set_handle_irq(gic_handle_irq);
+ err = gic_acpi_collect_gicr_base();
+ if (err)
+ goto out_redist_unmap;
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
- its_init(node, &gic_data.rdists, gic_data.domain);
+ domain_handle = irq_domain_alloc_fwnode(dist_base);
+ if (!domain_handle) {
+ err = -ENOMEM;
+ goto out_redist_unmap;
+ }
- gic_smp_init();
- gic_dist_init();
- gic_cpu_init();
- gic_cpu_pm_init();
+ err = gic_init_bases(dist_base, redist_regs, nr_redist_regions, 0,
+ domain_handle);
+ if (err)
+ goto out_fwhandle_free;
+ acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
return 0;
-out_free:
- if (gic_data.domain)
- irq_domain_remove(gic_data.domain);
- free_percpu(gic_data.rdists.rdist);
-out_unmap_rdist:
+out_fwhandle_free:
+ irq_domain_free_fwnode(domain_handle);
+out_redist_unmap:
for (i = 0; i < nr_redist_regions; i++)
- if (rdist_regs[i].redist_base)
- iounmap(rdist_regs[i].redist_base);
- kfree(rdist_regs);
-out_unmap_dist:
+ if (redist_regs[i].redist_base)
+ iounmap(redist_regs[i].redist_base);
+ kfree(redist_regs);
+out_dist_unmap:
iounmap(dist_base);
return err;
}
-
-IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
+IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
+ gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
+ gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
+ gic_acpi_init);
+#endif
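
A toy model of the gic_acpi_count_gicr_regions() policy above: dedicated GICR subtables win, and only when none exist do enabled GICCs carrying a GICR base count, one redistributor per CPU ("single_redist"). The MADT contents below are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

enum { GICC, GICR };
struct madt_entry { int type; bool enabled; unsigned long gicr_base; };

int main(void)
{
	const struct madt_entry madt[] = {
		{ GICC, true, 0x2f100000 },	/* hypothetical entries */
		{ GICC, true, 0x2f120000 },
	};
	const int n = sizeof(madt) / sizeof(madt[0]);
	bool single_redist = false;
	int i, count = 0;

	for (i = 0; i < n; i++)		/* first pass: GICR subtables */
		count += madt[i].type == GICR;
	if (!count) {			/* fall back to GICR-via-GICC */
		for (i = 0; i < n; i++)
			count += madt[i].type == GICC && madt[i].enabled &&
				 madt[i].gicr_base != 0;
		single_redist = count > 0;
	}
	printf("%d redistributor region(s), single_redist=%d\n",
	       count, single_redist);
	return 0;
}
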
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 8f9ebf714e2b..282344b95ec2 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -319,7 +319,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
writel_relaxed(val | bit, reg);
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
- return IRQ_SET_MASK_OK;
+ return IRQ_SET_MASK_OK_DONE;
}
#endif
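
gic_set_affinity() now returns IRQ_SET_MASK_OK_DONE, which is also why the gicv2m_set_affinity() wrapper could be dropped earlier in this patch: OK_DONE tells the core and any stacked caller that the chip fully handled the affinity update. A toy sketch of the distinction, with constants mirroring include/linux/irq.h.

#include <stdio.h>

/* Values mirror include/linux/irq.h. */
enum irq_set_mask_result {
	IRQ_SET_MASK_OK = 0,
	IRQ_SET_MASK_OK_NOCOPY,
	IRQ_SET_MASK_OK_DONE,
};

/* The bottom-level chip reports that the affinity change is final. */
static int gic_like_set_affinity(void)
{
	return IRQ_SET_MASK_OK_DONE;
}

/* A stacked parent (like GICv2m) can now just forward the result
 * instead of translating IRQ_SET_MASK_OK into OK_DONE itself.
 */
static int parent_set_affinity(void)
{
	return gic_like_set_affinity();
}

int main(void)
{
	if (parent_set_affinity() == IRQ_SET_MASK_OK_DONE)
		printf("core skips copying the mask; parents pass through\n");
	return 0;
}
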
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 4dd3eb8a40b3..d67baa231c13 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -239,8 +239,11 @@ static struct irq_domain_ops mbigen_domain_ops = {
static int mbigen_device_probe(struct platform_device *pdev)
{
struct mbigen_device *mgn_chip;
- struct resource *res;
+ struct platform_device *child;
struct irq_domain *domain;
+ struct device_node *np;
+ struct device *parent;
+ struct resource *res;
u32 num_pins;
mgn_chip = devm_kzalloc(&pdev->dev, sizeof(*mgn_chip), GFP_KERNEL);
@@ -254,23 +257,30 @@ static int mbigen_device_probe(struct platform_device *pdev)
if (IS_ERR(mgn_chip->base))
return PTR_ERR(mgn_chip->base);
- if (of_property_read_u32(pdev->dev.of_node, "num-pins", &num_pins) < 0) {
- dev_err(&pdev->dev, "No num-pins property\n");
- return -EINVAL;
- }
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ continue;
- domain = platform_msi_create_device_domain(&pdev->dev, num_pins,
- mbigen_write_msg,
- &mbigen_domain_ops,
- mgn_chip);
+ parent = platform_bus_type.dev_root;
+ child = of_platform_device_create(np, NULL, parent);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
- if (!domain)
- return -ENOMEM;
+ if (of_property_read_u32(child->dev.of_node, "num-pins",
+ &num_pins) < 0) {
+ dev_err(&pdev->dev, "No num-pins property\n");
+ return -EINVAL;
+ }
+
+ domain = platform_msi_create_device_domain(&child->dev, num_pins,
+ mbigen_write_msg,
+ &mbigen_domain_ops,
+ mgn_chip);
+ if (!domain)
+ return -ENOMEM;
+ }
platform_set_drvdata(pdev, mgn_chip);
-
- dev_info(&pdev->dev, "Allocated %d MSIs\n", num_pins);
-
return 0;
}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9e17ef27a183..4dffccf532a2 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -29,16 +29,32 @@ struct gic_pcpu_mask {
DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};
+struct gic_irq_spec {
+ enum {
+ GIC_DEVICE,
+ GIC_IPI
+ } type;
+
+ union {
+ struct cpumask *ipimask;
+ unsigned int hwirq;
+ };
+};
+
static unsigned long __gic_base_addr;
+
static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
+static struct irq_domain *gic_dev_domain;
+static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static void __gic_irq_dispatch(void);
@@ -264,9 +280,11 @@ static void gic_bind_eic_interrupt(int irq, int set)
GIC_VPE_EIC_SS(irq), set);
}
-void gic_send_ipi(unsigned int intr)
+static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
- gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
+ irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
+
+ gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}
int gic_get_c0_compare_int(void)
@@ -449,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
/* Update the pcpu_masks */
- for (i = 0; i < NR_CPUS; i++)
+ for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
@@ -479,6 +497,7 @@ static struct irq_chip gic_edge_irq_controller = {
#ifdef CONFIG_SMP
.irq_set_affinity = gic_set_affinity,
#endif
+ .ipi_send_single = gic_send_ipi,
};
static void gic_handle_local_int(bool chained)
@@ -572,83 +591,6 @@ static void gic_irq_dispatch(struct irq_desc *desc)
gic_handle_shared_int(true);
}
-#ifdef CONFIG_MIPS_GIC_IPI
-static int gic_resched_int_base;
-static int gic_call_int_base;
-
-unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
-{
- return gic_resched_int_base + cpu;
-}
-
-unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
-{
- return gic_call_int_base + cpu;
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
- scheduler_ipi();
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
- generic_smp_call_function_interrupt();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
- .handler = ipi_resched_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI resched"
-};
-
-static struct irqaction irq_call = {
- .handler = ipi_call_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI call"
-};
-
-static __init void gic_ipi_init_one(unsigned int intr, int cpu,
- struct irqaction *action)
-{
- int virq = irq_create_mapping(gic_irq_domain,
- GIC_SHARED_TO_HWIRQ(intr));
- int i;
-
- gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
- for (i = 0; i < NR_CPUS; i++)
- clear_bit(intr, pcpu_masks[i].pcpu_mask);
- set_bit(intr, pcpu_masks[cpu].pcpu_mask);
-
- irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
-
- irq_set_handler(virq, handle_percpu_irq);
- setup_irq(virq, action);
-}
-
-static __init void gic_ipi_init(void)
-{
- int i;
-
- /* Use last 2 * NR_CPUS interrupts as IPIs */
- gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
- gic_call_int_base = gic_resched_int_base - nr_cpu_ids;
-
- for (i = 0; i < nr_cpu_ids; i++) {
- gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
- gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
- }
-}
-#else
-static inline void gic_ipi_init(void)
-{
-}
-#endif
-
static void __init gic_basic_init(void)
{
unsigned int i;
@@ -753,19 +695,21 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
}
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
+ irq_hw_number_t hw, unsigned int vpe)
{
int intr = GIC_HWIRQ_TO_SHARED(hw);
unsigned long flags;
+ int i;
irq_set_chip_and_handler(virq, &gic_level_irq_controller,
handle_level_irq);
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
- /* Map to VPE 0 by default */
- gic_map_to_vpe(intr, 0);
- set_bit(intr, pcpu_masks[0].pcpu_mask);
+ gic_map_to_vpe(intr, vpe);
+ for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
+ clear_bit(intr, pcpu_masks[i].pcpu_mask);
+ set_bit(intr, pcpu_masks[vpe].pcpu_mask);
spin_unlock_irqrestore(&gic_lock, flags);
return 0;
@@ -776,10 +720,93 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
{
if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
return gic_local_irq_domain_map(d, virq, hw);
- return gic_shared_irq_domain_map(d, virq, hw);
+ return gic_shared_irq_domain_map(d, virq, hw, 0);
}
-static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct gic_irq_spec *spec = arg;
+ irq_hw_number_t hwirq, base_hwirq;
+ int cpu, ret, i;
+
+ if (spec->type == GIC_DEVICE) {
+ /* verify that it doesn't conflict with an IPI irq */
+ if (test_bit(spec->hwirq, ipi_resrv))
+ return -EBUSY;
+ } else {
+ base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
+ if (base_hwirq == gic_shared_intrs)
+ return -ENOMEM;
+
+ /* check that we have enough space */
+ for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
+ if (!test_bit(i, ipi_resrv))
+ return -EBUSY;
+ }
+ bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);
+
+ /* map the hwirq for each cpu consecutively */
+ i = 0;
+ for_each_cpu(cpu, spec->ipimask) {
+ hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
+
+ ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
+ &gic_edge_irq_controller,
+ NULL);
+ if (ret)
+ goto error;
+
+ ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
+ if (ret)
+ goto error;
+
+ i++;
+ }
+
+ /*
+ * tell the parent about the base hwirq we allocated so it can
+ * set its own domain data
+ */
+ spec->hwirq = base_hwirq;
+ }
+
+ return 0;
+error:
+ bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
+ return ret;
+}
+
+static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ irq_hw_number_t base_hwirq;
+ struct irq_data *data;
+
+ data = irq_get_irq_data(virq);
+ if (!data)
+ return;
+
+ base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
+ bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
+}
+
+static int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ /* this domain shouldn't be accessed directly */
+ return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+ .map = gic_irq_domain_map,
+ .alloc = gic_irq_domain_alloc,
+ .free = gic_irq_domain_free,
+ .match = gic_irq_domain_match,
+};
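
For a GIC_IPI request, gic_irq_domain_alloc() above takes the first run of reserved shared interrupts from ipi_resrv and maps one hwirq per target CPU. A standalone sketch of that reservation step, assuming a plain 64-bit word in place of the kernel bitmap API and using the corrected base_hwirq + nr_irqs bound:

/* Standalone sketch of the GIC_IPI reservation step; not kernel code. */
#include <stdio.h>

#define NBITS 64

static unsigned long long resrv; /* bit i set => shared intr i is IPI-reserved */

/* Claim nr consecutive reserved interrupts, as gic_irq_domain_alloc() does. */
static int claim_ipis(int nr, int *base)
{
	int first = -1, i;

	for (i = 0; i < NBITS; i++)
		if (resrv & (1ULL << i)) { first = i; break; }
	if (first < 0)
		return -1;			/* -ENOMEM in the driver */
	for (i = first; i < first + nr; i++)
		if (i >= NBITS || !(resrv & (1ULL << i)))
			return -2;		/* -EBUSY: hole in the window */
	for (i = first; i < first + nr; i++)
		resrv &= ~(1ULL << i);		/* bitmap_clear() equivalent */
	*base = first;
	return 0;
}

int main(void)
{
	int base;

	resrv = 0xfULL << 56;			/* pretend 4 IPIs were reserved */
	if (!claim_ipis(4, &base))
		printf("base hwirq %d, one per target CPU from there\n", base);
	return 0;
}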
+
+static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_type)
@@ -798,9 +825,130 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
return 0;
}
-static const struct irq_domain_ops gic_irq_domain_ops = {
- .map = gic_irq_domain_map,
- .xlate = gic_irq_domain_xlate,
+static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_fwspec *fwspec = arg;
+ struct gic_irq_spec spec = {
+ .type = GIC_DEVICE,
+ .hwirq = fwspec->param[1],
+ };
+ int i, ret;
+ bool is_shared = fwspec->param[0] == GIC_SHARED;
+
+ if (is_shared) {
+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_hw_number_t hwirq;
+
+ if (is_shared)
+ hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
+ else
+ hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
+
+ ret = irq_domain_set_hwirq_and_chip(d, virq + i,
+ hwirq,
+ &gic_level_irq_controller,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ /* no real allocation is done for dev irqs, so no need to free anything */
+ return;
+}
+
+static const struct irq_domain_ops gic_dev_domain_ops = {
+ .xlate = gic_dev_domain_xlate,
+ .alloc = gic_dev_domain_alloc,
+ .free = gic_dev_domain_free,
+};
+
+static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ /*
+ * There's nothing to translate here. hwirq is dynamically allocated and
+ * the irq type is always edge triggered.
+ */
+ *out_hwirq = 0;
+ *out_type = IRQ_TYPE_EDGE_RISING;
+
+ return 0;
+}
+
+static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct cpumask *ipimask = arg;
+ struct gic_irq_spec spec = {
+ .type = GIC_IPI,
+ .ipimask = ipimask
+ };
+ int ret, i;
+
+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+ if (ret)
+ return ret;
+
+ /* the parent should have set spec.hwirq to the base_hwirq it allocated */
+ for (i = 0; i < nr_irqs; i++) {
+ ret = irq_domain_set_hwirq_and_chip(d, virq + i,
+ GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
+ &gic_edge_irq_controller,
+ NULL);
+ if (ret)
+ goto error;
+
+ ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+error:
+ irq_domain_free_irqs_parent(d, virq, nr_irqs);
+ return ret;
+}
+
+static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ irq_domain_free_irqs_parent(d, virq, nr_irqs);
+}
+
+static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ bool is_ipi;
+
+ switch (bus_token) {
+ case DOMAIN_BUS_IPI:
+ is_ipi = d->bus_token == bus_token;
+ return to_of_node(d->fwnode) == node && is_ipi;
+ default:
+ return 0;
+ }
+}
+
+static const struct irq_domain_ops gic_ipi_domain_ops = {
+ .xlate = gic_ipi_domain_xlate,
+ .alloc = gic_ipi_domain_alloc,
+ .free = gic_ipi_domain_free,
+ .match = gic_ipi_domain_match,
};
static void __init __gic_init(unsigned long gic_base_addr,
@@ -809,6 +957,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
struct device_node *node)
{
unsigned int gicconfig;
+ unsigned int v[2];
__gic_base_addr = gic_base_addr;
@@ -864,9 +1013,32 @@ static void __init __gic_init(unsigned long gic_base_addr,
if (!gic_irq_domain)
panic("Failed to add GIC IRQ domain");
- gic_basic_init();
+ gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+ node, &gic_dev_domain_ops, NULL);
+ if (!gic_dev_domain)
+ panic("Failed to add GIC DEV domain");
+
+ gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+ node, &gic_ipi_domain_ops, NULL);
+ if (!gic_ipi_domain)
+ panic("Failed to add GIC IPI domain");
- gic_ipi_init();
+ gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
+
+ if (node &&
+ !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+ bitmap_set(ipi_resrv, v[0], v[1]);
+ } else {
+ /* Make the last 2 * gic_vpes available for IPIs */
+ bitmap_set(ipi_resrv,
+ gic_shared_intrs - 2 * gic_vpes,
+ 2 * gic_vpes);
+ }
+
+ gic_basic_init();
}
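
Unless the optional "mti,reserved-ipi-vectors" property overrides it, __gic_init() reserves the last 2 * gic_vpes shared interrupts for IPIs, mirroring the reschedule/call pair per VPE that the removed gic_ipi_init() set up by hand. An illustration with assumed example values:

/* Illustration only: the default IPI window computed in __gic_init(). */
#include <stdio.h>

int main(void)
{
	int gic_shared_intrs = 256, gic_vpes = 4;	/* assumed values */
	int first = gic_shared_intrs - 2 * gic_vpes;	/* bitmap_set() offset */
	int count = 2 * gic_vpes;			/* bitmap_set() length */

	/* Two IPIs per VPE: one reschedule and one call-function interrupt. */
	printf("reserve shared intrs [%d, %d)\n", first, first + count);
	return 0;
}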
void __init gic_init(unsigned long gic_base_addr,
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
new file mode 100644
index 000000000000..b4d367868dbb
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "GIC-ODMI: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define GICP_ODMIN_SET 0x40
+#define GICP_ODMI_INT_NUM_SHIFT 12
+#define GICP_ODMIN_GM_EP_R0 0x110
+#define GICP_ODMIN_GM_EP_R1 0x114
+#define GICP_ODMIN_GM_EA_R0 0x108
+#define GICP_ODMIN_GM_EA_R1 0x118
+
+/*
+ * We don't support group events, so each frame simply provides
+ * 8 interrupts.
+ */
+#define NODMIS_SHIFT 3
+#define NODMIS_PER_FRAME (1 << NODMIS_SHIFT)
+#define NODMIS_MASK (NODMIS_PER_FRAME - 1)
+
+struct odmi_data {
+ struct resource res;
+ void __iomem *base;
+ unsigned int spi_base;
+};
+
+static struct odmi_data *odmis;
+static unsigned long *odmis_bm;
+static unsigned int odmis_count;
+
+/* Protects odmis_bm */
+static DEFINE_SPINLOCK(odmis_bm_lock);
+
+static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct odmi_data *odmi;
+ phys_addr_t addr;
+ unsigned int odmin;
+
+ if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
+ return;
+
+ odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
+ odmin = d->hwirq & NODMIS_MASK;
+
+ addr = odmi->res.start + GICP_ODMIN_SET;
+
+ msg->address_hi = upper_32_bits(addr);
+ msg->address_lo = lower_32_bits(addr);
+ msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
+}
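
With NODMIS_SHIFT = 3, the upper bits of an ODMI hwirq select the frame and the low three bits select the interrupt within it; odmi_compose_msi_msg() then points the MSI doorbell at GICP_ODMIN_SET in that frame. A sketch with an assumed frame base address:

/* Sketch of the ODMI hwirq -> (frame, odmin) -> MSI message mapping. */
#include <stdint.h>
#include <stdio.h>

#define NODMIS_SHIFT		3
#define NODMIS_MASK		((1u << NODMIS_SHIFT) - 1)
#define GICP_ODMIN_SET		0x40
#define GICP_ODMI_INT_NUM_SHIFT	12

int main(void)
{
	uint32_t hwirq = 11;			/* example hwirq */
	uint32_t frame = hwirq >> NODMIS_SHIFT;	/* 8 interrupts per frame */
	uint32_t odmin = hwirq & NODMIS_MASK;
	uint64_t frame_base = 0xf0300000ULL;	/* assumed frame address */
	uint64_t addr = frame_base + GICP_ODMIN_SET;

	/* What odmi_compose_msi_msg() puts in the MSI message. */
	printf("frame %u odmin %u\n", frame, odmin);
	printf("address_hi 0x%x address_lo 0x%x data 0x%x\n",
	       (unsigned)(addr >> 32), (unsigned)addr,
	       odmin << GICP_ODMI_INT_NUM_SHIFT);
	return 0;
}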
+
+static struct irq_chip odmi_irq_chip = {
+ .name = "ODMI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = odmi_compose_msi_msg,
+};
+
+static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct odmi_data *odmi = NULL;
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ unsigned int hwirq, odmin;
+ int ret;
+
+ spin_lock(&odmis_bm_lock);
+ hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
+ if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
+ spin_unlock(&odmis_bm_lock);
+ return -ENOSPC;
+ }
+
+ __set_bit(hwirq, odmis_bm);
+ spin_unlock(&odmis_bm_lock);
+
+ odmi = &odmis[hwirq >> NODMIS_SHIFT];
+ odmin = hwirq & NODMIS_MASK;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = GIC_SPI;
+ fwspec.param[1] = odmi->spi_base - 32 + odmin;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret) {
+ pr_err("Cannot allocate parent IRQ\n");
+ spin_lock(&odmis_bm_lock);
+ __clear_bit(hwirq, odmis_bm); /* release the hwirq claimed above */
+ spin_unlock(&odmis_bm_lock);
+ return ret;
+ }
+
+ /* Configure the interrupt line to be edge */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &odmi_irq_chip, NULL);
+
+ return 0;
+}
+
+static void odmi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
+ pr_err("Failed to teardown msi. Invalid hwirq %lu\n", d->hwirq);
+ return;
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+ /* Actually free the MSI */
+ spin_lock(&odmis_bm_lock);
+ __clear_bit(d->hwirq, odmis_bm);
+ spin_unlock(&odmis_bm_lock);
+}
+
+static const struct irq_domain_ops odmi_domain_ops = {
+ .alloc = odmi_irq_domain_alloc,
+ .free = odmi_irq_domain_free,
+};
+
+static struct irq_chip odmi_msi_irq_chip = {
+ .name = "ODMI",
+};
+
+static struct msi_domain_ops odmi_msi_ops = {
+};
+
+static struct msi_domain_info odmi_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &odmi_msi_ops,
+ .chip = &odmi_msi_irq_chip,
+};
+
+static int __init mvebu_odmi_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *inner_domain, *plat_domain;
+ int ret, i;
+
+ if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
+ return -EINVAL;
+
+ odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
+ if (!odmis)
+ return -ENOMEM;
+
+ odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
+ sizeof(long), GFP_KERNEL);
+ if (!odmis_bm) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (i = 0; i < odmis_count; i++) {
+ struct odmi_data *odmi = &odmis[i];
+
+ ret = of_address_to_resource(node, i, &odmi->res);
+ if (ret)
+ goto err_unmap;
+
+ odmi->base = of_io_request_and_map(node, i, "odmi");
+ if (IS_ERR(odmi->base)) {
+ ret = PTR_ERR(odmi->base);
+ goto err_unmap;
+ }
+
+ if (of_property_read_u32_index(node, "marvell,spi-base",
+ i, &odmi->spi_base)) {
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+ }
+
+ inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ odmis_count * NODMIS_PER_FRAME,
+ &odmi_domain_ops, NULL);
+ if (!inner_domain) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ inner_domain->parent = irq_find_host(parent);
+
+ plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+ &odmi_msi_domain_info,
+ inner_domain);
+ if (!plat_domain) {
+ ret = -ENOMEM;
+ goto err_remove_inner;
+ }
+
+ return 0;
+
+err_remove_inner:
+ irq_domain_remove(inner_domain);
+err_unmap:
+ for (i = 0; i < odmis_count; i++) {
+ struct odmi_data *odmi = &odmis[i];
+
+ if (odmi->base && !IS_ERR(odmi->base))
+ iounmap(odmis[i].base);
+ }
+ kfree(odmis_bm);
+err_alloc:
+ kfree(odmis);
+ return ret;
+}
+
+IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index efe50845939d..17304705f2cf 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
void __iomem *icoll_base;
icoll_base = of_io_request_and_map(np, 0, np->name);
- if (!icoll_base)
+ if (IS_ERR(icoll_base))
panic("%s: unable to map resource", np->full_name);
return icoll_base;
}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 0820f67cc9a7..668730c5cb66 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc = irq_get_domain_generic_chip(domain, 0);
gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (!gc->reg_base) {
+ if (IS_ERR(gc->reg_base)) {
pr_err("unable to map resource\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(gc->reg_base);
goto fail_irqd_remove;
}
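
Both of the hunks above fix the same latent bug: of_io_request_and_map() reports failure with an ERR_PTR()-encoded pointer, never NULL, so the old !ptr checks could not fire. A minimal sketch of the ERR_PTR convention, as a simplified reimplementation rather than the real kernel headers:

/* ERR_PTR-style error checking, as fixed in the two hunks above. */
#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(e) ((void *)(long)(e))
#define PTR_ERR(p) ((long)(p))
#define IS_ERR(p) ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *base = ERR_PTR(-12);	/* pretend the mapping failed: -ENOMEM */

	if (IS_ERR(base))		/* correct check; base is never NULL here */
		printf("map failed: %ld\n", PTR_ERR(base));
	return 0;
}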
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
new file mode 100644
index 000000000000..bdbb5c0ff7fe
--- /dev/null
+++ b/drivers/irqchip/irq-tango.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2014 Mans Rullgard <mans@mansr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define IRQ0_CTL_BASE 0x0000
+#define IRQ1_CTL_BASE 0x0100
+#define EDGE_CTL_BASE 0x0200
+#define IRQ2_CTL_BASE 0x0300
+
+#define IRQ_CTL_HI 0x18
+#define EDGE_CTL_HI 0x20
+
+#define IRQ_STATUS 0x00
+#define IRQ_RAWSTAT 0x04
+#define IRQ_EN_SET 0x08
+#define IRQ_EN_CLR 0x0c
+#define IRQ_SOFT_SET 0x10
+#define IRQ_SOFT_CLR 0x14
+
+#define EDGE_STATUS 0x00
+#define EDGE_RAWSTAT 0x04
+#define EDGE_CFG_RISE 0x08
+#define EDGE_CFG_FALL 0x0c
+#define EDGE_CFG_RISE_SET 0x10
+#define EDGE_CFG_RISE_CLR 0x14
+#define EDGE_CFG_FALL_SET 0x18
+#define EDGE_CFG_FALL_CLR 0x1c
+
+struct tangox_irq_chip {
+ void __iomem *base;
+ unsigned long ctl;
+};
+
+static inline u32 intc_readl(struct tangox_irq_chip *chip, int reg)
+{
+ return readl_relaxed(chip->base + reg);
+}
+
+static inline void intc_writel(struct tangox_irq_chip *chip, int reg, u32 val)
+{
+ writel_relaxed(val, chip->base + reg);
+}
+
+static void tangox_dispatch_irqs(struct irq_domain *dom, unsigned int status,
+ int base)
+{
+ unsigned int hwirq;
+ unsigned int virq;
+
+ while (status) {
+ hwirq = __ffs(status);
+ virq = irq_find_mapping(dom, base + hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ status &= ~BIT(hwirq);
+ }
+}
+
+static void tangox_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *dom = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ struct tangox_irq_chip *chip = dom->host_data;
+ unsigned int status_lo, status_hi;
+
+ chained_irq_enter(host_chip, desc);
+
+ status_lo = intc_readl(chip, chip->ctl + IRQ_STATUS);
+ status_hi = intc_readl(chip, chip->ctl + IRQ_CTL_HI + IRQ_STATUS);
+
+ tangox_dispatch_irqs(dom, status_lo, 0);
+ tangox_dispatch_irqs(dom, status_hi, 32);
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static int tangox_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct tangox_irq_chip *chip = gc->domain->host_data;
+ struct irq_chip_regs *regs = &gc->chip_types[0].regs;
+
+ switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
+ intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
+ intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
+ intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
+ intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
+ break;
+
+ default:
+ pr_err("Invalid trigger mode %x for IRQ %d\n",
+ flow_type, d->irq);
+ return -EINVAL;
+ }
+
+ return irq_setup_alt_chip(d, flow_type);
+}
+
+static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
+ unsigned long ctl_offs,
+ unsigned long edge_offs)
+{
+ struct tangox_irq_chip *chip = gc->domain->host_data;
+ struct irq_chip_type *ct = gc->chip_types;
+ unsigned long ctl_base = chip->ctl + ctl_offs;
+ unsigned long edge_base = EDGE_CTL_BASE + edge_offs;
+ int i;
+
+ gc->reg_base = chip->base;
+ gc->unused = 0;
+
+ for (i = 0; i < 2; i++) {
+ ct[i].chip.irq_ack = irq_gc_ack_set_bit;
+ ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
+ ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack;
+ ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct[i].chip.irq_set_type = tangox_irq_set_type;
+ ct[i].chip.name = gc->domain->name;
+
+ ct[i].regs.enable = ctl_base + IRQ_EN_SET;
+ ct[i].regs.disable = ctl_base + IRQ_EN_CLR;
+ ct[i].regs.ack = edge_base + EDGE_RAWSTAT;
+ ct[i].regs.type = edge_base;
+ }
+
+ ct[0].type = IRQ_TYPE_LEVEL_MASK;
+ ct[0].handler = handle_level_irq;
+
+ ct[1].type = IRQ_TYPE_EDGE_BOTH;
+ ct[1].handler = handle_edge_irq;
+
+ intc_writel(chip, ct->regs.disable, 0xffffffff);
+ intc_writel(chip, ct->regs.ack, 0xffffffff);
+}
+
+static void __init tangox_irq_domain_init(struct irq_domain *dom)
+{
+ struct irq_chip_generic *gc;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ gc = irq_get_domain_generic_chip(dom, i * 32);
+ tangox_irq_init_chip(gc, i * IRQ_CTL_HI, i * EDGE_CTL_HI);
+ }
+}
+
+static int __init tangox_irq_init(void __iomem *base, struct resource *baseres,
+ struct device_node *node)
+{
+ struct tangox_irq_chip *chip;
+ struct irq_domain *dom;
+ struct resource res;
+ int irq;
+ int err;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq)
+ panic("%s: failed to get IRQ", node->name);
+
+ err = of_address_to_resource(node, 0, &res);
+ if (err)
+ panic("%s: failed to get address", node->name);
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip->ctl = res.start - baseres->start;
+ chip->base = base;
+
+ dom = irq_domain_add_linear(node, 64, &irq_generic_chip_ops, chip);
+ if (!dom)
+ panic("%s: failed to create irqdomain", node->name);
+
+ err = irq_alloc_domain_generic_chips(dom, 32, 2, node->name,
+ handle_level_irq, 0, 0, 0);
+ if (err)
+ panic("%s: failed to allocate irqchip", node->name);
+
+ tangox_irq_domain_init(dom);
+
+ irq_set_chained_handler(irq, tangox_irq_handler);
+ irq_set_handler_data(irq, dom);
+
+ return 0;
+}
+
+static int __init tangox_of_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct device_node *c;
+ struct resource res;
+ void __iomem *base;
+
+ base = of_iomap(node, 0);
+ if (!base)
+ panic("%s: of_iomap failed", node->name);
+
+ of_address_to_resource(node, 0, &res);
+
+ for_each_child_of_node(node, c)
+ tangox_irq_init(base, &res, c);
+
+ return 0;
+}
+IRQCHIP_DECLARE(tangox_intc, "sigma,smp8642-intc", tangox_of_irq_init);
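
tangox_dispatch_irqs() walks each 32-bit status word with __ffs(), forwarding one interrupt per set bit and offsetting the high word by 32. A standalone sketch using __builtin_ctz() as a stand-in for __ffs():

/* Sketch of the __ffs()-based loop in tangox_dispatch_irqs(); not kernel code. */
#include <stdio.h>

static int ffs_index(unsigned int v)	/* __ffs(): index of lowest set bit */
{
	return __builtin_ctz(v);
}

int main(void)
{
	unsigned int status = 0x00000112;	/* example IRQ_STATUS read */
	int base = 32;				/* high word maps to hwirqs 32..63 */

	while (status) {
		int hwirq = ffs_index(status);
		printf("dispatch hwirq %d\n", base + hwirq);
		status &= ~(1u << hwirq);	/* clear the bit in our local copy */
	}
	return 0;
}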
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 121ec301372e..50be9639e27e 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -275,22 +275,10 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
&parent_fwspec);
}
-static void tegra_ictlr_domain_free(struct irq_domain *domain,
- unsigned int virq,
- unsigned int nr_irqs)
-{
- unsigned int i;
-
- for (i = 0; i < nr_irqs; i++) {
- struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
- irq_domain_reset_irq_data(d);
- }
-}
-
static const struct irq_domain_ops tegra_ictlr_domain_ops = {
.translate = tegra_ictlr_domain_translate,
.alloc = tegra_ictlr_domain_alloc,
- .free = tegra_ictlr_domain_free,
+ .free = irq_domain_free_irqs_common,
};
static int __init tegra_ictlr_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 4192bdcd2734..2325fb3c482b 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -59,7 +59,7 @@ static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-struct irq_domain_ops ts4800_ic_ops = {
+static const struct irq_domain_ops ts4800_ic_ops = {
.map = ts4800_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 91c81965e7ca..c32e45826c2c 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -8,9 +8,6 @@ obj-$(CONFIG_MISDN) += mISDN/
obj-$(CONFIG_ISDN) += hardware/
obj-$(CONFIG_ISDN_DIVERSION) += divert/
obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/
-obj-$(CONFIG_ISDN_DRV_ICN) += icn/
-obj-$(CONFIG_ISDN_DRV_PCBIT) += pcbit/
obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/
-obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/
obj-$(CONFIG_HYSDN) += hysdn/
obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
index b5226af6ddec..576b7b4a3278 100644
--- a/drivers/isdn/hardware/eicon/debug.c
+++ b/drivers/isdn/hardware/eicon/debug.c
@@ -192,8 +192,6 @@ static diva_os_spin_lock_t dbg_q_lock;
static diva_os_spin_lock_t dbg_adapter_lock;
static int dbg_q_busy;
static volatile dword dbg_sequence;
-static dword start_sec;
-static dword start_usec;
/*
INTERFACE:
@@ -215,8 +213,6 @@ int diva_maint_init(byte *base, unsigned long length, int do_init) {
dbg_base = base;
- diva_os_get_time(&start_sec, &start_usec);
-
*(dword *)base = (dword)DBG_MAGIC; /* Store Magic */
base += sizeof(dword);
length -= sizeof(dword);
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index 48db08d0bb3d..0de29b7b712f 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -45,7 +45,6 @@ char *DRIVERRELEASE_MNT = "2.0";
static wait_queue_head_t msgwaitq;
static unsigned long opened;
-static struct timeval start_time;
extern int mntfunc_init(int *, void **, unsigned long);
extern void mntfunc_finit(void);
@@ -88,28 +87,12 @@ int diva_os_copy_from_user(void *os_handle, void *dst, const void __user *src,
*/
void diva_os_get_time(dword *sec, dword *usec)
{
- struct timeval tv;
-
- do_gettimeofday(&tv);
-
- if (tv.tv_sec > start_time.tv_sec) {
- if (start_time.tv_usec > tv.tv_usec) {
- tv.tv_sec--;
- tv.tv_usec += 1000000;
- }
- *sec = (dword) (tv.tv_sec - start_time.tv_sec);
- *usec = (dword) (tv.tv_usec - start_time.tv_usec);
- } else if (tv.tv_sec == start_time.tv_sec) {
- *sec = 0;
- if (start_time.tv_usec < tv.tv_usec) {
- *usec = (dword) (tv.tv_usec - start_time.tv_usec);
- } else {
- *usec = 0;
- }
- } else {
- *sec = (dword) tv.tv_sec;
- *usec = (dword) tv.tv_usec;
- }
+ struct timespec64 time;
+
+ ktime_get_ts64(&time);
+
+ *sec = (dword) time.tv_sec;
+ *usec = (dword) (time.tv_nsec / NSEC_PER_USEC);
}
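
The rewritten diva_os_get_time() drops the hand-rolled offset from module load time: ktime_get_ts64() is already monotonic, so only the nanosecond-to-microsecond split remains. The same conversion in plain C, using clock_gettime() as an analogue:

/* The ns -> usec split now done in diva_os_get_time(), in plain C. */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* ktime_get_ts64() analogue */
	printf("sec %ld usec %ld\n",
	       (long)ts.tv_sec, ts.tv_nsec / 1000L);	/* NSEC_PER_USEC */
	return 0;
}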
/*
@@ -213,7 +196,6 @@ static int __init maint_init(void)
int ret = 0;
void *buffer = NULL;
- do_gettimeofday(&start_time);
init_waitqueue_head(&msgwaitq);
printk(KERN_INFO "%s\n", DRIVERNAME);
diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h
index 8121e046b739..720ee72aab6a 100644
--- a/drivers/isdn/hardware/mISDN/ipac.h
+++ b/drivers/isdn/hardware/mISDN/ipac.h
@@ -99,32 +99,32 @@ struct ipac_hw {
/* All registers original Siemens Spec */
/* IPAC/ISAC registers */
-#define ISAC_MASK 0x20
#define ISAC_ISTA 0x20
-#define ISAC_STAR 0x21
+#define ISAC_MASK 0x20
#define ISAC_CMDR 0x21
+#define ISAC_STAR 0x21
+#define ISAC_MODE 0x22
+#define ISAC_TIMR 0x23
#define ISAC_EXIR 0x24
-#define ISAC_ADF2 0x39
+#define ISAC_RBCL 0x25
+#define ISAC_RSTA 0x27
+#define ISAC_RBCH 0x2A
#define ISAC_SPCR 0x30
-#define ISAC_ADF1 0x38
#define ISAC_CIR0 0x31
#define ISAC_CIX0 0x31
-#define ISAC_CIR1 0x33
-#define ISAC_CIX1 0x33
-#define ISAC_STCR 0x37
-#define ISAC_MODE 0x22
-#define ISAC_RSTA 0x27
-#define ISAC_RBCL 0x25
-#define ISAC_RBCH 0x2A
-#define ISAC_TIMR 0x23
-#define ISAC_SQXR 0x3b
-#define ISAC_SQRR 0x3b
-#define ISAC_MOSR 0x3a
-#define ISAC_MOCR 0x3a
#define ISAC_MOR0 0x32
#define ISAC_MOX0 0x32
+#define ISAC_CIR1 0x33
+#define ISAC_CIX1 0x33
#define ISAC_MOR1 0x34
#define ISAC_MOX1 0x34
+#define ISAC_STCR 0x37
+#define ISAC_ADF1 0x38
+#define ISAC_ADF2 0x39
+#define ISAC_MOCR 0x3a
+#define ISAC_MOSR 0x3a
+#define ISAC_SQRR 0x3b
+#define ISAC_SQXR 0x3b
#define ISAC_RBCH_XAC 0x80
@@ -212,13 +212,14 @@ struct ipac_hw {
#define ISAC_CMD_DUI 0xF
/* ISAC/ISACX/IPAC/IPACX L1 indications */
-#define ISAC_IND_RS 0x1
-#define ISAC_IND_PU 0x7
#define ISAC_IND_DR 0x0
+#define ISAC_IND_RS 0x1
#define ISAC_IND_SD 0x2
#define ISAC_IND_DIS 0x3
-#define ISAC_IND_EI 0x6
#define ISAC_IND_RSY 0x4
+#define ISAC_IND_DR6 0x5
+#define ISAC_IND_EI 0x6
+#define ISAC_IND_PU 0x7
#define ISAC_IND_ARD 0x8
#define ISAC_IND_TI 0xA
#define ISAC_IND_ATI 0xB
@@ -339,9 +340,9 @@ struct ipac_hw {
#define ISACX__AUX 0x08
#define ISACX__CIC 0x10
#define ISACX__ST 0x20
+#define IPACX__ON 0x2C
#define IPACX__ICB 0x40
#define IPACX__ICA 0x80
-#define IPACX__ON 0x2C
/* ISACX/IPACX _CMDRD (W) */
#define ISACX_CMDRD_XRES 0x01
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index cb428b9ee441..aa9b6c3cadc1 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -80,6 +80,7 @@ isac_ph_state_bh(struct dchannel *dch)
l1_event(dch->l1, HW_DEACT_CNF);
break;
case ISAC_IND_DR:
+ case ISAC_IND_DR6:
dch->state = 3;
l1_event(dch->l1, HW_DEACT_IND);
break;
@@ -660,6 +661,7 @@ isac_l1cmd(struct dchannel *dch, u32 cmd)
spin_lock_irqsave(isac->hwlock, flags);
if ((isac->state == ISAC_IND_EI) ||
(isac->state == ISAC_IND_DR) ||
+ (isac->state == ISAC_IND_DR6) ||
(isac->state == ISAC_IND_RS))
ph_command(isac, ISAC_CMD_TIM);
else
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 7fdf78f46433..df7e05ca8f9c 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -215,9 +215,11 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
if (count == 0)
count = 32;
isac_empty_fifo(cs, count);
- if ((count = cs->rcvidx) > 0) {
+ count = cs->rcvidx;
+ if (count > 0) {
cs->rcvidx = 0;
- if (!(skb = alloc_skb(count, GFP_ATOMIC)))
+ skb = alloc_skb(count, GFP_ATOMIC);
+ if (!skb)
printk(KERN_WARNING "HiSax: D receive out of memory\n");
else {
memcpy(skb_put(skb, count), cs->rcvbuf, count);
@@ -251,7 +253,8 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
cs->tx_skb = NULL;
}
}
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
+ cs->tx_skb = skb_dequeue(&cs->sq);
+ if (cs->tx_skb) {
cs->tx_cnt = 0;
isac_fill_fifo(cs);
} else
@@ -313,7 +316,8 @@ afterXPR:
#if ARCOFI_USE
if (v1 & 0x08) {
if (!cs->dc.isac.mon_rx) {
- if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+ cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+ if (!cs->dc.isac.mon_rx) {
if (cs->debug & L1_DEB_WARN)
debugl1(cs, "ISAC MON RX out of memory!");
cs->dc.isac.mocr &= 0xf0;
@@ -343,7 +347,8 @@ afterXPR:
afterMONR0:
if (v1 & 0x80) {
if (!cs->dc.isac.mon_rx) {
- if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+ cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+ if (!cs->dc.isac.mon_rx) {
if (cs->debug & L1_DEB_WARN)
debugl1(cs, "ISAC MON RX out of memory!");
cs->dc.isac.mocr &= 0x0f;
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index f5b714cd7618..68e54d9f2f53 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -123,16 +123,6 @@ comment "ISDN4Linux hardware drivers"
source "drivers/isdn/hisax/Kconfig"
-
-menu "Active cards"
-
-source "drivers/isdn/icn/Kconfig"
-
-source "drivers/isdn/pcbit/Kconfig"
-
-source "drivers/isdn/act2000/Kconfig"
-
-endmenu
# end ISDN_I4L
endif
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 2175225af742..947d5c978b8f 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1572,7 +1572,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
#endif
return;
}
- port->flags |= ASYNC_CLOSING;
+ info->closing = 1;
tty->closing = 1;
/*
@@ -1603,6 +1603,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
info->ncarrier = 0;
tty_port_close_end(port, tty);
+ info->closing = 0;
#ifdef ISDN_DEBUG_MODEM_OPEN
printk(KERN_DEBUG "isdn_tty_close normal exit\n");
#endif
@@ -2236,7 +2237,7 @@ isdn_tty_at_cout(char *msg, modem_info *info)
l = strlen(msg);
spin_lock_irqsave(&info->readlock, flags);
- if (port->flags & ASYNC_CLOSING) {
+ if (info->closing) {
spin_unlock_irqrestore(&info->readlock, flags);
return;
}
@@ -2386,13 +2387,12 @@ isdn_tty_modem_result(int code, modem_info *info)
case RESULT_NO_CARRIER:
#ifdef ISDN_DEBUG_MODEM_HUP
printk(KERN_DEBUG "modem_result: NO CARRIER %d %d\n",
- (info->port.flags & ASYNC_CLOSING),
- (!info->port.tty));
+ info->closing, !info->port.tty);
#endif
m->mdmreg[REG_RINGCNT] = 0;
del_timer(&info->nc_timer);
info->ncarrier = 0;
- if ((info->port.flags & ASYNC_CLOSING) || (!info->port.tty))
+ if (info->closing || !info->port.tty)
return;
#ifdef CONFIG_ISDN_AUDIO
@@ -2525,7 +2525,7 @@ isdn_tty_modem_result(int code, modem_info *info)
}
}
if (code == RESULT_NO_CARRIER) {
- if ((info->port.flags & ASYNC_CLOSING) || (!info->port.tty))
+ if (info->closing || (!info->port.tty))
return;
if (info->port.flags & ASYNC_CHECK_CD)
diff --git a/drivers/isdn/mISDN/clock.c b/drivers/isdn/mISDN/clock.c
index 693fb7c9b59a..f8f659f1ce1b 100644
--- a/drivers/isdn/mISDN/clock.c
+++ b/drivers/isdn/mISDN/clock.c
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
+#include <linux/ktime.h>
#include <linux/mISDNif.h>
#include <linux/export.h>
#include "core.h"
@@ -45,15 +46,15 @@ static u_int *debug;
static LIST_HEAD(iclock_list);
static DEFINE_RWLOCK(iclock_lock);
static u16 iclock_count; /* counter of last clock */
-static struct timeval iclock_tv; /* time stamp of last clock */
-static int iclock_tv_valid; /* already received one timestamp */
+static ktime_t iclock_timestamp; /* time stamp of last clock */
+static int iclock_timestamp_valid; /* already received one timestamp */
static struct mISDNclock *iclock_current;
void
mISDN_init_clock(u_int *dp)
{
debug = dp;
- do_gettimeofday(&iclock_tv);
+ iclock_timestamp = ktime_get();
}
static void
@@ -86,7 +87,7 @@ select_iclock(void)
}
if (bestclock != iclock_current) {
/* no clock received yet */
- iclock_tv_valid = 0;
+ iclock_timestamp_valid = 0;
}
iclock_current = bestclock;
}
@@ -139,12 +140,11 @@ mISDN_unregister_clock(struct mISDNclock *iclock)
EXPORT_SYMBOL(mISDN_unregister_clock);
void
-mISDN_clock_update(struct mISDNclock *iclock, int samples, struct timeval *tv)
+mISDN_clock_update(struct mISDNclock *iclock, int samples, ktime_t *timestamp)
{
u_long flags;
- struct timeval tv_now;
- time_t elapsed_sec;
- int elapsed_8000th;
+ ktime_t timestamp_now;
+ u16 delta;
write_lock_irqsave(&iclock_lock, flags);
if (iclock_current != iclock) {
@@ -156,33 +156,27 @@ mISDN_clock_update(struct mISDNclock *iclock, int samples, struct timeval *tv)
write_unlock_irqrestore(&iclock_lock, flags);
return;
}
- if (iclock_tv_valid) {
+ if (iclock_timestamp_valid) {
/* increment sample counter by given samples */
iclock_count += samples;
- if (tv) { /* tv must be set, if function call is delayed */
- iclock_tv.tv_sec = tv->tv_sec;
- iclock_tv.tv_usec = tv->tv_usec;
- } else
- do_gettimeofday(&iclock_tv);
+ if (timestamp) { /* timestamp must be set if the call is delayed */
+ iclock_timestamp = *timestamp;
+ } else {
+ iclock_timestamp = ktime_get();
+ }
} else {
/* calc elapsed time by system clock */
- if (tv) { /* tv must be set, if function call is delayed */
- tv_now.tv_sec = tv->tv_sec;
- tv_now.tv_usec = tv->tv_usec;
- } else
- do_gettimeofday(&tv_now);
- elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec;
- elapsed_8000th = (tv_now.tv_usec / 125)
- - (iclock_tv.tv_usec / 125);
- if (elapsed_8000th < 0) {
- elapsed_sec -= 1;
- elapsed_8000th += 8000;
+ if (timestamp) { /* timestamp must be set if the call is delayed */
+ timestamp_now = *timestamp;
+ } else {
+ timestamp_now = ktime_get();
}
+ delta = ktime_divns(ktime_sub(timestamp_now, iclock_timestamp),
+ (NSEC_PER_SEC / 8000));
/* add elapsed time to counter and set new timestamp */
- iclock_count += elapsed_sec * 8000 + elapsed_8000th;
- iclock_tv.tv_sec = tv_now.tv_sec;
- iclock_tv.tv_usec = tv_now.tv_usec;
- iclock_tv_valid = 1;
+ iclock_count += delta;
+ iclock_timestamp = timestamp_now;
+ iclock_timestamp_valid = 1;
if (*debug & DEBUG_CLOCK)
printk("Received first clock from source '%s'.\n",
iclock_current ? iclock_current->name : "nothing");
@@ -195,22 +189,17 @@ unsigned short
mISDN_clock_get(void)
{
u_long flags;
- struct timeval tv_now;
- time_t elapsed_sec;
- int elapsed_8000th;
+ ktime_t timestamp_now;
+ u16 delta;
u16 count;
read_lock_irqsave(&iclock_lock, flags);
/* calc elapsed time by system clock */
- do_gettimeofday(&tv_now);
- elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec;
- elapsed_8000th = (tv_now.tv_usec / 125) - (iclock_tv.tv_usec / 125);
- if (elapsed_8000th < 0) {
- elapsed_sec -= 1;
- elapsed_8000th += 8000;
- }
+ timestamp_now = ktime_get();
+ delta = ktime_divns(ktime_sub(timestamp_now, iclock_timestamp),
+ (NSEC_PER_SEC / 8000));
/* add elapsed time to counter */
- count = iclock_count + elapsed_sec * 8000 + elapsed_8000th;
+ count = iclock_count + delta;
read_unlock_irqrestore(&iclock_lock, flags);
return count;
}
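
Both mISDN_clock_update() and mISDN_clock_get() now derive elapsed 8 kHz sample counts as the nanosecond delta divided by 125000 (NSEC_PER_SEC / 8000); the u16 counter is allowed to wrap. A worked example:

/* Elapsed 8 kHz samples from a nanosecond delta, as in mISDN clock.c. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
	int64_t delta_ns = 250000000;	/* example: 250 ms elapsed */
	uint16_t ticks = delta_ns / (NSEC_PER_SEC / 8000); /* 125000 ns/sample */

	printf("%u samples\n", ticks);	/* prints 2000; u16 wrap is intended */
	return 0;
}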
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a6356d..99e5f9751e8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_mISDN))
+ return -EINVAL;
+
lock_sock(sk);
if (_pms(sk)->dev) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 7f940c24a16b..225147863e02 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -443,6 +443,7 @@ config LEDS_DELL_NETBOOKS
tristate "External LED on Dell Business Netbooks"
depends on LEDS_CLASS
depends on X86 && ACPI_WMI
+ depends on DELL_SMBIOS
help
This adds support for the Latitude 2100 and similar
notebooks that have an external LED.
@@ -568,6 +569,14 @@ config LEDS_SEAD3
This driver can also be built as a module. If so the module
will be called leds-sead3.
+config LEDS_IS31FL32XX
+ tristate "LED support for ISSI IS31FL32XX I2C LED controller family"
+ depends on LEDS_CLASS && I2C && OF
+ help
+ Say Y here to include support for ISSI IS31FL32XX and Si-En SN32xx
+ LED controllers. They are I2C devices with multiple constant-current
+ channels, each with independent 256-level PWM control.
+
comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
config LEDS_BLINKM
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e9d53092765d..cb2013df52d9 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
obj-$(CONFIG_LEDS_SEAD3) += leds-sead3.o
+obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/dell-led.c b/drivers/leds/dell-led.c
index c36acaf566a6..b3d6e9c15cf9 100644
--- a/drivers/leds/dell-led.c
+++ b/drivers/leds/dell-led.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/dell-led.h>
+#include "../platform/x86/dell-smbios.h"
MODULE_AUTHOR("Louis Davis/Jim Dailey");
MODULE_DESCRIPTION("Dell LED Control Driver");
@@ -42,120 +43,32 @@ MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID);
#define CMD_LED_OFF 17
#define CMD_LED_BLINK 18
-struct app_wmi_args {
- u16 class;
- u16 selector;
- u32 arg1;
- u32 arg2;
- u32 arg3;
- u32 arg4;
- u32 res1;
- u32 res2;
- u32 res3;
- u32 res4;
- char dummy[92];
-};
-
#define GLOBAL_MIC_MUTE_ENABLE 0x364
#define GLOBAL_MIC_MUTE_DISABLE 0x365
-struct dell_bios_data_token {
- u16 tokenid;
- u16 location;
- u16 value;
-};
-
-struct __attribute__ ((__packed__)) dell_bios_calling_interface {
- struct dmi_header header;
- u16 cmd_io_addr;
- u8 cmd_io_code;
- u32 supported_cmds;
- struct dell_bios_data_token damap[];
-};
-
-static struct dell_bios_data_token dell_mic_tokens[2];
-
-static int dell_wmi_perform_query(struct app_wmi_args *args)
-{
- struct app_wmi_args *bios_return;
- union acpi_object *obj;
- struct acpi_buffer input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_status status;
- u32 rc = -EINVAL;
-
- input.length = 128;
- input.pointer = args;
-
- status = wmi_evaluate_method(DELL_APP_GUID, 0, 1, &input, &output);
- if (!ACPI_SUCCESS(status))
- goto err_out0;
-
- obj = output.pointer;
- if (!obj)
- goto err_out0;
-
- if (obj->type != ACPI_TYPE_BUFFER)
- goto err_out1;
-
- bios_return = (struct app_wmi_args *)obj->buffer.pointer;
- rc = bios_return->res1;
- if (rc)
- goto err_out1;
-
- memcpy(args, bios_return, sizeof(struct app_wmi_args));
- rc = 0;
-
- err_out1:
- kfree(obj);
- err_out0:
- return rc;
-}
-
-static void __init find_micmute_tokens(const struct dmi_header *dm, void *dummy)
-{
- struct dell_bios_calling_interface *calling_interface;
- struct dell_bios_data_token *token;
- int token_size = sizeof(struct dell_bios_data_token);
- int i = 0;
-
- if (dm->type == 0xda && dm->length > 17) {
- calling_interface = container_of(dm,
- struct dell_bios_calling_interface, header);
-
- token = &calling_interface->damap[i];
- while (token->tokenid != 0xffff) {
- if (token->tokenid == GLOBAL_MIC_MUTE_DISABLE)
- memcpy(&dell_mic_tokens[0], token, token_size);
- else if (token->tokenid == GLOBAL_MIC_MUTE_ENABLE)
- memcpy(&dell_mic_tokens[1], token, token_size);
-
- i++;
- token = &calling_interface->damap[i];
- }
- }
-}
-
static int dell_micmute_led_set(int state)
{
- struct app_wmi_args args;
- struct dell_bios_data_token *token;
+ struct calling_interface_buffer *buffer;
+ struct calling_interface_token *token;
if (!wmi_has_guid(DELL_APP_GUID))
return -ENODEV;
- if (state == 0 || state == 1)
- token = &dell_mic_tokens[state];
+ if (state == 0)
+ token = dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE);
+ else if (state == 1)
+ token = dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE);
else
return -EINVAL;
- memset(&args, 0, sizeof(struct app_wmi_args));
-
- args.class = 1;
- args.arg1 = token->location;
- args.arg2 = token->value;
+ if (!token)
+ return -ENODEV;
- dell_wmi_perform_query(&args);
+ buffer = dell_smbios_get_buffer();
+ buffer->input[0] = token->location;
+ buffer->input[1] = token->value;
+ dell_smbios_send_request(1, 0);
+ dell_smbios_release_buffer();
return state;
}
@@ -177,14 +90,6 @@ int dell_app_wmi_led_set(int whichled, int on)
}
EXPORT_SYMBOL_GPL(dell_app_wmi_led_set);
-static int __init dell_micmute_led_init(void)
-{
- memset(dell_mic_tokens, 0, sizeof(struct dell_bios_data_token) * 2);
- dmi_walk(find_micmute_tokens, NULL);
-
- return 0;
-}
-
struct bios_args {
unsigned char length;
unsigned char result_code;
@@ -330,9 +235,6 @@ static int __init dell_led_init(void)
if (!wmi_has_guid(DELL_LED_BIOS_GUID) && !wmi_has_guid(DELL_APP_GUID))
return -ENODEV;
- if (wmi_has_guid(DELL_APP_GUID))
- error = dell_micmute_led_init();
-
if (wmi_has_guid(DELL_LED_BIOS_GUID)) {
error = led_off();
if (error != 0)
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 14139c337312..aa84e5b37593 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -245,6 +245,8 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
up_write(&led_cdev->trigger_lock);
#endif
+ led_cdev->flags |= LED_UNREGISTERING;
+
/* Stop blinking */
led_stop_software_blink(led_cdev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 19e1e60dfaa3..3495d5d6547f 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -25,6 +25,26 @@ EXPORT_SYMBOL_GPL(leds_list_lock);
LIST_HEAD(leds_list);
EXPORT_SYMBOL_GPL(leds_list);
+static int __led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (!led_cdev->brightness_set)
+ return -ENOTSUPP;
+
+ led_cdev->brightness_set(led_cdev, value);
+
+ return 0;
+}
+
+static int __led_set_brightness_blocking(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (!led_cdev->brightness_set_blocking)
+ return -ENOTSUPP;
+
+ return led_cdev->brightness_set_blocking(led_cdev, value);
+}
+
static void led_timer_function(unsigned long data)
{
struct led_classdev *led_cdev = (void *)data;
@@ -91,14 +111,14 @@ static void set_brightness_delayed(struct work_struct *ws)
led_cdev->flags &= ~LED_BLINK_DISABLE;
}
- if (led_cdev->brightness_set)
- led_cdev->brightness_set(led_cdev, led_cdev->delayed_set_value);
- else if (led_cdev->brightness_set_blocking)
- ret = led_cdev->brightness_set_blocking(led_cdev,
- led_cdev->delayed_set_value);
- else
- ret = -ENOTSUPP;
- if (ret < 0)
+ ret = __led_set_brightness(led_cdev, led_cdev->delayed_set_value);
+ if (ret == -ENOTSUPP)
+ ret = __led_set_brightness_blocking(led_cdev,
+ led_cdev->delayed_set_value);
+ if (ret < 0 &&
+ /* LED HW might have been unplugged, therefore don't warn */
+ !(ret == -ENODEV && (led_cdev->flags & LED_UNREGISTERING) &&
+ (led_cdev->flags & LED_HW_PLUGGABLE)))
dev_err(led_cdev->dev,
"Setting an LED's brightness failed (%d)\n", ret);
}
@@ -233,10 +253,8 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev,
enum led_brightness value)
{
/* Use brightness_set op if available, it is guaranteed not to sleep */
- if (led_cdev->brightness_set) {
- led_cdev->brightness_set(led_cdev, value);
+ if (!__led_set_brightness(led_cdev, value))
return;
- }
/* If brightness setting can sleep, delegate it to a work queue task */
led_cdev->delayed_set_value = value;
@@ -267,10 +285,7 @@ int led_set_brightness_sync(struct led_classdev *led_cdev,
if (led_cdev->flags & LED_SUSPENDED)
return 0;
- if (led_cdev->brightness_set_blocking)
- return led_cdev->brightness_set_blocking(led_cdev,
- led_cdev->brightness);
- return -ENOTSUPP;
+ return __led_set_brightness_blocking(led_cdev, led_cdev->brightness);
}
EXPORT_SYMBOL_GPL(led_set_brightness_sync);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index e1e933424ac9..2181581795d3 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -34,9 +34,7 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- char trigger_name[TRIG_NAME_MAX];
struct led_trigger *trig;
- size_t len;
int ret = count;
mutex_lock(&led_cdev->led_access);
@@ -46,21 +44,14 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
goto unlock;
}
- trigger_name[sizeof(trigger_name) - 1] = '\0';
- strncpy(trigger_name, buf, sizeof(trigger_name) - 1);
- len = strlen(trigger_name);
-
- if (len && trigger_name[len - 1] == '\n')
- trigger_name[len - 1] = '\0';
-
- if (!strcmp(trigger_name, "none")) {
+ if (sysfs_streq(buf, "none")) {
led_trigger_remove(led_cdev);
goto unlock;
}
down_read(&triggers_list_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
- if (!strcmp(trigger_name, trig->name)) {
+ if (sysfs_streq(buf, trig->name)) {
down_write(&led_cdev->trigger_lock);
led_trigger_set(led_cdev, trig);
up_write(&led_cdev->trigger_lock);
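
sysfs_streq() matches a sysfs buffer against a string while treating a single trailing newline as a terminator, which is what the removed strncpy/trim code emulated by hand. A rough user-space equivalent, for illustration only:

/* Rough user-space equivalent of sysfs_streq(); not the kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

static bool sysfs_streq_like(const char *s1, const char *s2)
{
	while (*s1 && *s1 == *s2) { s1++; s2++; }
	if (*s1 == *s2)
		return true;			/* both strings ended together */
	/* allow exactly one trailing newline on either side */
	if (*s1 == '\n' && !s1[1] && !*s2)
		return true;
	if (*s2 == '\n' && !s2[1] && !*s1)
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", sysfs_streq_like("none\n", "none"));	/* 1 */
	printf("%d\n", sysfs_streq_like("timer", "tim"));	/* 0 */
	return 0;
}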
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 1ad4d03a0a3c..77a104d2b124 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -195,7 +195,6 @@ static int pm860x_led_probe(struct platform_device *pdev)
sprintf(data->name, "led1-blue");
break;
}
- platform_set_drvdata(pdev, data);
data->chip = chip;
data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->port = pdev->id;
@@ -208,7 +207,7 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->cdev.brightness_set_blocking = pm860x_led_set;
mutex_init(&data->lock);
- ret = led_classdev_register(chip->dev, &data->cdev);
+ ret = devm_led_classdev_register(chip->dev, &data->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
return ret;
@@ -217,21 +216,12 @@ static int pm860x_led_probe(struct platform_device *pdev)
return 0;
}
-static int pm860x_led_remove(struct platform_device *pdev)
-{
- struct pm860x_led *data = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&data->cdev);
-
- return 0;
-}
static struct platform_driver pm860x_led_driver = {
.driver = {
.name = "88pm860x-led",
},
.probe = pm860x_led_probe,
- .remove = pm860x_led_remove,
};
module_platform_driver(pm860x_led_driver);
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 4752a2b6ba2b..5ff7d72f73aa 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -113,21 +113,12 @@ static int da903x_led_probe(struct platform_device *pdev)
led->flags = pdata->flags;
led->master = pdev->dev.parent;
- ret = led_classdev_register(led->master, &led->cdev);
+ ret = devm_led_classdev_register(led->master, &led->cdev);
if (ret) {
dev_err(&pdev->dev, "failed to register LED %d\n", id);
return ret;
}
- platform_set_drvdata(pdev, led);
- return 0;
-}
-
-static int da903x_led_remove(struct platform_device *pdev)
-{
- struct da903x_led *led = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&led->cdev);
return 0;
}
@@ -136,7 +127,6 @@ static struct platform_driver da903x_led_driver = {
.name = "da903x-led",
},
.probe = da903x_led_probe,
- .remove = da903x_led_remove,
};
module_platform_driver(da903x_led_driver);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 7bc53280dbfd..61143f55597e 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -86,7 +86,7 @@ static int create_gpio_led(const struct gpio_led *template,
* still uses GPIO numbers. Ultimately we would like to get
* rid of this block completely.
*/
- unsigned long flags = 0;
+ unsigned long flags = GPIOF_OUT_INIT_LOW;
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
@@ -104,8 +104,8 @@ static int create_gpio_led(const struct gpio_led *template,
return ret;
led_dat->gpiod = gpio_to_desc(template->gpio);
- if (IS_ERR(led_dat->gpiod))
- return PTR_ERR(led_dat->gpiod);
+ if (!led_dat->gpiod)
+ return -EINVAL;
}
led_dat->cdev.name = template->name;
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
new file mode 100644
index 000000000000..c901d132d80c
--- /dev/null
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -0,0 +1,508 @@
+/*
+ * Driver for ISSI IS31FL32xx family of I2C LED controllers
+ *
+ * Copyright 2015 Allworx Corp.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Datasheets:
+ * http://www.issi.com/US/product-analog-fxled-driver.shtml
+ * http://www.si-en.com/product.asp?parentid=890
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+/* Used to indicate a device has no such register */
+#define IS31FL32XX_REG_NONE 0xFF
+
+/* Software Shutdown bit in Shutdown Register */
+#define IS31FL32XX_SHUTDOWN_SSD_ENABLE 0
+#define IS31FL32XX_SHUTDOWN_SSD_DISABLE BIT(0)
+
+/* IS31FL3216 has a number of unique registers */
+#define IS31FL3216_CONFIG_REG 0x00
+#define IS31FL3216_LIGHTING_EFFECT_REG 0x03
+#define IS31FL3216_CHANNEL_CONFIG_REG 0x04
+
+/* Software Shutdown bit in 3216 Config Register */
+#define IS31FL3216_CONFIG_SSD_ENABLE BIT(7)
+#define IS31FL3216_CONFIG_SSD_DISABLE 0
+
+struct is31fl32xx_priv;
+struct is31fl32xx_led_data {
+ struct led_classdev cdev;
+ u8 channel; /* 1-based, max priv->cdef->channels */
+ struct is31fl32xx_priv *priv;
+};
+
+struct is31fl32xx_priv {
+ const struct is31fl32xx_chipdef *cdef;
+ struct i2c_client *client;
+ unsigned int num_leds;
+ struct is31fl32xx_led_data leds[0];
+};
+
+/**
+ * struct is31fl32xx_chipdef - chip-specific attributes
+ * @channels : Number of LED channels
+ * @shutdown_reg : address of Shutdown register (optional)
+ * @pwm_update_reg : address of PWM Update register
+ * @global_control_reg : address of Global Control register (optional)
+ * @reset_reg : address of Reset register (optional)
+ * @pwm_register_base : address of first PWM register
+ * @pwm_registers_reversed : true if PWM registers count down instead of up
+ * @led_control_register_base : address of first LED control register (optional)
+ * @enable_bits_per_led_control_register : number of LED enable bits in
+ * each LED control register
+ * @reset_func : pointer to reset function
+ * @sw_shutdown_func : pointer to software shutdown function
+ *
+ * For all optional register addresses, the sentinel value %IS31FL32XX_REG_NONE
+ * indicates that this chip has no such register.
+ *
+ * If non-NULL, @reset_func will be called during probing to set all
+ * necessary registers to a known initialization state. This is needed
+ * for chips that do not have a @reset_reg.
+ *
+ * @enable_bits_per_led_control_register must be >=1 if
+ * @led_control_register_base != %IS31FL32XX_REG_NONE.
+ */
+struct is31fl32xx_chipdef {
+ u8 channels;
+ u8 shutdown_reg;
+ u8 pwm_update_reg;
+ u8 global_control_reg;
+ u8 reset_reg;
+ u8 pwm_register_base;
+ bool pwm_registers_reversed;
+ u8 led_control_register_base;
+ u8 enable_bits_per_led_control_register;
+ int (*reset_func)(struct is31fl32xx_priv *priv);
+ int (*sw_shutdown_func)(struct is31fl32xx_priv *priv, bool enable);
+};
+
+static const struct is31fl32xx_chipdef is31fl3236_cdef = {
+ .channels = 36,
+ .shutdown_reg = 0x00,
+ .pwm_update_reg = 0x25,
+ .global_control_reg = 0x4a,
+ .reset_reg = 0x4f,
+ .pwm_register_base = 0x01,
+ .led_control_register_base = 0x26,
+ .enable_bits_per_led_control_register = 1,
+};
+
+static const struct is31fl32xx_chipdef is31fl3235_cdef = {
+ .channels = 28,
+ .shutdown_reg = 0x00,
+ .pwm_update_reg = 0x25,
+ .global_control_reg = 0x4a,
+ .reset_reg = 0x4f,
+ .pwm_register_base = 0x05,
+ .led_control_register_base = 0x2a,
+ .enable_bits_per_led_control_register = 1,
+};
+
+static const struct is31fl32xx_chipdef is31fl3218_cdef = {
+ .channels = 18,
+ .shutdown_reg = 0x00,
+ .pwm_update_reg = 0x16,
+ .global_control_reg = IS31FL32XX_REG_NONE,
+ .reset_reg = 0x17,
+ .pwm_register_base = 0x01,
+ .led_control_register_base = 0x13,
+ .enable_bits_per_led_control_register = 6,
+};
+
+static int is31fl3216_reset(struct is31fl32xx_priv *priv);
+static int is31fl3216_software_shutdown(struct is31fl32xx_priv *priv,
+ bool enable);
+static const struct is31fl32xx_chipdef is31fl3216_cdef = {
+ .channels = 16,
+ .shutdown_reg = IS31FL32XX_REG_NONE,
+ .pwm_update_reg = 0xB0,
+ .global_control_reg = IS31FL32XX_REG_NONE,
+ .reset_reg = IS31FL32XX_REG_NONE,
+ .pwm_register_base = 0x10,
+ .pwm_registers_reversed = true,
+ .led_control_register_base = 0x01,
+ .enable_bits_per_led_control_register = 8,
+ .reset_func = is31fl3216_reset,
+ .sw_shutdown_func = is31fl3216_software_shutdown,
+};
+
+static int is31fl32xx_write(struct is31fl32xx_priv *priv, u8 reg, u8 val)
+{
+ int ret;
+
+ dev_dbg(&priv->client->dev, "writing register 0x%02X=0x%02X", reg, val);
+
+ ret = i2c_smbus_write_byte_data(priv->client, reg, val);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "register write to 0x%02X failed (error %d)",
+ reg, ret);
+ }
+ return ret;
+}
+
+/*
+ * Custom reset function for IS31FL3216 because it does not have a RESET
+ * register the way that the other IS31FL32xx chips do. We don't bother
+ * writing the GPIO and animation registers, because the registers we
+ * do write ensure those will have no effect.
+ */
+static int is31fl3216_reset(struct is31fl32xx_priv *priv)
+{
+ unsigned int i;
+ int ret;
+
+ ret = is31fl32xx_write(priv, IS31FL3216_CONFIG_REG,
+ IS31FL3216_CONFIG_SSD_ENABLE);
+ if (ret)
+ return ret;
+ for (i = 0; i < priv->cdef->channels; i++) {
+ ret = is31fl32xx_write(priv, priv->cdef->pwm_register_base+i,
+ 0x00);
+ if (ret)
+ return ret;
+ }
+ ret = is31fl32xx_write(priv, priv->cdef->pwm_update_reg, 0);
+ if (ret)
+ return ret;
+ ret = is31fl32xx_write(priv, IS31FL3216_LIGHTING_EFFECT_REG, 0x00);
+ if (ret)
+ return ret;
+ ret = is31fl32xx_write(priv, IS31FL3216_CHANNEL_CONFIG_REG, 0x00);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Custom Software-Shutdown function for IS31FL3216 because it does not have
+ * a SHUTDOWN register the way that the other IS31FL32xx chips do.
+ * We don't bother doing a read/modify/write on the CONFIG register because
+ * we only ever use a value of '0' for the other fields in that register.
+ */
+static int is31fl3216_software_shutdown(struct is31fl32xx_priv *priv,
+ bool enable)
+{
+ u8 value = enable ? IS31FL3216_CONFIG_SSD_ENABLE :
+ IS31FL3216_CONFIG_SSD_DISABLE;
+
+ return is31fl32xx_write(priv, IS31FL3216_CONFIG_REG, value);
+}
+
+/*
+ * NOTE: A mutex is not needed in this function because:
+ * - All referenced data is read-only after probe()
+ * - The I2C core protects the bus with a per-adapter mutex
+ * - There are no read/modify/write operations
+ * - Intervening operations between the write of the PWM register
+ * and the Update register are harmless.
+ *
+ * Example:
+ * PWM_REG_1 write 16
+ * UPDATE_REG write 0
+ * PWM_REG_2 write 128
+ * UPDATE_REG write 0
+ * vs:
+ * PWM_REG_1 write 16
+ * PWM_REG_2 write 128
+ * UPDATE_REG write 0
+ * UPDATE_REG write 0
+ * are equivalent. Poking the Update register merely applies all PWM
+ * register writes up to that point.
+ */
+static int is31fl32xx_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ const struct is31fl32xx_led_data *led_data =
+ container_of(led_cdev, struct is31fl32xx_led_data, cdev);
+ const struct is31fl32xx_chipdef *cdef = led_data->priv->cdef;
+ u8 pwm_register_offset;
+ int ret;
+
+ dev_dbg(led_cdev->dev, "%s: %d\n", __func__, brightness);
+
+ /* NOTE: led_data->channel is 1-based */
+ if (cdef->pwm_registers_reversed)
+ pwm_register_offset = cdef->channels - led_data->channel;
+ else
+ pwm_register_offset = led_data->channel - 1;
+
+ ret = is31fl32xx_write(led_data->priv,
+ cdef->pwm_register_base + pwm_register_offset,
+ brightness);
+ if (ret)
+ return ret;
+
+ return is31fl32xx_write(led_data->priv, cdef->pwm_update_reg, 0);
+}
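
As an aside (not part of the patch): the reversed-register handling above is easy to sanity-check in isolation. Below is a standalone C sketch using the channel counts and PWM register bases from the cdef tables in this file; the helper name is made up.

#include <stdio.h>

/* Mirrors the offset math in is31fl32xx_brightness_set(); illustrative only. */
static unsigned int pwm_offset(unsigned int channels, int reversed,
                               unsigned int channel /* 1-based */)
{
        return reversed ? channels - channel : channel - 1;
}

int main(void)
{
        /* IS31FL3236: 36 channels, PWM registers count up from 0x01 */
        printf("3236 ch1  -> reg 0x%02X\n", 0x01 + pwm_offset(36, 0, 1));  /* 0x01 */
        /* IS31FL3216: 16 channels, PWM registers count down toward 0x10 */
        printf("3216 ch1  -> reg 0x%02X\n", 0x10 + pwm_offset(16, 1, 1));  /* 0x1F */
        printf("3216 ch16 -> reg 0x%02X\n", 0x10 + pwm_offset(16, 1, 16)); /* 0x10 */
        return 0;
}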
+
+static int is31fl32xx_reset_regs(struct is31fl32xx_priv *priv)
+{
+ const struct is31fl32xx_chipdef *cdef = priv->cdef;
+ int ret;
+
+ if (cdef->reset_reg != IS31FL32XX_REG_NONE) {
+ ret = is31fl32xx_write(priv, cdef->reset_reg, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (cdef->reset_func)
+ return cdef->reset_func(priv);
+
+ return 0;
+}
+
+static int is31fl32xx_software_shutdown(struct is31fl32xx_priv *priv,
+ bool enable)
+{
+ const struct is31fl32xx_chipdef *cdef = priv->cdef;
+ int ret;
+
+ if (cdef->shutdown_reg != IS31FL32XX_REG_NONE) {
+ u8 value = enable ? IS31FL32XX_SHUTDOWN_SSD_ENABLE :
+ IS31FL32XX_SHUTDOWN_SSD_DISABLE;
+ ret = is31fl32xx_write(priv, cdef->shutdown_reg, value);
+ if (ret)
+ return ret;
+ }
+
+ if (cdef->sw_shutdown_func)
+ return cdef->sw_shutdown_func(priv, enable);
+
+ return 0;
+}
+
+static int is31fl32xx_init_regs(struct is31fl32xx_priv *priv)
+{
+ const struct is31fl32xx_chipdef *cdef = priv->cdef;
+ int ret;
+
+ ret = is31fl32xx_reset_regs(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * Set enable bit for all channels.
+ * We will control state with PWM registers alone.
+ */
+ if (cdef->led_control_register_base != IS31FL32XX_REG_NONE) {
+ u8 value =
+ GENMASK(cdef->enable_bits_per_led_control_register-1, 0);
+ u8 num_regs = cdef->channels /
+ cdef->enable_bits_per_led_control_register;
+ int i;
+
+ for (i = 0; i < num_regs; i++) {
+ ret = is31fl32xx_write(priv,
+ cdef->led_control_register_base+i,
+ value);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = is31fl32xx_software_shutdown(priv, false);
+ if (ret)
+ return ret;
+
+ if (cdef->global_control_reg != IS31FL32XX_REG_NONE) {
+ ret = is31fl32xx_write(priv, cdef->global_control_reg, 0x00);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
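
Worked example (illustrative, not part of the patch): for each supported chip, GENMASK(bits - 1, 0) yields the per-register enable value written above, and channels / bits gives how many control registers the loop touches. The values come from the cdef tables in this file.

#include <stdio.h>

/* Same arithmetic as the kernel's GENMASK(h, 0) for small h; illustrative only. */
static unsigned int enable_mask(unsigned int bits_per_reg)
{
        return (1u << bits_per_reg) - 1;
}

int main(void)
{
        /* {channels, enable bits per LED control register} per the cdef tables */
        const unsigned int chips[][2] = { { 36, 1 }, { 18, 6 }, { 16, 8 } };
        unsigned int i;

        for (i = 0; i < 3; i++)
                printf("%u channels: %u regs of value 0x%02X\n",
                       chips[i][0], chips[i][0] / chips[i][1],
                       enable_mask(chips[i][1]));
        /* prints: 36 regs of 0x01, 3 regs of 0x3F, 2 regs of 0xFF */
        return 0;
}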
+
+static inline size_t sizeof_is31fl32xx_priv(int num_leds)
+{
+ return sizeof(struct is31fl32xx_priv) +
+ (sizeof(struct is31fl32xx_led_data) * num_leds);
+}
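
The [0]-length trailing array in struct is31fl32xx_priv plus this sizing helper is the classic single-allocation idiom: one buffer holds the header and a variable-length array. A standalone sketch of that idiom, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Single allocation holding a header plus a variable-length array. */
struct leds {
        unsigned int num;
        int brightness[];       /* C99 flexible array member */
};

static size_t sizeof_leds(unsigned int n)
{
        return sizeof(struct leds) + n * sizeof(int);
}

int main(void)
{
        struct leds *l = calloc(1, sizeof_leds(4));     /* header + 4 entries */

        if (!l)
                return 1;
        l->num = 4;
        printf("%zu bytes for 4 entries\n", sizeof_leds(4));
        free(l);
        return 0;
}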
+
+static int is31fl32xx_parse_child_dt(const struct device *dev,
+ const struct device_node *child,
+ struct is31fl32xx_led_data *led_data)
+{
+ struct led_classdev *cdev = &led_data->cdev;
+ int ret = 0;
+ u32 reg;
+
+ if (of_property_read_string(child, "label", &cdev->name))
+ cdev->name = child->name;
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret || reg < 1 || reg > led_data->priv->cdef->channels) {
+ dev_err(dev,
+ "Child node %s does not have a valid reg property\n",
+ child->full_name);
+ return -EINVAL;
+ }
+ led_data->channel = reg;
+
+ of_property_read_string(child, "linux,default-trigger",
+ &cdev->default_trigger);
+
+ cdev->brightness_set_blocking = is31fl32xx_brightness_set;
+
+ return 0;
+}
+
+static struct is31fl32xx_led_data *is31fl32xx_find_led_data(
+ struct is31fl32xx_priv *priv,
+ u8 channel)
+{
+ size_t i;
+
+ for (i = 0; i < priv->num_leds; i++) {
+ if (priv->leds[i].channel == channel)
+ return &priv->leds[i];
+ }
+
+ return NULL;
+}
+
+static int is31fl32xx_parse_dt(struct device *dev,
+ struct is31fl32xx_priv *priv)
+{
+ struct device_node *child;
+ int ret = 0;
+
+ for_each_child_of_node(dev->of_node, child) {
+ struct is31fl32xx_led_data *led_data =
+ &priv->leds[priv->num_leds];
+ const struct is31fl32xx_led_data *other_led_data;
+
+ led_data->priv = priv;
+
+ ret = is31fl32xx_parse_child_dt(dev, child, led_data);
+ if (ret)
+ goto err;
+
+ /* Detect if channel is already in use by another child */
+ other_led_data = is31fl32xx_find_led_data(priv,
+ led_data->channel);
+ if (other_led_data) {
+ dev_err(dev,
+ "%s and %s both attempting to use channel %d\n",
+ led_data->cdev.name,
+ other_led_data->cdev.name,
+ led_data->channel);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = devm_led_classdev_register(dev, &led_data->cdev);
+ if (ret) {
+ dev_err(dev, "failed to register PWM led for %s: %d\n",
+ led_data->cdev.name, ret);
+ goto err;
+ }
+
+ priv->num_leds++;
+ }
+
+ return 0;
+
+err:
+ of_node_put(child);
+ return ret;
+}
+
+static const struct of_device_id of_is31fl31xx_match[] = {
+ { .compatible = "issi,is31fl3236", .data = &is31fl3236_cdef, },
+ { .compatible = "issi,is31fl3235", .data = &is31fl3235_cdef, },
+ { .compatible = "issi,is31fl3218", .data = &is31fl3218_cdef, },
+ { .compatible = "si-en,sn3218", .data = &is31fl3218_cdef, },
+ { .compatible = "issi,is31fl3216", .data = &is31fl3216_cdef, },
+ { .compatible = "si-en,sn3216", .data = &is31fl3216_cdef, },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_is31fl31xx_match);
+
+static int is31fl32xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct is31fl32xx_chipdef *cdef;
+ const struct of_device_id *of_dev_id;
+ struct device *dev = &client->dev;
+ struct is31fl32xx_priv *priv;
+ int count;
+ int ret = 0;
+
+ of_dev_id = of_match_device(of_is31fl31xx_match, dev);
+ if (!of_dev_id)
+ return -EINVAL;
+
+ cdef = of_dev_id->data;
+
+ count = of_get_child_count(dev->of_node);
+ if (!count)
+ return -EINVAL;
+
+ priv = devm_kzalloc(dev, sizeof_is31fl32xx_priv(count),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ priv->cdef = cdef;
+ i2c_set_clientdata(client, priv);
+
+ ret = is31fl32xx_init_regs(priv);
+ if (ret)
+ return ret;
+
+ ret = is31fl32xx_parse_dt(dev, priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int is31fl32xx_remove(struct i2c_client *client)
+{
+ struct is31fl32xx_priv *priv = i2c_get_clientdata(client);
+
+ return is31fl32xx_reset_regs(priv);
+}
+
+/*
+ * i2c-core requires that id_table be non-NULL, even though
+ * it is not used for DeviceTree based instantiation.
+ */
+static const struct i2c_device_id is31fl31xx_id[] = {
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, is31fl31xx_id);
+
+static struct i2c_driver is31fl32xx_driver = {
+ .driver = {
+ .name = "is31fl32xx",
+ .of_match_table = of_is31fl31xx_match,
+ },
+ .probe = is31fl32xx_probe,
+ .remove = is31fl32xx_remove,
+ .id_table = is31fl31xx_id,
+};
+
+module_i2c_driver(is31fl32xx_driver);
+
+MODULE_AUTHOR("David Rivshin <drivshin@allworx.com>");
+MODULE_DESCRIPTION("ISSI IS31FL32xx LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index 196dcb5e6004..5b529dc013d2 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -698,7 +698,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, led);
- ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+ ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev);
if (ret) {
dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
return ret;
@@ -708,18 +708,13 @@ static int lm3533_led_probe(struct platform_device *pdev)
ret = lm3533_led_setup(led, pdata);
if (ret)
- goto err_unregister;
+ return ret;
ret = lm3533_ctrlbank_enable(&led->cb);
if (ret)
- goto err_unregister;
+ return ret;
return 0;
-
-err_unregister:
- led_classdev_unregister(&led->cdev);
-
- return ret;
}
static int lm3533_led_remove(struct platform_device *pdev)
@@ -729,7 +724,6 @@ static int lm3533_led_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s\n", __func__);
lm3533_ctrlbank_disable(&led->cb);
- led_classdev_unregister(&led->cdev);
return 0;
}
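
The conversions in this and the following LED drivers all follow one pattern: devm_led_classdev_register() ties the classdev's lifetime to the device, so error paths and .remove callbacks that existed only to undo the registration can be dropped. A minimal sketch of the probe shape these drivers converge on ('foo' and the field choices are hypothetical):

#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Sketch only, not a real driver. */
struct foo_led {
        struct led_classdev cdev;
};

static int foo_led_probe(struct platform_device *pdev)
{
        struct foo_led *led;

        led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
        if (!led)
                return -ENOMEM;

        led->cdev.name = "foo::status";

        /*
         * devm: unregistered automatically on driver detach, so no
         * error-path unregister and no .remove callback are needed.
         */
        return devm_led_classdev_register(&pdev->dev, &led->cdev);
}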
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 6c758aea1bbd..be60c181222a 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -199,8 +199,11 @@ static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
if (status > LP3944_LED_STATUS_DIM1)
return -EINVAL;
- /* invert only 0 and 1, leave unchanged the other values,
- * remember we are abusing status to set blink patterns
+ /*
+ * Invert status only when it's < 2 (i.e. 0 or 1), which means it's
+ * directly controlling the on/off state.
+ * When status is >= 2 instead, don't invert it, because that would
+ * interfere with the hardware blinking mode.
*/
if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
status = 1 - status;
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 0eee38fc0565..38c253a43700 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -146,15 +146,13 @@ static int lp8788_led_probe(struct platform_device *pdev)
mutex_init(&led->lock);
- platform_set_drvdata(pdev, led);
-
ret = lp8788_led_init_device(led, led_pdata);
if (ret) {
dev_err(dev, "led init device err: %d\n", ret);
return ret;
}
- ret = led_classdev_register(dev, &led->led_dev);
+ ret = devm_led_classdev_register(dev, &led->led_dev);
if (ret) {
dev_err(dev, "led register err: %d\n", ret);
return ret;
@@ -163,18 +161,8 @@ static int lp8788_led_probe(struct platform_device *pdev)
return 0;
}
-static int lp8788_led_remove(struct platform_device *pdev)
-{
- struct lp8788_led *led = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&led->led_dev);
-
- return 0;
-}
-
static struct platform_driver lp8788_led_driver = {
.probe = lp8788_led_probe,
- .remove = lp8788_led_remove,
.driver = {
.name = LP8788_DEV_KEYLED,
},
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index 01b459069358..4edf74f1d6d4 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -281,30 +281,18 @@ static int max8997_led_probe(struct platform_device *pdev)
mutex_init(&led->mutex);
- platform_set_drvdata(pdev, led);
-
- ret = led_classdev_register(&pdev->dev, &led->cdev);
+ ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
return ret;
return 0;
}
-static int max8997_led_remove(struct platform_device *pdev)
-{
- struct max8997_led *led = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&led->cdev);
-
- return 0;
-}
-
static struct platform_driver max8997_led_driver = {
.driver = {
.name = "max8997-led",
},
.probe = max8997_led_probe,
- .remove = max8997_led_remove,
};
module_platform_driver(max8997_led_driver);
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 83641a7b299a..404da451cb88 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -29,11 +29,6 @@ struct s3c24xx_gpio_led {
struct s3c24xx_led_platdata *pdata;
};
-static inline struct s3c24xx_gpio_led *pdev_to_gpio(struct platform_device *dev)
-{
- return platform_get_drvdata(dev);
-}
-
static inline struct s3c24xx_gpio_led *to_gpio(struct led_classdev *led_cdev)
{
return container_of(led_cdev, struct s3c24xx_gpio_led, cdev);
@@ -59,15 +54,6 @@ static void s3c24xx_led_set(struct led_classdev *led_cdev,
}
}
-static int s3c24xx_led_remove(struct platform_device *dev)
-{
- struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
-
- led_classdev_unregister(&led->cdev);
-
- return 0;
-}
-
static int s3c24xx_led_probe(struct platform_device *dev)
{
struct s3c24xx_led_platdata *pdata = dev_get_platdata(&dev->dev);
@@ -79,8 +65,6 @@ static int s3c24xx_led_probe(struct platform_device *dev)
if (!led)
return -ENOMEM;
- platform_set_drvdata(dev, led);
-
led->cdev.brightness_set = s3c24xx_led_set;
led->cdev.default_trigger = pdata->def_trigger;
led->cdev.name = pdata->name;
@@ -104,7 +88,7 @@ static int s3c24xx_led_probe(struct platform_device *dev)
/* register our new led device */
- ret = led_classdev_register(&dev->dev, &led->cdev);
+ ret = devm_led_classdev_register(&dev->dev, &led->cdev);
if (ret < 0)
dev_err(&dev->dev, "led_classdev_register failed\n");
@@ -113,7 +97,6 @@ static int s3c24xx_led_probe(struct platform_device *dev)
static struct platform_driver s3c24xx_led_driver = {
.probe = s3c24xx_led_probe,
- .remove = s3c24xx_led_remove,
.driver = {
.name = "s3c24xx_led",
},
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 64a22263e7fc..be93b20e792a 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -239,7 +239,6 @@ static int wm831x_status_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- platform_set_drvdata(pdev, drvdata);
drvdata->wm831x = wm831x;
drvdata->reg = res->start;
@@ -284,7 +283,7 @@ static int wm831x_status_probe(struct platform_device *pdev)
drvdata->cdev.blink_set = wm831x_status_blink_set;
drvdata->cdev.groups = wm831x_status_groups;
- ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
+ ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
return ret;
@@ -293,21 +292,11 @@ static int wm831x_status_probe(struct platform_device *pdev)
return 0;
}
-static int wm831x_status_remove(struct platform_device *pdev)
-{
- struct wm831x_status *drvdata = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&drvdata->cdev);
-
- return 0;
-}
-
static struct platform_driver wm831x_status_driver = {
.driver = {
.name = "wm831x-status",
},
.probe = wm831x_status_probe,
- .remove = wm831x_status_remove,
};
module_platform_driver(wm831x_status_driver);
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index eb934b0242e0..67392b6ab845 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -331,7 +331,7 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
* Actually now I think of it, it's possible that Ron *is* half the Plan 9
* userbase. Oh well.
*/
-static bool could_be_syscall(unsigned int num)
+bool could_be_syscall(unsigned int num)
{
/* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
*
* This routine indicates if a particular trap number could be delivered
* directly.
+ *
+ * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
+ * trap gate for syscalls, so this trick is ineffective. See Mastery for
+ * how we could do this anyway...
*/
static bool direct_trap(unsigned int num)
{
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index ac8ad0461e80..69b3814afd2f 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -167,6 +167,7 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
bool send_notify_to_eventfd(struct lg_cpu *cpu);
void init_clockdev(struct lg_cpu *cpu);
bool check_syscall_vector(struct lguest *lg);
+bool could_be_syscall(unsigned int num);
int init_interrupts(void);
void free_interrupts(void);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6a4cd771a2be..adc162c7040d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -429,8 +429,12 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
return;
break;
case 32 ... 255:
+ /* This might be a syscall. */
+ if (could_be_syscall(cpu->regs->trapnum))
+ break;
+
/*
- * These values mean a real interrupt occurred, in which case
+ * Other values mean a real interrupt occurred, in which case
* the Host handler has already been run. We just do a
* friendly check if another process should now be run, then
* return to run the Guest again.
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 9f6acd5d1d2e..0dc9a80adb94 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -250,7 +250,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
return 0;
}
- plane_cnt = (1 << dev->plane_mode);
+ plane_cnt = dev->plane_mode;
rqd->nr_pages = plane_cnt * nr_ppas;
if (dev->ops->max_phys_sect < rqd->nr_pages)
@@ -463,13 +463,14 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
- dev->total_blocks = dev->nr_planes *
- dev->blks_per_lun *
- dev->luns_per_chnl *
- dev->nr_chnls;
- dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
+ dev->total_secs = dev->nr_luns * dev->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dev->lun_map)
+ return -ENOMEM;
INIT_LIST_HEAD(&dev->online_targets);
mutex_init(&dev->mlock);
+ spin_lock_init(&dev->lock);
return 0;
}
@@ -589,6 +590,7 @@ int nvm_register(struct request_queue *q, char *disk_name,
return 0;
err_init:
+ kfree(dev->lun_map);
kfree(dev);
return ret;
}
@@ -611,6 +613,7 @@ void nvm_unregister(char *disk_name)
up_write(&nvm_lock);
nvm_exit(dev);
+ kfree(dev->lun_map);
kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
@@ -872,20 +875,19 @@ static int nvm_configure_by_str_event(const char *val,
static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
- int sz = 0;
- char *buf_start = buf;
+ int sz;
struct nvm_dev *dev;
- buf += sprintf(buf, "available devices:\n");
+ sz = sprintf(buf, "available devices:\n");
down_write(&nvm_lock);
list_for_each_entry(dev, &nvm_devices, devices) {
- if (sz > 4095 - DISK_NAME_LEN)
+ if (sz > 4095 - DISK_NAME_LEN - 2)
break;
- buf += sprintf(buf, " %32s\n", dev->name);
+ sz += sprintf(buf + sz, " %32s\n", dev->name);
}
up_write(&nvm_lock);
- return buf - buf_start - 1;
+ return sz;
}
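
The hunk above replaces pointer arithmetic with an explicit running byte count; a standalone sketch of that append pattern (device names hypothetical, sizes matching the 4 KiB sysfs page and 32-byte disk name used here):

#include <stdio.h>

#define PAGE_LEN 4096
#define NAME_LEN 32

int main(void)
{
        char page[PAGE_LEN];
        const char *devs[] = { "nvm0", "nvm1" };
        int sz, i;

        sz = sprintf(page, "available devices:\n");
        for (i = 0; i < 2; i++) {
                /* stop while " %32s\n" plus the NUL still fits in the page */
                if (sz > PAGE_LEN - 1 - NAME_LEN - 2)
                        break;
                sz += sprintf(page + sz, " %32s\n", devs[i]);
        }
        fputs(page, stdout);
        printf("returned length: %d\n", sz);
        return 0;
}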
static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 7fb725b16148..72e124a3927d 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,68 @@
#include "gennvm.h"
+static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area, *prev, *next;
+ sector_t begin = 0;
+ sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
+
+ if (len > max_sectors)
+ return -EINVAL;
+
+ area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+ if (!area)
+ return -ENOMEM;
+
+ prev = NULL;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(next, &gn->area_list, list) {
+ if (begin + len > next->begin) {
+ begin = next->end;
+ prev = next;
+ continue;
+ }
+ break;
+ }
+
+ if ((begin + len) > max_sectors) {
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return -EINVAL;
+ }
+
+ area->begin = *lba = begin;
+ area->end = begin + len;
+
+ if (prev) /* insert into sorted order */
+ list_add(&area->list, &prev->list);
+ else
+ list_add(&area->list, &gn->area_list);
+ spin_unlock(&dev->lock);
+
+ return 0;
+}
+
+static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(area, &gn->area_list, list) {
+ if (area->begin != begin)
+ continue;
+
+ list_del(&area->list);
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return;
+ }
+ spin_unlock(&dev->lock);
+}
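
For reference (not part of the patch): gennvm_get_area() above is a first-fit scan over a sorted, non-overlapping interval list. A standalone sketch of the same scan using plain arrays instead of list_head:

#include <stdio.h>

struct area { unsigned long begin, end; };  /* end is excluded */

/* First gap of at least 'len' sectors before 'max'; -1 if none fits. */
static long first_fit(const struct area *used, int n,
                      unsigned long len, unsigned long max)
{
        unsigned long begin = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (begin + len > used[i].begin) {
                        begin = used[i].end;    /* gap too small, skip past */
                        continue;
                }
                break;                          /* fits before used[i] */
        }
        return begin + len > max ? -1 : (long)begin;
}

int main(void)
{
        const struct area used[] = { { 0, 100 }, { 150, 300 } };

        printf("%ld\n", first_fit(used, 2, 50, 1000));  /* 100: fits in [100,150) */
        printf("%ld\n", first_fit(used, 2, 60, 1000));  /* 300: gap too small */
        return 0;
}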
+
static void gennvm_blocks_free(struct nvm_dev *dev)
{
struct gen_nvm *gn = dev->mp;
@@ -100,14 +162,13 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
struct nvm_dev *dev = private;
struct gen_nvm *gn = dev->mp;
- sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
u64 elba = slba + nlb;
struct gen_lun *lun;
struct nvm_block *blk;
u64 i;
int lun_id;
- if (unlikely(elba > dev->total_pages)) {
+ if (unlikely(elba > dev->total_secs)) {
pr_err("gennvm: L2P data from device is out of bounds!\n");
return -EINVAL;
}
@@ -115,7 +176,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
for (i = 0; i < nlb; i++) {
u64 pba = le64_to_cpu(entries[i]);
- if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+ if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
pr_err("gennvm: L2P data entry is out of bounds!\n");
return -EINVAL;
}
@@ -196,8 +257,8 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
}
}
- if (dev->ops->get_l2p_tbl) {
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
+ if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
+ ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
gennvm_block_map, dev);
if (ret) {
pr_err("gennvm: could not read L2P table.\n");
@@ -230,6 +291,7 @@ static int gennvm_register(struct nvm_dev *dev)
gn->dev = dev;
gn->nr_luns = dev->nr_luns;
+ INIT_LIST_HEAD(&gn->area_list);
dev->mp = gn;
ret = gennvm_luns_init(dev, gn);
@@ -420,10 +482,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
return nvm_erase_ppa(dev, &addr, 1);
}
+static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
+{
+ return test_and_set_bit(lunid, dev->lun_map);
+}
+
+static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
+{
+ WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+}
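
The reserve/release pair above leans on test_and_set_bit() returning the bit's old value: 0 means the caller won the reservation. A userspace sketch of that contract (non-atomic here; the kernel primitives are atomic):

#include <stdio.h>

static int test_and_set_bit_(unsigned int nr, unsigned long *map)
{
        unsigned long mask = 1UL << (nr % (8 * sizeof(long)));
        unsigned long *p = map + nr / (8 * sizeof(long));
        int old = !!(*p & mask);

        *p |= mask;
        return old;
}

int main(void)
{
        unsigned long lun_map[1] = { 0 };

        printf("%d\n", test_and_set_bit_(3, lun_map)); /* 0: reservation won */
        printf("%d\n", test_and_set_bit_(3, lun_map)); /* 1: already taken */
        return 0;
}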
+
static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
struct gen_nvm *gn = dev->mp;
+ if (unlikely(lunid >= dev->nr_luns))
+ return NULL;
+
return &gn->luns[lunid].vlun;
}
@@ -465,7 +540,13 @@ static struct nvmm_type gennvm = {
.erase_blk = gennvm_erase_blk,
.get_lun = gennvm_get_lun,
+ .reserve_lun = gennvm_reserve_lun,
+ .release_lun = gennvm_release_lun,
.lun_info_print = gennvm_lun_info_print,
+
+ .get_area = gennvm_get_area,
+ .put_area = gennvm_put_area,
+
};
static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 9c24b5b32dac..04d7c23cfc61 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -39,8 +39,14 @@ struct gen_nvm {
int nr_luns;
struct gen_lun *luns;
+ struct list_head area_list;
};
+struct gennvm_area {
+ struct list_head list;
+ sector_t begin;
+ sector_t end; /* end is excluded */
+};
#define gennvm_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 307db1ea22de..3ab6495c3fd8 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -38,7 +38,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+ div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
rblk->nr_invalid_pages++;
@@ -113,14 +113,24 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- return (rblk->next_page == rrpc->dev->pgs_per_blk);
+ return (rblk->next_page == rrpc->dev->sec_per_blk);
}
+/* Calculate relative addr for the given block, considering instantiated LUNs */
+static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+ struct nvm_block *blk = rblk->parent;
+ int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
+
+ return lun_blk * rrpc->dev->sec_per_blk;
+}
+
+/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
struct nvm_block *blk = rblk->parent;
- return blk->id * rrpc->dev->pgs_per_blk;
+ return blk->id * rrpc->dev->sec_per_blk;
}
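
To make the relative-vs-global distinction above concrete (illustrative only, with an assumed toy geometry rather than real device parameters):

#include <stdio.h>

/* Assumed toy geometry: 4 blocks per LUN, 2 LUNs owned by this rrpc
 * instance, 8 sectors per block. */
#define BLKS_PER_LUN    4
#define NR_LUNS         2
#define SEC_PER_BLK     8

static unsigned long block_to_addr(unsigned long blk_id)
{
        return blk_id * SEC_PER_BLK;            /* device-global address */
}

static unsigned long block_to_rel_addr(unsigned long blk_id)
{
        /* offset within the LUN span owned by this rrpc instance */
        return (blk_id % (BLKS_PER_LUN * NR_LUNS)) * SEC_PER_BLK;
}

int main(void)
{
        /* block 9 wraps into the instance-local address space */
        printf("global %lu, relative %lu\n",
               block_to_addr(9), block_to_rel_addr(9));  /* 72, 8 */
        return 0;
}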
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
@@ -136,7 +146,7 @@ static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
l.g.sec = secs;
sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+ div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
l.g.pg = pgs;
sector_div(ppa, dev->pgs_per_blk);
@@ -191,12 +201,12 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
return NULL;
}
- rblk = &rlun->blocks[blk->id];
+ rblk = rrpc_get_rblk(rlun, blk->id);
list_add_tail(&rblk->list, &rlun->open_list);
spin_unlock(&lun->lock);
blk->priv = rblk;
- bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
+ bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
rblk->next_page = 0;
rblk->nr_invalid_pages = 0;
atomic_set(&rblk->data_cmnt_size, 0);
@@ -286,11 +296,11 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
struct bio *bio;
struct page *page;
int slot;
- int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
+ int nr_sec_per_blk = rrpc->dev->sec_per_blk;
u64 phys_addr;
DECLARE_COMPLETION_ONSTACK(wait);
- if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
+ if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
return 0;
bio = bio_alloc(GFP_NOIO, 1);
@@ -306,10 +316,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
}
while ((slot = find_first_zero_bit(rblk->invalid_pages,
- nr_pgs_per_blk)) < nr_pgs_per_blk) {
+ nr_sec_per_blk)) < nr_sec_per_blk) {
/* Lock laddr */
- phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+ phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
try:
spin_lock(&rrpc->rev_lock);
@@ -381,7 +391,7 @@ finished:
mempool_free(page, rrpc->page_pool);
bio_put(bio);
- if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
+ if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
pr_err("nvm: failed to garbage collect block\n");
return -EIO;
}
@@ -499,12 +509,21 @@ static void rrpc_gc_queue(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct nvm_lun *lun = rblk->parent->lun;
+ struct nvm_block *blk = rblk->parent;
struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
spin_unlock(&rlun->lock);
+ spin_lock(&lun->lock);
+ lun->nr_open_blocks--;
+ lun->nr_closed_blocks++;
+ blk->state &= ~NVM_BLK_ST_OPEN;
+ blk->state |= NVM_BLK_ST_CLOSED;
+ list_move_tail(&rblk->list, &rlun->closed_list);
+ spin_unlock(&lun->lock);
+
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
rblk->parent->id);
@@ -545,7 +564,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
struct rrpc_addr *gp;
struct rrpc_rev_addr *rev;
- BUG_ON(laddr >= rrpc->nr_pages);
+ BUG_ON(laddr >= rrpc->nr_sects);
gp = &rrpc->trans_map[laddr];
spin_lock(&rrpc->rev_lock);
@@ -668,20 +687,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
lun = rblk->parent->lun;
cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
- struct nvm_block *blk = rblk->parent;
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&lun->lock);
- lun->nr_open_blocks--;
- lun->nr_closed_blocks++;
- blk->state &= ~NVM_BLK_ST_OPEN;
- blk->state |= NVM_BLK_ST_CLOSED;
- list_move_tail(&rblk->list, &rlun->closed_list);
- spin_unlock(&lun->lock);
-
+ if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
rrpc_run_gc(rrpc, rblk);
- }
}
}
@@ -726,7 +733,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
for (i = 0; i < npages; i++) {
/* We assume that mapping occurs at 4KB granularity */
- BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
+ BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
gp = &rrpc->trans_map[laddr + i];
if (gp->rblk) {
@@ -757,7 +764,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
return NVM_IO_REQUEUE;
- BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
+ BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
gp = &rrpc->trans_map[laddr];
if (gp->rblk) {
@@ -958,25 +965,11 @@ static void rrpc_requeue(struct work_struct *work)
static void rrpc_gc_free(struct rrpc *rrpc)
{
- struct rrpc_lun *rlun;
- int i;
-
if (rrpc->krqd_wq)
destroy_workqueue(rrpc->krqd_wq);
if (rrpc->kgc_wq)
destroy_workqueue(rrpc->kgc_wq);
-
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- if (!rlun->blocks)
- break;
- vfree(rlun->blocks);
- }
}
static int rrpc_gc_init(struct rrpc *rrpc)
@@ -1007,21 +1000,21 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
struct nvm_dev *dev = rrpc->dev;
struct rrpc_addr *addr = rrpc->trans_map + slba;
struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
u64 elba = slba + nlb;
u64 i;
- if (unlikely(elba > dev->total_pages)) {
+ if (unlikely(elba > dev->total_secs)) {
pr_err("nvm: L2P data from device is out of bounds!\n");
return -EINVAL;
}
for (i = 0; i < nlb; i++) {
u64 pba = le64_to_cpu(entries[i]);
+ unsigned int mod;
/* LNVM treats address-spaces as silos, LBA and PBA are
* equally large and zero-indexed.
*/
- if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+ if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
pr_err("nvm: L2P data entry is out of bounds!\n");
return -EINVAL;
}
@@ -1033,8 +1026,10 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
if (!pba)
continue;
+ div_u64_rem(pba, rrpc->nr_sects, &mod);
+
addr[i].addr = pba;
- raddr[pba].addr = slba + i;
+ raddr[mod].addr = slba + i;
}
return 0;
@@ -1044,18 +1039,21 @@ static int rrpc_map_init(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
sector_t i;
+ u64 slba;
int ret;
- rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
+ slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
+
+ rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
if (!rrpc->trans_map)
return -ENOMEM;
rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_pages);
+ * rrpc->nr_sects);
if (!rrpc->rev_trans_map)
return -ENOMEM;
- for (i = 0; i < rrpc->nr_pages; i++) {
+ for (i = 0; i < rrpc->nr_sects; i++) {
struct rrpc_addr *p = &rrpc->trans_map[i];
struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
@@ -1067,8 +1065,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
- rrpc_l2p_update, rrpc);
+ ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update,
+ rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1077,7 +1075,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
}
-
/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64
@@ -1132,6 +1129,23 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvm_lun *lun;
+ struct rrpc_lun *rlun;
+ int i;
+
+ if (!rrpc->luns)
+ return;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ rlun = &rrpc->luns[i];
+ lun = rlun->parent;
+ if (!lun)
+ break;
+ dev->mt->release_lun(dev, lun->id);
+ vfree(rlun->blocks);
+ }
+
kfree(rrpc->luns);
}
@@ -1139,9 +1153,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
struct nvm_dev *dev = rrpc->dev;
struct rrpc_lun *rlun;
- int i, j;
+ int i, j, ret = -EINVAL;
- if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+ if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
pr_err("rrpc: number of pages per block too high.");
return -EINVAL;
}
@@ -1155,25 +1169,26 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
/* 1:1 mapping */
for (i = 0; i < rrpc->nr_luns; i++) {
- struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
+ int lunid = lun_begin + i;
+ struct nvm_lun *lun;
- rlun = &rrpc->luns[i];
- rlun->rrpc = rrpc;
- rlun->parent = lun;
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->open_list);
- INIT_LIST_HEAD(&rlun->closed_list);
-
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
+ if (dev->mt->reserve_lun(dev, lunid)) {
+ pr_err("rrpc: lun %u is already allocated\n", lunid);
+ goto err;
+ }
- rrpc->total_blocks += dev->blks_per_lun;
- rrpc->nr_pages += dev->sec_per_lun;
+ lun = dev->mt->get_lun(dev, lunid);
+ if (!lun)
+ goto err;
+ rlun = &rrpc->luns[i];
+ rlun->parent = lun;
rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
rrpc->dev->blks_per_lun);
- if (!rlun->blocks)
+ if (!rlun->blocks) {
+ ret = -ENOMEM;
goto err;
+ }
for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
struct rrpc_block *rblk = &rlun->blocks[j];
@@ -1184,11 +1199,43 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
INIT_LIST_HEAD(&rblk->prio);
spin_lock_init(&rblk->lock);
}
+
+ rlun->rrpc = rrpc;
+ INIT_LIST_HEAD(&rlun->prio_list);
+ INIT_LIST_HEAD(&rlun->open_list);
+ INIT_LIST_HEAD(&rlun->closed_list);
+
+ INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+ spin_lock_init(&rlun->lock);
+
+ rrpc->total_blocks += dev->blks_per_lun;
+ rrpc->nr_sects += dev->sec_per_lun;
+
}
return 0;
err:
- return -ENOMEM;
+ return ret;
+}
+
+/* returns 0 on success and stores the beginning address in *begin */
+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+ sector_t size = rrpc->nr_sects * dev->sec_size;
+
+ size >>= 9;
+
+ return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+
+ mt->put_area(dev, rrpc->soffset);
}
static void rrpc_free(struct rrpc *rrpc)
@@ -1197,6 +1244,7 @@ static void rrpc_free(struct rrpc *rrpc)
rrpc_map_free(rrpc);
rrpc_core_free(rrpc);
rrpc_luns_free(rrpc);
+ rrpc_area_free(rrpc);
kfree(rrpc);
}
@@ -1221,9 +1269,9 @@ static sector_t rrpc_capacity(void *private)
/* cur, gc, and two emergency blocks for each lun */
reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
- provisioned = rrpc->nr_pages - reserved;
+ provisioned = rrpc->nr_sects - reserved;
- if (reserved > rrpc->nr_pages) {
+ if (reserved > rrpc->nr_sects) {
pr_err("rrpc: not enough space available to expose storage.\n");
return 0;
}
@@ -1242,10 +1290,11 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
struct nvm_dev *dev = rrpc->dev;
int offset;
struct rrpc_addr *laddr;
- u64 paddr, pladdr;
+ u64 bpaddr, paddr, pladdr;
- for (offset = 0; offset < dev->pgs_per_blk; offset++) {
- paddr = block_to_addr(rrpc, rblk) + offset;
+ bpaddr = block_to_rel_addr(rrpc, rblk);
+ for (offset = 0; offset < dev->sec_per_blk; offset++) {
+ paddr = bpaddr + offset;
pladdr = rrpc->rev_trans_map[paddr].addr;
if (pladdr == ADDR_EMPTY)
@@ -1317,6 +1366,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
struct rrpc *rrpc;
+ sector_t soffset;
int ret;
if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1342,6 +1392,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
+ ret = rrpc_area_init(rrpc, &soffset);
+ if (ret < 0) {
+ pr_err("nvm: rrpc: could not initialize area\n");
+ return ERR_PTR(ret);
+ }
+ rrpc->soffset = soffset;
+
ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
@@ -1386,7 +1443,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
- rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
+ rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index f7b37336353f..2653484a3b40 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -97,6 +97,7 @@ struct rrpc {
struct nvm_dev *dev;
struct gendisk *disk;
+ sector_t soffset; /* logical sector offset */
u64 poffset; /* physical page offset */
int lun_offset;
@@ -104,7 +105,7 @@ struct rrpc {
struct rrpc_lun *luns;
/* calculated values */
- unsigned long long nr_pages;
+ unsigned long long nr_sects;
unsigned long total_blocks;
/* Write strategy variables. Move these into each for structure for each
@@ -156,6 +157,15 @@ struct rrpc_rev_addr {
u64 addr;
};
+static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
+ int blk_id)
+{
+ struct rrpc *rrpc = rlun->rrpc;
+ int lun_blk = blk_id % rrpc->dev->blks_per_lun;
+
+ return &rlun->blocks[lun_blk];
+}
+
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
@@ -206,7 +216,7 @@ static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
unsigned pages,
struct rrpc_inflight_rq *r)
{
- BUG_ON((laddr + pages) > rrpc->nr_pages);
+ BUG_ON((laddr + pages) > rrpc->nr_sects);
return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}
@@ -243,7 +253,7 @@ static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
uint8_t pages = rqd->nr_pages;
- BUG_ON((r->l_start + pages) > rrpc->nr_pages);
+ BUG_ON((r->l_start + pages) > rrpc->nr_sects);
rrpc_unlock_laddr(rrpc, r);
}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 4f12c6f01fe7..b6819f0fc608 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -31,7 +31,6 @@
#include <asm/macio.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#undef DEBUG
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index b2bbe8659bed..5305923752d2 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -43,6 +43,15 @@ config OMAP_MBOX_KFIFO_SIZE
This can also be changed at runtime (via the mbox_kfifo_size
module parameter).
+config ROCKCHIP_MBOX
+ bool "Rockchip Soc Intergrated Mailbox Support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ help
+ This driver provides support for inter-processor communication
+ between CPU cores and MCU processor on Some Rockchip SOCs.
+ Please check it that the Soc you use have Mailbox hardware.
+ Say Y here if you want to use the Rockchip Mailbox support.
+
config PCC
bool "Platform Communication Channel Driver"
depends on ACPI
@@ -78,6 +87,25 @@ config STI_MBOX
Mailbox implementation for STMicroelectonics family chips with
hardware for interprocessor communication.
+config TI_MESSAGE_MANAGER
+ tristate "Texas Instruments Message Manager Driver"
+ depends on ARCH_KEYSTONE
+ help
+ An implementation of the Message Manager slave driver for
+ Keystone architecture SoCs from Texas Instruments. Message
+ Manager is a communication entity found on a few of Texas
+ Instruments' Keystone architecture SoCs, and may be used for
+ communication between multiple processors within the SoC.
+ Select this driver if your platform has this hardware block.
+
+config HI6220_MBOX
+ tristate "Hi6220 Mailbox"
+ depends on ARCH_HISI
+ help
+ An implementation of the Hi6220 mailbox. It is used to send
+ messages between the application processors and the MCU. Say Y
+ here if you want to build the Hi6220 mailbox controller driver.
+
config MAILBOX_TEST
tristate "Mailbox Test Client"
depends on OF
@@ -86,4 +114,13 @@ config MAILBOX_TEST
Test client to help with testing new Controller driver
implementations.
+config XGENE_SLIMPRO_MBOX
+ tristate "APM SoC X-Gene SLIMpro Mailbox Controller"
+ depends on ARCH_XGENE
+ help
+ An implementation of the APM X-Gene Interprocessor Communication
+ Mailbox (IPCM) between the ARM 64-bit cores and SLIMpro controller.
+ It is used to send short messages between the ARM 64-bit cores and
+ the SLIMpro Management Engine, primarily for power management. Say
+ Y here if you want to use the APM X-Gene SLIMpro IPCM support.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 92435ef11f26..0be3e742bb7d 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
+obj-$(CONFIG_ROCKCHIP_MBOX) += rockchip-mailbox.o
+
obj-$(CONFIG_PCC) += pcc.o
obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
@@ -17,3 +19,9 @@ obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
obj-$(CONFIG_BCM2835_MBOX) += bcm2835-mailbox.o
obj-$(CONFIG_STI_MBOX) += mailbox-sti.o
+
+obj-$(CONFIG_TI_MESSAGE_MANAGER) += ti-msgmgr.o
+
+obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
+
+obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
new file mode 100644
index 000000000000..613722db5daf
--- /dev/null
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -0,0 +1,395 @@
+/*
+ * Hisilicon's Hi6220 mailbox driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MBOX_CHAN_MAX 32
+
+#define MBOX_TX 0x1
+
+/* Mailbox message length: 8 words */
+#define MBOX_MSG_LEN 8
+
+/* Mailbox Registers */
+#define MBOX_OFF(m) (0x40 * (m))
+#define MBOX_MODE_REG(m) (MBOX_OFF(m) + 0x0)
+#define MBOX_DATA_REG(m) (MBOX_OFF(m) + 0x4)
+
+#define MBOX_STATE_MASK (0xF << 4)
+#define MBOX_STATE_IDLE (0x1 << 4)
+#define MBOX_STATE_TX (0x2 << 4)
+#define MBOX_STATE_RX (0x4 << 4)
+#define MBOX_STATE_ACK (0x8 << 4)
+#define MBOX_ACK_CONFIG_MASK (0x1 << 0)
+#define MBOX_ACK_AUTOMATIC (0x1 << 0)
+#define MBOX_ACK_IRQ (0x0 << 0)
+
+/* IPC registers */
+#define ACK_INT_RAW_REG(i) ((i) + 0x400)
+#define ACK_INT_MSK_REG(i) ((i) + 0x404)
+#define ACK_INT_STAT_REG(i) ((i) + 0x408)
+#define ACK_INT_CLR_REG(i) ((i) + 0x40c)
+#define ACK_INT_ENA_REG(i) ((i) + 0x500)
+#define ACK_INT_DIS_REG(i) ((i) + 0x504)
+#define DST_INT_RAW_REG(i) ((i) + 0x420)
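
For orientation (not part of the patch): the macros above imply a 0x40-byte register window per mailbox slot, with the mode word at offset +0x0 and the eight 32-bit message words starting at +0x4. A small sketch that prints the layout for one slot:

#include <stdio.h>

#define MBOX_OFF(m)             (0x40 * (m))
#define MBOX_MODE_REG(m)        (MBOX_OFF(m) + 0x0)
#define MBOX_DATA_REG(m)        (MBOX_OFF(m) + 0x4)
#define MBOX_MSG_LEN            8

int main(void)
{
        unsigned int slot = 2, i;

        printf("slot %u mode @ 0x%02X\n", slot, MBOX_MODE_REG(slot)); /* 0x80 */
        for (i = 0; i < MBOX_MSG_LEN; i++)
                printf("word %u @ 0x%02X\n", i,
                       MBOX_DATA_REG(slot) + i * 4);    /* 0x84 .. 0xA0 */
        return 0;
}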
+
+
+struct hi6220_mbox_chan {
+
+ /*
+ * Description for channel's hardware info:
+ * - direction: tx or rx
+ * - dst irq: peer core's irq number
+ * - ack irq: local irq number
+ * - slot number
+ */
+ unsigned int dir, dst_irq, ack_irq;
+ unsigned int slot;
+
+ struct hi6220_mbox *parent;
+};
+
+struct hi6220_mbox {
+ struct device *dev;
+
+ int irq;
+
+ /* flag of enabling tx's irq mode */
+ bool tx_irq_mode;
+
+ /* region for ipc event */
+ void __iomem *ipc;
+
+ /* region for mailbox */
+ void __iomem *base;
+
+ unsigned int chan_num;
+ struct hi6220_mbox_chan *mchan;
+
+ void *irq_map_chan[MBOX_CHAN_MAX];
+ struct mbox_chan *chan;
+ struct mbox_controller controller;
+};
+
+static void mbox_set_state(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 status;
+
+ status = readl(mbox->base + MBOX_MODE_REG(slot));
+ status = (status & ~MBOX_STATE_MASK) | val;
+ writel(status, mbox->base + MBOX_MODE_REG(slot));
+}
+
+static void mbox_set_mode(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 mode;
+
+ mode = readl(mbox->base + MBOX_MODE_REG(slot));
+ mode = (mode & ~MBOX_ACK_CONFIG_MASK) | val;
+ writel(mode, mbox->base + MBOX_MODE_REG(slot));
+}
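
Both helpers above are the standard masked read-modify-write: clear the field, OR in the new value, leave the other bits untouched. A standalone sketch using the state macros from this file:

#include <stdio.h>

#define MBOX_STATE_MASK (0xF << 4)
#define MBOX_STATE_TX   (0x2 << 4)

int main(void)
{
        unsigned int reg = 0x13;        /* some prior state and mode bits */

        /* replace the 4-bit state field, keep the low mode bits */
        reg = (reg & ~MBOX_STATE_MASK) | MBOX_STATE_TX;
        printf("0x%02X\n", reg);        /* 0x23 */
        return 0;
}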
+
+static bool hi6220_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ u32 state;
+
+ /* Only set idle state for polling mode */
+ BUG_ON(mbox->tx_irq_mode);
+
+ state = readl(mbox->base + MBOX_MODE_REG(mchan->slot));
+ return ((state & MBOX_STATE_MASK) == MBOX_STATE_IDLE);
+}
+
+static int hi6220_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ unsigned int slot = mchan->slot;
+ u32 *buf = msg;
+ int i;
+
+ /* indicate as a TX channel */
+ mchan->dir = MBOX_TX;
+
+ mbox_set_state(mbox, slot, MBOX_STATE_TX);
+
+ if (mbox->tx_irq_mode)
+ mbox_set_mode(mbox, slot, MBOX_ACK_IRQ);
+ else
+ mbox_set_mode(mbox, slot, MBOX_ACK_AUTOMATIC);
+
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ writel(buf[i], mbox->base + MBOX_DATA_REG(slot) + i * 4);
+
+ /* trigger remote request */
+ writel(BIT(mchan->dst_irq), DST_INT_RAW_REG(mbox->ipc));
+ return 0;
+}
+
+static irqreturn_t hi6220_mbox_interrupt(int irq, void *p)
+{
+ struct hi6220_mbox *mbox = p;
+ struct hi6220_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int state, intr_bit, i;
+ u32 msg[MBOX_MSG_LEN];
+
+ state = readl(ACK_INT_STAT_REG(mbox->ipc));
+ if (!state) {
+ dev_warn(mbox->dev, "%s: spurious interrupt\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ while (state) {
+ intr_bit = __ffs(state);
+ state &= (state - 1);
+
+ chan = mbox->irq_map_chan[intr_bit];
+ if (!chan) {
+ dev_warn(mbox->dev, "%s: unexpected irq vector %d\n",
+ __func__, intr_bit);
+ continue;
+ }
+
+ mchan = chan->con_priv;
+ if (mchan->dir == MBOX_TX)
+ mbox_chan_txdone(chan, 0);
+ else {
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ msg[i] = readl(mbox->base +
+ MBOX_DATA_REG(mchan->slot) + i * 4);
+
+ mbox_chan_received_data(chan, (void *)msg);
+ }
+
+ /* clear IRQ source */
+ writel(BIT(mchan->ack_irq), ACK_INT_CLR_REG(mbox->ipc));
+ mbox_set_state(mbox, mchan->slot, MBOX_STATE_IDLE);
+ }
+
+ return IRQ_HANDLED;
+}
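
The interrupt loop above uses the usual lowest-set-bit walk: __ffs() finds the pending vector and state &= (state - 1) clears it before the next pass. A standalone sketch, using the GCC builtin in place of the kernel's __ffs():

#include <stdio.h>

int main(void)
{
        unsigned int state = 0x29;      /* bits 0, 3 and 5 pending */

        while (state) {
                unsigned int bit = __builtin_ctz(state); /* ~ kernel __ffs() */

                state &= state - 1;     /* clear lowest set bit */
                printf("servicing vector %u\n", bit);    /* 0, then 3, then 5 */
        }
        return 0;
}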
+
+static int hi6220_mbox_startup(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ mchan->dir = 0;
+
+ /* enable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_ENA_REG(mbox->ipc));
+ return 0;
+}
+
+static void hi6220_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ /* disable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_DIS_REG(mbox->ipc));
+ mbox->irq_map_chan[mchan->ack_irq] = NULL;
+}
+
+static struct mbox_chan_ops hi6220_mbox_ops = {
+ .send_data = hi6220_mbox_send_data,
+ .startup = hi6220_mbox_startup,
+ .shutdown = hi6220_mbox_shutdown,
+ .last_tx_done = hi6220_mbox_last_tx_done,
+};
+
+static struct mbox_chan *hi6220_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct hi6220_mbox *mbox = dev_get_drvdata(controller->dev);
+ struct hi6220_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int i = spec->args[0];
+ unsigned int dst_irq = spec->args[1];
+ unsigned int ack_irq = spec->args[2];
+
+ /* Bounds checking */
+ if (i >= mbox->chan_num || dst_irq >= mbox->chan_num ||
+ ack_irq >= mbox->chan_num) {
+ dev_err(mbox->dev,
+ "Invalid channel idx %d dst_irq %d ack_irq %d\n",
+ i, dst_irq, ack_irq);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Is requested channel free? */
+ chan = &mbox->chan[i];
+ if (mbox->irq_map_chan[ack_irq] == (void *)chan) {
+ dev_err(mbox->dev, "Channel in use\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ mchan = chan->con_priv;
+ mchan->dst_irq = dst_irq;
+ mchan->ack_irq = ack_irq;
+
+ mbox->irq_map_chan[ack_irq] = (void *)chan;
+ return chan;
+}
+
+static const struct of_device_id hi6220_mbox_of_match[] = {
+ { .compatible = "hisilicon,hi6220-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi6220_mbox_of_match);
+
+static int hi6220_mbox_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct hi6220_mbox *mbox;
+ struct resource *res;
+ int i, err;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->dev = dev;
+ mbox->chan_num = MBOX_CHAN_MAX;
+ mbox->mchan = devm_kzalloc(dev,
+ mbox->chan_num * sizeof(*mbox->mchan), GFP_KERNEL);
+ if (!mbox->mchan)
+ return -ENOMEM;
+
+ mbox->chan = devm_kzalloc(dev,
+ mbox->chan_num * sizeof(*mbox->chan), GFP_KERNEL);
+ if (!mbox->chan)
+ return -ENOMEM;
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mbox->ipc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->ipc)) {
+ dev_err(dev, "ioremap ipc failed\n");
+ return PTR_ERR(mbox->ipc);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ mbox->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->base)) {
+ dev_err(dev, "ioremap buffer failed\n");
+ return PTR_ERR(mbox->base);
+ }
+
+ err = devm_request_irq(dev, mbox->irq, hi6220_mbox_interrupt, 0,
+ dev_name(dev), mbox);
+ if (err) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ err);
+ return -ENODEV;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.chans = &mbox->chan[0];
+ mbox->controller.num_chans = mbox->chan_num;
+ mbox->controller.ops = &hi6220_mbox_ops;
+ mbox->controller.of_xlate = hi6220_mbox_xlate;
+
+ for (i = 0; i < mbox->chan_num; i++) {
+ mbox->chan[i].con_priv = &mbox->mchan[i];
+ mbox->irq_map_chan[i] = NULL;
+
+ mbox->mchan[i].parent = mbox;
+ mbox->mchan[i].slot = i;
+ }
+
+ /* mask and clear all interrupt vectors */
+ writel(0x0, ACK_INT_MSK_REG(mbox->ipc));
+ writel(~0x0, ACK_INT_CLR_REG(mbox->ipc));
+
+ /* use interrupt for tx's ack unless DT requests polled mode */
+ if (of_find_property(node, "hi6220,mbox-tx-noirq", NULL))
+ mbox->tx_irq_mode = false;
+ else
+ mbox->tx_irq_mode = true;
+
+ if (mbox->tx_irq_mode)
+ mbox->controller.txdone_irq = true;
+ else {
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+ }
+
+ err = mbox_controller_register(&mbox->controller);
+ if (err) {
+ dev_err(dev, "Failed to register mailbox %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "Mailbox enabled\n");
+ return 0;
+}
+
+static int hi6220_mbox_remove(struct platform_device *pdev)
+{
+ struct hi6220_mbox *mbox = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mbox->controller);
+ return 0;
+}
+
+static struct platform_driver hi6220_mbox_driver = {
+ .driver = {
+ .name = "hi6220-mbox",
+ .owner = THIS_MODULE,
+ .of_match_table = hi6220_mbox_of_match,
+ },
+ .probe = hi6220_mbox_probe,
+ .remove = hi6220_mbox_remove,
+};
+
+static int __init hi6220_mbox_init(void)
+{
+ return platform_driver_register(&hi6220_mbox_driver);
+}
+core_initcall(hi6220_mbox_init);
+
+static void __exit hi6220_mbox_exit(void)
+{
+ platform_driver_unregister(&hi6220_mbox_driver);
+}
+module_exit(hi6220_mbox_exit);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hi6220 mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 684ae17dcf39..58d04726cdd7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -31,7 +31,8 @@ static struct dentry *root_debugfs_dir;
struct mbox_test_device {
struct device *dev;
- void __iomem *mmio;
+ void __iomem *tx_mmio;
+ void __iomem *rx_mmio;
struct mbox_chan *tx_channel;
struct mbox_chan *rx_channel;
char *rx_buffer;
@@ -45,7 +46,6 @@ static ssize_t mbox_test_signal_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
- int ret;
if (!tdev->tx_channel) {
dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -59,17 +59,20 @@ static ssize_t mbox_test_signal_write(struct file *filp,
return -EINVAL;
}
- tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
- if (!tdev->signal)
- return -ENOMEM;
+ /* Only allocate memory if we need to */
+ if (!tdev->signal) {
+ tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+ if (!tdev->signal)
+ return -ENOMEM;
+ }
- ret = copy_from_user(tdev->signal, userbuf, count);
- if (ret) {
+ if (copy_from_user(tdev->signal, userbuf, count)) {
kfree(tdev->signal);
+ tdev->signal = NULL;
return -EFAULT;
}
- return ret < 0 ? ret : count;
+ return count;
}
static const struct file_operations mbox_test_signal_ops = {
@@ -112,16 +115,16 @@ static ssize_t mbox_test_message_write(struct file *filp,
* A separate signal is only of use if there is
* MMIO to subsequently pass the message through
*/
- if (tdev->mmio && tdev->signal) {
- print_hex_dump(KERN_INFO, "Client: Sending: Signal: ", DUMP_PREFIX_ADDRESS,
- MBOX_BYTES_PER_LINE, 1, tdev->signal, MBOX_MAX_SIG_LEN, true);
+ if (tdev->tx_mmio && tdev->signal) {
+ print_hex_dump_bytes("Client: Sending: Signal: ", DUMP_PREFIX_ADDRESS,
+ tdev->signal, MBOX_MAX_SIG_LEN);
data = tdev->signal;
} else
data = tdev->message;
- print_hex_dump(KERN_INFO, "Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
- MBOX_BYTES_PER_LINE, 1, tdev->message, MBOX_MAX_MSG_LEN, true);
+ print_hex_dump_bytes("Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
+ tdev->message, MBOX_MAX_MSG_LEN);
ret = mbox_send_message(tdev->tx_channel, data);
if (ret < 0)
@@ -220,15 +223,13 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
unsigned long flags;
spin_lock_irqsave(&tdev->lock, flags);
- if (tdev->mmio) {
- memcpy_fromio(tdev->rx_buffer, tdev->mmio, MBOX_MAX_MSG_LEN);
- print_hex_dump(KERN_INFO, "Client: Received [MMIO]: ",
- DUMP_PREFIX_ADDRESS, MBOX_BYTES_PER_LINE, 1,
- tdev->rx_buffer, MBOX_MAX_MSG_LEN, true);
+ if (tdev->rx_mmio) {
+ memcpy_fromio(tdev->rx_buffer, tdev->rx_mmio, MBOX_MAX_MSG_LEN);
+ print_hex_dump_bytes("Client: Received [MMIO]: ", DUMP_PREFIX_ADDRESS,
+ tdev->rx_buffer, MBOX_MAX_MSG_LEN);
} else if (message) {
- print_hex_dump(KERN_INFO, "Client: Received [API]: ",
- DUMP_PREFIX_ADDRESS, MBOX_BYTES_PER_LINE, 1,
- message, MBOX_MAX_MSG_LEN, true);
+ print_hex_dump_bytes("Client: Received [API]: ", DUMP_PREFIX_ADDRESS,
+ message, MBOX_MAX_MSG_LEN);
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
}
spin_unlock_irqrestore(&tdev->lock, flags);
@@ -238,11 +239,11 @@ static void mbox_test_prepare_message(struct mbox_client *client, void *message)
{
struct mbox_test_device *tdev = dev_get_drvdata(client->dev);
- if (tdev->mmio) {
+ if (tdev->tx_mmio) {
if (tdev->signal)
- memcpy_toio(tdev->mmio, tdev->message, MBOX_MAX_MSG_LEN);
+ memcpy_toio(tdev->tx_mmio, tdev->message, MBOX_MAX_MSG_LEN);
else
- memcpy_toio(tdev->mmio, message, MBOX_MAX_MSG_LEN);
+ memcpy_toio(tdev->tx_mmio, message, MBOX_MAX_MSG_LEN);
}
}
@@ -296,9 +297,15 @@ static int mbox_test_probe(struct platform_device *pdev)
/* It's okay for MMIO to be NULL */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdev->mmio = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tdev->mmio))
- tdev->mmio = NULL;
+ tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tdev->tx_mmio))
+ tdev->tx_mmio = NULL;
+
+ /* If specified, second reg entry is Rx MMIO */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tdev->rx_mmio))
+ tdev->rx_mmio = tdev->tx_mmio;
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
@@ -306,6 +313,10 @@ static int mbox_test_probe(struct platform_device *pdev)
if (!tdev->tx_channel && !tdev->rx_channel)
return -EPROBE_DEFER;
+ /* If no Rx channel was given but a separate Rx MMIO region exists, reuse the Tx channel for Rx */
+ if (!tdev->rx_channel && (tdev->rx_mmio != tdev->tx_mmio))
+ tdev->rx_channel = tdev->tx_channel;
+
tdev->dev = &pdev->dev;
platform_set_drvdata(pdev, tdev);
@@ -342,13 +353,13 @@ static int mbox_test_remove(struct platform_device *pdev)
}
static const struct of_device_id mbox_test_match[] = {
- { .compatible = "mailbox_test" },
+ { .compatible = "mailbox-test" },
{},
};
static struct platform_driver mbox_test_driver = {
.driver = {
- .name = "mailbox_sti_test",
+ .name = "mailbox_test",
.of_match_table = mbox_test_match,
},
.probe = mbox_test_probe,
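
Two things changed in the signal path above: the buffer is now allocated once and reused across writes, and on a failed copy_from_user() the pointer is both freed and cleared, so a later write cannot land in freed memory. A standalone sketch of that pattern (userspace stand-ins only; names and the copy_fails flag are invented for illustration, not from the patch):

#include <stdlib.h>
#include <string.h>

#define MAX_SIG_LEN 8		/* stands in for MBOX_MAX_SIG_LEN */

static char *signal_buf;	/* stands in for tdev->signal */

static int write_signal(const char *src, size_t count, int copy_fails)
{
        if (count > MAX_SIG_LEN)
                return -1;
        if (!signal_buf) {		/* only allocate on first use */
                signal_buf = calloc(1, MAX_SIG_LEN);
                if (!signal_buf)
                        return -1;	/* -ENOMEM in the driver */
        }
        if (copy_fails) {		/* models copy_from_user() failing */
                free(signal_buf);
                signal_buf = NULL;	/* crucial: next write re-allocates */
                return -1;		/* -EFAULT in the driver */
        }
        memcpy(signal_buf, src, count);
        return (int)count;
}

int main(void) { return write_signal("abc", 3, 0) == 3 ? 0 : 1; }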
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
new file mode 100644
index 000000000000..dd2afbca51c9
--- /dev/null
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -0,0 +1,284 @@
+/*
+ * APM X-Gene SLIMpro MailBox Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Feng Kan fkan@apm.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define MBOX_CON_NAME "slimpro-mbox"
+#define MBOX_REG_SET_OFFSET 0x1000
+#define MBOX_CNT 8
+#define MBOX_STATUS_AVAIL_MASK BIT(16)
+#define MBOX_STATUS_ACK_MASK BIT(0)
+
+/* Configuration and Status Registers */
+#define REG_DB_IN 0x00
+#define REG_DB_DIN0 0x04
+#define REG_DB_DIN1 0x08
+#define REG_DB_OUT 0x10
+#define REG_DB_DOUT0 0x14
+#define REG_DB_DOUT1 0x18
+#define REG_DB_STAT 0x20
+#define REG_DB_STATMASK 0x24
+
+/**
+ * X-Gene SlimPRO mailbox channel information
+ *
+ * @dev: Device to which it is attached
+ * @chan: Pointer to mailbox communication channel
+ * @reg: Base address to access channel registers
+ * @irq: Interrupt number of the channel
+ * @rx_msg: Received message storage
+ */
+struct slimpro_mbox_chan {
+ struct device *dev;
+ struct mbox_chan *chan;
+ void __iomem *reg;
+ int irq;
+ u32 rx_msg[3];
+};
+
+/**
+ * X-Gene SlimPRO Mailbox controller data
+ *
+ * X-Gene SlimPRO Mailbox controller has 8 communication channels.
+ * Each channel has a separate IRQ number assigned to it.
+ *
+ * @mb_ctrl: Representation of the communication channel controller
+ * @mc: Array of SlimPRO mailbox channels of the controller
+ * @chans: Array of mailbox communication channels
+ *
+ */
+struct slimpro_mbox {
+ struct mbox_controller mb_ctrl;
+ struct slimpro_mbox_chan mc[MBOX_CNT];
+ struct mbox_chan chans[MBOX_CNT];
+};
+
+static void mb_chan_send_msg(struct slimpro_mbox_chan *mb_chan, u32 *msg)
+{
+ writel(msg[1], mb_chan->reg + REG_DB_DOUT0);
+ writel(msg[2], mb_chan->reg + REG_DB_DOUT1);
+ writel(msg[0], mb_chan->reg + REG_DB_OUT);
+}
+
+static void mb_chan_recv_msg(struct slimpro_mbox_chan *mb_chan)
+{
+ mb_chan->rx_msg[1] = readl(mb_chan->reg + REG_DB_DIN0);
+ mb_chan->rx_msg[2] = readl(mb_chan->reg + REG_DB_DIN1);
+ mb_chan->rx_msg[0] = readl(mb_chan->reg + REG_DB_IN);
+}
+
+static int mb_chan_status_ack(struct slimpro_mbox_chan *mb_chan)
+{
+ u32 val = readl(mb_chan->reg + REG_DB_STAT);
+
+ if (val & MBOX_STATUS_ACK_MASK) {
+ writel(MBOX_STATUS_ACK_MASK, mb_chan->reg + REG_DB_STAT);
+ return 1;
+ }
+ return 0;
+}
+
+static int mb_chan_status_avail(struct slimpro_mbox_chan *mb_chan)
+{
+ u32 val = readl(mb_chan->reg + REG_DB_STAT);
+
+ if (val & MBOX_STATUS_AVAIL_MASK) {
+ mb_chan_recv_msg(mb_chan);
+ writel(MBOX_STATUS_AVAIL_MASK, mb_chan->reg + REG_DB_STAT);
+ return 1;
+ }
+ return 0;
+}
+
+static irqreturn_t slimpro_mbox_irq(int irq, void *id)
+{
+ struct slimpro_mbox_chan *mb_chan = id;
+
+ if (mb_chan_status_ack(mb_chan))
+ mbox_chan_txdone(mb_chan->chan, 0);
+
+ if (mb_chan_status_avail(mb_chan))
+ mbox_chan_received_data(mb_chan->chan, mb_chan->rx_msg);
+
+ return IRQ_HANDLED;
+}
+
+static int slimpro_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+
+ mb_chan_send_msg(mb_chan, msg);
+ return 0;
+}
+
+static int slimpro_mbox_startup(struct mbox_chan *chan)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+ int rc;
+ u32 val;
+
+ rc = devm_request_irq(mb_chan->dev, mb_chan->irq, slimpro_mbox_irq, 0,
+ MBOX_CON_NAME, mb_chan);
+ if (unlikely(rc)) {
+ dev_err(mb_chan->dev, "failed to register mailbox interrupt %d\n",
+ mb_chan->irq);
+ return rc;
+ }
+
+ /* Enable HW interrupt */
+ writel(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK,
+ mb_chan->reg + REG_DB_STAT);
+ /* Unmask doorbell status interrupt */
+ val = readl(mb_chan->reg + REG_DB_STATMASK);
+ val &= ~(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
+ writel(val, mb_chan->reg + REG_DB_STATMASK);
+
+ return 0;
+}
+
+static void slimpro_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+ u32 val;
+
+ /* Mask doorbell status interrupt */
+ val = readl(mb_chan->reg + REG_DB_STATMASK);
+ val |= (MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
+ writel(val, mb_chan->reg + REG_DB_STATMASK);
+
+ devm_free_irq(mb_chan->dev, mb_chan->irq, mb_chan);
+}
+
+static struct mbox_chan_ops slimpro_mbox_ops = {
+ .send_data = slimpro_mbox_send_data,
+ .startup = slimpro_mbox_startup,
+ .shutdown = slimpro_mbox_shutdown,
+};
+
+static int slimpro_mbox_probe(struct platform_device *pdev)
+{
+ struct slimpro_mbox *ctx;
+ struct resource *regs;
+ void __iomem *mb_base;
+ int rc;
+ int i;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctx);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ if (!mb_base)
+ return -ENOMEM;
+
+ /* Setup mailbox links */
+ for (i = 0; i < MBOX_CNT; i++) {
+ ctx->mc[i].irq = platform_get_irq(pdev, i);
+ if (ctx->mc[i].irq < 0) {
+ if (i == 0) {
+ dev_err(&pdev->dev, "no available IRQ\n");
+ return -EINVAL;
+ }
+ dev_info(&pdev->dev, "no IRQ for channel %d\n", i);
+ break;
+ }
+
+ ctx->mc[i].dev = &pdev->dev;
+ ctx->mc[i].reg = mb_base + i * MBOX_REG_SET_OFFSET;
+ ctx->mc[i].chan = &ctx->chans[i];
+ ctx->chans[i].con_priv = &ctx->mc[i];
+ }
+
+ /* Setup mailbox controller */
+ ctx->mb_ctrl.dev = &pdev->dev;
+ ctx->mb_ctrl.chans = ctx->chans;
+ ctx->mb_ctrl.txdone_irq = true;
+ ctx->mb_ctrl.ops = &slimpro_mbox_ops;
+ ctx->mb_ctrl.num_chans = i;
+
+ rc = mbox_controller_register(&ctx->mb_ctrl);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "APM X-Gene SLIMpro MailBox register failed:%d\n", rc);
+ return rc;
+ }
+
+ dev_info(&pdev->dev, "APM X-Gene SLIMpro MailBox registered\n");
+ return 0;
+}
+
+static int slimpro_mbox_remove(struct platform_device *pdev)
+{
+ struct slimpro_mbox *smb = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&smb->mb_ctrl);
+ return 0;
+}
+
+static const struct of_device_id slimpro_of_match[] = {
+ {.compatible = "apm,xgene-slimpro-mbox" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, slimpro_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id slimpro_acpi_ids[] = {
+ {"APMC0D01", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, slimpro_acpi_ids);
+#endif
+
+static struct platform_driver slimpro_mbox_driver = {
+ .probe = slimpro_mbox_probe,
+ .remove = slimpro_mbox_remove,
+ .driver = {
+ .name = "xgene-slimpro-mbox",
+ .of_match_table = of_match_ptr(slimpro_of_match),
+ .acpi_match_table = ACPI_PTR(slimpro_acpi_ids)
+ },
+};
+
+static int __init slimpro_mbox_init(void)
+{
+ return platform_driver_register(&slimpro_mbox_driver);
+}
+
+static void __exit slimpro_mbox_exit(void)
+{
+ platform_driver_unregister(&slimpro_mbox_driver);
+}
+
+subsys_initcall(slimpro_mbox_init);
+module_exit(slimpro_mbox_exit);
+
+MODULE_DESCRIPTION("APM X-Gene SLIMpro Mailbox Driver");
+MODULE_LICENSE("GPL");
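
Note the write ordering in mb_chan_send_msg(): the two payload words go to REG_DB_DOUT0/1 first and the command word is written to REG_DB_OUT last, since that final write is what raises the doorbell on the SLIMpro side. A hypothetical client (the function name and command values are invented for illustration) would therefore pack each message as three u32s with the doorbell/command word at index 0:

        /* Sketch only: a made-up client sending one SLIMpro message. */
        static int send_slimpro_cmd(struct mbox_chan *chan, u32 cmd,
                                    u32 param0, u32 param1)
        {
                u32 msg[3] = { cmd, param0, param1 };

                /* mbox_send_message() ends up in slimpro_mbox_send_data();
                 * completion is signalled by the ACK IRQ (txdone_irq). */
                return mbox_send_message(chan, msg);
        }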
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f85705..4a36632c236f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -375,13 +375,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
if (!np) {
dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
if (!of_get_property(np, "mbox-names", NULL)) {
dev_err(cl->dev,
"%s() requires an \"mbox-names\" property\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
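
After this change, a client that looks up its channel by name gets -EINVAL rather than -ENOSYS when the device tree data is missing; -ENOSYS is conventionally reserved for missing system calls. A minimal caller sketch (illustrative only, assuming the client's DT node carries an mbox-names property listing "tx"):

        struct mbox_chan *chan;

        chan = mbox_request_channel_byname(cl, "tx");
        if (IS_ERR(chan))
                return PTR_ERR(chan);	/* now -EINVAL if mbox-names is absent */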
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 8f779a1ec99c..043828d541f7 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -63,6 +63,7 @@
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "mailbox.h"
@@ -70,6 +71,9 @@
static struct mbox_chan *pcc_mbox_channels;
+/* Array of cached virtual address for doorbell registers */
+static void __iomem **pcc_doorbell_vaddr;
+
static struct mbox_controller pcc_mbox_ctrl = {};
/**
* get_pcc_channel - Given a PCC subspace idx, get
@@ -160,6 +164,66 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+/*
+ * PCC can be used by performance-critical drivers such as CPPC,
+ * so it makes sense to locally cache the virtual address of a
+ * register such as the doorbell and use it for reads and writes.
+ *
+ * read_register() and write_register() below handle this access to
+ * performance-critical registers such as the PCC doorbell.
+ */
+static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+{
+ int ret_val = 0;
+
+ switch (bit_width) {
+ case 8:
+ *val = readb(vaddr);
+ break;
+ case 16:
+ *val = readw(vaddr);
+ break;
+ case 32:
+ *val = readl(vaddr);
+ break;
+ case 64:
+ *val = readq(vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot read register of %u bit width",
+ bit_width);
+ ret_val = -EFAULT;
+ break;
+ }
+ return ret_val;
+}
+
+static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+{
+ int ret_val = 0;
+
+ switch (bit_width) {
+ case 8:
+ writeb(val, vaddr);
+ break;
+ case 16:
+ writew(val, vaddr);
+ break;
+ case 32:
+ writel(val, vaddr);
+ break;
+ case 64:
+ writeq(val, vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot write register of %u bit width",
+ bit_width);
+ ret_val = -EFAULT;
+ break;
+ }
+ return ret_val;
+}
+
/**
* pcc_send_data - Called from Mailbox Controller code. Used
* here only to ring the channel doorbell. The PCC client
@@ -175,21 +239,39 @@ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
static int pcc_send_data(struct mbox_chan *chan, void *data)
{
struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
- struct acpi_generic_address doorbell;
+ struct acpi_generic_address *doorbell;
u64 doorbell_preserve;
u64 doorbell_val;
u64 doorbell_write;
+ u32 id = chan - pcc_mbox_channels;
+ int ret = 0;
+
+ if (id >= pcc_mbox_ctrl.num_chans) {
+ pr_debug("pcc_send_data: Invalid mbox_chan passed\n");
+ return -ENOENT;
+ }
- doorbell = pcct_ss->doorbell_register;
+ doorbell = &pcct_ss->doorbell_register;
doorbell_preserve = pcct_ss->preserve_mask;
doorbell_write = pcct_ss->write_mask;
/* Sync notification from OS to Platform. */
- acpi_read(&doorbell_val, &doorbell);
- acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
- &doorbell);
-
- return 0;
+ if (pcc_doorbell_vaddr[id]) {
+ ret = read_register(pcc_doorbell_vaddr[id], &doorbell_val,
+ doorbell->bit_width);
+ if (ret)
+ return ret;
+ ret = write_register(pcc_doorbell_vaddr[id],
+ (doorbell_val & doorbell_preserve) | doorbell_write,
+ doorbell->bit_width);
+ } else {
+ ret = acpi_read(&doorbell_val, doorbell);
+ if (ret)
+ return ret;
+ ret = acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
+ doorbell);
+ }
+ return ret;
}
static const struct mbox_chan_ops pcc_chan_ops = {
@@ -265,12 +347,27 @@ static int __init acpi_pcc_probe(void)
return -ENOMEM;
}
+ pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
+ if (!pcc_doorbell_vaddr) {
+ kfree(pcc_mbox_channels);
+ return -ENOMEM;
+ }
+
/* Point to the first PCC subspace entry */
pcct_entry = (struct acpi_subtable_header *) (
(unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
for (i = 0; i < count; i++) {
+ struct acpi_generic_address *db_reg;
+ struct acpi_pcct_hw_reduced *pcct_ss;
pcc_mbox_channels[i].con_priv = pcct_entry;
+
+ /* If the doorbell is in system memory, cache the virtual address */
+ pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
+ db_reg = &pcct_ss->doorbell_register;
+ if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
+ db_reg->bit_width/8);
pcct_entry = (struct acpi_subtable_header *)
((unsigned long) pcct_entry + pcct_entry->length);
}
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
new file mode 100644
index 000000000000..d702a204f5c1
--- /dev/null
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define MAILBOX_A2B_INTEN 0x00
+#define MAILBOX_A2B_STATUS 0x04
+#define MAILBOX_A2B_CMD(x) (0x08 + (x) * 8)
+#define MAILBOX_A2B_DAT(x) (0x0c + (x) * 8)
+
+#define MAILBOX_B2A_INTEN 0x28
+#define MAILBOX_B2A_STATUS 0x2C
+#define MAILBOX_B2A_CMD(x) (0x30 + (x) * 8)
+#define MAILBOX_B2A_DAT(x) (0x34 + (x) * 8)
+
+struct rockchip_mbox_msg {
+ u32 cmd;
+ int rx_size;
+};
+
+struct rockchip_mbox_data {
+ int num_chans;
+};
+
+struct rockchip_mbox_chan {
+ int idx;
+ int irq;
+ struct rockchip_mbox_msg *msg;
+ struct rockchip_mbox *mb;
+};
+
+struct rockchip_mbox {
+ struct mbox_controller mbox;
+ struct clk *pclk;
+ void __iomem *mbox_base;
+
+ /* The maximum size of buf for each channel */
+ u32 buf_size;
+
+ struct rockchip_mbox_chan *chans;
+};
+
+static int rockchip_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ struct rockchip_mbox_msg *msg = data;
+ struct rockchip_mbox_chan *chans = mb->chans;
+
+ if (!msg)
+ return -EINVAL;
+
+ if (msg->rx_size > mb->buf_size) {
+ dev_err(mb->mbox.dev, "Transmit size exceeds buf size (%d)\n",
+ mb->buf_size);
+ return -EINVAL;
+ }
+
+ dev_dbg(mb->mbox.dev, "Chan[%d]: A2B message, cmd 0x%08x\n",
+ chans->idx, msg->cmd);
+
+ mb->chans[chans->idx].msg = msg;
+
+ writel_relaxed(msg->cmd, mb->mbox_base + MAILBOX_A2B_CMD(chans->idx));
+ writel_relaxed(msg->rx_size, mb->mbox_base +
+ MAILBOX_A2B_DAT(chans->idx));
+
+ return 0;
+}
+
+static int rockchip_mbox_startup(struct mbox_chan *chan)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+
+ /* Enable all B2A interrupts */
+ writel_relaxed((1 << mb->mbox.num_chans) - 1,
+ mb->mbox_base + MAILBOX_B2A_INTEN);
+
+ return 0;
+}
+
+static void rockchip_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ struct rockchip_mbox_chan *chans = mb->chans;
+
+ /* Disable all B2A interrupts */
+ writel_relaxed(0, mb->mbox_base + MAILBOX_B2A_INTEN);
+
+ mb->chans[chans->idx].msg = NULL;
+}
+
+static const struct mbox_chan_ops rockchip_mbox_chan_ops = {
+ .send_data = rockchip_mbox_send_data,
+ .startup = rockchip_mbox_startup,
+ .shutdown = rockchip_mbox_shutdown,
+};
+
+static irqreturn_t rockchip_mbox_irq(int irq, void *dev_id)
+{
+ int idx;
+ struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
+ u32 status = readl_relaxed(mb->mbox_base + MAILBOX_B2A_STATUS);
+
+ for (idx = 0; idx < mb->mbox.num_chans; idx++) {
+ if ((status & (1 << idx)) && (irq == mb->chans[idx].irq)) {
+ /* Clear mbox interrupt */
+ writel_relaxed(1 << idx,
+ mb->mbox_base + MAILBOX_B2A_STATUS);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t rockchip_mbox_isr(int irq, void *dev_id)
+{
+ int idx;
+ struct rockchip_mbox_msg *msg = NULL;
+ struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
+
+ for (idx = 0; idx < mb->mbox.num_chans; idx++) {
+ if (irq != mb->chans[idx].irq)
+ continue;
+
+ msg = mb->chans[idx].msg;
+ if (!msg) {
+ dev_err(mb->mbox.dev,
+ "Chan[%d]: B2A message is NULL\n", idx);
+ break; /* spurious */
+ }
+
+ mbox_chan_received_data(&mb->mbox.chans[idx], msg);
+ mb->chans[idx].msg = NULL;
+
+ dev_dbg(mb->mbox.dev, "Chan[%d]: B2A message, cmd 0x%08x\n",
+ idx, msg->cmd);
+
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct rockchip_mbox_data rk3368_drv_data = {
+ .num_chans = 4,
+};
+
+static const struct of_device_id rockchip_mbox_of_match[] = {
+ { .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data},
+ { },
+};
+MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match);
+
+static int rockchip_mbox_probe(struct platform_device *pdev)
+{
+ struct rockchip_mbox *mb;
+ const struct of_device_id *match;
+ const struct rockchip_mbox_data *drv_data;
+ struct resource *res;
+ int ret, irq, i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ match = of_match_node(rockchip_mbox_of_match, pdev->dev.of_node);
+ drv_data = (const struct rockchip_mbox_data *)match->data;
+
+ mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
+ sizeof(*mb->chans), GFP_KERNEL);
+ if (!mb->chans)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->mbox.dev = &pdev->dev;
+ mb->mbox.num_chans = drv_data->num_chans;
+ mb->mbox.ops = &rockchip_mbox_chan_ops;
+ mb->mbox.txdone_irq = true;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ mb->mbox_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mb->mbox_base))
+ return PTR_ERR(mb->mbox_base);
+
+ /* Each channel has two buffers for A2B and B2A */
+ mb->buf_size = (size_t)resource_size(res) / (drv_data->num_chans * 2);
+
+ mb->pclk = devm_clk_get(&pdev->dev, "pclk_mailbox");
+ if (IS_ERR(mb->pclk)) {
+ ret = PTR_ERR(mb->pclk);
+ dev_err(&pdev->dev, "failed to get pclk_mailbox clock: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mb->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable pclk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < mb->mbox.num_chans; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ rockchip_mbox_irq,
+ rockchip_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (ret < 0)
+ return ret;
+
+ mb->chans[i].idx = i;
+ mb->chans[i].irq = irq;
+ mb->chans[i].mb = mb;
+ mb->chans[i].msg = NULL;
+ }
+
+ ret = mbox_controller_register(&mb->mbox);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to register mailbox: %d\n", ret);
+
+ return ret;
+}
+
+static int rockchip_mbox_remove(struct platform_device *pdev)
+{
+ struct rockchip_mbox *mb = platform_get_drvdata(pdev);
+
+ if (!mb)
+ return -EINVAL;
+
+ mbox_controller_unregister(&mb->mbox);
+
+ return 0;
+}
+
+static struct platform_driver rockchip_mbox_driver = {
+ .probe = rockchip_mbox_probe,
+ .remove = rockchip_mbox_remove,
+ .driver = {
+ .name = "rockchip-mailbox",
+ .of_match_table = of_match_ptr(rockchip_mbox_of_match),
+ },
+};
+
+module_platform_driver(rockchip_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Rockchip mailbox: communicate between CPU cores and MCU");
+MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
+MODULE_AUTHOR("Caesar Wang <wxt@rock-chips.com>");
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
new file mode 100644
index 000000000000..54b9e4cb4cfa
--- /dev/null
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -0,0 +1,639 @@
+/*
+ * Texas Instruments' Message Manager Driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+
+#define Q_DATA_OFFSET(proxy, queue, reg) \
+ ((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
+#define Q_STATE_OFFSET(queue) ((queue) * 0x4)
+#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000)
+
+/**
+ * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
+ * @queue_id: Queue Number for this path
+ * @proxy_id: Proxy ID representing the processor in SoC
+ * @is_tx: Is this a transmit path?
+ */
+struct ti_msgmgr_valid_queue_desc {
+ u8 queue_id;
+ u8 proxy_id;
+ bool is_tx;
+};
+
+/**
+ * struct ti_msgmgr_desc - Description of message manager integration
+ * @queue_count: Number of Queues
+ * @max_message_size: Message size in bytes
+ * @max_messages: Number of messages
+ * @q_slices: Number of queue engines
+ * @q_proxies: Number of queue proxies per page
+ * @data_first_reg: First data register for proxy data region
+ * @data_last_reg: Last data register for proxy data region
+ * @tx_polled: Do I need to use polled mechanism for tx
+ * @tx_poll_timeout_ms: Timeout in ms if polled
+ * @valid_queues: List of Valid queues that the processor can access
+ * @num_valid_queues: Number of valid queues
+ *
+ * This structure is used in of match data to describe how integration
+ * for a specific compatible SoC is done.
+ */
+struct ti_msgmgr_desc {
+ u8 queue_count;
+ u8 max_message_size;
+ u8 max_messages;
+ u8 q_slices;
+ u8 q_proxies;
+ u8 data_first_reg;
+ u8 data_last_reg;
+ bool tx_polled;
+ int tx_poll_timeout_ms;
+ const struct ti_msgmgr_valid_queue_desc *valid_queues;
+ int num_valid_queues;
+};
+
+/**
+ * struct ti_queue_inst - Description of a queue instance
+ * @name: Queue Name
+ * @queue_id: Queue Identifier as mapped on SoC
+ * @proxy_id: Proxy Identifier as mapped on SoC
+ * @irq: IRQ for Rx Queue
+ * @is_tx: 'true' if transmit queue, else 'false'
+ * @queue_buff_start: First register of Data Buffer
+ * @queue_buff_end: Last (or confirmation) register of Data buffer
+ * @queue_state: Queue status register
+ * @chan: Mailbox channel
+ * @rx_buff: Receive buffer pointer allocated at probe, max_message_size
+ */
+struct ti_queue_inst {
+ char name[30];
+ u8 queue_id;
+ u8 proxy_id;
+ int irq;
+ bool is_tx;
+ void __iomem *queue_buff_start;
+ void __iomem *queue_buff_end;
+ void __iomem *queue_state;
+ struct mbox_chan *chan;
+ u32 *rx_buff;
+};
+
+/**
+ * struct ti_msgmgr_inst - Description of a Message Manager Instance
+ * @dev: device pointer corresponding to the Message Manager instance
+ * @desc: Description of the SoC integration
+ * @queue_proxy_region: Queue proxy region where queue buffers are located
+ * @queue_state_debug_region: Queue status register regions
+ * @num_valid_queues: Number of valid queues defined for the processor
+ * Note: other queues are probably reserved for other processors
+ * in the SoC.
+ * @qinsts: Array of valid Queue Instances for the Processor
+ * @mbox: Mailbox Controller
+ * @chans: Array for channels corresponding to the Queue Instances.
+ */
+struct ti_msgmgr_inst {
+ struct device *dev;
+ const struct ti_msgmgr_desc *desc;
+ void __iomem *queue_proxy_region;
+ void __iomem *queue_state_debug_region;
+ u8 num_valid_queues;
+ struct ti_queue_inst *qinsts;
+ struct mbox_controller mbox;
+ struct mbox_chan *chans;
+};
+
+/**
+ * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
+ * @qinst: Queue instance for which we check the number of pending messages
+ *
+ * Return: number of messages pending in the queue (0 == no pending messages)
+ */
+static inline int ti_msgmgr_queue_get_num_messages(struct ti_queue_inst *qinst)
+{
+ u32 val;
+
+ /*
+ * We cannot use a relaxed operation here - updates may happen
+ * in real time.
+ */
+ val = readl(qinst->queue_state) & Q_STATE_ENTRY_COUNT_MASK;
+ val >>= __ffs(Q_STATE_ENTRY_COUNT_MASK);
+
+ return val;
+}
+
+/**
+ * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
+ * @irq: Interrupt number
+ * @p: Channel Pointer
+ *
+ * Return: -EINVAL if there is no instance
+ * IRQ_NONE if the interrupt is not ours.
+ * IRQ_HANDLED if the rx interrupt was successfully handled.
+ */
+static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *desc;
+ int msg_count, num_words;
+ struct ti_msgmgr_message message;
+ void __iomem *data_reg;
+ u32 *word_data;
+
+ if (WARN_ON(!inst)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+
+ /* Do I have an invalid interrupt source? */
+ if (qinst->is_tx) {
+ dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n",
+ qinst->name);
+ return IRQ_NONE;
+ }
+
+ /* Do I actually have messages to read? */
+ msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+ if (!msg_count) {
+ /* Shared IRQ? */
+ dev_dbg(dev, "Spurious event - 0 pending data!\n");
+ return IRQ_NONE;
+ }
+
+ /*
+ * I have no idea about the protocol being used to communicate with the
+ * remote producer - 0 could be valid data, so I won't make a judgement
+ * of how many bytes I should be reading. Let the client figure this
+ * out: I just read the full message and pass it on.
+ */
+ desc = inst->desc;
+ message.len = desc->max_message_size;
+ message.buf = (u8 *)qinst->rx_buff;
+
+ /*
+ * NOTE about register access involved here:
+ * the hardware block is implemented with 32bit access operations and no
+ * support for data splitting. We don't want the hardware to misbehave
+ * with sub 32bit access - For example: if the last register read is
+ * split into byte wise access, it can result in the queue getting
+ * stuck or indeterminate behavior. An out of order read operation may
+ * result in weird data results as well.
+ * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
+ * we depend on readl for the purpose.
+ *
+ * Also note that the final register read automatically marks the
+ * queue message as read.
+ */
+ for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
+ num_words = (desc->max_message_size / sizeof(u32));
+ num_words; num_words--, data_reg += sizeof(u32), word_data++)
+ *word_data = readl(data_reg);
+
+ /*
+ * Last register read automatically clears the IRQ if only 1 message
+ * is pending - so send the data up the stack..
+ * NOTE: Client is expected to be as optimal as possible, since
+ * we invoke the handler in IRQ context.
+ */
+ mbox_chan_received_data(chan, (void *)&message);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages.
+ * @chan: Channel Pointer
+ *
+ * Return: 'true' if there is pending rx data, 'false' if there is none.
+ */
+static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ int msg_count;
+
+ if (qinst->is_tx)
+ return false;
+
+ msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+
+ return msg_count ? true : false;
+}
+
+/**
+ * ti_msgmgr_last_tx_done() - See if all the tx messages are sent
+ * @chan: Channel pointer
+ *
+ * Return: 'true' if no pending tx data, 'false' if there is any.
+ */
+static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ int msg_count;
+
+ if (!qinst->is_tx)
+ return false;
+
+ msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+
+ /* if we have any messages pending.. */
+ return msg_count ? false : true;
+}
+
+/**
+ * ti_msgmgr_send_data() - Send data
+ * @chan: Channel Pointer
+ * @data: ti_msgmgr_message * Message Pointer
+ *
+ * Return: 0 if all goes well, else an appropriate error code.
+ */
+static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc;
+ struct ti_queue_inst *qinst = chan->con_priv;
+ int num_words, trail_bytes;
+ struct ti_msgmgr_message *message = data;
+ void __iomem *data_reg;
+ u32 *word_data;
+
+ if (WARN_ON(!inst)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+ desc = inst->desc;
+
+ if (desc->max_message_size < message->len) {
+ dev_err(dev, "Queue %s message length %d > max %d\n",
+ qinst->name, message->len, desc->max_message_size);
+ return -EINVAL;
+ }
+
+ /* NOTE: Constraints similar to rx path exists here as well */
+ for (data_reg = qinst->queue_buff_start,
+ num_words = message->len / sizeof(u32),
+ word_data = (u32 *)message->buf;
+ num_words; num_words--, data_reg += sizeof(u32), word_data++)
+ writel(*word_data, data_reg);
+
+ trail_bytes = message->len % sizeof(u32);
+ if (trail_bytes) {
+ u32 data_trail = *word_data;
+
+ /* Ensure all unused data is 0 */
+ data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
+ writel(data_trail, data_reg);
+ data_reg++;
+ }
+ /*
+ * 'data_reg' indicates the next register to write. If we did not already
+ * write the tx complete reg (the last reg), we must do so to trigger transmit
+ */
+ if (data_reg <= qinst->queue_buff_end)
+ writel(0, qinst->queue_buff_end);
+
+ return 0;
+}
+
+/**
+ * ti_msgmgr_queue_startup() - Startup queue
+ * @chan: Channel pointer
+ *
+ * Return: 0 if all goes well, else the corresponding error code
+ */
+static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ int ret;
+
+ if (!qinst->is_tx) {
+ /*
+ * With the expectation that the IRQ might be shared in SoC
+ */
+ ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
+ IRQF_SHARED, qinst->name, chan);
+ if (ret) {
+ dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
+ qinst->irq, qinst->name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ti_msgmgr_queue_shutdown() - Shutdown the queue
+ * @chan: Channel pointer
+ */
+static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+
+ if (!qinst->is_tx)
+ free_irq(qinst->irq, chan);
+}
+
+/**
+ * ti_msgmgr_of_xlate() - Translation of phandle to queue
+ * @mbox: Mailbox controller
+ * @p: phandle pointer
+ *
+ * Return: Mailbox channel corresponding to the queue, else return error
+ * pointer.
+ */
+static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *p)
+{
+ struct ti_msgmgr_inst *inst;
+ int req_qid, req_pid;
+ struct ti_queue_inst *qinst;
+ int i;
+
+ inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
+ if (WARN_ON(!inst))
+ return ERR_PTR(-EINVAL);
+
+ /* #mbox-cells is 2 */
+ if (p->args_count != 2) {
+ dev_err(inst->dev, "Invalid arguments in dt[%d] instead of 2\n",
+ p->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+ req_qid = p->args[0];
+ req_pid = p->args[1];
+
+ for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
+ i++, qinst++) {
+ if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id)
+ return qinst->chan;
+ }
+
+ dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %s\n",
+ req_qid, req_pid, p->np->name);
+ return ERR_PTR(-ENOENT);
+}
+
+/**
+ * ti_msgmgr_queue_setup() - Setup data structures for each queue instance
+ * @idx: index of the queue
+ * @dev: pointer to the message manager device
+ * @np: pointer to the of node
+ * @inst: Message Manager instance pointer
+ * @d: Message Manager instance description data
+ * @qd: Queue description data
+ * @qinst: Queue instance pointer
+ * @chan: pointer to mailbox channel
+ *
+ * Return: 0 if all went well, else return corresponding error
+ */
+static int ti_msgmgr_queue_setup(int idx, struct device *dev,
+ struct device_node *np,
+ struct ti_msgmgr_inst *inst,
+ const struct ti_msgmgr_desc *d,
+ const struct ti_msgmgr_valid_queue_desc *qd,
+ struct ti_queue_inst *qinst,
+ struct mbox_chan *chan)
+{
+ qinst->proxy_id = qd->proxy_id;
+ qinst->queue_id = qd->queue_id;
+
+ if (qinst->queue_id > d->queue_count) {
+ dev_err(dev, "Queue Data [idx=%d] queuid %d > %d\n",
+ idx, qinst->queue_id, d->queue_count);
+ return -ERANGE;
+ }
+
+ qinst->is_tx = qd->is_tx;
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
+ dev_name(dev), qinst->is_tx ? "tx" : "rx", qinst->queue_id,
+ qinst->proxy_id);
+
+ if (!qinst->is_tx) {
+ char of_rx_irq_name[7];
+
+ snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
+ "rx_%03d", qinst->queue_id);
+
+ qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
+ if (qinst->irq < 0) {
+ dev_crit(dev,
+ "[%d]QID %d PID %d:No IRQ[%s]: %d\n",
+ idx, qinst->queue_id, qinst->proxy_id,
+ of_rx_irq_name, qinst->irq);
+ return qinst->irq;
+ }
+ /* Allocate usage buffer for rx */
+ qinst->rx_buff = devm_kzalloc(dev,
+ d->max_message_size, GFP_KERNEL);
+ if (!qinst->rx_buff)
+ return -ENOMEM;
+ }
+
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_last_reg);
+ qinst->queue_state = inst->queue_state_debug_region +
+ Q_STATE_OFFSET(qinst->queue_id);
+ qinst->chan = chan;
+
+ chan->con_priv = qinst;
+
+ dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
+ idx, qinst->queue_id, qinst->proxy_id, qinst->irq,
+ qinst->queue_buff_start, qinst->queue_buff_end);
+ return 0;
+}
+
+/* Queue operations */
+static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
+ .startup = ti_msgmgr_queue_startup,
+ .shutdown = ti_msgmgr_queue_shutdown,
+ .peek_data = ti_msgmgr_queue_peek_data,
+ .last_tx_done = ti_msgmgr_last_tx_done,
+ .send_data = ti_msgmgr_send_data,
+};
+
+/* Keystone K2G SoC integration details */
+static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = {
+ {.queue_id = 0, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 1, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 2, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 3, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 5, .proxy_id = 2, .is_tx = false,},
+ {.queue_id = 56, .proxy_id = 1, .is_tx = true,},
+ {.queue_id = 57, .proxy_id = 2, .is_tx = false,},
+ {.queue_id = 58, .proxy_id = 3, .is_tx = true,},
+ {.queue_id = 59, .proxy_id = 4, .is_tx = true,},
+ {.queue_id = 60, .proxy_id = 5, .is_tx = true,},
+ {.queue_id = 61, .proxy_id = 6, .is_tx = true,},
+};
+
+static const struct ti_msgmgr_desc k2g_desc = {
+ .queue_count = 64,
+ .max_message_size = 64,
+ .max_messages = 128,
+ .q_slices = 1,
+ .q_proxies = 1,
+ .data_first_reg = 16,
+ .data_last_reg = 31,
+ .tx_polled = false,
+ .valid_queues = k2g_valid_queues,
+ .num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
+};
+
+static const struct of_device_id ti_msgmgr_of_match[] = {
+ {.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
+
+static int ti_msgmgr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ struct device_node *np;
+ struct resource *res;
+ const struct ti_msgmgr_desc *desc;
+ struct ti_msgmgr_inst *inst;
+ struct ti_queue_inst *qinst;
+ struct mbox_controller *mbox;
+ struct mbox_chan *chans;
+ int queue_count;
+ int i;
+ int ret = -EINVAL;
+ const struct ti_msgmgr_valid_queue_desc *queue_desc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "no OF information\n");
+ return -EINVAL;
+ }
+ np = dev->of_node;
+
+ of_id = of_match_device(ti_msgmgr_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+ desc = of_id->data;
+
+ inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->dev = dev;
+ inst->desc = desc;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "queue_proxy_region");
+ inst->queue_proxy_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inst->queue_proxy_region))
+ return PTR_ERR(inst->queue_proxy_region);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "queue_state_debug_region");
+ inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inst->queue_state_debug_region))
+ return PTR_ERR(inst->queue_state_debug_region);
+
+ dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
+ inst->queue_proxy_region, inst->queue_state_debug_region);
+
+ queue_count = desc->num_valid_queues;
+ if (!queue_count || queue_count > desc->queue_count) {
+ dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
+ queue_count, desc->queue_count);
+ return -ERANGE;
+ }
+ inst->num_valid_queues = queue_count;
+
+ qinst = devm_kzalloc(dev, sizeof(*qinst) * queue_count, GFP_KERNEL);
+ if (!qinst)
+ return -ENOMEM;
+ inst->qinsts = qinst;
+
+ chans = devm_kzalloc(dev, sizeof(*chans) * queue_count, GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ inst->chans = chans;
+
+ for (i = 0, queue_desc = desc->valid_queues;
+ i < queue_count; i++, qinst++, chans++, queue_desc++) {
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, queue_desc, qinst, chans);
+ if (ret)
+ return ret;
+ }
+
+ mbox = &inst->mbox;
+ mbox->dev = dev;
+ mbox->ops = &ti_msgmgr_chan_ops;
+ mbox->chans = inst->chans;
+ mbox->num_chans = inst->num_valid_queues;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = desc->tx_polled;
+ if (desc->tx_polled)
+ mbox->txpoll_period = desc->tx_poll_timeout_ms;
+ mbox->of_xlate = ti_msgmgr_of_xlate;
+
+ platform_set_drvdata(pdev, inst);
+ ret = mbox_controller_register(mbox);
+ if (ret)
+ dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);
+
+ return ret;
+}
+
+static int ti_msgmgr_remove(struct platform_device *pdev)
+{
+ struct ti_msgmgr_inst *inst;
+
+ inst = platform_get_drvdata(pdev);
+ mbox_controller_unregister(&inst->mbox);
+
+ return 0;
+}
+
+static struct platform_driver ti_msgmgr_driver = {
+ .probe = ti_msgmgr_probe,
+ .remove = ti_msgmgr_remove,
+ .driver = {
+ .name = "ti-msgmgr",
+ .of_match_table = of_match_ptr(ti_msgmgr_of_match),
+ },
+};
+module_platform_driver(ti_msgmgr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI message manager driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-msgmgr");
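
Two pieces of register arithmetic above deserve a worked example. Q_DATA_OFFSET() gives each proxy a 64 KiB page, each queue an 0x80-byte window, and each data register 4 bytes, while ti_msgmgr_queue_get_num_messages() extracts the pending count from bits 12-23 of the status word (__ffs(0xFFF000) == 12). A small standalone check, with values taken from k2g_desc and the K2G valid-queue table:

#include <stdio.h>

#define Q_DATA_OFFSET(proxy, queue, reg) \
        ((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000)

int main(void)
{
        /* Rx queue 5, proxy 2, first data register 16 (data_first_reg): */
        printf("0x%x\n", Q_DATA_OFFSET(2, 5, 16));	/* 0x20000 + 0x280 + 0x40 = 0x202c0 */

        /* Pending-count extraction as in ti_msgmgr_queue_get_num_messages(): */
        unsigned int status = 0x003000;			/* hypothetical readl() result */
        printf("%u\n", (status & Q_STATE_ENTRY_COUNT_MASK) >> 12);	/* prints 3 */
        return 0;
}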
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 0a2e7273db9e..02a5345a44a6 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -249,6 +249,7 @@ config DM_DEBUG_BLOCK_STACK_TRACING
block manager locking used by thin provisioning and caching.
If unsure, say N.
+
config DM_BIO_PRISON
tristate
depends on BLK_DEV_DM
@@ -304,16 +305,6 @@ config DM_CACHE
algorithms used to select which blocks are promoted, demoted,
cleaned etc. It supports writeback and writethrough modes.
-config DM_CACHE_MQ
- tristate "MQ Cache Policy (EXPERIMENTAL)"
- depends on DM_CACHE
- default y
- ---help---
- A cache policy that uses a multiqueue ordered by recent hit
- count to select which blocks should be promoted and demoted.
- This is meant to be a general purpose policy. It prioritises
- reads over writes.
-
config DM_CACHE_SMQ
tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)"
depends on DM_CACHE
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 62a65764e8e0..52ba8dd82821 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -12,7 +12,6 @@ dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
-dm-cache-mq-y += dm-cache-policy-mq.o
dm-cache-smq-y += dm-cache-policy-smq.o
dm-cache-cleaner-y += dm-cache-policy-cleaner.o
dm-era-y += dm-era-target.o
@@ -55,7 +54,6 @@ obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
-obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8d0ead98eb6e..a296425a7270 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1015,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
*/
atomic_set(&dc->count, 1);
- if (bch_cached_dev_writeback_start(dc))
+ /* Spawn the writeback thread, but keep it blocked until attach completes */
+ down_write(&dc->writeback_lock);
+ if (bch_cached_dev_writeback_start(dc)) {
+ up_write(&dc->writeback_lock);
return -ENOMEM;
+ }
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(dc);
@@ -1028,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
+ /* Allow the writeback thread to proceed */
+ up_write(&dc->writeback_lock);
+
pr_info("Caching %s as %s on set %pU",
bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
dc->disk.c->sb.set_uuid);
@@ -1366,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
struct btree *b;
unsigned i;
+ if (!c)
+ closure_return(cl);
+
bch_cache_accounting_destroy(&c->accounting);
kobject_put(&c->internal);
@@ -1828,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
return 0;
}
-static void register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
- const char *err = "cannot allocate memory";
+ const char *err = NULL;
+ int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
@@ -1847,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
- if (cache_alloc(sb, ca) != 0)
+ ret = cache_alloc(sb, ca);
+ if (ret != 0)
goto err;
- err = "error creating kobject";
- if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
- goto err;
+ if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
+ err = "error calling kobject_add";
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_lock(&bch_register_lock);
err = register_cache_set(ca);
mutex_unlock(&bch_register_lock);
- if (err)
- goto err;
+ if (err) {
+ ret = -ENODEV;
+ goto out;
+ }
pr_info("registered cache device %s", bdevname(bdev, name));
+
out:
kobject_put(&ca->kobj);
- return;
+
err:
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
- goto out;
+ if (err)
+ pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+
+ return ret;
}
/* Global interfaces/init */
@@ -1965,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!ca)
goto err_close;
- register_cache(sb, sb_page, bdev, ca);
+ if (register_cache(sb, sb_page, bdev, ca) != 0)
+ goto err_close;
}
out:
if (sb_page)
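
register_cache() now reports failure instead of silently swallowing it, so register_bcache() can bail out through err_close and release the block device. The rewrite also splits the exit into an "out" tail that always drops the kobject reference and an "err" tail that only logs when an error string was set. Not the bcache code, but a compressed userspace model of that shape (all names and stand-ins invented for illustration):

#include <stdio.h>
#include <stdlib.h>

static int register_thing(int fail_alloc, int fail_add)
{
        const char *err = NULL;
        void *obj = NULL;
        int ret = 0;

        if (fail_alloc) {		/* models cache_alloc() failing */
                ret = -1;
                goto err;		/* no reference taken yet */
        }
        obj = malloc(1);
        if (fail_add || !obj) {		/* models kobject_add() failing */
                err = "error calling kobject_add";
                ret = -1;
                goto out;
        }
        puts("registered");
out:
        free(obj);			/* kobject_put() stand-in */
err:
        if (err)
                fprintf(stderr, "error opening device: %s\n", err);
        return ret;
}

int main(void) { return register_thing(0, 0); }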
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index d80cce499a56..3fe86b54d50b 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -98,7 +98,6 @@ __acquires(bitmap->lock)
bitmap->bp[page].hijacked) {
/* somebody beat us to getting the page */
kfree(mappage);
- return 0;
} else {
/* no page was in place and we have one, so install it */
@@ -323,7 +322,7 @@ __clear_page_buffers(struct page *page)
{
ClearPagePrivate(page);
set_page_private(page, 0);
- page_cache_release(page);
+ put_page(page);
}
static void free_buffers(struct page *page)
{
@@ -510,8 +509,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
sb->chunksize = cpu_to_le32(chunksize);
daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
- if (!daemon_sleep ||
- (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
+ if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
daemon_sleep = 5 * HZ;
}
@@ -1675,6 +1673,9 @@ static void bitmap_free(struct bitmap *bitmap)
if (!bitmap) /* there was no bitmap */
return;
+ if (bitmap->sysfs_can_clear)
+ sysfs_put(bitmap->sysfs_can_clear);
+
if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
md_cluster_stop(bitmap->mddev);
@@ -1714,15 +1715,13 @@ void bitmap_destroy(struct mddev *mddev)
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- if (bitmap->sysfs_can_clear)
- sysfs_put(bitmap->sysfs_can_clear);
-
bitmap_free(bitmap);
}
/*
* initialize the bitmap structure
* if this returns an error, bitmap_destroy must be called to do clean up
+ * once mddev->bitmap is set
*/
struct bitmap *bitmap_create(struct mddev *mddev, int slot)
{
@@ -1867,8 +1866,10 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
struct bitmap_counts *counts;
struct bitmap *bitmap = bitmap_create(mddev, slot);
- if (IS_ERR(bitmap))
+ if (IS_ERR(bitmap)) {
+ bitmap_free(bitmap);
return PTR_ERR(bitmap);
+ }
rv = bitmap_init_from_disk(bitmap, 0);
if (rv)
@@ -2172,14 +2173,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
else {
mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
- if (rv) {
- bitmap_destroy(mddev);
+ if (rv)
mddev->bitmap_info.offset = 0;
- }
}
mddev->pers->quiesce(mddev, 0);
- if (rv)
+ if (rv) {
+ bitmap_destroy(mddev);
return rv;
+ }
}
}
}
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 7d5c3a610ca5..5e3fcd6ecf77 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -49,8 +49,8 @@
* When we set a bit, or in the counter (to start a write), if the fields is
* 0, we first set the disk bit and set the counter to 1.
*
- * If the counter is 0, the on-disk bit is clear and the stipe is clean
- * Anything that dirties the stipe pushes the counter to 2 (at least)
+ * If the counter is 0, the on-disk bit is clear and the stripe is clean
+ * Anything that dirties the stripe pushes the counter to 2 (at least)
* and sets the on-disk bit (lazily).
 * If a periodic sweep finds the counter at 2, it is decremented to 1.
 * If the sweep finds the counter at 1, the on-disk bit is cleared and the
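
The counter semantics in the corrected comment above amount to a tiny state machine: a write dirties the stripe (counter >= 2, on-disk bit set), and each periodic sweep steps 2 -> 1, then 1 -> clean. A toy userspace model, purely illustrative and not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

struct chunk { bool disk_bit; unsigned counter; };

static void write_io(struct chunk *c)
{
        c->disk_bit = true;	/* set lazily in the real code */
        c->counter = 2;
}

static void sweep(struct chunk *c)
{
        if (c->counter == 2)
                c->counter = 1;
        else if (c->counter == 1) {
                c->disk_bit = false;	/* stripe is clean again */
                c->counter = 0;
        }
}

int main(void)
{
        struct chunk c = { false, 0 };

        write_io(&c);
        sweep(&c);
        sweep(&c);
        printf("bit=%d counter=%u\n", c.disk_bit, c.counter);	/* bit=0 counter=0 */
        return 0;
}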
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index f6543f3a970f..3970cda10080 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,18 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-#define WRITE_LOCK(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
- return -EINVAL; \
- down_write(&cmd->root_lock)
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+ down_write(&cmd->root_lock);
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+ up_write(&cmd->root_lock);
+ return false;
+ }
+ return true;
+}
+
+#define WRITE_LOCK(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return -EINVAL; \
+ } while (0)
-#define WRITE_LOCK_VOID(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
- return; \
- down_write(&cmd->root_lock)
+#define WRITE_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return; \
+ } while (0)
#define WRITE_UNLOCK(cmd) \
- up_write(&cmd->root_lock)
+ up_write(&(cmd)->root_lock)
+
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+ down_read(&cmd->root_lock);
+ if (cmd->fail_io) {
+ up_read(&cmd->root_lock);
+ return false;
+ }
+ return true;
+}
+
+#define READ_LOCK(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return -EINVAL; \
+ } while (0)
+
+#define READ_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return; \
+ } while (0)
+
+#define READ_UNLOCK(cmd) \
+ up_read(&(cmd)->root_lock)
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
@@ -1015,22 +1052,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_discards(cmd, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
{
- dm_cblock_t r;
-
- down_read(&cmd->root_lock);
- r = cmd->cache_blocks;
- up_read(&cmd->root_lock);
+ READ_LOCK(cmd);
+ *result = cmd->cache_blocks;
+ READ_UNLOCK(cmd);
- return r;
+ return 0;
}
static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
@@ -1188,9 +1223,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_mappings(cmd, policy, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1215,18 +1250,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
void dm_cache_dump(struct dm_cache_metadata *cmd)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
__dump_mappings(cmd);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = cmd->changed;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1276,9 +1311,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
*stats = cmd->stats;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
@@ -1312,9 +1347,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_free(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1324,9 +1359,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1417,7 +1452,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
- return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ int r;
+
+ READ_LOCK(cmd);
+ r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ READ_UNLOCK(cmd);
+
+ return r;
}
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
@@ -1440,10 +1481,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
- /*
- * We ignore fail_io for this function.
- */
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
set_bit(NEEDS_CHECK, &cmd->flags);
r = superblock_lock(cmd, &sblock);
@@ -1458,19 +1496,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
dm_bm_unlock(sblock);
out:
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
{
- bool needs_check;
+ READ_LOCK(cmd);
+ *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
+ READ_UNLOCK(cmd);
- down_read(&cmd->root_lock);
- needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
- up_read(&cmd->root_lock);
-
- return needs_check;
+ return 0;
}
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
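
The macro rewrite above fixes two things at once: the fail_io/read-only test now runs after taking root_lock, closing a race with concurrent state changes, and each multi-statement macro is wrapped in do { } while (0) so it expands as a single statement. The second point matters whenever the macro sits under an if; a standalone demonstration:

#include <stdio.h>

/* Without do { } while (0), a multi-statement lock macro splices
 * incorrectly into if/else: the embedded ';' ends the if early,
 * so a trailing else no longer compiles (or binds to the wrong if). */
#define LOCK_OR_FAIL(ok) \
        do { \
                if (!(ok)) \
                        return -1; \
                puts("locked"); \
        } while (0)

static int op(int cond, int ok)
{
        if (cond)
                LOCK_OR_FAIL(ok);	/* expands as exactly one statement */
        else
                puts("skipped");
        return 0;
}

int main(void) { return op(1, 1); }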
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 2ffee21f318d..8528744195e5 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
* origin blocks to map to.
*/
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
sector_t discard_block_size,
@@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
*/
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
deleted file mode 100644
index ddb26980cd66..000000000000
--- a/drivers/md/dm-cache-policy-mq.c
+++ /dev/null
@@ -1,1473 +0,0 @@
-/*
- * Copyright (C) 2012 Red Hat. All rights reserved.
- *
- * This file is released under the GPL.
- */
-
-#include "dm-cache-policy.h"
-#include "dm.h"
-
-#include <linux/hash.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#define DM_MSG_PREFIX "cache-policy-mq"
-
-static struct kmem_cache *mq_entry_cache;
-
-/*----------------------------------------------------------------*/
-
-static unsigned next_power(unsigned n, unsigned min)
-{
- return roundup_pow_of_two(max(n, min));
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Large, sequential ios are probably better left on the origin device since
- * spindles tend to have good bandwidth.
- *
- * The io_tracker tries to spot when the io is in one of these sequential
- * modes.
- *
- * Two thresholds to switch between random and sequential io mode are defaulting
- * as follows and can be adjusted via the constructor and message interfaces.
- */
-#define RANDOM_THRESHOLD_DEFAULT 4
-#define SEQUENTIAL_THRESHOLD_DEFAULT 512
-
-enum io_pattern {
- PATTERN_SEQUENTIAL,
- PATTERN_RANDOM
-};
-
-struct io_tracker {
- enum io_pattern pattern;
-
- unsigned nr_seq_samples;
- unsigned nr_rand_samples;
- unsigned thresholds[2];
-
- dm_oblock_t last_end_oblock;
-};
-
-static void iot_init(struct io_tracker *t,
- int sequential_threshold, int random_threshold)
-{
- t->pattern = PATTERN_RANDOM;
- t->nr_seq_samples = 0;
- t->nr_rand_samples = 0;
- t->last_end_oblock = 0;
- t->thresholds[PATTERN_RANDOM] = random_threshold;
- t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
-}
-
-static enum io_pattern iot_pattern(struct io_tracker *t)
-{
- return t->pattern;
-}
-
-static void iot_update_stats(struct io_tracker *t, struct bio *bio)
-{
- if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
- t->nr_seq_samples++;
- else {
- /*
- * Just one non-sequential IO is enough to reset the
- * counters.
- */
- if (t->nr_seq_samples) {
- t->nr_seq_samples = 0;
- t->nr_rand_samples = 0;
- }
-
- t->nr_rand_samples++;
- }
-
- t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
-}
-
-static void iot_check_for_pattern_switch(struct io_tracker *t)
-{
- switch (t->pattern) {
- case PATTERN_SEQUENTIAL:
- if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
- t->pattern = PATTERN_RANDOM;
- t->nr_seq_samples = t->nr_rand_samples = 0;
- }
- break;
-
- case PATTERN_RANDOM:
- if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
- t->pattern = PATTERN_SEQUENTIAL;
- t->nr_seq_samples = t->nr_rand_samples = 0;
- }
- break;
- }
-}
-
-static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
-{
- iot_update_stats(t, bio);
- iot_check_for_pattern_switch(t);
-}
-
-/*----------------------------------------------------------------*/
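The io_tracker above is the heart of this section: runs of back-to-back sectors flip the tracker into sequential mode, and enough random ios flip it back. A minimal userspace sketch of the same logic, with illustrative names and thresholds rather than the kernel's types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct seq_tracker {
        uint64_t last_end_sector;
        unsigned nr_seq, nr_rand;
        unsigned seq_threshold, rand_threshold;
        bool sequential;
    };

    /* Mirrors iot_update_stats() followed by iot_check_for_pattern_switch(). */
    static void track(struct seq_tracker *t, uint64_t sector, unsigned nr_sectors)
    {
        if (sector == t->last_end_sector + 1)
            t->nr_seq++;
        else {
            if (t->nr_seq)          /* one random io resets the run */
                t->nr_seq = t->nr_rand = 0;
            t->nr_rand++;
        }
        t->last_end_sector = sector + nr_sectors - 1;

        if (t->sequential && t->nr_rand >= t->rand_threshold) {
            t->sequential = false;
            t->nr_seq = t->nr_rand = 0;
        } else if (!t->sequential && t->nr_seq >= t->seq_threshold) {
            t->sequential = true;
            t->nr_seq = t->nr_rand = 0;
        }
    }

    int main(void)
    {
        struct seq_tracker t = { .seq_threshold = 4, .rand_threshold = 2 };
        uint64_t s = 0;
        unsigned i;

        for (i = 0; i < 8; i++, s += 8)   /* eight back-to-back 8-sector ios */
            track(&t, s, 8);
        printf("sequential: %d\n", t.sequential);   /* prints 1 */
        return 0;
    }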
-
-
-/*
- * This queue is divided up into different levels, allowing us to push
- * entries to the back of any of the levels. Think of it as a partially
- * sorted queue.
- */
-#define NR_QUEUE_LEVELS 16u
-#define NR_SENTINELS (NR_QUEUE_LEVELS * 3)
-
-#define WRITEBACK_PERIOD HZ
-
-struct queue {
- unsigned nr_elts;
- bool current_writeback_sentinels;
- unsigned long next_writeback;
- struct list_head qs[NR_QUEUE_LEVELS];
- struct list_head sentinels[NR_SENTINELS];
-};
-
-static void queue_init(struct queue *q)
-{
- unsigned i;
-
- q->nr_elts = 0;
- q->current_writeback_sentinels = false;
- q->next_writeback = 0;
- for (i = 0; i < NR_QUEUE_LEVELS; i++) {
- INIT_LIST_HEAD(q->qs + i);
- INIT_LIST_HEAD(q->sentinels + i);
- INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i);
- INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i);
- }
-}
-
-static unsigned queue_size(struct queue *q)
-{
- return q->nr_elts;
-}
-
-static bool queue_empty(struct queue *q)
-{
- return q->nr_elts == 0;
-}
-
-/*
- * Insert an entry to the back of the given level.
- */
-static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
-{
- q->nr_elts++;
- list_add_tail(elt, q->qs + level);
-}
-
-static void queue_remove(struct queue *q, struct list_head *elt)
-{
- q->nr_elts--;
- list_del(elt);
-}
-
-static bool is_sentinel(struct queue *q, struct list_head *h)
-{
- return (h >= q->sentinels) && (h < (q->sentinels + NR_SENTINELS));
-}
-
-/*
- * Gives us the oldest entry of the lowest populated level. If the first
- * level is emptied then we shift down one level.
- */
-static struct list_head *queue_peek(struct queue *q)
-{
- unsigned level;
- struct list_head *h;
-
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each(h, q->qs + level)
- if (!is_sentinel(q, h))
- return h;
-
- return NULL;
-}
-
-static struct list_head *queue_pop(struct queue *q)
-{
- struct list_head *r = queue_peek(q);
-
- if (r) {
- q->nr_elts--;
- list_del(r);
- }
-
- return r;
-}
-
-/*
- * Pops an entry from a level that is not past a sentinel.
- */
-static struct list_head *queue_pop_old(struct queue *q)
-{
- unsigned level;
- struct list_head *h;
-
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each(h, q->qs + level) {
- if (is_sentinel(q, h))
- break;
-
- q->nr_elts--;
- list_del(h);
- return h;
- }
-
- return NULL;
-}
-
-static struct list_head *list_pop(struct list_head *lh)
-{
- struct list_head *r = lh->next;
-
- BUG_ON(!r);
- list_del_init(r);
-
- return r;
-}
-
-static struct list_head *writeback_sentinel(struct queue *q, unsigned level)
-{
- if (q->current_writeback_sentinels)
- return q->sentinels + NR_QUEUE_LEVELS + level;
- else
- return q->sentinels + 2 * NR_QUEUE_LEVELS + level;
-}
-
-static void queue_update_writeback_sentinels(struct queue *q)
-{
- unsigned i;
- struct list_head *h;
-
- if (time_after(jiffies, q->next_writeback)) {
- for (i = 0; i < NR_QUEUE_LEVELS; i++) {
- h = writeback_sentinel(q, i);
- list_del(h);
- list_add_tail(h, q->qs + i);
- }
-
- q->next_writeback = jiffies + WRITEBACK_PERIOD;
- q->current_writeback_sentinels = !q->current_writeback_sentinels;
- }
-}
-
-/*
- * Sometimes we want to iterate through entries that have been pushed since
- * a certain event. We use sentinel entries on the queues to delimit these
- * 'tick' events.
- */
-static void queue_tick(struct queue *q)
-{
- unsigned i;
-
- for (i = 0; i < NR_QUEUE_LEVELS; i++) {
- list_del(q->sentinels + i);
- list_add_tail(q->sentinels + i, q->qs + i);
- }
-}
-
-typedef void (*iter_fn)(struct list_head *, void *);
-static void queue_iterate_tick(struct queue *q, iter_fn fn, void *context)
-{
- unsigned i;
- struct list_head *h;
-
- for (i = 0; i < NR_QUEUE_LEVELS; i++) {
- list_for_each_prev(h, q->qs + i) {
- if (is_sentinel(q, h))
- break;
-
- fn(h, context);
- }
- }
-}
-
-/*----------------------------------------------------------------*/
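The queue above is easier to see in miniature: each entry lives at one of several levels, push appends at its level, and pop always drains the lowest occupied level first, giving the "partially sorted" behaviour described earlier. A standalone sketch without the sentinel machinery, where plain singly-linked lists stand in for the kernel's list_head:

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_LEVELS 4

    struct node { int val; struct node *next; };
    struct mlqueue { struct node *head[NR_LEVELS], *tail[NR_LEVELS]; };

    static void push(struct mlqueue *q, unsigned level, int val)
    {
        struct node *n = malloc(sizeof(*n));

        n->val = val;
        n->next = NULL;
        if (q->tail[level])
            q->tail[level]->next = n;
        else
            q->head[level] = n;
        q->tail[level] = n;
    }

    /* Oldest entry of the lowest populated level, like queue_pop(). */
    static int pop(struct mlqueue *q)
    {
        unsigned l;

        for (l = 0; l < NR_LEVELS; l++) {
            struct node *n = q->head[l];
            int val;

            if (!n)
                continue;
            q->head[l] = n->next;
            if (!q->head[l])
                q->tail[l] = NULL;
            val = n->val;
            free(n);
            return val;
        }
        return -1;      /* empty */
    }

    int main(void)
    {
        struct mlqueue q = { { 0 } };

        push(&q, 2, 100);   /* hot entry, high level */
        push(&q, 0, 1);     /* cold entries, low level */
        push(&q, 0, 2);
        printf("%d\n", pop(&q));    /* 1: lowest level, oldest first */
        printf("%d\n", pop(&q));    /* 2 */
        printf("%d\n", pop(&q));    /* 100 */
        return 0;
    }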
-
-/*
- * Describes a cache entry. Used in both the cache and the pre_cache.
- */
-struct entry {
- struct hlist_node hlist;
- struct list_head list;
- dm_oblock_t oblock;
-
- /*
- * FIXME: pack these better
- */
- bool dirty:1;
- unsigned hit_count;
-};
-
-/*
- * Rather than storing the cblock in an entry, we allocate all entries in
- * an array, and infer the cblock from the entry position.
- *
- * Free entries are linked together into a list.
- */
-struct entry_pool {
- struct entry *entries, *entries_end;
- struct list_head free;
- unsigned nr_allocated;
-};
-
-static int epool_init(struct entry_pool *ep, unsigned nr_entries)
-{
- unsigned i;
-
- ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
- if (!ep->entries)
- return -ENOMEM;
-
- ep->entries_end = ep->entries + nr_entries;
-
- INIT_LIST_HEAD(&ep->free);
- for (i = 0; i < nr_entries; i++)
- list_add(&ep->entries[i].list, &ep->free);
-
- ep->nr_allocated = 0;
-
- return 0;
-}
-
-static void epool_exit(struct entry_pool *ep)
-{
- vfree(ep->entries);
-}
-
-static struct entry *alloc_entry(struct entry_pool *ep)
-{
- struct entry *e;
-
- if (list_empty(&ep->free))
- return NULL;
-
- e = list_entry(list_pop(&ep->free), struct entry, list);
- INIT_LIST_HEAD(&e->list);
- INIT_HLIST_NODE(&e->hlist);
- ep->nr_allocated++;
-
- return e;
-}
-
-/*
- * This assumes the cblock hasn't already been allocated.
- */
-static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
-{
- struct entry *e = ep->entries + from_cblock(cblock);
-
- list_del_init(&e->list);
- INIT_HLIST_NODE(&e->hlist);
- ep->nr_allocated++;
-
- return e;
-}
-
-static void free_entry(struct entry_pool *ep, struct entry *e)
-{
- BUG_ON(!ep->nr_allocated);
- ep->nr_allocated--;
- INIT_HLIST_NODE(&e->hlist);
- list_add(&e->list, &ep->free);
-}
-
-/*
- * Returns NULL if the entry is free.
- */
-static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
-{
- struct entry *e = ep->entries + from_cblock(cblock);
- return !hlist_unhashed(&e->hlist) ? e : NULL;
-}
-
-static bool epool_empty(struct entry_pool *ep)
-{
- return list_empty(&ep->free);
-}
-
-static bool in_pool(struct entry_pool *ep, struct entry *e)
-{
- return e >= ep->entries && e < ep->entries_end;
-}
-
-static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
-{
- return to_cblock(e - ep->entries);
-}
-
-/*----------------------------------------------------------------*/
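A detail worth noting before the policy struct: entries never store their own cache block number. Because every entry sits in one backing array, the cblock is recovered from the entry's index, exactly as infer_cblock() does above. A hedged miniature of the pattern, with names invented for illustration:

    #include <assert.h>
    #include <stdlib.h>

    struct ent { int in_use; };

    struct pool {
        struct ent *entries;
        unsigned nr;
    };

    /* The block number is simply the entry's offset in the array. */
    static unsigned infer_block(struct pool *p, struct ent *e)
    {
        return (unsigned)(e - p->entries);
    }

    static struct ent *find_entry(struct pool *p, unsigned block)
    {
        return p->entries + block;
    }

    int main(void)
    {
        struct pool p = { calloc(16, sizeof(struct ent)), 16 };
        struct ent *e = find_entry(&p, 5);

        e->in_use = 1;
        assert(infer_block(&p, e) == 5);    /* index and block agree */
        free(p.entries);
        return 0;
    }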
-
-struct mq_policy {
- struct dm_cache_policy policy;
-
- /* protects everything */
- struct mutex lock;
- dm_cblock_t cache_size;
- struct io_tracker tracker;
-
- /*
- * Entries come from two pools, one of pre-cache entries, and one
- * for the cache proper.
- */
- struct entry_pool pre_cache_pool;
- struct entry_pool cache_pool;
-
- /*
- * We maintain three queues of entries. The cache proper,
- * consisting of a clean and dirty queue, contains the currently
- * active mappings, whereas the pre_cache tracks blocks that are
- * being hit frequently and are potential candidates for promotion
- * to the cache.
- */
- struct queue pre_cache;
- struct queue cache_clean;
- struct queue cache_dirty;
-
- /*
- * Keeps track of time, incremented by the core. We use this to
- * avoid attributing multiple hits within the same tick.
- *
- * Access to tick_protected should be done with the spin lock held.
- * It's copied to tick at the start of the map function (within the
- * mutex).
- */
- spinlock_t tick_lock;
- unsigned tick_protected;
- unsigned tick;
-
- /*
- * A count of the number of times the map function has been called
- * and found an entry in the pre_cache or cache. Currently used to
- * calculate the generation.
- */
- unsigned hit_count;
-
- /*
- * A generation is a longish period that is used to trigger some
- * bookkeeping effects, e.g. decrementing hit counts on entries.
- * This is needed to allow the cache to evolve as io patterns
- * change.
- */
- unsigned generation;
- unsigned generation_period; /* in lookups (will probably change) */
-
- unsigned discard_promote_adjustment;
- unsigned read_promote_adjustment;
- unsigned write_promote_adjustment;
-
- /*
- * The hash table allows us to quickly find an entry by origin
- * block. Both pre_cache and cache entries are in here.
- */
- unsigned nr_buckets;
- dm_block_t hash_bits;
- struct hlist_head *table;
-};
-
-#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
-#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
-#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
-#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128
-
-/*----------------------------------------------------------------*/
-
-/*
- * Simple hash table implementation. This should be replaced with the
- * standard hash table that's making its way upstream.
- */
-static void hash_insert(struct mq_policy *mq, struct entry *e)
-{
- unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);
-
- hlist_add_head(&e->hlist, mq->table + h);
-}
-
-static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
-{
- unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
- struct hlist_head *bucket = mq->table + h;
- struct entry *e;
-
- hlist_for_each_entry(e, bucket, hlist)
- if (e->oblock == oblock) {
- hlist_del(&e->hlist);
- hlist_add_head(&e->hlist, bucket);
- return e;
- }
-
- return NULL;
-}
-
-static void hash_remove(struct entry *e)
-{
- hlist_del(&e->hlist);
-}
-
-/*----------------------------------------------------------------*/
-
-static bool any_free_cblocks(struct mq_policy *mq)
-{
- return !epool_empty(&mq->cache_pool);
-}
-
-static bool any_clean_cblocks(struct mq_policy *mq)
-{
- return !queue_empty(&mq->cache_clean);
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Now we get to the meat of the policy. This section deals with deciding
- * when to add entries to the pre_cache and cache, and move between
- * them.
- */
-
-/*
- * The queue level is based on the log2 of the hit count.
- */
-static unsigned queue_level(struct entry *e)
-{
- return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
-}
-
-static bool in_cache(struct mq_policy *mq, struct entry *e)
-{
- return in_pool(&mq->cache_pool, e);
-}
-
-/*
- * Inserts the entry into the pre_cache or the cache. Ensures the cache
- * block is marked as allocated if necessary. Inserts into the hash table.
- * Sets the tick which records when the entry was last moved about.
- */
-static void push(struct mq_policy *mq, struct entry *e)
-{
- hash_insert(mq, e);
-
- if (in_cache(mq, e))
- queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
- queue_level(e), &e->list);
- else
- queue_push(&mq->pre_cache, queue_level(e), &e->list);
-}
-
-/*
- * Removes an entry from pre_cache or cache. Removes from the hash table.
- */
-static void del(struct mq_policy *mq, struct entry *e)
-{
- if (in_cache(mq, e))
- queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
- else
- queue_remove(&mq->pre_cache, &e->list);
-
- hash_remove(e);
-}
-
-/*
- * Like del, except it removes the first entry in the queue (ie. the least
- * recently used).
- */
-static struct entry *pop(struct mq_policy *mq, struct queue *q)
-{
- struct entry *e;
- struct list_head *h = queue_pop(q);
-
- if (!h)
- return NULL;
-
- e = container_of(h, struct entry, list);
- hash_remove(e);
-
- return e;
-}
-
-static struct entry *pop_old(struct mq_policy *mq, struct queue *q)
-{
- struct entry *e;
- struct list_head *h = queue_pop_old(q);
-
- if (!h)
- return NULL;
-
- e = container_of(h, struct entry, list);
- hash_remove(e);
-
- return e;
-}
-
-static struct entry *peek(struct queue *q)
-{
- struct list_head *h = queue_peek(q);
- return h ? container_of(h, struct entry, list) : NULL;
-}
-
-/*
- * The promotion threshold is adjusted every generation, as are the hit
- * counts of the entries.
- *
- * At the moment the threshold is taken by averaging the hit counts of some
- * of the entries in the cache (the first 20 entries across all levels in
- * ascending order, giving preference to the clean entries at each level).
- *
- * We can be much cleverer than this though. For example, each promotion
- * could bump up the threshold, helping to prevent churn. Much more to do
- * here.
- */
-
-#define MAX_TO_AVERAGE 20
-
-static void check_generation(struct mq_policy *mq)
-{
- unsigned total = 0, nr = 0, count = 0, level;
- struct list_head *head;
- struct entry *e;
-
- if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
- mq->hit_count = 0;
- mq->generation++;
-
- for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
- head = mq->cache_clean.qs + level;
- list_for_each_entry(e, head, list) {
- nr++;
- total += e->hit_count;
-
- if (++count >= MAX_TO_AVERAGE)
- break;
- }
-
- head = mq->cache_dirty.qs + level;
- list_for_each_entry(e, head, list) {
- nr++;
- total += e->hit_count;
-
- if (++count >= MAX_TO_AVERAGE)
- break;
- }
- }
- }
-}
-
-/*
- * Whenever we use an entry we bump up its hit counter, and push it to the
- * back of its current level.
- */
-static void requeue(struct mq_policy *mq, struct entry *e)
-{
- check_generation(mq);
- del(mq, e);
- push(mq, e);
-}
-
-/*
- * Demote the least recently used entry from the cache to the pre_cache.
- * Returns the new cache entry to use, and the old origin block it was
- * mapped to.
- *
- * We drop the hit count on the demoted entry back to 1 to stop it bouncing
- * straight back into the cache if it's subsequently hit. There are
- * various options here, and more experimentation would be good:
- *
- * - just forget about the demoted entry completely (ie. don't insert it
- *   into the pre_cache).
- * - divide the hit count rather than setting it to some hard-coded value.
- * - set the hit count to a hard coded value other than 1, eg, is it better
- * if it goes in at level 2?
- */
-static int demote_cblock(struct mq_policy *mq,
- struct policy_locker *locker, dm_oblock_t *oblock)
-{
- struct entry *demoted = peek(&mq->cache_clean);
-
- if (!demoted)
- /*
- * We could get a block from mq->cache_dirty, but that
- * would add extra latency to the triggering bio as it
- * waits for the writeback. Better to not promote this
- * time and hope there's a clean block next time this block
- * is hit.
- */
- return -ENOSPC;
-
- if (locker->fn(locker, demoted->oblock))
- /*
- * We couldn't lock the demoted block.
- */
- return -EBUSY;
-
- del(mq, demoted);
- *oblock = demoted->oblock;
- free_entry(&mq->cache_pool, demoted);
-
- /*
- * We used to put the demoted block into the pre-cache, but I think
- * it's simpler to just let it work its way up from zero again.
- * Stops blocks flickering in and out of the cache.
- */
-
- return 0;
-}
-
-/*
- * Entries in the pre_cache whose hit count passes the promotion
- * threshold move to the cache proper. Working out the correct
- * value for the promotion_threshold is crucial to this policy.
- */
-static unsigned promote_threshold(struct mq_policy *mq)
-{
- struct entry *e;
-
- if (any_free_cblocks(mq))
- return 0;
-
- e = peek(&mq->cache_clean);
- if (e)
- return e->hit_count;
-
- e = peek(&mq->cache_dirty);
- if (e)
- return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;
-
- /* This should never happen */
- return 0;
-}
-
-/*
- * We modify the basic promotion_threshold depending on the specific io.
- *
- * If the origin block has been discarded then there's no cost to copy it
- * to the cache.
- *
- * We bias towards reads, since they can be demoted at no cost if they
- * haven't been dirtied.
- */
-static unsigned adjusted_promote_threshold(struct mq_policy *mq,
- bool discarded_oblock, int data_dir)
-{
- if (data_dir == READ)
- return promote_threshold(mq) + mq->read_promote_adjustment;
-
- if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
- /*
- * We don't need to do any copying at all, so give this a
- * very low threshold.
- */
- return mq->discard_promote_adjustment;
- }
-
- return promote_threshold(mq) + mq->write_promote_adjustment;
-}
-
-static bool should_promote(struct mq_policy *mq, struct entry *e,
- bool discarded_oblock, int data_dir)
-{
- return e->hit_count >=
- adjusted_promote_threshold(mq, discarded_oblock, data_dir);
-}
-
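Taken together with the DEFAULT_*_PROMOTE_ADJUSTMENT values defined above (+1 discard, +4 read, +8 write), the promote decision is a single comparison against the hit count of the coldest clean cache entry. A quick illustrative check, assuming that base threshold is 3:

    #include <stdbool.h>
    #include <stdio.h>

    /* Read and write adjustments from this file; discards are a special case. */
    static bool should_promote(unsigned hit_count, unsigned base_threshold,
                               bool is_read)
    {
        unsigned adjustment = is_read ? 4 : 8;

        return hit_count >= base_threshold + adjustment;
    }

    int main(void)
    {
        printf("read,  7 hits: %d\n", should_promote(7, 3, true));   /* 1 */
        printf("write, 7 hits: %d\n", should_promote(7, 3, false));  /* 0 */
        return 0;
    }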
-static int cache_entry_found(struct mq_policy *mq,
- struct entry *e,
- struct policy_result *result)
-{
- requeue(mq, e);
-
- if (in_cache(mq, e)) {
- result->op = POLICY_HIT;
- result->cblock = infer_cblock(&mq->cache_pool, e);
- }
-
- return 0;
-}
-
-/*
- * Moves an entry from the pre_cache to the cache. The main work is
- * finding which cache block to use.
- */
-static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
- struct policy_locker *locker,
- struct policy_result *result)
-{
- int r;
- struct entry *new_e;
-
- /* Ensure there's a free cblock in the cache */
- if (epool_empty(&mq->cache_pool)) {
- result->op = POLICY_REPLACE;
- r = demote_cblock(mq, locker, &result->old_oblock);
- if (r) {
- result->op = POLICY_MISS;
- return 0;
- }
-
- } else
- result->op = POLICY_NEW;
-
- new_e = alloc_entry(&mq->cache_pool);
- BUG_ON(!new_e);
-
- new_e->oblock = e->oblock;
- new_e->dirty = false;
- new_e->hit_count = e->hit_count;
-
- del(mq, e);
- free_entry(&mq->pre_cache_pool, e);
- push(mq, new_e);
-
- result->cblock = infer_cblock(&mq->cache_pool, new_e);
-
- return 0;
-}
-
-static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
- bool can_migrate, bool discarded_oblock,
- int data_dir, struct policy_locker *locker,
- struct policy_result *result)
-{
- int r = 0;
-
- if (!should_promote(mq, e, discarded_oblock, data_dir)) {
- requeue(mq, e);
- result->op = POLICY_MISS;
-
- } else if (!can_migrate)
- r = -EWOULDBLOCK;
-
- else {
- requeue(mq, e);
- r = pre_cache_to_cache(mq, e, locker, result);
- }
-
- return r;
-}
-
-static void insert_in_pre_cache(struct mq_policy *mq,
- dm_oblock_t oblock)
-{
- struct entry *e = alloc_entry(&mq->pre_cache_pool);
-
- if (!e)
- /*
- * There's no spare entry structure, so we grab the least
- * used one from the pre_cache.
- */
- e = pop(mq, &mq->pre_cache);
-
- if (unlikely(!e)) {
- DMWARN("couldn't pop from pre cache");
- return;
- }
-
- e->dirty = false;
- e->oblock = oblock;
- e->hit_count = 1;
- push(mq, e);
-}
-
-static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
- struct policy_locker *locker,
- struct policy_result *result)
-{
- int r;
- struct entry *e;
-
- if (epool_empty(&mq->cache_pool)) {
- result->op = POLICY_REPLACE;
- r = demote_cblock(mq, locker, &result->old_oblock);
- if (unlikely(r)) {
- result->op = POLICY_MISS;
- insert_in_pre_cache(mq, oblock);
- return;
- }
-
- /*
- * This will always succeed, since we've just demoted.
- */
- e = alloc_entry(&mq->cache_pool);
- BUG_ON(!e);
-
- } else {
- e = alloc_entry(&mq->cache_pool);
- result->op = POLICY_NEW;
- }
-
- e->oblock = oblock;
- e->dirty = false;
- e->hit_count = 1;
- push(mq, e);
-
- result->cblock = infer_cblock(&mq->cache_pool, e);
-}
-
-static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
- bool can_migrate, bool discarded_oblock,
- int data_dir, struct policy_locker *locker,
- struct policy_result *result)
-{
- if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
- if (can_migrate)
- insert_in_cache(mq, oblock, locker, result);
- else
- return -EWOULDBLOCK;
- } else {
- insert_in_pre_cache(mq, oblock);
- result->op = POLICY_MISS;
- }
-
- return 0;
-}
-
-/*
- * Looks the oblock up in the hash table, then decides whether to put it
- * in the pre_cache or the cache, etc.
- */
-static int map(struct mq_policy *mq, dm_oblock_t oblock,
- bool can_migrate, bool discarded_oblock,
- int data_dir, struct policy_locker *locker,
- struct policy_result *result)
-{
- int r = 0;
- struct entry *e = hash_lookup(mq, oblock);
-
- if (e && in_cache(mq, e))
- r = cache_entry_found(mq, e, result);
-
- else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
- iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
- result->op = POLICY_MISS;
-
- else if (e)
- r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
- data_dir, locker, result);
-
- else
- r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
- data_dir, locker, result);
-
- if (r == -EWOULDBLOCK)
- result->op = POLICY_MISS;
-
- return r;
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Public interface, via the policy struct. See dm-cache-policy.h for a
- * description of these.
- */
-
-static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
-{
- return container_of(p, struct mq_policy, policy);
-}
-
-static void mq_destroy(struct dm_cache_policy *p)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- vfree(mq->table);
- epool_exit(&mq->cache_pool);
- epool_exit(&mq->pre_cache_pool);
- kfree(mq);
-}
-
-static void update_pre_cache_hits(struct list_head *h, void *context)
-{
- struct entry *e = container_of(h, struct entry, list);
- e->hit_count++;
-}
-
-static void update_cache_hits(struct list_head *h, void *context)
-{
- struct mq_policy *mq = context;
- struct entry *e = container_of(h, struct entry, list);
- e->hit_count++;
- mq->hit_count++;
-}
-
-static void copy_tick(struct mq_policy *mq)
-{
- unsigned long flags, tick;
-
- spin_lock_irqsave(&mq->tick_lock, flags);
- tick = mq->tick_protected;
- if (tick != mq->tick) {
- queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq);
- queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq);
- queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq);
- mq->tick = tick;
- }
-
- queue_tick(&mq->pre_cache);
- queue_tick(&mq->cache_dirty);
- queue_tick(&mq->cache_clean);
- queue_update_writeback_sentinels(&mq->cache_dirty);
- spin_unlock_irqrestore(&mq->tick_lock, flags);
-}
-
-static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
- bool can_block, bool can_migrate, bool discarded_oblock,
- struct bio *bio, struct policy_locker *locker,
- struct policy_result *result)
-{
- int r;
- struct mq_policy *mq = to_mq_policy(p);
-
- result->op = POLICY_MISS;
-
- if (can_block)
- mutex_lock(&mq->lock);
- else if (!mutex_trylock(&mq->lock))
- return -EWOULDBLOCK;
-
- copy_tick(mq);
-
- iot_examine_bio(&mq->tracker, bio);
- r = map(mq, oblock, can_migrate, discarded_oblock,
- bio_data_dir(bio), locker, result);
-
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
-{
- int r;
- struct mq_policy *mq = to_mq_policy(p);
- struct entry *e;
-
- if (!mutex_trylock(&mq->lock))
- return -EWOULDBLOCK;
-
- e = hash_lookup(mq, oblock);
- if (e && in_cache(mq, e)) {
- *cblock = infer_cblock(&mq->cache_pool, e);
- r = 0;
- } else
- r = -ENOENT;
-
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
-{
- struct entry *e;
-
- e = hash_lookup(mq, oblock);
- BUG_ON(!e || !in_cache(mq, e));
-
- del(mq, e);
- e->dirty = set;
- push(mq, e);
-}
-
-static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- __mq_set_clear_dirty(mq, oblock, true);
- mutex_unlock(&mq->lock);
-}
-
-static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- __mq_set_clear_dirty(mq, oblock, false);
- mutex_unlock(&mq->lock);
-}
-
-static int mq_load_mapping(struct dm_cache_policy *p,
- dm_oblock_t oblock, dm_cblock_t cblock,
- uint32_t hint, bool hint_valid)
-{
- struct mq_policy *mq = to_mq_policy(p);
- struct entry *e;
-
- e = alloc_particular_entry(&mq->cache_pool, cblock);
- e->oblock = oblock;
- e->dirty = false; /* this gets corrected in a minute */
- e->hit_count = hint_valid ? hint : 1;
- push(mq, e);
-
- return 0;
-}
-
-static int mq_save_hints(struct mq_policy *mq, struct queue *q,
- policy_walk_fn fn, void *context)
-{
- int r;
- unsigned level;
- struct list_head *h;
- struct entry *e;
-
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each(h, q->qs + level) {
- if (is_sentinel(q, h))
- continue;
-
- e = container_of(h, struct entry, list);
- r = fn(context, infer_cblock(&mq->cache_pool, e),
- e->oblock, e->hit_count);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
- void *context)
-{
- struct mq_policy *mq = to_mq_policy(p);
- int r = 0;
-
- mutex_lock(&mq->lock);
-
- r = mq_save_hints(mq, &mq->cache_clean, fn, context);
- if (!r)
- r = mq_save_hints(mq, &mq->cache_dirty, fn, context);
-
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
-{
- struct entry *e;
-
- e = hash_lookup(mq, oblock);
- BUG_ON(!e || !in_cache(mq, e));
-
- del(mq, e);
- free_entry(&mq->cache_pool, e);
-}
-
-static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- __remove_mapping(mq, oblock);
- mutex_unlock(&mq->lock);
-}
-
-static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
-{
- struct entry *e = epool_find(&mq->cache_pool, cblock);
-
- if (!e)
- return -ENODATA;
-
- del(mq, e);
- free_entry(&mq->cache_pool, e);
-
- return 0;
-}
-
-static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
-{
- int r;
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- r = __remove_cblock(mq, cblock);
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-#define CLEAN_TARGET_PERCENTAGE 25
-
-static bool clean_target_met(struct mq_policy *mq)
-{
- /*
- * Cache entries may not be populated, so we cannot rely on the
- * size of the clean queue.
- */
- unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty);
- unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100;
-
- return nr_clean >= target;
-}
-
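clean_target_met() above is plain arithmetic: with CLEAN_TARGET_PERCENTAGE at 25, extra writeback is only forced once fewer than a quarter of the cache blocks are clean. Worked through for a 1000-block cache:

    #include <stdbool.h>
    #include <stdio.h>

    #define CLEAN_TARGET_PERCENTAGE 25

    static bool clean_target_met(unsigned cache_size, unsigned nr_dirty)
    {
        unsigned nr_clean = cache_size - nr_dirty;
        unsigned target = cache_size * CLEAN_TARGET_PERCENTAGE / 100;

        return nr_clean >= target;
    }

    int main(void)
    {
        printf("%d\n", clean_target_met(1000, 700));    /* 1: 300 clean >= 250 */
        printf("%d\n", clean_target_met(1000, 800));    /* 0: force writeback */
        return 0;
    }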
-static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
- dm_cblock_t *cblock)
-{
- struct entry *e = pop_old(mq, &mq->cache_dirty);
-
- if (!e && !clean_target_met(mq))
- e = pop(mq, &mq->cache_dirty);
-
- if (!e)
- return -ENODATA;
-
- *oblock = e->oblock;
- *cblock = infer_cblock(&mq->cache_pool, e);
- e->dirty = false;
- push(mq, e);
-
- return 0;
-}
-
-static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
- dm_cblock_t *cblock, bool critical_only)
-{
- int r;
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- r = __mq_writeback_work(mq, oblock, cblock);
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static void __force_mapping(struct mq_policy *mq,
- dm_oblock_t current_oblock, dm_oblock_t new_oblock)
-{
- struct entry *e = hash_lookup(mq, current_oblock);
-
- if (e && in_cache(mq, e)) {
- del(mq, e);
- e->oblock = new_oblock;
- e->dirty = true;
- push(mq, e);
- }
-}
-
-static void mq_force_mapping(struct dm_cache_policy *p,
- dm_oblock_t current_oblock, dm_oblock_t new_oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- __force_mapping(mq, current_oblock, new_oblock);
- mutex_unlock(&mq->lock);
-}
-
-static dm_cblock_t mq_residency(struct dm_cache_policy *p)
-{
- dm_cblock_t r;
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- r = to_cblock(mq->cache_pool.nr_allocated);
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static void mq_tick(struct dm_cache_policy *p, bool can_block)
-{
- struct mq_policy *mq = to_mq_policy(p);
- unsigned long flags;
-
- spin_lock_irqsave(&mq->tick_lock, flags);
- mq->tick_protected++;
- spin_unlock_irqrestore(&mq->tick_lock, flags);
-
- if (can_block) {
- mutex_lock(&mq->lock);
- copy_tick(mq);
- mutex_unlock(&mq->lock);
- }
-}
-
-static int mq_set_config_value(struct dm_cache_policy *p,
- const char *key, const char *value)
-{
- struct mq_policy *mq = to_mq_policy(p);
- unsigned long tmp;
-
- if (kstrtoul(value, 10, &tmp))
- return -EINVAL;
-
- if (!strcasecmp(key, "random_threshold")) {
- mq->tracker.thresholds[PATTERN_RANDOM] = tmp;
-
- } else if (!strcasecmp(key, "sequential_threshold")) {
- mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;
-
- } else if (!strcasecmp(key, "discard_promote_adjustment"))
- mq->discard_promote_adjustment = tmp;
-
- else if (!strcasecmp(key, "read_promote_adjustment"))
- mq->read_promote_adjustment = tmp;
-
- else if (!strcasecmp(key, "write_promote_adjustment"))
- mq->write_promote_adjustment = tmp;
-
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
- unsigned maxlen, ssize_t *sz_ptr)
-{
- ssize_t sz = *sz_ptr;
- struct mq_policy *mq = to_mq_policy(p);
-
- DMEMIT("10 random_threshold %u "
- "sequential_threshold %u "
- "discard_promote_adjustment %u "
- "read_promote_adjustment %u "
- "write_promote_adjustment %u ",
- mq->tracker.thresholds[PATTERN_RANDOM],
- mq->tracker.thresholds[PATTERN_SEQUENTIAL],
- mq->discard_promote_adjustment,
- mq->read_promote_adjustment,
- mq->write_promote_adjustment);
-
- *sz_ptr = sz;
- return 0;
-}
-
-/* Init the policy plugin interface function pointers. */
-static void init_policy_functions(struct mq_policy *mq)
-{
- mq->policy.destroy = mq_destroy;
- mq->policy.map = mq_map;
- mq->policy.lookup = mq_lookup;
- mq->policy.set_dirty = mq_set_dirty;
- mq->policy.clear_dirty = mq_clear_dirty;
- mq->policy.load_mapping = mq_load_mapping;
- mq->policy.walk_mappings = mq_walk_mappings;
- mq->policy.remove_mapping = mq_remove_mapping;
- mq->policy.remove_cblock = mq_remove_cblock;
- mq->policy.writeback_work = mq_writeback_work;
- mq->policy.force_mapping = mq_force_mapping;
- mq->policy.residency = mq_residency;
- mq->policy.tick = mq_tick;
- mq->policy.emit_config_values = mq_emit_config_values;
- mq->policy.set_config_value = mq_set_config_value;
-}
-
-static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
- sector_t origin_size,
- sector_t cache_block_size)
-{
- struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
-
- if (!mq)
- return NULL;
-
- init_policy_functions(mq);
- iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
- mq->cache_size = cache_size;
-
- if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
- DMERR("couldn't initialize pool of pre-cache entries");
- goto bad_pre_cache_init;
- }
-
- if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
- DMERR("couldn't initialize pool of cache entries");
- goto bad_cache_init;
- }
-
- mq->tick_protected = 0;
- mq->tick = 0;
- mq->hit_count = 0;
- mq->generation = 0;
- mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
- mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
- mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
- mutex_init(&mq->lock);
- spin_lock_init(&mq->tick_lock);
-
- queue_init(&mq->pre_cache);
- queue_init(&mq->cache_clean);
- queue_init(&mq->cache_dirty);
-
- mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
-
- mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
- mq->hash_bits = __ffs(mq->nr_buckets);
- mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
- if (!mq->table)
- goto bad_alloc_table;
-
- return &mq->policy;
-
-bad_alloc_table:
- epool_exit(&mq->cache_pool);
-bad_cache_init:
- epool_exit(&mq->pre_cache_pool);
-bad_pre_cache_init:
- kfree(mq);
-
- return NULL;
-}
-
-/*----------------------------------------------------------------*/
-
-static struct dm_cache_policy_type mq_policy_type = {
- .name = "mq",
- .version = {1, 4, 0},
- .hint_size = 4,
- .owner = THIS_MODULE,
- .create = mq_create
-};
-
-static int __init mq_init(void)
-{
- int r;
-
- mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
- sizeof(struct entry),
- __alignof__(struct entry),
- 0, NULL);
- if (!mq_entry_cache)
- return -ENOMEM;
-
- r = dm_cache_policy_register(&mq_policy_type);
- if (r) {
- DMERR("register failed %d", r);
- kmem_cache_destroy(mq_entry_cache);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void __exit mq_exit(void)
-{
- dm_cache_policy_unregister(&mq_policy_type);
-
- kmem_cache_destroy(mq_entry_cache);
-}
-
-module_init(mq_init);
-module_exit(mq_exit);
-
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("mq cache policy");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 28d4586748d0..cf48a617a3a4 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1567,8 +1567,48 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
spin_unlock_irqrestore(&mq->lock, flags);
}
+/*
+ * smq has no config values, but the old mq policy did. To avoid breaking
+ * software we continue to accept these configurables for the mq policy,
+ * but they have no effect.
+ */
+static int mq_set_config_value(struct dm_cache_policy *p,
+ const char *key, const char *value)
+{
+ unsigned long tmp;
+
+ if (kstrtoul(value, 10, &tmp))
+ return -EINVAL;
+
+ if (!strcasecmp(key, "random_threshold") ||
+ !strcasecmp(key, "sequential_threshold") ||
+ !strcasecmp(key, "discard_promote_adjustment") ||
+ !strcasecmp(key, "read_promote_adjustment") ||
+ !strcasecmp(key, "write_promote_adjustment")) {
+ DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
+ return 0;
+ }
+
+ return -EINVAL;
+}
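In practice this keeps legacy tooling working. A message that used to tune the real mq policy, for example (device name illustrative):

    dmsetup message my-cache 0 sequential_threshold 1024

now logs the DMWARN above and returns success, rather than failing and breaking existing scripts.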
+
+static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
+ unsigned maxlen, ssize_t *sz_ptr)
+{
+ ssize_t sz = *sz_ptr;
+
+ DMEMIT("10 random_threshold 0 "
+ "sequential_threshold 0 "
+ "discard_promote_adjustment 0 "
+ "read_promote_adjustment 0 "
+ "write_promote_adjustment 0 ");
+
+ *sz_ptr = sz;
+ return 0;
+}
+
/* Init the policy plugin interface function pointers. */
-static void init_policy_functions(struct smq_policy *mq)
+static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
{
mq->policy.destroy = smq_destroy;
mq->policy.map = smq_map;
@@ -1583,6 +1623,11 @@ static void init_policy_functions(struct smq_policy *mq)
mq->policy.force_mapping = smq_force_mapping;
mq->policy.residency = smq_residency;
mq->policy.tick = smq_tick;
+
+ if (mimic_mq) {
+ mq->policy.set_config_value = mq_set_config_value;
+ mq->policy.emit_config_values = mq_emit_config_values;
+ }
}
static bool too_many_hotspot_blocks(sector_t origin_size,
@@ -1606,9 +1651,10 @@ static void calc_hotspot_params(sector_t origin_size,
*hotspot_block_size /= 2u;
}
-static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
- sector_t origin_size,
- sector_t cache_block_size)
+static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size,
+ bool mimic_mq)
{
unsigned i;
unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@@ -1618,7 +1664,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
if (!mq)
return NULL;
- init_policy_functions(mq);
+ init_policy_functions(mq, mimic_mq);
mq->cache_size = cache_size;
mq->cache_block_size = cache_block_size;
@@ -1706,19 +1752,41 @@ bad_pool_init:
return NULL;
}
+static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ return __smq_create(cache_size, origin_size, cache_block_size, false);
+}
+
+static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ return __smq_create(cache_size, origin_size, cache_block_size, true);
+}
+
/*----------------------------------------------------------------*/
static struct dm_cache_policy_type smq_policy_type = {
.name = "smq",
- .version = {1, 0, 0},
+ .version = {1, 5, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = smq_create
};
+static struct dm_cache_policy_type mq_policy_type = {
+ .name = "mq",
+ .version = {1, 5, 0},
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = mq_create,
+};
+
static struct dm_cache_policy_type default_policy_type = {
.name = "default",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = smq_create,
@@ -1735,9 +1803,17 @@ static int __init smq_init(void)
return -ENOMEM;
}
+ r = dm_cache_policy_register(&mq_policy_type);
+ if (r) {
+ DMERR("register failed (as mq) %d", r);
+ dm_cache_policy_unregister(&smq_policy_type);
+ return -ENOMEM;
+ }
+
r = dm_cache_policy_register(&default_policy_type);
if (r) {
DMERR("register failed (as default) %d", r);
+ dm_cache_policy_unregister(&mq_policy_type);
dm_cache_policy_unregister(&smq_policy_type);
return -ENOMEM;
}
@@ -1748,6 +1824,7 @@ static int __init smq_init(void)
static void __exit smq_exit(void)
{
dm_cache_policy_unregister(&smq_policy_type);
+ dm_cache_policy_unregister(&mq_policy_type);
dm_cache_policy_unregister(&default_policy_type);
}
@@ -1759,3 +1836,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");
MODULE_ALIAS("dm-cache-default");
+MODULE_ALIAS("dm-cache-mq");
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5780accffa30..ee0510f9a85e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
- bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
+ bool needs_check;
enum cache_metadata_mode old_mode = get_cache_mode(cache);
+ if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
+ DMERR("unable to read needs_check flag, setting failure mode");
+ new_mode = CM_FAIL;
+ }
+
if (new_mode == CM_WRITE && needs_check) {
DMERR("%s: unable to switch cache to write mode until repaired.",
cache_device_name(cache));
@@ -2771,7 +2776,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->split_discard_bios = false;
cache->features = ca->features;
- ti->per_bio_data_size = get_per_bio_data_size(cache);
+ ti->per_io_data_size = get_per_bio_data_size(cache);
cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
char buf[BDEVNAME_SIZE];
struct cache *cache = ti->private;
dm_cblock_t residency;
+ bool needs_check;
switch (type) {
case STATUSTYPE_INFO:
@@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
- if (dm_cache_metadata_needs_check(cache->cmd))
+ r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
+
+ if (r || needs_check)
DMEMIT("needs_check ");
else
DMEMIT("- ");
@@ -3806,7 +3814,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3147c8d09ea8..4f3cb3554944 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -28,6 +28,7 @@
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
+#include <crypto/skcipher.h>
#include <linux/device-mapper.h>
@@ -44,7 +45,7 @@ struct convert_context {
struct bvec_iter iter_out;
sector_t cc_sector;
atomic_t cc_pending;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
};
/*
@@ -86,7 +87,7 @@ struct crypt_iv_operations {
};
struct iv_essiv_private {
- struct crypto_hash *hash_tfm;
+ struct crypto_ahash *hash_tfm;
u8 *salt;
};
@@ -153,13 +154,13 @@ struct crypt_config {
/* ESSIV: struct crypto_cipher *essiv_tfm */
void *iv_private;
- struct crypto_ablkcipher **tfms;
+ struct crypto_skcipher **tfms;
unsigned tfms_count;
/*
* Layout of each crypto request:
*
- * struct ablkcipher_request
+ * struct skcipher_request
* context
* padding
* struct dm_crypt_request
@@ -189,7 +190,7 @@ static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
/*
* Use this to access cipher attributes that are the same for each CPU.
*/
-static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
return cc->tfms[0];
}
@@ -263,23 +264,25 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
struct scatterlist sg;
struct crypto_cipher *essiv_tfm;
int err;
sg_init_one(&sg, cc->key, cc->key_size);
- desc.tfm = essiv->hash_tfm;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_tfm(req, essiv->hash_tfm);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
- err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+ err = crypto_ahash_digest(req);
+ ahash_request_zero(req);
if (err)
return err;
essiv_tfm = cc->iv_private;
err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
- crypto_hash_digestsize(essiv->hash_tfm));
+ crypto_ahash_digestsize(essiv->hash_tfm));
if (err)
return err;
@@ -290,7 +293,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+ unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
struct crypto_cipher *essiv_tfm;
int r, err = 0;
@@ -320,7 +323,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
}
if (crypto_cipher_blocksize(essiv_tfm) !=
- crypto_ablkcipher_ivsize(any_tfm(cc))) {
+ crypto_skcipher_ivsize(any_tfm(cc))) {
ti->error = "Block size of ESSIV cipher does "
"not match IV size of block cipher";
crypto_free_cipher(essiv_tfm);
@@ -342,7 +345,7 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
struct crypto_cipher *essiv_tfm;
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- crypto_free_hash(essiv->hash_tfm);
+ crypto_free_ahash(essiv->hash_tfm);
essiv->hash_tfm = NULL;
kzfree(essiv->salt);
@@ -360,7 +363,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
struct crypto_cipher *essiv_tfm = NULL;
- struct crypto_hash *hash_tfm = NULL;
+ struct crypto_ahash *hash_tfm = NULL;
u8 *salt = NULL;
int err;
@@ -370,14 +373,14 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
}
/* Allocate hash algorithm */
- hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
+ hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hash_tfm)) {
ti->error = "Error initializing ESSIV hash";
err = PTR_ERR(hash_tfm);
goto bad;
}
- salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
+ salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
if (!salt) {
ti->error = "Error kmallocing salt storage in ESSIV";
err = -ENOMEM;
@@ -388,7 +391,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
essiv_tfm = setup_essiv_cpu(cc, ti, salt,
- crypto_hash_digestsize(hash_tfm));
+ crypto_ahash_digestsize(hash_tfm));
if (IS_ERR(essiv_tfm)) {
crypt_iv_essiv_dtr(cc);
return PTR_ERR(essiv_tfm);
@@ -399,7 +402,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
bad:
if (hash_tfm && !IS_ERR(hash_tfm))
- crypto_free_hash(hash_tfm);
+ crypto_free_ahash(hash_tfm);
kfree(salt);
return err;
}
@@ -419,7 +422,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
+ unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
int log = ilog2(bs);
/* we need to calculate how far we must shift the sector count
@@ -816,27 +819,27 @@ static void crypt_convert_init(struct crypt_config *cc,
}
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}
-static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+static struct skcipher_request *req_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
- return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+ return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start);
}
static u8 *iv_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
return (u8 *)ALIGN((unsigned long)(dmreq + 1),
- crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+ crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}
static int crypt_convert_block(struct crypt_config *cc,
struct convert_context *ctx,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
@@ -866,13 +869,13 @@ static int crypt_convert_block(struct crypt_config *cc,
return r;
}
- ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
- 1 << SECTOR_SHIFT, iv);
+ skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
+ 1 << SECTOR_SHIFT, iv);
if (bio_data_dir(ctx->bio_in) == WRITE)
- r = crypto_ablkcipher_encrypt(req);
+ r = crypto_skcipher_encrypt(req);
else
- r = crypto_ablkcipher_decrypt(req);
+ r = crypto_skcipher_decrypt(req);
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
r = cc->iv_gen_ops->post(cc, iv, dmreq);
@@ -891,23 +894,23 @@ static void crypt_alloc_req(struct crypt_config *cc,
if (!ctx->req)
ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
- ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+ skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
/*
* Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
* requests if driver request queue is full.
*/
- ablkcipher_request_set_callback(ctx->req,
+ skcipher_request_set_callback(ctx->req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}
static void crypt_free_req(struct crypt_config *cc,
- struct ablkcipher_request *req, struct bio *base_bio)
+ struct skcipher_request *req, struct bio *base_bio)
{
struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
- if ((struct ablkcipher_request *)(io + 1) != req)
+ if ((struct skcipher_request *)(io + 1) != req)
mempool_free(req, cc->req_pool);
}
@@ -1437,7 +1440,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
for (i = 0; i < cc->tfms_count; i++)
if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
- crypto_free_ablkcipher(cc->tfms[i]);
+ crypto_free_skcipher(cc->tfms[i]);
cc->tfms[i] = NULL;
}
@@ -1450,13 +1453,13 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
unsigned i;
int err;
- cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+ cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
GFP_KERNEL);
if (!cc->tfms)
return -ENOMEM;
for (i = 0; i < cc->tfms_count; i++) {
- cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+ cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
if (IS_ERR(cc->tfms[i])) {
err = PTR_ERR(cc->tfms[i]);
crypt_free_tfms(cc);
@@ -1476,9 +1479,9 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
for (i = 0; i < cc->tfms_count; i++) {
- r = crypto_ablkcipher_setkey(cc->tfms[i],
- cc->key + (i * subkey_size),
- subkey_size);
+ r = crypto_skcipher_setkey(cc->tfms[i],
+ cc->key + (i * subkey_size),
+ subkey_size);
if (r)
err = r;
}
@@ -1645,7 +1648,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
}
/* Initialize IV */
- cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
+ cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
cc->iv_size = max(cc->iv_size,
@@ -1763,21 +1766,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret < 0)
goto bad;
- cc->dmreq_start = sizeof(struct ablkcipher_request);
- cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
+ cc->dmreq_start = sizeof(struct skcipher_request);
+ cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
- if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+ if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
/* Allocate the padding exactly */
iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
- & crypto_ablkcipher_alignmask(any_tfm(cc));
+ & crypto_skcipher_alignmask(any_tfm(cc));
} else {
/*
* If the cipher requires greater alignment than kmalloc
* alignment, we don't know the exact position of the
* initialization vector. We must assume worst case.
*/
- iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+ iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
}
ret = -ENOMEM;
@@ -1788,7 +1791,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- cc->per_bio_data_size = ti->per_bio_data_size =
+ cc->per_bio_data_size = ti->per_io_data_size =
ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
ARCH_KMALLOC_MINALIGN);
@@ -1922,7 +1925,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
io = dm_per_bio_data(bio, cc->per_bio_data_size);
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
- io->ctx.req = (struct ablkcipher_request *)(io + 1);
+ io->ctx.req = (struct skcipher_request *)(io + 1);
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
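The dm-crypt conversion above is mechanical: every ablkcipher_* call has a direct skcipher_* counterpart in <crypto/skcipher.h>. A condensed sketch of the new-style request setup, with error handling elided and the key/sg/iv names purely illustrative:

    #include <crypto/skcipher.h>

    struct crypto_skcipher *tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
    struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);

    crypto_skcipher_setkey(tfm, key, key_len);
    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                  CRYPTO_TFM_REQ_MAY_SLEEP, done_cb, ctx);
    skcipher_request_set_crypt(req, &sg_in, &sg_out, 1 << SECTOR_SHIFT, iv);
    crypto_skcipher_encrypt(req);       /* or crypto_skcipher_decrypt() */

    skcipher_request_free(req);
    crypto_free_skcipher(tfm);

dm-crypt itself embeds the request in per-bio memory from a mempool instead of calling skcipher_request_alloc(), but the call sequence is the same.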
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index b4c356a21123..cc70871a6d29 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -204,7 +204,7 @@ out:
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
- ti->per_bio_data_size = sizeof(struct dm_delay_info);
+ ti->per_io_data_size = sizeof(struct dm_delay_info);
ti->private = dc;
return 0;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 09e2afcafd2d..b7341de87015 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -220,7 +220,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
- ti->per_bio_data_size = sizeof(struct per_bio_data);
+ ti->per_io_data_size = sizeof(struct per_bio_data);
ti->private = fc;
return 0;
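The per_bio_data_size to per_io_data_size rename in these targets is mechanical; the widened name reflects that the same field now also sizes per-request data for request-based targets. For a bio-based target the pattern is unchanged apart from the field name; a schematic sketch, not a complete target:

    #include <linux/device-mapper.h>

    struct per_bio_data {
        bool bio_submitted;
    };

    static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
    {
        /* dm core allocates this much scratch space alongside each bio */
        ti->per_io_data_size = sizeof(struct per_bio_data);
        return 0;
    }

    static int my_map(struct dm_target *ti, struct bio *bio)
    {
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

        pb->bio_submitted = false;
        return DM_MAPIO_REMAPPED;
    }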
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 80a439543259..2adf81d81fca 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1291,7 +1291,8 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
immutable_target_type = dm_get_immutable_target_type(md);
if (immutable_target_type &&
- (immutable_target_type != dm_table_get_immutable_target_type(t))) {
+ (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
+ !dm_table_get_wildcard_target(t)) {
DMWARN("can't replace immutable target type %s",
immutable_target_type->name);
r = -EINVAL;
@@ -1303,7 +1304,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
dm_set_md_type(md, dm_table_get_type(t));
/* setup md->queue to reflect md's type (may block) */
- r = dm_setup_md_queue(md);
+ r = dm_setup_md_queue(md, t);
if (r) {
DMWARN("unable to set up device queue for new table.");
goto err_unlock_md_type;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 624589d51c2c..608302e222af 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -475,7 +475,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->flush_supported = true;
ti->num_discard_bios = 1;
ti->discards_supported = true;
- ti->per_bio_data_size = sizeof(struct per_bio_data);
+ ti->per_io_data_size = sizeof(struct per_bio_data);
ti->private = lc;
return 0;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index cfa29f574c2a..677ba223e2ae 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
+#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
@@ -33,11 +34,12 @@ struct pgpath {
struct list_head list;
struct priority_group *pg; /* Owning PG */
- unsigned is_active; /* Path status */
unsigned fail_count; /* Cumulative failure count */
struct dm_path path;
struct delayed_work activate_path;
+
+ bool is_active:1; /* Path status */
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -53,10 +55,10 @@ struct priority_group {
struct path_selector ps;
unsigned pg_num; /* Reference number */
- unsigned bypassed; /* Temporarily bypass this PG? */
-
unsigned nr_pgpaths; /* Number of paths in PG */
struct list_head pgpaths;
+
+ bool bypassed:1; /* Temporarily bypass this PG? */
};
/* Multipath context */
@@ -74,21 +76,20 @@ struct multipath {
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
- unsigned pg_init_required; /* pg_init needs calling? */
unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
- unsigned pg_init_delay_retry; /* Delay pg_init retry? */
unsigned nr_valid_paths; /* Total number of usable paths */
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
- unsigned repeat_count; /* I/Os left before calling PS again */
- unsigned queue_io:1; /* Must we queue all I/O? */
- unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
- unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
- unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
- unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
+ bool queue_io:1; /* Must we queue all I/O? */
+ bool queue_if_no_path:1; /* Queue I/O if last path fails? */
+ bool saved_queue_if_no_path:1; /* Saved state during suspension */
+ bool retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
+ bool pg_init_disabled:1; /* pg_init is not currently allowed */
+ bool pg_init_required:1; /* pg_init needs calling? */
+ bool pg_init_delay_retry:1; /* Delay pg_init retry? */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
@@ -120,7 +121,6 @@ static struct kmem_cache *_mpio_cache;
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
-static int __pgpath_busy(struct pgpath *pgpath);
/*-----------------------------------------------
@@ -132,7 +132,7 @@ static struct pgpath *alloc_pgpath(void)
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
if (pgpath) {
- pgpath->is_active = 1;
+ pgpath->is_active = true;
INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
}
@@ -181,25 +181,31 @@ static void free_priority_group(struct priority_group *pg,
kfree(pg);
}
-static struct multipath *alloc_multipath(struct dm_target *ti)
+static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
{
struct multipath *m;
- unsigned min_ios = dm_get_reserved_rq_based_ios();
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
- m->queue_io = 1;
+ m->queue_io = true;
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->trigger_event, trigger_event);
init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
- m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
- if (!m->mpio_pool) {
- kfree(m);
- return NULL;
+
+ m->mpio_pool = NULL;
+ if (!use_blk_mq) {
+ unsigned min_ios = dm_get_reserved_rq_based_ios();
+
+ m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
+ if (!m->mpio_pool) {
+ kfree(m);
+ return NULL;
+ }
}
+
m->ti = ti;
ti->private = m;
}
@@ -222,26 +228,41 @@ static void free_multipath(struct multipath *m)
kfree(m);
}
-static int set_mapinfo(struct multipath *m, union map_info *info)
+static struct dm_mpath_io *get_mpio(union map_info *info)
+{
+ return info->ptr;
+}
+
+static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
struct dm_mpath_io *mpio;
+ if (!m->mpio_pool) {
+ /* Use blk-mq pdu memory requested via per_io_data_size */
+ mpio = get_mpio(info);
+ memset(mpio, 0, sizeof(*mpio));
+ return mpio;
+ }
+
mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
if (!mpio)
- return -ENOMEM;
+ return NULL;
memset(mpio, 0, sizeof(*mpio));
info->ptr = mpio;
- return 0;
+ return mpio;
}
-static void clear_mapinfo(struct multipath *m, union map_info *info)
+static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
- struct dm_mpath_io *mpio = info->ptr;
+ /* Only needed for non blk-mq (.request_fn) multipath */
+ if (m->mpio_pool) {
+ struct dm_mpath_io *mpio = info->ptr;
- info->ptr = NULL;
- mempool_free(mpio, m->mpio_pool);
+ info->ptr = NULL;
+ mempool_free(mpio, m->mpio_pool);
+ }
}
/*-----------------------------------------------
@@ -257,7 +278,7 @@ static int __pg_init_all_paths(struct multipath *m)
return 0;
m->pg_init_count++;
- m->pg_init_required = 0;
+ m->pg_init_required = false;
/* Check here to reset pg_init_required */
if (!m->current_pg)
@@ -283,11 +304,11 @@ static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
- m->pg_init_required = 1;
- m->queue_io = 1;
+ m->pg_init_required = true;
+ m->queue_io = true;
} else {
- m->pg_init_required = 0;
- m->queue_io = 0;
+ m->pg_init_required = false;
+ m->queue_io = false;
}
m->pg_init_count = 0;
@@ -298,7 +319,7 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
{
struct dm_path *path;
- path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
+ path = pg->ps.type->select_path(&pg->ps, nr_bytes);
if (!path)
return -ENXIO;
@@ -313,10 +334,10 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
struct priority_group *pg;
- unsigned bypassed = 1;
+ bool bypassed = true;
if (!m->nr_valid_paths) {
- m->queue_io = 0;
+ m->queue_io = false;
goto failed;
}
@@ -344,7 +365,7 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
continue;
if (!__choose_path_in_pg(m, pg, nr_bytes)) {
if (!bypassed)
- m->pg_init_delay_retry = 1;
+ m->pg_init_delay_retry = true;
return;
}
}
@@ -380,7 +401,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context,
struct request *rq, struct request **__clone)
{
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
int r = DM_MAPIO_REQUEUE;
size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
struct pgpath *pgpath;
@@ -390,8 +411,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
spin_lock_irq(&m->lock);
/* Do we need to select a new pgpath? */
- if (!m->current_pgpath ||
- (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
+ if (!m->current_pgpath || !m->queue_io)
__choose_pgpath(m, nr_bytes);
pgpath = m->current_pgpath;
@@ -405,11 +425,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
goto out_unlock;
}
- if (set_mapinfo(m, map_context) < 0)
+ mpio = set_mpio(m, map_context);
+ if (!mpio)
/* ENOMEM, requeue */
goto out_unlock;
- mpio = map_context->ptr;
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
@@ -418,17 +438,24 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
spin_unlock_irq(&m->lock);
if (clone) {
- /* Old request-based interface: allocated clone is passed in */
+ /*
+ * Old request-based interface: allocated clone is passed in.
+ * Used by: .request_fn stacked on .request_fn path(s).
+ */
clone->q = bdev_get_queue(bdev);
clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
} else {
- /* blk-mq request-based interface */
- *__clone = blk_get_request(bdev_get_queue(bdev),
- rq_data_dir(rq), GFP_ATOMIC);
+ /*
+ * blk-mq request-based interface; used by both:
+ * .request_fn stacked on blk-mq path(s) and
+ * blk-mq stacked on blk-mq path(s).
+ */
+ *__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
+ rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
if (IS_ERR(*__clone)) {
/* ENOMEM, requeue */
- clear_mapinfo(m, map_context);
+ clear_request_fn_mpio(m, map_context);
return r;
}
(*__clone)->bio = (*__clone)->biotail = NULL;
@@ -463,14 +490,14 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
static void multipath_release_clone(struct request *clone)
{
- blk_put_request(clone);
+ blk_mq_free_request(clone);
}
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
-static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
- unsigned save_old_value)
+static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
+ bool save_old_value)
{
unsigned long flags;
@@ -776,12 +803,12 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
argc--;
if (!strcasecmp(arg_name, "queue_if_no_path")) {
- r = queue_if_no_path(m, 1, 0);
+ r = queue_if_no_path(m, true, false);
continue;
}
if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
- m->retain_attached_hw_handler = 1;
+ m->retain_attached_hw_handler = true;
continue;
}
@@ -820,11 +847,12 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
+ bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
as.argc = argc;
as.argv = argv;
- m = alloc_multipath(ti);
+ m = alloc_multipath(ti, use_blk_mq);
if (!m) {
ti->error = "can't allocate multipath";
return -EINVAL;
@@ -880,6 +908,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
+ if (use_blk_mq)
+ ti->per_io_data_size = sizeof(struct dm_mpath_io);
return 0;
@@ -917,7 +947,7 @@ static void flush_multipath_work(struct multipath *m)
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- m->pg_init_disabled = 1;
+ m->pg_init_disabled = true;
spin_unlock_irqrestore(&m->lock, flags);
flush_workqueue(kmpath_handlerd);
@@ -926,7 +956,7 @@ static void flush_multipath_work(struct multipath *m)
flush_work(&m->trigger_event);
spin_lock_irqsave(&m->lock, flags);
- m->pg_init_disabled = 0;
+ m->pg_init_disabled = false;
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -954,7 +984,7 @@ static int fail_path(struct pgpath *pgpath)
DMWARN("Failing path %s.", pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
- pgpath->is_active = 0;
+ pgpath->is_active = false;
pgpath->fail_count++;
m->nr_valid_paths--;
@@ -987,18 +1017,13 @@ static int reinstate_path(struct pgpath *pgpath)
if (pgpath->is_active)
goto out;
- if (!pgpath->pg->ps.type->reinstate_path) {
- DMWARN("Reinstate path not supported by path selector %s",
- pgpath->pg->ps.type->name);
- r = -EINVAL;
- goto out;
- }
+ DMWARN("Reinstating path %s.", pgpath->path.dev->name);
r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
if (r)
goto out;
- pgpath->is_active = 1;
+ pgpath->is_active = true;
if (!m->nr_valid_paths++) {
m->current_pgpath = NULL;
@@ -1045,7 +1070,7 @@ static int action_dev(struct multipath *m, struct dm_dev *dev,
* Temporarily try to avoid having to use the specified PG
*/
static void bypass_pg(struct multipath *m, struct priority_group *pg,
- int bypassed)
+ bool bypassed)
{
unsigned long flags;
@@ -1078,7 +1103,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
spin_lock_irqsave(&m->lock, flags);
list_for_each_entry(pg, &m->priority_groups, list) {
- pg->bypassed = 0;
+ pg->bypassed = false;
if (--pgnum)
continue;
@@ -1096,7 +1121,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
* Set/clear bypassed status of a PG.
* PGs are numbered upwards from 1 in the order they were declared.
*/
-static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
+static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
struct priority_group *pg;
unsigned pgnum;
@@ -1120,17 +1145,17 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
/*
* Should we retry pg_init immediately?
*/
-static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
+static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
unsigned long flags;
- int limit_reached = 0;
+ bool limit_reached = false;
spin_lock_irqsave(&m->lock, flags);
if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
- m->pg_init_required = 1;
+ m->pg_init_required = true;
else
- limit_reached = 1;
+ limit_reached = true;
spin_unlock_irqrestore(&m->lock, flags);
@@ -1143,7 +1168,7 @@ static void pg_init_done(void *data, int errors)
struct priority_group *pg = pgpath->pg;
struct multipath *m = pg->m;
unsigned long flags;
- unsigned delay_retry = 0;
+ bool delay_retry = false;
/* device or driver problems */
switch (errors) {
@@ -1166,7 +1191,7 @@ static void pg_init_done(void *data, int errors)
* Probably doing something like FW upgrade on the
* controller so try the other pg.
*/
- bypass_pg(m, pg, 1);
+ bypass_pg(m, pg, true);
break;
case SCSI_DH_RETRY:
/* Wait before retrying. */
@@ -1177,6 +1202,7 @@ static void pg_init_done(void *data, int errors)
fail_path(pgpath);
errors = 0;
break;
+ case SCSI_DH_DEV_OFFLINED:
default:
/*
* We probably do not want to fail the path for a device
@@ -1194,7 +1220,7 @@ static void pg_init_done(void *data, int errors)
m->current_pg = NULL;
}
} else if (!m->pg_init_required)
- pg->bypassed = 0;
+ pg->bypassed = false;
if (--m->pg_init_in_progress)
/* Activations of other paths are still ongoing */
@@ -1205,7 +1231,7 @@ static void pg_init_done(void *data, int errors)
if (__pg_init_all_paths(m))
goto out;
}
- m->queue_io = 0;
+ m->queue_io = false;
/*
* Wake up any thread waiting to suspend.
@@ -1291,21 +1317,21 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
int error, union map_info *map_context)
{
struct multipath *m = ti->private;
- struct dm_mpath_io *mpio = map_context->ptr;
+ struct dm_mpath_io *mpio = get_mpio(map_context);
struct pgpath *pgpath;
struct path_selector *ps;
int r;
BUG_ON(!mpio);
- r  = do_end_io(m, clone, error, mpio);
+ r = do_end_io(m, clone, error, mpio);
pgpath = mpio->pgpath;
if (pgpath) {
ps = &pgpath->pg->ps;
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- clear_mapinfo(m, map_context);
+ clear_request_fn_mpio(m, map_context);
return r;
}
@@ -1318,9 +1344,9 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
*/
static void multipath_presuspend(struct dm_target *ti)
{
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
- queue_if_no_path(m, 0, 1);
+ queue_if_no_path(m, false, true);
}
static void multipath_postsuspend(struct dm_target *ti)
@@ -1337,7 +1363,7 @@ static void multipath_postsuspend(struct dm_target *ti)
*/
static void multipath_resume(struct dm_target *ti)
{
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
@@ -1366,7 +1392,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
{
int sz = 0;
unsigned long flags;
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *p;
unsigned pg_num;
@@ -1474,7 +1500,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
int r = -EINVAL;
struct dm_dev *dev;
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
action_fn action;
mutex_lock(&m->work_mutex);
@@ -1486,10 +1512,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
- r = queue_if_no_path(m, 1, 0);
+ r = queue_if_no_path(m, true, false);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
- r = queue_if_no_path(m, 0, 0);
+ r = queue_if_no_path(m, false, false);
goto out;
}
}
@@ -1500,10 +1526,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
}
if (!strcasecmp(argv[0], "disable_group")) {
- r = bypass_pg_num(m, argv[1], 1);
+ r = bypass_pg_num(m, argv[1], true);
goto out;
} else if (!strcasecmp(argv[0], "enable_group")) {
- r = bypass_pg_num(m, argv[1], 0);
+ r = bypass_pg_num(m, argv[1], false);
goto out;
} else if (!strcasecmp(argv[0], "switch_group")) {
r = switch_pg_num(m, argv[1]);
@@ -1604,7 +1630,7 @@ out:
return ret;
}
-static int __pgpath_busy(struct pgpath *pgpath)
+static int pgpath_busy(struct pgpath *pgpath)
{
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
@@ -1621,7 +1647,7 @@ static int __pgpath_busy(struct pgpath *pgpath)
*/
static int multipath_busy(struct dm_target *ti)
{
- int busy = 0, has_active = 0;
+ bool busy = false, has_active = false;
struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *pgpath;
@@ -1632,7 +1658,7 @@ static int multipath_busy(struct dm_target *ti)
/* pg_init in progress or no paths available */
if (m->pg_init_in_progress ||
(!m->nr_valid_paths && m->queue_if_no_path)) {
- busy = 1;
+ busy = true;
goto out;
}
/* Guess which priority_group will be used at next mapping time */
@@ -1654,13 +1680,12 @@ static int multipath_busy(struct dm_target *ti)
* If there is one non-busy active path at least, the path selector
* will be able to select it. So we consider such a pg as not busy.
*/
- busy = 1;
+ busy = true;
list_for_each_entry(pgpath, &pg->pgpaths, list)
if (pgpath->is_active) {
- has_active = 1;
-
- if (!__pgpath_busy(pgpath)) {
- busy = 0;
+ has_active = true;
+ if (!pgpath_busy(pgpath)) {
+ busy = false;
break;
}
}
@@ -1671,7 +1696,7 @@ static int multipath_busy(struct dm_target *ti)
* the current_pg will be changed at next mapping time.
* We need to try mapping to determine it.
*/
- busy = 0;
+ busy = false;
out:
spin_unlock_irqrestore(&m->lock, flags);
@@ -1684,7 +1709,8 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
+ .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
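
Condensed, the two dm_mpath_io allocation paths set up in this file reduce to the sketch below (a restatement of set_mpio()/get_mpio() above, not new code): with blk-mq, m->mpio_pool stays NULL and the per-request pdu reserved via ti->per_io_data_size is reused, so the fast path performs no allocation at all.

struct dm_mpath_io *mpio;

if (!m->mpio_pool) {
	/* blk-mq: info->ptr already points into the request pdu */
	mpio = get_mpio(map_context);
	memset(mpio, 0, sizeof(*mpio));
} else {
	/* .request_fn: mempool allocation, freed again in end_io */
	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
}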
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index e7d1fa8b0459..b6eb5365b1a4 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -50,13 +50,8 @@ struct path_selector_type {
/*
* Chooses a path for this io, if no paths are available then
* NULL will be returned.
- *
- * repeat_count is the number of times to use the path before
- * calling the function again. 0 means don't call it again unless
- * the path fails.
*/
struct dm_path *(*select_path) (struct path_selector *ps,
- unsigned *repeat_count,
size_t nr_bytes);
/*
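
With repeat_count gone from the interface, a conforming path selector reduces to the shape below -- a minimal sketch, not part of this commit (first_select_path and its trivial policy are illustrative); returning NULL makes __choose_path_in_pg() fail the PG with -ENXIO:

static struct dm_path *first_select_path(struct path_selector *ps,
					 size_t nr_bytes)
{
	struct selector *s = ps->context;	/* holds the valid-path list */

	if (list_empty(&s->valid_paths))
		return NULL;			/* no usable path */

	return list_first_entry(&s->valid_paths,
				struct path_info, list)->path;
}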
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 3941fae0de9f..23f178641794 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -23,12 +23,13 @@
#include <linux/atomic.h>
#define DM_MSG_PREFIX "multipath queue-length"
-#define QL_MIN_IO 128
-#define QL_VERSION "0.1.0"
+#define QL_MIN_IO 1
+#define QL_VERSION "0.2.0"
struct selector {
struct list_head valid_paths;
struct list_head failed_paths;
+ spinlock_t lock;
};
struct path_info {
@@ -45,6 +46,7 @@ static struct selector *alloc_selector(void)
if (s) {
INIT_LIST_HEAD(&s->valid_paths);
INIT_LIST_HEAD(&s->failed_paths);
+ spin_lock_init(&s->lock);
}
return s;
@@ -113,6 +115,7 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
struct path_info *pi;
unsigned repeat_count = QL_MIN_IO;
char dummy;
+ unsigned long flags;
/*
* Arguments: [<repeat_count>]
@@ -129,6 +132,11 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
return -EINVAL;
}
+ if (repeat_count > 1) {
+ DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
+ repeat_count = 1;
+ }
+
/* Allocate the path information structure */
pi = kmalloc(sizeof(*pi), GFP_KERNEL);
if (!pi) {
@@ -142,7 +150,9 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
path->pscontext = pi;
+ spin_lock_irqsave(&s->lock, flags);
list_add_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -151,16 +161,22 @@ static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
{
struct selector *s = ps->context;
struct path_info *pi = path->pscontext;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
list_move(&pi->list, &s->failed_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
{
struct selector *s = ps->context;
struct path_info *pi = path->pscontext;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
list_move_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -168,14 +184,16 @@ static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
/*
* Select a path having the minimum number of in-flight I/Os
*/
-static struct dm_path *ql_select_path(struct path_selector *ps,
- unsigned *repeat_count, size_t nr_bytes)
+static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
+ struct dm_path *ret = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
if (list_empty(&s->valid_paths))
- return NULL;
+ goto out;
/* Change preferred (first in list) path to evenly balance. */
list_move_tail(s->valid_paths.next, &s->valid_paths);
@@ -190,11 +208,12 @@ static struct dm_path *ql_select_path(struct path_selector *ps,
}
if (!best)
- return NULL;
-
- *repeat_count = best->repeat_count;
+ goto out;
- return best->path;
+ ret = best->path;
+out:
+ spin_unlock_irqrestore(&s->lock, flags);
+ return ret;
}
static int ql_start_io(struct path_selector *ps, struct dm_path *path,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index f2a363a89629..b3ccf1e0d4f2 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1121,7 +1121,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
- ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
+ ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 6ab1192cdd5f..4ace1da17db8 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -17,6 +17,8 @@
#include <linux/module.h>
#define DM_MSG_PREFIX "multipath round-robin"
+#define RR_MIN_IO 1000
+#define RR_VERSION "1.1.0"
/*-----------------------------------------------------------------
* Path-handling code, paths are held in lists
@@ -41,23 +43,48 @@ static void free_paths(struct list_head *paths)
* Round-robin selector
*---------------------------------------------------------------*/
-#define RR_MIN_IO 1000
-
struct selector {
struct list_head valid_paths;
struct list_head invalid_paths;
+ spinlock_t lock;
+ struct dm_path * __percpu *current_path;
+ struct percpu_counter repeat_count;
};
+static void set_percpu_current_path(struct selector *s, struct dm_path *path)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(s->current_path, cpu) = path;
+}
+
static struct selector *alloc_selector(void)
{
struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s) {
- INIT_LIST_HEAD(&s->valid_paths);
- INIT_LIST_HEAD(&s->invalid_paths);
- }
+ if (!s)
+ return NULL;
+
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->invalid_paths);
+ spin_lock_init(&s->lock);
+
+ s->current_path = alloc_percpu(struct dm_path *);
+ if (!s->current_path)
+ goto out_current_path;
+ set_percpu_current_path(s, NULL);
+
+ if (percpu_counter_init(&s->repeat_count, 0, GFP_KERNEL))
+ goto out_repeat_count;
return s;
+
+out_repeat_count:
+ free_percpu(s->current_path);
+out_current_path:
+ kfree(s);
+ return NULL;
}
static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
@@ -74,10 +101,12 @@ static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
static void rr_destroy(struct path_selector *ps)
{
- struct selector *s = (struct selector *) ps->context;
+ struct selector *s = ps->context;
free_paths(&s->valid_paths);
free_paths(&s->invalid_paths);
+ free_percpu(s->current_path);
+ percpu_counter_destroy(&s->repeat_count);
kfree(s);
ps->context = NULL;
}
@@ -111,10 +140,11 @@ static int rr_status(struct path_selector *ps, struct dm_path *path,
static int rr_add_path(struct path_selector *ps, struct dm_path *path,
int argc, char **argv, char **error)
{
- struct selector *s = (struct selector *) ps->context;
+ struct selector *s = ps->context;
struct path_info *pi;
unsigned repeat_count = RR_MIN_IO;
char dummy;
+ unsigned long flags;
if (argc > 1) {
*error = "round-robin ps: incorrect number of arguments";
@@ -139,42 +169,65 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
path->pscontext = pi;
+ spin_lock_irqsave(&s->lock, flags);
list_add_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
static void rr_fail_path(struct path_selector *ps, struct dm_path *p)
{
- struct selector *s = (struct selector *) ps->context;
+ unsigned long flags;
+ struct selector *s = ps->context;
struct path_info *pi = p->pscontext;
+ spin_lock_irqsave(&s->lock, flags);
+ if (p == *this_cpu_ptr(s->current_path))
+ set_percpu_current_path(s, NULL);
+
list_move(&pi->list, &s->invalid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p)
{
- struct selector *s = (struct selector *) ps->context;
+ unsigned long flags;
+ struct selector *s = ps->context;
struct path_info *pi = p->pscontext;
+ spin_lock_irqsave(&s->lock, flags);
list_move(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
-static struct dm_path *rr_select_path(struct path_selector *ps,
- unsigned *repeat_count, size_t nr_bytes)
+static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
{
- struct selector *s = (struct selector *) ps->context;
+ unsigned long flags;
+ struct selector *s = ps->context;
struct path_info *pi = NULL;
+ struct dm_path *current_path = NULL;
+
+ current_path = *this_cpu_ptr(s->current_path);
+ if (current_path) {
+ percpu_counter_dec(&s->repeat_count);
+ if (percpu_counter_read_positive(&s->repeat_count) > 0)
+ return current_path;
+ }
+ spin_lock_irqsave(&s->lock, flags);
if (!list_empty(&s->valid_paths)) {
pi = list_entry(s->valid_paths.next, struct path_info, list);
list_move_tail(&pi->list, &s->valid_paths);
- *repeat_count = pi->repeat_count;
+ percpu_counter_set(&s->repeat_count, pi->repeat_count);
+ set_percpu_current_path(s, pi->path);
+ current_path = pi->path;
}
+ spin_unlock_irqrestore(&s->lock, flags);
- return pi ? pi->path : NULL;
+ return current_path;
}
static struct path_selector_type rr_ps = {
@@ -198,7 +251,7 @@ static int __init dm_rr_init(void)
if (r < 0)
DMERR("register failed %d", r);
- DMINFO("version 1.0.0 loaded");
+ DMINFO("version " RR_VERSION " loaded");
return r;
}
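
The lock avoidance introduced above splits into a lockless fast path and a locked slow path, sketched here assuming the fields added to struct selector in this diff. percpu_counter_read_positive() is deliberately approximate; that is acceptable because repeat_count is only a rebalancing hint, not a correctness invariant:

struct dm_path *path = *this_cpu_ptr(s->current_path);

if (path) {
	percpu_counter_dec(&s->repeat_count);
	if (percpu_counter_read_positive(&s->repeat_count) > 0)
		return path;		/* common case: no spinlock taken */
}

/*
 * Budget drained (or no cached path): rotate the list under s->lock,
 * percpu_counter_set() the new budget and republish the chosen path
 * to every CPU via set_percpu_current_path().
 */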
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index 9df8f6bd6418..7b8642045c55 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -19,11 +19,12 @@
#define ST_MAX_RELATIVE_THROUGHPUT 100
#define ST_MAX_RELATIVE_THROUGHPUT_SHIFT 7
#define ST_MAX_INFLIGHT_SIZE ((size_t)-1 >> ST_MAX_RELATIVE_THROUGHPUT_SHIFT)
-#define ST_VERSION "0.2.0"
+#define ST_VERSION "0.3.0"
struct selector {
struct list_head valid_paths;
struct list_head failed_paths;
+ spinlock_t lock;
};
struct path_info {
@@ -41,6 +42,7 @@ static struct selector *alloc_selector(void)
if (s) {
INIT_LIST_HEAD(&s->valid_paths);
INIT_LIST_HEAD(&s->failed_paths);
+ spin_lock_init(&s->lock);
}
return s;
@@ -111,6 +113,7 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
unsigned repeat_count = ST_MIN_IO;
unsigned relative_throughput = 1;
char dummy;
+ unsigned long flags;
/*
* Arguments: [<repeat_count> [<relative_throughput>]]
@@ -134,6 +137,11 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
return -EINVAL;
}
+ if (repeat_count > 1) {
+ DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
+ repeat_count = 1;
+ }
+
if ((argc == 2) &&
(sscanf(argv[1], "%u%c", &relative_throughput, &dummy) != 1 ||
relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) {
@@ -155,7 +163,9 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
path->pscontext = pi;
+ spin_lock_irqsave(&s->lock, flags);
list_add_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -164,16 +174,22 @@ static void st_fail_path(struct path_selector *ps, struct dm_path *path)
{
struct selector *s = ps->context;
struct path_info *pi = path->pscontext;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
list_move(&pi->list, &s->failed_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int st_reinstate_path(struct path_selector *ps, struct dm_path *path)
{
struct selector *s = ps->context;
struct path_info *pi = path->pscontext;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
list_move_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -255,14 +271,16 @@ static int st_compare_load(struct path_info *pi1, struct path_info *pi2,
return pi2->relative_throughput - pi1->relative_throughput;
}
-static struct dm_path *st_select_path(struct path_selector *ps,
- unsigned *repeat_count, size_t nr_bytes)
+static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
+ struct dm_path *ret = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
if (list_empty(&s->valid_paths))
- return NULL;
+ goto out;
/* Change preferred (first in list) path to evenly balance. */
list_move_tail(s->valid_paths.next, &s->valid_paths);
@@ -272,11 +290,12 @@ static struct dm_path *st_select_path(struct path_selector *ps,
best = pi;
if (!best)
- return NULL;
-
- *repeat_count = best->repeat_count;
+ goto out;
- return best->path;
+ ret = best->path;
+out:
+ spin_unlock_irqrestore(&s->lock, flags);
+ return ret;
}
static int st_start_io(struct path_selector *ps, struct dm_path *path,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 3766386080a4..70bb0e8b62ce 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
+ dev_t origin_dev, cow_dev;
unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
@@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Cannot get origin device";
goto bad_origin;
}
+ origin_dev = s->origin->bdev->bd_dev;
cow_path = argv[0];
argv++;
argc--;
+ cow_dev = dm_get_dev_t(cow_path);
+ if (cow_dev && cow_dev == origin_dev) {
+ ti->error = "COW device cannot be the same as origin device";
+ r = -EINVAL;
+ goto bad_cow;
+ }
+
r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
if (r) {
ti->error = "Cannot get COW device";
@@ -1201,7 +1210,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = s;
ti->num_flush_bios = num_flush_bios;
- ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
+ ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 061152a43730..f9e8f0bef332 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
}
/*
+ * Convert the path to a device
+ */
+dev_t dm_get_dev_t(const char *path)
+{
+ dev_t uninitialized_var(dev);
+ struct block_device *bdev;
+
+ bdev = lookup_bdev(path);
+ if (IS_ERR(bdev))
+ dev = name_to_dev_t(path);
+ else {
+ dev = bdev->bd_dev;
+ bdput(bdev);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(dm_get_dev_t);
+
+/*
* Add a device to the list, or just increment the usage count if
* it's already present.
*/
@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
struct dm_dev **result)
{
int r;
- dev_t uninitialized_var(dev);
+ dev_t dev;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
- struct block_device *bdev;
BUG_ON(!t);
- /* convert the path to a device */
- bdev = lookup_bdev(path);
- if (IS_ERR(bdev)) {
- dev = name_to_dev_t(path);
- if (!dev)
- return -ENODEV;
- } else {
- dev = bdev->bd_dev;
- bdput(bdev);
- }
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
dd = find_device(&t->devices, dev);
if (!dd) {
@@ -920,6 +932,30 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
return t->immutable_target_type;
}
+struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
+{
+ /* Immutable target is implicitly a singleton */
+ if (t->num_targets > 1 ||
+ !dm_target_is_immutable(t->targets[0].type))
+ return NULL;
+
+ return t->targets;
+}
+
+struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
+{
+ struct dm_target *uninitialized_var(ti);
+ unsigned i = 0;
+
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+ if (dm_target_is_wildcard(ti->type))
+ return ti;
+ }
+
+ return NULL;
+}
+
bool dm_table_request_based(struct dm_table *t)
{
return __table_type_request_based(dm_table_get_type(t));
@@ -933,7 +969,7 @@ bool dm_table_mq_request_based(struct dm_table *t)
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
unsigned type = dm_table_get_type(t);
- unsigned per_bio_data_size = 0;
+ unsigned per_io_data_size = 0;
struct dm_target *tgt;
unsigned i;
@@ -945,10 +981,10 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
if (type == DM_TYPE_BIO_BASED)
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
- per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
+ per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
}
- t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
+ t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
if (!t->mempools)
return -ENOMEM;
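
dm_get_dev_t() is exported so a target can resolve a path argument to a dev_t before committing to dm_get_device(), which is exactly what the snapshot_ctr() hunk earlier uses for its COW/origin collision check. A hedged usage sketch (the path string is illustrative):

dev_t dev = dm_get_dev_t("/dev/sdb1");	/* tries lookup_bdev(), else name_to_dev_t() */

if (!dev)
	return -ENODEV;			/* neither lookup resolved the path */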
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 925ec1b15e75..a317dd884ba6 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -150,7 +150,8 @@ static void io_err_release_clone_rq(struct request *clone)
static struct target_type error_target = {
.name = "error",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
+ .features = DM_TARGET_WILDCARD,
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index f962d6453afd..43824d73366d 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -344,7 +344,7 @@ static void subtree_dec(void *context, const void *value)
memcpy(&root_le, value, sizeof(root_le));
root = le64_to_cpu(root_le);
if (dm_btree_del(info, root))
- DMERR("btree delete failed\n");
+ DMERR("btree delete failed");
}
static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
@@ -1981,5 +1981,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
- dm_tm_issue_prefetches(pmd->tm);
+ down_read(&pmd->root_lock);
+ if (!pmd->fail_io)
+ dm_tm_issue_prefetches(pmd->tm);
+ up_read(&pmd->root_lock);
}
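
The prefetch fix above brings dm_pool_issue_prefetches() in line with the pattern the other pmd entry points in this file already follow; the general shape, restated as a sketch:

down_read(&pmd->root_lock);		/* shared: prefetching only reads */
if (!pmd->fail_io)			/* never touch failed metadata */
	dm_tm_issue_prefetches(pmd->tm);
up_read(&pmd->root_lock);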
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 72d91f477683..92237b6fa8cd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -235,6 +235,7 @@ struct pool {
struct pool_features pf;
bool low_water_triggered:1; /* A dm event has been sent */
bool suspended:1;
+ bool out_of_data_space:1;
struct dm_bio_prison *prison;
struct dm_kcopyd_client *copier;
@@ -461,9 +462,16 @@ static void cell_error_with_code(struct pool *pool,
dm_bio_prison_free_cell(pool->prison, cell);
}
+static int get_pool_io_error_code(struct pool *pool)
+{
+ return pool->out_of_data_space ? -ENOSPC : -EIO;
+}
+
static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
- cell_error_with_code(pool, cell, -EIO);
+ int error = get_pool_io_error_code(pool);
+
+ cell_error_with_code(pool, cell, error);
}
static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@@ -622,7 +630,9 @@ static void error_retry_list_with_code(struct pool *pool, int error)
static void error_retry_list(struct pool *pool)
{
- return error_retry_list_with_code(pool, -EIO);
+ int error = get_pool_io_error_code(pool);
+
+ return error_retry_list_with_code(pool, error);
}
/*
@@ -2419,6 +2429,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
*/
if (old_mode != new_mode)
notify_of_pool_mode_change_to_oods(pool);
+ pool->out_of_data_space = true;
pool->process_bio = process_bio_read_only;
pool->process_discard = process_discard_bio;
pool->process_cell = process_cell_read_only;
@@ -2432,6 +2443,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
case PM_WRITE:
if (old_mode != new_mode)
notify_of_pool_mode_change(pool, "write");
+ pool->out_of_data_space = false;
pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
@@ -2832,6 +2844,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
INIT_LIST_HEAD(&pool->active_thins);
pool->low_water_triggered = false;
pool->suspended = true;
+ pool->out_of_data_space = false;
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
@@ -3886,7 +3899,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 17, 0},
+ .version = {1, 18, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4037,7 +4050,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_flush_bios = 1;
ti->flush_supported = true;
- ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
+ ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
ti->discard_zeroes_data_unsupported = true;
@@ -4260,7 +4273,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 17, 0},
+ .version = {1, 18, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
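
The new out_of_data_space flag changes only which errno failed bios carry; a sketch of the consumer-visible effect (bi_error being the bio error field in this kernel generation), letting upper layers tell "pool full" from a genuine media error:

int error = get_pool_io_error_code(pool);	/* -ENOSPC while out of data
						   space, -EIO otherwise */
bio->bi_error = error;
bio_endio(bio);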
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 1cc10c4de701..459a9f8905ed 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -812,7 +812,7 @@ int verity_fec_ctr(struct dm_verity *v)
}
/* Reserve space for our per-bio data */
- ti->per_bio_data_size += sizeof(struct dm_verity_fec_io);
+ ti->per_io_data_size += sizeof(struct dm_verity_fec_io);
return 0;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 5c5d30cb6ec5..0aba34a7b3b3 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -354,7 +354,7 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
size_t len))
{
unsigned todo = 1 << v->data_dev_block_bits;
- struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
do {
int r;
@@ -460,7 +460,7 @@ static int verity_verify_io(struct dm_verity_io *io)
static void verity_finish_io(struct dm_verity_io *io, int error)
{
struct dm_verity *v = io->v;
- struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_error = error;
@@ -574,7 +574,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE)
return -EIO;
- io = dm_per_bio_data(bio, ti->per_bio_data_size);
+ io = dm_per_bio_data(bio, ti->per_io_data_size);
io->v = v;
io->orig_bi_end_io = bio->bi_end_io;
io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
@@ -1036,15 +1036,15 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- ti->per_bio_data_size = sizeof(struct dm_verity_io) +
+ ti->per_io_data_size = sizeof(struct dm_verity_io) +
v->shash_descsize + v->digest_size * 2;
r = verity_fec_ctr(v);
if (r)
goto bad;
- ti->per_bio_data_size = roundup(ti->per_bio_data_size,
- __alignof__(struct dm_verity_io));
+ ti->per_io_data_size = roundup(ti->per_io_data_size,
+ __alignof__(struct dm_verity_io));
return 0;
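
Flattened into one expression, the verity per-I/O sizing above works out as below (a sketch; verity_fec_ctr() contributes its term only when FEC is actually configured):

size_t sz = sizeof(struct dm_verity_io)
	  + v->shash_descsize + v->digest_size * 2;	/* verity_ctr() */
sz += sizeof(struct dm_verity_fec_io);			/* verity_fec_ctr() */
ti->per_io_data_size = roundup(sz, __alignof__(struct dm_verity_io));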
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dd834927bc66..3d3ac13287a4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -106,14 +106,6 @@ struct dm_rq_clone_bio_info {
struct bio clone;
};
-union map_info *dm_get_rq_mapinfo(struct request *rq)
-{
- if (rq && rq->end_io_data)
- return &((struct dm_rq_target_io *)rq->end_io_data)->info;
- return NULL;
-}
-EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
-
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -129,28 +121,18 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_SUSPENDED_INTERNALLY 7
/*
- * A dummy definition to make RCU happy.
- * struct dm_table should never be dereferenced in this file.
- */
-struct dm_table {
- int undefined__;
-};
-
-/*
* Work processed by per-device workqueue.
*/
struct mapped_device {
struct srcu_struct io_barrier;
struct mutex suspend_lock;
- atomic_t holders;
- atomic_t open_count;
/*
- * The current mapping.
+ * The current mapping (struct dm_table *).
* Use dm_get_live_table{_fast} or take suspend_lock for
* dereference.
*/
- struct dm_table __rcu *map;
+ void __rcu *map;
struct list_head table_devices;
struct mutex table_devices_lock;
@@ -158,10 +140,16 @@ struct mapped_device {
unsigned long flags;
struct request_queue *queue;
+ int numa_node_id;
+
unsigned type;
/* Protect queue and type against concurrent access. */
struct mutex type_lock;
+ atomic_t holders;
+ atomic_t open_count;
+
+ struct dm_target *immutable_target;
struct target_type *immutable_target_type;
struct gendisk *disk;
@@ -175,8 +163,20 @@ struct mapped_device {
atomic_t pending[2];
wait_queue_head_t wait;
struct work_struct work;
- struct bio_list deferred;
spinlock_t deferred_lock;
+ struct bio_list deferred;
+
+ /*
+ * Event handling.
+ */
+ wait_queue_head_t eventq;
+ atomic_t event_nr;
+ atomic_t uevent_seq;
+ struct list_head uevent_list;
+ spinlock_t uevent_lock; /* Protect access to uevent_list */
+
+ /* the number of internal suspends */
+ unsigned internal_suspend_count;
/*
* Processing queue (flush)
@@ -192,32 +192,21 @@ struct mapped_device {
struct bio_set *bs;
/*
- * Event handling.
- */
- atomic_t event_nr;
- wait_queue_head_t eventq;
- atomic_t uevent_seq;
- struct list_head uevent_list;
- spinlock_t uevent_lock; /* Protect access to uevent_list */
-
- /*
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
- struct block_device *bdev;
/* forced geometry settings */
struct hd_geometry geometry;
+ struct block_device *bdev;
+
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
- /* the number of internal suspends */
- unsigned internal_suspend_count;
-
struct dm_stats stats;
struct kthread_worker kworker;
@@ -230,8 +219,9 @@ struct mapped_device {
ktime_t last_rq_start_time;
/* for blk-mq request-based DM support */
- struct blk_mq_tag_set tag_set;
- bool use_blk_mq;
+ struct blk_mq_tag_set *tag_set;
+ bool use_blk_mq:1;
+ bool init_tio_pdu:1;
};
#ifdef CONFIG_DM_MQ_DEFAULT
@@ -240,10 +230,19 @@ static bool use_blk_mq = true;
static bool use_blk_mq = false;
#endif
+#define DM_MQ_NR_HW_QUEUES 1
+#define DM_MQ_QUEUE_DEPTH 2048
+#define DM_NUMA_NODE NUMA_NO_NODE
+
+static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
+static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
+static int dm_numa_node = DM_NUMA_NODE;
+
bool dm_use_blk_mq(struct mapped_device *md)
{
return md->use_blk_mq;
}
+EXPORT_SYMBOL_GPL(dm_use_blk_mq);
/*
* For mempools pre-allocation at the table loading time.
@@ -277,6 +276,27 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
*/
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+static int __dm_get_module_param_int(int *module_param, int min, int max)
+{
+ int param = ACCESS_ONCE(*module_param);
+ int modified_param = 0;
+ bool modified = true;
+
+ if (param < min)
+ modified_param = min;
+ else if (param > max)
+ modified_param = max;
+ else
+ modified = false;
+
+ if (modified) {
+ (void)cmpxchg(module_param, param, modified_param);
+ param = modified_param;
+ }
+
+ return param;
+}
+
static unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
@@ -310,6 +330,23 @@ unsigned dm_get_reserved_rq_based_ios(void)
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
+static unsigned dm_get_blk_mq_nr_hw_queues(void)
+{
+ return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
+}
+
+static unsigned dm_get_blk_mq_queue_depth(void)
+{
+ return __dm_get_module_param(&dm_mq_queue_depth,
+ DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
+}
+
+static int dm_get_numa_node(void)
+{
+ return __dm_get_module_param_int(&dm_numa_node,
+ DM_NUMA_NODE, num_online_nodes() - 1);
+}
+
static int __init local_init(void)
{
int r = -ENOMEM;
@@ -323,7 +360,7 @@ static int __init local_init(void)
if (!_rq_tio_cache)
goto out_free_io_cache;
- _rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
+ _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
__alignof__(struct request), 0, NULL);
if (!_rq_cache)
goto out_free_rq_tio_cache;
@@ -556,16 +593,17 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
-static int dm_get_live_table_for_ioctl(struct mapped_device *md,
- struct dm_target **tgt, struct block_device **bdev,
- fmode_t *mode, int *srcu_idx)
+static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
+ struct block_device **bdev,
+ fmode_t *mode)
{
+ struct dm_target *tgt;
struct dm_table *map;
- int r;
+ int srcu_idx, r;
retry:
r = -ENOTTY;
- map = dm_get_live_table(md, srcu_idx);
+ map = dm_get_live_table(md, &srcu_idx);
if (!map || !dm_table_get_size(map))
goto out;
@@ -573,9 +611,8 @@ retry:
if (dm_table_get_num_targets(map) != 1)
goto out;
- *tgt = dm_table_get_target(map, 0);
-
- if (!(*tgt)->type->prepare_ioctl)
+ tgt = dm_table_get_target(map, 0);
+ if (!tgt->type->prepare_ioctl)
goto out;
if (dm_suspended_md(md)) {
@@ -583,14 +620,16 @@ retry:
goto out;
}
- r = (*tgt)->type->prepare_ioctl(*tgt, bdev, mode);
+ r = tgt->type->prepare_ioctl(tgt, bdev, mode);
if (r < 0)
goto out;
+ bdgrab(*bdev);
+ dm_put_live_table(md, srcu_idx);
return r;
out:
- dm_put_live_table(md, *srcu_idx);
+ dm_put_live_table(md, srcu_idx);
if (r == -ENOTCONN && !fatal_signal_pending(current)) {
msleep(10);
goto retry;
@@ -602,11 +641,9 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mapped_device *md = bdev->bd_disk->private_data;
- struct dm_target *tgt;
- struct block_device *tgt_bdev = NULL;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -621,9 +658,9 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
goto out;
}
- r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg);
+ r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
@@ -642,24 +679,24 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
bio_put(&tio->clone);
}
-static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
- gfp_t gfp_mask)
+static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
+ gfp_t gfp_mask)
{
return mempool_alloc(md->io_pool, gfp_mask);
}
-static void free_rq_tio(struct dm_rq_target_io *tio)
+static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
mempool_free(tio, tio->md->io_pool);
}
-static struct request *alloc_clone_request(struct mapped_device *md,
- gfp_t gfp_mask)
+static struct request *alloc_old_clone_request(struct mapped_device *md,
+ gfp_t gfp_mask)
{
return mempool_alloc(md->rq_pool, gfp_mask);
}
-static void free_clone_request(struct mapped_device *md, struct request *rq)
+static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
mempool_free(rq, md->rq_pool);
}
@@ -827,7 +864,7 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
mutex_lock(&md->table_devices_lock);
td = find_table_device(&md->table_devices, dev, mode);
if (!td) {
- td = kmalloc(sizeof(*td), GFP_KERNEL);
+ td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
if (!td) {
mutex_unlock(&md->table_devices_lock);
return -ENOMEM;
@@ -1109,12 +1146,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (run_queue) {
- if (md->queue->mq_ops)
- blk_mq_run_hw_queues(md->queue, true);
- else
- blk_run_queue_async(md->queue);
- }
+ if (!md->queue->mq_ops && run_queue)
+ blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
@@ -1134,15 +1167,10 @@ static void free_rq_clone(struct request *clone)
tio->ti->type->release_clone_rq(clone);
else if (!md->queue->mq_ops)
/* request_fn queue stacked on request_fn queue(s) */
- free_clone_request(md, clone);
- /*
- * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
- * no need to call free_clone_request() because we leverage blk-mq by
- * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
- */
+ free_old_clone_request(md, clone);
if (!md->queue->mq_ops)
- free_rq_tio(tio);
+ free_old_rq_tio(tio);
}
/*
@@ -1192,13 +1220,13 @@ static void dm_unprep_request(struct request *rq)
if (clone)
free_rq_clone(clone);
else if (!tio->md->queue->mq_ops)
- free_rq_tio(tio);
+ free_old_rq_tio(tio);
}
/*
* Requeue the original request of a clone.
*/
-static void old_requeue_request(struct request *rq)
+static void dm_old_requeue_request(struct request *rq)
{
struct request_queue *q = rq->q;
unsigned long flags;
@@ -1209,45 +1237,57 @@ static void old_requeue_request(struct request *rq)
spin_unlock_irqrestore(q->queue_lock, flags);
}
+static void dm_mq_requeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ blk_mq_requeue_request(rq);
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!blk_queue_stopped(q))
+ blk_mq_kick_requeue_list(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
static void dm_requeue_original_request(struct mapped_device *md,
struct request *rq)
{
int rw = rq_data_dir(rq);
+ rq_end_stats(md, rq);
dm_unprep_request(rq);
- rq_end_stats(md, rq);
if (!rq->q->mq_ops)
- old_requeue_request(rq);
- else {
- blk_mq_requeue_request(rq);
- blk_mq_kick_requeue_list(rq->q);
- }
+ dm_old_requeue_request(rq);
+ else
+ dm_mq_requeue_request(rq);
rq_completed(md, rw, false);
}
-static void old_stop_queue(struct request_queue *q)
+static void dm_old_stop_queue(struct request_queue *q)
{
unsigned long flags;
- if (blk_queue_stopped(q))
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (blk_queue_stopped(q)) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
return;
+ }
- spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void stop_queue(struct request_queue *q)
+static void dm_stop_queue(struct request_queue *q)
{
if (!q->mq_ops)
- old_stop_queue(q);
+ dm_old_stop_queue(q);
else
blk_mq_stop_hw_queues(q);
}
-static void old_start_queue(struct request_queue *q)
+static void dm_old_start_queue(struct request_queue *q)
{
unsigned long flags;
@@ -1257,12 +1297,14 @@ static void old_start_queue(struct request_queue *q)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void start_queue(struct request_queue *q)
+static void dm_start_queue(struct request_queue *q)
{
if (!q->mq_ops)
- old_start_queue(q);
- else
+ dm_old_start_queue(q);
+ else {
blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_kick_requeue_list(q);
+ }
}
static void dm_done(struct request *clone, int error, bool mapped)
@@ -1313,7 +1355,7 @@ static void dm_softirq_done(struct request *rq)
if (!rq->q->mq_ops) {
blk_end_request_all(rq, tio->error);
rq_completed(tio->md, rw, false);
- free_rq_tio(tio);
+ free_old_rq_tio(tio);
} else {
blk_mq_end_request(rq, tio->error);
rq_completed(tio->md, rw, false);
@@ -1336,7 +1378,10 @@ static void dm_complete_request(struct request *rq, int error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- blk_complete_request(rq);
+ if (!rq->q->mq_ops)
+ blk_complete_request(rq);
+ else
+ blk_mq_complete_request(rq, error);
}
/*
@@ -1352,7 +1397,7 @@ static void dm_kill_unmapped_request(struct request *rq, int error)
}
/*
- * Called with the clone's queue lock held (for non-blk-mq)
+ * Called with the clone's queue lock held (in the case of .request_fn)
*/
static void end_clone_request(struct request *clone, int error)
{
@@ -1522,21 +1567,26 @@ static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
/*
* Creates a bio that consists of range of complete bvecs.
*/
-static void clone_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned len)
+static int clone_bio(struct dm_target_io *tio, struct bio *bio,
+ sector_t sector, unsigned len)
{
struct bio *clone = &tio->clone;
__bio_clone_fast(clone, bio);
- if (bio_integrity(bio))
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ if (bio_integrity(bio)) {
+ int r = bio_integrity_clone(clone, bio, GFP_NOIO);
+ if (r < 0)
+ return r;
+ }
bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
if (bio_integrity(bio))
bio_integrity_trim(clone, 0, len);
+
+ return 0;
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1593,13 +1643,14 @@ static int __send_empty_flush(struct clone_info *ci)
return 0;
}
-static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
+static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
sector_t sector, unsigned *len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
unsigned target_bio_nr;
unsigned num_target_bios = 1;
+ int r = 0;
/*
* Does the target want to receive duplicate copies of the bio?
@@ -1610,9 +1661,15 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
tio = alloc_tio(ci, ti, target_bio_nr);
tio->len_ptr = len;
- clone_bio(tio, bio, sector, *len);
+ r = clone_bio(tio, bio, sector, *len);
+ if (r < 0) {
+ free_tio(ci->md, tio);
+ break;
+ }
__map_bio(tio);
}
+
+ return r;
}
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
@@ -1689,6 +1746,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
struct bio *bio = ci->bio;
struct dm_target *ti;
unsigned len;
+ int r;
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __send_discard(ci);
@@ -1701,7 +1759,9 @@ static int __split_and_process_non_flush(struct clone_info *ci)
len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
- __clone_and_map_data_bio(ci, ti, ci->sector, &len);
+ r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
+ if (r < 0)
+ return r;
ci->sector += len;
ci->sector_count -= len;
@@ -1839,28 +1899,22 @@ static int setup_clone(struct request *clone, struct request *rq,
return 0;
}
-static struct request *clone_rq(struct request *rq, struct mapped_device *md,
- struct dm_rq_target_io *tio, gfp_t gfp_mask)
+static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
+ struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
/*
- * Do not allocate a clone if tio->clone was already set
- * (see: dm_mq_queue_rq).
+ * Create clone for use with .request_fn request_queue
*/
- bool alloc_clone = !tio->clone;
struct request *clone;
- if (alloc_clone) {
- clone = alloc_clone_request(md, gfp_mask);
- if (!clone)
- return NULL;
- } else
- clone = tio->clone;
+ clone = alloc_old_clone_request(md, gfp_mask);
+ if (!clone)
+ return NULL;
blk_rq_init(NULL, clone);
if (setup_clone(clone, rq, tio, gfp_mask)) {
/* -ENOMEM */
- if (alloc_clone)
- free_clone_request(md, clone);
+ free_old_clone_request(md, clone);
return NULL;
}
@@ -1877,29 +1931,40 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
tio->clone = NULL;
tio->orig = rq;
tio->error = 0;
- memset(&tio->info, 0, sizeof(tio->info));
+ /*
+ * Avoid initializing info for blk-mq; it passes
+ * target-specific data through info.ptr
+ * (see: dm_mq_init_request)
+ */
+ if (!md->init_tio_pdu)
+ memset(&tio->info, 0, sizeof(tio->info));
if (md->kworker_task)
init_kthread_work(&tio->work, map_tio_request);
}
-static struct dm_rq_target_io *prep_tio(struct request *rq,
- struct mapped_device *md, gfp_t gfp_mask)
+static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
+ struct mapped_device *md,
+ gfp_t gfp_mask)
{
struct dm_rq_target_io *tio;
int srcu_idx;
struct dm_table *table;
- tio = alloc_rq_tio(md, gfp_mask);
+ tio = alloc_old_rq_tio(md, gfp_mask);
if (!tio)
return NULL;
init_tio(tio, rq, md);
table = dm_get_live_table(md, &srcu_idx);
+ /*
+ * Must clone a request if this .request_fn DM device
+ * is stacked on .request_fn device(s).
+ */
if (!dm_table_mq_request_based(table)) {
- if (!clone_rq(rq, md, tio, gfp_mask)) {
+ if (!clone_old_rq(rq, md, tio, gfp_mask)) {
dm_put_live_table(md, srcu_idx);
- free_rq_tio(tio);
+ free_old_rq_tio(tio);
return NULL;
}
}
@@ -1911,7 +1976,7 @@ static struct dm_rq_target_io *prep_tio(struct request *rq,
/*
* Called with the queue lock held.
*/
-static int dm_prep_fn(struct request_queue *q, struct request *rq)
+static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
struct mapped_device *md = q->queuedata;
struct dm_rq_target_io *tio;
@@ -1921,7 +1986,7 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_KILL;
}
- tio = prep_tio(rq, md, GFP_ATOMIC);
+ tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
if (!tio)
return BLKPREP_DEFER;
@@ -2079,12 +2144,18 @@ static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
static void dm_request_fn(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
- struct dm_target *ti;
+ struct dm_target *ti = md->immutable_target;
struct request *rq;
struct dm_rq_target_io *tio;
- sector_t pos;
+ sector_t pos = 0;
+
+ if (unlikely(!ti)) {
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+
+ ti = dm_table_find_target(map, pos);
+ dm_put_live_table(md, srcu_idx);
+ }
/*
* For suspend, check blk_queue_stopped() and increment
@@ -2095,33 +2166,21 @@ static void dm_request_fn(struct request_queue *q)
while (!blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
- goto out;
+ return;
/* always use block 0 to find the target for flushes for now */
pos = 0;
if (!(rq->cmd_flags & REQ_FLUSH))
pos = blk_rq_pos(rq);
- ti = dm_table_find_target(map, pos);
- if (!dm_target_is_valid(ti)) {
- /*
- * Must perform setup, that rq_completed() requires,
- * before calling dm_kill_unmapped_request
- */
- DMERR_LIMIT("request attempted access beyond the end of device");
- dm_start_request(md, rq);
- dm_kill_unmapped_request(rq, -EIO);
- continue;
+ if ((dm_request_peeked_before_merge_deadline(md) &&
+ md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
+ (ti->type->busy && ti->type->busy(ti))) {
+ blk_delay_queue(q, HZ / 100);
+ return;
}
- if (dm_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
- md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
- goto delay_and_out;
-
- if (ti->type->busy && ti->type->busy(ti))
- goto delay_and_out;
-
dm_start_request(md, rq);
tio = tio_from_request(rq);
@@ -2130,13 +2189,6 @@ static void dm_request_fn(struct request_queue *q)
queue_kthread_work(&md->kworker, &tio->work);
BUG_ON(!irqs_disabled());
}
-
- goto out;
-
-delay_and_out:
- blk_delay_queue(q, HZ / 100);
-out:
- dm_put_live_table(md, srcu_idx);
}
static int dm_any_congested(void *congested_data, int bdi_bits)
@@ -2146,19 +2198,18 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
struct dm_table *map;
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
- map = dm_get_live_table_fast(md);
- if (map) {
+ if (dm_request_based(md)) {
/*
- * Request-based dm cares about only own queue for
- * the query about congestion status of request_queue
+ * With request-based DM we only need to check the
+ * top-level queue for congestion.
*/
- if (dm_request_based(md))
- r = md->queue->backing_dev_info.wb.state &
- bdi_bits;
- else
+ r = md->queue->backing_dev_info.wb.state & bdi_bits;
+ } else {
+ map = dm_get_live_table_fast(md);
+ if (map)
r = dm_table_any_congested(map, bdi_bits);
+ dm_put_live_table_fast(md);
}
- dm_put_live_table_fast(md);
}
return r;
@@ -2238,7 +2289,7 @@ static void dm_init_md_queue(struct mapped_device *md)
md->queue->backing_dev_info.congested_data = md;
}
-static void dm_init_old_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
{
md->use_blk_mq = false;
dm_init_md_queue(md);
@@ -2285,10 +2336,11 @@ static void cleanup_mapped_device(struct mapped_device *md)
*/
static struct mapped_device *alloc_dev(int minor)
{
- int r;
- struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
+ int r, numa_node_id = dm_get_numa_node();
+ struct mapped_device *md;
void *old_md;
+ md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) {
DMWARN("unable to allocate device, out of memory.");
return NULL;
@@ -2309,7 +2361,9 @@ static struct mapped_device *alloc_dev(int minor)
if (r < 0)
goto bad_io_barrier;
+ md->numa_node_id = numa_node_id;
md->use_blk_mq = use_blk_mq;
+ md->init_tio_pdu = false;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
mutex_init(&md->type_lock);
@@ -2323,13 +2377,13 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->table_devices);
spin_lock_init(&md->uevent_lock);
- md->queue = blk_alloc_queue(GFP_KERNEL);
+ md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
if (!md->queue)
goto bad;
dm_init_md_queue(md);
- md->disk = alloc_disk(1);
+ md->disk = alloc_disk_node(1, numa_node_id);
if (!md->disk)
goto bad;
@@ -2393,8 +2447,10 @@ static void free_dev(struct mapped_device *md)
unlock_fs(md);
cleanup_mapped_device(md);
- if (md->use_blk_mq)
- blk_mq_free_tag_set(&md->tag_set);
+ if (md->tag_set) {
+ blk_mq_free_tag_set(md->tag_set);
+ kfree(md->tag_set);
+ }
free_table_devices(&md->table_devices);
dm_stats_cleanup(&md->stats);
@@ -2502,13 +2558,20 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* This must be done before setting the queue restrictions,
* because request-based dm may be run just after the setting.
*/
- if (dm_table_request_based(t))
- stop_queue(q);
+ if (dm_table_request_based(t)) {
+ dm_stop_queue(q);
+ /*
+ * Leverage the fact that request-based DM targets are
+ * immutable singletons and establish md->immutable_target
+ * - used to optimize both dm_request_fn and dm_mq_queue_rq
+ */
+ md->immutable_target = dm_table_get_immutable_target(t);
+ }
__bind_mempools(md, t);
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
- rcu_assign_pointer(md->map, t);
+ rcu_assign_pointer(md->map, (void *)t);
md->immutable_target_type = dm_table_get_immutable_target_type(t);
dm_table_set_restrictions(t, q, limits);
@@ -2574,7 +2637,6 @@ void dm_set_md_type(struct mapped_device *md, unsigned type)
unsigned dm_get_md_type(struct mapped_device *md)
{
- BUG_ON(!mutex_is_locked(&md->type_lock));
return md->type;
}
@@ -2594,7 +2656,7 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
-static void init_rq_based_worker_thread(struct mapped_device *md)
+static void dm_old_init_rq_based_worker_thread(struct mapped_device *md)
{
/* Initialize the request-based DM worker thread */
init_kthread_worker(&md->kworker);
@@ -2603,26 +2665,22 @@ static void init_rq_based_worker_thread(struct mapped_device *md)
}
/*
- * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ * Fully initialize a .request_fn request-based queue.
*/
-static int dm_init_request_based_queue(struct mapped_device *md)
+static int dm_old_init_request_queue(struct mapped_device *md)
{
- struct request_queue *q = NULL;
-
/* Fully initialize the queue */
- q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
- if (!q)
+ if (!blk_init_allocated_queue(md->queue, dm_request_fn, NULL))
return -EINVAL;
/* disable dm_request_fn's merge heuristic by default */
md->seq_rq_merge_deadline_usecs = 0;
- md->queue = q;
- dm_init_old_md_queue(md);
+ dm_init_normal_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
- blk_queue_prep_rq(md->queue, dm_prep_fn);
+ blk_queue_prep_rq(md->queue, dm_old_prep_fn);
- init_rq_based_worker_thread(md);
+ dm_old_init_rq_based_worker_thread(md);
elv_register_queue(md->queue);
@@ -2642,6 +2700,11 @@ static int dm_mq_init_request(void *data, struct request *rq,
*/
tio->md = md;
+ if (md->init_tio_pdu) {
+ /* target-specific per-io data is immediately after the tio */
+ tio->info.ptr = tio + 1;
+ }
+
return 0;
}
@@ -2651,28 +2714,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *rq = bd->rq;
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
struct mapped_device *md = tio->md;
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
- struct dm_target *ti;
- sector_t pos;
+ struct dm_target *ti = md->immutable_target;
- /* always use block 0 to find the target for flushes for now */
- pos = 0;
- if (!(rq->cmd_flags & REQ_FLUSH))
- pos = blk_rq_pos(rq);
+ if (unlikely(!ti)) {
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
- ti = dm_table_find_target(map, pos);
- if (!dm_target_is_valid(ti)) {
+ ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
- DMERR_LIMIT("request attempted access beyond the end of device");
- /*
- * Must perform setup, that rq_completed() requires,
- * before returning BLK_MQ_RQ_QUEUE_ERROR
- */
- dm_start_request(md, rq);
- return BLK_MQ_RQ_QUEUE_ERROR;
}
- dm_put_live_table(md, srcu_idx);
if (ti->type->busy && ti->type->busy(ti))
return BLK_MQ_RQ_QUEUE_BUSY;
@@ -2688,20 +2738,12 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
*/
tio->ti = ti;
- /* Clone the request if underlying devices aren't blk-mq */
- if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
- /* clone request is allocated at the end of the pdu */
- tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
- (void) clone_rq(rq, md, tio, GFP_ATOMIC);
- queue_kthread_work(&md->kworker, &tio->work);
- } else {
- /* Direct call is fine since .queue_rq allows allocations */
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
- /* Undo dm_start_request() before requeuing */
- rq_end_stats(md, rq);
- rq_completed(md, rq_data_dir(rq), false);
- return BLK_MQ_RQ_QUEUE_BUSY;
- }
+ /* Direct call is fine since .queue_rq allows allocations */
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+ /* Undo dm_start_request() before requeuing */
+ rq_end_stats(md, rq);
+ rq_completed(md, rq_data_dir(rq), false);
+ return BLK_MQ_RQ_QUEUE_BUSY;
}
return BLK_MQ_RQ_QUEUE_OK;
@@ -2714,47 +2756,56 @@ static struct blk_mq_ops dm_mq_ops = {
.init_request = dm_mq_init_request,
};
-static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
+static int dm_mq_init_request_queue(struct mapped_device *md,
+ struct dm_target *immutable_tgt)
{
- unsigned md_type = dm_get_md_type(md);
struct request_queue *q;
int err;
- memset(&md->tag_set, 0, sizeof(md->tag_set));
- md->tag_set.ops = &dm_mq_ops;
- md->tag_set.queue_depth = BLKDEV_MAX_RQ;
- md->tag_set.numa_node = NUMA_NO_NODE;
- md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
- md->tag_set.nr_hw_queues = 1;
- if (md_type == DM_TYPE_REQUEST_BASED) {
- /* make the memory for non-blk-mq clone part of the pdu */
- md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
- } else
- md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
- md->tag_set.driver_data = md;
-
- err = blk_mq_alloc_tag_set(&md->tag_set);
+ if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+ DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
+ return -EINVAL;
+ }
+
+ md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
+ if (!md->tag_set)
+ return -ENOMEM;
+
+ md->tag_set->ops = &dm_mq_ops;
+ md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
+ md->tag_set->numa_node = md->numa_node_id;
+ md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
+ md->tag_set->driver_data = md;
+
+ md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
+ if (immutable_tgt && immutable_tgt->per_io_data_size) {
+ /* any target-specific per-io data is immediately after the tio */
+ md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
+ md->init_tio_pdu = true;
+ }
+
+ err = blk_mq_alloc_tag_set(md->tag_set);
if (err)
- return err;
+ goto out_kfree_tag_set;
- q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
+ q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto out_tag_set;
}
- md->queue = q;
dm_init_md_queue(md);
/* backfill 'mq' sysfs registration normally done in blk_register_queue */
blk_mq_register_disk(md->disk);
- if (md_type == DM_TYPE_REQUEST_BASED)
- init_rq_based_worker_thread(md);
-
return 0;
out_tag_set:
- blk_mq_free_tag_set(&md->tag_set);
+ blk_mq_free_tag_set(md->tag_set);
+out_kfree_tag_set:
+ kfree(md->tag_set);
+
return err;
}
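
The cmd_size arithmetic above packs any target-specific per-io data directly
behind the dm_rq_target_io in the same blk-mq pdu, which is what lets
dm_mq_init_request() resolve it with plain pointer math (tio + 1). A minimal
userspace model of that layout, with illustrative names only:

    #include <stdio.h>
    #include <stdlib.h>

    struct tio {                    /* stands in for struct dm_rq_target_io */
        int error;
        void *info_ptr;             /* stands in for tio->info.ptr */
    };

    int main(void)
    {
        size_t per_io_data_size = 64;            /* assumed target payload */
        size_t cmd_size = sizeof(struct tio) + per_io_data_size;

        struct tio *tio = calloc(1, cmd_size);   /* one allocation, two regions */
        if (!tio)
            return 1;
        tio->info_ptr = tio + 1;    /* payload begins immediately after the tio */
        printf("tio=%p payload=%p\n", (void *)tio, tio->info_ptr);
        free(tio);
        return 0;
    }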
@@ -2769,28 +2820,28 @@ static unsigned filter_md_type(unsigned type, struct mapped_device *md)
/*
* Setup the DM device's queue based on md's type
*/
-int dm_setup_md_queue(struct mapped_device *md)
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
unsigned md_type = filter_md_type(dm_get_md_type(md), md);
switch (md_type) {
case DM_TYPE_REQUEST_BASED:
- r = dm_init_request_based_queue(md);
+ r = dm_old_init_request_queue(md);
if (r) {
- DMWARN("Cannot initialize queue for request-based mapped device");
+ DMERR("Cannot initialize queue for request-based mapped device");
return r;
}
break;
case DM_TYPE_MQ_REQUEST_BASED:
- r = dm_init_request_based_blk_mq_queue(md);
+ r = dm_mq_init_request_queue(md, dm_table_get_immutable_target(t));
if (r) {
- DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
+ DMERR("Cannot initialize queue for request-based dm-mq mapped device");
return r;
}
break;
case DM_TYPE_BIO_BASED:
- dm_init_old_md_queue(md);
+ dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
/*
* DM handles splitting bios as needed. Free the bio_split bioset
@@ -3133,7 +3184,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* dm defers requests to md->wq from md->queue.
*/
if (dm_request_based(md)) {
- stop_queue(md->queue);
+ dm_stop_queue(md->queue);
if (md->kworker_task)
flush_kthread_worker(&md->kworker);
}
@@ -3157,7 +3208,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
dm_queue_flush(md);
if (dm_request_based(md))
- start_queue(md->queue);
+ dm_start_queue(md->queue);
unlock_fs(md);
dm_table_presuspend_undo_targets(map);
@@ -3236,7 +3287,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
* Request-based dm is queueing the deferred I/Os in its request_queue.
*/
if (dm_request_based(md))
- start_queue(md->queue);
+ dm_start_queue(md->queue);
unlock_fs(md);
@@ -3482,9 +3533,9 @@ int dm_noflush_suspending(struct dm_target *ti)
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
- unsigned integrity, unsigned per_bio_data_size)
+ unsigned integrity, unsigned per_io_data_size)
{
- struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+ struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
struct kmem_cache *cachep = NULL;
unsigned int pool_size = 0;
unsigned int front_pad;
@@ -3498,7 +3549,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
case DM_TYPE_BIO_BASED:
cachep = _io_cache;
pool_size = dm_get_reserved_bio_based_ios();
- front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+ front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
break;
case DM_TYPE_REQUEST_BASED:
cachep = _rq_tio_cache;
@@ -3511,8 +3562,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
if (!pool_size)
pool_size = dm_get_reserved_rq_based_ios();
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
- /* per_bio_data_size is not used. See __bind_mempools(). */
- WARN_ON(per_bio_data_size != 0);
+ /* per_io_data_size is used for blk-mq pdu at queue allocation */
break;
default:
BUG();
@@ -3554,15 +3604,14 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
}
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
- u32 flags)
+ u32 flags)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3572,20 +3621,19 @@ static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
- u32 flags)
+ u32 flags)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3595,7 +3643,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
@@ -3603,11 +3651,10 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3617,20 +3664,19 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
- enum pr_type type, bool abort)
+ enum pr_type type, bool abort)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3640,7 +3686,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
@@ -3648,11 +3694,10 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3662,7 +3707,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
@@ -3701,6 +3746,15 @@ MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
+module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
+
+module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
+
+module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
+
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
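
The recurring pattern in this file — dm_request_fn and dm_mq_queue_rq both
read md->immutable_target first and fall back to the SRCU-protected table
lookup only when it is unset — can be summarized with the following sketch
(simplified, illustrative names; not the kernel code itself):

    struct target { const char *name; };

    struct device {
        struct target *immutable_target;  /* cached once at table bind time */
    };

    static struct target *lookup_target(struct device *dev, long pos)
    {
        /* stands in for dm_table_find_target() under dm_get_live_table() */
        static struct target fallback = { "fallback" };
        (void)dev; (void)pos;
        return &fallback;
    }

    static struct target *pick_target(struct device *dev, long pos)
    {
        struct target *ti = dev->immutable_target;  /* fast path, no locking */

        if (!ti)
            ti = lookup_target(dev, pos);           /* slow path */
        return ti;
    }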
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 7edcf97dfa5a..13a758ec0f88 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -73,6 +73,8 @@ int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
+struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
+struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_mq_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
@@ -84,7 +86,7 @@ void dm_set_md_type(struct mapped_device *md, unsigned type);
unsigned dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
-int dm_setup_md_queue(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/*
* To check the return value from dm_table_find_target().
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e55e6cf9ec17..14d3b37944df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
* go away inside make_request
*/
sectors = bio_sectors(bio);
+ /* bio could become mergeable after passing to the underlying layer */
+ bio->bi_rw &= ~REQ_NOMERGE;
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
@@ -305,6 +307,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
*/
void mddev_suspend(struct mddev *mddev)
{
+ WARN_ON_ONCE(current == mddev->thread->tsk);
if (mddev->suspended++)
return;
synchronize_rcu();
@@ -717,6 +720,7 @@ static void super_written(struct bio *bio)
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
+ rdev_dec_pending(rdev, mddev);
bio_put(bio);
}
@@ -731,6 +735,8 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+ atomic_inc(&rdev->nr_pending);
+
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
@@ -5671,7 +5677,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
export_array(mddev);
md_clean(mddev);
- kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
@@ -6883,7 +6888,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
case ADD_NEW_DISK:
/* We can support ADD_NEW_DISK on read-only arrays
- * on if we are re-adding a preexisting device.
+ * only if we are re-adding a preexisting device.
* So require mddev->pers and MD_DISK_SYNC.
*/
if (mddev->pers) {
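
The md_super_write()/super_written() hunks pair an atomic_inc of
rdev->nr_pending at submission with rdev_dec_pending() at completion, so the
rdev cannot be torn down while a superblock write is still in flight. A
compilable model of that pin/unpin discipline (names are illustrative):

    #include <stdatomic.h>

    struct rdev { atomic_int nr_pending; };

    /* submit path: pin the device before the asynchronous write is issued */
    static void super_write(struct rdev *rdev)
    {
        atomic_fetch_add(&rdev->nr_pending, 1);
        /* ...bio is submitted here; completion may run on another CPU... */
    }

    /* completion path: drop the pin only after the write has finished */
    static void super_written(struct rdev *rdev)
    {
        atomic_fetch_sub(&rdev->nr_pending, 1);
    }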
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 0a72ab6e6c20..dd483bb2e111 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- mp_bh->bio = *bio;
+ bio_init(&mp_bh->bio);
+ __bio_clone_fast(&mp_bh->bio, bio);
+
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
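
The multipath change replaces a raw struct assignment (mp_bh->bio = *bio)
with bio_init() plus __bio_clone_fast(), so per-owner bookkeeping inside the
bio is re-initialized rather than blindly copied. As a rough analogy only
(a simplified model, not the bio API):

    #include <stddef.h>

    struct buf {
        int refcnt;       /* per-owner state: must never be struct-copied */
        char *data;       /* shared payload */
        size_t pos;       /* iterator */
    };

    static void buf_clone(struct buf *dst, const struct buf *src)
    {
        dst->refcnt = 1;        /* fresh private state, like bio_init() */
        dst->data = src->data;  /* share payload, like __bio_clone_fast() */
        dst->pos = src->pos;    /* copy only the iterator position */
    }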
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2ea12c6bf659..34783a3c8b3c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -70,7 +70,6 @@ static void dump_zones(struct mddev *mddev)
(unsigned long long)zone_size>>1);
zone_start = conf->strip_zone[j].zone_end;
}
- printk(KERN_INFO "\n");
}
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
@@ -85,6 +84,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
unsigned short blksize = 512;
+ *private_conf = ERR_PTR(-ENOMEM);
if (!conf)
return -ENOMEM;
rdev_for_each(rdev1, mddev) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4e3843f7d245..a7f2b9c9f8a0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
- if (first_bad < this_sector)
+ if (first_bad <= this_sector)
/* Cannot use this */
continue;
best_good_sectors = first_bad - this_sector;
@@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
@@ -2695,7 +2698,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
!conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
- BUG_ON(sync_blocks < (PAGE_SIZE>>9));
if ((len >> 9) > sync_blocks)
len = sync_blocks<<9;
}
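
The nr_queued changes keep conf->nr_queued exactly in step with
conf->bio_end_io_list: each r1_bio parked on the list bumps the counter, and
raid1d's drain loop decrements it once per entry moved. freeze_array() waits
on that count, so the old list_add/list_del_init splice (which moved entries
without adjusting it) could leave freeze_array() waiting forever; raid10
receives the identical fix in the next file. A small model of the invariant
(illustrative names):

    struct node { struct node *next; };

    struct conf {
        struct node *end_io_list;  /* stands in for conf->bio_end_io_list */
        int nr_queued;             /* must equal the number of parked entries */
    };

    static void park(struct conf *c, struct node *n)
    {
        n->next = c->end_io_list;
        c->end_io_list = n;
        c->nr_queued++;            /* count while parking... */
    }

    static struct node *drain_one(struct conf *c)
    {
        struct node *n = c->end_io_list;

        if (n) {
            c->end_io_list = n->next;
            c->nr_queued--;        /* ...and uncount while draining */
        }
        return n;
    }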
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1c1447dd3417..e3fd725d5c4d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b4f02c9959f2..e48c262ce032 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
int hash)
{
int size;
- unsigned long do_wakeup = 0;
- int i = 0;
+ bool do_wakeup = false;
unsigned long flags;
if (hash == NR_STRIPE_HASH_LOCKS) {
@@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
!list_empty(list))
atomic_dec(&conf->empty_inactive_list_nr);
list_splice_tail_init(list, conf->inactive_list + hash);
- do_wakeup |= 1 << hash;
+ do_wakeup = true;
spin_unlock_irqrestore(conf->hash_locks + hash, flags);
}
size--;
hash--;
}
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- if (do_wakeup & (1 << i))
- wake_up(&conf->wait_for_stripe[i]);
- }
-
if (do_wakeup) {
+ wake_up(&conf->wait_for_stripe);
if (atomic_read(&conf->active_stripes) == 0)
wake_up(&conf->wait_for_quiescent);
if (conf->retry_read_aligned)
@@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
- wait_event_exclusive_cmd(
- conf->wait_for_stripe[hash],
+ wait_event_lock_irq(
+ conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes * 3 / 4)
|| !test_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state)),
- spin_unlock_irq(conf->hash_locks + hash),
- spin_lock_irq(conf->hash_locks + hash));
+ *(conf->hash_locks + hash));
clear_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
} else {
@@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
}
} while (sh == NULL);
- if (!list_empty(conf->inactive_list + hash))
- wake_up(&conf->wait_for_stripe[hash]);
-
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -2089,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
unsigned long cpu;
int err = 0;
+ /*
+ * Never shrink. And mddev_suspend() could deadlock if this is called
+ * from raid5d. In that case, scribble_disks and scribble_sectors
+ * should already equal new_disks and new_sectors.
+ */
+ if (conf->scribble_disks >= new_disks &&
+ conf->scribble_sectors >= new_sectors)
+ return 0;
mddev_suspend(conf->mddev);
get_online_cpus();
for_each_present_cpu(cpu) {
@@ -2110,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
}
put_online_cpus();
mddev_resume(conf->mddev);
+ if (!err) {
+ conf->scribble_disks = new_disks;
+ conf->scribble_sectors = new_sectors;
+ }
return err;
}
@@ -2190,7 +2193,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
lock_device_hash_lock(conf, hash);
- wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
+ wait_event_cmd(conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash),
unlock_device_hash_lock(conf, hash),
lock_device_hash_lock(conf, hash));
@@ -3499,8 +3502,6 @@ returnbi:
dev = &sh->dev[i];
} else if (test_bit(R5_Discard, &dev->flags))
discard_pending = 1;
- WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
- WARN_ON(dev->page != dev->orig_page);
}
r5l_stripe_write_finished(sh);
@@ -4233,10 +4234,9 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
list_del_init(&sh->batch_list);
- WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+ WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
(1 << STRIPE_SYNCING) |
(1 << STRIPE_REPLACED) |
- (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DELAYED) |
(1 << STRIPE_BIT_DELAY) |
(1 << STRIPE_FULL_WRITE) |
@@ -4246,11 +4246,14 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_DISCARD) |
(1 << STRIPE_BATCH_READY) |
(1 << STRIPE_BATCH_ERR) |
- (1 << STRIPE_BITMAP_PENDING)));
- WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
- (1 << STRIPE_REPLACED)));
+ (1 << STRIPE_BITMAP_PENDING)),
+ "stripe state: %lx\n", sh->state);
+ WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+ (1 << STRIPE_REPLACED)),
+ "head stripe state: %lx\n", head_sh->state);
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DEGRADED)),
head_sh->state & (1 << STRIPE_INSYNC));
@@ -6376,6 +6379,8 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
@@ -6413,6 +6418,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
}
put_online_cpus();
+ if (!err) {
+ conf->scribble_disks = max(conf->raid_disks,
+ conf->previous_raid_disks);
+ conf->scribble_sectors = max(conf->chunk_sectors,
+ conf->prev_chunk_sectors);
+ }
return err;
}
@@ -6503,9 +6514,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
seqcount_init(&conf->gen_lock);
mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- init_waitqueue_head(&conf->wait_for_stripe[i]);
- }
+ init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
INIT_LIST_HEAD(&conf->hold_list);
@@ -7014,8 +7023,8 @@ static int raid5_run(struct mddev *mddev)
}
if (discard_supported &&
- mddev->queue->limits.max_discard_sectors >= stripe &&
- mddev->queue->limits.discard_granularity >= stripe)
+ mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
+ mddev->queue->limits.discard_granularity >= stripe)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
mddev->queue);
else
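
The raid5 waitqueue rework collapses the NR_STRIPE_HASH_LOCKS array of
exclusive waitqueues into a single conf->wait_for_stripe and switches the
sleeper to wait_event_lock_irq(), which drops the per-hash spinlock while
sleeping and retakes it before rechecking the condition. A hedged sketch of
the resulting wait pattern (kernel-style pseudo-code, not the exact raid5
source):

    spin_lock_irq(conf->hash_locks + hash);
    wait_event_lock_irq(conf->wait_for_stripe,
                        !list_empty(conf->inactive_list + hash),
                        *(conf->hash_locks + hash));
    /* lock is held again here; an inactive stripe exists on this hash chain */
    sh = get_free_stripe(conf, hash);
    spin_unlock_irq(conf->hash_locks + hash);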
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index a415e1cd39b8..517d4b68a1be 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -510,6 +510,8 @@ struct r5conf {
* conversions
*/
} __percpu *percpu;
+ int scribble_disks;
+ int scribble_sectors;
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
@@ -522,7 +524,7 @@ struct r5conf {
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
- wait_queue_head_t wait_for_stripe[NR_STRIPE_HASH_LOCKS];
+ wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
diff --git a/drivers/media/common/b2c2/flexcop-fe-tuner.c b/drivers/media/common/b2c2/flexcop-fe-tuner.c
index 9c59f4306883..f5956402fc69 100644
--- a/drivers/media/common/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/common/b2c2/flexcop-fe-tuner.c
@@ -38,7 +38,7 @@ static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
#endif
/* lnb control */
-#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)
+#if (FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)) && FE_SUPPORTED(PLL)
static int flexcop_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
@@ -68,7 +68,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe,
#endif
#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312)
-static int flexcop_sleep(struct dvb_frontend* fe)
+static int __maybe_unused flexcop_sleep(struct dvb_frontend* fe)
{
struct flexcop_device *fc = fe->dvb->priv;
if (fc->fe_sleep)
diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c
index 412c5daf2b48..0f5114d406f8 100644
--- a/drivers/media/common/b2c2/flexcop.c
+++ b/drivers/media/common/b2c2/flexcop.c
@@ -1,7 +1,7 @@
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop.c - main module part
- * Copyright (C) 2004-9 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-9 Patrick Boettcher <patrick.boettcher@posteo.de>
* based on skystar2-driver Copyright (C) 2003 Vadim Catana, skystar@moldova.cc
*
* Acknowledgements:
@@ -34,7 +34,7 @@
#include "flexcop.h"
#define DRIVER_NAME "B2C2 FlexcopII/II(b)/III digital TV receiver chip"
-#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de"
+#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define DEBSTATUS ""
diff --git a/drivers/media/common/cypress_firmware.c b/drivers/media/common/cypress_firmware.c
index 577e82058fdc..50e3f76d4847 100644
--- a/drivers/media/common/cypress_firmware.c
+++ b/drivers/media/common/cypress_firmware.c
@@ -1,6 +1,6 @@
/* cypress_firmware.c is part of the DVB USB library.
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file contains functions for downloading the firmware to Cypress FX 1
diff --git a/drivers/media/common/cypress_firmware.h b/drivers/media/common/cypress_firmware.h
index e493cbc7a528..1e4f27356205 100644
--- a/drivers/media/common/cypress_firmware.h
+++ b/drivers/media/common/cypress_firmware.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file contains functions for downloading the firmware to Cypress FX 1
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 2a8d9a36d6f0..f3a42834d7d6 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -1167,8 +1167,8 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
return rc;
}
pr_debug("read fw %s, buffer size=0x%zx\n", fw_filename, fw->size);
- fw_buf = kmalloc(ALIGN(fw->size, SMS_ALLOC_ALIGNMENT),
- GFP_KERNEL | GFP_DMA);
+ fw_buf = kmalloc(ALIGN(fw->size + sizeof(struct sms_firmware),
+ SMS_ALLOC_ALIGNMENT), GFP_KERNEL | GFP_DMA);
if (!fw_buf) {
pr_err("failed to allocate firmware buffer\n");
rc = -ENOMEM;
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index d31f468830cf..9148e14c9d07 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1015,12 +1015,6 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe)
}
}
-/* Nothing to do here, as stats are automatically updated */
-static int smsdvb_get_frontend(struct dvb_frontend *fe)
-{
- return 0;
-}
-
static int smsdvb_init(struct dvb_frontend *fe)
{
struct smsdvb_client_t *client =
@@ -1069,7 +1063,6 @@ static struct dvb_frontend_ops smsdvb_fe_ops = {
.release = smsdvb_release,
.set_frontend = smsdvb_set_frontend,
- .get_frontend = smsdvb_get_frontend,
.get_tune_settings = smsdvb_get_tune_settings,
.read_status = smsdvb_read_status,
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 1c1c298d2289..0afad395ef97 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -1,6 +1,6 @@
/* dvb-usb-ids.h is part of the DVB USB library.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) see
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see
* dvb-usb-init.c for copyright information.
*
* a header file containing define's for the USB device supported by the
@@ -118,6 +118,7 @@
#define USB_PID_DIBCOM_STK807XP 0x1f90
#define USB_PID_DIBCOM_STK807XPVR 0x1f98
#define USB_PID_DIBCOM_STK8096GP 0x1fa0
+#define USB_PID_DIBCOM_STK8096PVR 0x1faa
#define USB_PID_DIBCOM_NIM8096MD 0x1fa8
#define USB_PID_DIBCOM_TFE8096P 0x1f9C
#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
@@ -241,12 +242,14 @@
#define USB_PID_AVERMEDIA_1867 0x1867
#define USB_PID_AVERMEDIA_A867 0xa867
#define USB_PID_AVERMEDIA_H335 0x0335
+#define USB_PID_AVERMEDIA_TD110 0xa110
#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011
#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
+#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015
#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
@@ -255,6 +258,10 @@
#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062
#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078
#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab
+#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8
+#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0
+#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102
+#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105
#define USB_PID_TERRATEC_H7 0x10b4
#define USB_PID_TERRATEC_H7_2 0x10a3
#define USB_PID_TERRATEC_H7_3 0x10a5
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 40080645341e..c0142614c408 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -134,15 +134,17 @@ struct dvb_frontend_private {
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
struct media_pipeline pipe;
- struct media_entity *pipe_start_entity;
#endif
};
static void dvb_frontend_wakeup(struct dvb_frontend *fe);
static int dtv_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c,
struct dvb_frontend_parameters *p_out);
-static int dtv_property_legacy_params_sync(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p);
+static int
+dtv_property_legacy_params_sync(struct dvb_frontend *fe,
+ const struct dtv_frontend_properties *c,
+ struct dvb_frontend_parameters *p);
static bool has_get_frontend(struct dvb_frontend *fe)
{
@@ -202,6 +204,7 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
enum fe_status status)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_fe_events *events = &fepriv->events;
struct dvb_frontend_event *e;
int wp;
@@ -209,7 +212,7 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
dev_dbg(fe->dvb->device, "%s:\n", __func__);
if ((status & FE_HAS_LOCK) && has_get_frontend(fe))
- dtv_get_frontend(fe, &fepriv->parameters_out);
+ dtv_get_frontend(fe, c, &fepriv->parameters_out);
mutex_lock(&events->mtx);
@@ -596,104 +599,13 @@ static void dvb_frontend_wakeup(struct dvb_frontend *fe)
wake_up_interruptible(&fepriv->wait_queue);
}
-/**
- * dvb_enable_media_tuner() - tries to enable the DVB tuner
- *
- * @fe: struct dvb_frontend pointer
- *
- * This function ensures that just one media tuner is enabled for a given
- * frontend. It has two different behaviors:
- * - For trivial devices with just one tuner:
- * it just enables the existing tuner->fe link
- * - For devices with more than one tuner:
- * It is up to the driver to implement the logic that will enable one tuner
- * and disable the other ones. However, if more than one tuner is enabled for
- * the same frontend, it will print an error message and return -EINVAL.
- *
- * At return, it will return the error code returned by media_entity_setup_link,
- * or 0 if everything is OK, if no tuner is linked to the frontend or if the
- * mdev is NULL.
- */
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
-static int dvb_enable_media_tuner(struct dvb_frontend *fe)
-{
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
- struct dvb_adapter *adapter = fe->dvb;
- struct media_device *mdev = adapter->mdev;
- struct media_entity *entity, *source;
- struct media_link *link, *found_link = NULL;
- int ret, n_links = 0, active_links = 0;
-
- fepriv->pipe_start_entity = NULL;
-
- if (!mdev)
- return 0;
-
- entity = fepriv->dvbdev->entity;
- fepriv->pipe_start_entity = entity;
-
- list_for_each_entry(link, &entity->links, list) {
- if (link->sink->entity == entity) {
- found_link = link;
- n_links++;
- if (link->flags & MEDIA_LNK_FL_ENABLED)
- active_links++;
- }
- }
-
- if (!n_links || active_links == 1 || !found_link)
- return 0;
-
- /*
- * If a frontend has more than one tuner linked, it is up to the driver
- * to select with one will be the active one, as the frontend core can't
- * guess. If the driver doesn't do that, it is a bug.
- */
- if (n_links > 1 && active_links != 1) {
- dev_err(fe->dvb->device,
- "WARNING: there are %d active links among %d tuners. This is a driver's bug!\n",
- active_links, n_links);
- return -EINVAL;
- }
-
- source = found_link->source->entity;
- fepriv->pipe_start_entity = source;
- list_for_each_entry(link, &source->links, list) {
- struct media_entity *sink;
- int flags = 0;
-
- sink = link->sink->entity;
- if (sink == entity)
- flags = MEDIA_LNK_FL_ENABLED;
-
- ret = media_entity_setup_link(link, flags);
- if (ret) {
- dev_err(fe->dvb->device,
- "Couldn't change link %s->%s to %s. Error %d\n",
- source->name, sink->name,
- flags ? "enabled" : "disabled",
- ret);
- return ret;
- } else
- dev_dbg(fe->dvb->device,
- "link %s->%s was %s\n",
- source->name, sink->name,
- flags ? "ENABLED" : "disabled");
- }
- return 0;
-}
-#endif
-
static int dvb_frontend_thread(void *data)
{
struct dvb_frontend *fe = data;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
enum fe_status s;
enum dvbfe_algo algo;
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- int ret;
-#endif
-
bool re_tune = false;
bool semheld = false;
@@ -706,20 +618,6 @@ static int dvb_frontend_thread(void *data)
fepriv->wakeup = 0;
fepriv->reinitialise = 0;
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- ret = dvb_enable_media_tuner(fe);
- if (ret) {
- /* FIXME: return an error if it fails */
- dev_info(fe->dvb->device,
- "proceeding with FE task\n");
- } else if (fepriv->pipe_start_entity) {
- ret = media_entity_pipeline_start(fepriv->pipe_start_entity,
- &fepriv->pipe);
- if (ret)
- return ret;
- }
-#endif
-
dvb_frontend_init(fe);
set_freezable();
@@ -807,7 +705,7 @@ restart:
fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
fepriv->delay = HZ / 2;
}
- dtv_property_legacy_params_sync(fe, &fepriv->parameters_out);
+ dtv_property_legacy_params_sync(fe, c, &fepriv->parameters_out);
fe->ops.read_status(fe, &s);
if (s != fepriv->status) {
dvb_frontend_add_event(fe, s); /* update event list */
@@ -829,12 +727,6 @@ restart:
}
}
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- if (fepriv->pipe_start_entity)
- media_entity_pipeline_stop(fepriv->pipe_start_entity);
- fepriv->pipe_start_entity = NULL;
-#endif
-
if (dvb_powerdown_on_sleep) {
if (fe->ops.set_voltage)
fe->ops.set_voltage(fe, SEC_VOLTAGE_OFF);
@@ -899,10 +791,10 @@ void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec)
s32 delta;
*waketime = ktime_add_us(*waketime, add_usec);
- delta = ktime_us_delta(ktime_get_real(), *waketime);
+ delta = ktime_us_delta(ktime_get_boottime(), *waketime);
if (delta > 2500) {
msleep((delta - 1500) / 1000);
- delta = ktime_us_delta(ktime_get_real(), *waketime);
+ delta = ktime_us_delta(ktime_get_boottime(), *waketime);
}
if (delta > 0)
udelay(delta);
@@ -1162,18 +1054,24 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0),
};
-static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp)
+static void dtv_property_dump(struct dvb_frontend *fe,
+ bool is_set,
+ struct dtv_property *tvp)
{
int i;
if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) {
- dev_warn(fe->dvb->device, "%s: tvp.cmd = 0x%08x undefined\n",
- __func__, tvp->cmd);
+ dev_warn(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x undefined\n",
+ __func__,
+ is_set ? "SET" : "GET",
+ tvp->cmd);
return;
}
- dev_dbg(fe->dvb->device, "%s: tvp.cmd = 0x%08x (%s)\n", __func__,
- tvp->cmd, dtv_cmds[tvp->cmd].name);
+ dev_dbg(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x (%s)\n", __func__,
+ is_set ? "SET" : "GET",
+ tvp->cmd,
+ dtv_cmds[tvp->cmd].name);
if (dtv_cmds[tvp->cmd].buffer) {
dev_dbg(fe->dvb->device, "%s: tvp.u.buffer.len = 0x%02x\n",
@@ -1268,11 +1166,11 @@ static int dtv_property_cache_sync(struct dvb_frontend *fe,
/* Ensure the cached values are set correctly in the frontend
* legacy tuning structures, for the advanced tuning API.
*/
-static int dtv_property_legacy_params_sync(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *p)
+static int
+dtv_property_legacy_params_sync(struct dvb_frontend *fe,
+ const struct dtv_frontend_properties *c,
+ struct dvb_frontend_parameters *p)
{
- const struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-
p->frequency = c->frequency;
p->inversion = c->inversion;
@@ -1344,16 +1242,17 @@ static int dtv_property_legacy_params_sync(struct dvb_frontend *fe,
* If p_out is not null, it will update the DVBv3 params pointed by it.
*/
static int dtv_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c,
struct dvb_frontend_parameters *p_out)
{
int r;
if (fe->ops.get_frontend) {
- r = fe->ops.get_frontend(fe);
+ r = fe->ops.get_frontend(fe, c);
if (unlikely(r < 0))
return r;
if (p_out)
- dtv_property_legacy_params_sync(fe, p_out);
+ dtv_property_legacy_params_sync(fe, c, p_out);
return 0;
}
@@ -1589,7 +1488,7 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
return r;
}
- dtv_property_dump(fe, tvp);
+ dtv_property_dump(fe, false, tvp);
return 0;
}
@@ -1830,6 +1729,8 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
return r;
}
+ dtv_property_dump(fe, true, tvp);
+
switch(tvp->cmd) {
case DTV_CLEAR:
/*
@@ -2073,6 +1974,8 @@ static int dvb_frontend_ioctl_properties(struct file *file,
dev_dbg(fe->dvb->device, "%s: Property cache is full, tuning\n", __func__);
} else if (cmd == FE_GET_PROPERTY) {
+ struct dtv_frontend_properties getp = fe->dtv_property_cache;
+
dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num);
dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props);
@@ -2094,17 +1997,18 @@ static int dvb_frontend_ioctl_properties(struct file *file,
}
/*
- * Fills the cache out struct with the cache contents, plus
- * the data retrieved from get_frontend, if the frontend
- * is not idle. Otherwise, returns the cached content
+ * Use our own copy of the property cache, in order to avoid
+ * corrupting the cache used by the DTV zigzag logic: drivers
+ * might return garbage if they don't check whether the data
+ * is available before updating the properties cache.
*/
if (fepriv->state != FESTATE_IDLE) {
- err = dtv_get_frontend(fe, NULL);
+ err = dtv_get_frontend(fe, &getp, NULL);
if (err < 0)
goto out;
}
for (i = 0; i < tvps->num; i++) {
- err = dtv_property_process_get(fe, c, tvp + i, file);
+ err = dtv_property_process_get(fe, &getp, tvp + i, file);
if (err < 0)
goto out;
(tvp + i)->result = err;
@@ -2139,7 +2043,7 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
* the user. FE_SET_FRONTEND triggers an initial frontend event
* with status = 0, which copies output parameters to userspace.
*/
- dtv_property_legacy_params_sync(fe, &fepriv->parameters_out);
+ dtv_property_legacy_params_sync(fe, c, &fepriv->parameters_out);
/*
* Be sure that the bandwidth will be filled for all
@@ -2451,7 +2355,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
u8 last = 1;
if (dvb_frontend_debug)
printk("%s switch command: 0x%04lx\n", __func__, swcmd);
- nexttime = ktime_get_real();
+ nexttime = ktime_get_boottime();
if (dvb_frontend_debug)
tv[0] = nexttime;
/* before sending a command, initialize by sending
@@ -2462,7 +2366,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
for (i = 0; i < 9; i++) {
if (dvb_frontend_debug)
- tv[i+1] = ktime_get_real();
+ tv[i+1] = ktime_get_boottime();
if ((swcmd & 0x01) != last) {
/* set voltage to (last ? 13V : 18V) */
fe->ops.set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18);
@@ -2509,10 +2413,18 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
err = dvb_frontend_get_event (fe, parg, file->f_flags);
break;
- case FE_GET_FRONTEND:
- err = dtv_get_frontend(fe, parg);
- break;
+ case FE_GET_FRONTEND: {
+ struct dtv_frontend_properties getp = fe->dtv_property_cache;
+ /*
+ * Use our own copy of the property cache, in order to avoid
+ * corrupting the cache used by the DTV zigzag logic: drivers
+ * might return garbage if they don't check whether the data
+ * is available before updating the properties cache.
+ */
+ err = dtv_get_frontend(fe, &getp, parg);
+ break;
+ }
case FE_SET_FRONTEND_TUNE_MODE:
fepriv->tune_mode_flags = (unsigned long) parg;
err = 0;
@@ -2612,9 +2524,20 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
fepriv->tone = -1;
fepriv->voltage = -1;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (fe->dvb->mdev && fe->dvb->mdev->enable_source) {
+ ret = fe->dvb->mdev->enable_source(dvbdev->entity,
+ &fepriv->pipe);
+ if (ret) {
+ dev_err(fe->dvb->device,
+ "Tuner is busy. Error %d\n", ret);
+ goto err2;
+ }
+ }
+#endif
ret = dvb_frontend_start (fe);
if (ret)
- goto err2;
+ goto err3;
/* empty event queue */
fepriv->events.eventr = fepriv->events.eventw = 0;
@@ -2624,7 +2547,12 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
mutex_unlock (&adapter->mfe_lock);
return ret;
+err3:
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (fe->dvb->mdev && fe->dvb->mdev->disable_source)
+ fe->dvb->mdev->disable_source(dvbdev->entity);
err2:
+#endif
dvb_generic_release(inode, file);
err1:
if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl)
@@ -2653,6 +2581,10 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
if (dvbdev->users == -1) {
wake_up(&fepriv->wait_queue);
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (fe->dvb->mdev && fe->dvb->mdev->disable_source)
+ fe->dvb->mdev->disable_source(dvbdev->entity);
+#endif
if (fe->exit != DVB_FE_NO_EXIT)
wake_up(&dvbdev->wait_queue);
if (fe->ops.ts_bus_ctrl)
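
Both FE_GET_PROPERTY and FE_GET_FRONTEND now hand dtv_get_frontend() a stack
copy (getp) of fe->dtv_property_cache instead of the live cache, so a
driver's get_frontend() callback can scribble on the snapshot without
corrupting the state the zigzag tuning logic depends on. A compact model of
that snapshot pattern (illustrative names, not the dvb-core API):

    struct props { unsigned frequency; /* ...other cached parameters... */ };

    static int driver_get_frontend(struct props *p)
    {
        p->frequency = 474000000;       /* driver may overwrite freely */
        return 0;
    }

    static int handle_get_ioctl(const struct props *cache, struct props *out)
    {
        struct props getp = *cache;     /* private snapshot of the cache */
        int err = driver_get_frontend(&getp);

        if (err < 0)
            return err;                 /* live cache untouched on failure */
        *out = getp;
        return 0;
    }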
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 458bcce20e38..9592573a0b41 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -449,7 +449,8 @@ struct dvb_frontend_ops {
int (*set_frontend)(struct dvb_frontend *fe);
int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings);
- int (*get_frontend)(struct dvb_frontend *fe);
+ int (*get_frontend)(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *props);
int (*read_status)(struct dvb_frontend *fe, enum fe_status *status);
int (*read_ber)(struct dvb_frontend* fe, u32* ber);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 560450a0b32a..e1684c570e2f 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -58,7 +58,7 @@ static const char * const dnames[] = {
#define DVB_MAX_IDS MAX_DVB_MINORS
#else
#define DVB_MAX_IDS 4
-#define nums2minor(num,type,id) ((num << 6) | (id << 4) | type)
+#define nums2minor(num, type, id) ((num << 6) | (id << 4) | type)
#define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64)
#endif
@@ -85,7 +85,7 @@ static int dvb_device_open(struct inode *inode, struct file *file)
file->private_data = dvbdev;
replace_fops(file, new_fops);
if (file->f_op->open)
- err = file->f_op->open(inode,file);
+ err = file->f_op->open(inode, file);
up_read(&minor_rwsem);
mutex_unlock(&dvbdev_mutex);
return err;
@@ -352,7 +352,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
ret = media_device_register_entity(dvbdev->adapter->mdev,
dvbdev->entity);
if (ret)
- return (ret);
+ return ret;
printk(KERN_DEBUG "%s: media entity '%s' registered.\n",
__func__, dvbdev->entity->name);
@@ -620,8 +620,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
return -ENOMEM;
adap->conn = conn;
- adap->conn_pads = kcalloc(1, sizeof(*adap->conn_pads),
- GFP_KERNEL);
+ adap->conn_pads = kzalloc(sizeof(*adap->conn_pads), GFP_KERNEL);
if (!adap->conn_pads)
return -ENOMEM;
@@ -661,7 +660,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
if (ntuner && ndemod) {
ret = media_create_pad_links(mdev,
MEDIA_ENT_F_TUNER,
- tuner, TUNER_PAD_IF_OUTPUT,
+ tuner, TUNER_PAD_OUTPUT,
MEDIA_ENT_F_DTV_DEMOD,
demod, 0, MEDIA_LNK_FL_ENABLED,
false);
@@ -682,7 +681,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
if (demux && ca) {
ret = media_create_pad_link(demux, 1, ca,
0, MEDIA_LNK_FL_ENABLED);
- if (!ret)
+ if (ret)
return -ENOMEM;
}
@@ -868,7 +867,7 @@ int dvb_usercopy(struct file *file,
parg = sbuf;
} else {
/* too big to allocate from stack */
- mbuf = kmalloc(_IOC_SIZE(cmd),GFP_KERNEL);
+ mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 310e4b8beae8..a82f77c49bd5 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -73,6 +73,14 @@ config DVB_SI2165
Say Y when you want to support this frontend.
+config DVB_MN88473
+ tristate "Panasonic MN88473"
+ depends on DVB_CORE && I2C
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
comment "DVB-S (satellite) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index 37ef17b5b995..eb7191f4219d 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_DVB_STV0900) += stv0900.o
obj-$(CONFIG_DVB_STV090x) += stv090x.o
obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
obj-$(CONFIG_DVB_M88DS3103) += m88ds3103.o
+obj-$(CONFIG_DVB_MN88473) += mn88473.o
obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index e23197da84af..8bcde336ffd7 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -866,9 +866,9 @@ err:
return ret;
}
-static int af9013_get_frontend(struct dvb_frontend *fe)
+static int af9013_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct af9013_state *state = fe->demodulator_priv;
int ret;
u8 buf[3];
@@ -1344,6 +1344,10 @@ err:
static void af9013_release(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+
+ /* stop statistics polling */
+ cancel_delayed_work_sync(&state->statistics_work);
+
kfree(state);
}
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index bc35206a0821..efebe5ce2429 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -691,10 +691,10 @@ err:
return ret;
}
-static int af9033_get_frontend(struct dvb_frontend *fe)
+static int af9033_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct af9033_dev *dev = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[8];
@@ -1372,6 +1372,9 @@ static int af9033_remove(struct i2c_client *client)
dev_dbg(&dev->client->dev, "\n");
+ /* stop statistics polling */
+ cancel_delayed_work_sync(&dev->stat_work);
+
dev->fe.ops.release = NULL;
dev->fe.demodulator_priv = NULL;
kfree(dev);
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 544c5f65d19a..9412fcd1bddb 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -190,10 +190,10 @@ static int as102_fe_set_frontend(struct dvb_frontend *fe)
return state->ops->set_tune(state->priv, &tune_args);
}
-static int as102_fe_get_frontend(struct dvb_frontend *fe)
+static int as102_fe_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct as102_state *state = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret = 0;
struct as10x_tps tps = { 0 };
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index 8fe552e293ed..47248b868e38 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -297,9 +297,9 @@ static int atbm8830_set_fe(struct dvb_frontend *fe)
return 0;
}
-static int atbm8830_get_fe(struct dvb_frontend *fe)
+static int atbm8830_get_fe(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
dprintk("%s\n", __func__);
/* TODO: get real readings from device */
diff --git a/drivers/media/dvb-frontends/au8522.h b/drivers/media/dvb-frontends/au8522.h
index dde61582c158..78bf3f73e58d 100644
--- a/drivers/media/dvb-frontends/au8522.h
+++ b/drivers/media/dvb-frontends/au8522.h
@@ -89,5 +89,4 @@ enum au8522_audio_input {
AU8522_AUDIO_NONE,
AU8522_AUDIO_SIF,
};
-
#endif /* __AU8522_H__ */
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index 73612c5353d1..add246382806 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -763,9 +763,10 @@ static int au8522_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(sd, client, &au8522_ops);
#if defined(CONFIG_MEDIA_CONTROLLER)
- state->pads[AU8522_PAD_INPUT].flags = MEDIA_PAD_FL_SINK;
- state->pads[AU8522_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
- state->pads[AU8522_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ state->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[DEMOD_PAD_AUDIO_OUT].flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(state->pads),
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index 6c1e97640f3f..e676b9461a59 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -816,9 +816,9 @@ static int au8522_read_ber(struct dvb_frontend *fe, u32 *ber)
return au8522_read_ucblocks(fe, ber);
}
-static int au8522_get_frontend(struct dvb_frontend *fe)
+static int au8522_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct au8522_state *state = fe->demodulator_priv;
c->frequency = state->current_frequency;
diff --git a/drivers/media/dvb-frontends/au8522_priv.h b/drivers/media/dvb-frontends/au8522_priv.h
index 404a0cb0ed8d..f5a9438f6ce5 100644
--- a/drivers/media/dvb-frontends/au8522_priv.h
+++ b/drivers/media/dvb-frontends/au8522_priv.h
@@ -30,6 +30,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mc.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
#include "au8522.h"
@@ -39,14 +40,6 @@
#define AU8522_DIGITAL_MODE 1
#define AU8522_SUSPEND_MODE 2
-enum au8522_media_pads {
- AU8522_PAD_INPUT,
- AU8522_PAD_VID_OUT,
- AU8522_PAD_VBI_OUT,
-
- AU8522_NUM_PADS
-};
-
struct au8522_state {
struct i2c_client *c;
struct i2c_adapter *i2c;
@@ -78,7 +71,7 @@ struct au8522_state {
struct v4l2_ctrl_handler hdl;
#ifdef CONFIG_MEDIA_CONTROLLER
- struct media_pad pads[AU8522_NUM_PADS];
+ struct media_pad pads[DEMOD_NUM_PADS];
#endif
};
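
The au8522 pad conversion above deserves a gloss: instead of a private enum, the driver now indexes its pads with the shared DEMOD_PAD_* values from media/v4l2-mc.h, which is what lets generic code wire demodulator entities uniformly (and adds the audio pad the private enum lacked). A condensed sketch of the registration (demo_init_pads is illustrative):

    #include <media/v4l2-mc.h>
    #include <media/media-entity.h>

    /* DEMOD_PAD_IF_INPUT .. DEMOD_PAD_AUDIO_OUT and DEMOD_NUM_PADS come
     * from v4l2-mc.h and are shared by every demod driver */
    static int demo_init_pads(struct media_entity *entity,
                              struct media_pad *pads)
    {
            pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
            pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
            pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
            pads[DEMOD_PAD_AUDIO_OUT].flags = MEDIA_PAD_FL_SOURCE;

            return media_entity_pads_init(entity, DEMOD_NUM_PADS, pads);
    }
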
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index d30275f27644..bb698839e477 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-5, B2C2 inc.
*
- * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de>
+ * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de>
*
* This driver is "hard-coded" to be used with the 1st generation of
* Technisat/B2C2's Air2PC ATSC PCI/USB cards/boxes. The pll-programming
@@ -865,5 +865,5 @@ static struct dvb_frontend_ops bcm3510_ops = {
};
MODULE_DESCRIPTION("Broadcom BCM3510 ATSC (8VSB/16VSB & ITU J83 AnnexB FEC QAM64/256) demodulator driver");
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/bcm3510.h b/drivers/media/dvb-frontends/bcm3510.h
index ff66492fb940..961c2eb87c68 100644
--- a/drivers/media/dvb-frontends/bcm3510.h
+++ b/drivers/media/dvb-frontends/bcm3510.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-5, B2C2 inc.
*
- * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de>
+ * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb-frontends/bcm3510_priv.h b/drivers/media/dvb-frontends/bcm3510_priv.h
index 3bb1bc2a04f0..67f24686c31b 100644
--- a/drivers/media/dvb-frontends/bcm3510_priv.h
+++ b/drivers/media/dvb-frontends/bcm3510_priv.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-5, B2C2 inc.
*
- * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de>
+ * GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index fd033cca6e11..5cad925609e0 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -345,9 +345,9 @@ static int cx22700_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int cx22700_get_frontend(struct dvb_frontend *fe)
+static int cx22700_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx22700_state* state = fe->demodulator_priv;
u8 reg09 = cx22700_readreg (state, 0x09);
diff --git a/drivers/media/dvb-frontends/cx22702.c b/drivers/media/dvb-frontends/cx22702.c
index d2d06dcd7683..c0e54c59cccf 100644
--- a/drivers/media/dvb-frontends/cx22702.c
+++ b/drivers/media/dvb-frontends/cx22702.c
@@ -562,9 +562,9 @@ static int cx22702_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int cx22702_get_frontend(struct dvb_frontend *fe)
+static int cx22702_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx22702_state *state = fe->demodulator_priv;
u8 reg0C = cx22702_readreg(state, 0x0C);
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index cb36475e322b..6cb81ec12847 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -550,9 +550,9 @@ static int cx24110_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int cx24110_get_frontend(struct dvb_frontend *fe)
+static int cx24110_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cx24110_state *state = fe->demodulator_priv;
s32 afc; unsigned sclk;
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index 5f77bc80a896..a3f7eb4e609d 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1560,10 +1560,10 @@ static int cx24117_get_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_HW;
}
-static int cx24117_get_frontend(struct dvb_frontend *fe)
+static int cx24117_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct cx24117_state *state = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx24117_cmd cmd;
u8 reg, st, inv;
int ret, idx;
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 3b0ef52bb834..066ee387bf25 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -1502,16 +1502,18 @@ static int cx24120_sleep(struct dvb_frontend *fe)
return 0;
}
-static int cx24120_get_frontend(struct dvb_frontend *fe)
+static int cx24120_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx24120_state *state = fe->demodulator_priv;
u8 freq1, freq2, freq3;
+ int status;
dev_dbg(&state->i2c->dev, "\n");
/* don't return empty data if we're not tuned in */
- if ((state->fe_status & FE_HAS_LOCK) == 0)
+ status = cx24120_readreg(state, CX24120_REG_STATUS);
+ if (!(status & CX24120_HAS_LOCK))
return 0;
/* Get frequency */
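
cx24120 now re-reads the status register rather than trusting the cached fe_status, so a get_frontend call issued after a lost lock no longer returns stale tuning data. The guard, reduced to a sketch with hypothetical register and bit names; get_frontend then bails out early when this returns false:

    #define DEMO_REG_STATUS 0x3a    /* hypothetical status register */
    #define DEMO_HAS_LOCK   0x08    /* hypothetical lock bit */

    static bool demo_is_locked(struct demo_state *state)
    {
            int status = demo_readreg(state, DEMO_REG_STATUS); /* hypothetical */

            /* treat read errors the same as "not locked" */
            return status >= 0 && (status & DEMO_HAS_LOCK);
    }
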
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 0fe7fb11124b..113b0949408a 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -945,9 +945,9 @@ static int cx24123_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int cx24123_get_frontend(struct dvb_frontend *fe)
+static int cx24123_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cx24123_state *state = fe->demodulator_priv;
dprintk("\n");
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 42fad6aa3958..a674a6312c38 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -101,10 +101,10 @@ error:
return ret;
}
-int cxd2820r_get_frontend_c(struct dvb_frontend *fe)
+int cxd2820r_get_frontend_c(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[2];
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 24a457d9d803..314d3b8c1080 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -313,7 +313,8 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, enum fe_status *status)
return ret;
}
-static int cxd2820r_get_frontend(struct dvb_frontend *fe)
+static int cxd2820r_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret;
@@ -326,13 +327,13 @@ static int cxd2820r_get_frontend(struct dvb_frontend *fe)
switch (fe->dtv_property_cache.delivery_system) {
case SYS_DVBT:
- ret = cxd2820r_get_frontend_t(fe);
+ ret = cxd2820r_get_frontend_t(fe, p);
break;
case SYS_DVBT2:
- ret = cxd2820r_get_frontend_t2(fe);
+ ret = cxd2820r_get_frontend_t2(fe, p);
break;
case SYS_DVBC_ANNEX_A:
- ret = cxd2820r_get_frontend_c(fe);
+ ret = cxd2820r_get_frontend_c(fe, p);
break;
default:
ret = -EINVAL;
@@ -606,8 +607,7 @@ static int cxd2820r_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr,
int val)
{
- struct cxd2820r_priv *priv =
- container_of(chip, struct cxd2820r_priv, gpio_chip);
+ struct cxd2820r_priv *priv = gpiochip_get_data(chip);
u8 gpio[GPIO_COUNT];
dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
@@ -620,8 +620,7 @@ static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr,
static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
{
- struct cxd2820r_priv *priv =
- container_of(chip, struct cxd2820r_priv, gpio_chip);
+ struct cxd2820r_priv *priv = gpiochip_get_data(chip);
u8 gpio[GPIO_COUNT];
dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
@@ -636,8 +635,7 @@ static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
static int cxd2820r_gpio_get(struct gpio_chip *chip, unsigned nr)
{
- struct cxd2820r_priv *priv =
- container_of(chip, struct cxd2820r_priv, gpio_chip);
+ struct cxd2820r_priv *priv = gpiochip_get_data(chip);
dev_dbg(&priv->i2c->dev, "%s: nr=%d\n", __func__, nr);
@@ -731,7 +729,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
priv->gpio_chip.base = -1; /* dynamic allocation */
priv->gpio_chip.ngpio = GPIO_COUNT;
priv->gpio_chip.can_sleep = 1;
- ret = gpiochip_add(&priv->gpio_chip);
+ ret = gpiochip_add_data(&priv->gpio_chip, priv);
if (ret)
goto error;
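
The gpio_chip changes above move cxd2820r from the container_of() idiom to the data-pointer API: gpiochip_add_data() records a driver-private pointer at registration time and gpiochip_get_data() hands it back inside every callback, so the chip no longer has to be embedded at a known offset. A minimal sketch (demo_priv and the helpers are illustrative):

    #include <linux/gpio/driver.h>

    struct demo_priv {
            struct gpio_chip gpio_chip;
            /* ... */
    };

    static void demo_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
    {
            /* returns whatever pointer was passed to gpiochip_add_data() */
            struct demo_priv *priv = gpiochip_get_data(chip);

            /* ... program the register for line nr using priv ... */
    }

    static int demo_register(struct demo_priv *priv)
    {
            priv->gpio_chip.set = demo_gpio_set;
            priv->gpio_chip.base = -1;      /* dynamic base allocation */
            priv->gpio_chip.ngpio = 2;
            priv->gpio_chip.can_sleep = 1;

            return gpiochip_add_data(&priv->gpio_chip, priv);
    }
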
diff --git a/drivers/media/dvb-frontends/cxd2820r_priv.h b/drivers/media/dvb-frontends/cxd2820r_priv.h
index a0d53f01a8bf..e31c48e53097 100644
--- a/drivers/media/dvb-frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb-frontends/cxd2820r_priv.h
@@ -76,7 +76,8 @@ int cxd2820r_rd_reg(struct cxd2820r_priv *priv, u32 reg, u8 *val);
/* cxd2820r_c.c */
-int cxd2820r_get_frontend_c(struct dvb_frontend *fe);
+int cxd2820r_get_frontend_c(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p);
int cxd2820r_set_frontend_c(struct dvb_frontend *fe);
@@ -99,7 +100,8 @@ int cxd2820r_get_tune_settings_c(struct dvb_frontend *fe,
/* cxd2820r_t.c */
-int cxd2820r_get_frontend_t(struct dvb_frontend *fe);
+int cxd2820r_get_frontend_t(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p);
int cxd2820r_set_frontend_t(struct dvb_frontend *fe);
@@ -122,7 +124,8 @@ int cxd2820r_get_tune_settings_t(struct dvb_frontend *fe,
/* cxd2820r_t2.c */
-int cxd2820r_get_frontend_t2(struct dvb_frontend *fe);
+int cxd2820r_get_frontend_t2(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p);
int cxd2820r_set_frontend_t2(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index 21abf1b4ed4d..75ce7d8ded00 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -138,10 +138,10 @@ error:
return ret;
}
-int cxd2820r_get_frontend_t(struct dvb_frontend *fe)
+int cxd2820r_get_frontend_t(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[2];
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index 4e028b41c0d5..704475676234 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -23,8 +23,8 @@
int cxd2820r_set_frontend_t2(struct dvb_frontend *fe)
{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct cxd2820r_priv *priv = fe->demodulator_priv;
int ret, i, bw_i;
u32 if_freq, if_ctl;
u64 num;
@@ -169,10 +169,10 @@ error:
}
-int cxd2820r_get_frontend_t2(struct dvb_frontend *fe)
+int cxd2820r_get_frontend_t2(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[2];
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index fdffb2f0ded8..900186ba8e62 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -2090,13 +2090,13 @@ static int cxd2841er_sleep_tc_to_active_c(struct cxd2841er_priv *priv,
return 0;
}
-static int cxd2841er_get_frontend(struct dvb_frontend *fe)
+static int cxd2841er_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
enum fe_status status = 0;
u16 strength = 0, snr = 0;
u32 errors = 0, ber = 0;
struct cxd2841er_priv *priv = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
if (priv->state == STATE_ACTIVE_S)
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index 0b8fb5dd1889..ee7d66997ccd 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -774,6 +774,6 @@ free_mem:
}
EXPORT_SYMBOL(dib0070_attach);
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 47cb72243b9d..dc2d41e144fd 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -1115,9 +1115,15 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
dib0090_set_bbramp_pwm(state, bb_ramp);
/* activate the ramp generator using PWM control */
- dprintk("ramp RF gain = %d BAND = %s version = %d", state->rf_ramp[0], (state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND", state->identity.version & 0x1f);
-
- if ((state->rf_ramp[0] == 0) || (state->current_band == BAND_CBAND && (state->identity.version & 0x1f) <= P1D_E_F)) {
+ if (state->rf_ramp)
+ dprintk("ramp RF gain = %d BAND = %s version = %d",
+ state->rf_ramp[0],
+ (state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND",
+ state->identity.version & 0x1f);
+
+ if (rf_ramp && ((state->rf_ramp[0] == 0) ||
+ (state->current_band == BAND_CBAND &&
+ (state->identity.version & 0x1f) <= P1D_E_F))) {
dprintk("DE-Engage mux for direct gain reg control");
en_pwm_rf_mux = 0;
} else
@@ -2669,7 +2675,7 @@ free_mem:
}
EXPORT_SYMBOL(dib0090_fw_register);
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
-MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner");
MODULE_LICENSE("GPL");
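
The dib0090 hunk guards both the debug print and the ramp test so a NULL RF ramp table is never dereferenced. Stripped to its essentials (names illustrative):

    #include <linux/types.h>

    static bool demo_use_direct_gain(const u16 *rf_ramp, bool legacy_silicon)
    {
            /* test the table pointer before reading rf_ramp[0]; the old
             * code dereferenced it unconditionally */
            return rf_ramp && (rf_ramp[0] == 0 || legacy_silicon);
    }
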
diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
index 6ae9899b5b45..d5dfafb4ef13 100644
--- a/drivers/media/dvb-frontends/dib3000.h
+++ b/drivers/media/dvb-frontends/dib3000.h
@@ -2,11 +2,11 @@
* public header file of the frontend drivers for mobile DVB-T demodulators
* DiBcom 3000M-B and DiBcom 3000P/M-C (http://www.dibcom.fr/)
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* based on GPL code from DibCom, which has
*
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -14,7 +14,7 @@
*
* Acknowledgements
*
- * Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver
+ * Amaury Demol from DiBcom for providing specs and driver
* sources, on which this driver (and the dvb-dibusb) are based.
*
* see Documentation/dvb/README.dvb-usb for more information
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 7a61172d0d45..6821ecb53d63 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -2,11 +2,11 @@
* Frontend driver for mobile DVB-T demodulator DiBcom 3000M-B
* DiBcom (http://www.dibcom.fr/)
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* based on GPL code from DibCom, which has
*
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -14,7 +14,7 @@
*
* Acknowledgements
*
- * Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver
+ * Amaury Demol from DiBcom for providing specs and driver
* sources, on which this driver (and the dvb-dibusb) are based.
*
* see Documentation/dvb/README.dvb-usb for more information
@@ -36,7 +36,7 @@
/* Version information */
#define DRIVER_VERSION "0.1"
#define DRIVER_DESC "DiBcom 3000M-B DVB-T demodulator"
-#define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@desy.de"
+#define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@posteo.de"
static int debug;
module_param(debug, int, 0644);
@@ -112,7 +112,8 @@ static u16 dib3000_seq[2][2][2] = /* fft,gua, inv */
}
};
-static int dib3000mb_get_frontend(struct dvb_frontend* fe);
+static int dib3000mb_get_frontend(struct dvb_frontend* fe,
+ struct dtv_frontend_properties *c);
static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
{
@@ -359,7 +360,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
deb_setf("search_state after autosearch %d after %d checks\n",search_state,as_count);
if (search_state == 1) {
- if (dib3000mb_get_frontend(fe) == 0) {
+ if (dib3000mb_get_frontend(fe, c) == 0) {
deb_setf("reading tuning data from frontend succeeded.\n");
return dib3000mb_set_frontend(fe, 0);
}
@@ -450,9 +451,9 @@ static int dib3000mb_fe_init(struct dvb_frontend* fe, int mobile_mode)
return 0;
}
-static int dib3000mb_get_frontend(struct dvb_frontend* fe)
+static int dib3000mb_get_frontend(struct dvb_frontend* fe,
+ struct dtv_frontend_properties *c)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dib3000_state* state = fe->demodulator_priv;
enum fe_code_rate *cr;
u16 tps_val;
diff --git a/drivers/media/dvb-frontends/dib3000mb_priv.h b/drivers/media/dvb-frontends/dib3000mb_priv.h
index 9dc235aa44b7..0459d5c84314 100644
--- a/drivers/media/dvb-frontends/dib3000mb_priv.h
+++ b/drivers/media/dvb-frontends/dib3000mb_priv.h
@@ -1,7 +1,7 @@
/*
* dib3000mb_priv.h
*
- * Copyright (C) 2004 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 583d6b7fabed..da0f1dc5aaf7 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -2,7 +2,7 @@
* Driver for DiBcom DiB3000MC/P-demodulator.
*
* Copyright (C) 2004-7 DiBcom (http://www.dibcom.fr/)
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* This code is partially based on the previous dib3000mc.c .
*
@@ -636,9 +636,9 @@ struct i2c_adapter * dib3000mc_get_tuner_i2c_master(struct dvb_frontend *demod,
EXPORT_SYMBOL(dib3000mc_get_tuner_i2c_master);
-static int dib3000mc_get_frontend(struct dvb_frontend* fe)
+static int dib3000mc_get_frontend(struct dvb_frontend* fe,
+ struct dtv_frontend_properties *fep)
{
- struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib3000mc_state *state = fe->demodulator_priv;
u16 tps = dib3000mc_read_word(state,458);
@@ -726,7 +726,7 @@ static int dib3000mc_set_frontend(struct dvb_frontend *fe)
if (found == 0 || found == 1)
return 0; // no channel found
- dib3000mc_get_frontend(fe);
+ dib3000mc_get_frontend(fe, fep);
}
ret = dib3000mc_tune(fe);
@@ -939,6 +939,6 @@ static struct dvb_frontend_ops dib3000mc_ops = {
.read_ucblocks = dib3000mc_read_unc_blocks,
};
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 3000MC/P COFDM demodulator");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/dib3000mc.h b/drivers/media/dvb-frontends/dib3000mc.h
index 74816f793611..b37e69e6a58c 100644
--- a/drivers/media/dvb-frontends/dib3000mc.h
+++ b/drivers/media/dvb-frontends/dib3000mc.h
@@ -2,7 +2,7 @@
* Driver for DiBcom DiB3000MC/P-demodulator.
*
* Copyright (C) 2004-6 DiBcom (http://www.dibcom.fr/)
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher\@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* This code is partially based on the previous dib3000mc.c .
*
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index 35eb71fe3c2b..b3ddae8885ac 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -1151,9 +1151,9 @@ static int dib7000m_identify(struct dib7000m_state *state)
}
-static int dib7000m_get_frontend(struct dvb_frontend* fe)
+static int dib7000m_get_frontend(struct dvb_frontend* fe,
+ struct dtv_frontend_properties *fep)
{
- struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000m_state *state = fe->demodulator_priv;
u16 tps = dib7000m_read_word(state,480);
@@ -1246,7 +1246,7 @@ static int dib7000m_set_frontend(struct dvb_frontend *fe)
if (found == 0 || found == 1)
return 0; // no channel found
- dib7000m_get_frontend(fe);
+ dib7000m_get_frontend(fe, fep);
}
ret = dib7000m_tune(fe);
@@ -1465,6 +1465,6 @@ static struct dvb_frontend_ops dib7000m_ops = {
.read_ucblocks = dib7000m_read_unc_blocks,
};
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 7000MA/MB/PA/PB/MC COFDM demodulator");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 33be5d6b9e10..b861d4437f2a 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -1405,9 +1405,9 @@ static int dib7000p_identify(struct dib7000p_state *st)
return 0;
}
-static int dib7000p_get_frontend(struct dvb_frontend *fe)
+static int dib7000p_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *fep)
{
- struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000p_state *state = fe->demodulator_priv;
u16 tps = dib7000p_read_word(state, 463);
@@ -1540,7 +1540,7 @@ static int dib7000p_set_frontend(struct dvb_frontend *fe)
if (found == 0 || found == 1)
return 0;
- dib7000p_get_frontend(fe);
+ dib7000p_get_frontend(fe, fep);
}
ret = dib7000p_tune(fe);
@@ -2834,7 +2834,7 @@ static struct dvb_frontend_ops dib7000p_ops = {
.read_ucblocks = dib7000p_read_unc_blocks,
};
-MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Olivier Grenie <olivie.grenie@parrot.com>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 7000PC COFDM demodulator");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 94c26270fff0..ddf9c44877a2 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -3382,14 +3382,15 @@ static int dib8000_sleep(struct dvb_frontend *fe)
static int dib8000_read_status(struct dvb_frontend *fe, enum fe_status *stat);
-static int dib8000_get_frontend(struct dvb_frontend *fe)
+static int dib8000_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct dib8000_state *state = fe->demodulator_priv;
u16 i, val = 0;
enum fe_status stat = 0;
u8 index_frontend, sub_index_frontend;
- fe->dtv_property_cache.bandwidth_hz = 6000000;
+ c->bandwidth_hz = 6000000;
/*
* If called too early, get_frontend makes dib8000_tune either
@@ -3406,7 +3407,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe)
if (stat&FE_HAS_SYNC) {
dprintk("TMCC lock on the slave%i", index_frontend);
/* synchronize the cache with the other frontends */
- state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend]);
+ state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c);
for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) {
if (sub_index_frontend != index_frontend) {
state->fe[sub_index_frontend]->dtv_property_cache.isdbt_sb_mode = state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode;
@@ -3426,57 +3427,57 @@ static int dib8000_get_frontend(struct dvb_frontend *fe)
}
}
- fe->dtv_property_cache.isdbt_sb_mode = dib8000_read_word(state, 508) & 0x1;
+ c->isdbt_sb_mode = dib8000_read_word(state, 508) & 0x1;
if (state->revision == 0x8090)
val = dib8000_read_word(state, 572);
else
val = dib8000_read_word(state, 570);
- fe->dtv_property_cache.inversion = (val & 0x40) >> 6;
+ c->inversion = (val & 0x40) >> 6;
switch ((val & 0x30) >> 4) {
case 1:
- fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_2K;
+ c->transmission_mode = TRANSMISSION_MODE_2K;
dprintk("dib8000_get_frontend: transmission mode 2K");
break;
case 2:
- fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_4K;
+ c->transmission_mode = TRANSMISSION_MODE_4K;
dprintk("dib8000_get_frontend: transmission mode 4K");
break;
case 3:
default:
- fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K;
+ c->transmission_mode = TRANSMISSION_MODE_8K;
dprintk("dib8000_get_frontend: transmission mode 8K");
break;
}
switch (val & 0x3) {
case 0:
- fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_32;
+ c->guard_interval = GUARD_INTERVAL_1_32;
dprintk("dib8000_get_frontend: Guard Interval = 1/32 ");
break;
case 1:
- fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_16;
+ c->guard_interval = GUARD_INTERVAL_1_16;
dprintk("dib8000_get_frontend: Guard Interval = 1/16 ");
break;
case 2:
dprintk("dib8000_get_frontend: Guard Interval = 1/8 ");
- fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8;
+ c->guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
dprintk("dib8000_get_frontend: Guard Interval = 1/4 ");
- fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_4;
+ c->guard_interval = GUARD_INTERVAL_1_4;
break;
}
val = dib8000_read_word(state, 505);
- fe->dtv_property_cache.isdbt_partial_reception = val & 1;
- dprintk("dib8000_get_frontend: partial_reception = %d ", fe->dtv_property_cache.isdbt_partial_reception);
+ c->isdbt_partial_reception = val & 1;
+ dprintk("dib8000_get_frontend: partial_reception = %d ", c->isdbt_partial_reception);
for (i = 0; i < 3; i++) {
int show;
val = dib8000_read_word(state, 493 + i) & 0x0f;
- fe->dtv_property_cache.layer[i].segment_count = val;
+ c->layer[i].segment_count = val;
if (val == 0 || val > 13)
show = 0;
@@ -3485,41 +3486,41 @@ static int dib8000_get_frontend(struct dvb_frontend *fe)
if (show)
dprintk("dib8000_get_frontend: Layer %d segments = %d ",
- i, fe->dtv_property_cache.layer[i].segment_count);
+ i, c->layer[i].segment_count);
val = dib8000_read_word(state, 499 + i) & 0x3;
/* Interleaving can be 0, 1, 2 or 4 */
if (val == 3)
val = 4;
- fe->dtv_property_cache.layer[i].interleaving = val;
+ c->layer[i].interleaving = val;
if (show)
dprintk("dib8000_get_frontend: Layer %d time_intlv = %d ",
- i, fe->dtv_property_cache.layer[i].interleaving);
+ i, c->layer[i].interleaving);
val = dib8000_read_word(state, 481 + i);
switch (val & 0x7) {
case 1:
- fe->dtv_property_cache.layer[i].fec = FEC_1_2;
+ c->layer[i].fec = FEC_1_2;
if (show)
dprintk("dib8000_get_frontend: Layer %d Code Rate = 1/2 ", i);
break;
case 2:
- fe->dtv_property_cache.layer[i].fec = FEC_2_3;
+ c->layer[i].fec = FEC_2_3;
if (show)
dprintk("dib8000_get_frontend: Layer %d Code Rate = 2/3 ", i);
break;
case 3:
- fe->dtv_property_cache.layer[i].fec = FEC_3_4;
+ c->layer[i].fec = FEC_3_4;
if (show)
dprintk("dib8000_get_frontend: Layer %d Code Rate = 3/4 ", i);
break;
case 5:
- fe->dtv_property_cache.layer[i].fec = FEC_5_6;
+ c->layer[i].fec = FEC_5_6;
if (show)
dprintk("dib8000_get_frontend: Layer %d Code Rate = 5/6 ", i);
break;
default:
- fe->dtv_property_cache.layer[i].fec = FEC_7_8;
+ c->layer[i].fec = FEC_7_8;
if (show)
dprintk("dib8000_get_frontend: Layer %d Code Rate = 7/8 ", i);
break;
@@ -3528,23 +3529,23 @@ static int dib8000_get_frontend(struct dvb_frontend *fe)
val = dib8000_read_word(state, 487 + i);
switch (val & 0x3) {
case 0:
- fe->dtv_property_cache.layer[i].modulation = DQPSK;
+ c->layer[i].modulation = DQPSK;
if (show)
dprintk("dib8000_get_frontend: Layer %d DQPSK ", i);
break;
case 1:
- fe->dtv_property_cache.layer[i].modulation = QPSK;
+ c->layer[i].modulation = QPSK;
if (show)
dprintk("dib8000_get_frontend: Layer %d QPSK ", i);
break;
case 2:
- fe->dtv_property_cache.layer[i].modulation = QAM_16;
+ c->layer[i].modulation = QAM_16;
if (show)
dprintk("dib8000_get_frontend: Layer %d QAM16 ", i);
break;
case 3:
default:
- fe->dtv_property_cache.layer[i].modulation = QAM_64;
+ c->layer[i].modulation = QAM_64;
if (show)
dprintk("dib8000_get_frontend: Layer %d QAM64 ", i);
break;
@@ -3553,16 +3554,16 @@ static int dib8000_get_frontend(struct dvb_frontend *fe)
/* synchronize the cache with the other frontends */
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
- state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode = fe->dtv_property_cache.isdbt_sb_mode;
- state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion;
- state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode;
- state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval;
- state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception = fe->dtv_property_cache.isdbt_partial_reception;
+ state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode = c->isdbt_sb_mode;
+ state->fe[index_frontend]->dtv_property_cache.inversion = c->inversion;
+ state->fe[index_frontend]->dtv_property_cache.transmission_mode = c->transmission_mode;
+ state->fe[index_frontend]->dtv_property_cache.guard_interval = c->guard_interval;
+ state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception = c->isdbt_partial_reception;
for (i = 0; i < 3; i++) {
- state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count = fe->dtv_property_cache.layer[i].segment_count;
- state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving = fe->dtv_property_cache.layer[i].interleaving;
- state->fe[index_frontend]->dtv_property_cache.layer[i].fec = fe->dtv_property_cache.layer[i].fec;
- state->fe[index_frontend]->dtv_property_cache.layer[i].modulation = fe->dtv_property_cache.layer[i].modulation;
+ state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count = c->layer[i].segment_count;
+ state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving = c->layer[i].interleaving;
+ state->fe[index_frontend]->dtv_property_cache.layer[i].fec = c->layer[i].fec;
+ state->fe[index_frontend]->dtv_property_cache.layer[i].modulation = c->layer[i].modulation;
}
}
return 0;
@@ -3671,7 +3672,7 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
if (state->channel_parameters_set == 0) { /* searching */
if ((dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_DEMOD_SUCCESS) || (dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_FFT_SUCCESS)) {
dprintk("autosearch succeeded on fe%i", index_frontend);
- dib8000_get_frontend(state->fe[index_frontend]); /* we read the channel parameters from the frontend which was successful */
+ dib8000_get_frontend(state->fe[index_frontend], c); /* we read the channel parameters from the frontend which was successful */
state->channel_parameters_set = 1;
for (l = 0; (l < MAX_NUMBER_OF_FRONTENDS) && (state->fe[l] != NULL); l++) {
@@ -4516,6 +4517,6 @@ void *dib8000_attach(struct dib8000_ops *ops)
}
EXPORT_SYMBOL(dib8000_attach);
-MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@dibcom.fr, " "Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@parrot.com, Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 8f92aca0b073..5897977d2d00 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -225,7 +225,7 @@ static u16 to_fw_output_mode(u16 mode)
}
}
-static u16 dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32 len, u16 attribute)
+static int dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 *b, u32 len, u16 attribute)
{
u32 chunk_size = 126;
u32 l;
@@ -309,7 +309,7 @@ static inline u16 dib9000_read_word_attr(struct dib9000_state *state, u16 reg, u
#define dib9000_read16_noinc_attr(state, reg, b, len, attribute) dib9000_read16_attr(state, reg, b, len, (attribute) | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
-static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 * buf, u32 len, u16 attribute)
+static int dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 *buf, u32 len, u16 attribute)
{
u32 chunk_size = 126;
u32 l;
@@ -1889,7 +1889,8 @@ static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_fron
return 0;
}
-static int dib9000_get_frontend(struct dvb_frontend *fe)
+static int dib9000_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct dib9000_state *state = fe->demodulator_priv;
u8 index_frontend, sub_index_frontend;
@@ -1909,7 +1910,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe)
dprintk("TPS lock on the slave%i", index_frontend);
/* synchronize the cache with the other frontends */
- state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend]);
+ state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c);
for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL);
sub_index_frontend++) {
if (sub_index_frontend != index_frontend) {
@@ -1943,14 +1944,14 @@ static int dib9000_get_frontend(struct dvb_frontend *fe)
/* synchronize the cache with the other frontends */
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
- state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion;
- state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode;
- state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval;
- state->fe[index_frontend]->dtv_property_cache.modulation = fe->dtv_property_cache.modulation;
- state->fe[index_frontend]->dtv_property_cache.hierarchy = fe->dtv_property_cache.hierarchy;
- state->fe[index_frontend]->dtv_property_cache.code_rate_HP = fe->dtv_property_cache.code_rate_HP;
- state->fe[index_frontend]->dtv_property_cache.code_rate_LP = fe->dtv_property_cache.code_rate_LP;
- state->fe[index_frontend]->dtv_property_cache.rolloff = fe->dtv_property_cache.rolloff;
+ state->fe[index_frontend]->dtv_property_cache.inversion = c->inversion;
+ state->fe[index_frontend]->dtv_property_cache.transmission_mode = c->transmission_mode;
+ state->fe[index_frontend]->dtv_property_cache.guard_interval = c->guard_interval;
+ state->fe[index_frontend]->dtv_property_cache.modulation = c->modulation;
+ state->fe[index_frontend]->dtv_property_cache.hierarchy = c->hierarchy;
+ state->fe[index_frontend]->dtv_property_cache.code_rate_HP = c->code_rate_HP;
+ state->fe[index_frontend]->dtv_property_cache.code_rate_LP = c->code_rate_LP;
+ state->fe[index_frontend]->dtv_property_cache.rolloff = c->rolloff;
}
ret = 0;
@@ -2083,7 +2084,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
/* synchronize all the channel cache */
state->get_frontend_internal = 1;
- dib9000_get_frontend(state->fe[0]);
+ dib9000_get_frontend(state->fe[0], &state->fe[0]->dtv_property_cache);
state->get_frontend_internal = 0;
/* retune the other frontends with the found channel */
@@ -2589,7 +2590,7 @@ static struct dvb_frontend_ops dib9000_ops = {
.read_ucblocks = dib9000_read_unc_blocks,
};
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
-MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
MODULE_DESCRIPTION("Driver for the DiBcom 9000 COFDM demodulator");
MODULE_LICENSE("GPL");
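
One subtlety in the dib9000 hunks above: the bulk accessors change their return type from u16 to int. They can fail with a negative errno, and a u16 return would truncate, say, -EREMOTEIO into a large positive value that callers would mistake for success. A sketch of the intent (demo_i2c_read is hypothetical):

    #include <linux/errno.h>
    #include <linux/types.h>

    static int demo_read_attr(void *state, u16 reg, u8 *buf, u32 len)
    {
            int ret = demo_i2c_read(state, reg, buf, len); /* hypothetical */

            if (ret < 0)
                    return ret;     /* errno survives an int return */
            return len;
    }
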
diff --git a/drivers/media/dvb-frontends/dibx000_common.c b/drivers/media/dvb-frontends/dibx000_common.c
index 43be7238311e..723358d7ca84 100644
--- a/drivers/media/dvb-frontends/dibx000_common.c
+++ b/drivers/media/dvb-frontends/dibx000_common.c
@@ -510,6 +510,6 @@ u32 systime(void)
}
EXPORT_SYMBOL(systime);
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index b28b5787b39a..e48b741d439e 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -4131,7 +4131,7 @@ int drxj_dap_scu_atomic_read_write_block(struct i2c_device_addr *dev_addr, u32 a
{
struct drxjscu_cmd scu_cmd;
int rc;
- u16 set_param_parameters[15];
+ u16 set_param_parameters[18];
u16 cmd_result[15];
/* Parameter check */
@@ -9597,12 +9597,13 @@ ctrl_get_qam_sig_quality(struct drx_demod_instance *demod)
Precision errors still possible.
*/
- e = post_bit_err_rs * 742686;
- m = fec_oc_period * 100;
- if (fec_oc_period == 0)
+ if (!fec_oc_period) {
qam_post_rs_ber = 0xFFFFFFFF;
- else
+ } else {
+ e = post_bit_err_rs * 742686;
+ m = fec_oc_period * 100;
qam_post_rs_ber = e / m;
+ }
/* fill signal quality data structure */
p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
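
The drxj BER change above is an order-of-operations fix: the divisor is tested before e and m are ever computed, instead of computing them first and only then checking fec_oc_period. Condensed:

    static u32 demo_post_rs_ber(u32 post_bit_err_rs, u32 fec_oc_period)
    {
            u32 e, m;

            /* saturate rather than divide by zero */
            if (!fec_oc_period)
                    return 0xFFFFFFFF;

            e = post_bit_err_rs * 742686;   /* scale factor from the driver */
            m = fec_oc_period * 100;
            return e / m;
    }
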
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index 14e996d45fac..e5bd8c62ad3a 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -70,9 +70,12 @@ static int dvb_dummy_fe_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
}
/*
- * Only needed if it actually reads something from the hardware
+ * Should only be implemented if it actually reads something from the hardware.
+ * Also, it should check for lock status, in order to avoid reporting
+ * wrong data to userspace.
*/
-static int dvb_dummy_fe_get_frontend(struct dvb_frontend *fe)
+static int dvb_dummy_fe_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
return 0;
}
diff --git a/drivers/media/dvb-frontends/hd29l2.c b/drivers/media/dvb-frontends/hd29l2.c
index 40e359f2d17d..1c7eb477e2cd 100644
--- a/drivers/media/dvb-frontends/hd29l2.c
+++ b/drivers/media/dvb-frontends/hd29l2.c
@@ -560,11 +560,11 @@ static int hd29l2_get_frontend_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_CUSTOM;
}
-static int hd29l2_get_frontend(struct dvb_frontend *fe)
+static int hd29l2_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
int ret;
struct hd29l2_priv *priv = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 buf[3];
u32 if_ctl;
char *str_constellation, *str_code_rate, *str_constellation_code_rate,
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 0977871232a2..2f3d0519e19b 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -243,9 +243,9 @@ static int apply_frontend_param(struct dvb_frontend *fe)
return 0;
}
-static int get_frontend(struct dvb_frontend *fe)
+static int get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct l64781_state* state = fe->demodulator_priv;
int tmp;
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 7880f71ccd8a..f51a3a0b3949 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -942,101 +942,102 @@ static int lg216x_read_rs_err_count(struct lg216x_state *state, u16 *err)
/* ------------------------------------------------------------------------ */
-static int lg216x_get_frontend(struct dvb_frontend *fe)
+static int lg216x_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct lg216x_state *state = fe->demodulator_priv;
int ret;
lg_dbg("\n");
- fe->dtv_property_cache.modulation = VSB_8;
- fe->dtv_property_cache.frequency = state->current_frequency;
- fe->dtv_property_cache.delivery_system = SYS_ATSCMH;
+ c->modulation = VSB_8;
+ c->frequency = state->current_frequency;
+ c->delivery_system = SYS_ATSCMH;
ret = lg216x_get_fic_version(state,
- &fe->dtv_property_cache.atscmh_fic_ver);
+ &c->atscmh_fic_ver);
if (lg_fail(ret))
goto fail;
- if (state->fic_ver != fe->dtv_property_cache.atscmh_fic_ver) {
- state->fic_ver = fe->dtv_property_cache.atscmh_fic_ver;
+ if (state->fic_ver != c->atscmh_fic_ver) {
+ state->fic_ver = c->atscmh_fic_ver;
#if 0
ret = lg2160_get_parade_id(state,
- &fe->dtv_property_cache.atscmh_parade_id);
+ &c->atscmh_parade_id);
if (lg_fail(ret))
goto fail;
/* #else */
- fe->dtv_property_cache.atscmh_parade_id = state->parade_id;
+ c->atscmh_parade_id = state->parade_id;
#endif
ret = lg216x_get_nog(state,
- &fe->dtv_property_cache.atscmh_nog);
+ &c->atscmh_nog);
if (lg_fail(ret))
goto fail;
ret = lg216x_get_tnog(state,
- &fe->dtv_property_cache.atscmh_tnog);
+ &c->atscmh_tnog);
if (lg_fail(ret))
goto fail;
ret = lg216x_get_sgn(state,
- &fe->dtv_property_cache.atscmh_sgn);
+ &c->atscmh_sgn);
if (lg_fail(ret))
goto fail;
ret = lg216x_get_prc(state,
- &fe->dtv_property_cache.atscmh_prc);
+ &c->atscmh_prc);
if (lg_fail(ret))
goto fail;
ret = lg216x_get_rs_frame_mode(state,
(enum atscmh_rs_frame_mode *)
- &fe->dtv_property_cache.atscmh_rs_frame_mode);
+ &c->atscmh_rs_frame_mode);
if (lg_fail(ret))
goto fail;
ret = lg216x_get_rs_frame_ensemble(state,
(enum atscmh_rs_frame_ensemble *)
- &fe->dtv_property_cache.atscmh_rs_frame_ensemble);
+ &c->atscmh_rs_frame_ensemble);
if (lg_fail(ret))
goto fail;
ret = lg216x_get_rs_code_mode(state,
(enum atscmh_rs_code_mode *)
- &fe->dtv_property_cache.atscmh_rs_code_mode_pri,
+ &c->atscmh_rs_code_mode_pri,
(enum atscmh_rs_code_mode *)
- &fe->dtv_property_cache.atscmh_rs_code_mode_sec);
+ &c->atscmh_rs_code_mode_sec);
if (lg_fail(ret))
goto fail;
ret = lg216x_get_sccc_block_mode(state,
(enum atscmh_sccc_block_mode *)
- &fe->dtv_property_cache.atscmh_sccc_block_mode);
+ &c->atscmh_sccc_block_mode);
if (lg_fail(ret))
goto fail;
ret = lg216x_get_sccc_code_mode(state,
(enum atscmh_sccc_code_mode *)
- &fe->dtv_property_cache.atscmh_sccc_code_mode_a,
+ &c->atscmh_sccc_code_mode_a,
(enum atscmh_sccc_code_mode *)
- &fe->dtv_property_cache.atscmh_sccc_code_mode_b,
+ &c->atscmh_sccc_code_mode_b,
(enum atscmh_sccc_code_mode *)
- &fe->dtv_property_cache.atscmh_sccc_code_mode_c,
+ &c->atscmh_sccc_code_mode_c,
(enum atscmh_sccc_code_mode *)
- &fe->dtv_property_cache.atscmh_sccc_code_mode_d);
+ &c->atscmh_sccc_code_mode_d);
if (lg_fail(ret))
goto fail;
}
#if 0
ret = lg216x_read_fic_err_count(state,
- (u8 *)&fe->dtv_property_cache.atscmh_fic_err);
+ (u8 *)&c->atscmh_fic_err);
if (lg_fail(ret))
goto fail;
ret = lg216x_read_crc_err_count(state,
- &fe->dtv_property_cache.atscmh_crc_err);
+ &c->atscmh_crc_err);
if (lg_fail(ret))
goto fail;
ret = lg216x_read_rs_err_count(state,
- &fe->dtv_property_cache.atscmh_rs_err);
+ &c->atscmh_rs_err);
if (lg_fail(ret))
goto fail;
switch (state->cfg->lg_chip) {
case LG2160:
- if (((fe->dtv_property_cache.atscmh_rs_err >= 240) &&
- (fe->dtv_property_cache.atscmh_crc_err >= 240)) &&
+ if (((c->atscmh_rs_err >= 240) &&
+ (c->atscmh_crc_err >= 240)) &&
((jiffies_to_msecs(jiffies) - state->last_reset) > 6000))
ret = lg216x_soft_reset(state);
break;
@@ -1054,14 +1055,17 @@ fail:
static int lg216x_get_property(struct dvb_frontend *fe,
struct dtv_property *tvp)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
return (DTV_ATSCMH_FIC_VER == tvp->cmd) ?
- lg216x_get_frontend(fe) : 0;
+ lg216x_get_frontend(fe, c) : 0;
}
static int lg2160_set_frontend(struct dvb_frontend *fe)
{
struct lg216x_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
lg_dbg("(%d)\n", fe->dtv_property_cache.frequency);
@@ -1129,7 +1133,7 @@ static int lg2160_set_frontend(struct dvb_frontend *fe)
ret = lg216x_enable_fic(state, 1);
lg_fail(ret);
- lg216x_get_frontend(fe);
+ lg216x_get_frontend(fe, c);
fail:
return ret;
}
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 47121866163d..4503e8852fd1 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -812,9 +812,9 @@ fail:
return ret;
}
-static int lgdt3305_get_frontend(struct dvb_frontend *fe)
+static int lgdt3305_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct lgdt3305_state *state = fe->demodulator_priv;
lg_dbg("\n");
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 721fbc07e9ee..179c26e5eb4e 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -1040,10 +1040,10 @@ fail:
return ret;
}
-static int lgdt3306a_get_frontend(struct dvb_frontend *fe)
+static int lgdt3306a_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
struct lgdt3306a_state *state = fe->demodulator_priv;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
dbg_info("(%u, %d)\n",
state->current_frequency, state->current_modulation);
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index cf3cc20510da..96bf254da21e 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -439,10 +439,11 @@ static int lgdt330x_set_parameters(struct dvb_frontend *fe)
return 0;
}
-static int lgdt330x_get_frontend(struct dvb_frontend *fe)
+static int lgdt330x_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct lgdt330x_state *state = fe->demodulator_priv;
+
p->frequency = state->current_frequency;
return 0;
}
diff --git a/drivers/media/dvb-frontends/lgs8gl5.c b/drivers/media/dvb-frontends/lgs8gl5.c
index 7bbb2c18c2dd..fbfd87b5b803 100644
--- a/drivers/media/dvb-frontends/lgs8gl5.c
+++ b/drivers/media/dvb-frontends/lgs8gl5.c
@@ -336,10 +336,11 @@ lgs8gl5_set_frontend(struct dvb_frontend *fe)
static int
-lgs8gl5_get_frontend(struct dvb_frontend *fe)
+lgs8gl5_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct lgs8gl5_state *state = fe->demodulator_priv;
+
u8 inv = lgs8gl5_read_reg(state, REG_INVERSION);
p->inversion = (inv & REG_INVERSION_ON) ? INVERSION_ON : INVERSION_OFF;
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index e2c191c8b196..919daeb96747 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -672,7 +672,7 @@ static int lgs8gxx_write(struct dvb_frontend *fe, const u8 buf[], int len)
static int lgs8gxx_set_fe(struct dvb_frontend *fe)
{
-
+ struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
struct lgs8gxx_state *priv = fe->demodulator_priv;
dprintk("%s\n", __func__);
@@ -689,17 +689,7 @@ static int lgs8gxx_set_fe(struct dvb_frontend *fe)
msleep(10);
- return 0;
-}
-
-static int lgs8gxx_get_fe(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
- dprintk("%s\n", __func__);
-
/* TODO: get real readings from device */
- /* inversion status */
- fe_params->inversion = INVERSION_OFF;
/* bandwidth */
fe_params->bandwidth_hz = 8000000;
@@ -1016,7 +1006,6 @@ static struct dvb_frontend_ops lgs8gxx_ops = {
.i2c_gate_ctrl = lgs8gxx_i2c_gate_ctrl,
.set_frontend = lgs8gxx_set_fe,
- .get_frontend = lgs8gxx_get_fe,
.get_tune_settings = lgs8gxx_get_tune_settings,
.read_status = lgs8gxx_read_status,
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index ce73a5ec6036..76883600ec6f 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -791,11 +791,11 @@ err:
return ret;
}
-static int m88ds3103_get_frontend(struct dvb_frontend *fe)
+static int m88ds3103_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct m88ds3103_dev *dev = fe->demodulator_priv;
struct i2c_client *client = dev->client;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[3];
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index 9b6f464c48bd..a09b12313a73 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -708,10 +708,11 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int m88rs2000_get_frontend(struct dvb_frontend *fe)
+static int m88rs2000_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct m88rs2000_state *state = fe->demodulator_priv;
+
c->fec_inner = state->fec_inner;
c->frequency = state->tuner_frequency;
c->symbol_rate = state->symbol_rate;
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index cfc005ee11d8..fb88dddaf3a3 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2028,16 +2028,6 @@ static int mb86a20s_read_signal_strength_from_cache(struct dvb_frontend *fe,
return 0;
}
-static int mb86a20s_get_frontend_dummy(struct dvb_frontend *fe)
-{
- /*
- * get_frontend is now handled together with other stats
- * retrieval, when read_status() is called, as some statistics
- * will depend on the layers detection.
- */
- return 0;
-};
-
static int mb86a20s_tune(struct dvb_frontend *fe,
bool re_tune,
unsigned int mode_flags,
@@ -2136,7 +2126,6 @@ static struct dvb_frontend_ops mb86a20s_ops = {
.init = mb86a20s_initfe,
.set_frontend = mb86a20s_set_frontend,
- .get_frontend = mb86a20s_get_frontend_dummy,
.read_status = mb86a20s_read_status_and_stats,
.read_signal_strength = mb86a20s_read_signal_strength_from_cache,
.tune = mb86a20s_tune,
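
The mn88473 rework that follows (the driver also graduates out of staging here) programs the IF as a 24-bit fixed-point fraction of the demod clock, reg = round(if_frequency * 2^24 / clk), via DIV_ROUND_CLOSEST_ULL(). A worked sketch, assuming an illustrative 25 MHz clock and 5 MHz IF:

    #include <linux/math64.h>

    static void demo_if_regs(u32 if_frequency, u32 clk, u8 if_val[3])
    {
            /* e.g. a 5 MHz IF on a 25 MHz clock gives 2^24 / 5 = 0x333333 */
            u32 uitmp = DIV_ROUND_CLOSEST_ULL((u64)if_frequency * 0x1000000, clk);

            if_val[0] = (uitmp >> 16) & 0xff;
            if_val[1] = (uitmp >> 8) & 0xff;
            if_val[2] = (uitmp >> 0) & 0xff;
    }
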
diff --git a/drivers/staging/media/mn88473/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index a222e99935d2..6c5d592161d4 100644
--- a/drivers/staging/media/mn88473/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -29,21 +29,17 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
struct mn88473_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
+ unsigned int uitmp;
u32 if_frequency;
- u64 tmp;
- u8 delivery_system_val, if_val[3], bw_val[7];
+ u8 delivery_system_val, if_val[3], *conf_val_ptr;
+ u8 reg_bank2_2d_val, reg_bank0_d2_val;
dev_dbg(&client->dev,
"delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%d stream_id=%d\n",
- c->delivery_system,
- c->modulation,
- c->frequency,
- c->bandwidth_hz,
- c->symbol_rate,
- c->inversion,
- c->stream_id);
-
- if (!dev->warm) {
+ c->delivery_system, c->modulation, c->frequency,
+ c->bandwidth_hz, c->symbol_rate, c->inversion, c->stream_id);
+
+ if (!dev->active) {
ret = -EAGAIN;
goto err;
}
@@ -51,30 +47,50 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_DVBT:
delivery_system_val = 0x02;
+ reg_bank2_2d_val = 0x23;
+ reg_bank0_d2_val = 0x2a;
break;
case SYS_DVBT2:
delivery_system_val = 0x03;
+ reg_bank2_2d_val = 0x3b;
+ reg_bank0_d2_val = 0x29;
break;
case SYS_DVBC_ANNEX_A:
delivery_system_val = 0x04;
+ reg_bank2_2d_val = 0x3b;
+ reg_bank0_d2_val = 0x29;
break;
default:
ret = -EINVAL;
goto err;
}
- if (c->bandwidth_hz <= 6000000) {
- memcpy(bw_val, "\xe9\x55\x55\x1c\x29\x1c\x29", 7);
- } else if (c->bandwidth_hz <= 7000000) {
- memcpy(bw_val, "\xc8\x00\x00\x17\x0a\x17\x0a", 7);
- } else if (c->bandwidth_hz <= 8000000) {
- memcpy(bw_val, "\xaf\x00\x00\x11\xec\x11\xec", 7);
- } else {
- ret = -EINVAL;
- goto err;
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ switch (c->bandwidth_hz) {
+ case 6000000:
+ conf_val_ptr = "\xe9\x55\x55\x1c\x29\x1c\x29";
+ break;
+ case 7000000:
+ conf_val_ptr = "\xc8\x00\x00\x17\x0a\x17\x0a";
+ break;
+ case 8000000:
+ conf_val_ptr = "\xaf\x00\x00\x11\xec\x11\xec";
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case SYS_DVBC_ANNEX_A:
+ conf_val_ptr = "\x10\xab\x0d\xae\x1d\x9d";
+ break;
+ default:
+ break;
}
- /* program tuner */
+ /* Program tuner */
if (fe->ops.tuner_ops.set_params) {
ret = fe->ops.tuner_ops.set_params(fe);
if (ret)
@@ -86,27 +102,45 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
- dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
+ dev_dbg(&client->dev, "get_if_frequency=%u\n", if_frequency);
} else {
- if_frequency = 0;
+ ret = -EINVAL;
+ goto err;
}
- /* Calculate IF registers ( (1<<24)*IF / Xtal ) */
- tmp = div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2),
- dev->xtal);
- if_val[0] = ((tmp >> 16) & 0xff);
- if_val[1] = ((tmp >> 8) & 0xff);
- if_val[2] = ((tmp >> 0) & 0xff);
+ /* Calculate IF registers */
+ uitmp = DIV_ROUND_CLOSEST_ULL((u64) if_frequency * 0x1000000, dev->clk);
+ if_val[0] = (uitmp >> 16) & 0xff;
+ if_val[1] = (uitmp >> 8) & 0xff;
+ if_val[2] = (uitmp >> 0) & 0xff;
ret = regmap_write(dev->regmap[2], 0x05, 0x00);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0xef, 0x13);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0xf9, 0x13);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x00, 0x18);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x01, 0x01);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x02, 0x21);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x03, delivery_system_val);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x0b, 0x00);
+ if (ret)
+ goto err;
for (i = 0; i < sizeof(if_val); i++) {
ret = regmap_write(dev->regmap[2], 0x10 + i, if_val[i]);
@@ -114,52 +148,85 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
goto err;
}
- for (i = 0; i < sizeof(bw_val); i++) {
- ret = regmap_write(dev->regmap[2], 0x13 + i, bw_val[i]);
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ for (i = 0; i < 7; i++) {
+ ret = regmap_write(dev->regmap[2], 0x13 + i,
+ conf_val_ptr[i]);
+ if (ret)
+ goto err;
+ }
+ break;
+ case SYS_DVBC_ANNEX_A:
+ ret = regmap_bulk_write(dev->regmap[1], 0x10, conf_val_ptr, 6);
if (ret)
goto err;
+ break;
+ default:
+ break;
}
- ret = regmap_write(dev->regmap[2], 0x2d, 0x3b);
+ ret = regmap_write(dev->regmap[2], 0x2d, reg_bank2_2d_val);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x2e, 0x00);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x56, 0x0d);
- ret = regmap_write(dev->regmap[0], 0x01, 0xba);
- ret = regmap_write(dev->regmap[0], 0x02, 0x13);
- ret = regmap_write(dev->regmap[0], 0x03, 0x80);
- ret = regmap_write(dev->regmap[0], 0x04, 0xba);
- ret = regmap_write(dev->regmap[0], 0x05, 0x91);
- ret = regmap_write(dev->regmap[0], 0x07, 0xe7);
- ret = regmap_write(dev->regmap[0], 0x08, 0x28);
+ if (ret)
+ goto err;
+ ret = regmap_bulk_write(dev->regmap[0], 0x01,
+ "\xba\x13\x80\xba\x91\xdd\xe7\x28", 8);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x0a, 0x1a);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x13, 0x1f);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x19, 0x03);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x1d, 0xb0);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x2a, 0x72);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x2d, 0x00);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x3c, 0x00);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x3f, 0xf8);
- ret = regmap_write(dev->regmap[0], 0x40, 0xf4);
- ret = regmap_write(dev->regmap[0], 0x41, 0x08);
- ret = regmap_write(dev->regmap[0], 0xd2, 0x29);
+ if (ret)
+ goto err;
+ ret = regmap_bulk_write(dev->regmap[0], 0x40, "\xf4\x08", 2);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[0], 0xd2, reg_bank0_d2_val);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0xd4, 0x55);
- ret = regmap_write(dev->regmap[1], 0x10, 0x10);
- ret = regmap_write(dev->regmap[1], 0x11, 0xab);
- ret = regmap_write(dev->regmap[1], 0x12, 0x0d);
- ret = regmap_write(dev->regmap[1], 0x13, 0xae);
- ret = regmap_write(dev->regmap[1], 0x14, 0x1d);
- ret = regmap_write(dev->regmap[1], 0x15, 0x9d);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[1], 0xbe, 0x08);
- ret = regmap_write(dev->regmap[2], 0x09, 0x08);
- ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0xb2, 0x37);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0xd7, 0x04);
- ret = regmap_write(dev->regmap[2], 0x32, 0x80);
- ret = regmap_write(dev->regmap[2], 0x36, 0x00);
- ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
if (ret)
goto err;
- dev->delivery_system = c->delivery_system;
+ /* Reset FSM */
+ ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
+ if (ret)
+ goto err;
return 0;
err:
@@ -173,51 +240,61 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
struct mn88473_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- unsigned int utmp;
- int lock = 0;
-
- *status = 0;
+ unsigned int uitmp;
- if (!dev->warm) {
+ if (!dev->active) {
ret = -EAGAIN;
goto err;
}
+ *status = 0;
+
switch (c->delivery_system) {
case SYS_DVBT:
- ret = regmap_read(dev->regmap[0], 0x62, &utmp);
+ ret = regmap_read(dev->regmap[0], 0x62, &uitmp);
if (ret)
goto err;
- if (!(utmp & 0xA0)) {
- if ((utmp & 0xF) >= 0x03)
- *status |= FE_HAS_SIGNAL;
- if ((utmp & 0xF) >= 0x09)
- lock = 1;
+
+ if (!(uitmp & 0xa0)) {
+ if ((uitmp & 0x0f) >= 0x09)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ else if ((uitmp & 0x0f) >= 0x03)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
}
break;
case SYS_DVBT2:
- ret = regmap_read(dev->regmap[2], 0x8B, &utmp);
+ ret = regmap_read(dev->regmap[2], 0x8b, &uitmp);
if (ret)
goto err;
- if (!(utmp & 0x40)) {
- if ((utmp & 0xF) >= 0x07)
- *status |= FE_HAS_SIGNAL;
- if ((utmp & 0xF) >= 0x0a)
- *status |= FE_HAS_CARRIER;
- if ((utmp & 0xF) >= 0x0d)
- *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+
+ if (!(uitmp & 0x40)) {
+ if ((uitmp & 0x0f) >= 0x0d)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ else if ((uitmp & 0x0f) >= 0x0a)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI;
+ else if ((uitmp & 0x0f) >= 0x07)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
}
break;
case SYS_DVBC_ANNEX_A:
- ret = regmap_read(dev->regmap[1], 0x85, &utmp);
+ ret = regmap_read(dev->regmap[1], 0x85, &uitmp);
if (ret)
goto err;
- if (!(utmp & 0x40)) {
- ret = regmap_read(dev->regmap[1], 0x89, &utmp);
+
+ if (!(uitmp & 0x40)) {
+ ret = regmap_read(dev->regmap[1], 0x89, &uitmp);
if (ret)
goto err;
- if (utmp & 0x01)
- lock = 1;
+
+ if (uitmp & 0x01)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
}
break;
default:
@@ -225,10 +302,6 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
goto err;
}
- if (lock)
- *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
- FE_HAS_SYNC | FE_HAS_LOCK;
-
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -239,85 +312,76 @@ static int mn88473_init(struct dvb_frontend *fe)
{
struct i2c_client *client = fe->demodulator_priv;
struct mn88473_dev *dev = i2c_get_clientdata(client);
- int ret, len, remaining;
- const struct firmware *fw = NULL;
- u8 *fw_file = MN88473_FIRMWARE;
- unsigned int tmp;
+ int ret, len, remain;
+ unsigned int uitmp;
+ const struct firmware *fw;
+ const char *name = MN88473_FIRMWARE;
dev_dbg(&client->dev, "\n");
- /* set cold state by default */
- dev->warm = false;
-
- /* check if firmware is already running */
- ret = regmap_read(dev->regmap[0], 0xf5, &tmp);
+ /* Check if firmware is already running */
+ ret = regmap_read(dev->regmap[0], 0xf5, &uitmp);
if (ret)
goto err;
- if (!(tmp & 0x1)) {
- dev_info(&client->dev, "firmware already running\n");
- dev->warm = true;
- return 0;
- }
+ if (!(uitmp & 0x01))
+ goto warm;
- /* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, &client->dev);
+ /* Request the firmware, this will block and timeout */
+ ret = request_firmware(&fw, name, &client->dev);
if (ret) {
- dev_err(&client->dev, "firmare file '%s' not found\n", fw_file);
- goto err_request_firmware;
+ dev_err(&client->dev, "firmware file '%s' not found\n", name);
+ goto err;
}
- dev_info(&client->dev, "downloading firmware from file '%s'\n",
- fw_file);
+ dev_info(&client->dev, "downloading firmware from file '%s'\n", name);
ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
if (ret)
- goto err;
-
- for (remaining = fw->size; remaining > 0;
- remaining -= (dev->i2c_wr_max - 1)) {
- len = remaining;
- if (len > (dev->i2c_wr_max - 1))
- len = dev->i2c_wr_max - 1;
+ goto err_release_firmware;
+ for (remain = fw->size; remain > 0; remain -= (dev->i2c_wr_max - 1)) {
+ len = min(dev->i2c_wr_max - 1, remain);
ret = regmap_bulk_write(dev->regmap[0], 0xf6,
- &fw->data[fw->size - remaining], len);
+ &fw->data[fw->size - remain], len);
if (ret) {
- dev_err(&client->dev, "firmware download failed=%d\n",
+ dev_err(&client->dev, "firmware download failed %d\n",
ret);
- goto err;
+ goto err_release_firmware;
}
}
- /* parity check of firmware */
- ret = regmap_read(dev->regmap[0], 0xf8, &tmp);
- if (ret) {
- dev_err(&client->dev,
- "parity reg read failed=%d\n", ret);
+ release_firmware(fw);
+
+ /* Parity check of firmware */
+ ret = regmap_read(dev->regmap[0], 0xf8, &uitmp);
+ if (ret)
goto err;
- }
- if (tmp & 0x10) {
- dev_err(&client->dev,
- "firmware parity check failed=0x%x\n", tmp);
+
+ if (uitmp & 0x10) {
+ dev_err(&client->dev, "firmware parity check failed\n");
+ ret = -EINVAL;
goto err;
}
- dev_err(&client->dev, "firmware parity check succeeded=0x%x\n", tmp);
ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
if (ret)
goto err;
+warm:
+ /* TS config */
+ ret = regmap_write(dev->regmap[2], 0x09, 0x08);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
+ if (ret)
+ goto err;
- release_firmware(fw);
- fw = NULL;
-
- /* warm state */
- dev->warm = true;
+ dev->active = true;
return 0;
-
-err:
+err_release_firmware:
release_firmware(fw);
-err_request_firmware:
+err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
@@ -330,20 +394,20 @@ static int mn88473_sleep(struct dvb_frontend *fe)
dev_dbg(&client->dev, "\n");
+ dev->active = false;
+
ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
if (ret)
goto err;
- dev->delivery_system = SYS_UNDEFINED;
-
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-static struct dvb_frontend_ops mn88473_ops = {
- .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_AC},
+static const struct dvb_frontend_ops mn88473_ops = {
+ .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
.info = {
.name = "Panasonic MN88473",
.symbol_rate_min = 1000000,
@@ -365,8 +429,7 @@ static struct dvb_frontend_ops mn88473_ops = {
FE_CAN_GUARD_INTERVAL_AUTO |
FE_CAN_HIERARCHY_AUTO |
FE_CAN_MUTE_TS |
- FE_CAN_2G_MODULATION |
- FE_CAN_MULTISTREAM
+ FE_CAN_2G_MODULATION
},
.get_tune_settings = mn88473_get_tune_settings,
@@ -385,7 +448,7 @@ static int mn88473_probe(struct i2c_client *client,
struct mn88473_config *config = client->dev.platform_data;
struct mn88473_dev *dev;
int ret;
- unsigned int utmp;
+ unsigned int uitmp;
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -393,7 +456,7 @@ static int mn88473_probe(struct i2c_client *client,
dev_dbg(&client->dev, "\n");
- /* Caller really need to provide pointer for frontend we create. */
+ /* Caller really needs to provide a pointer for the frontend we create */
if (config->fe == NULL) {
dev_err(&client->dev, "frontend pointer not defined\n");
ret = -EINVAL;
@@ -406,11 +469,15 @@ static int mn88473_probe(struct i2c_client *client,
goto err;
}
- dev->i2c_wr_max = config->i2c_wr_max;
- if (!config->xtal)
- dev->xtal = 25000000;
+ if (config->i2c_wr_max)
+ dev->i2c_wr_max = config->i2c_wr_max;
else
- dev->xtal = config->xtal;
+ dev->i2c_wr_max = ~0;
+
+ if (config->xtal)
+ dev->clk = config->xtal;
+ else
+ dev->clk = 25000000;
dev->client[0] = client;
dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
if (IS_ERR(dev->regmap[0])) {
@@ -418,15 +485,25 @@ static int mn88473_probe(struct i2c_client *client,
goto err_kfree;
}
- /* check demod answers to I2C */
- ret = regmap_read(dev->regmap[0], 0x00, &utmp);
+ /* Check demod answers with correct chip id */
+ ret = regmap_read(dev->regmap[0], 0xff, &uitmp);
if (ret)
goto err_regmap_0_regmap_exit;
+ dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
+
+ if (uitmp != 0x03) {
+ ret = -ENODEV;
+ goto err_regmap_0_regmap_exit;
+ }
+
/*
- * Chip has three I2C addresses for different register pages. Used
+ * Chip has three I2C addresses for different register banks. Used
* addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
- * 0x1a and 0x1c, in order to get own I2C client for each register page.
+ * 0x1a and 0x1c, in order to get own I2C client for each register bank.
+ *
+ * Also, register bank 2 does not support sequential I/O; only single
+ * register writes and reads are allowed to that bank.
*/
dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
if (dev->client[1] == NULL) {
@@ -456,13 +533,19 @@ static int mn88473_probe(struct i2c_client *client,
}
i2c_set_clientdata(dev->client[2], dev);
- /* create dvb_frontend */
- memcpy(&dev->fe.ops, &mn88473_ops, sizeof(struct dvb_frontend_ops));
- dev->fe.demodulator_priv = client;
- *config->fe = &dev->fe;
+ /* Sleep because chip is active by default */
+ ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
+ if (ret)
+ goto err_client_2_i2c_unregister_device;
+
+ /* Create dvb frontend */
+ memcpy(&dev->frontend.ops, &mn88473_ops, sizeof(dev->frontend.ops));
+ dev->frontend.demodulator_priv = client;
+ *config->fe = &dev->frontend;
i2c_set_clientdata(client, dev);
- dev_info(&dev->client[0]->dev, "Panasonic MN88473 successfully attached\n");
+ dev_info(&client->dev, "Panasonic MN88473 successfully identified\n");
+
return 0;
err_client_2_i2c_unregister_device:
@@ -507,7 +590,8 @@ MODULE_DEVICE_TABLE(i2c, mn88473_id_table);
static struct i2c_driver mn88473_driver = {
.driver = {
- .name = "mn88473",
+ .name = "mn88473",
+ .suppress_bind_attrs = true,
},
.probe = mn88473_probe,
.remove = mn88473_remove,
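
The IF programming above packs round((1 << 24) * IF / clk) into three register bytes; DIV_ROUND_CLOSEST_ULL() replaces the old open-coded "add clk/2, then divide" rounding. A minimal standalone sketch of the same math, assuming the driver's default 25 MHz clock and an illustrative 5.07 MHz IF (both values are examples, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t if_frequency = 5070000;        /* example IF, Hz */
        uint64_t clk = 25000000;                /* driver default xtal, Hz */
        /* round((1 << 24) * IF / clk), as DIV_ROUND_CLOSEST_ULL() does */
        uint32_t uitmp = (uint32_t)((if_frequency * 0x1000000ULL + clk / 2) / clk);
        uint8_t if_val[3] = {
                (uitmp >> 16) & 0xff,
                (uitmp >> 8) & 0xff,
                (uitmp >> 0) & 0xff,
        };

        printf("IF regs: 0x%02x 0x%02x 0x%02x\n", if_val[0], if_val[1], if_val[2]);
        return 0;
}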
diff --git a/drivers/media/dvb-frontends/mn88473.h b/drivers/media/dvb-frontends/mn88473.h
index c717ebed0e03..2aa5181f3033 100644
--- a/drivers/media/dvb-frontends/mn88473.h
+++ b/drivers/media/dvb-frontends/mn88473.h
@@ -22,10 +22,16 @@
struct mn88473_config {
/*
* Max num of bytes given I2C adapter could write at once.
- * Default: none
+ * Default: unlimited
*/
u16 i2c_wr_max;
+ /*
+ * Xtal frequency Hz.
+ * Default: 25000000
+ */
+ u32 xtal;
+
/* Everything after that is returned by the driver. */
@@ -33,12 +39,6 @@ struct mn88473_config {
* DVB frontend.
*/
struct dvb_frontend **fe;
-
- /*
- * Xtal frequency.
- * Hz
- */
- u32 xtal;
};
#endif
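
With the defaults documented above (i2c_wr_max 0 meaning "unlimited", xtal 0 meaning 25 MHz), a bridge only has to fill in what differs. A hypothetical platform-data block (the 22-byte write limit is illustrative, not from this patch):

static struct dvb_frontend *fe;

static struct mn88473_config mn88473_config = {
        .i2c_wr_max = 22,       /* e.g. a USB bridge capped at 22-byte writes */
        /* .xtal left 0: driver falls back to 25000000 Hz */
        .fe = &fe,
};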
diff --git a/drivers/staging/media/mn88473/mn88473_priv.h b/drivers/media/dvb-frontends/mn88473_priv.h
index 54beb4241ccf..e6c65893e451 100644
--- a/drivers/staging/media/mn88473/mn88473_priv.h
+++ b/drivers/media/dvb-frontends/mn88473_priv.h
@@ -27,11 +27,10 @@
struct mn88473_dev {
struct i2c_client *client[3];
struct regmap *regmap[3];
- struct dvb_frontend fe;
+ struct dvb_frontend frontend;
u16 i2c_wr_max;
- enum fe_delivery_system delivery_system;
- bool warm; /* FW running */
- u32 xtal;
+ bool active;
+ u32 clk;
};
#endif
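
The firmware download in mn88473_init() above is split into (i2c_wr_max - 1)-byte chunks because one byte of each I2C transfer is consumed by the register address. The chunking, restated standalone with the bulk write stubbed out:

static void push_firmware(const unsigned char *data, int size, int i2c_wr_max)
{
        int remain, len;

        for (remain = size; remain > 0; remain -= len) {
                /* last chunk may be shorter than the I2C write limit */
                len = i2c_wr_max - 1;
                if (len > remain)
                        len = remain;
                /* regmap_bulk_write(regmap[0], 0xf6, &data[size - remain], len); */
        }
}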
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index c36e6764eead..fc08429c99b7 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -647,9 +647,9 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int mt312_get_frontend(struct dvb_frontend *fe)
+static int mt312_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mt312_state *state = fe->demodulator_priv;
int ret;
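
mt312 is the first conversion shown here of the new get_frontend() prototype: the DVB core now hands in the dtv_frontend_properties to fill, instead of each driver dereferencing fe->dtv_property_cache itself. Every converted driver in this series follows the same shape (my_state and its cached fields are placeholders):

static int my_get_frontend(struct dvb_frontend *fe,
                           struct dtv_frontend_properties *p)
{
        struct my_state *state = fe->demodulator_priv;

        /* report the values cached at set_frontend() time */
        p->frequency = state->frequency;
        p->symbol_rate = state->symbol_rate;
        return 0;
}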
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index 123bb2f8e4b6..c0bb6328956b 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -311,9 +311,9 @@ static int mt352_set_parameters(struct dvb_frontend *fe)
return 0;
}
-static int mt352_get_parameters(struct dvb_frontend* fe)
+static int mt352_get_parameters(struct dvb_frontend* fe,
+ struct dtv_frontend_properties *op)
{
- struct dtv_frontend_properties *op = &fe->dtv_property_cache;
struct mt352_state* state = fe->demodulator_priv;
u16 tps;
u16 div;
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index 35b1053b3640..a165af990672 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -375,9 +375,9 @@ static int or51132_set_parameters(struct dvb_frontend *fe)
return 0;
}
-static int or51132_get_parameters(struct dvb_frontend* fe)
+static int or51132_get_parameters(struct dvb_frontend* fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct or51132_state* state = fe->demodulator_priv;
int status;
int retry = 1;
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index b792f305cf15..3f96429af0e5 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -279,11 +279,11 @@ err:
return ret;
}
-static int rtl2830_get_frontend(struct dvb_frontend *fe)
+static int rtl2830_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct i2c_client *client = fe->demodulator_priv;
struct rtl2830_dev *dev = i2c_get_clientdata(client);
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[3];
@@ -900,6 +900,9 @@ static int rtl2830_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
+ /* stop statistics polling */
+ cancel_delayed_work_sync(&dev->stat_work);
+
i2c_del_mux_adapter(dev->adapter);
regmap_exit(dev->regmap);
kfree(dev);
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 10f2119935da..7c96f7679669 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -347,6 +347,10 @@ static int rtl2832_init(struct dvb_frontend *fe)
dev_dbg(&client->dev, "\n");
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
+ if (ret)
+ goto err;
+
for (i = 0; i < ARRAY_SIZE(rtl2832_initial_regs); i++) {
ret = rtl2832_wr_demod_reg(dev, rtl2832_initial_regs[i].reg,
rtl2832_initial_regs[i].value);
@@ -404,8 +408,6 @@ static int rtl2832_init(struct dvb_frontend *fe)
c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
c->post_bit_count.len = 1;
c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- /* start statistics polling */
- schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
dev->sleeping = false;
return 0;
@@ -423,8 +425,6 @@ static int rtl2832_sleep(struct dvb_frontend *fe)
dev_dbg(&client->dev, "\n");
dev->sleeping = true;
- /* stop statistics polling */
- cancel_delayed_work_sync(&dev->stat_work);
dev->fe_status = 0;
ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
@@ -491,11 +491,6 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
- /* PIP mode related */
- ret = rtl2832_bulk_write(client, 0x192, "\x00\x0f\xff", 3);
- if (ret)
- goto err;
-
/* If the frontend has get_if_frequency(), use it */
if (fe->ops.tuner_ops.get_if_frequency) {
u32 if_freq;
@@ -575,11 +570,11 @@ err:
return ret;
}
-static int rtl2832_get_frontend(struct dvb_frontend *fe)
+static int rtl2832_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct rtl2832_dev *dev = fe->demodulator_priv;
struct i2c_client *client = dev->client;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[3];
@@ -692,8 +687,11 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct rtl2832_dev *dev = fe->demodulator_priv;
struct i2c_client *client = dev->client;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u32 uninitialized_var(tmp);
+ u8 u8tmp, buf[2];
+ u16 u16tmp;
dev_dbg(&client->dev, "\n");
@@ -714,45 +712,6 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
}
dev->fe_status = *status;
- return 0;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int rtl2832_read_snr(struct dvb_frontend *fe, u16 *snr)
-{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-
- /* report SNR in resolution of 0.1 dB */
- if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
- *snr = div_s64(c->cnr.stat[0].svalue, 100);
- else
- *snr = 0;
-
- return 0;
-}
-
-static int rtl2832_read_ber(struct dvb_frontend *fe, u32 *ber)
-{
- struct rtl2832_dev *dev = fe->demodulator_priv;
-
- *ber = (dev->post_bit_error - dev->post_bit_error_prev);
- dev->post_bit_error_prev = dev->post_bit_error;
-
- return 0;
-}
-
-static void rtl2832_stat_work(struct work_struct *work)
-{
- struct rtl2832_dev *dev = container_of(work, struct rtl2832_dev, stat_work.work);
- struct i2c_client *client = dev->client;
- struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
- int ret, tmp;
- u8 u8tmp, buf[2];
- u16 u16tmp;
-
- dev_dbg(&client->dev, "\n");
/* signal strength */
if (dev->fe_status & FE_HAS_SIGNAL) {
@@ -789,11 +748,11 @@ static void rtl2832_stat_work(struct work_struct *work)
constellation = (u8tmp >> 2) & 0x03; /* [3:2] */
if (constellation > CONSTELLATION_NUM - 1)
- goto err_schedule_delayed_work;
+ goto err;
hierarchy = (u8tmp >> 4) & 0x07; /* [6:4] */
if (hierarchy > HIERARCHY_NUM - 1)
- goto err_schedule_delayed_work;
+ goto err;
ret = rtl2832_bulk_read(client, 0x40c, buf, 2);
if (ret)
@@ -835,11 +794,33 @@ static void rtl2832_stat_work(struct work_struct *work)
c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
-err_schedule_delayed_work:
- schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
- return;
+ return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int rtl2832_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ /* report SNR in resolution of 0.1 dB */
+ if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
+ *snr = div_s64(c->cnr.stat[0].svalue, 100);
+ else
+ *snr = 0;
+
+ return 0;
+}
+
+static int rtl2832_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+
+ *ber = (dev->post_bit_error - dev->post_bit_error_prev);
+ dev->post_bit_error_prev = dev->post_bit_error;
+
+ return 0;
}
/*
@@ -1081,37 +1062,46 @@ static struct i2c_adapter *rtl2832_get_i2c_adapter(struct i2c_client *client)
return dev->i2c_adapter_tuner;
}
-static int rtl2832_enable_slave_ts(struct i2c_client *client)
+static int rtl2832_slave_ts_ctrl(struct i2c_client *client, bool enable)
{
struct rtl2832_dev *dev = i2c_get_clientdata(client);
int ret;
- dev_dbg(&client->dev, "\n");
-
- ret = rtl2832_bulk_write(client, 0x10c, "\x5f\xff", 2);
- if (ret)
- goto err;
-
- ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x1);
- if (ret)
- goto err;
+ dev_dbg(&client->dev, "enable=%d\n", enable);
- ret = rtl2832_bulk_write(client, 0x0bc, "\x18", 1);
- if (ret)
- goto err;
-
- ret = rtl2832_bulk_write(client, 0x192, "\x7f\xf7\xff", 3);
- if (ret)
- goto err;
-
- /* soft reset */
- ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
- if (ret)
- goto err;
-
- ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
- if (ret)
- goto err;
+ if (enable) {
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x10c, "\x5f\xff", 2);
+ if (ret)
+ goto err;
+ ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x1);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x0bc, "\x18", 1);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x192, "\x7f\xf7\xff", 3);
+ if (ret)
+ goto err;
+ } else {
+ ret = rtl2832_bulk_write(client, 0x192, "\x00\x0f\xff", 3);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x0bc, "\x08", 1);
+ if (ret)
+ goto err;
+ ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x0);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x10c, "\x00\x00", 2);
+ if (ret)
+ goto err;
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
+ if (ret)
+ goto err;
+ }
return 0;
err:
@@ -1227,7 +1217,6 @@ static int rtl2832_probe(struct i2c_client *client,
dev->pdata = client->dev.platform_data;
dev->sleeping = true;
INIT_DELAYED_WORK(&dev->i2c_gate_work, rtl2832_i2c_gate_work);
- INIT_DELAYED_WORK(&dev->stat_work, rtl2832_stat_work);
/* create regmap */
mutex_init(&dev->regmap_mutex);
dev->regmap_config.reg_bits = 8,
@@ -1267,7 +1256,7 @@ static int rtl2832_probe(struct i2c_client *client,
/* setup callbacks */
pdata->get_dvb_frontend = rtl2832_get_dvb_frontend;
pdata->get_i2c_adapter = rtl2832_get_i2c_adapter;
- pdata->enable_slave_ts = rtl2832_enable_slave_ts;
+ pdata->slave_ts_ctrl = rtl2832_slave_ts_ctrl;
pdata->pid_filter = rtl2832_pid_filter;
pdata->pid_filter_ctrl = rtl2832_pid_filter_ctrl;
pdata->bulk_read = rtl2832_bulk_read;
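
Folding rtl2832_stat_work() into read_status() above means statistics refresh whenever userspace polls status, instead of on a fixed 2-second timer, and the DVBv3 read_ber() keeps working as a delta of the counter accumulated there. That delta, in isolation:

static int read_ber_delta(struct rtl2832_dev *dev, u32 *ber)
{
        /* bit errors accumulated since the previous read_ber() call */
        *ber = dev->post_bit_error - dev->post_bit_error_prev;
        dev->post_bit_error_prev = dev->post_bit_error;
        return 0;
}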
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index c29a4c2bf71a..6390af64cf45 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -31,7 +31,7 @@
* @tuner: Used tuner model.
* @get_dvb_frontend: Get DVB frontend.
* @get_i2c_adapter: Get I2C adapter.
- * @enable_slave_ts: Enable slave TS IF.
+ * @slave_ts_ctrl: Control slave TS interface.
* @pid_filter: Set PID to PID filter.
* @pid_filter_ctrl: Control PID filter.
*/
@@ -53,7 +53,7 @@ struct rtl2832_platform_data {
struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
struct i2c_adapter* (*get_i2c_adapter)(struct i2c_client *);
- int (*enable_slave_ts)(struct i2c_client *);
+ int (*slave_ts_ctrl)(struct i2c_client *, bool);
int (*pid_filter)(struct dvb_frontend *, u8, u16, int);
int (*pid_filter_ctrl)(struct dvb_frontend *, int);
/* private: Register access for SDR module use only */
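
A hypothetical bridge-side caller of the renamed hook, showing why the bool matters: the old enable_slave_ts() could only switch the slave TS interface on, whereas slave_ts_ctrl() can also tear it down when the slave demod sleeps (the wrapper name is illustrative):

static int bridge_slave_ts(struct rtl2832_platform_data *pdata,
                           struct i2c_client *demod, bool enable)
{
        if (!pdata->slave_ts_ctrl)
                return -EOPNOTSUPP;
        return pdata->slave_ts_ctrl(demod, enable);
}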
diff --git a/drivers/media/dvb-frontends/rtl2832_priv.h b/drivers/media/dvb-frontends/rtl2832_priv.h
index 5dcd3a41d23f..6b875f462f8b 100644
--- a/drivers/media/dvb-frontends/rtl2832_priv.h
+++ b/drivers/media/dvb-frontends/rtl2832_priv.h
@@ -38,7 +38,6 @@ struct rtl2832_dev {
struct regmap *regmap;
struct i2c_adapter *i2c_adapter_tuner;
struct dvb_frontend fe;
- struct delayed_work stat_work;
enum fe_status fe_status;
u64 post_bit_error_prev; /* for old DVBv3 read_ber() calculation */
u64 post_bit_error;
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index 10964848a2f1..c68965ad97c0 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -925,9 +925,9 @@ static int s5h1409_read_ber(struct dvb_frontend *fe, u32 *ber)
return s5h1409_read_ucblocks(fe, ber);
}
-static int s5h1409_get_frontend(struct dvb_frontend *fe)
+static int s5h1409_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s5h1409_state *state = fe->demodulator_priv;
p->frequency = state->current_frequency;
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index 9afc3f42290e..90f86e82b087 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -840,9 +840,9 @@ static int s5h1411_read_ber(struct dvb_frontend *fe, u32 *ber)
return s5h1411_read_ucblocks(fe, ber);
}
-static int s5h1411_get_frontend(struct dvb_frontend *fe)
+static int s5h1411_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s5h1411_state *state = fe->demodulator_priv;
p->frequency = state->current_frequency;
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index 9c22a4c70d87..d7d0b7d57ad7 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -756,9 +756,9 @@ static int s5h1420_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int s5h1420_get_frontend(struct dvb_frontend* fe)
+static int s5h1420_get_frontend(struct dvb_frontend* fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s5h1420_state* state = fe->demodulator_priv;
p->frequency = state->tunedfreq + s5h1420_getfreqoffset(state);
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index d6a8fa63040b..b5e3d90eba5e 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -433,9 +433,9 @@ static int s921_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int s921_get_frontend(struct dvb_frontend *fe)
+static int s921_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct s921_state *state = fe->demodulator_priv;
/* FIXME: Probably it is possible to get it from regs f1 and f2 */
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 2b93241d4bc1..8bf716a8ea58 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -225,22 +225,18 @@ static int si2165_writereg32(struct si2165_state *state, const u16 reg, u32 val)
static int si2165_writereg_mask8(struct si2165_state *state, const u16 reg,
u8 val, u8 mask)
{
- int ret;
- u8 tmp;
-
if (mask != 0xff) {
- ret = si2165_readreg8(state, reg, &tmp);
+ u8 tmp;
+ int ret = si2165_readreg8(state, reg, &tmp);
+
if (ret < 0)
- goto err;
+ return ret;
val &= mask;
tmp &= ~mask;
val |= tmp;
}
-
- ret = si2165_writereg8(state, reg, val);
-err:
- return ret;
+ return si2165_writereg8(state, reg, val);
}
#define REG16(reg, val) { (reg), (val) & 0xff }, { (reg)+1, (val)>>8 & 0xff }
@@ -825,19 +821,19 @@ static int si2165_set_frontend_dvbt(struct dvb_frontend *fe)
struct si2165_state *state = fe->demodulator_priv;
u32 dvb_rate = 0;
u16 bw10k;
+ u32 bw_hz = p->bandwidth_hz;
dprintk("%s: called\n", __func__);
if (!state->has_dvbt)
return -EINVAL;
- if (p->bandwidth_hz > 0) {
- dvb_rate = p->bandwidth_hz * 8 / 7;
- bw10k = p->bandwidth_hz / 10000;
- } else {
- dvb_rate = 8 * 8 / 7;
- bw10k = 800;
- }
+ /* no bandwidth auto-detection */
+ if (bw_hz == 0)
+ return -EINVAL;
+
+ dvb_rate = bw_hz * 8 / 7;
+ bw10k = bw_hz / 10000;
ret = si2165_adjust_pll_divl(state, 12);
if (ret < 0)
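
The si2165 helper above is a standard read-modify-write: with a partial mask, fetch the register, keep the unmasked bits, and OR in the new masked value. A generic restatement (read_reg()/write_reg() stand in for the driver's accessors):

static int writereg_mask8(u16 reg, u8 val, u8 mask)
{
        if (mask != 0xff) {
                u8 tmp;
                int ret = read_reg(reg, &tmp);

                if (ret < 0)
                        return ret;
                val = (tmp & ~mask) | (val & mask);
        }
        return write_reg(reg, val);
}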
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 756650f154ab..3d171b0e00c2 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -1568,9 +1568,9 @@ static enum dvbfe_search stb0899_search(struct dvb_frontend *fe)
return DVBFE_ALGO_SEARCH_ERROR;
}
-static int stb0899_get_frontend(struct dvb_frontend *fe)
+static int stb0899_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stb0899_state *state = fe->demodulator_priv;
struct stb0899_internal *internal = &state->internal;
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index c978c801c7aa..b9c2511bf019 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -346,7 +346,7 @@ static int stb6100_set_frequency(struct dvb_frontend *fe, u32 frequency)
if (fe->ops.get_frontend) {
dprintk(verbose, FE_DEBUG, 1, "Get frontend parameters");
- fe->ops.get_frontend(fe);
+ fe->ops.get_frontend(fe, p);
}
srate = p->symbol_rate;
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index 75b4d8b25657..81b27b7c0c96 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -615,9 +615,9 @@ timeout:
return 0;
}
-static int stv0297_get_frontend(struct dvb_frontend *fe)
+static int stv0297_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0297_state *state = fe->demodulator_priv;
int reg_00, reg_83;
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index a8177807fb65..7927fa925f2f 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -422,7 +422,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long
if (debug_legacy_dish_switch)
printk ("%s switch command: 0x%04lx\n",__func__, cmd);
- nexttime = ktime_get_real();
+ nexttime = ktime_get_boottime();
if (debug_legacy_dish_switch)
tv[0] = nexttime;
stv0299_writeregI (state, 0x0c, reg0x0c | 0x50); /* set LNB to 18V */
@@ -431,7 +431,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long
for (i=0; i<9; i++) {
if (debug_legacy_dish_switch)
- tv[i+1] = ktime_get_real();
+ tv[i+1] = ktime_get_boottime();
if((cmd & 0x01) != last) {
/* set voltage to (last ? 13V : 18V) */
stv0299_writeregI (state, 0x0c, reg0x0c | (last ? lv_mask : 0x50));
@@ -602,9 +602,9 @@ static int stv0299_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int stv0299_get_frontend(struct dvb_frontend *fe)
+static int stv0299_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0299_state* state = fe->demodulator_priv;
s32 derot_freq;
int invval;
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 44cb73f68af6..abc379aea713 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -1938,9 +1938,9 @@ static int stv0367ter_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int stv0367ter_get_frontend(struct dvb_frontend *fe)
+static int stv0367ter_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
enum stv0367_ter_mode mode;
@@ -3146,9 +3146,9 @@ static int stv0367cab_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int stv0367cab_get_frontend(struct dvb_frontend *fe)
+static int stv0367cab_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index fe31dd541955..f667005a6661 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1087,7 +1087,7 @@ u8 stv0900_get_optim_carr_loop(s32 srate, enum fe_stv0900_modcode modcode,
s32 pilot, u8 chip_id)
{
u8 aclc_value = 0x29;
- s32 i;
+ s32 i, cllas2_size;
const struct stv0900_car_loop_optim *cls2, *cllqs2, *cllas2;
dprintk("%s\n", __func__);
@@ -1096,14 +1096,17 @@ u8 stv0900_get_optim_carr_loop(s32 srate, enum fe_stv0900_modcode modcode,
cls2 = FE_STV0900_S2CarLoop;
cllqs2 = FE_STV0900_S2LowQPCarLoopCut30;
cllas2 = FE_STV0900_S2APSKCarLoopCut30;
+ cllas2_size = ARRAY_SIZE(FE_STV0900_S2APSKCarLoopCut30);
} else if (chip_id == 0x20) {
cls2 = FE_STV0900_S2CarLoopCut20;
cllqs2 = FE_STV0900_S2LowQPCarLoopCut20;
cllas2 = FE_STV0900_S2APSKCarLoopCut20;
+ cllas2_size = ARRAY_SIZE(FE_STV0900_S2APSKCarLoopCut20);
} else {
cls2 = FE_STV0900_S2CarLoopCut30;
cllqs2 = FE_STV0900_S2LowQPCarLoopCut30;
cllas2 = FE_STV0900_S2APSKCarLoopCut30;
+ cllas2_size = ARRAY_SIZE(FE_STV0900_S2APSKCarLoopCut30);
}
if (modcode < STV0900_QPSK_12) {
@@ -1178,7 +1181,7 @@ u8 stv0900_get_optim_carr_loop(s32 srate, enum fe_stv0900_modcode modcode,
aclc_value = cls2[i].car_loop_pilots_off_30;
}
- } else {
+ } else if (i < cllas2_size) {
if (srate <= 3000000)
aclc_value = cllas2[i].car_loop_pilots_on_2;
else if (srate <= 7000000)
@@ -1859,9 +1862,9 @@ static int stv0900_sleep(struct dvb_frontend *fe)
return 0;
}
-static int stv0900_get_frontend(struct dvb_frontend *fe)
+static int stv0900_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0900_state *state = fe->demodulator_priv;
struct stv0900_internal *intp = state->internal;
enum fe_stv0900_demod_num demod = state->demod;
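
The stv0900 change above closes a potential out-of-bounds read: the APSK carrier-loop tables differ in length between chip cuts, so the lookup now records the table length with ARRAY_SIZE() and indexes only when i is in range. The pattern in miniature (table contents are illustrative):

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))      /* as in kernel.h */

static u8 pick_aclc(unsigned int i)
{
        static const struct { u8 car_loop_pilots_on_2; } table[3] = {
                { 0x29 }, { 0x2a }, { 0x2b },   /* illustrative values */
        };
        u8 aclc_value = 0x29;   /* driver default */

        if (i < ARRAY_SIZE(table))      /* the guard this patch adds */
                aclc_value = table[i].car_loop_pilots_on_2;
        return aclc_value;
}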
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index e66154e5c1d7..a62c01e454f5 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -355,7 +355,7 @@ static struct dvb_tuner_ops stv6110x_ops = {
.release = stv6110x_release
};
-static struct stv6110x_devctl stv6110x_ctl = {
+static const struct stv6110x_devctl stv6110x_ctl = {
.tuner_init = stv6110x_init,
.tuner_sleep = stv6110x_sleep,
.tuner_set_mode = stv6110x_set_mode,
@@ -369,7 +369,7 @@ static struct stv6110x_devctl stv6110x_ctl = {
.tuner_get_status = stv6110x_get_status,
};
-struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
const struct stv6110x_config *config,
struct i2c_adapter *i2c)
{
diff --git a/drivers/media/dvb-frontends/stv6110x.h b/drivers/media/dvb-frontends/stv6110x.h
index 9f7eb251aec3..696b6e5b9e7b 100644
--- a/drivers/media/dvb-frontends/stv6110x.h
+++ b/drivers/media/dvb-frontends/stv6110x.h
@@ -55,12 +55,12 @@ struct stv6110x_devctl {
#if IS_REACHABLE(CONFIG_DVB_STV6110x)
-extern struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+extern const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
const struct stv6110x_config *config,
struct i2c_adapter *i2c);
#else
-static inline struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+static inline const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
const struct stv6110x_config *config,
struct i2c_adapter *i2c)
{
diff --git a/drivers/media/dvb-frontends/stv6110x_priv.h b/drivers/media/dvb-frontends/stv6110x_priv.h
index 0ec936a660a7..a993aba27b7e 100644
--- a/drivers/media/dvb-frontends/stv6110x_priv.h
+++ b/drivers/media/dvb-frontends/stv6110x_priv.h
@@ -70,7 +70,7 @@ struct stv6110x_state {
const struct stv6110x_config *config;
u8 regs[8];
- struct stv6110x_devctl *devctl;
+ const struct stv6110x_devctl *devctl;
};
#endif /* __STV6110x_PRIV_H */
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 456cdc7fb1e7..31cd32532387 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -201,10 +201,10 @@ static const enum fe_code_rate fec_conv_sat[] = {
FEC_2_3, /* for 8PSK. (trellis code) */
};
-static int tc90522s_get_frontend(struct dvb_frontend *fe)
+static int tc90522s_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct tc90522_state *state;
- struct dtv_frontend_properties *c;
struct dtv_fe_stats *stats;
int ret, i;
int layers;
@@ -212,7 +212,6 @@ static int tc90522s_get_frontend(struct dvb_frontend *fe)
u32 cndat;
state = fe->demodulator_priv;
- c = &fe->dtv_property_cache;
c->delivery_system = SYS_ISDBS;
c->symbol_rate = 28860000;
@@ -337,10 +336,10 @@ static const enum fe_modulation mod_conv[] = {
DQPSK, QPSK, QAM_16, QAM_64, 0, 0, 0, 0
};
-static int tc90522t_get_frontend(struct dvb_frontend *fe)
+static int tc90522t_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct tc90522_state *state;
- struct dtv_frontend_properties *c;
struct dtv_fe_stats *stats;
int ret, i;
int layers;
@@ -348,7 +347,6 @@ static int tc90522t_get_frontend(struct dvb_frontend *fe)
u32 cndat;
state = fe->demodulator_priv;
- c = &fe->dtv_property_cache;
c->delivery_system = SYS_ISDBT;
c->bandwidth_hz = 6000000;
mode = 1;
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index a684424e665a..806c56691ca5 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -387,9 +387,9 @@ static int tda10021_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int tda10021_get_frontend(struct dvb_frontend *fe)
+static int tda10021_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda10021_state* state = fe->demodulator_priv;
int sync;
s8 afc = 0;
diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
index 44a55656093f..3b8c7e499d0d 100644
--- a/drivers/media/dvb-frontends/tda10023.c
+++ b/drivers/media/dvb-frontends/tda10023.c
@@ -457,9 +457,9 @@ static int tda10023_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int tda10023_get_frontend(struct dvb_frontend *fe)
+static int tda10023_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda10023_state* state = fe->demodulator_priv;
int sync,inv;
s8 afc = 0;
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index 8451086c563f..c2bf89d0b0b0 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1028,9 +1028,9 @@ static int tda10048_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int tda10048_get_frontend(struct dvb_frontend *fe)
+static int tda10048_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda10048_state *state = fe->demodulator_priv;
dprintk(1, "%s()\n", __func__);
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index c6abeb4fba9d..b89848313fb9 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -899,9 +899,9 @@ static int tda1004x_set_fe(struct dvb_frontend *fe)
return 0;
}
-static int tda1004x_get_fe(struct dvb_frontend *fe)
+static int tda1004x_get_fe(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *fe_params)
{
- struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
struct tda1004x_state* state = fe->demodulator_priv;
int status;
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 119d47596ac8..37ebeef2bbd0 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -701,11 +701,11 @@ error:
return ret;
}
-static int tda10071_get_frontend(struct dvb_frontend *fe)
+static int tda10071_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
struct tda10071_dev *dev = fe->demodulator_priv;
struct i2c_client *client = dev->client;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 buf[5], tmp;
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index 95a33e187f8e..31d0acb54fe8 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -459,9 +459,9 @@ static int tda10086_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int tda10086_get_frontend(struct dvb_frontend *fe)
+static int tda10086_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *fe_params)
{
- struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
struct tda10086_state* state = fe->demodulator_priv;
u8 val;
int tmp;
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 796543fa2c8d..9072d6463094 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -342,9 +342,9 @@ static int tda8083_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int tda8083_get_frontend(struct dvb_frontend *fe)
+static int tda8083_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct tda8083_state* state = fe->demodulator_priv;
/* FIXME: get symbolrate & frequency offset...*/
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 7979e5d6498b..14b410ffe612 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -712,6 +712,10 @@ static int ts2020_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
+ /* stop statistics polling */
+ if (!dev->dont_poll)
+ cancel_delayed_work_sync(&dev->stat_work);
+
regmap_exit(dev->regmap);
kfree(dev);
return 0;
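
rtl2830_remove() and ts2020_remove() above gain the same teardown rule: synchronously cancel the statistics work before freeing device state, so a queued poll cannot run against freed memory. ts2020 additionally guards on dont_poll, since in that mode the work was never scheduled:

/* teardown order: stop deferred work first, only then free state */
if (!dev->dont_poll)
        cancel_delayed_work_sync(&dev->stat_work);
regmap_exit(dev->regmap);
kfree(dev);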
diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
index aacfdda3e005..b09fe88c40f8 100644
--- a/drivers/media/dvb-frontends/ves1820.c
+++ b/drivers/media/dvb-frontends/ves1820.c
@@ -312,9 +312,9 @@ static int ves1820_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
return 0;
}
-static int ves1820_get_frontend(struct dvb_frontend *fe)
+static int ves1820_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ves1820_state* state = fe->demodulator_priv;
int sync;
s8 afc = 0;
diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
index 526952396422..ed113e216e14 100644
--- a/drivers/media/dvb-frontends/ves1x93.c
+++ b/drivers/media/dvb-frontends/ves1x93.c
@@ -406,9 +406,9 @@ static int ves1x93_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int ves1x93_get_frontend(struct dvb_frontend *fe)
+static int ves1x93_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct ves1x93_state* state = fe->demodulator_priv;
int afc;
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index ef9764a02d4c..1832c2f7695c 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -371,9 +371,9 @@ static int zl10353_set_parameters(struct dvb_frontend *fe)
return 0;
}
-static int zl10353_get_parameters(struct dvb_frontend *fe)
+static int zl10353_get_parameters(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *c)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct zl10353_state *state = fe->demodulator_priv;
int s6, s9;
u16 tps;
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 471fd23b5c5c..bd822f032b08 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -103,12 +103,14 @@ struct adv7511_state {
u32 ycbcr_enc;
u32 quantization;
u32 xfer_func;
+ u32 content_type;
/* controls */
struct v4l2_ctrl *hdmi_mode_ctrl;
struct v4l2_ctrl *hotplug_ctrl;
struct v4l2_ctrl *rx_sense_ctrl;
struct v4l2_ctrl *have_edid0_ctrl;
struct v4l2_ctrl *rgb_quantization_range_ctrl;
+ struct v4l2_ctrl *content_type_ctrl;
struct i2c_client *i2c_edid;
struct i2c_client *i2c_pktmem;
struct adv7511_state_edid edid;
@@ -400,6 +402,16 @@ static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl)
}
if (state->rgb_quantization_range_ctrl == ctrl)
return adv7511_set_rgb_quantization_mode(sd, ctrl);
+ if (state->content_type_ctrl == ctrl) {
+ u8 itc, cn;
+
+ state->content_type = ctrl->val;
+ itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
+ cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
+ adv7511_wr_and_or(sd, 0x57, 0x7f, itc << 7);
+ adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4);
+ return 0;
+ }
return -EINVAL;
}
@@ -1002,6 +1014,8 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd,
u8 y = HDMI_COLORSPACE_RGB;
u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ u8 itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
+ u8 cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
if (format->pad != 0)
return -EINVAL;
@@ -1115,8 +1129,8 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd,
adv7511_wr_and_or(sd, 0x4a, 0xbf, 0);
adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5);
adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6);
- adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2));
- adv7511_wr_and_or(sd, 0x59, 0x3f, yq << 6);
+ adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
+ adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4));
adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
return 0;
@@ -1161,12 +1175,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
}
}
+static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_edid_detect ed;
+
+ /* We failed to read the EDID, so send an event for this. */
+ ed.present = false;
+ ed.segment = adv7511_rd(sd, 0xc4);
+ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
+}
+
static void adv7511_edid_handler(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
struct v4l2_subdev *sd = &state->sd;
- struct adv7511_edid_detect ed;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
@@ -1191,9 +1216,7 @@ static void adv7511_edid_handler(struct work_struct *work)
}
/* We failed to read the EDID, so send an event for this. */
- ed.present = false;
- ed.segment = adv7511_rd(sd, 0xc4);
- v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ adv7511_notify_no_edid(sd);
v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
}
@@ -1264,7 +1287,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
/* update read only ctrls */
v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
@@ -1294,6 +1316,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
}
adv7511_s_power(sd, false);
memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
+ adv7511_notify_no_edid(sd);
}
}
@@ -1370,6 +1393,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
}
/* one more segment read ok */
state->edid.segments = segment + 1;
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
/* Request next EDID segment */
v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
@@ -1389,7 +1413,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
ed.present = true;
ed.segment = 0;
state->edid_detect_counter++;
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
return ed.present;
}
@@ -1470,6 +1493,10 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
0, V4L2_DV_RGB_RANGE_AUTO);
+ state->content_type_ctrl =
+ v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
+ V4L2_CID_DV_TX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC,
+ 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
sd->ctrl_handler = hdl;
if (hdl->error) {
err = hdl->error;
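
The new V4L2_CID_DV_TX_IT_CONTENT_TYPE handling above packs the AVI InfoFrame fields into two registers: ITC into bit 7 of 0x57 and the two-bit CN code into bits [5:4] of 0x59, with CN forced to GRAPHICS whenever ITC is off. In isolation (adv7511_wr_and_or() is the driver's existing clear-then-OR register helper):

u8 itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
u8 cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;

adv7511_wr_and_or(sd, 0x57, 0x7f, itc << 7);    /* ITC: bit 7 */
adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4);     /* CN: bits 5:4 */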
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index e1719ffdfb3d..41a1bfc5eaa7 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -207,71 +207,22 @@ static bool adv76xx_has_afe(struct adv76xx_state *state)
return state->info->has_afe;
}
-/* Supported CEA and DMT timings */
-static const struct v4l2_dv_timings adv76xx_timings[] = {
- V4L2_DV_BT_CEA_720X480P59_94,
- V4L2_DV_BT_CEA_720X576P50,
- V4L2_DV_BT_CEA_1280X720P24,
- V4L2_DV_BT_CEA_1280X720P25,
- V4L2_DV_BT_CEA_1280X720P50,
- V4L2_DV_BT_CEA_1280X720P60,
- V4L2_DV_BT_CEA_1920X1080P24,
- V4L2_DV_BT_CEA_1920X1080P25,
- V4L2_DV_BT_CEA_1920X1080P30,
- V4L2_DV_BT_CEA_1920X1080P50,
- V4L2_DV_BT_CEA_1920X1080P60,
-
- /* sorted by DMT ID */
- V4L2_DV_BT_DMT_640X350P85,
- V4L2_DV_BT_DMT_640X400P85,
- V4L2_DV_BT_DMT_720X400P85,
- V4L2_DV_BT_DMT_640X480P60,
- V4L2_DV_BT_DMT_640X480P72,
- V4L2_DV_BT_DMT_640X480P75,
- V4L2_DV_BT_DMT_640X480P85,
- V4L2_DV_BT_DMT_800X600P56,
- V4L2_DV_BT_DMT_800X600P60,
- V4L2_DV_BT_DMT_800X600P72,
- V4L2_DV_BT_DMT_800X600P75,
- V4L2_DV_BT_DMT_800X600P85,
- V4L2_DV_BT_DMT_848X480P60,
- V4L2_DV_BT_DMT_1024X768P60,
- V4L2_DV_BT_DMT_1024X768P70,
- V4L2_DV_BT_DMT_1024X768P75,
- V4L2_DV_BT_DMT_1024X768P85,
- V4L2_DV_BT_DMT_1152X864P75,
- V4L2_DV_BT_DMT_1280X768P60_RB,
- V4L2_DV_BT_DMT_1280X768P60,
- V4L2_DV_BT_DMT_1280X768P75,
- V4L2_DV_BT_DMT_1280X768P85,
- V4L2_DV_BT_DMT_1280X800P60_RB,
- V4L2_DV_BT_DMT_1280X800P60,
- V4L2_DV_BT_DMT_1280X800P75,
- V4L2_DV_BT_DMT_1280X800P85,
- V4L2_DV_BT_DMT_1280X960P60,
- V4L2_DV_BT_DMT_1280X960P85,
- V4L2_DV_BT_DMT_1280X1024P60,
- V4L2_DV_BT_DMT_1280X1024P75,
- V4L2_DV_BT_DMT_1280X1024P85,
- V4L2_DV_BT_DMT_1360X768P60,
- V4L2_DV_BT_DMT_1400X1050P60_RB,
- V4L2_DV_BT_DMT_1400X1050P60,
- V4L2_DV_BT_DMT_1400X1050P75,
- V4L2_DV_BT_DMT_1400X1050P85,
- V4L2_DV_BT_DMT_1440X900P60_RB,
- V4L2_DV_BT_DMT_1440X900P60,
- V4L2_DV_BT_DMT_1600X1200P60,
- V4L2_DV_BT_DMT_1680X1050P60_RB,
- V4L2_DV_BT_DMT_1680X1050P60,
- V4L2_DV_BT_DMT_1792X1344P60,
- V4L2_DV_BT_DMT_1856X1392P60,
- V4L2_DV_BT_DMT_1920X1200P60_RB,
- V4L2_DV_BT_DMT_1366X768P60_RB,
- V4L2_DV_BT_DMT_1366X768P60,
- V4L2_DV_BT_DMT_1920X1080P60,
- { },
+/* Unsupported timings. This device cannot support 720p30. */
+static const struct v4l2_dv_timings adv76xx_timings_exceptions[] = {
+ V4L2_DV_BT_CEA_1280X720P30,
+ { }
};
+static bool adv76xx_check_dv_timings(const struct v4l2_dv_timings *t, void *hdl)
+{
+ int i;
+
+ for (i = 0; adv76xx_timings_exceptions[i].bt.width; i++)
+ if (v4l2_match_dv_timings(t, adv76xx_timings_exceptions + i, 0, false))
+ return false;
+ return true;
+}
+
struct adv76xx_video_standards {
struct v4l2_dv_timings timings;
u8 vid_std;
@@ -806,6 +757,36 @@ static inline bool is_digital_input(struct v4l2_subdev *sd)
state->selected_input == ADV7604_PAD_HDMI_PORT_D;
}
+static const struct v4l2_dv_timings_cap adv7604_timings_cap_analog = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM)
+};
+
+static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM)
+};
+
+static inline const struct v4l2_dv_timings_cap *
+adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd)
+{
+ return is_digital_input(sd) ? &adv76xx_timings_cap_digital :
+ &adv7604_timings_cap_analog;
+}
+
+
/* ----------------------------------------------------------------------- */
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1216,6 +1197,20 @@ static int adv76xx_s_ctrl(struct v4l2_ctrl *ctrl)
return -EINVAL;
}
+static int adv76xx_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd =
+ &container_of(ctrl->handler, struct adv76xx_state, hdl)->sd;
+
+ if (ctrl->id == V4L2_CID_DV_RX_IT_CONTENT_TYPE) {
+ ctrl->val = V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
+ if ((io_read(sd, 0x60) & 1) && (infoframe_read(sd, 0x03) & 0x80))
+ ctrl->val = (infoframe_read(sd, 0x05) >> 4) & 3;
+ return 0;
+ }
+ return -EINVAL;
+}
+
/* ----------------------------------------------------------------------- */
static inline bool no_power(struct v4l2_subdev *sd)
@@ -1330,17 +1325,23 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
u32 pix_clk;
int i;
- for (i = 0; adv76xx_timings[i].bt.height; i++) {
- if (vtotal(&adv76xx_timings[i].bt) != stdi->lcf + 1)
+ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
+
+ if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
+ adv76xx_get_dv_timings_cap(sd),
+ adv76xx_check_dv_timings, NULL))
+ continue;
+ if (vtotal(bt) != stdi->lcf + 1)
continue;
- if (adv76xx_timings[i].bt.vsync != stdi->lcvs)
+ if (bt->vsync != stdi->lcvs)
continue;
- pix_clk = hfreq * htotal(&adv76xx_timings[i].bt);
+ pix_clk = hfreq * htotal(bt);
- if ((pix_clk < adv76xx_timings[i].bt.pixelclock + 1000000) &&
- (pix_clk > adv76xx_timings[i].bt.pixelclock - 1000000)) {
- *timings = adv76xx_timings[i];
+ if ((pix_clk < bt->pixelclock + 1000000) &&
+ (pix_clk > bt->pixelclock - 1000000)) {
+ *timings = v4l2_dv_timings_presets[i];
return 0;
}
}
@@ -1425,15 +1426,11 @@ static int adv76xx_enum_dv_timings(struct v4l2_subdev *sd,
{
struct adv76xx_state *state = to_state(sd);
- if (timings->index >= ARRAY_SIZE(adv76xx_timings) - 1)
- return -EINVAL;
-
if (timings->pad >= state->source_pad)
return -EINVAL;
- memset(timings->reserved, 0, sizeof(timings->reserved));
- timings->timings = adv76xx_timings[timings->index];
- return 0;
+ return v4l2_enum_dv_timings_cap(timings,
+ adv76xx_get_dv_timings_cap(sd), adv76xx_check_dv_timings, NULL);
}
static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
@@ -1444,29 +1441,7 @@ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
if (cap->pad >= state->source_pad)
return -EINVAL;
- cap->type = V4L2_DV_BT_656_1120;
- cap->bt.max_width = 1920;
- cap->bt.max_height = 1200;
- cap->bt.min_pixelclock = 25000000;
-
- switch (cap->pad) {
- case ADV76XX_PAD_HDMI_PORT_A:
- case ADV7604_PAD_HDMI_PORT_B:
- case ADV7604_PAD_HDMI_PORT_C:
- case ADV7604_PAD_HDMI_PORT_D:
- cap->bt.max_pixelclock = 225000000;
- break;
- case ADV7604_PAD_VGA_RGB:
- case ADV7604_PAD_VGA_COMP:
- default:
- cap->bt.max_pixelclock = 170000000;
- break;
- }
-
- cap->bt.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
- V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT;
- cap->bt.capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
- V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM;
+ *cap = *adv76xx_get_dv_timings_cap(sd);
return 0;
}
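
Consolidating the capability description into the shared tables pays off across all of these pad ops: enum_dv_timings, dv_timings_cap, timings matching and validation now draw on one source of truth plus the exceptions callback. A condensed sketch of the pattern with hypothetical names, assuming only the v4l2-dv-timings helpers already used in this patch:

	#include <media/v4l2-dv-timings.h>

	static const struct v4l2_dv_timings_cap my_timings_cap = {
		.type = V4L2_DV_BT_656_1120,
		V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
			V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT,
			V4L2_DV_BT_CAP_PROGRESSIVE)
	};

	/* Presets the (hypothetical) hardware cannot handle. */
	static const struct v4l2_dv_timings my_exceptions[] = {
		V4L2_DV_BT_CEA_1280X720P30,
		{ }
	};

	static bool my_check_dv_timings(const struct v4l2_dv_timings *t, void *hdl)
	{
		int i;

		for (i = 0; my_exceptions[i].bt.width; i++)
			if (v4l2_match_dv_timings(t, &my_exceptions[i], 0, false))
				return false;
		return true;
	}

	static int my_enum_dv_timings(struct v4l2_subdev *sd,
				      struct v4l2_enum_dv_timings *timings)
	{
		/* Enumerates the global v4l2_dv_timings_presets[] table,
		 * filtered by the cap limits and the callback. */
		return v4l2_enum_dv_timings_cap(timings, &my_timings_cap,
						my_check_dv_timings, NULL);
	}

	static int my_s_dv_timings(struct v4l2_subdev *sd,
				   struct v4l2_dv_timings *timings)
	{
		if (!v4l2_valid_dv_timings(timings, &my_timings_cap,
					   my_check_dv_timings, NULL))
			return -ERANGE;
		/* ... program the hardware ... */
		return 0;
	}
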
@@ -1475,15 +1450,9 @@ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- int i;
-
- for (i = 0; adv76xx_timings[i].bt.width; i++) {
- if (v4l2_match_dv_timings(timings, &adv76xx_timings[i],
- is_digital_input(sd) ? 250000 : 1000000, false)) {
- *timings = adv76xx_timings[i];
- break;
- }
- }
+ v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd),
+ is_digital_input(sd) ? 250000 : 1000000,
+ adv76xx_check_dv_timings, NULL);
}
static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
@@ -1651,12 +1620,9 @@ static int adv76xx_s_dv_timings(struct v4l2_subdev *sd,
bt = &timings->bt;
- if ((is_analog_input(sd) && bt->pixelclock > 170000000) ||
- (is_digital_input(sd) && bt->pixelclock > 225000000)) {
- v4l2_dbg(1, debug, sd, "%s: pixelclock out of range %d\n",
- __func__, (u32)bt->pixelclock);
+ if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd),
+ adv76xx_check_dv_timings, NULL))
return -ERANGE;
- }
adv76xx_fill_optional_dv_timings_fields(sd, timings);
@@ -1884,6 +1850,26 @@ static int adv76xx_get_format(struct v4l2_subdev *sd,
return 0;
}
+static int adv76xx_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct adv76xx_state *state = to_state(sd);
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+ /* Only CROP, CROP_DEFAULT and CROP_BOUNDS are supported */
+ if (sel->target > V4L2_SEL_TGT_CROP_BOUNDS)
+ return -EINVAL;
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = state->timings.bt.width;
+ sel->r.height = state->timings.bt.height;
+
+ return 0;
+}
+
static int adv76xx_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
@@ -2109,7 +2095,8 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
rep_write(sd, 0x76, spa_loc & 0xff);
rep_write_clr_set(sd, 0x77, 0x40, (spa_loc & 0x100) >> 2);
} else {
- /* FIXME: Where is the SPA location LSB register ? */
+ /* ADV7612 Software Manual Rev. A, p. 15 */
+ rep_write(sd, 0x70, spa_loc & 0xff);
rep_write_clr_set(sd, 0x71, 0x01, (spa_loc & 0x100) >> 8);
}
@@ -2380,6 +2367,7 @@ static int adv76xx_subscribe_event(struct v4l2_subdev *sd,
static const struct v4l2_ctrl_ops adv76xx_ctrl_ops = {
.s_ctrl = adv76xx_s_ctrl,
+ .g_volatile_ctrl = adv76xx_g_volatile_ctrl,
};
static const struct v4l2_subdev_core_ops adv76xx_core_ops = {
@@ -2403,6 +2391,7 @@ static const struct v4l2_subdev_video_ops adv76xx_video_ops = {
static const struct v4l2_subdev_pad_ops adv76xx_pad_ops = {
.enum_mbus_code = adv76xx_enum_mbus_code,
+ .get_selection = adv76xx_get_selection,
.get_fmt = adv76xx_get_format,
.set_fmt = adv76xx_set_format,
.get_edid = adv76xx_get_edid,
@@ -2798,6 +2787,7 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
struct device_node *endpoint;
struct device_node *np;
unsigned int flags;
+ int ret;
u32 v;
np = state->i2c_clients[ADV76XX_PAGE_IO]->dev.of_node;
@@ -2807,7 +2797,11 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
if (!endpoint)
return -EINVAL;
- v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+ ret = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+ if (ret) {
+ of_node_put(endpoint);
+ return ret;
+ }
if (!of_property_read_u32(endpoint, "default-input", &v))
state->pdata.default_input = v;
@@ -3009,6 +3003,7 @@ static int adv76xx_probe(struct i2c_client *client,
V4L2_DV_BT_CEA_640X480P59_94;
struct adv76xx_state *state;
struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl *ctrl;
struct v4l2_subdev *sd;
unsigned int i;
unsigned int val, val2;
@@ -3140,6 +3135,11 @@ static int adv76xx_probe(struct i2c_client *client,
V4L2_CID_SATURATION, 0, 255, 1, 128);
v4l2_ctrl_new_std(hdl, &adv76xx_ctrl_ops,
V4L2_CID_HUE, 0, 128, 1, 0);
+ ctrl = v4l2_ctrl_new_std_menu(hdl, &adv76xx_ctrl_ops,
+ V4L2_CID_DV_RX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC,
+ 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
/* private controls */
state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 5fbb788e7b59..7ccb85d45224 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1359,6 +1359,19 @@ static int adv7842_s_ctrl(struct v4l2_ctrl *ctrl)
return -EINVAL;
}
+static int adv7842_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+
+ if (ctrl->id == V4L2_CID_DV_RX_IT_CONTENT_TYPE) {
+ ctrl->val = V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
+ if ((io_read(sd, 0x60) & 1) && (infoframe_read(sd, 0x03) & 0x80))
+ ctrl->val = (infoframe_read(sd, 0x05) >> 4) & 3;
+ return 0;
+ }
+ return -EINVAL;
+}
+
static inline bool no_power(struct v4l2_subdev *sd)
{
return io_read(sd, 0x0c) & 0x24;
@@ -3022,6 +3035,7 @@ static int adv7842_subscribe_event(struct v4l2_subdev *sd,
static const struct v4l2_ctrl_ops adv7842_ctrl_ops = {
.s_ctrl = adv7842_s_ctrl,
+ .g_volatile_ctrl = adv7842_g_volatile_ctrl,
};
static const struct v4l2_subdev_core_ops adv7842_core_ops = {
@@ -3196,6 +3210,7 @@ static int adv7842_probe(struct i2c_client *client,
V4L2_DV_BT_CEA_640X480P59_94;
struct adv7842_platform_data *pdata = client->dev.platform_data;
struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl *ctrl;
struct v4l2_subdev *sd;
u16 rev;
int err;
@@ -3261,6 +3276,11 @@ static int adv7842_probe(struct i2c_client *client,
V4L2_CID_SATURATION, 0, 255, 1, 128);
v4l2_ctrl_new_std(hdl, &adv7842_ctrl_ops,
V4L2_CID_HUE, 0, 128, 1, 0);
+ ctrl = v4l2_ctrl_new_std_menu(hdl, &adv7842_ctrl_ops,
+ V4L2_CID_DV_RX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC,
+ 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
/* custom controls */
state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
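
Both receivers gain the same read-only view of the AVI InfoFrame ITC/CN bits through a volatile control: V4L2_CTRL_FLAG_VOLATILE makes the control framework call g_volatile_ctrl on every read instead of returning a cached value. A minimal sketch of the pattern; my_read_itc() is a hypothetical stand-in for the IO/InfoFrame register decoding done above:

	static int my_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
	{
		if (ctrl->id == V4L2_CID_DV_RX_IT_CONTENT_TYPE) {
			ctrl->val = my_read_itc();	/* hypothetical */
			return 0;
		}
		return -EINVAL;
	}

	static const struct v4l2_ctrl_ops my_ctrl_ops = {
		.g_volatile_ctrl = my_g_volatile_ctrl,
	};

	/* In probe: */
	ctrl = v4l2_ctrl_new_std_menu(hdl, &my_ctrl_ops,
			V4L2_CID_DV_RX_IT_CONTENT_TYPE,
			V4L2_DV_IT_CONTENT_TYPE_NO_ITC, 0,
			V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
	if (ctrl)
		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
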
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index a84561d0d4a8..e016626ebf89 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -688,6 +688,9 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
int msp_revision;
int msp_product, msp_prod_hi, msp_prod_lo;
int msp_rom;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ int ret;
+#endif
if (!id)
strlcpy(client->name, "msp3400", sizeof(client->name));
@@ -704,6 +707,17 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &msp_ops);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ state->pads[IF_AUD_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ state->pads[IF_AUD_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+
+ sd->entity.function = MEDIA_ENT_F_IF_AUD_DECODER;
+
+ ret = media_entity_pads_init(&sd->entity, 2, state->pads);
+ if (ret < 0)
+ return ret;
+#endif
+
state->v4l2_std = V4L2_STD_NTSC;
state->detected_std = V4L2_STD_ALL;
state->audmode = V4L2_TUNER_MODE_STEREO;
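
The hunk above is the media-controller boilerplate that several subdevs gain in this series (mt9v011, saa7115 and tvp5150 below follow the same shape): declare the pads, set the entity function, register the pads before the subdev goes live. Worth pairing with it, if the driver does not already do so, is the matching cleanup on remove; a hedged sketch with a hypothetical remove handler:

	static int my_remove(struct i2c_client *client)
	{
		struct v4l2_subdev *sd = i2c_get_clientdata(client);

		v4l2_device_unregister_subdev(sd);
	#if defined(CONFIG_MEDIA_CONTROLLER)
		media_entity_cleanup(&sd->entity);
	#endif
		return 0;
	}
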
diff --git a/drivers/media/i2c/msp3400-driver.h b/drivers/media/i2c/msp3400-driver.h
index 6cae21366ed5..a8702aca187a 100644
--- a/drivers/media/i2c/msp3400-driver.h
+++ b/drivers/media/i2c/msp3400-driver.h
@@ -7,6 +7,7 @@
#include <media/drv-intf/msp3400.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mc.h>
/* ---------------------------------------------------------------------- */
@@ -102,6 +103,10 @@ struct msp_state {
wait_queue_head_t wq;
unsigned int restart:1;
unsigned int watch_stereo:1;
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pads[IF_AUD_DEC_PAD_NUM_PADS];
+#endif
};
static inline struct msp_state *to_state(struct v4l2_subdev *sd)
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index b9fea11d6b0b..9ed1b26b6549 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -50,6 +50,9 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
struct mt9v011 {
struct v4l2_subdev sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_pad pad;
+#endif
struct v4l2_ctrl_handler ctrls;
unsigned width, height;
unsigned xtal;
@@ -493,6 +496,9 @@ static int mt9v011_probe(struct i2c_client *c,
u16 version;
struct mt9v011 *core;
struct v4l2_subdev *sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int ret;
+#endif
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(c->adapter,
@@ -506,6 +512,15 @@ static int mt9v011_probe(struct i2c_client *c,
sd = &core->sd;
v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ core->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&sd->entity, 1, &core->pad);
+ if (ret < 0)
+ return ret;
+#endif
+
/* Check if the sensor is really a MT9V011 */
version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
if ((version != MT9V011_VERSION) &&
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 2e1d116a64e7..501b37039449 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/mutex.h>
@@ -251,6 +252,8 @@ struct mt9v032 {
struct regmap *regmap;
struct clk *clk;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *standby_gpio;
struct mt9v032_platform_data *pdata;
const struct mt9v032_model_info *model;
@@ -312,16 +315,31 @@ static int mt9v032_power_on(struct mt9v032 *mt9v032)
struct regmap *map = mt9v032->regmap;
int ret;
+ if (mt9v032->reset_gpio)
+ gpiod_set_value_cansleep(mt9v032->reset_gpio, 1);
+
ret = clk_set_rate(mt9v032->clk, mt9v032->sysclk);
if (ret < 0)
return ret;
+ /* System clock has to be enabled before releasing the reset */
ret = clk_prepare_enable(mt9v032->clk);
if (ret)
return ret;
udelay(1);
+ if (mt9v032->reset_gpio) {
+ gpiod_set_value_cansleep(mt9v032->reset_gpio, 0);
+
+ /* After releasing reset we need to wait 10 clock cycles
+ * before accessing the sensor over I2C. As the minimum SYSCLK
+ * frequency is 13MHz, waiting 1µs will be enough in the worst
+ * case.
+ */
+ udelay(1);
+ }
+
/* Reset the chip and stop data read out */
ret = regmap_write(map, MT9V032_RESET, 1);
if (ret < 0)
@@ -954,6 +972,16 @@ static int mt9v032_probe(struct i2c_client *client,
if (IS_ERR(mt9v032->clk))
return PTR_ERR(mt9v032->clk);
+ mt9v032->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(mt9v032->reset_gpio))
+ return PTR_ERR(mt9v032->reset_gpio);
+
+ mt9v032->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(mt9v032->standby_gpio))
+ return PTR_ERR(mt9v032->standby_gpio);
+
mutex_init(&mt9v032->power_lock);
mt9v032->pdata = pdata;
mt9v032->model = (const void *)did->driver_data;
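
devm_gpiod_get_optional() returns NULL rather than an error when the DT property is absent, which is why the power-on path above can guard each toggle with a plain pointer test while still propagating real errors (including -EPROBE_DEFER) from probe. A minimal consumer sketch, assuming the same active-high reset wiring:

	#include <linux/delay.h>
	#include <linux/gpio/consumer.h>

	/* Requested asserted; NULL if DT carries no "reset-gpios". */
	gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	if (gpio) {
		/* the system clock must be running before reset release */
		gpiod_set_value_cansleep(gpio, 0);
		udelay(1);	/* > 10 SYSCLK cycles at >= 13 MHz */
	}
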
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 02b9a3440557..1f999e9c0118 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1321,10 +1321,6 @@ static int ov2659_detect(struct v4l2_subdev *sd)
}
usleep_range(1000, 2000);
- ret = ov2659_init(sd, 0);
- if (ret < 0)
- return ret;
-
/* Check sensor revision */
ret = ov2659_read(client, REG_SC_CHIP_ID_H, &pid);
if (!ret)
@@ -1338,8 +1334,10 @@ static int ov2659_detect(struct v4l2_subdev *sd)
dev_err(&client->dev,
"Sensor detection failed (%04X, %d)\n",
id, ret);
- else
+ else {
dev_info(&client->dev, "Found OV%04X sensor\n", id);
+ ret = ov2659_init(sd, 0);
+ }
}
return ret;
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index a0b3c9bde53d..be5a7fd4f076 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1046,8 +1046,8 @@ static int ov965x_initialize_controls(struct ov965x *ov965x)
ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
- v4l2_ctrl_auto_cluster(3, &ctrls->auto_gain, 0, true);
- v4l2_ctrl_auto_cluster(3, &ctrls->auto_exp, 1, true);
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
v4l2_ctrl_cluster(2, &ctrls->hflip);
ov965x->sd.ctrl_handler = hdl;
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 57b3d27993a4..08af58fb8e7d 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1639,8 +1639,10 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
return 0;
}
- v4l2_of_parse_endpoint(node_ep, &ep);
+ ret = v4l2_of_parse_endpoint(node_ep, &ep);
of_node_put(node_ep);
+ if (ret)
+ return ret;
if (ep.bus_type != V4L2_MBUS_CSI2) {
dev_err(dev, "unsupported bus type\n");
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
index 7d65b36434b1..72ef9f936e6c 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-spi.c
@@ -37,7 +37,6 @@ enum spi_direction {
SPI_DIR_RX,
SPI_DIR_TX
};
-MODULE_DEVICE_TABLE(of, s5c73m3_spi_ids);
static int spi_xmit(struct spi_device *spi_dev, void *addr, const int len,
enum spi_direction dir)
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index fc3a5a8e6c9c..db82ed05792e 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -1868,8 +1868,11 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
return -EINVAL;
}
- v4l2_of_parse_endpoint(node_ep, &ep);
+ ret = v4l2_of_parse_endpoint(node_ep, &ep);
of_node_put(node_ep);
+ if (ret)
+ return ret;
+
state->bus_type = ep.bus_type;
switch (state->bus_type) {
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 24d2b76dbe97..d2a1ce2bc7f5 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -46,6 +46,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mc.h>
#include <media/i2c/saa7115.h>
#include <asm/div64.h>
@@ -74,6 +75,9 @@ enum saa711x_model {
struct saa711x_state {
struct v4l2_subdev sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_pad pads[DEMOD_NUM_PADS];
+#endif
struct v4l2_ctrl_handler hdl;
struct {
@@ -1809,6 +1813,9 @@ static int saa711x_probe(struct i2c_client *client,
struct saa7115_platform_data *pdata;
int ident;
char name[CHIP_VER_SIZE + 1];
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ int ret;
+#endif
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -1832,6 +1839,18 @@ static int saa711x_probe(struct i2c_client *client,
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &saa711x_ops);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ state->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ state->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+
+ sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
+
+ ret = media_entity_pads_init(&sd->entity, DEMOD_NUM_PADS, state->pads);
+ if (ret < 0)
+ return ret;
+#endif
+
v4l_info(client, "%s found @ 0x%x (%s)\n", name,
client->addr << 1, client->adapter->name);
hdl = &state->hdl;
diff --git a/drivers/media/i2c/soc_camera/mt9m001.c b/drivers/media/i2c/soc_camera/mt9m001.c
index 2e14e52ba2e0..69becc358659 100644
--- a/drivers/media/i2c/soc_camera/mt9m001.c
+++ b/drivers/media/i2c/soc_camera/mt9m001.c
@@ -632,7 +632,7 @@ static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
.s_mbus_config = mt9m001_s_mbus_config,
};
-static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
+static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
.g_skip_top_lines = mt9m001_g_skip_top_lines,
};
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index 3b6eeed2e2b9..5c8e3ffe3b27 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -728,7 +728,7 @@ static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
.s_mbus_config = mt9t031_s_mbus_config,
};
-static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
+static const struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
.g_skip_top_lines = mt9t031_g_skip_top_lines,
};
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index c2ba1fb3694d..2721e583bfa0 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -860,7 +860,7 @@ static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
.s_mbus_config = mt9v022_s_mbus_config,
};
-static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
+static const struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
.g_skip_top_lines = mt9v022_g_skip_top_lines,
};
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 3397eb99c67b..972e0d47259d 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -59,8 +59,7 @@ MODULE_LICENSE("GPL");
#define EDID_NUM_BLOCKS_MAX 8
#define EDID_BLOCK_SIZE 128
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE (EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE + 2)
+#define I2C_MAX_XFER_SIZE (EDID_BLOCK_SIZE + 2)
static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
.type = V4L2_DV_BT_656_1120,
@@ -97,9 +96,6 @@ struct tc358743_state {
/* edid */
u8 edid_blocks_written;
- /* used by i2c_wr() */
- u8 wr_data[MAX_XFER_SIZE];
-
struct v4l2_dv_timings timings;
u32 mbus_fmt_code;
@@ -149,13 +145,15 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
{
struct tc358743_state *state = to_state(sd);
struct i2c_client *client = state->i2c_client;
- u8 *data = state->wr_data;
int err, i;
struct i2c_msg msg;
+ u8 data[I2C_MAX_XFER_SIZE];
- if ((2 + n) > sizeof(state->wr_data))
+ if ((2 + n) > I2C_MAX_XFER_SIZE) {
+ n = I2C_MAX_XFER_SIZE - 2;
v4l2_warn(sd, "i2c wr reg=%04x: len=%d is too big!\n",
reg, 2 + n);
+ }
msg.addr = client->addr;
msg.buf = data;
@@ -859,15 +857,16 @@ static void tc358743_format_change(struct v4l2_subdev *sd)
if (tc358743_get_detected_timings(sd, &timings)) {
enable_stream(sd, false);
- v4l2_dbg(1, debug, sd, "%s: Format changed. No signal\n",
+ v4l2_dbg(1, debug, sd, "%s: No signal\n",
__func__);
} else {
if (!v4l2_match_dv_timings(&state->timings, &timings, 0, false))
enable_stream(sd, false);
- v4l2_print_dv_timings(sd->name,
- "tc358743_format_change: Format changed. New format: ",
- &timings, false);
+ if (debug)
+ v4l2_print_dv_timings(sd->name,
+ "tc358743_format_change: New format: ",
+ &timings, false);
}
if (sd->devnode)
@@ -1199,21 +1198,21 @@ static int tc358743_log_status(struct v4l2_subdev *sd)
#ifdef CONFIG_VIDEO_ADV_DEBUG
static void tc358743_print_register_map(struct v4l2_subdev *sd)
{
- v4l2_info(sd, "0x0000–0x00FF: Global Control Register\n");
- v4l2_info(sd, "0x0100–0x01FF: CSI2-TX PHY Register\n");
- v4l2_info(sd, "0x0200–0x03FF: CSI2-TX PPI Register\n");
- v4l2_info(sd, "0x0400–0x05FF: Reserved\n");
- v4l2_info(sd, "0x0600–0x06FF: CEC Register\n");
- v4l2_info(sd, "0x0700–0x84FF: Reserved\n");
- v4l2_info(sd, "0x8500–0x85FF: HDMIRX System Control Register\n");
- v4l2_info(sd, "0x8600–0x86FF: HDMIRX Audio Control Register\n");
- v4l2_info(sd, "0x8700–0x87FF: HDMIRX InfoFrame packet data Register\n");
- v4l2_info(sd, "0x8800–0x88FF: HDMIRX HDCP Port Register\n");
- v4l2_info(sd, "0x8900–0x89FF: HDMIRX Video Output Port & 3D Register\n");
- v4l2_info(sd, "0x8A00–0x8BFF: Reserved\n");
- v4l2_info(sd, "0x8C00–0x8FFF: HDMIRX EDID-RAM (1024bytes)\n");
- v4l2_info(sd, "0x9000–0x90FF: HDMIRX GBD Extraction Control\n");
- v4l2_info(sd, "0x9100–0x92FF: HDMIRX GBD RAM read\n");
+ v4l2_info(sd, "0x0000-0x00FF: Global Control Register\n");
+ v4l2_info(sd, "0x0100-0x01FF: CSI2-TX PHY Register\n");
+ v4l2_info(sd, "0x0200-0x03FF: CSI2-TX PPI Register\n");
+ v4l2_info(sd, "0x0400-0x05FF: Reserved\n");
+ v4l2_info(sd, "0x0600-0x06FF: CEC Register\n");
+ v4l2_info(sd, "0x0700-0x84FF: Reserved\n");
+ v4l2_info(sd, "0x8500-0x85FF: HDMIRX System Control Register\n");
+ v4l2_info(sd, "0x8600-0x86FF: HDMIRX Audio Control Register\n");
+ v4l2_info(sd, "0x8700-0x87FF: HDMIRX InfoFrame packet data Register\n");
+ v4l2_info(sd, "0x8800-0x88FF: HDMIRX HDCP Port Register\n");
+ v4l2_info(sd, "0x8900-0x89FF: HDMIRX Video Output Port & 3D Register\n");
+ v4l2_info(sd, "0x8A00-0x8BFF: Reserved\n");
+ v4l2_info(sd, "0x8C00-0x8FFF: HDMIRX EDID-RAM (1024bytes)\n");
+ v4l2_info(sd, "0x9000-0x90FF: HDMIRX GBD Extraction Control\n");
+ v4l2_info(sd, "0x9100-0x92FF: HDMIRX GBD RAM read\n");
v4l2_info(sd, "0x9300- : Reserved\n");
}
@@ -1581,6 +1580,7 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
{
struct tc358743_state *state = to_state(sd);
u16 edid_len = edid->blocks * EDID_BLOCK_SIZE;
+ int i;
v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
__func__, edid->pad, edid->start_block, edid->blocks);
@@ -1606,7 +1606,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
return 0;
}
- i2c_wr(sd, EDID_RAM, edid->edid, edid_len);
+ for (i = 0; i < edid_len; i += EDID_BLOCK_SIZE)
+ i2c_wr(sd, EDID_RAM + i, edid->edid + i, EDID_BLOCK_SIZE);
state->edid_blocks_written = edid->blocks;
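
The old scheme kept a 1026-byte (8 * 128 + 2) write buffer in the state struct; moving the buffer to the stack forces a small fixed bound, so I2C_MAX_XFER_SIZE shrinks to one EDID block plus the two register-address bytes and the EDID upload is split into per-block transfers. The size math, as a sketch:

	/*
	 * I2C_MAX_XFER_SIZE = EDID_BLOCK_SIZE + 2 = 130 bytes:
	 * two address bytes plus at most one 128-byte EDID block per
	 * i2c_wr() call. A full 8-block (1024-byte) EDID therefore
	 * takes eight writes, each addressed at EDID_RAM + i.
	 */
	for (i = 0; i < edid_len; i += EDID_BLOCK_SIZE)
		i2c_wr(sd, EDID_RAM + i, edid->edid + i, EDID_BLOCK_SIZE);
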
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 7fa5f1e4fe37..7cdd94842938 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -1001,7 +1001,7 @@ static struct tvp514x_decoder tvp514x_dev = {
static struct tvp514x_platform_data *
tvp514x_get_pdata(struct i2c_client *client)
{
- struct tvp514x_platform_data *pdata;
+ struct tvp514x_platform_data *pdata = NULL;
struct v4l2_of_endpoint bus_cfg;
struct device_node *endpoint;
unsigned int flags;
@@ -1013,11 +1013,13 @@ tvp514x_get_pdata(struct i2c_client *client)
if (!endpoint)
return NULL;
+ if (v4l2_of_parse_endpoint(endpoint, &bus_cfg))
+ goto done;
+
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
goto done;
- v4l2_of_parse_endpoint(endpoint, &bus_cfg);
flags = bus_cfg.bus.parallel.flags;
if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
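
v4l2_of_parse_endpoint() returns a negative errno on a malformed endpoint, and this series makes every caller check it. Note the interplay with pdata now starting out as NULL: the parse check jumps to the common exit before devm_kzalloc() runs, so the function still returns NULL cleanly. The resulting shape, as a sketch:

	endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
	if (!endpoint)
		return NULL;

	if (v4l2_of_parse_endpoint(endpoint, &bus_cfg))
		goto done;	/* pdata stays NULL */

	pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		goto done;

	/* ... translate bus_cfg.bus.parallel.flags into pdata ... */
done:
	of_node_put(endpoint);
	return pdata;
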
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 6c3769d44b75..ff18444e19e4 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1,19 +1,22 @@
/*
- * tvp5150 - Texas Instruments TVP5150A/AM1 video decoder driver
+ * tvp5150 - Texas Instruments TVP5150A/AM1 and TVP5151 video decoder driver
*
* Copyright (c) 2005,2006 Mauro Carvalho Chehab (mchehab@infradead.org)
* This code is placed under the terms of the GNU General Public License v2
*/
+#include <dt-bindings/media/tvp5150.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
-#include <media/i2c/tvp5150.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-of.h>
+#include <media/v4l2-mc.h>
#include "tvp5150_reg.h"
@@ -24,7 +27,7 @@
#define TVP5150_MAX_CROP_TOP 127
#define TVP5150_CROP_SHIFT 2
-MODULE_DESCRIPTION("Texas Instruments TVP5150A video decoder driver");
+MODULE_DESCRIPTION("Texas Instruments TVP5150A/TVP5150AM1/TVP5151 video decoder driver");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
@@ -35,6 +38,11 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
struct tvp5150 {
struct v4l2_subdev sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_pad pads[DEMOD_NUM_PADS];
+ struct media_entity input_ent[TVP5150_INPUT_NUM];
+ struct media_pad input_pad[TVP5150_INPUT_NUM];
+#endif
struct v4l2_ctrl_handler hdl;
struct v4l2_rect rect;
@@ -42,6 +50,11 @@ struct tvp5150 {
u32 input;
u32 output;
int enable;
+
+ u16 dev_id;
+ u16 rom_ver;
+
+ enum v4l2_mbus_type mbus_type;
};
static inline struct tvp5150 *to_tvp5150(struct v4l2_subdev *sd)
@@ -246,8 +259,12 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
int input = 0;
int val;
- if ((decoder->output & TVP5150_BLACK_SCREEN) || !decoder->enable)
- input = 8;
+ /* Only tvp5150am1 and tvp5151 have signal generator support */
+ if ((decoder->dev_id == 0x5150 && decoder->rom_ver == 0x0400) ||
+ (decoder->dev_id == 0x5151 && decoder->rom_ver == 0x0100)) {
+ if (!decoder->enable)
+ input = 8;
+ }
switch (decoder->input) {
case TVP5150_COMPOSITE1:
@@ -772,12 +789,17 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val)
v4l2_ctrl_handler_setup(&decoder->hdl);
tvp5150_set_std(sd, decoder->norm);
+
+ if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+ tvp5150_write(sd, TVP5150_DATA_RATE_SEL, 0x40);
+
return 0;
};
static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
+ struct tvp5150 *decoder = to_tvp5150(sd);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -791,6 +813,9 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
case V4L2_CID_HUE:
tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val);
+ return 0;
+ case V4L2_CID_TEST_PATTERN:
+ decoder->enable = !ctrl->val;
+ tvp5150_selmux(sd);
return 0;
}
return -EINVAL;
@@ -818,17 +843,6 @@ static v4l2_std_id tvp5150_read_std(struct v4l2_subdev *sd)
}
}
-static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad || code->index)
- return -EINVAL;
-
- code->code = MEDIA_BUS_FMT_UYVY8_2X8;
- return 0;
-}
-
static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
@@ -844,10 +858,10 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
tvp5150_reset(sd, 0);
f->width = decoder->rect.width;
- f->height = decoder->rect.height;
+ f->height = decoder->rect.height / 2;
f->code = MEDIA_BUS_FMT_UYVY8_2X8;
- f->field = V4L2_FIELD_SEQ_TB;
+ f->field = V4L2_FIELD_ALTERNATE;
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
v4l2_dbg(1, debug, sd, "width = %d, height = %d\n", f->width,
@@ -948,10 +962,110 @@ static int tvp5150_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
+static int tvp5150_g_mbus_config(struct v4l2_subdev *sd,
+ struct v4l2_mbus_config *cfg)
+{
+ struct tvp5150 *decoder = to_tvp5150(sd);
+
+ cfg->type = decoder->mbus_type;
+ cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING
+ | V4L2_MBUS_FIELD_EVEN_LOW | V4L2_MBUS_DATA_ACTIVE_HIGH;
+
+ return 0;
+}
+
+/****************************************************************************
+ V4L2 subdev pad ops
+ ****************************************************************************/
+static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ return 0;
+}
+
+static int tvp5150_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct tvp5150 *decoder = to_tvp5150(sd);
+
+ if (fse->index >= 8 || fse->code != MEDIA_BUS_FMT_UYVY8_2X8)
+ return -EINVAL;
+
+ fse->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fse->min_width = decoder->rect.width;
+ fse->max_width = decoder->rect.width;
+ fse->min_height = decoder->rect.height / 2;
+ fse->max_height = decoder->rect.height / 2;
+
+ return 0;
+}
+
+/****************************************************************************
+ Media entity ops
+ ****************************************************************************/
+
+static int tvp5150_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct tvp5150 *decoder = to_tvp5150(sd);
+ int i;
+
+ for (i = 0; i < TVP5150_INPUT_NUM; i++) {
+ if (remote->entity == &decoder->input_ent[i])
+ break;
+ }
+
+ /* Do nothing for entities that are not input connectors */
+ if (i == TVP5150_INPUT_NUM)
+ return 0;
+
+ decoder->input = i;
+
+ tvp5150_selmux(sd);
+#endif
+
+ return 0;
+}
+
+static const struct media_entity_operations tvp5150_sd_media_ops = {
+ .link_setup = tvp5150_link_setup,
+};
+
/****************************************************************************
I2C Command
****************************************************************************/
+static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct tvp5150 *decoder = to_tvp5150(sd);
+ /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
+ int val = 0x09;
+
+ /* Output format: 8-bit 4:2:2 YUV with discrete sync */
+ if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+ val = 0x0d;
+
+ /* Initialize TVP5150 to its default values: set PCLK (27 MHz) */
+ tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+
+ if (enable)
+ tvp5150_write(sd, TVP5150_MISC_CTL, val);
+ else
+ tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+
+ return 0;
+}
+
static int tvp5150_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
@@ -959,6 +1073,12 @@ static int tvp5150_s_routing(struct v4l2_subdev *sd,
decoder->input = input;
decoder->output = output;
+
+ if (output == TVP5150_BLACK_SCREEN)
+ decoder->enable = false;
+ else
+ decoder->enable = true;
+
tvp5150_selmux(sd);
return 0;
}
@@ -1052,6 +1172,42 @@ static int tvp5150_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
return 0;
}
+static int tvp5150_registered_async(struct v4l2_subdev *sd)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct tvp5150 *decoder = to_tvp5150(sd);
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < TVP5150_INPUT_NUM; i++) {
+ struct media_entity *input = &decoder->input_ent[i];
+ struct media_pad *pad = &decoder->input_pad[i];
+
+ if (!input->name)
+ continue;
+
+ decoder->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(input, 1, pad);
+ if (ret < 0)
+ return ret;
+
+ ret = media_device_register_entity(sd->v4l2_dev->mdev, input);
+ if (ret < 0)
+ return ret;
+
+ ret = media_create_pad_link(input, 0, &sd->entity,
+ DEMOD_PAD_IF_INPUT, 0);
+ if (ret < 0) {
+ media_device_unregister_entity(input);
+ return ret;
+ }
+ }
+#endif
+
+ return 0;
+}
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_ctrl_ops tvp5150_ctrl_ops = {
@@ -1065,6 +1221,7 @@ static const struct v4l2_subdev_core_ops tvp5150_core_ops = {
.g_register = tvp5150_g_register,
.s_register = tvp5150_s_register,
#endif
+ .registered_async = tvp5150_registered_async,
};
static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = {
@@ -1073,10 +1230,12 @@ static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = {
static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
.s_std = tvp5150_s_std,
+ .s_stream = tvp5150_s_stream,
.s_routing = tvp5150_s_routing,
.s_crop = tvp5150_s_crop,
.g_crop = tvp5150_g_crop,
.cropcap = tvp5150_cropcap,
+ .g_mbus_config = tvp5150_g_mbus_config,
};
static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
@@ -1088,6 +1247,7 @@ static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
static const struct v4l2_subdev_pad_ops tvp5150_pad_ops = {
.enum_mbus_code = tvp5150_enum_mbus_code,
+ .enum_frame_size = tvp5150_enum_frame_size,
.set_fmt = tvp5150_fill_fmt,
.get_fmt = tvp5150_fill_fmt,
};
@@ -1105,63 +1265,239 @@ static const struct v4l2_subdev_ops tvp5150_ops = {
I2C Client & Driver
****************************************************************************/
+static int tvp5150_detect_version(struct tvp5150 *core)
+{
+ struct v4l2_subdev *sd = &core->sd;
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ unsigned int i;
+ u8 regs[4];
+ int res;
+
+ /*
+ * Read consecutive registers - TVP5150_MSB_DEV_ID, TVP5150_LSB_DEV_ID,
+ * TVP5150_ROM_MAJOR_VER, TVP5150_ROM_MINOR_VER
+ */
+ for (i = 0; i < 4; i++) {
+ res = tvp5150_read(sd, TVP5150_MSB_DEV_ID + i);
+ if (res < 0)
+ return res;
+ regs[i] = res;
+ }
+
+ core->dev_id = (regs[0] << 8) | regs[1];
+ core->rom_ver = (regs[2] << 8) | regs[3];
+
+ v4l2_info(sd, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n",
+ core->dev_id, regs[2], regs[3], c->addr << 1,
+ c->adapter->name);
+
+ if (core->dev_id == 0x5150 && core->rom_ver == 0x0321) {
+ v4l2_info(sd, "tvp5150a detected.\n");
+ } else if (core->dev_id == 0x5150 && core->rom_ver == 0x0400) {
+ v4l2_info(sd, "tvp5150am1 detected.\n");
+
+ /* ITU-T BT.656.4 timing */
+ tvp5150_write(sd, TVP5150_REV_SELECT, 0);
+ } else if (core->dev_id == 0x5151 && core->rom_ver == 0x0100) {
+ v4l2_info(sd, "tvp5151 detected.\n");
+ } else {
+ v4l2_info(sd, "*** unknown tvp%04x chip detected.\n",
+ core->dev_id);
+ }
+
+ return 0;
+}
+
+static int tvp5150_init(struct i2c_client *c)
+{
+ struct gpio_desc *pdn_gpio;
+ struct gpio_desc *reset_gpio;
+
+ pdn_gpio = devm_gpiod_get_optional(&c->dev, "pdn", GPIOD_OUT_HIGH);
+ if (IS_ERR(pdn_gpio))
+ return PTR_ERR(pdn_gpio);
+
+ if (pdn_gpio) {
+ gpiod_set_value_cansleep(pdn_gpio, 0);
+ /* Delay time between power supplies active and reset */
+ msleep(20);
+ }
+
+ reset_gpio = devm_gpiod_get_optional(&c->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return PTR_ERR(reset_gpio);
+
+ if (reset_gpio) {
+ /* RESETB pulse duration */
+ ndelay(500);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ /* Delay time between end of reset to I2C active */
+ usleep_range(200, 250);
+ }
+
+ return 0;
+}
+
+static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
+{
+ struct v4l2_of_endpoint bus_cfg;
+ struct device_node *ep;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct device_node *connectors, *child;
+ struct media_entity *input;
+ const char *name;
+ u32 input_type;
+#endif
+ unsigned int flags;
+ int ret = 0;
+
+ ep = of_graph_get_next_endpoint(np, NULL);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_of_parse_endpoint(ep, &bus_cfg);
+ if (ret)
+ goto err;
+
+ flags = bus_cfg.bus.parallel.flags;
+
+ if (bus_cfg.bus_type == V4L2_MBUS_PARALLEL &&
+ !(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH &&
+ flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH &&
+ flags & V4L2_MBUS_FIELD_EVEN_LOW)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ decoder->mbus_type = bus_cfg.bus_type;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ connectors = of_get_child_by_name(np, "connectors");
+
+ if (!connectors)
+ goto err;
+
+ for_each_available_child_of_node(connectors, child) {
+ ret = of_property_read_u32(child, "input", &input_type);
+ if (ret) {
+ v4l2_err(&decoder->sd,
+ "missing type property in node %s\n",
+ child->name);
+ goto err_connector;
+ }
+
+ if (input_type >= TVP5150_INPUT_NUM) {
+ ret = -EINVAL;
+ goto err_connector;
+ }
+
+ input = &decoder->input_ent[input_type];
+
+ /* Each input connector can only be defined once */
+ if (input->name) {
+ v4l2_err(&decoder->sd,
+ "input %s with same type already exists\n",
+ input->name);
+ ret = -EINVAL;
+ goto err_connector;
+ }
+
+ switch (input_type) {
+ case TVP5150_COMPOSITE0:
+ case TVP5150_COMPOSITE1:
+ input->function = MEDIA_ENT_F_CONN_COMPOSITE;
+ break;
+ case TVP5150_SVIDEO:
+ input->function = MEDIA_ENT_F_CONN_SVIDEO;
+ break;
+ }
+
+ input->flags = MEDIA_ENT_FL_CONNECTOR;
+
+ ret = of_property_read_string(child, "label", &name);
+ if (ret < 0) {
+ v4l2_err(&decoder->sd,
+ "missing label property in node %s\n",
+ child->name);
+ goto err_connector;
+ }
+
+ input->name = name;
+ }
+
+err_connector:
+ of_node_put(connectors);
+#endif
+err:
+ of_node_put(ep);
+ return ret;
+}
+
+static const char * const tvp5150_test_patterns[2] = {
+ "Disabled",
+ "Black screen"
+};
+
static int tvp5150_probe(struct i2c_client *c,
const struct i2c_device_id *id)
{
struct tvp5150 *core;
struct v4l2_subdev *sd;
- int tvp5150_id[4];
- int i, res;
+ struct device_node *np = c->dev.of_node;
+ int res;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(c->adapter,
I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
return -EIO;
+ res = tvp5150_init(c);
+ if (res)
+ return res;
+
core = devm_kzalloc(&c->dev, sizeof(*core), GFP_KERNEL);
if (!core)
return -ENOMEM;
+
sd = &core->sd;
- v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
- /*
- * Read consequent registers - TVP5150_MSB_DEV_ID, TVP5150_LSB_DEV_ID,
- * TVP5150_ROM_MAJOR_VER, TVP5150_ROM_MINOR_VER
- */
- for (i = 0; i < 4; i++) {
- res = tvp5150_read(sd, TVP5150_MSB_DEV_ID + i);
- if (res < 0)
+ if (IS_ENABLED(CONFIG_OF) && np) {
+ res = tvp5150_parse_dt(core, np);
+ if (res) {
+ v4l2_err(sd, "DT parsing error: %d\n", res);
return res;
- tvp5150_id[i] = res;
+ }
+ } else {
+ /* Default to BT.656 embedded sync */
+ core->mbus_type = V4L2_MBUS_BT656;
}
- v4l_info(c, "chip found @ 0x%02x (%s)\n",
- c->addr << 1, c->adapter->name);
+ v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- if (tvp5150_id[2] == 4 && tvp5150_id[3] == 0) { /* Is TVP5150AM1 */
- v4l2_info(sd, "tvp%02x%02xam1 detected.\n",
- tvp5150_id[0], tvp5150_id[1]);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ core->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ core->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ core->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
- /* ITU-T BT.656.4 timing */
- tvp5150_write(sd, TVP5150_REV_SELECT, 0);
- } else {
- /* Is TVP5150A */
- if (tvp5150_id[2] == 3 || tvp5150_id[3] == 0x21) {
- v4l2_info(sd, "tvp%02x%02xa detected.\n",
- tvp5150_id[0], tvp5150_id[1]);
- } else {
- v4l2_info(sd, "*** unknown tvp%02x%02x chip detected.\n",
- tvp5150_id[0], tvp5150_id[1]);
- v4l2_info(sd, "*** Rom ver is %d.%d\n",
- tvp5150_id[2], tvp5150_id[3]);
- }
- }
+ sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
+
+ res = media_entity_pads_init(&sd->entity, DEMOD_NUM_PADS, core->pads);
+ if (res < 0)
+ return res;
+
+ sd->entity.ops = &tvp5150_sd_media_ops;
+#endif
+
+ res = tvp5150_detect_version(core);
+ if (res < 0)
+ return res;
core->norm = V4L2_STD_ALL; /* Default is autodetect */
core->input = TVP5150_COMPOSITE1;
- core->enable = 1;
+ core->enable = true;
- v4l2_ctrl_handler_init(&core->hdl, 4);
+ v4l2_ctrl_handler_init(&core->hdl, 5);
v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
@@ -1170,6 +1506,13 @@ static int tvp5150_probe(struct i2c_client *c,
V4L2_CID_SATURATION, 0, 255, 1, 128);
v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
V4L2_CID_HUE, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 27000000,
+ 27000000, 1, 27000000);
+ v4l2_ctrl_new_std_menu_items(&core->hdl, &tvp5150_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(tvp5150_test_patterns),
+ 0, 0, tvp5150_test_patterns);
sd->ctrl_handler = &core->hdl;
if (core->hdl.error) {
res = core->hdl.error;
@@ -1221,8 +1564,17 @@ static const struct i2c_device_id tvp5150_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tvp5150_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tvp5150_of_match[] = {
+ { .compatible = "ti,tvp5150", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tvp5150_of_match);
+#endif
+
static struct i2c_driver tvp5150_driver = {
.driver = {
+ .of_match_table = of_match_ptr(tvp5150_of_match),
.name = "tvp5150",
},
.probe = tvp5150_probe,
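
With the new V4L2_CID_TEST_PATTERN menu wired into tvp5150_selmux(), the black-screen generator becomes reachable through the standard control interface. A hedged userspace sketch; the subdev node path is hypothetical:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_control ctrl = {
			.id = V4L2_CID_TEST_PATTERN,
			.value = 1,	/* menu index 1 = "Black screen" */
		};
		int fd = open("/dev/v4l-subdev0", O_RDWR);

		if (fd < 0)
			return 1;
		return ioctl(fd, VIDIOC_S_CTRL, &ctrl) ? 1 : 0;
	}
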
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 83c79fa5f61d..4df640c3aa40 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -894,7 +894,7 @@ static struct tvp7002_config *
tvp7002_get_pdata(struct i2c_client *client)
{
struct v4l2_of_endpoint bus_cfg;
- struct tvp7002_config *pdata;
+ struct tvp7002_config *pdata = NULL;
struct device_node *endpoint;
unsigned int flags;
@@ -905,11 +905,13 @@ tvp7002_get_pdata(struct i2c_client *client)
if (!endpoint)
return NULL;
+ if (v4l2_of_parse_endpoint(endpoint, &bus_cfg))
+ goto done;
+
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
goto done;
- v4l2_of_parse_endpoint(endpoint, &bus_cfg);
flags = bus_cfg.bus.parallel.flags;
if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 4b564f17f618..90b693f4e2ab 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -124,7 +124,7 @@ static int vpx3220_fp_write(struct v4l2_subdev *sd, u8 fpaddr, u16 data)
return 0;
}
-static u16 vpx3220_fp_read(struct v4l2_subdev *sd, u16 fpaddr)
+static int vpx3220_fp_read(struct v4l2_subdev *sd, u16 fpaddr)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
s16 data;
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 7dae0ac0f3ae..3cfd7af8c5ca 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -20,6 +20,9 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+/* We need to access legacy defines from linux/media.h */
+#define __NEED_MEDIA_LEGACY_API
+
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/idr.h>
@@ -27,6 +30,8 @@
#include <linux/media.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
#include <media/media-device.h>
#include <media/media-devnode.h>
@@ -38,6 +43,11 @@
* Userspace API
*/
+static inline void __user *media_get_uptr(__u64 arg)
+{
+ return (void __user *)(uintptr_t)arg;
+}
+
static int media_device_open(struct file *filp)
{
return 0;
@@ -55,7 +65,11 @@ static int media_device_get_info(struct media_device *dev,
memset(&info, 0, sizeof(info));
- strlcpy(info.driver, dev->dev->driver->name, sizeof(info.driver));
+ if (dev->driver_name[0])
+ strlcpy(info.driver, dev->driver_name, sizeof(info.driver));
+ else
+ strlcpy(info.driver, dev->dev->driver->name, sizeof(info.driver));
+
strlcpy(info.model, dev->model, sizeof(info.model));
strlcpy(info.serial, dev->serial, sizeof(info.serial));
strlcpy(info.bus_info, dev->bus_info, sizeof(info.bus_info));
@@ -115,6 +129,26 @@ static long media_device_enum_entities(struct media_device *mdev,
u_ent.group_id = 0; /* Unused */
u_ent.pads = ent->num_pads;
u_ent.links = ent->num_links - ent->num_backlinks;
+
+ /*
+ * Workaround for a bug in media-ctl <= v1.10 that makes it do
+ * the wrong thing if the entity function doesn't belong to
+ * either the MEDIA_ENT_F_OLD_BASE or MEDIA_ENT_F_OLD_SUBDEV_BASE
+ * range.
+ *
+ * Non-subdevices are expected to be in the MEDIA_ENT_F_OLD_BASE
+ * range; otherwise they will be silently ignored by media-ctl
+ * when printing the graphviz diagram. So, map them into the old
+ * devnode range.
+ */
+ if (ent->function < MEDIA_ENT_F_OLD_BASE ||
+ ent->function > MEDIA_ENT_T_DEVNODE_UNKNOWN) {
+ if (is_media_entity_v4l2_subdev(ent))
+ u_ent.type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ else if (ent->function != MEDIA_ENT_F_IO_V4L)
+ u_ent.type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
+ }
+
memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
return -EFAULT;
@@ -234,7 +268,6 @@ static long media_device_setup_link(struct media_device *mdev,
return ret;
}
-#if 0 /* Let's postpone it to Kernel 4.6 */
static long __media_device_get_topology(struct media_device *mdev,
struct media_v2_topology *topo)
{
@@ -242,10 +275,10 @@ static long __media_device_get_topology(struct media_device *mdev,
struct media_interface *intf;
struct media_pad *pad;
struct media_link *link;
- struct media_v2_entity kentity, *uentity;
- struct media_v2_interface kintf, *uintf;
- struct media_v2_pad kpad, *upad;
- struct media_v2_link klink, *ulink;
+ struct media_v2_entity kentity, __user *uentity;
+ struct media_v2_interface kintf, __user *uintf;
+ struct media_v2_pad kpad, __user *upad;
+ struct media_v2_link klink, __user *ulink;
unsigned int i;
int ret = 0;
@@ -390,7 +423,6 @@ static long media_device_get_topology(struct media_device *mdev,
return 0;
}
-#endif
static long media_device_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
@@ -424,14 +456,13 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd,
mutex_unlock(&dev->graph_mutex);
break;
-#if 0 /* Let's postpone it to Kernel 4.6 */
case MEDIA_IOC_G_TOPOLOGY:
mutex_lock(&dev->graph_mutex);
ret = media_device_get_topology(dev,
(struct media_v2_topology __user *)arg);
mutex_unlock(&dev->graph_mutex);
break;
-#endif
+
default:
ret = -ENOIOCTLCMD;
}
@@ -480,9 +511,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
case MEDIA_IOC_DEVICE_INFO:
case MEDIA_IOC_ENUM_ENTITIES:
case MEDIA_IOC_SETUP_LINK:
-#if 0 /* Let's postpone it to Kernel 4.6 */
case MEDIA_IOC_G_TOPOLOGY:
-#endif
return media_device_ioctl(filp, cmd, arg);
case MEDIA_IOC_ENUM_LINKS32:
@@ -541,6 +570,7 @@ static void media_device_release(struct media_devnode *mdev)
int __must_check media_device_register_entity(struct media_device *mdev,
struct media_entity *entity)
{
+ struct media_entity_notify *notify, *next;
unsigned int i;
int ret;
@@ -580,8 +610,33 @@ int __must_check media_device_register_entity(struct media_device *mdev,
media_gobj_create(mdev, MEDIA_GRAPH_PAD,
&entity->pads[i].graph_obj);
+ /* invoke entity_notify callbacks */
+ list_for_each_entry_safe(notify, next, &mdev->entity_notify, list)
+ notify->notify(entity, notify->notify_data);
+
spin_unlock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
+ if (mdev->entity_internal_idx_max
+ >= mdev->pm_count_walk.ent_enum.idx_max) {
+ struct media_entity_graph new = { .top = 0 };
+
+ /*
+ * Initialise the new graph walk before cleaning up
+ * the old one in order not to spoil the graph walk
+ * object of the media device if graph walk init fails.
+ */
+ ret = media_entity_graph_walk_init(&new, mdev);
+ if (ret) {
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+ media_entity_graph_walk_cleanup(&mdev->pm_count_walk);
+ mdev->pm_count_walk = new;
+ }
+ mutex_unlock(&mdev->graph_mutex);
+
return 0;
}
EXPORT_SYMBOL_GPL(media_device_register_entity);
@@ -613,6 +668,8 @@ static void __media_device_unregister_entity(struct media_entity *entity)
/* Remove the entity */
media_gobj_destroy(&entity->graph_obj);
+ /* TODO: invoke entity_notify callbacks to handle entity removal */
+
entity->graph_obj.mdev = NULL;
}
@@ -645,6 +702,7 @@ void media_device_init(struct media_device *mdev)
INIT_LIST_HEAD(&mdev->interfaces);
INIT_LIST_HEAD(&mdev->pads);
INIT_LIST_HEAD(&mdev->links);
+ INIT_LIST_HEAD(&mdev->entity_notify);
spin_lock_init(&mdev->lock);
mutex_init(&mdev->graph_mutex);
ida_init(&mdev->entity_internal_idx);
@@ -657,6 +715,7 @@ void media_device_cleanup(struct media_device *mdev)
{
ida_destroy(&mdev->entity_internal_idx);
mdev->entity_internal_idx_max = 0;
+ media_entity_graph_walk_cleanup(&mdev->pm_count_walk);
mutex_destroy(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_device_cleanup);
@@ -690,11 +749,40 @@ int __must_check __media_device_register(struct media_device *mdev,
}
EXPORT_SYMBOL_GPL(__media_device_register);
+int __must_check media_device_register_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr)
+{
+ spin_lock(&mdev->lock);
+ list_add_tail(&nptr->list, &mdev->entity_notify);
+ spin_unlock(&mdev->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(media_device_register_entity_notify);
+
+/*
+ * Note: Should be called with mdev->lock held.
+ */
+static void __media_device_unregister_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr)
+{
+ list_del(&nptr->list);
+}
+
+void media_device_unregister_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr)
+{
+ spin_lock(&mdev->lock);
+ __media_device_unregister_entity_notify(mdev, nptr);
+ spin_unlock(&mdev->lock);
+}
+EXPORT_SYMBOL_GPL(media_device_unregister_entity_notify);
+
void media_device_unregister(struct media_device *mdev)
{
struct media_entity *entity;
struct media_entity *next;
struct media_interface *intf, *tmp_intf;
+ struct media_entity_notify *notify, *nextp;
if (mdev == NULL)
return;
@@ -711,6 +799,10 @@ void media_device_unregister(struct media_device *mdev)
list_for_each_entry_safe(entity, next, &mdev->entities, graph_obj.list)
__media_device_unregister_entity(entity);
+ /* Remove all entity_notify callbacks from the media device */
+ list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list)
+ __media_device_unregister_entity_notify(mdev, notify);
+
/* Remove all interfaces from the media device */
list_for_each_entry_safe(intf, tmp_intf, &mdev->interfaces,
graph_obj.list) {
@@ -754,4 +846,58 @@ struct media_device *media_device_find_devres(struct device *dev)
}
EXPORT_SYMBOL_GPL(media_device_find_devres);
+#if IS_ENABLED(CONFIG_PCI)
+void media_device_pci_init(struct media_device *mdev,
+ struct pci_dev *pci_dev,
+ const char *name)
+{
+ mdev->dev = &pci_dev->dev;
+
+ if (name)
+ strlcpy(mdev->model, name, sizeof(mdev->model));
+ else
+ strlcpy(mdev->model, pci_name(pci_dev), sizeof(mdev->model));
+
+ sprintf(mdev->bus_info, "PCI:%s", pci_name(pci_dev));
+
+ mdev->hw_revision = (pci_dev->subsystem_vendor << 16)
+ | pci_dev->subsystem_device;
+
+ mdev->driver_version = LINUX_VERSION_CODE;
+
+ media_device_init(mdev);
+}
+EXPORT_SYMBOL_GPL(media_device_pci_init);
+#endif
+
+#if IS_ENABLED(CONFIG_USB)
+void __media_device_usb_init(struct media_device *mdev,
+ struct usb_device *udev,
+ const char *board_name,
+ const char *driver_name)
+{
+ mdev->dev = &udev->dev;
+
+ if (driver_name)
+ strlcpy(mdev->driver_name, driver_name,
+ sizeof(mdev->driver_name));
+
+ if (board_name)
+ strlcpy(mdev->model, board_name, sizeof(mdev->model));
+ else if (udev->product)
+ strlcpy(mdev->model, udev->product, sizeof(mdev->model));
+ else
+ strlcpy(mdev->model, "unknown model", sizeof(mdev->model));
+ if (udev->serial)
+ strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
+ usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info));
+ mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ mdev->driver_version = LINUX_VERSION_CODE;
+
+ media_device_init(mdev);
+}
+EXPORT_SYMBOL_GPL(__media_device_usb_init);
+#endif
+
#endif /* CONFIG_MEDIA_CONTROLLER */
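
The new entity_notify hooks give other kernel code a way to react as entities are registered on a media device. A minimal consumer sketch of the API added above (callback body hypothetical); the callback runs under mdev->lock, a spinlock, so it must not sleep:

	static void my_entity_notify(struct media_entity *entity, void *notify_data)
	{
		pr_info("entity %s registered\n", entity->name);
	}

	static struct media_entity_notify my_notify = {
		.notify		= my_entity_notify,
		.notify_data	= NULL,
	};

	/* setup */
	ret = media_device_register_entity_notify(mdev, &my_notify);

	/* teardown */
	media_device_unregister_entity_notify(mdev, &my_notify);
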
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index cea35bf20011..29409f440f1c 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -181,6 +181,7 @@ static int media_open(struct inode *inode, struct file *filp)
ret = mdev->fops->open(filp);
if (ret) {
put_device(&mdev->dev);
+ filp->private_data = NULL;
return ret;
}
}
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index e89d85a7d31b..e95070b3a3d4 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -46,25 +46,41 @@ static inline const char *intf_type(struct media_interface *intf)
{
switch (intf->type) {
case MEDIA_INTF_T_DVB_FE:
- return "frontend";
+ return "dvb-frontend";
case MEDIA_INTF_T_DVB_DEMUX:
- return "demux";
+ return "dvb-demux";
case MEDIA_INTF_T_DVB_DVR:
- return "DVR";
+ return "dvb-dvr";
case MEDIA_INTF_T_DVB_CA:
- return "CA";
+ return "dvb-ca";
case MEDIA_INTF_T_DVB_NET:
- return "dvbnet";
+ return "dvb-net";
case MEDIA_INTF_T_V4L_VIDEO:
- return "video";
+ return "v4l-video";
case MEDIA_INTF_T_V4L_VBI:
- return "vbi";
+ return "v4l-vbi";
case MEDIA_INTF_T_V4L_RADIO:
- return "radio";
+ return "v4l-radio";
case MEDIA_INTF_T_V4L_SUBDEV:
- return "v4l2-subdev";
+ return "v4l-subdev";
case MEDIA_INTF_T_V4L_SWRADIO:
- return "swradio";
+ return "v4l-swradio";
+ case MEDIA_INTF_T_ALSA_PCM_CAPTURE:
+ return "alsa-pcm-capture";
+ case MEDIA_INTF_T_ALSA_PCM_PLAYBACK:
+ return "alsa-pcm-playback";
+ case MEDIA_INTF_T_ALSA_CONTROL:
+ return "alsa-control";
+ case MEDIA_INTF_T_ALSA_COMPRESS:
+ return "alsa-compress";
+ case MEDIA_INTF_T_ALSA_RAWMIDI:
+ return "alsa-rawmidi";
+ case MEDIA_INTF_T_ALSA_HWDEP:
+ return "alsa-hwdep";
+ case MEDIA_INTF_T_ALSA_SEQUENCER:
+ return "alsa-sequencer";
+ case MEDIA_INTF_T_ALSA_TIMER:
+ return "alsa-timer";
default:
return "unknown-intf";
}
@@ -73,8 +89,9 @@ static inline const char *intf_type(struct media_interface *intf)
__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum,
int idx_max)
{
- ent_enum->bmap = kcalloc(DIV_ROUND_UP(idx_max, BITS_PER_LONG),
- sizeof(long), GFP_KERNEL);
+ idx_max = ALIGN(idx_max, BITS_PER_LONG);
+ ent_enum->bmap = kcalloc(idx_max / BITS_PER_LONG, sizeof(long),
+ GFP_KERNEL);
if (!ent_enum->bmap)
return -ENOMEM;
@@ -349,8 +366,8 @@ EXPORT_SYMBOL_GPL(media_entity_graph_walk_next);
* Pipeline management
*/
-__must_check int media_entity_pipeline_start(struct media_entity *entity,
- struct media_pipeline *pipe)
+__must_check int __media_entity_pipeline_start(struct media_entity *entity,
+ struct media_pipeline *pipe)
{
struct media_device *mdev = entity->graph_obj.mdev;
struct media_entity_graph *graph = &pipe->graph;
@@ -358,8 +375,6 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
struct media_link *link;
int ret;
- mutex_lock(&mdev->graph_mutex);
-
if (!pipe->streaming_count++) {
ret = media_entity_graph_walk_init(&pipe->graph, mdev);
if (ret)
@@ -440,8 +455,6 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
}
}
- mutex_unlock(&mdev->graph_mutex);
-
return 0;
error:
@@ -452,9 +465,12 @@ error:
media_entity_graph_walk_start(graph, entity_err);
while ((entity_err = media_entity_graph_walk_next(graph))) {
- entity_err->stream_count--;
- if (entity_err->stream_count == 0)
- entity_err->pipe = NULL;
+ /* don't let the stream_count go negative */
+ if (entity_err->stream_count > 0) {
+ entity_err->stream_count--;
+ if (entity_err->stream_count == 0)
+ entity_err->pipe = NULL;
+ }
/*
* We haven't increased stream_count further than this
@@ -468,32 +484,53 @@ error_graph_walk_start:
if (!--pipe->streaming_count)
media_entity_graph_walk_cleanup(graph);
- mutex_unlock(&mdev->graph_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__media_entity_pipeline_start);
+__must_check int media_entity_pipeline_start(struct media_entity *entity,
+ struct media_pipeline *pipe)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+ ret = __media_entity_pipeline_start(entity, pipe);
+ mutex_unlock(&mdev->graph_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(media_entity_pipeline_start);
-void media_entity_pipeline_stop(struct media_entity *entity)
+void __media_entity_pipeline_stop(struct media_entity *entity)
{
- struct media_device *mdev = entity->graph_obj.mdev;
struct media_entity_graph *graph = &entity->pipe->graph;
struct media_pipeline *pipe = entity->pipe;
- mutex_lock(&mdev->graph_mutex);
WARN_ON(!pipe->streaming_count);
media_entity_graph_walk_start(graph, entity);
while ((entity = media_entity_graph_walk_next(graph))) {
- entity->stream_count--;
- if (entity->stream_count == 0)
- entity->pipe = NULL;
+ /* don't let the stream_count go negative */
+ if (entity->stream_count > 0) {
+ entity->stream_count--;
+ if (entity->stream_count == 0)
+ entity->pipe = NULL;
+ }
}
if (!--pipe->streaming_count)
media_entity_graph_walk_cleanup(graph);
+}
+EXPORT_SYMBOL_GPL(__media_entity_pipeline_stop);
+
+void media_entity_pipeline_stop(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+
+ mutex_lock(&mdev->graph_mutex);
+ __media_entity_pipeline_stop(entity);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_entity_pipeline_stop);
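
The pattern above is the standard locked/unlocked split: the existing entry
points keep their locking behaviour, while the new double-underscore variants
assume the caller already holds mdev->graph_mutex. A hedged sketch (the
driver function below is hypothetical, not part of this patch) of how a
caller that must start two pipelines under one mutex hold would use them:

    static int start_two_pipelines(struct media_entity *a,
                                   struct media_entity *b,
                                   struct media_pipeline *pipe)
    {
        struct media_device *mdev = a->graph_obj.mdev;
        int ret;

        mutex_lock(&mdev->graph_mutex);
        ret = __media_entity_pipeline_start(a, pipe);
        if (!ret) {
            ret = __media_entity_pipeline_start(b, pipe);
            if (ret)
                __media_entity_pipeline_stop(a);
        }
        mutex_unlock(&mdev->graph_mutex);

        return ret;
    }
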
@@ -783,6 +820,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags)
return ret;
}
+EXPORT_SYMBOL_GPL(__media_entity_setup_link);
int media_entity_setup_link(struct media_link *link, u32 flags)
{
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 8b5e0b3a92a0..4cac1fc233f2 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(debug,
#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "flexcop-pci"
-#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
+#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
struct flexcop_pci {
struct pci_dev *pdev;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 9400e996087b..df54e17ef864 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -186,7 +186,7 @@ MODULE_VERSION(BTTV_VERSION);
static ssize_t show_card(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct video_device *vfd = container_of(cd, struct video_device, dev);
+ struct video_device *vfd = to_video_device(cd);
struct bttv *btv = video_get_drvdata(vfd);
return sprintf(buf, "%d\n", btv ? btv->c.type : UNSET);
}
@@ -1726,22 +1726,15 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id id)
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
unsigned int i;
- int err = 0;
for (i = 0; i < BTTV_TVNORMS; i++)
if (id & bttv_tvnorms[i].v4l2_id)
break;
- if (i == BTTV_TVNORMS) {
- err = -EINVAL;
- goto err;
- }
-
+ if (i == BTTV_TVNORMS)
+ return -EINVAL;
btv->std = id;
set_tvnorm(btv, i);
-
-err:
-
- return err;
+ return 0;
}
static int bttv_g_std(struct file *file, void *priv, v4l2_std_id *id)
@@ -1770,12 +1763,9 @@ static int bttv_enum_input(struct file *file, void *priv,
{
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
- int rc = 0;
- if (i->index >= bttv_tvcards[btv->c.type].video_inputs) {
- rc = -EINVAL;
- goto err;
- }
+ if (i->index >= bttv_tvcards[btv->c.type].video_inputs)
+ return -EINVAL;
i->type = V4L2_INPUT_TYPE_CAMERA;
i->audioset = 0;
@@ -1799,10 +1789,7 @@ static int bttv_enum_input(struct file *file, void *priv,
}
i->std = BTTV_NORMS;
-
-err:
-
- return rc;
+ return 0;
}
static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
@@ -2334,6 +2321,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
return 0;
}
+static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
+ unsigned int *width_mask,
+ unsigned int *width_bias)
+{
+ if (fmt->flags & FORMAT_FLAGS_PLANAR) {
+ *width_mask = ~15; /* width must be a multiple of 16 pixels */
+ *width_bias = 8; /* nearest */
+ } else {
+ *width_mask = ~3; /* width must be a multiple of 4 pixels */
+ *width_bias = 2; /* nearest */
+ }
+}
+
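
The helper encodes round-to-nearest: (width + width_bias) & width_mask snaps
a requested width to the nearest multiple of 4 (packed formats) or 16
(planar formats), assuming limit_scaled_size_lock() applies the pair in that
usual form. A standalone illustration of the arithmetic:

    #include <stdio.h>

    /* round-to-nearest with the mask/bias scheme used above */
    static unsigned int round_width(unsigned int w, unsigned int mask,
                                    unsigned int bias)
    {
        return (w + bias) & mask;
    }

    int main(void)
    {
        unsigned int w;

        for (w = 300; w <= 304; w++)
            printf("%u -> packed %u, planar %u\n", w,
                   round_width(w, ~3u, 2), round_width(w, ~15u, 8));
        return 0;
    }
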
static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
@@ -2343,6 +2343,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
enum v4l2_field field;
__s32 width, height;
__s32 height2;
+ unsigned int width_mask, width_bias;
int rc;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -2375,9 +2376,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
width = f->fmt.pix.width;
height = f->fmt.pix.height;
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
rc = limit_scaled_size_lock(fh, &width, &height, field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 0);
if (0 != rc)
@@ -2410,6 +2411,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
__s32 width, height;
+ unsigned int width_mask, width_bias;
enum v4l2_field field;
retval = bttv_switch_type(fh, f->type);
@@ -2424,9 +2426,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
height = f->fmt.pix.height;
field = f->fmt.pix.field;
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 1);
if (0 != retval)
@@ -2434,8 +2437,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = field;
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-
/* update our state information */
fh->fmt = fmt;
fh->cap.field = f->fmt.pix.field;
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 4a90eee5e3bb..35bc9b2287b4 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -1688,9 +1688,9 @@ static int dst_get_tuning_algo(struct dvb_frontend *fe)
return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW;
}
-static int dst_get_frontend(struct dvb_frontend *fe)
+static int dst_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct dst_state *state = fe->demodulator_priv;
p->frequency = state->decode_freq;
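
This follows a tree-wide rework of the get_frontend operation: the DVB core
now hands the callback a properties pointer instead of having each demod
driver dereference fe->dtv_property_cache itself. Assumed shape of the op,
as implied by the hunk above (see include/media/dvb_frontend.h for the
authoritative definition):

    /* fragment of struct dvb_frontend_ops */
    int (*get_frontend)(struct dvb_frontend *fe,
                        struct dtv_frontend_properties *props);
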
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index d407244fd1bc..e69d338ab9be 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -318,7 +318,7 @@ static int microtune_mt7202dtf_request_firmware(struct dvb_frontend* fe, const s
return request_firmware(fw, name, &bt->bt->dev->dev);
}
-static struct sp887x_config microtune_mt7202dtf_config = {
+static const struct sp887x_config microtune_mt7202dtf_config = {
.demod_address = 0x70,
.request_firmware = microtune_mt7202dtf_request_firmware,
};
@@ -458,7 +458,7 @@ static void or51211_sleep(struct dvb_frontend * fe)
bttv_write_gpio(bt->bttv_nr, 0x0001, 0x0000);
}
-static struct or51211_config or51211_config = {
+static const struct or51211_config or51211_config = {
.demod_address = 0x15,
.request_firmware = or51211_request_firmware,
.setmode = or51211_setmode,
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 80319bb73d94..f041b6931ba8 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1139,7 +1139,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
u8 eeprom[256]; /* 24C02 i2c eeprom */
struct sp2_config sp2_config;
struct i2c_board_info info;
- struct cx23885_i2c *i2c_bus2 = &dev->i2c_bus[1];
+ struct cx23885_i2c *i2c_bus = &dev->i2c_bus[0];
/* attach CI */
memset(&sp2_config, 0, sizeof(sp2_config));
@@ -1151,7 +1151,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
info.addr = 0x40;
info.platform_data = &sp2_config;
request_module(info.type);
- client_ci = i2c_new_device(&i2c_bus2->i2c_adap, &info);
+ client_ci = i2c_new_device(&i2c_bus->i2c_adap, &info);
if (client_ci == NULL || client_ci->dev.driver == NULL)
return -ENODEV;
if (!try_module_get(client_ci->dev.driver->owner)) {
@@ -1988,8 +1988,8 @@ static int dvb_register(struct cx23885_tsport *port)
break;
case CX23885_BOARD_DVBSKY_T980C:
case CX23885_BOARD_TT_CT2_4500_CI:
- i2c_bus = &dev->i2c_bus[1];
- i2c_bus2 = &dev->i2c_bus[0];
+ i2c_bus = &dev->i2c_bus[0];
+ i2c_bus2 = &dev->i2c_bus[1];
/* attach frontend */
memset(&si2168_config, 0, sizeof(si2168_config));
@@ -2001,7 +2001,7 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x64;
info.platform_data = &si2168_config;
request_module(info.type);
- client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
+ client_demod = i2c_new_device(&i2c_bus2->i2c_adap, &info);
if (client_demod == NULL || client_demod->dev.driver == NULL)
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
@@ -2030,13 +2030,13 @@ static int dvb_register(struct cx23885_tsport *port)
port->i2c_client_tuner = client_tuner;
break;
case CX23885_BOARD_DVBSKY_S950C:
- i2c_bus = &dev->i2c_bus[1];
- i2c_bus2 = &dev->i2c_bus[0];
+ i2c_bus = &dev->i2c_bus[0];
+ i2c_bus2 = &dev->i2c_bus[1];
/* attach frontend */
fe0->dvb.frontend = dvb_attach(m88ds3103_attach,
&dvbsky_s950c_m88ds3103_config,
- &i2c_bus->i2c_adap, &adapter);
+ &i2c_bus2->i2c_adap, &adapter);
if (fe0->dvb.frontend == NULL)
break;
@@ -2301,7 +2301,8 @@ static int dvb_register(struct cx23885_tsport *port)
/* register everything */
ret = vb2_dvb_register_bus(&port->frontends, THIS_MODULE, port,
- &dev->pci->dev, adapter_nr, mfe_shared);
+ &dev->pci->dev, NULL,
+ adapter_nr, mfe_shared);
if (ret)
goto frontend_detach;
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index afb20756d7a5..851d2a9caed3 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1642,7 +1642,8 @@ static int dvb_register(struct cx8802_dev *dev)
/* register everything */
res = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
- &dev->pci->dev, adapter_nr, mfe_shared);
+ &dev->pci->dev, NULL, adapter_nr,
+ mfe_shared);
if (res)
goto frontend_detach;
return res;
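
Both this call and the cx23885 one above gain a new argument in the fifth
position; from the call sites it appears vb2_dvb_register_bus() now takes a
struct media_device pointer for media-controller registration, with NULL
meaning "no media device". Inferred prototype (hedged -- check
include/media/videobuf2-dvb.h for the real one):

    int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
                             struct module *module, void *adapter_priv,
                             struct device *device, struct media_device *mdev,
                             short *adapter_nr, int mfe_shared);
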
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 9d5b314142f1..6e995ef8c37e 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -690,7 +690,7 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type)
struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
struct stv6110x_config *tunerconf = (input->nr & 1) ?
&stv6110b : &stv6110a;
- struct stv6110x_devctl *ctl;
+ const struct stv6110x_devctl *ctl;
ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
if (!ctl) {
diff --git a/drivers/media/pci/ivtv/ivtv-queue.c b/drivers/media/pci/ivtv/ivtv-queue.c
index 7fde36e6d227..2128c2a8d7fd 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.c
+++ b/drivers/media/pci/ivtv/ivtv-queue.c
@@ -141,7 +141,7 @@ int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_
spin_unlock_irqrestore(&s->qlock, flags);
return -ENOMEM;
}
- while (bytes_available < needed_bytes) {
+ while (steal && bytes_available < needed_bytes) {
struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
u16 dma_xfer_cnt = buf->dma_xfer_cnt;
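
Without the added test, a caller passing steal == NULL dereferenced
steal->list.prev on the first loop iteration whenever the available bytes
ran short. The guard makes a NULL steal queue mean "fail with -ENOMEM
rather than raid another queue", e.g. (hypothetical call):

    /* no steal queue: give up instead of stealing buffers */
    if (ivtv_queue_move(s, &s->q_free, NULL, &s->q_full, needed) < 0)
        return -ENOMEM;
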
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 24152accc66c..4769469fe842 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Get user pages for DMA Xfer */
- err = get_user_pages_unlocked(current, current->mm,
- user_dma.uaddr, user_dma.page_count, 0, 1, dma->map);
+ err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
+ 1, dma->map);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 2b8e7b2f2b86..b094054cda6e 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -75,14 +75,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
/* Get user pages for DMA Xfer */
- y_pages = get_user_pages_unlocked(current, current->mm,
- y_dma.uaddr, y_dma.page_count, 0, 1,
- &dma->map[0]);
+ y_pages = get_user_pages_unlocked(y_dma.uaddr,
+ y_dma.page_count, 0, 1, &dma->map[0]);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
- uv_pages = get_user_pages_unlocked(current, current->mm,
- uv_dma.uaddr, uv_dma.page_count, 0, 1,
- &dma->map[y_pages]);
+ uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
+ uv_dma.page_count, 0, 1, &dma->map[y_pages]);
}
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
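
These hunks track an mm API change: get_user_pages_unlocked() dropped its
leading task_struct/mm_struct pair, since every caller passed current and
current->mm anyway and the helper can derive them itself. With the era's
five-argument form (later kernels fold write/force into a gup_flags word),
a caller reduces to a sketch like:

    /* write = 0, force = 1, mirroring the ivtv call sites above */
    static long map_user_buffer(unsigned long uaddr, unsigned long nr_pages,
                                struct page **pages)
    {
        return get_user_pages_unlocked(uaddr, nr_pages, 0, 1, pages);
    }
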
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 525ebfefeee8..2b667b315913 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -462,8 +462,8 @@ static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
}
if (vb2_dvb_register_bus(&ndev->frontends[num],
- THIS_MODULE, NULL,
- &ndev->pci_dev->dev, adapter_nr, 1)) {
+ THIS_MODULE, NULL,
+ &ndev->pci_dev->dev, NULL, adapter_nr, 1)) {
dev_dbg(&ndev->pci_dev->dev,
"%s(): unable to register DVB bus %d\n",
__func__, num);
@@ -771,10 +771,9 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
/* allocate device context */
ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
-
if (!ndev)
goto dev_alloc_err;
- memset(ndev, 0, sizeof(*ndev));
+
ndev->old_fw = old_firmware;
ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
if (!ndev->wq) {
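
The dropped memset() was redundant: kzalloc() is kmalloc() plus zeroing, so
ndev is already all-zero on success. Spelled out, the one-liner is
equivalent to:

    /* ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); is shorthand for: */
    ndev = kmalloc(sizeof(*ndev), GFP_KERNEL);
    if (ndev)
        memset(ndev, 0, sizeof(*ndev));
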
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index 039bed3cc919..4e783a68bf4a 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -57,7 +57,7 @@ static int tuner_attach_stv6110(struct ngene_channel *chan)
chan->dev->card_info->fe_config[chan->number];
struct stv6110x_config *tunerconf = (struct stv6110x_config *)
chan->dev->card_info->tuner_config[chan->number];
- struct stv6110x_devctl *ctl;
+ const struct stv6110x_devctl *ctl;
/* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
if (chan->number < 2)
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 0d2e2b217121..eff5e9f51ace 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -395,7 +395,8 @@ static int pt3_attach_fe(struct pt3_board *pt3, int i)
if (!try_module_get(cl->dev.driver->owner))
goto err_demod_i2c_unregister_device;
- if (!strncmp(cl->name, TC90522_I2C_DEV_SAT, sizeof(cl->name))) {
+ if (!strncmp(cl->name, TC90522_I2C_DEV_SAT,
+ strlen(TC90522_I2C_DEV_SAT))) {
struct qm1d1c0042_config tcfg;
tcfg = adap_conf[i].tuner_cfg.qm1d1c0042;
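
The old comparison ran over sizeof(cl->name) bytes, i.e. an exact-string
match, so a client name carrying a suffix after the device-name prefix
would never match; bounding it by strlen(TC90522_I2C_DEV_SAT) turns it into
a prefix match. Standalone illustration (the literal is assumed from
tc90522.h):

    #include <stdio.h>
    #include <string.h>

    #define TC90522_I2C_DEV_SAT "tc90522sat"  /* assumed literal */

    int main(void)
    {
        char name[20] = "tc90522sat0";        /* suffixed, like cl->name */

        printf("full-width match: %d\n",
               !strncmp(name, TC90522_I2C_DEV_SAT, sizeof(name)));
        printf("prefix match:     %d\n",
               !strncmp(name, TC90522_I2C_DEV_SAT,
                        strlen(TC90522_I2C_DEV_SAT)));
        return 0;
    }
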
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 29d2094c42a0..c480a7e87593 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -36,17 +36,23 @@
#include "xc5000.h"
#include "s5h1411.h"
-/* commly used strings */
-static char name_mute[] = "mute";
-static char name_radio[] = "Radio";
-static char name_tv[] = "Television";
-static char name_tv_mono[] = "TV (mono only)";
-static char name_comp[] = "Composite";
-static char name_comp1[] = "Composite1";
-static char name_comp2[] = "Composite2";
-static char name_comp3[] = "Composite3";
-static char name_comp4[] = "Composite4";
-static char name_svideo[] = "S-Video";
+/* Input names */
+const char * const saa7134_input_name[] = {
+ [SAA7134_INPUT_MUTE] = "mute",
+ [SAA7134_INPUT_RADIO] = "Radio",
+ [SAA7134_INPUT_TV] = "Television",
+ [SAA7134_INPUT_TV_MONO] = "TV (mono only)",
+ [SAA7134_INPUT_COMPOSITE] = "Composite",
+ [SAA7134_INPUT_COMPOSITE0] = "Composite0",
+ [SAA7134_INPUT_COMPOSITE1] = "Composite1",
+ [SAA7134_INPUT_COMPOSITE2] = "Composite2",
+ [SAA7134_INPUT_COMPOSITE3] = "Composite3",
+ [SAA7134_INPUT_COMPOSITE4] = "Composite4",
+ [SAA7134_INPUT_SVIDEO] = "S-Video",
+ [SAA7134_INPUT_SVIDEO0] = "S-Video0",
+ [SAA7134_INPUT_SVIDEO1] = "S-Video1",
+ [SAA7134_INPUT_COMPOSITE_OVER_SVIDEO] = "Composite over S-Video",
+};
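
With the shared table, an input's user-visible name is derived from its
type at enumeration time instead of being stored in every board entry. A
hedged sketch of the lookup (the handler shape is illustrative, not the
driver's actual VIDIOC_ENUMINPUT code):

    static void fill_input_name(struct v4l2_input *i,
                                const struct saa7134_input *in)
    {
        strlcpy(i->name, saa7134_input_name[in->type], sizeof(i->name));
    }
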
/* ------------------------------------------------------------------ */
/* board config info */
@@ -69,7 +75,7 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = "default",
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 0,
.amux = LINE1,
}},
@@ -84,22 +90,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -114,40 +118,38 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 0xe000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
.gpio = 0x8000,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
.gpio = 0x0000,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
.gpio = 0x4000,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
.gpio = 0x4000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x4000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x2000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x8000,
},
@@ -163,34 +165,33 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 0xe000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
.gpio = 0x0000,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
.gpio = 0x4000,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
.gpio = 0x4000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x4000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x2000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE2,
.gpio = 0x8000,
},
@@ -205,20 +206,19 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1, /* Composite signal on S-Video input */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -235,40 +235,38 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 0x1E000, /* Set GP16 and unused 15,14,13 to Output */
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
.gpio = 0x10000, /* GP16=1 selects TV input */
- .tv = 1,
},{
-/* .name = name_tv_mono,
+/* .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
.gpio = 0x0000,
- .tv = 1,
},{
-*/ .name = name_comp1, /* Composite signal on S-Video input */
+*/ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE2,
/* .gpio = 0x4000, */
},{
- .name = name_comp2, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 3,
.amux = LINE2,
/* .gpio = 0x4000, */
},{
- .name = name_svideo, /* S-Video signal on S-Video input */
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
/* .gpio = 0x4000, */
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x00000, /* GP16=0 selects FM radio antenna */
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x10000,
},
@@ -285,40 +283,38 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0xe000,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
.gpio = 0x8000,
- .tv = 1,
}, {
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
.gpio = 0x0000,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
.gpio = 0x4000,
}, {
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
.gpio = 0x4000,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x4000,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x2000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x8000,
},
@@ -334,21 +330,20 @@ struct saa7134_board saa7134_boards[] = {
.empress_addr = 0x20,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
.mpeg = SAA7134_MPEG_EMPRESS,
@@ -364,21 +359,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -390,35 +384,33 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
/* workaround for problems with normal TV sound */
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
},
},
@@ -432,32 +424,30 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = "CVid over SVid",
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -472,24 +462,23 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x820000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x20000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x20000,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x20000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x20000,
},
@@ -504,20 +493,19 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 4,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp2, /* CVideo over SVideo Connector */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE1,
}}
@@ -531,31 +519,29 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
/* workaround for problems with normal TV sound */
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -567,18 +553,17 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}},
},
[SAA7134_BOARD_CINERGY600] = {
@@ -590,25 +575,24 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 4,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp2, /* CVideo over SVideo Connector */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -622,25 +606,24 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
},
},
@@ -655,21 +638,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -681,18 +663,17 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 4,
.amux = LINE2,
- .tv = 1,
}},
},
[SAA7134_BOARD_ELSA_500TV] = {
@@ -703,19 +684,17 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 7,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 8,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 8,
.amux = LINE2,
- .tv = 1,
}},
},
[SAA7134_BOARD_ELSA_700TV] = {
@@ -726,21 +705,20 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 4,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 6,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 7,
.amux = LINE1,
}},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
},
},
@@ -753,21 +731,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 4,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
},
},
@@ -780,29 +757,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
.gpio = 0x0000,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 4,
.amux = LINE2,
.gpio = 0x0000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE2,
.gpio = 0x0000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x200000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.gpio = 0x0000,
},
@@ -815,18 +791,17 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}},
},
[SAA7134_BOARD_10MOONSTVMASTER] = {
@@ -839,34 +814,33 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0xe000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
.gpio = 0x0000,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
.gpio = 0x4000,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
.gpio = 0x4000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x4000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x2000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE2,
.gpio = 0x8000,
},
@@ -881,23 +855,23 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.empress_addr = 0x20,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 4,
.amux = LINE1,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_comp3,
+ .type = SAA7134_INPUT_COMPOSITE3,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_comp4,
+ .type = SAA7134_INPUT_COMPOSITE4,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -912,18 +886,17 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}},
},
[SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUS] = {
@@ -935,21 +908,20 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x06c00012,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x0ac20012,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
.gpio = 0x08c20012,
- .tv = 1,
}}, /* radio and probably mute is missing */
},
[SAA7134_BOARD_CRONOS_PLUS] = {
@@ -968,23 +940,23 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0xcf00,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.gpio = 2 << 14,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.gpio = 1 << 14,
},{
- .name = name_comp3,
+ .type = SAA7134_INPUT_COMPOSITE3,
.vmux = 0,
.gpio = 0 << 14,
},{
- .name = name_comp4,
+ .type = SAA7134_INPUT_COMPOSITE4,
.vmux = 0,
.gpio = 3 << 14,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.gpio = 2 << 14,
}},
@@ -999,34 +971,33 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x03,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x00,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x02,
}, {
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.amux = LINE1,
.gpio = 0x02,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x02,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
.gpio = 0x01,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x00,
},
@@ -1041,18 +1012,17 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.empress_addr = 0x20,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}},
.mpeg = SAA7134_MPEG_EMPRESS,
.video_out = CCIR656,
@@ -1068,22 +1038,21 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 4,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
},
},
@@ -1096,20 +1065,19 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_INACTIVE,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 1,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -1123,21 +1091,20 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -1150,21 +1117,20 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
},
@@ -1177,16 +1143,15 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -1199,30 +1164,28 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = "CVid over SVid",
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -1234,30 +1197,28 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = "CVid over SVid",
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -1270,30 +1231,28 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = "CVid over SVid",
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -1306,30 +1265,28 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x200000,
},
@@ -1343,10 +1300,10 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
}},
},
@@ -1360,10 +1317,9 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
} },
},
[SAA7134_BOARD_NOVAC_PRIMETV7133] = {
@@ -1375,15 +1331,14 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
}},
},
@@ -1396,29 +1351,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
},
@@ -1432,29 +1386,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
}, {
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
},
@@ -1467,12 +1420,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 7,
.amux = TV,
- .tv = 1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 7,
.amux = LINE1,
}},
@@ -1486,21 +1438,20 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -1512,25 +1463,24 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 4,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp2, /* CVideo over SVideo Connector */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE1,
}},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE2,
},
},
@@ -1544,29 +1494,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x808c0080,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x00080,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x00080,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2_LEFT,
- .tv = 1,
.gpio = 0x00080,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x80000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE2,
.gpio = 0x40000,
},
@@ -1580,21 +1529,20 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -1607,18 +1555,17 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}},
},
[SAA7134_BOARD_EMPIRE_PCI_TV_RADIO_LE] = {
@@ -1631,29 +1578,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x4000,
.inputs = {{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
.gpio = 0x8000,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x8000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
.gpio = 0x8000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
.gpio = 0x8000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x8000,
}
@@ -1672,29 +1618,28 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x03,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x00,
},{
- .name = name_comp,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 3,
.amux = LINE1,
.gpio = 0x02,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x02,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
.gpio = 0x01,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
.gpio = 0x00,
},
@@ -1709,29 +1654,28 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 0x00300003,
/* .gpiomask = 0x8c240003, */
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x01,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
.gpio = 0x02,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
.gpio = 0x02,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x00300001,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x01,
},
@@ -1745,21 +1689,20 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
},
},
@@ -1774,24 +1717,23 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x08000000,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x08000000,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x08000000,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x08000000,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x00000000,
},
@@ -1805,21 +1747,19 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -1834,25 +1774,24 @@ struct saa7134_board saa7134_boards[] = {
.rds_addr = 0x10,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 4,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp2, /* CVideo over SVideo Connector */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -1866,29 +1805,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x1ce780,
.inputs = {{
- .name = name_svideo,
- .vmux = 0, /* CVideo over SVideo Connector - ok? */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
+ .vmux = 0,
.amux = LINE1,
.gpio = 0x008080,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x008080,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x008080,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x80000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE2,
.gpio = 0x0c8000,
},
@@ -1903,20 +1841,19 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_INACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 1,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -1931,22 +1868,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -1961,25 +1896,24 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
},
@@ -1995,26 +1929,25 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 0x00200000,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
.gpio = 0x200000, /* GPIO21=High for TV input */
- .tv = 1,
},{
- .name = name_comp1, /* Composite signal on S-Video input */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo, /* S-Video signal on S-Video input */
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x000000, /* GPIO21=Low for FM radio antenna */
},
@@ -2028,11 +1961,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -2049,20 +1982,19 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -2075,16 +2007,15 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -2098,29 +2029,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x0700,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x000,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x200, /* gpio by DScaler */
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 0,
.amux = LINE1,
.gpio = 0x200,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
.gpio = 0x100,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x000,
},
@@ -2135,26 +2065,25 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x00200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
.gpio = 0x200000, /* GPIO21=High for TV input */
- .tv = 1,
},{
- .name = name_svideo, /* S-Video signal on S-Video input */
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
},{
- .name = name_comp1, /* Composite signal on S-Video input */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x000000, /* GPIO21=Low for FM radio antenna */
},
@@ -2168,29 +2097,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = 0x60,
.gpiomask = 0x8c1880,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 0,
.amux = LINE1,
.gpio = 0x800800,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x801000,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x800000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x880000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE2,
.gpio = 0x840000,
},
@@ -2213,29 +2141,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = 0x60,
.gpiomask = 0x0700,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x000,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x200, /* gpio by DScaler */
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 0,
.amux = LINE1,
.gpio = 0x200,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
.gpio = 0x100,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x000,
},
@@ -2248,30 +2175,28 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.radio = {
- .name = name_radio, /* radio unconfirmed */
+ .type = SAA7134_INPUT_RADIO, /* radio unconfirmed */
.amux = LINE2,
},
},
@@ -2286,24 +2211,23 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 1 << 21,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
.gpio = 0x0000000,
- .tv = 1,
},{
- .name = name_comp1, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
.gpio = 0x0000000,
},{
- .name = name_svideo, /* S-Video input */
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x0000000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -2322,29 +2246,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr= ADDR_UNSET,
.gpiomask = 0x00010003,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x01,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
.gpio = 0x02,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE2,
.gpio = 0x02,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
.gpio = 0x00010003,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x01,
},
@@ -2362,21 +2285,20 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -2392,34 +2314,33 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00200003,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x00200003,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
.gpio = 0x00200003,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x00200003,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x00200003,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x00200003,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x00200003,
},
@@ -2434,16 +2355,15 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -2458,16 +2378,15 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -2481,11 +2400,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -2499,27 +2418,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.empress_addr = 0x21,
.inputs = {{
- .name = "Composite 0",
+ .type = SAA7134_INPUT_COMPOSITE0,
.vmux = 0,
.amux = LINE1,
},{
- .name = "Composite 1",
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE2,
},{
- .name = "Composite 2",
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 2,
.amux = LINE1,
},{
- .name = "Composite 3",
+ .type = SAA7134_INPUT_COMPOSITE3,
.vmux = 3,
.amux = LINE2,
},{
- .name = "S-Video 0",
+ .type = SAA7134_INPUT_SVIDEO0,
+
.vmux = 8,
.amux = LINE1,
},{
- .name = "S-Video 1",
+ .type = SAA7134_INPUT_SVIDEO1,
.vmux = 9,
.amux = LINE2,
}},
@@ -2538,27 +2458,27 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = "Composite 0",
+ .type = SAA7134_INPUT_COMPOSITE0,
.vmux = 0,
.amux = LINE1,
},{
- .name = "Composite 1",
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE2,
},{
- .name = "Composite 2",
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 2,
.amux = LINE1,
},{
- .name = "Composite 3",
+ .type = SAA7134_INPUT_COMPOSITE3,
.vmux = 3,
.amux = LINE2,
},{
- .name = "S-Video 0",
+ .type = SAA7134_INPUT_SVIDEO0,
.vmux = 8,
.amux = LINE1,
},{
- .name = "S-Video 1",
+ .type = SAA7134_INPUT_SVIDEO1,
.vmux = 9,
.amux = LINE2,
}},
@@ -2572,20 +2492,19 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1, /* Composite signal on S-Video input */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -2604,11 +2523,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -2622,16 +2541,15 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
}},
@@ -2645,25 +2563,24 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x080200000,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 4,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE2,
}, {
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -2678,29 +2595,28 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 1 << 21,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x0000000,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
.gpio = 0x0200000,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.amux = LINE2,
.gpio = 0x0200000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x0200000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -2717,21 +2633,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0xe880c0,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -2745,16 +2660,15 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -2770,21 +2684,20 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x0200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -2798,25 +2711,24 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 1 << 21,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 3,
.amux = LINE2, /* unconfirmed, taken from Philips driver */
},{
- .name = name_comp2,
- .vmux = 0, /* untested, Composite over S-Video */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
+ .vmux = 0, /* untested */
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -2834,17 +2746,16 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x80200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_svideo, /* NOT tested */
+ .type = SAA7134_INPUT_SVIDEO, /* NOT tested */
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -2861,26 +2772,25 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 0x00200000,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv, /* Analog broadcast/cable TV */
+ .type = SAA7134_INPUT_TV, /* Analog broadcast/cable TV */
.vmux = 1,
.amux = TV,
.gpio = 0x200000, /* GPIO21=High for TV input */
- .tv = 1,
},{
- .name = name_svideo, /* S-Video signal on S-Video input */
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
},{
- .name = name_comp1, /* Composite signal on S-Video input */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x000000, /* GPIO21=Low for FM radio antenna */
},
@@ -2894,11 +2804,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -2914,11 +2824,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_comp1, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo, /* S-Video signal on S-Video input */
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -2933,10 +2843,9 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x00600000, /* Bit 21 0=Radio, Bit 22 0=TV */
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x00200000,
}},
},
@@ -2950,25 +2859,24 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 1 << 21,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -2983,21 +2891,20 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 1 << 21,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -3012,16 +2919,15 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -3052,17 +2958,16 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0xca60000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 4,
.amux = TV,
- .tv = 1,
.gpio = 0x04a61000,
},{
- .name = name_comp2, /* Composite SVIDEO (B/W if signal is carried with SVIDEO) */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 1,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 9, /* 9 is correct as S-VIDEO1 according to a169.inf! */
.amux = LINE1,
}},
@@ -3086,26 +2991,25 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x00600000, /* Bit 21 0=Radio, Bit 22 0=TV */
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
.gpio = 0x200000, /* GPIO21=High for TV input */
- .tv = 1,
},{
- .name = name_svideo, /* S-Video signal on S-Video input */
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
},{
- .name = name_comp1, /* Composite signal on S-Video input */
+ .type = SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 3,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x000000, /* GPIO21=Low for FM radio antenna */
},
@@ -3121,40 +3025,38 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 0xe000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
.gpio = 0x8000,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
.gpio = 0x0000,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
.gpio = 0x4000,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
.gpio = 0x4000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x4000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x2000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x8000,
},
@@ -3168,16 +3070,15 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -3193,11 +3094,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_comp1, /* Composite input */
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo, /* S-Video signal on S-Video input */
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -3211,25 +3112,24 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
},
@@ -3244,21 +3144,20 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
},
},
@@ -3272,21 +3171,20 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT| TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 4,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
},
},
@@ -3301,25 +3199,24 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x000200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 4,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE2,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -3335,34 +3232,33 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x03,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x00,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
.gpio = 0x00,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
.gpio = 0x00,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x00,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x01,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
.gpio = 0x00,
},
@@ -3378,16 +3274,15 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
}},
@@ -3405,22 +3300,21 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x0200100,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x0000100,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200100,
},
@@ -3438,22 +3332,21 @@ struct saa7134_board saa7134_boards[] = {
.ts_force_val = 1,
.gpiomask = 0x0800100, /* GPIO 21 is an INPUT */
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x0000100,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0800100, /* GPIO 23 HI for FM */
},
@@ -3470,22 +3363,21 @@ struct saa7134_board saa7134_boards[] = {
.ts_type = SAA7134_MPEG_TS_SERIAL,
.gpiomask = 0x0800100, /* GPIO 21 is an INPUT */
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x0000100,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0800100, /* GPIO 23 HI for FM */
},
@@ -3499,16 +3391,15 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
}},
@@ -3523,33 +3414,31 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = 3,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 7,
.amux = 4,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = 2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 0,
.amux = 2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
/* .gpio = 0x00300001,*/
.gpio = 0x20000,

},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = 0,
},
},
@@ -3562,32 +3451,30 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = 3,
- .tv = 1,
},{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 7,
.amux = 4,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = 2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 0,
.amux = 2,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x20000,

},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = 0,
},
},
@@ -3600,29 +3487,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x7000,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = 1,
- .tv = 1,
.gpio = 0x50000,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = 2,
.gpio = 0x2000,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = 2,
.gpio = 0x2000,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.vmux = 1,
.amux = 1,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.gpio = 0xf000,
.amux = 0,
},
@@ -3635,26 +3521,25 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = 0x61,
.radio_addr = 0x60,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.vmux = 1,
.amux = LINE1,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
.gpio = 0x43000,
},
@@ -3668,16 +3553,15 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
}},
@@ -3693,21 +3577,20 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x0200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -3721,16 +3604,15 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 1<<21,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE2,
}},
@@ -3746,10 +3628,9 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x0200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x0200000,
}},
},
@@ -3764,29 +3645,28 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 1 << 21,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x0000000,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
.gpio = 0x0200000,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.amux = LINE2,
.gpio = 0x0200000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x0200000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -3800,26 +3680,25 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 1 << 21,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x0000000,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
}, {
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -3832,25 +3711,24 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
},
},
@@ -3864,24 +3742,23 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x7000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
.gpio = 0x0000,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x2000,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x2000,
}},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE2,
.gpio = 0x3000,
},
@@ -3896,10 +3773,9 @@ struct saa7134_board saa7134_boards[] = {
.tda829x_conf = { .lna_cfg = TDA8290_LNA_OFF },
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv, /* FIXME: analog tv untested */
+ .type = SAA7134_INPUT_TV, /* FIXME: analog tv untested */
.vmux = 1,
.amux = TV,
- .tv = 1,
}},
},
[SAA7134_BOARD_AVERMEDIA_M135A] = {
@@ -3912,26 +3788,25 @@ struct saa7134_board saa7134_boards[] = {
.tda829x_conf = { .lna_cfg = TDA8290_LNA_GP0_HIGH_OFF },
.gpiomask = 0x020200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x00200000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x01,
},
@@ -3946,26 +3821,25 @@ struct saa7134_board saa7134_boards[] = {
.tda829x_conf = { .lna_cfg = TDA8290_LNA_OFF },
.gpiomask = 0x020200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x00200000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x01,
},
@@ -3981,21 +3855,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
},
@@ -4010,18 +3883,17 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}},
},
[SAA7134_BOARD_BEHOLD_403FM] = {
@@ -4035,21 +3907,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4065,18 +3936,17 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}},
},
[SAA7134_BOARD_BEHOLD_405FM] = {
@@ -4092,21 +3962,20 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4122,20 +3991,19 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0xc0c000,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
.gpio = 0xc0c000,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
.gpio = 0xc0c000,
}},
},
@@ -4151,24 +4019,23 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0xc0c000,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
.gpio = 0xc0c000,
},{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
.gpio = 0xc0c000,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0xc0c000,
},
@@ -4185,16 +4052,15 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
@@ -4211,25 +4077,24 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4246,25 +4111,24 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4280,21 +4144,20 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4311,21 +4174,20 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4342,21 +4204,20 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4372,24 +4233,23 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x000A8004,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
.gpio = 0x000A8004,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
.gpio = 0x000A8000,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x000A8000,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x000A8000,
},
@@ -4404,21 +4264,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4432,21 +4291,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4460,21 +4318,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4488,21 +4345,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4517,21 +4373,20 @@ struct saa7134_board saa7134_boards[] = {
.rds_addr = 0x10,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4546,21 +4401,20 @@ struct saa7134_board saa7134_boards[] = {
.rds_addr = 0x10,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4575,21 +4429,20 @@ struct saa7134_board saa7134_boards[] = {
.rds_addr = 0x10,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4604,21 +4457,20 @@ struct saa7134_board saa7134_boards[] = {
.rds_addr = 0x10,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
},{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
},{
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -4636,21 +4488,20 @@ struct saa7134_board saa7134_boards[] = {
.empress_addr = 0x20,
.tda9887_conf = TDA9887_PRESENT,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
.mpeg = SAA7134_MPEG_EMPRESS,
@@ -4673,21 +4524,20 @@ struct saa7134_board saa7134_boards[] = {
.empress_addr = 0x20,
.tda9887_conf = TDA9887_PRESENT,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
.mpeg = SAA7134_MPEG_EMPRESS,
@@ -4712,21 +4562,20 @@ struct saa7134_board saa7134_boards[] = {
.empress_addr = 0x20,
.tda9887_conf = TDA9887_PRESENT,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
.mpeg = SAA7134_MPEG_EMPRESS,
@@ -4747,21 +4596,20 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x0200000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8, /* untested */
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -4776,30 +4624,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0xf000,
.inputs = {{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE2,
.gpio = 0x0000,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x2000,
- .tv = 1
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x2000,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x1000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE2,
.gpio = 0x6000,
},
@@ -4813,11 +4659,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
@@ -4832,16 +4678,15 @@ struct saa7134_board saa7134_boards[] = {
.tda829x_conf = { .lna_cfg = TDA8290_LNA_OFF },
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
@@ -4857,21 +4702,20 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x0200000,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -4885,21 +4729,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
},
},
@@ -4912,21 +4755,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
}, {
- .name = name_comp,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 0,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
},
},
@@ -4938,16 +4780,15 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
@@ -4962,21 +4803,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
}
},
@@ -4990,11 +4830,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = { {
- .name = name_comp,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
} },
@@ -5009,21 +4849,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 4,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
},
},
@@ -5038,21 +4877,20 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -5067,21 +4905,20 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 1 << 21,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 0,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -5097,21 +4934,20 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 1 << 21,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 0,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -5125,29 +4961,28 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x801a8087,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
.gpio = 0x624000,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
.gpio = 0x624000,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 1,
.amux = LINE1,
.gpio = 0x624000,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x624001,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
},
},
@@ -5161,16 +4996,15 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.mpeg = SAA7134_MPEG_DVB,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 4,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
@@ -5186,25 +5020,24 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.gpiomask = 0x0200000,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
}, {
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 0,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0200000,
},
@@ -5218,30 +5051,29 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = 0x60,
.gpiomask = 0x80000700,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
.gpio = 0x100,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x200,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x200,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.vmux = 1,
.amux = LINE1,
.gpio = 0x100,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.vmux = 8,
.amux = 2,
},
@@ -5257,18 +5089,17 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.ts_type = SAA7134_MPEG_TS_PARALLEL,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
#if 0 /* FIXME */
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x200,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x200,
@@ -5276,14 +5107,14 @@ struct saa7134_board saa7134_boards[] = {
} },
#if 0
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.vmux = 1,
.amux = LINE1,
.gpio = 0x100,
},
#endif
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.vmux = 0,
.amux = TV,
},
@@ -5298,24 +5129,23 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 0x00300003,
/* .gpiomask = 0x8c240003, */
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x01,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
.gpio = 0x02,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x00300001,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
.gpio = 0x01,
},
@@ -5331,29 +5161,28 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x03,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x00,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x00,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x00,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
.gpio = 0x01,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
.gpio = 0x00,
},
@@ -5368,11 +5197,11 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = { {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8, /* Not tested */
.amux = LINE1
} },
@@ -5387,21 +5216,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 2,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 9,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
},
},
@@ -5416,13 +5244,12 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.ts_type = SAA7134_MPEG_TS_PARALLEL,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
} },
.radio = { /* untested */
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
},
},
@@ -5436,16 +5263,15 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 4,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
@@ -5459,10 +5285,10 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_DVB,
.inputs = { {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
} },
},
@@ -5479,25 +5305,24 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.gpiomask = 0x00008000,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE2,
},
},
@@ -5512,7 +5337,7 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x389c00,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x01fc00,
@@ -5529,21 +5354,20 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.ts_type = SAA7134_MPEG_TS_PARALLEL,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 2,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 9,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
},
},
@@ -5556,21 +5380,20 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 2,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 9,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
},
},
@@ -5584,16 +5407,15 @@ struct saa7134_board saa7134_boards[] = {
.tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
@@ -5607,25 +5429,24 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = 0x60,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = TV,
},
},
@@ -5642,29 +5463,28 @@ struct saa7134_board saa7134_boards[] = {
.mpeg = SAA7134_MPEG_DVB,
.ts_type = SAA7134_MPEG_TS_PARALLEL,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
.gpio = 0x00050000,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x00050000,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
.gpio = 0x00050000,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x00050000,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.vmux = 0,
.amux = TV,
.gpio = 0x00050000,
@@ -5681,21 +5501,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x00008000,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
},
@@ -5710,21 +5529,20 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x00008000,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = LINE2,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 1,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
},
@@ -5736,15 +5554,15 @@ struct saa7134_board saa7134_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE1,
}, {
- .name = name_comp3,
+ .type = SAA7134_INPUT_COMPOSITE3,
.vmux = 2,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
@@ -5760,21 +5578,20 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 1 << 21,
.ts_type = SAA7134_MPEG_TS_PARALLEL,
.inputs = { {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 3,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0000000,
},
@@ -5790,7 +5607,7 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x618E700,
.inputs = {{
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE1,
.gpio = 0x6010000,
@@ -5809,21 +5626,20 @@ struct saa7134_board saa7134_boards[] = {
.gpiomask = 1 << 11,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = TV,
- .tv = 1,
}, {
- .name = name_comp,
+ .type = SAA7134_INPUT_COMPOSITE,
.vmux = 4,
.amux = LINE1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE1,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = TV,
.gpio = 0x0000800,
},
@@ -5837,16 +5653,15 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.mpeg = SAA7134_MPEG_GO7007,
.inputs = { {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
}, {
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 3,
.amux = TV,
- .tv = 1,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 6,
.amux = LINE1,
} },
@@ -5862,25 +5677,24 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.inputs = {{
- .name = name_tv,
+ .type = SAA7134_INPUT_TV,
.vmux = 1,
.amux = LINE2,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 0,
.amux = LINE2,
}, {
- .name = name_comp2,
+ .type = SAA7134_INPUT_COMPOSITE2,
.vmux = 3,
.amux = LINE2,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
} },
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
},
},
@@ -5893,34 +5707,62 @@ struct saa7134_board saa7134_boards[] = {
.radio_addr = ADDR_UNSET,
.gpiomask = 0x0d,
.inputs = {{
- .name = name_tv_mono,
+ .type = SAA7134_INPUT_TV_MONO,
.vmux = 1,
.amux = LINE1,
.gpio = 0x00,
- .tv = 1,
}, {
- .name = name_comp1,
+ .type = SAA7134_INPUT_COMPOSITE1,
.vmux = 3,
.amux = LINE2,
.gpio = 0x08,
}, {
- .name = name_svideo,
+ .type = SAA7134_INPUT_SVIDEO,
.vmux = 8,
.amux = LINE2,
.gpio = 0x08,
} },
.radio = {
- .name = name_radio,
+ .type = SAA7134_INPUT_RADIO,
.amux = LINE1,
.gpio = 0x04,
},
.mute = {
- .name = name_mute,
+ .type = SAA7134_INPUT_MUTE,
.amux = LINE1,
.gpio = 0x08,
},
},
-
+ [SAA7134_BOARD_SNAZIO_TVPVR_PRO] = {
+ .name = "SnaZio* TVPVR PRO",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 1 << 21,
+ .inputs = { {
+ .type = SAA7134_INPUT_TV,
+ .vmux = 1,
+ .amux = TV,
+ .gpio = 0x0000000,
+ }, {
+ .type = SAA7134_INPUT_COMPOSITE1,
+ .vmux = 3,
+ .amux = LINE2,
+ .gpio = 0x0000000,
+ }, {
+ .type = SAA7134_INPUT_SVIDEO,
+ .vmux = 8,
+ .amux = LINE2,
+ .gpio = 0x0000000,
+ } },
+ .radio = {
+ .type = SAA7134_INPUT_RADIO,
+ .amux = TV,
+ .gpio = 0x0200000,
+ },
+ },
};
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -7191,6 +7033,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x6f3a,
.driver_data = SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM,
}, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x1779, /* V One Multimedia PTE Ltd */
+ .subdevice = 0x13cf,
+ .driver_data = SAA7134_BOARD_SNAZIO_TVPVR_PRO,
+ }, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -7721,6 +7569,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_H7:
case SAA7134_BOARD_BEHOLD_A7:
case SAA7134_BOARD_KWORLD_PC150U:
+ case SAA7134_BOARD_SNAZIO_TVPVR_PRO:
dev->has_remote = SAA7134_REMOTE_I2C;
break;
case SAA7134_BOARD_AVERMEDIA_A169_B:
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index e227b02cc122..c0e1780ec831 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -112,7 +112,7 @@ int (*saa7134_dmasound_exit)(struct saa7134_dev *dev);
printk(KERN_DEBUG pr_fmt("irq: " fmt), ## arg); \
} while (0)
-void saa7134_track_gpio(struct saa7134_dev *dev, char *msg)
+void saa7134_track_gpio(struct saa7134_dev *dev, const char *msg)
{
unsigned long mode,status;
@@ -806,6 +806,154 @@ static void must_configure_manually(int has_eeprom)
}
}
+static void saa7134_unregister_media_device(struct saa7134_dev *dev)
+{
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ if (!dev->media_dev)
+ return;
+ media_device_unregister(dev->media_dev);
+ media_device_cleanup(dev->media_dev);
+ kfree(dev->media_dev);
+ dev->media_dev = NULL;
+#endif
+}
+
+static void saa7134_media_release(struct saa7134_dev *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int i;
+
+ for (i = 0; i < SAA7134_INPUT_MAX + 1; i++)
+ media_device_unregister_entity(&dev->input_ent[i]);
+#endif
+}
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+static void saa7134_create_entities(struct saa7134_dev *dev)
+{
+ int ret, i;
+ struct media_entity *entity;
+ struct media_entity *decoder = NULL;
+
+ /* Check if it is using an external analog TV demod */
+ media_device_for_each_entity(entity, dev->media_dev) {
+ if (entity->function == MEDIA_ENT_F_ATV_DECODER) {
+ decoder = entity;
+ break;
+ }
+ }
+
+ /*
+	 * The saa713x is not using an external ATV demod.
+	 * Register the internal one.
+ */
+ if (!decoder) {
+ dev->demod.name = "saa713x";
+ dev->demod_pad[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ dev->demod_pad[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ dev->demod_pad[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ dev->demod.function = MEDIA_ENT_F_ATV_DECODER;
+
+ ret = media_entity_pads_init(&dev->demod, DEMOD_NUM_PADS,
+ dev->demod_pad);
+ if (ret < 0)
+ pr_err("failed to initialize demod pad!\n");
+
+ ret = media_device_register_entity(dev->media_dev, &dev->demod);
+ if (ret < 0)
+ pr_err("failed to register demod entity!\n");
+
+ dev->decoder = &dev->demod;
+ } else {
+ dev->decoder = decoder;
+ }
+
+ /* Initialize Video, VBI and Radio pads */
+ dev->video_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&dev->video_dev->entity, 1,
+ &dev->video_pad);
+ if (ret < 0)
+ pr_err("failed to initialize video media entity!\n");
+
+ dev->vbi_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&dev->vbi_dev->entity, 1,
+ &dev->vbi_pad);
+ if (ret < 0)
+ pr_err("failed to initialize vbi media entity!\n");
+
+ /* Create entities for each input connector */
+ for (i = 0; i < SAA7134_INPUT_MAX; i++) {
+ struct media_entity *ent = &dev->input_ent[i];
+ struct saa7134_input *in = &card_in(dev, i);
+
+ if (in->type == SAA7134_NO_INPUT)
+ break;
+
+		/* Composite-over-S-Video shares the S-Video connector; skip it */
+ if (in->type == SAA7134_INPUT_COMPOSITE_OVER_SVIDEO)
+ continue;
+
+ ent->name = saa7134_input_name[in->type];
+ ent->flags = MEDIA_ENT_FL_CONNECTOR;
+ dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ switch (in->type) {
+ case SAA7134_INPUT_COMPOSITE:
+ case SAA7134_INPUT_COMPOSITE0:
+ case SAA7134_INPUT_COMPOSITE1:
+ case SAA7134_INPUT_COMPOSITE2:
+ case SAA7134_INPUT_COMPOSITE3:
+ case SAA7134_INPUT_COMPOSITE4:
+ ent->function = MEDIA_ENT_F_CONN_COMPOSITE;
+ break;
+ case SAA7134_INPUT_SVIDEO:
+ case SAA7134_INPUT_SVIDEO0:
+ case SAA7134_INPUT_SVIDEO1:
+ ent->function = MEDIA_ENT_F_CONN_SVIDEO;
+ break;
+ default:
+ /*
+ * SAA7134_INPUT_TV and SAA7134_INPUT_TV_MONO.
+ *
+			 * Please note that neither SAA7134_INPUT_MUTE nor
+			 * SAA7134_INPUT_RADIO is defined at
+			 * saa7134_board.input.
+ */
+ ent->function = MEDIA_ENT_F_CONN_RF;
+ break;
+ }
+
+ ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
+ if (ret < 0)
+ pr_err("failed to initialize input pad[%d]!\n", i);
+
+ ret = media_device_register_entity(dev->media_dev, ent);
+ if (ret < 0)
+ pr_err("failed to register input entity %d!\n", i);
+ }
+
+ /* Create input for Radio RF connector */
+ if (card_has_radio(dev)) {
+ struct saa7134_input *in = &saa7134_boards[dev->board].radio;
+ struct media_entity *ent = &dev->input_ent[i];
+
+ ent->name = saa7134_input_name[in->type];
+ ent->flags = MEDIA_ENT_FL_CONNECTOR;
+ dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
+ ent->function = MEDIA_ENT_F_CONN_RF;
+
+ ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
+ if (ret < 0)
+ pr_err("failed to initialize input pad[%d]!\n", i);
+
+ ret = media_device_register_entity(dev->media_dev, ent);
+ if (ret < 0)
+ pr_err("failed to register input entity %d!\n", i);
+ }
+}
+#endif
+
static struct video_device *vdev_init(struct saa7134_dev *dev,
struct video_device *template,
char *type)
@@ -826,6 +974,8 @@ static struct video_device *vdev_init(struct saa7134_dev *dev,
static void saa7134_unregister_video(struct saa7134_dev *dev)
{
+ saa7134_media_release(dev);
+
if (dev->video_dev) {
if (video_is_registered(dev->video_dev))
video_unregister_device(dev->video_dev);
@@ -889,6 +1039,19 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
if (NULL == dev)
return -ENOMEM;
+ dev->nr = saa7134_devcount;
+ sprintf(dev->name, "saa%x[%d]", pci_dev->device, dev->nr);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->media_dev = kzalloc(sizeof(*dev->media_dev), GFP_KERNEL);
+ if (!dev->media_dev) {
+ err = -ENOMEM;
+ goto fail0;
+ }
+ media_device_pci_init(dev->media_dev, pci_dev, dev->name);
+ dev->v4l2_dev.mdev = dev->media_dev;
+#endif
+
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err)
goto fail0;
@@ -900,9 +1063,6 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
goto fail1;
}
- dev->nr = saa7134_devcount;
- sprintf(dev->name,"saa%x[%d]",pci_dev->device,dev->nr);
-
/* pci quirks */
if (pci_pci_problems) {
if (pci_pci_problems & PCIPCI_TRITON)
@@ -1102,6 +1262,15 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
dev->name, video_device_node_name(dev->radio_dev));
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+ saa7134_create_entities(dev);
+
+ err = v4l2_mc_create_media_graph(dev->media_dev);
+ if (err) {
+ pr_err("failed to create media graph\n");
+ goto fail5;
+ }
+#endif
/* everything worked */
saa7134_devcount++;
@@ -1109,6 +1278,18 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
saa7134_dmasound_init(dev);
request_submodules(dev);
+
+ /*
+	 * Do this at the end, to reduce dynamic configuration changes during
+	 * device init. Yet, as request_modules() can be async, the
+	 * topology will likely change after loading the saa7134 subdrivers.
+ */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ err = media_device_register(dev->media_dev);
+ if (err)
+ goto fail5;
+#endif
+
return 0;
fail5:
@@ -1126,6 +1307,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
fail1:
v4l2_device_unregister(&dev->v4l2_dev);
fail0:
+#ifdef CONFIG_MEDIA_CONTROLLER
+ kfree(dev->media_dev);
+#endif
kfree(dev);
return err;
}
@@ -1188,9 +1372,10 @@ static void saa7134_finidev(struct pci_dev *pci_dev)
release_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0));
-
v4l2_device_unregister(&dev->v4l2_dev);
+ saa7134_unregister_media_device(dev);
+
/* free memory */
kfree(dev);
}
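
The saa7134_create_entities() hunk above follows the standard media-controller registration sequence: fill in the entity, describe its pads, call media_entity_pads_init(), then media_device_register_entity(). A minimal sketch of that sequence for a single connector with one source pad, using only calls that appear in the patch (the helper name is hypothetical):

	/* Hypothetical helper: register one connector entity with a single
	 * source pad; mirrors the pattern used by saa7134_create_entities(). */
	static int register_connector(struct media_device *mdev,
				      struct media_entity *ent,
				      struct media_pad *pad,
				      const char *name, u32 function)
	{
		int ret;

		ent->name = name;
		ent->function = function;
		ent->flags = MEDIA_ENT_FL_CONNECTOR;
		pad->flags = MEDIA_PAD_FL_SOURCE;

		ret = media_entity_pads_init(ent, 1, pad);
		if (ret < 0)
			return ret;

		return media_device_register_entity(mdev, ent);
	}
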
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index 101ba8729416..db987e5b93eb 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1883,8 +1883,15 @@ static int dvb_init(struct saa7134_dev *dev)
fe0->dvb.frontend->callback = saa7134_tuner_callback;
/* register everything else */
+#ifndef CONFIG_MEDIA_CONTROLLER_DVB
ret = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
- &dev->pci->dev, adapter_nr, 0);
+ &dev->pci->dev, NULL,
+ adapter_nr, 0);
+#else
+ ret = vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
+ &dev->pci->dev, dev->media_dev,
+ adapter_nr, 0);
+#endif
/* this sequence is necessary to make the tda1004x load its firmware
* and to enter analog mode of hybrid boards
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 56b932c97196..ca417a454d67 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -189,6 +189,7 @@ static const struct v4l2_ioctl_ops ts_ioctl_ops = {
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_frequency = saa7134_g_frequency,
@@ -286,7 +287,7 @@ static int empress_init(struct saa7134_dev *dev)
* transfers that do not start at the beginning of a page. A USERPTR
* can start anywhere in a page, so USERPTR support is a no-go.
*/
- q->io_modes = VB2_MMAP | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
q->drv_priv = &dev->ts_q;
q->ops = &saa7134_empress_qops;
q->gfp_flags = GFP_DMA32;
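
Wiring up vb2_ioctl_expbuf and adding VB2_DMABUF to io_modes lets userspace export an MMAP buffer as a DMABUF file descriptor via VIDIOC_EXPBUF. A small userspace sketch of that ioctl, assuming the queue has already been set up with VIDIOC_REQBUFS in MMAP mode:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Export buffer 0 of an MMAP capture queue as a DMABUF fd.
	 * 'fd' is an open /dev/videoN descriptor; returns the dmabuf fd,
	 * or -1 on error. */
	int export_capture_buffer(int fd)
	{
		struct v4l2_exportbuffer expbuf;

		memset(&expbuf, 0, sizeof(expbuf));
		expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		expbuf.index = 0;

		if (ioctl(fd, VIDIOC_EXPBUF, &expbuf) < 0)
			return -1;

		return expbuf.fd;
	}
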
diff --git a/drivers/media/pci/saa7134/saa7134-go7007.c b/drivers/media/pci/saa7134/saa7134-go7007.c
index 8a2abb34186b..2799538e2d7e 100644
--- a/drivers/media/pci/saa7134/saa7134-go7007.c
+++ b/drivers/media/pci/saa7134/saa7134-go7007.c
@@ -378,7 +378,7 @@ static int saa7134_go7007_send_firmware(struct go7007 *go, u8 *data, int len)
return 0;
}
-static struct go7007_hpi_ops saa7134_go7007_hpi_ops = {
+static const struct go7007_hpi_ops saa7134_go7007_hpi_ops = {
.interface_reset = saa7134_go7007_interface_reset,
.write_interrupt = saa7134_go7007_write_interrupt,
.read_interrupt = saa7134_go7007_read_interrupt,
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 69d32d3fa32c..c8042c3888cd 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -975,6 +975,27 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
msg_msi.addr, dev->i2c_adap.name,
(1 == rc) ? "yes" : "no");
break;
+ case SAA7134_BOARD_SNAZIO_TVPVR_PRO:
+ dev->init_data.name = "SnaZio* TVPVR PRO";
+ dev->init_data.get_key = get_key_msi_tvanywhere_plus;
+ dev->init_data.ir_codes = RC_MAP_MSI_TVANYWHERE_PLUS;
+ /*
+		 * The MSI TV@nywhere Plus requires more frequent polling,
+		 * otherwise it will miss some keypresses
+ */
+ dev->init_data.polling_interval = 50;
+ info.addr = 0x30;
+ /*
+		 * The MSI TV@nywhere Plus controller doesn't seem to
+ * respond to probes unless we read something from
+ * an existing device. Weird...
+ * REVISIT: might no longer be needed
+ */
+ rc = i2c_transfer(&dev->i2c_adap, &msg_msi, 1);
+ input_dbg("probe 0x%02x @ %s: %s\n",
+ msg_msi.addr, dev->i2c_adap.name,
+ (rc == 1) ? "yes" : "no");
+ break;
case SAA7134_BOARD_KWORLD_PC150U:
/* copied and modified from MSI TV@nywhere Plus */
dev->init_data.name = "Kworld PC150-U";
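
The "read something from an existing device" workaround above is a single one-message I2C read issued purely for its side effect; the return value of i2c_transfer() (the number of messages transferred) tells whether the controller ACKed. A sketch of that probe pattern, with the address taken from this board entry and a hypothetical helper name:

	/* Hypothetical sketch: poke the IR controller at 0x30 with a 1-byte
	 * read so that it starts answering subsequent probes. */
	static bool ir_controller_responds(struct i2c_adapter *adap)
	{
		u8 dummy;
		struct i2c_msg msg = {
			.addr  = 0x30,
			.flags = I2C_M_RD,
			.len   = sizeof(dummy),
			.buf   = &dummy,
		};

		/* i2c_transfer() returns the number of messages transferred;
		 * 1 means the single read completed and was ACKed. */
		return i2c_transfer(adap, &msg, 1) == 1;
	}
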
diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
index 21a579309575..38f94b742e28 100644
--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
@@ -192,7 +192,7 @@ static void mute_input_7134(struct saa7134_dev *dev)
in = dev->input;
mute = (dev->ctl_mute ||
(dev->automute && (&card(dev).radio) != in));
- if (card(dev).mute.name) {
+ if (card(dev).mute.type) {
/*
* 7130 - we'll mute using some unconnected audio input
* 7134 - we'll probably should switch external mux with gpio
@@ -204,13 +204,14 @@ static void mute_input_7134(struct saa7134_dev *dev)
if (dev->hw_mute == mute &&
dev->hw_input == in && !dev->insuspend) {
audio_dbg(1, "mute/input: nothing to do [mute=%d,input=%s]\n",
- mute, in->name);
+ mute, saa7134_input_name[in->type]);
return;
}
audio_dbg(1, "ctl_mute=%d automute=%d input=%s => mute=%d input=%s\n",
dev->ctl_mute, dev->automute,
- dev->input->name, mute, in->name);
+ saa7134_input_name[dev->input->type], mute,
+ saa7134_input_name[in->type]);
dev->hw_mute = mute;
dev->hw_input = in;
@@ -245,7 +246,7 @@ static void mute_input_7134(struct saa7134_dev *dev)
mask = card(dev).gpiomask;
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, mask, mask);
saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, mask, in->gpio);
- saa7134_track_gpio(dev,in->name);
+ saa7134_track_gpio(dev, saa7134_input_name[in->type]);
}
static void tvaudio_setmode(struct saa7134_dev *dev,
@@ -756,14 +757,14 @@ static int mute_input_7133(struct saa7134_dev *dev)
if (0 != card(dev).gpiomask) {
mask = card(dev).gpiomask;
- if (card(dev).mute.name && dev->ctl_mute)
+ if (card(dev).mute.type && dev->ctl_mute)
in = &card(dev).mute;
else
in = dev->input;
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, mask, mask);
saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, mask, in->gpio);
- saa7134_track_gpio(dev,in->name);
+ saa7134_track_gpio(dev, saa7134_input_name[in->type]);
}
return 0;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index a63c1366a64e..ffa39543eb65 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -409,7 +409,8 @@ static void set_tvnorm(struct saa7134_dev *dev, struct saa7134_tvnorm *norm)
static void video_mux(struct saa7134_dev *dev, int input)
{
- video_dbg("video input = %d [%s]\n", input, card_in(dev, input).name);
+ video_dbg("video input = %d [%s]\n",
+ input, saa7134_input_name[card_in(dev, input).type]);
dev->ctl_input = input;
set_tvnorm(dev, dev->tvnorm);
saa7134_tvaudio_setinput(dev, &card_in(dev, input));
@@ -478,8 +479,7 @@ void saa7134_set_tvnorm_hw(struct saa7134_dev *dev)
{
saa7134_set_decoder(dev);
- if (card_in(dev, dev->ctl_input).tv)
- saa_call_all(dev, video, s_std, dev->tvnorm->id);
+ saa_call_all(dev, video, s_std, dev->tvnorm->id);
/* Set the correct norm for the saa6752hs. This function
does nothing if there is no saa6752hs. */
saa_call_empress(dev, video, s_std, dev->tvnorm->id);
@@ -785,6 +785,63 @@ static int stop_preview(struct saa7134_dev *dev)
return 0;
}
+/*
+ * Media Controller helper functions
+ */
+
+static int saa7134_enable_analog_tuner(struct saa7134_dev *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device *mdev = dev->media_dev;
+ struct media_entity *source;
+ struct media_link *link, *found_link = NULL;
+ int ret, active_links = 0;
+
+ if (!mdev || !dev->decoder)
+ return 0;
+
+ /*
+	 * This will find the tuner that is connected to the decoder.
+	 * Technically, this is not 100% correct, as the device may be
+	 * using an analog input instead of the tuner. However, as we can't
+	 * do DVB streaming while the DMA engine is being used for V4L2,
+	 * this should be enough for practical purposes.
+ */
+ list_for_each_entry(link, &dev->decoder->links, list) {
+ if (link->sink->entity == dev->decoder) {
+ found_link = link;
+ if (link->flags & MEDIA_LNK_FL_ENABLED)
+ active_links++;
+ break;
+ }
+ }
+
+ if (active_links == 1 || !found_link)
+ return 0;
+
+ source = found_link->source->entity;
+ list_for_each_entry(link, &source->links, list) {
+ struct media_entity *sink;
+ int flags = 0;
+
+ sink = link->sink->entity;
+
+ if (sink == dev->decoder)
+ flags = MEDIA_LNK_FL_ENABLED;
+
+ ret = media_entity_setup_link(link, flags);
+ if (ret) {
+ pr_err("Couldn't change link %s->%s to %s. Error %d\n",
+ source->name, sink->name,
+ flags ? "enabled" : "disabled",
+ ret);
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
/* ------------------------------------------------------------------ */
static int buffer_activate(struct saa7134_dev *dev,
@@ -924,6 +981,9 @@ static int queue_setup(struct vb2_queue *q,
*nplanes = 1;
sizes[0] = size;
alloc_ctxs[0] = dev->alloc_ctx;
+
+ saa7134_enable_analog_tuner(dev);
+
return 0;
}
@@ -1219,10 +1279,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.height = dev->height;
f->fmt.pix.field = dev->field;
f->fmt.pix.pixelformat = dev->fmt->fourcc;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * dev->fmt->depth) >> 3;
+ if (dev->fmt->planar)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * dev->fmt->depth) / 8;
f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
+ (f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -1298,10 +1361,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
if (f->fmt.pix.height > maxh)
f->fmt.pix.height = maxh;
f->fmt.pix.width &= ~0x03;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fmt->depth) >> 3;
+ if (fmt->planar)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fmt->depth) / 8;
f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
+ (f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
@@ -1381,13 +1447,19 @@ int saa7134_enum_input(struct file *file, void *priv, struct v4l2_input *i)
n = i->index;
if (n >= SAA7134_INPUT_MAX)
return -EINVAL;
- if (NULL == card_in(dev, i->index).name)
+ if (card_in(dev, i->index).type == SAA7134_NO_INPUT)
return -EINVAL;
i->index = n;
- i->type = V4L2_INPUT_TYPE_CAMERA;
- strcpy(i->name, card_in(dev, n).name);
- if (card_in(dev, n).tv)
+ strcpy(i->name, saa7134_input_name[card_in(dev, n).type]);
+ switch (card_in(dev, n).type) {
+ case SAA7134_INPUT_TV:
+ case SAA7134_INPUT_TV_MONO:
i->type = V4L2_INPUT_TYPE_TUNER;
+ break;
+ default:
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ break;
+ }
if (n == dev->ctl_input) {
int v1 = saa_readb(SAA7134_STATUS_VIDEO1);
int v2 = saa_readb(SAA7134_STATUS_VIDEO2);
@@ -1419,7 +1491,7 @@ int saa7134_s_input(struct file *file, void *priv, unsigned int i)
if (i >= SAA7134_INPUT_MAX)
return -EINVAL;
- if (NULL == card_in(dev, i).name)
+ if (card_in(dev, i).type == SAA7134_NO_INPUT)
return -EINVAL;
video_mux(dev, i);
return 0;
@@ -1656,12 +1728,13 @@ int saa7134_g_tuner(struct file *file, void *priv,
return -EINVAL;
memset(t, 0, sizeof(*t));
for (n = 0; n < SAA7134_INPUT_MAX; n++) {
- if (card_in(dev, n).tv)
+ if (card_in(dev, n).type == SAA7134_INPUT_TV ||
+ card_in(dev, n).type == SAA7134_INPUT_TV_MONO)
break;
}
if (n == SAA7134_INPUT_MAX)
return -EINVAL;
- if (NULL != card_in(dev, n).name) {
+ if (card_in(dev, n).type != SAA7134_NO_INPUT) {
strcpy(t->name, "Television");
t->type = V4L2_TUNER_ANALOG_TV;
saa_call_all(dev, tuner, g_tuner, t);
@@ -1906,6 +1979,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_s_std = saa7134_s_std,
.vidioc_g_std = saa7134_g_std,
.vidioc_querystd = saa7134_querystd,
@@ -2089,7 +2163,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
* USERPTR support is a no-go unless the application knows about these
* limitations and has special support for this.
*/
- q->io_modes = VB2_MMAP | VB2_READ;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
if (saa7134_userptr)
q->io_modes |= VB2_USERPTR;
q->drv_priv = &dev->video_q;
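
The bytesperline hunks above encode the packed/planar distinction: for packed formats the line stride covers every component (width * depth / 8), while for planar formats bytesperline describes only the first (luma) plane, so it equals the width and sizeimage must be computed from depth separately. A standalone arithmetic sketch of the same computation (the example resolutions and depths are illustrative, not taken from the driver's format tables):

	#include <stdio.h>

	/* Compute bytesperline and sizeimage the way the patch does. */
	static void fmt_sizes(unsigned int width, unsigned int height,
			      unsigned int depth, int planar)
	{
		unsigned int bytesperline = planar ? width : width * depth / 8;
		unsigned int sizeimage = height * width * depth / 8;

		printf("bpl=%u sizeimage=%u\n", bytesperline, sizeimage);
	}

	int main(void)
	{
		fmt_sizes(720, 576, 16, 0);	/* packed YUYV:   bpl=1440 */
		fmt_sizes(720, 576, 12, 1);	/* planar YUV420: bpl=720  */
		return 0;
	}
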
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 5938bc781999..69a9bbf22d4d 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -343,6 +343,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_WIS_VOYAGER 193
#define SAA7134_BOARD_AVERMEDIA_505 194
#define SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM 195
+#define SAA7134_BOARD_SNAZIO_TVPVR_PRO 196
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -361,12 +362,29 @@ struct saa7134_card_ir {
#define SET_CLOCK_INVERTED (1 << 2)
#define SET_VSYNC_OFF (1 << 3)
+enum saa7134_input_types {
+ SAA7134_NO_INPUT = 0,
+ SAA7134_INPUT_MUTE,
+ SAA7134_INPUT_RADIO,
+ SAA7134_INPUT_TV,
+ SAA7134_INPUT_TV_MONO,
+ SAA7134_INPUT_COMPOSITE,
+ SAA7134_INPUT_COMPOSITE0,
+ SAA7134_INPUT_COMPOSITE1,
+ SAA7134_INPUT_COMPOSITE2,
+ SAA7134_INPUT_COMPOSITE3,
+ SAA7134_INPUT_COMPOSITE4,
+ SAA7134_INPUT_SVIDEO,
+ SAA7134_INPUT_SVIDEO0,
+ SAA7134_INPUT_SVIDEO1,
+ SAA7134_INPUT_COMPOSITE_OVER_SVIDEO,
+};
+
struct saa7134_input {
- char *name;
- unsigned int vmux;
- enum saa7134_audio_in amux;
- unsigned int gpio;
- unsigned int tv:1;
+ enum saa7134_input_types type;
+ unsigned int vmux;
+ enum saa7134_audio_in amux;
+ unsigned int gpio;
};
enum saa7134_mpeg_type {
@@ -410,7 +428,7 @@ struct saa7134_board {
unsigned int ts_force_val:1;
};
-#define card_has_radio(dev) (NULL != saa7134_boards[dev->board].radio.name)
+#define card_has_radio(dev) (SAA7134_NO_INPUT != saa7134_boards[dev->board].radio.type)
#define card_is_empress(dev) (SAA7134_MPEG_EMPRESS == saa7134_boards[dev->board].mpeg)
#define card_is_dvb(dev) (SAA7134_MPEG_DVB == saa7134_boards[dev->board].mpeg)
#define card_is_go7007(dev) (SAA7134_MPEG_GO7007 == saa7134_boards[dev->board].mpeg)
@@ -654,6 +672,19 @@ struct saa7134_dev {
/* I2C keyboard data */
struct IR_i2c_init_data init_data;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device *media_dev;
+
+ struct media_entity input_ent[SAA7134_INPUT_MAX + 1];
+ struct media_pad input_pad[SAA7134_INPUT_MAX + 1];
+
+ struct media_entity demod;
+ struct media_pad demod_pad[DEMOD_NUM_PADS];
+
+ struct media_pad video_pad, vbi_pad;
+ struct media_entity *decoder;
+#endif
+
#if IS_ENABLED(CONFIG_VIDEO_SAA7134_DVB)
/* SAA7134_MPEG_DVB only */
struct vb2_dvb_frontends frontends;
@@ -727,7 +758,7 @@ extern struct mutex saa7134_devlist_lock;
extern int saa7134_no_overlay;
extern bool saa7134_userptr;
-void saa7134_track_gpio(struct saa7134_dev *dev, char *msg);
+void saa7134_track_gpio(struct saa7134_dev *dev, const char *msg);
void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value);
#define SAA7134_PGTABLE_SIZE 4096
@@ -760,6 +791,7 @@ extern int (*saa7134_dmasound_exit)(struct saa7134_dev *dev);
/* saa7134-cards.c */
extern struct saa7134_board saa7134_boards[];
+extern const char * const saa7134_input_name[];
extern const unsigned int saa7134_bcount;
extern struct pci_device_id saa7134_pci_tbl[];
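
The header now pairs the saa7134_input_types enum with the exported saa7134_input_name[] table, so every site that used to print in->name indexes the table with in->type instead. A sketch of how such a table lines up with the enum; the strings below are illustrative, since the real table lives in saa7134-cards.c and is not part of this excerpt:

	/* Illustrative only: a designated-initializer name table indexed by
	 * enum saa7134_input_types. */
	const char * const saa7134_input_name[] = {
		[SAA7134_NO_INPUT]	   = "",
		[SAA7134_INPUT_MUTE]	   = "mute",
		[SAA7134_INPUT_RADIO]	   = "Radio",
		[SAA7134_INPUT_TV]	   = "Television",
		[SAA7134_INPUT_COMPOSITE1] = "Composite1",
		[SAA7134_INPUT_SVIDEO]	   = "S-Video",
		/* ... one entry per remaining enum value ... */
	};
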
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index a69dc6a0752b..382caf200ba1 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -1739,7 +1739,7 @@ static int alps_tdlb7_request_firmware(struct dvb_frontend* fe, const struct fir
#endif
}
-static struct sp8870_config alps_tdlb7_config = {
+static const struct sp8870_config alps_tdlb7_config = {
.demod_address = 0x71,
.request_firmware = alps_tdlb7_request_firmware,
@@ -2198,13 +2198,18 @@ static int frontend_init(struct av7110 *av7110)
break;
case 0x0001: // Hauppauge/TT Nexus-T premium rev1.X
+ {
+ struct dvb_frontend *fe;
+
// try ALPS TDLB7 first, then Grundig 29504-401
- av7110->fe = dvb_attach(sp8870_attach, &alps_tdlb7_config, &av7110->i2c_adap);
- if (av7110->fe) {
- av7110->fe->ops.tuner_ops.set_params = alps_tdlb7_tuner_set_params;
+ fe = dvb_attach(sp8870_attach, &alps_tdlb7_config, &av7110->i2c_adap);
+ if (fe) {
+ fe->ops.tuner_ops.set_params = alps_tdlb7_tuner_set_params;
+ av7110->fe = fe;
break;
}
- /* fall-thru */
+ }
+ /* fall-thru */
case 0x0008: // Hauppauge/TT DVB-T
// Grundig 29504-401
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index de54310a2660..fb8ede5a1531 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -615,36 +615,50 @@ static void frontend_init(struct budget *budget)
break;
case 0x1016: // Hauppauge/TT Nova-S SE (samsung s5h1420/????(tda8260))
- budget->dvb_frontend = dvb_attach(s5h1420_attach, &s5h1420_config, &budget->i2c_adap);
- if (budget->dvb_frontend) {
- budget->dvb_frontend->ops.tuner_ops.set_params = s5h1420_tuner_set_params;
- if (dvb_attach(lnbp21_attach, budget->dvb_frontend, &budget->i2c_adap, 0, 0) == NULL) {
+ {
+ struct dvb_frontend *fe;
+
+ fe = dvb_attach(s5h1420_attach, &s5h1420_config, &budget->i2c_adap);
+ if (fe) {
+ fe->ops.tuner_ops.set_params = s5h1420_tuner_set_params;
+ budget->dvb_frontend = fe;
+ if (dvb_attach(lnbp21_attach, fe, &budget->i2c_adap,
+ 0, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
goto error_out;
}
break;
}
-
+ }
+ /* fall through */
case 0x1018: // TT Budget-S-1401 (philips tda10086/philips tda8262)
+ {
+ struct dvb_frontend *fe;
+
// gpio2 is connected to CLB - reset it + leave it high
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(1);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
msleep(1);
- budget->dvb_frontend = dvb_attach(tda10086_attach, &tda10086_config, &budget->i2c_adap);
- if (budget->dvb_frontend) {
- if (dvb_attach(tda826x_attach, budget->dvb_frontend, 0x60, &budget->i2c_adap, 0) == NULL)
+ fe = dvb_attach(tda10086_attach, &tda10086_config, &budget->i2c_adap);
+ if (fe) {
+ budget->dvb_frontend = fe;
+ if (dvb_attach(tda826x_attach, fe, 0x60,
+ &budget->i2c_adap, 0) == NULL)
printk("%s: No tda826x found!\n", __func__);
- if (dvb_attach(lnbp21_attach, budget->dvb_frontend, &budget->i2c_adap, 0, 0) == NULL) {
+ if (dvb_attach(lnbp21_attach, fe,
+ &budget->i2c_adap, 0, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
goto error_out;
}
break;
}
+ }
+ /* fall through */
case 0x101c: { /* TT S2-1600 */
- struct stv6110x_devctl *ctl;
+ const struct stv6110x_devctl *ctl;
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(50);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
@@ -697,7 +711,7 @@ static void frontend_init(struct budget *budget)
break;
case 0x1020: { /* Omicom S2 */
- struct stv6110x_devctl *ctl;
+ const struct stv6110x_devctl *ctl;
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(50);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 8b89ebe16d94..201f5c296a95 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -54,15 +54,6 @@ config VIDEO_VIU
Say Y here if you want to enable VIU device on MPC5121e Rev2+.
In doubt, say N.
-config VIDEO_TIMBERDALE
- tristate "Support for timberdale Video In/LogiWIN"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && HAS_DMA
- depends on (MFD_TIMBERDALE && TIMB_DMA) || COMPILE_TEST
- select VIDEO_ADV7180
- select VIDEOBUF_DMA_CONTIG
- ---help---
- Add support for the Video In peripherial of the timberdale FPGA.
-
config VIDEO_M32R_AR
tristate "AR devices"
depends on VIDEO_V4L2
@@ -120,6 +111,19 @@ source "drivers/media/platform/s5p-tv/Kconfig"
source "drivers/media/platform/am437x/Kconfig"
source "drivers/media/platform/xilinx/Kconfig"
+config VIDEO_TI_CAL
+ tristate "TI CAL (Camera Adaptation Layer) driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ default n
+ ---help---
+	  Support for the TI CAL (Camera Adaptation Layer) block
+	  found on the DRA72X SoC.
+	  In the TI Technical Reference Manual this module is referred
+	  to as the Camera Interface Subsystem (CAMSS).
+
endif # V4L_PLATFORM_DRIVERS
menuconfig V4L_MEM2MEM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index efa0295af87b..bbb7bd1eb268 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -2,7 +2,6 @@
# Makefile for the video capture/playback device drivers.
#
-obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
@@ -18,6 +17,8 @@ obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
+obj-$(CONFIG_VIDEO_TI_CAL) += ti-vpe/
+
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda/
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 7d28899f89ce..b6625047250d 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
/* Calculate bytesused field */
if (dst_buf->sequence == 0) {
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
ctx->vpu_header_size[2]);
@@ -1455,9 +1455,9 @@ static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx,
return 0;
ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2);
- ctx->bitstream.vaddr = dma_alloc_writecombine(
- &ctx->dev->plat_dev->dev, ctx->bitstream.size,
- &ctx->bitstream.paddr, GFP_KERNEL);
+ ctx->bitstream.vaddr = dma_alloc_wc(&ctx->dev->plat_dev->dev,
+ ctx->bitstream.size,
+ &ctx->bitstream.paddr, GFP_KERNEL);
if (!ctx->bitstream.vaddr) {
v4l2_err(&ctx->dev->v4l2_dev,
"failed to allocate bitstream ringbuffer");
@@ -1474,8 +1474,8 @@ static void coda_free_bitstream_buffer(struct coda_ctx *ctx)
if (ctx->bitstream.vaddr == NULL)
return;
- dma_free_writecombine(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
- ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ dma_free_wc(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
+ ctx->bitstream.vaddr, ctx->bitstream.paddr);
ctx->bitstream.vaddr = NULL;
kfifo_init(&ctx->bitstream_fifo, NULL, 0);
}
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 2d782ce94a67..133ab9f70f85 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1950,16 +1950,76 @@ static int coda_register_device(struct coda_dev *dev, int i)
return video_register_device(vfd, VFL_TYPE_GRABBER, 0);
}
+static void coda_copy_firmware(struct coda_dev *dev, const u8 * const buf,
+ size_t size)
+{
+ u32 *src = (u32 *)buf;
+
+	/* If the firmware has a 16-byte Freescale header, skip it */
+ if (buf[0] == 'M' && buf[1] == 'X')
+ src += 4;
+ /*
+ * Check whether the firmware is in native order or pre-reordered for
+	 * memory access. The first instruction opcode is always 0xe40e.
+ */
+ if (__le16_to_cpup((__le16 *)src) == 0xe40e) {
+ u32 *dst = dev->codebuf.vaddr;
+ int i;
+
+ /* Firmware in native order, reorder while copying */
+ if (dev->devtype->product == CODA_DX6) {
+ for (i = 0; i < (size - 16) / 4; i++)
+ dst[i] = (src[i] << 16) | (src[i] >> 16);
+ } else {
+ for (i = 0; i < (size - 16) / 4; i += 2) {
+ dst[i] = (src[i + 1] << 16) | (src[i + 1] >> 16);
+ dst[i + 1] = (src[i] << 16) | (src[i] >> 16);
+ }
+ }
+ } else {
+ /* Copy the already reordered firmware image */
+ memcpy(dev->codebuf.vaddr, src, size);
+ }
+}
+
+static void coda_fw_callback(const struct firmware *fw, void *context);
+
+static int coda_firmware_request(struct coda_dev *dev)
+{
+ char *fw = dev->devtype->firmware[dev->firmware];
+
+ dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
+ coda_product_name(dev->devtype->product));
+
+ return request_firmware_nowait(THIS_MODULE, true, fw,
+ &dev->plat_dev->dev, GFP_KERNEL, dev,
+ coda_fw_callback);
+}
+
static void coda_fw_callback(const struct firmware *fw, void *context)
{
struct coda_dev *dev = context;
struct platform_device *pdev = dev->plat_dev;
int i, ret;
- if (!fw) {
+ if (!fw && dev->firmware == 1) {
v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
goto put_pm;
}
+ if (!fw) {
+ dev->firmware = 1;
+ coda_firmware_request(dev);
+ return;
+ }
+ if (dev->firmware == 1) {
+ /*
+ * Since we can't suppress warnings for failed asynchronous
+ * firmware requests, report that the fallback firmware was
+ * found.
+ */
+ dev_info(&pdev->dev, "Using fallback firmware %s\n",
+ dev->devtype->firmware[dev->firmware]);
+ }
/* allocate auxiliary per-device code buffer for the BIT processor */
ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size, "codebuf",
@@ -1967,8 +2027,7 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
if (ret < 0)
goto put_pm;
- /* Copy the whole firmware image to the code buffer */
- memcpy(dev->codebuf.vaddr, fw->data, fw->size);
+ coda_copy_firmware(dev, fw->data, fw->size);
release_firmware(fw);
ret = coda_hw_init(dev);
@@ -2019,17 +2078,6 @@ put_pm:
pm_runtime_put_sync(&pdev->dev);
}
-static int coda_firmware_request(struct coda_dev *dev)
-{
- char *fw = dev->devtype->firmware;
-
- dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
- coda_product_name(dev->devtype->product));
-
- return request_firmware_nowait(THIS_MODULE, true,
- fw, &dev->plat_dev->dev, GFP_KERNEL, dev, coda_fw_callback);
-}
-
enum coda_platform {
CODA_IMX27,
CODA_IMX53,
@@ -2039,7 +2087,10 @@ enum coda_platform {
static const struct coda_devtype coda_devdata[] = {
[CODA_IMX27] = {
- .firmware = "v4l-codadx6-imx27.bin",
+ .firmware = {
+ "vpu_fw_imx27_TO2.bin",
+ "v4l-codadx6-imx27.bin"
+ },
.product = CODA_DX6,
.codecs = codadx6_codecs,
.num_codecs = ARRAY_SIZE(codadx6_codecs),
@@ -2049,7 +2100,10 @@ static const struct coda_devtype coda_devdata[] = {
.iram_size = 0xb000,
},
[CODA_IMX53] = {
- .firmware = "v4l-coda7541-imx53.bin",
+ .firmware = {
+ "vpu_fw_imx53.bin",
+ "v4l-coda7541-imx53.bin"
+ },
.product = CODA_7541,
.codecs = coda7_codecs,
.num_codecs = ARRAY_SIZE(coda7_codecs),
@@ -2060,7 +2114,10 @@ static const struct coda_devtype coda_devdata[] = {
.iram_size = 0x14000,
},
[CODA_IMX6Q] = {
- .firmware = "v4l-coda960-imx6q.bin",
+ .firmware = {
+ "vpu_fw_imx6q.bin",
+ "v4l-coda960-imx6q.bin"
+ },
.product = CODA_960,
.codecs = coda9_codecs,
.num_codecs = ARRAY_SIZE(coda9_codecs),
@@ -2071,7 +2128,10 @@ static const struct coda_devtype coda_devdata[] = {
.iram_size = 0x21000,
},
[CODA_IMX6DL] = {
- .firmware = "v4l-coda960-imx6dl.bin",
+ .firmware = {
+ "vpu_fw_imx6d.bin",
+ "v4l-coda960-imx6dl.bin"
+ },
.product = CODA_960,
.codecs = coda9_codecs,
.num_codecs = ARRAY_SIZE(coda9_codecs),
@@ -2118,14 +2178,12 @@ static int coda_probe(struct platform_device *pdev)
pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
- if (of_id) {
+ if (of_id)
dev->devtype = of_id->data;
- } else if (pdev_id) {
+ else if (pdev_id)
dev->devtype = &coda_devdata[pdev_id->driver_data];
- } else {
- ret = -EINVAL;
- goto err_v4l2_register;
- }
+ else
+ return -EINVAL;
spin_lock_init(&dev->irqlock);
INIT_LIST_HEAD(&dev->instances);
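
The coda changes implement the two-entry firmware fallback entirely inside the asynchronous completion callback: when the preferred name fails, the callback bumps the index and re-issues the request. A reduced sketch of that state machine, using the request_firmware_nowait() signature the driver already relies on (struct my_dev and its fields are hypothetical stand-ins for the coda device):

	/* Sketch: try fw_names[0], then fw_names[1], from the async callback. */
	static void fw_callback(const struct firmware *fw, void *context)
	{
		struct my_dev *dev = context;	/* hypothetical device struct */

		if (!fw && dev->fw_index == 0) {
			dev->fw_index = 1;	/* fall back to the second name */
			request_firmware_nowait(THIS_MODULE, true,
						dev->fw_names[1], dev->dev,
						GFP_KERNEL, dev, fw_callback);
			return;
		}
		if (!fw)
			return;			/* both names failed */

		/* ... copy fw->data into the code buffer, then ... */
		release_firmware(fw);
	}
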
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index d08e9843e9f2..8f2c71e06966 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -50,7 +50,7 @@ enum coda_product {
struct coda_video_device;
struct coda_devtype {
- char *firmware;
+ char *firmware[2];
enum coda_product product;
const struct coda_codec *codecs;
unsigned int num_codecs;
@@ -74,6 +74,7 @@ struct coda_dev {
struct video_device vfd[5];
struct platform_device *plat_dev;
const struct coda_devtype *devtype;
+ int firmware;
void __iomem *regs_base;
struct clk *clk_per;
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index ffbefdff6b5e..6fba32bec974 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -261,7 +261,7 @@ static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
*/
if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) {
if (fpc_physaddr != NULL) {
- free_pages((unsigned long)fpc_physaddr,
+ free_pages((unsigned long)fpc_virtaddr,
get_order
(config_params->fault_pxl.fp_num *
FP_NUM_BYTES));
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 93782f15b825..a600e32e2543 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -700,7 +700,7 @@ static unsigned int gsc_m2m_poll(struct file *file,
{
struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
struct gsc_dev *gsc = ctx->gsc_dev;
- int ret;
+ unsigned int ret;
if (mutex_lock_interruptible(&gsc->lock))
return -ERESTARTSYS;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index e85649147dc8..dc1b929f7a33 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -992,10 +992,6 @@ static int fimc_lite_link_setup(struct media_entity *entity,
switch (local->index) {
case FLITE_SD_PAD_SINK:
- if (!is_media_entity_v4l2_subdev(remote->entity)) {
- ret = -EINVAL;
- break;
- }
if (flags & MEDIA_LNK_FL_ENABLED) {
if (fimc->source_subdev_grp_id == 0)
fimc->source_subdev_grp_id = sd->grp_id;
@@ -1010,19 +1006,15 @@ static int fimc_lite_link_setup(struct media_entity *entity,
case FLITE_SD_PAD_SOURCE_DMA:
if (!(flags & MEDIA_LNK_FL_ENABLED))
atomic_set(&fimc->out_path, FIMC_IO_NONE);
- else if (is_media_entity_v4l2_io(remote->entity))
- atomic_set(&fimc->out_path, FIMC_IO_DMA);
else
- ret = -EINVAL;
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
break;
case FLITE_SD_PAD_SOURCE_ISP:
if (!(flags & MEDIA_LNK_FL_ENABLED))
atomic_set(&fimc->out_path, FIMC_IO_NONE);
- else if (is_media_entity_v4l2_subdev(remote->entity))
- atomic_set(&fimc->out_path, FIMC_IO_ISP);
else
- ret = -EINVAL;
+ atomic_set(&fimc->out_path, FIMC_IO_ISP);
break;
default:
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index e79ddbb1e14f..4f494acd8150 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -389,13 +389,19 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
struct fimc_source_info *pd = &fmd->sensor[index].pdata;
struct device_node *rem, *ep, *np;
struct v4l2_of_endpoint endpoint;
+ int ret;
/* Assume here a port node can have only one endpoint node. */
ep = of_get_next_child(port, NULL);
if (!ep)
return 0;
- v4l2_of_parse_endpoint(ep, &endpoint);
+ ret = v4l2_of_parse_endpoint(ep, &endpoint);
+ if (ret) {
+ of_node_put(ep);
+ return ret;
+ }
+
if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS)
return -EINVAL;
@@ -486,8 +492,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
continue;
ret = fimc_md_parse_port_node(fmd, port, index);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
goto rpm_put;
+ }
index++;
}
@@ -498,8 +506,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
for_each_child_of_node(ports, node) {
ret = fimc_md_parse_port_node(fmd, node, index);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
break;
+ }
index++;
}
rpm_put:
@@ -707,8 +717,10 @@ static int fimc_md_register_platform_entities(struct fimc_md *fmd,
ret = fimc_md_register_platform_entity(fmd, pdev,
plat_entity);
put_device(&pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
break;
+ }
}
return ret;
@@ -1434,22 +1446,13 @@ static int fimc_md_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fmd);
- /* Protect the media graph while we're registering entities */
- mutex_lock(&fmd->media_dev.graph_mutex);
-
ret = fimc_md_register_platform_entities(fmd, dev->of_node);
- if (ret) {
- mutex_unlock(&fmd->media_dev.graph_mutex);
+ if (ret)
goto err_clk;
- }
ret = fimc_md_register_sensor_entities(fmd);
- if (ret) {
- mutex_unlock(&fmd->media_dev.graph_mutex);
+ if (ret)
goto err_m_ent;
- }
-
- mutex_unlock(&fmd->media_dev.graph_mutex);
ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
if (ret)
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index ac5e50e595be..bd5c46c3d4b7 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -736,6 +736,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
{
struct device_node *node = pdev->dev.of_node;
struct v4l2_of_endpoint endpoint;
+ int ret;
if (of_property_read_u32(node, "clock-frequency",
&state->clk_frequency))
@@ -751,7 +752,9 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
return -EINVAL;
}
/* Get port node and validate MIPI-CSI channel id. */
- v4l2_of_parse_endpoint(node, &endpoint);
+ ret = v4l2_of_parse_endpoint(node, &endpoint);
+ if (ret)
+ goto err;
state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
if (state->index >= CSIS_MAX_ENTITIES)
@@ -764,9 +767,10 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
"samsung,csis-wclk");
state->num_lanes = endpoint.bus.mipi_csi2.num_data_lanes;
- of_node_put(node);
- return 0;
+err:
+ of_node_put(node);
+ return ret;
}
static int s5pcsis_pm_resume(struct device *dev, bool runtime);
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 0bcfa553c1aa..5d54e2c6c16b 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -64,6 +64,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-mc.h>
#include <media/v4l2-of.h>
#include "isp.h"
@@ -449,7 +450,7 @@ void omap3isp_configure_bridge(struct isp_device *isp,
case CCDC_INPUT_PARALLEL:
ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
- shift += parcfg->data_lane_shift * 2;
+ shift += parcfg->data_lane_shift;
break;
case CCDC_INPUT_CSI2A:
@@ -657,216 +658,6 @@ static irqreturn_t isp_isr(int irq, void *_isp)
}
/* -----------------------------------------------------------------------------
- * Pipeline power management
- *
- * Entities must be powered up when part of a pipeline that contains at least
- * one open video device node.
- *
- * To achieve this use the entity use_count field to track the number of users.
- * For entities corresponding to video device nodes the use_count field stores
- * the users count of the node. For entities corresponding to subdevs the
- * use_count field stores the total number of users of all video device nodes
- * in the pipeline.
- *
- * The omap3isp_pipeline_pm_use() function must be called in the open() and
- * close() handlers of video device nodes. It increments or decrements the use
- * count of all subdev entities in the pipeline.
- *
- * To react to link management on powered pipelines, the link setup notification
- * callback updates the use count of all entities in the source and sink sides
- * of the link.
- */
-
-/*
- * isp_pipeline_pm_use_count - Count the number of users of a pipeline
- * @entity: The entity
- *
- * Return the total number of users of all video device nodes in the pipeline.
- */
-static int isp_pipeline_pm_use_count(struct media_entity *entity,
- struct media_entity_graph *graph)
-{
- int use = 0;
-
- media_entity_graph_walk_start(graph, entity);
-
- while ((entity = media_entity_graph_walk_next(graph))) {
- if (is_media_entity_v4l2_io(entity))
- use += entity->use_count;
- }
-
- return use;
-}
-
-/*
- * isp_pipeline_pm_power_one - Apply power change to an entity
- * @entity: The entity
- * @change: Use count change
- *
- * Change the entity use count by @change. If the entity is a subdev update its
- * power state by calling the core::s_power operation when the use count goes
- * from 0 to != 0 or from != 0 to 0.
- *
- * Return 0 on success or a negative error code on failure.
- */
-static int isp_pipeline_pm_power_one(struct media_entity *entity, int change)
-{
- struct v4l2_subdev *subdev;
- int ret;
-
- subdev = is_media_entity_v4l2_subdev(entity)
- ? media_entity_to_v4l2_subdev(entity) : NULL;
-
- if (entity->use_count == 0 && change > 0 && subdev != NULL) {
- ret = v4l2_subdev_call(subdev, core, s_power, 1);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
- }
-
- entity->use_count += change;
- WARN_ON(entity->use_count < 0);
-
- if (entity->use_count == 0 && change < 0 && subdev != NULL)
- v4l2_subdev_call(subdev, core, s_power, 0);
-
- return 0;
-}
-
-/*
- * isp_pipeline_pm_power - Apply power change to all entities in a pipeline
- * @entity: The entity
- * @change: Use count change
- *
- * Walk the pipeline to update the use count and the power state of all non-node
- * entities.
- *
- * Return 0 on success or a negative error code on failure.
- */
-static int isp_pipeline_pm_power(struct media_entity *entity, int change,
- struct media_entity_graph *graph)
-{
- struct media_entity *first = entity;
- int ret = 0;
-
- if (!change)
- return 0;
-
- media_entity_graph_walk_start(graph, entity);
-
- while (!ret && (entity = media_entity_graph_walk_next(graph)))
- if (is_media_entity_v4l2_subdev(entity))
- ret = isp_pipeline_pm_power_one(entity, change);
-
- if (!ret)
- return ret;
-
- media_entity_graph_walk_start(graph, first);
-
- while ((first = media_entity_graph_walk_next(graph))
- && first != entity)
- if (is_media_entity_v4l2_subdev(first))
- isp_pipeline_pm_power_one(first, -change);
-
- return ret;
-}
-
-/*
- * omap3isp_pipeline_pm_use - Update the use count of an entity
- * @entity: The entity
- * @use: Use (1) or stop using (0) the entity
- *
- * Update the use count of all entities in the pipeline and power entities on or
- * off accordingly.
- *
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. No failure can occur when the use parameter is
- * set to 0.
- */
-int omap3isp_pipeline_pm_use(struct media_entity *entity, int use,
- struct media_entity_graph *graph)
-{
- int change = use ? 1 : -1;
- int ret;
-
- mutex_lock(&entity->graph_obj.mdev->graph_mutex);
-
- /* Apply use count to node. */
- entity->use_count += change;
- WARN_ON(entity->use_count < 0);
-
- /* Apply power change to connected non-nodes. */
- ret = isp_pipeline_pm_power(entity, change, graph);
- if (ret < 0)
- entity->use_count -= change;
-
- mutex_unlock(&entity->graph_obj.mdev->graph_mutex);
-
- return ret;
-}
-
-/*
- * isp_pipeline_link_notify - Link management notification callback
- * @link: The link
- * @flags: New link flags that will be applied
- * @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*)
- *
- * React to link management on powered pipelines by updating the use count of
- * all entities in the source and sink sides of the link. Entities are powered
- * on or off accordingly.
- *
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. This function will not fail for disconnection
- * events.
- */
-static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
-{
- struct media_entity_graph *graph =
- &container_of(link->graph_obj.mdev, struct isp_device,
- media_dev)->pm_count_graph;
- struct media_entity *source = link->source->entity;
- struct media_entity *sink = link->sink->entity;
- int source_use;
- int sink_use;
- int ret = 0;
-
- if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH) {
- ret = media_entity_graph_walk_init(graph,
- link->graph_obj.mdev);
- if (ret)
- return ret;
- }
-
- source_use = isp_pipeline_pm_use_count(source, graph);
- sink_use = isp_pipeline_pm_use_count(sink, graph);
-
- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
- !(flags & MEDIA_LNK_FL_ENABLED)) {
- /* Powering off entities is assumed to never fail. */
- isp_pipeline_pm_power(source, -sink_use, graph);
- isp_pipeline_pm_power(sink, -source_use, graph);
- return 0;
- }
-
- if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
- (flags & MEDIA_LNK_FL_ENABLED)) {
-
- ret = isp_pipeline_pm_power(source, sink_use, graph);
- if (ret < 0)
- return ret;
-
- ret = isp_pipeline_pm_power(sink, source_use, graph);
- if (ret < 0)
- isp_pipeline_pm_power(source, -sink_use, graph);
- }
-
- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH)
- media_entity_graph_walk_cleanup(graph);
-
- return ret;
-}
-
-/* -----------------------------------------------------------------------------
* Pipeline stream management
*/
@@ -1889,7 +1680,7 @@ static int isp_register_entities(struct isp_device *isp)
strlcpy(isp->media_dev.model, "TI OMAP3 ISP",
sizeof(isp->media_dev.model));
isp->media_dev.hw_revision = isp->revision;
- isp->media_dev.link_notify = isp_pipeline_link_notify;
+ isp->media_dev.link_notify = v4l2_pipeline_link_notify;
media_device_init(&isp->media_dev);
isp->v4l2_dev.mdev = &isp->media_dev;
@@ -2235,8 +2026,11 @@ static int isp_of_parse_node(struct device *dev, struct device_node *node,
struct isp_bus_cfg *buscfg = &isd->bus;
struct v4l2_of_endpoint vep;
unsigned int i;
+ int ret;
- v4l2_of_parse_endpoint(node, &vep);
+ ret = v4l2_of_parse_endpoint(node, &vep);
+ if (ret)
+ return ret;
dev_dbg(dev, "parsing endpoint %s, interface %u\n", node->full_name,
vep.base.port);
@@ -2528,12 +2322,13 @@ static int isp_probe(struct platform_device *pdev)
}
/* Interrupt */
- isp->irq_num = platform_get_irq(pdev, 0);
- if (isp->irq_num <= 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
dev_err(isp->dev, "No IRQ resource\n");
ret = -ENODEV;
goto error_iommu;
}
+ isp->irq_num = ret;
if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
"OMAP3 ISP", isp)) {
@@ -2599,6 +2394,7 @@ static const struct of_device_id omap3isp_of_table[] = {
{ .compatible = "ti,omap3-isp" },
{ },
};
+MODULE_DEVICE_TABLE(of, omap3isp_of_table);
static struct platform_driver omap3isp_driver = {
.probe = isp_probe,
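
None of the pipeline power-management code deleted above is lost: it was moved into the V4L2 core, which is why link_notify can now point at v4l2_pipeline_link_notify. Drivers are expected to call the matching core helper from their video-node open and release paths; a sketch of that usage, assuming the v4l2_pipeline_pm_use() helper exported by media/v4l2-mc.h in this kernel series (the caller names are hypothetical):

	#include <media/v4l2-mc.h>

	/* Sketch: power the whole pipeline up on open, down on release. */
	static int my_video_open(struct media_entity *entity)
	{
		return v4l2_pipeline_pm_use(entity, 1);
	}

	static void my_video_release(struct media_entity *entity)
	{
		v4l2_pipeline_pm_use(entity, 0);
	}
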
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 49b7f71ac968..7e6f6638433b 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -177,7 +177,6 @@ struct isp_device {
struct v4l2_device v4l2_dev;
struct v4l2_async_notifier notifier;
struct media_device media_dev;
- struct media_entity_graph pm_count_graph;
struct device *dev;
u32 revision;
@@ -267,9 +266,6 @@ void omap3isp_subclk_enable(struct isp_device *isp,
void omap3isp_subclk_disable(struct isp_device *isp,
enum isp_subclk_resource res);
-int omap3isp_pipeline_pm_use(struct media_entity *entity, int use,
- struct media_entity_graph *graph);
-
int omap3isp_register_entities(struct platform_device *pdev,
struct v4l2_device *v4l2_dev);
void omap3isp_unregister_entities(struct platform_device *pdev);
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index bb3974c98e37..882310eb45cc 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -2421,7 +2421,7 @@ static int ccdc_link_validate(struct v4l2_subdev *sd,
&((struct isp_bus_cfg *)
media_entity_to_v4l2_subdev(link->source->entity)
->host_priv)->bus.parallel;
- parallel_shift = parcfg->data_lane_shift * 2;
+ parallel_shift = parcfg->data_lane_shift;
} else {
parallel_shift = 0;
}
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 84a96670e2e7..ac30a0f83780 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -1480,13 +1480,6 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
struct isp_buffer *buffer;
int restart = 0;
- if (prev->input == PREVIEW_INPUT_MEMORY) {
- buffer = omap3isp_video_buffer_next(&prev->video_in);
- if (buffer != NULL)
- preview_set_inaddr(prev, buffer->dma);
- pipe->state |= ISP_PIPELINE_IDLE_INPUT;
- }
-
if (prev->output & PREVIEW_OUTPUT_MEMORY) {
buffer = omap3isp_video_buffer_next(&prev->video_out);
if (buffer != NULL) {
@@ -1496,6 +1489,13 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
}
+ if (prev->input == PREVIEW_INPUT_MEMORY) {
+ buffer = omap3isp_video_buffer_next(&prev->video_in);
+ if (buffer != NULL)
+ preview_set_inaddr(prev, buffer->dma);
+ pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+ }
+
switch (prev->state) {
case ISP_PIPELINE_STREAM_SINGLESHOT:
if (isp_pipeline_ready(pipe))
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 994dfc0813f6..ac76d2901501 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -22,8 +22,10 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-contig.h>
#include "ispvideo.h"
@@ -434,10 +436,68 @@ static void isp_video_buffer_queue(struct vb2_buffer *buf)
}
}
+/*
+ * omap3isp_video_return_buffers - Return all queued buffers to videobuf2
+ * @video: ISP video object
+ * @state: new state for the returned buffers
+ *
+ * Return all buffers queued on the video node to videobuf2 in the given state.
+ * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error
+ * when starting the stream, or VB2_BUF_STATE_ERROR otherwise.
+ *
+ * The function must be called with the video irqlock held.
+ */
+static void omap3isp_video_return_buffers(struct isp_video *video,
+ enum vb2_buffer_state state)
+{
+ while (!list_empty(&video->dmaqueue)) {
+ struct isp_buffer *buf;
+
+ buf = list_first_entry(&video->dmaqueue,
+ struct isp_buffer, irqlist);
+ list_del(&buf->irqlist);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+}
+
+static int isp_video_start_streaming(struct vb2_queue *queue,
+ unsigned int count)
+{
+ struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
+ struct isp_video *video = vfh->video;
+ struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+ unsigned long flags;
+ int ret;
+
+	/* In sensor-to-memory mode, the stream can be started synchronously
+	 * with the STREAMON command. In memory-to-memory mode, it will be
+	 * started when buffers are queued on both the input and output.
+ */
+ if (pipe->input)
+ return 0;
+
+ ret = omap3isp_pipeline_set_stream(pipe,
+ ISP_PIPELINE_STREAM_CONTINUOUS);
+ if (ret < 0) {
+ spin_lock_irqsave(&video->irqlock, flags);
+ omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return ret;
+ }
+
+ spin_lock_irqsave(&video->irqlock, flags);
+ if (list_empty(&video->dmaqueue))
+ video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ return 0;
+}
+
static const struct vb2_ops isp_video_queue_ops = {
.queue_setup = isp_video_queue_setup,
.buf_prepare = isp_video_buffer_prepare,
.buf_queue = isp_video_buffer_queue,
+ .start_streaming = isp_video_start_streaming,
};
/*
@@ -459,7 +519,7 @@ static const struct vb2_ops isp_video_queue_ops = {
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
- enum isp_pipeline_state state;
+ enum vb2_buffer_state vb_state;
struct isp_buffer *buf;
unsigned long flags;
@@ -495,17 +555,19 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
/* Report pipeline errors to userspace on the capture device side. */
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
- state = VB2_BUF_STATE_ERROR;
+ vb_state = VB2_BUF_STATE_ERROR;
pipe->error = false;
} else {
- state = VB2_BUF_STATE_DONE;
+ vb_state = VB2_BUF_STATE_DONE;
}
- vb2_buffer_done(&buf->vb.vb2_buf, state);
+ vb2_buffer_done(&buf->vb.vb2_buf, vb_state);
spin_lock_irqsave(&video->irqlock, flags);
if (list_empty(&video->dmaqueue)) {
+ enum isp_pipeline_state state;
+
spin_unlock_irqrestore(&video->irqlock, flags);
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -541,26 +603,16 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
* omap3isp_video_cancel_stream - Cancel stream on a video node
* @video: ISP video object
*
- * Cancelling a stream mark all buffers on the video node as erroneous and makes
- * sure no new buffer can be queued.
+ * Cancelling a stream returns all buffers queued on the video node to videobuf2
+ * in the erroneous state and makes sure no new buffer can be queued.
*/
void omap3isp_video_cancel_stream(struct isp_video *video)
{
unsigned long flags;
spin_lock_irqsave(&video->irqlock, flags);
-
- while (!list_empty(&video->dmaqueue)) {
- struct isp_buffer *buf;
-
- buf = list_first_entry(&video->dmaqueue,
- struct isp_buffer, irqlist);
- list_del(&buf->irqlist);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- }
-
+ omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR);
video->error = true;
-
spin_unlock_irqrestore(&video->irqlock, flags);
}
@@ -1087,29 +1139,10 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (ret < 0)
goto err_check_format;
- /* In sensor-to-memory mode, the stream can be started synchronously
- * to the stream on command. In memory-to-memory mode, it will be
- * started when buffers are queued on both the input and output.
- */
- if (pipe->input == NULL) {
- ret = omap3isp_pipeline_set_stream(pipe,
- ISP_PIPELINE_STREAM_CONTINUOUS);
- if (ret < 0)
- goto err_set_stream;
- spin_lock_irqsave(&video->irqlock, flags);
- if (list_empty(&video->dmaqueue))
- video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
- spin_unlock_irqrestore(&video->irqlock, flags);
- }
-
mutex_unlock(&video->stream_lock);
return 0;
-err_set_stream:
- mutex_lock(&video->queue_lock);
- vb2_streamoff(&vfh->queue, type);
- mutex_unlock(&video->queue_lock);
err_check_format:
media_entity_pipeline_stop(&video->video.entity);
err_pipeline_start:
@@ -1261,12 +1294,7 @@ static int isp_video_open(struct file *file)
goto done;
}
- ret = media_entity_graph_walk_init(&handle->graph,
- &video->isp->media_dev);
- if (ret)
- goto done;
-
- ret = omap3isp_pipeline_pm_use(&video->video.entity, 1, &handle->graph);
+ ret = v4l2_pipeline_pm_use(&video->video.entity, 1);
if (ret < 0) {
omap3isp_put(video->isp);
goto done;
@@ -1297,7 +1325,6 @@ static int isp_video_open(struct file *file)
done:
if (ret < 0) {
v4l2_fh_del(&handle->vfh);
- media_entity_graph_walk_cleanup(&handle->graph);
kfree(handle);
}
@@ -1317,8 +1344,7 @@ static int isp_video_release(struct file *file)
vb2_queue_release(&handle->queue);
mutex_unlock(&video->queue_lock);
- omap3isp_pipeline_pm_use(&video->video.entity, 0, &handle->graph);
- media_entity_graph_walk_cleanup(&handle->graph);
+ v4l2_pipeline_pm_use(&video->video.entity, 0);
/* Release the file handle. */
v4l2_fh_del(vfh);
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 156429878d64..6a48d5879c56 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -189,7 +189,6 @@ struct isp_video_fh {
struct vb2_queue queue;
struct v4l2_format format;
struct v4l2_fract timeperframe;
- struct media_entity_graph graph;
};
#define to_isp_video_fh(fh) container_of(fh, struct isp_video_fh, vfh)
diff --git a/drivers/media/platform/omap3isp/omap3isp.h b/drivers/media/platform/omap3isp/omap3isp.h
index 190e259a6a2d..443e8f7673e2 100644
--- a/drivers/media/platform/omap3isp/omap3isp.h
+++ b/drivers/media/platform/omap3isp/omap3isp.h
@@ -33,9 +33,9 @@ enum isp_interface_type {
* struct isp_parallel_cfg - Parallel interface configuration
* @data_lane_shift: Data lane shifter
* 0 - CAMEXT[13:0] -> CAM[13:0]
- * 1 - CAMEXT[13:2] -> CAM[11:0]
- * 2 - CAMEXT[13:4] -> CAM[9:0]
- * 3 - CAMEXT[13:6] -> CAM[7:0]
+ * 2 - CAMEXT[13:2] -> CAM[11:0]
+ * 4 - CAMEXT[13:4] -> CAM[9:0]
+ * 6 - CAMEXT[13:6] -> CAM[7:0]
* @clk_pol: Pixel clock polarity
* 0 - Sample on rising edge, 1 - Sample on falling edge
* @hs_pol: Horizontal synchronization polarity
@@ -48,7 +48,7 @@ enum isp_interface_type {
* 0 - Normal, 1 - One's complement
*/
struct isp_parallel_cfg {
- unsigned int data_lane_shift:2;
+ unsigned int data_lane_shift:3;
unsigned int clk_pol:1;
unsigned int hs_pol:1;
unsigned int vs_pol:1;
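
Note: the ispccdc.c and omap3isp.h hunks above are coupled. data_lane_shift
previously held an encoded index that ccdc_link_validate() multiplied by two;
it now holds the shift value itself, which is why the bitfield grows from 2 to
3 bits. A minimal sketch of the new encoding, using hypothetical platform data:

	static struct isp_parallel_cfg pcfg = {
		.data_lane_shift = 2,	/* CAMEXT[13:2] -> CAM[11:0] */
		/* the old encoding stored 1 here and let the CCDC double it */
	};
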
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 485f5259acb0..552789a69c86 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1613,6 +1613,7 @@ static const struct of_device_id jpu_dt_ids[] = {
{ .compatible = "renesas,jpu-r8a7791" }, /* M2-W */
{ .compatible = "renesas,jpu-r8a7792" }, /* V2H */
{ .compatible = "renesas,jpu-r8a7793" }, /* M2-N */
+ { .compatible = "renesas,rcar-gen2-jpu" },
{ },
};
MODULE_DEVICE_TABLE(of, jpu_dt_ids);
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 0b44b9accf50..af237af204e2 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -493,21 +493,17 @@ static int s3c_camif_probe(struct platform_device *pdev)
if (ret < 0)
goto err_sens;
- mutex_lock(&camif->media_dev.graph_mutex);
-
ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
if (ret < 0)
- goto err_unlock;
+ goto err_sens;
ret = camif_register_video_nodes(camif);
if (ret < 0)
- goto err_unlock;
+ goto err_sens;
ret = camif_create_media_links(camif);
if (ret < 0)
- goto err_unlock;
-
- mutex_unlock(&camif->media_dev.graph_mutex);
+ goto err_sens;
ret = media_device_register(&camif->media_dev);
if (ret < 0)
@@ -516,8 +512,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
pm_runtime_put(dev);
return 0;
-err_unlock:
- mutex_unlock(&camif->media_dev.graph_mutex);
err_sens:
v4l2_device_unregister(&camif->v4l2_dev);
media_device_unregister(&camif->media_dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 0434f02a7175..034b5c1d35a1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -212,6 +212,14 @@ static struct mfc_control controls[] = {
.menu_skip_mask = 0,
},
{
+ .id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME,
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .minimum = 0,
+ .maximum = 0,
+ .step = 0,
+ .default_value = 0,
+ },
+ {
.id = V4L2_CID_MPEG_VIDEO_VBV_SIZE,
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0,
@@ -1423,6 +1431,10 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
ctx->force_frame_type = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ ctx->force_frame_type =
+ V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME;
+ break;
case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
p->vbv_size = ctrl->val;
break;
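
Note: V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME is a standard button control, so it
is triggered once per request rather than carrying a persistent value. A
minimal userspace sketch (fd is a hypothetical open handle on the encoder
video node):

	#include <linux/videodev2.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	struct v4l2_control ctrl = {
		.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME,
		/* .value is ignored for button controls */
	};
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
		perror("VIDIOC_S_CTRL");
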
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index f2776cd415ca..355298989dd8 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -17,19 +17,11 @@ config SOC_CAMERA_PLATFORM
help
This is a generic SoC camera platform driver, useful for testing
-config VIDEO_MX3
- tristate "i.MX3x Camera Sensor Interface driver"
- depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA
- depends on MX3_IPU || COMPILE_TEST
- depends on HAS_DMA
- select VIDEOBUF2_DMA_CONTIG
- ---help---
- This is a v4l2 driver for the i.MX3x Camera Sensor Interface
-
config VIDEO_PXA27x
tristate "PXA27x Quick Capture Interface driver"
depends on VIDEO_DEV && PXA27x && SOC_CAMERA
select VIDEOBUF_DMA_SG
+ select SG_SPLIT
---help---
This is a v4l2 driver for the PXA27x Quick Capture Interface
@@ -60,25 +52,6 @@ config VIDEO_SH_MOBILE_CEU
---help---
This is a v4l2 driver for the SuperH Mobile CEU Interface
-config VIDEO_OMAP1
- tristate "OMAP1 Camera Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA
- depends on ARCH_OMAP1
- depends on HAS_DMA
- select VIDEOBUF_DMA_CONTIG
- select VIDEOBUF_DMA_SG
- ---help---
- This is a v4l2 driver for the TI OMAP1 camera interface
-
-config VIDEO_MX2
- tristate "i.MX27 Camera Sensor Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA
- depends on SOC_IMX27 || COMPILE_TEST
- depends on HAS_DMA
- select VIDEOBUF2_DMA_CONTIG
- ---help---
- This is a v4l2 driver for the i.MX27 Camera Sensor Interface
-
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
depends on VIDEO_DEV && SOC_CAMERA
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
index 2826382dc9f8..7ee71ae231c7 100644
--- a/drivers/media/platform/soc_camera/Makefile
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -7,9 +7,6 @@ obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
# soc-camera host drivers have to be linked after camera drivers
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
-obj-$(CONFIG_VIDEO_MX2) += mx2_camera.o
-obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
-obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 1af779ee3c74..ab2d9b9b1f5d 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -1026,7 +1026,7 @@ static int atmel_isi_parse_dt(struct atmel_isi *isi,
static int atmel_isi_probe(struct platform_device *pdev)
{
- unsigned int irq;
+ int irq;
struct atmel_isi *isi;
struct resource *regs;
int ret, i;
@@ -1086,7 +1086,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
isi->width_flags |= 1 << 9;
irq = platform_get_irq(pdev, 0);
- if (IS_ERR_VALUE(irq)) {
+ if (irq < 0) {
ret = irq;
goto err_req_irq;
}
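
Note: this is the usual platform_get_irq() idiom; the call returns a negative
errno on failure, so the result must be stored in a signed int, and
IS_ERR_VALUE() on an unsigned int misclassifies the error range. A minimal
sketch of the correct pattern:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates -ENXIO, -EPROBE_DEFER, ... */
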
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 415f3bda60bf..2aaf4a8f71a0 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -28,6 +28,9 @@
#include <linux/clk.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
@@ -38,7 +41,6 @@
#include <linux/videodev2.h>
-#include <mach/dma.h>
#include <linux/platform_data/media/camera-pxa.h>
#define PXA_CAM_VERSION "0.0.6"
@@ -175,21 +177,16 @@ enum pxa_camera_active_dma {
DMA_V = 0x4,
};
-/* descriptor needed for the PXA DMA engine */
-struct pxa_cam_dma {
- dma_addr_t sg_dma;
- struct pxa_dma_desc *sg_cpu;
- size_t sg_size;
- int sglen;
-};
-
/* buffer for one video frame */
struct pxa_buffer {
/* common v4l buffer stuff -- must be first */
struct videobuf_buffer vb;
u32 code;
/* our descriptor lists for Y, U and V channels */
- struct pxa_cam_dma dmas[3];
+ struct dma_async_tx_descriptor *descs[3];
+ dma_cookie_t cookie[3];
+ struct scatterlist *sg[3];
+ int sg_len[3];
int inwork;
enum pxa_camera_active_dma active_dma;
};
@@ -207,7 +204,7 @@ struct pxa_camera_dev {
void __iomem *base;
int channels;
- unsigned int dma_chans[3];
+ struct dma_chan *dma_chans[3];
struct pxacamera_platform_data *pdata;
struct resource *res;
@@ -222,7 +219,7 @@ struct pxa_camera_dev {
spinlock_t lock;
struct pxa_buffer *active;
- struct pxa_dma_desc *sg_tail[3];
+ struct tasklet_struct task_eof;
u32 save_cicr[5];
};
@@ -258,7 +255,6 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
int i;
@@ -272,42 +268,45 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
* longer in STATE_QUEUED or STATE_ACTIVE
*/
videobuf_waiton(vq, &buf->vb, 0, 0);
- videobuf_dma_unmap(vq->dev, dma);
- videobuf_dma_free(dma);
- for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) {
- if (buf->dmas[i].sg_cpu)
- dma_free_coherent(ici->v4l2_dev.dev,
- buf->dmas[i].sg_size,
- buf->dmas[i].sg_cpu,
- buf->dmas[i].sg_dma);
- buf->dmas[i].sg_cpu = NULL;
+ for (i = 0; i < 3 && buf->descs[i]; i++) {
+ dmaengine_desc_free(buf->descs[i]);
+ kfree(buf->sg[i]);
+ buf->descs[i] = NULL;
+ buf->sg[i] = NULL;
+ buf->sg_len[i] = 0;
}
+ videobuf_dma_unmap(vq->dev, dma);
+ videobuf_dma_free(dma);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
+
+ dev_dbg(icd->parent, "%s end (vb=0x%p) 0x%08lx %d\n", __func__,
+ &buf->vb, buf->vb.baddr, buf->vb.bsize);
}
-static int calculate_dma_sglen(struct scatterlist *sglist, int sglen,
- int sg_first_ofs, int size)
+static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
+ enum pxa_camera_active_dma act_dma);
+
+static void pxa_camera_dma_irq_y(void *data)
{
- int i, offset, dma_len, xfer_len;
- struct scatterlist *sg;
+ struct pxa_camera_dev *pcdev = data;
- offset = sg_first_ofs;
- for_each_sg(sglist, sg, sglen, i) {
- dma_len = sg_dma_len(sg);
+ pxa_camera_dma_irq(pcdev, DMA_Y);
+}
- /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */
- xfer_len = roundup(min(dma_len - offset, size), 8);
+static void pxa_camera_dma_irq_u(void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
- size = max(0, size - xfer_len);
- offset = 0;
- if (size == 0)
- break;
- }
+ pxa_camera_dma_irq(pcdev, DMA_U);
+}
+
+static void pxa_camera_dma_irq_v(void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
- BUG_ON(size != 0);
- return i + 1;
+ pxa_camera_dma_irq(pcdev, DMA_V);
}
/**
@@ -318,93 +317,53 @@ static int calculate_dma_sglen(struct scatterlist *sglist, int sglen,
* @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
* @cibr: camera Receive Buffer Register
* @size: bytes to transfer
- * @sg_first: first element of sg_list
- * @sg_first_ofs: offset in first element of sg_list
+ * @offset: offset in videobuffer of the first byte to transfer
*
* Prepares the pxa dma descriptors to transfer one camera channel.
- * Beware sg_first and sg_first_ofs are both input and output parameters.
*
- * Returns 0 or -ENOMEM if no coherent memory is available
+ * Returns 0 on success or -ENOMEM if no memory is available
*/
static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
struct pxa_buffer *buf,
struct videobuf_dmabuf *dma, int channel,
- int cibr, int size,
- struct scatterlist **sg_first, int *sg_first_ofs)
+ int cibr, int size, int offset)
{
- struct pxa_cam_dma *pxa_dma = &buf->dmas[channel];
- struct device *dev = pcdev->soc_host.v4l2_dev.dev;
- struct scatterlist *sg;
- int i, offset, sglen;
- int dma_len = 0, xfer_len = 0;
-
- if (pxa_dma->sg_cpu)
- dma_free_coherent(dev, pxa_dma->sg_size,
- pxa_dma->sg_cpu, pxa_dma->sg_dma);
-
- sglen = calculate_dma_sglen(*sg_first, dma->sglen,
- *sg_first_ofs, size);
-
- pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc);
- pxa_dma->sg_cpu = dma_alloc_coherent(dev, pxa_dma->sg_size,
- &pxa_dma->sg_dma, GFP_KERNEL);
- if (!pxa_dma->sg_cpu)
- return -ENOMEM;
-
- pxa_dma->sglen = sglen;
- offset = *sg_first_ofs;
-
- dev_dbg(dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n",
- *sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma);
-
-
- for_each_sg(*sg_first, sg, sglen, i) {
- dma_len = sg_dma_len(sg);
-
- /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */
- xfer_len = roundup(min(dma_len - offset, size), 8);
-
- size = max(0, size - xfer_len);
-
- pxa_dma->sg_cpu[i].dsadr = pcdev->res->start + cibr;
- pxa_dma->sg_cpu[i].dtadr = sg_dma_address(sg) + offset;
- pxa_dma->sg_cpu[i].dcmd =
- DCMD_FLOWSRC | DCMD_BURST8 | DCMD_INCTRGADDR | xfer_len;
-#ifdef DEBUG
- if (!i)
- pxa_dma->sg_cpu[i].dcmd |= DCMD_STARTIRQEN;
-#endif
- pxa_dma->sg_cpu[i].ddadr =
- pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc);
-
- dev_vdbg(dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n",
- pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc),
- sg_dma_address(sg) + offset, xfer_len);
- offset = 0;
-
- if (size == 0)
- break;
+ struct dma_chan *dma_chan = pcdev->dma_chans[channel];
+ struct scatterlist *sg = buf->sg[channel];
+ int sglen = buf->sg_len[channel];
+ struct dma_async_tx_descriptor *tx;
+
+ tx = dmaengine_prep_slave_sg(dma_chan, sg, sglen, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
+ if (!tx) {
+ dev_err(pcdev->soc_host.v4l2_dev.dev,
+ "dmaengine_prep_slave_sg failed\n");
+ goto fail;
}
- pxa_dma->sg_cpu[sglen].ddadr = DDADR_STOP;
- pxa_dma->sg_cpu[sglen].dcmd = DCMD_FLOWSRC | DCMD_BURST8 | DCMD_ENDIRQEN;
-
- /*
- * Handle 1 special case :
- * - in 3 planes (YUV422P format), we might finish with xfer_len equal
- * to dma_len (end on PAGE boundary). In this case, the sg element
- * for next plane should be the next after the last used to store the
- * last scatter gather RAM page
- */
- if (xfer_len >= dma_len) {
- *sg_first_ofs = xfer_len - dma_len;
- *sg_first = sg_next(sg);
- } else {
- *sg_first_ofs = xfer_len;
- *sg_first = sg;
+ tx->callback_param = pcdev;
+ switch (channel) {
+ case 0:
+ tx->callback = pxa_camera_dma_irq_y;
+ break;
+ case 1:
+ tx->callback = pxa_camera_dma_irq_u;
+ break;
+ case 2:
+ tx->callback = pxa_camera_dma_irq_v;
+ break;
}
+ buf->descs[channel] = tx;
return 0;
+fail:
+ kfree(sg);
+
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "%s (vb=0x%p) dma_tx=%p\n",
+ __func__, &buf->vb, tx);
+
+ return -ENOMEM;
}
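
Note: together with the hunks below, this moves the driver onto the standard
dmaengine slave lifecycle: a reusable descriptor is prepared once per buffer
(above), then submitted and issued per capture, with completion delivered
through the descriptor callback. A condensed sketch using this driver's names
(ch is a hypothetical channel index):

	/* prepared once with DMA_CTRL_REUSE, so the descriptor
	 * remains valid across resubmissions; then, per capture: */
	buf->cookie[ch] = dmaengine_submit(buf->descs[ch]);
	dma_async_issue_pending(pcdev->dma_chans[ch]);
	/* completion invokes tx->callback, e.g. pxa_camera_dma_irq_y() */
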
static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
@@ -431,6 +390,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
int ret;
int size_y, size_u = 0, size_v = 0;
+ size_t sizes[3];
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -473,13 +433,11 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
if (vb->state == VIDEOBUF_NEEDS_INIT) {
int size = vb->size;
- int next_ofs = 0;
struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
- struct scatterlist *sg;
ret = videobuf_iolock(vq, vb, NULL);
if (ret)
- goto fail;
+ goto out;
if (pcdev->channels == 3) {
size_y = size / 2;
@@ -488,11 +446,19 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
size_y = size;
}
- sg = dma->sglist;
+ sizes[0] = size_y;
+ sizes[1] = size_u;
+ sizes[2] = size_v;
+ ret = sg_split(dma->sglist, dma->sglen, 0, pcdev->channels,
+ sizes, buf->sg, buf->sg_len, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(dev, "sg_split failed: %d\n", ret);
+ goto fail;
+ }
/* init DMA for Y channel */
- ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y,
- &sg, &next_ofs);
+ ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0,
+ size_y, 0);
if (ret) {
dev_err(dev, "DMA initialization for Y/RGB failed\n");
goto fail;
@@ -501,19 +467,19 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
/* init DMA for U channel */
if (size_u)
ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1,
- size_u, &sg, &next_ofs);
+ size_u, size_y);
if (ret) {
dev_err(dev, "DMA initialization for U failed\n");
- goto fail_u;
+ goto fail;
}
/* init DMA for V channel */
if (size_v)
ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2,
- size_v, &sg, &next_ofs);
+ size_v, size_y + size_u);
if (ret) {
dev_err(dev, "DMA initialization for V failed\n");
- goto fail_v;
+ goto fail;
}
vb->state = VIDEOBUF_PREPARED;
@@ -524,12 +490,6 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
return 0;
-fail_v:
- dma_free_coherent(dev, buf->dmas[1].sg_size,
- buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma);
-fail_u:
- dma_free_coherent(dev, buf->dmas[0].sg_size,
- buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma);
fail:
free_buffer(vq, buf);
out:
@@ -553,10 +513,8 @@ static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
for (i = 0; i < pcdev->channels; i++) {
dev_dbg(pcdev->soc_host.v4l2_dev.dev,
- "%s (channel=%d) ddadr=%08x\n", __func__,
- i, active->dmas[i].sg_dma);
- DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma;
- DCSR(pcdev->dma_chans[i]) = DCSR_RUN;
+ "%s (channel=%d)\n", __func__, i);
+ dma_async_issue_pending(pcdev->dma_chans[i]);
}
}
@@ -567,7 +525,7 @@ static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
for (i = 0; i < pcdev->channels; i++) {
dev_dbg(pcdev->soc_host.v4l2_dev.dev,
"%s (channel=%d)\n", __func__, i);
- DCSR(pcdev->dma_chans[i]) = 0;
+ dmaengine_terminate_all(pcdev->dma_chans[i]);
}
}
@@ -575,18 +533,12 @@ static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev,
struct pxa_buffer *buf)
{
int i;
- struct pxa_dma_desc *buf_last_desc;
for (i = 0; i < pcdev->channels; i++) {
- buf_last_desc = buf->dmas[i].sg_cpu + buf->dmas[i].sglen;
- buf_last_desc->ddadr = DDADR_STOP;
-
- if (pcdev->sg_tail[i])
- /* Link the new buffer to the old tail */
- pcdev->sg_tail[i]->ddadr = buf->dmas[i].sg_dma;
-
- /* Update the channel tail */
- pcdev->sg_tail[i] = buf_last_desc;
+ buf->cookie[i] = dmaengine_submit(buf->descs[i]);
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "%s (channel=%d) : submit vb=%p cookie=%d\n",
+ __func__, i, buf, buf->descs[i]->cookie);
}
}
@@ -603,6 +555,7 @@ static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
unsigned long cicr0;
dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
+ __raw_writel(__raw_readl(pcdev->base + CISR), pcdev->base + CISR);
/* Enable End-Of-Frame Interrupt */
cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
cicr0 &= ~CICR0_EOFM;
@@ -677,8 +630,6 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
struct videobuf_buffer *vb,
struct pxa_buffer *buf)
{
- int i;
-
/* _init is used to debug races, see comment in pxa_camera_reqbufs() */
list_del_init(&vb->queue);
vb->state = VIDEOBUF_DONE;
@@ -690,8 +641,6 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
if (list_empty(&pcdev->capture)) {
pxa_camera_stop_capture(pcdev);
- for (i = 0; i < pcdev->channels; i++)
- pcdev->sg_tail[i] = NULL;
return;
}
@@ -715,50 +664,41 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
*
* Context: should only be called within the dma irq handler
*/
-static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev)
+static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev,
+ dma_cookie_t last_submitted,
+ dma_cookie_t last_issued)
{
- int i, is_dma_stopped = 1;
+ bool is_dma_stopped = last_submitted != last_issued;
- for (i = 0; i < pcdev->channels; i++)
- if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP)
- is_dma_stopped = 0;
dev_dbg(pcdev->soc_host.v4l2_dev.dev,
- "%s : top queued buffer=%p, dma_stopped=%d\n",
+ "%s : top queued buffer=%p, is_dma_stopped=%d\n",
__func__, pcdev->active, is_dma_stopped);
+
if (pcdev->active && is_dma_stopped)
pxa_camera_start_capture(pcdev);
}
-static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
+static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
enum pxa_camera_active_dma act_dma)
{
struct device *dev = pcdev->soc_host.v4l2_dev.dev;
- struct pxa_buffer *buf;
+ struct pxa_buffer *buf, *last_buf;
unsigned long flags;
- u32 status, camera_status, overrun;
+ u32 camera_status, overrun;
+ int chan;
struct videobuf_buffer *vb;
+ enum dma_status last_status;
+ dma_cookie_t last_issued;
spin_lock_irqsave(&pcdev->lock, flags);
- status = DCSR(channel);
- DCSR(channel) = status;
-
camera_status = __raw_readl(pcdev->base + CISR);
+ dev_dbg(dev, "camera dma irq, cisr=0x%x dma=%d\n",
+ camera_status, act_dma);
overrun = CISR_IFO_0;
if (pcdev->channels == 3)
overrun |= CISR_IFO_1 | CISR_IFO_2;
- if (status & DCSR_BUSERR) {
- dev_err(dev, "DMA Bus Error IRQ!\n");
- goto out;
- }
-
- if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) {
- dev_err(dev, "Unknown DMA IRQ source, status: 0x%08x\n",
- status);
- goto out;
- }
-
/*
* pcdev->active should not be NULL in DMA irq handler.
*
@@ -778,52 +718,47 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
buf = container_of(vb, struct pxa_buffer, vb);
WARN_ON(buf->inwork || list_empty(&vb->queue));
- dev_dbg(dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n",
- __func__, channel, status & DCSR_STARTINTR ? "SOF " : "",
- status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel));
-
- if (status & DCSR_ENDINTR) {
- /*
- * It's normal if the last frame creates an overrun, as there
- * are no more DMA descriptors to fetch from QCI fifos
- */
- if (camera_status & overrun &&
- !list_is_last(pcdev->capture.next, &pcdev->capture)) {
- dev_dbg(dev, "FIFO overrun! CISR: %x\n",
- camera_status);
- pxa_camera_stop_capture(pcdev);
- pxa_camera_start_capture(pcdev);
- goto out;
- }
- buf->active_dma &= ~act_dma;
- if (!buf->active_dma) {
- pxa_camera_wakeup(pcdev, vb, buf);
- pxa_camera_check_link_miss(pcdev);
- }
+ /*
+ * It's normal if the last frame creates an overrun, as there
+ * are no more DMA descriptors to fetch from QCI fifos
+ */
+ switch (act_dma) {
+ case DMA_U:
+ chan = 1;
+ break;
+ case DMA_V:
+ chan = 2;
+ break;
+ default:
+ chan = 0;
+ break;
+ }
+ last_buf = list_entry(pcdev->capture.prev,
+ struct pxa_buffer, vb.queue);
+ last_status = dma_async_is_tx_complete(pcdev->dma_chans[chan],
+ last_buf->cookie[chan],
+ NULL, &last_issued);
+ if (camera_status & overrun &&
+ last_status != DMA_COMPLETE) {
+ dev_dbg(dev, "FIFO overrun! CISR: %x\n",
+ camera_status);
+ pxa_camera_stop_capture(pcdev);
+ list_for_each_entry(buf, &pcdev->capture, vb.queue)
+ pxa_dma_add_tail_buf(pcdev, buf);
+ pxa_camera_start_capture(pcdev);
+ goto out;
+ }
+ buf->active_dma &= ~act_dma;
+ if (!buf->active_dma) {
+ pxa_camera_wakeup(pcdev, vb, buf);
+ pxa_camera_check_link_miss(pcdev, last_buf->cookie[chan],
+ last_issued);
}
out:
spin_unlock_irqrestore(&pcdev->lock, flags);
}
-static void pxa_camera_dma_irq_y(int channel, void *data)
-{
- struct pxa_camera_dev *pcdev = data;
- pxa_camera_dma_irq(channel, pcdev, DMA_Y);
-}
-
-static void pxa_camera_dma_irq_u(int channel, void *data)
-{
- struct pxa_camera_dev *pcdev = data;
- pxa_camera_dma_irq(channel, pcdev, DMA_U);
-}
-
-static void pxa_camera_dma_irq_v(int channel, void *data)
-{
- struct pxa_camera_dev *pcdev = data;
- pxa_camera_dma_irq(channel, pcdev, DMA_V);
-}
-
static struct videobuf_queue_ops pxa_videobuf_ops = {
.buf_setup = pxa_videobuf_setup,
.buf_prepare = pxa_videobuf_prepare,
@@ -920,13 +855,35 @@ static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
clk_disable_unprepare(pcdev->clk);
}
-static irqreturn_t pxa_camera_irq(int irq, void *data)
+static void pxa_camera_eof(unsigned long arg)
{
- struct pxa_camera_dev *pcdev = data;
- unsigned long status, cifr, cicr0;
+ struct pxa_camera_dev *pcdev = (struct pxa_camera_dev *)arg;
+ unsigned long cifr;
struct pxa_buffer *buf;
struct videobuf_buffer *vb;
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "Camera interrupt status 0x%x\n",
+ __raw_readl(pcdev->base + CISR));
+
+ /* Reset the FIFOs */
+ cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
+ __raw_writel(cifr, pcdev->base + CIFR);
+
+ pcdev->active = list_first_entry(&pcdev->capture,
+ struct pxa_buffer, vb.queue);
+ vb = &pcdev->active->vb;
+ buf = container_of(vb, struct pxa_buffer, vb);
+ pxa_videobuf_set_actdma(pcdev, buf);
+
+ pxa_dma_start_channels(pcdev);
+}
+
+static irqreturn_t pxa_camera_irq(int irq, void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+ unsigned long status, cicr0;
+
status = __raw_readl(pcdev->base + CISR);
dev_dbg(pcdev->soc_host.v4l2_dev.dev,
"Camera interrupt status 0x%lx\n", status);
@@ -937,20 +894,9 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
__raw_writel(status, pcdev->base + CISR);
if (status & CISR_EOF) {
- /* Reset the FIFOs */
- cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
- __raw_writel(cifr, pcdev->base + CIFR);
-
- pcdev->active = list_first_entry(&pcdev->capture,
- struct pxa_buffer, vb.queue);
- vb = &pcdev->active->vb;
- buf = container_of(vb, struct pxa_buffer, vb);
- pxa_videobuf_set_actdma(pcdev, buf);
-
- pxa_dma_start_channels(pcdev);
-
cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM;
__raw_writel(cicr0, pcdev->base + CICR0);
+ tasklet_schedule(&pcdev->task_eof);
}
return IRQ_HANDLED;
@@ -993,10 +939,7 @@ static void pxa_camera_clock_stop(struct soc_camera_host *ici)
__raw_writel(0x3ff, pcdev->base + CICR0);
/* Stop DMA engine */
- DCSR(pcdev->dma_chans[0]) = 0;
- DCSR(pcdev->dma_chans[1]) = 0;
- DCSR(pcdev->dma_chans[2]) = 0;
-
+ pxa_dma_stop_channels(pcdev);
pxa_camera_deactivate(pcdev);
}
@@ -1623,10 +1566,6 @@ static int pxa_camera_resume(struct device *dev)
struct pxa_camera_dev *pcdev = ici->priv;
int i = 0, ret = 0;
- DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
- DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
- DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
-
__raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0);
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1);
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2);
@@ -1732,8 +1671,15 @@ static int pxa_camera_probe(struct platform_device *pdev)
struct pxa_camera_dev *pcdev;
struct resource *res;
void __iomem *base;
+ struct dma_slave_config config = {
+ .src_addr_width = 0,
+ .src_maxburst = 8,
+ .direction = DMA_DEV_TO_MEM,
+ };
+ dma_cap_mask_t mask;
+ struct pxad_param params;
int irq;
- int err = 0;
+ int err = 0, i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -1801,36 +1747,47 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->base = base;
/* request dma */
- err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH,
- pxa_camera_dma_irq_y, pcdev);
- if (err < 0) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_PRIVATE, mask);
+
+ params.prio = 0;
+ params.drcmr = 68;
+ pcdev->dma_chans[0] =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &params, &pdev->dev, "CI_Y");
+ if (!pcdev->dma_chans[0]) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
- return err;
+ return -ENODEV;
}
- pcdev->dma_chans[0] = err;
- dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]);
- err = pxa_request_dma("CI_U", DMA_PRIO_HIGH,
- pxa_camera_dma_irq_u, pcdev);
- if (err < 0) {
- dev_err(&pdev->dev, "Can't request DMA for U\n");
+ params.drcmr = 69;
+ pcdev->dma_chans[1] =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &params, &pdev->dev, "CI_U");
+ if (!pcdev->dma_chans[1]) {
+ dev_err(&pdev->dev, "Can't request DMA for Y\n");
goto exit_free_dma_y;
}
- pcdev->dma_chans[1] = err;
- dev_dbg(&pdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]);
- err = pxa_request_dma("CI_V", DMA_PRIO_HIGH,
- pxa_camera_dma_irq_v, pcdev);
- if (err < 0) {
+ params.drcmr = 70;
+ pcdev->dma_chans[2] =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &params, &pdev->dev, "CI_V");
+ if (!pcdev->dma_chans[2]) {
dev_err(&pdev->dev, "Can't request DMA for V\n");
goto exit_free_dma_u;
}
- pcdev->dma_chans[2] = err;
- dev_dbg(&pdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]);
- DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
- DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
- DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
+ for (i = 0; i < 3; i++) {
+ config.src_addr = pcdev->res->start + CIBR0 + i * 8;
+ err = dmaengine_slave_config(pcdev->dma_chans[i], &config);
+ if (err < 0) {
+ dev_err(&pdev->dev, "dma slave config failed: %d\n",
+ err);
+ goto exit_free_dma;
+ }
+ }
/* request irq */
err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0,
@@ -1845,6 +1802,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->soc_host.priv = pcdev;
pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
pcdev->soc_host.nr = pdev->id;
+ tasklet_init(&pcdev->task_eof, pxa_camera_eof, (unsigned long)pcdev);
err = soc_camera_host_register(&pcdev->soc_host);
if (err)
@@ -1853,11 +1811,11 @@ static int pxa_camera_probe(struct platform_device *pdev)
return 0;
exit_free_dma:
- pxa_free_dma(pcdev->dma_chans[2]);
+ dma_release_channel(pcdev->dma_chans[2]);
exit_free_dma_u:
- pxa_free_dma(pcdev->dma_chans[1]);
+ dma_release_channel(pcdev->dma_chans[1]);
exit_free_dma_y:
- pxa_free_dma(pcdev->dma_chans[0]);
+ dma_release_channel(pcdev->dma_chans[0]);
return err;
}
@@ -1867,9 +1825,9 @@ static int pxa_camera_remove(struct platform_device *pdev)
struct pxa_camera_dev *pcdev = container_of(soc_host,
struct pxa_camera_dev, soc_host);
- pxa_free_dma(pcdev->dma_chans[0]);
- pxa_free_dma(pcdev->dma_chans[1]);
- pxa_free_dma(pcdev->dma_chans[2]);
+ dma_release_channel(pcdev->dma_chans[0]);
+ dma_release_channel(pcdev->dma_chans[1]);
+ dma_release_channel(pcdev->dma_chans[2]);
soc_camera_host_unregister(soc_host);
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index b7fd695b9ed5..3b8edf458964 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -124,7 +124,7 @@
#define VNDMR_EXRGB (1 << 8)
#define VNDMR_BPSM (1 << 4)
#define VNDMR_DTMD_YCSEP (1 << 1)
-#define VNDMR_DTMD_ARGB1555 (1 << 0)
+#define VNDMR_DTMD_ARGB (1 << 0)
/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30)
@@ -143,6 +143,7 @@
#define RCAR_VIN_BT656 (1 << 3)
enum chip_id {
+ RCAR_GEN3,
RCAR_GEN2,
RCAR_H1,
RCAR_M1,
@@ -642,21 +643,26 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
output_is_yuv = true;
break;
case V4L2_PIX_FMT_RGB555X:
- dmr = VNDMR_DTMD_ARGB1555;
+ dmr = VNDMR_DTMD_ARGB;
break;
case V4L2_PIX_FMT_RGB565:
dmr = 0;
break;
case V4L2_PIX_FMT_RGB32:
- if (priv->chip == RCAR_GEN2 || priv->chip == RCAR_H1 ||
- priv->chip == RCAR_E1) {
- dmr = VNDMR_EXRGB;
- break;
- }
+ if (priv->chip != RCAR_GEN2 && priv->chip != RCAR_H1 &&
+ priv->chip != RCAR_E1)
+ goto e_format;
+
+ dmr = VNDMR_EXRGB;
+ break;
+ case V4L2_PIX_FMT_ARGB32:
+ if (priv->chip != RCAR_GEN3)
+ goto e_format;
+
+ dmr = VNDMR_EXRGB | VNDMR_DTMD_ARGB;
+ break;
default:
- dev_warn(icd->parent, "Invalid fourcc format (0x%x)\n",
- icd->current_fmt->host_fmt->fourcc);
- return -EINVAL;
+ goto e_format;
}
/* Always update on field change */
@@ -678,6 +684,11 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
iowrite32(vnmc | VNMC_ME, priv->base + VNMC_REG);
return 0;
+
+e_format:
+ dev_warn(icd->parent, "Invalid fourcc format (0x%x)\n",
+ icd->current_fmt->host_fmt->fourcc);
+ return -EINVAL;
}
static void rcar_vin_capture(struct rcar_vin_priv *priv)
@@ -1303,6 +1314,14 @@ static const struct soc_mbus_pixelfmt rcar_vin_formats[] = {
.order = SOC_MBUS_ORDER_LE,
.layout = SOC_MBUS_LAYOUT_PACKED,
},
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .name = "ARGB8888",
+ .bits_per_sample = 32,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
};
static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
@@ -1610,6 +1629,7 @@ static int rcar_vin_set_fmt(struct soc_camera_device *icd,
case V4L2_PIX_FMT_RGB32:
can_scale = priv->chip != RCAR_E1;
break;
+ case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_RGB565:
@@ -1818,6 +1838,7 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
#ifdef CONFIG_OF
static const struct of_device_id rcar_vin_of_table[] = {
+ { .compatible = "renesas,vin-r8a7795", .data = (void *)RCAR_GEN3 },
{ .compatible = "renesas,vin-r8a7794", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7793", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 },
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 90c87f2b4ec0..b9f369c0fb94 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -213,8 +213,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
unsigned int *count, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
- struct soc_camera_device *icd = container_of(vq,
- struct soc_camera_device, vb2_vidq);
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -361,8 +360,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = container_of(vb->vb2_queue,
- struct soc_camera_device, vb2_vidq);
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
@@ -413,8 +411,7 @@ error:
static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = container_of(vb->vb2_queue,
- struct soc_camera_device, vb2_vidq);
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -444,8 +441,7 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = container_of(vb->vb2_queue,
- struct soc_camera_device, vb2_vidq);
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -460,7 +456,7 @@ static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
{
- struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
+ struct soc_camera_device *icd = soc_camera_from_vb2q(q);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct list_head *buf_head, *tmp;
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
index 69d7fe4471c2..2c0015b1264d 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -118,7 +118,7 @@ int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
struct channel_info *tsin, int chan_num)
{
struct tda18212_config *tda18212;
- struct stv6110x_devctl *fe2;
+ const struct stv6110x_devctl *fe2;
struct i2c_client *client;
struct i2c_board_info tda18212_info = {
.type = "tda18212",
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
index be680f839e77..e236059a60ad 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -3,3 +3,7 @@ obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
ti-vpe-y := vpe.o sc.o csc.o vpdma.o
ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o
+
+ti-cal-y := cal.o
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
new file mode 100644
index 000000000000..82001e6b5553
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -0,0 +1,1947 @@
+/*
+ * TI CAL camera interface driver
+ *
+ * Copyright (c) 2015 Texas Instruments Inc.
+ * Benoit Parrot, <bparrot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+
+#include <media/v4l2-of.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include "cal_regs.h"
+
+#define CAL_MODULE_NAME "cal"
+
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1200
+
+#define CAL_VERSION "0.1.0"
+
+MODULE_DESCRIPTION("TI CAL driver");
+MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(CAL_VERSION);
+
+static unsigned video_nr = -1;
+module_param(video_nr, uint, 0644);
+MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
+
+static unsigned debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+/* timeperframe: default */
+static const struct v4l2_fract
+ tpf_default = {.numerator = 1001, .denominator = 30000};
+
+#define cal_dbg(level, caldev, fmt, arg...) \
+ v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg)
+#define cal_info(caldev, fmt, arg...) \
+ v4l2_info(&caldev->v4l2_dev, fmt, ##arg)
+#define cal_err(caldev, fmt, arg...) \
+ v4l2_err(&caldev->v4l2_dev, fmt, ##arg)
+
+#define ctx_dbg(level, ctx, fmt, arg...) \
+ v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg)
+#define ctx_info(ctx, fmt, arg...) \
+ v4l2_info(&ctx->v4l2_dev, fmt, ##arg)
+#define ctx_err(ctx, fmt, arg...) \
+ v4l2_err(&ctx->v4l2_dev, fmt, ##arg)
+
+#define CAL_NUM_INPUT 1
+#define CAL_NUM_CONTEXT 2
+
+#define bytes_per_line(pixel, bpp) (ALIGN(pixel * bpp, 16))
+
+#define reg_read(dev, offset) ioread32(dev->base + offset)
+#define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
+
+#define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \
+ mask)
+#define reg_write_field(dev, offset, field, mask) { \
+ u32 val = reg_read(dev, offset); \
+ set_field(&val, field, mask); \
+ reg_write(dev, offset, val); }
+
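
Note: a quick worked example of the stride macro above, assuming 2 bytes per
pixel (e.g. YUYV):

	/* bytes_per_line(640, 2) == ALIGN(1280, 16) == 1280
	 * bytes_per_line(639, 2) == ALIGN(1278, 16) == 1280 (padded to 16) */
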
+/* ------------------------------------------------------------------
+ * Basic structures
+ * ------------------------------------------------------------------
+ */
+
+struct cal_fmt {
+ u32 fourcc;
+ u32 code;
+ u8 depth;
+};
+
+static struct cal_fmt cal_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
+ .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
+ .depth = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
+ .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
+ .depth = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .depth = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .depth = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .depth = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .depth = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .depth = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .depth = 16,
+ },
+};
+
+/* Print Four-character-code (FOURCC) */
+static char *fourcc_to_str(u32 fmt)
+{
+ static char code[5];
+
+ code[0] = (unsigned char)(fmt & 0xff);
+ code[1] = (unsigned char)((fmt >> 8) & 0xff);
+ code[2] = (unsigned char)((fmt >> 16) & 0xff);
+ code[3] = (unsigned char)((fmt >> 24) & 0xff);
+ code[4] = '\0';
+
+ return code;
+}
+
+/* buffer for one video frame */
+struct cal_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ const struct cal_fmt *fmt;
+};
+
+struct cal_dmaqueue {
+ struct list_head active;
+
+ /* Counters to control fps rate */
+ int frame;
+ int ini_jiffies;
+};
+
+struct cm_data {
+ void __iomem *base;
+ struct resource *res;
+
+ unsigned int camerrx_control;
+
+ struct platform_device *pdev;
+};
+
+struct cc_data {
+ void __iomem *base;
+ struct resource *res;
+
+ struct platform_device *pdev;
+};
+
+/*
+ * There is one cal_dev structure in the driver; it is shared by
+ * all instances.
+ */
+struct cal_dev {
+ int irq;
+ void __iomem *base;
+ struct resource *res;
+ struct platform_device *pdev;
+ struct v4l2_device v4l2_dev;
+
+ /* Control Module handle */
+ struct cm_data *cm;
+ /* Camera Core Module handle */
+ struct cc_data *cc[CAL_NUM_CSI2_PORTS];
+
+ struct cal_ctx *ctx[CAL_NUM_CONTEXT];
+};
+
+/*
+ * There is one cal_ctx structure for each camera core context.
+ */
+struct cal_ctx {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct video_device vdev;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *sensor;
+ struct v4l2_of_endpoint endpoint;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_async_subdev *asd_list[1];
+
+ struct v4l2_fh fh;
+ struct cal_dev *dev;
+ struct cc_data *cc;
+
+ /* v4l2_ioctl mutex */
+ struct mutex mutex;
+ /* v4l2 buffers lock */
+ spinlock_t slock;
+
+ /* Several counters */
+ unsigned long jiffies;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct cal_dmaqueue vidq;
+
+ /* Input Number */
+ int input;
+
+ /* video capture */
+ const struct cal_fmt *fmt;
+ /* Used to store current pixel format */
+ struct v4l2_format v_fmt;
+ /* Used to store current mbus frame format */
+ struct v4l2_mbus_framefmt m_fmt;
+
+ /* Current subdev enumerated format */
+ struct cal_fmt *active_fmt[ARRAY_SIZE(cal_formats)];
+ int num_active_fmt;
+
+ struct v4l2_fract timeperframe;
+ unsigned int sequence;
+ unsigned int external_rate;
+ struct vb2_queue vb_vidq;
+ unsigned int seq_count;
+ unsigned int csi2_port;
+ unsigned int virtual_channel;
+
+ /* Pointer to the current v4l2_buffer */
+ struct cal_buffer *cur_frm;
+ /* Pointer to the next v4l2_buffer */
+ struct cal_buffer *next_frm;
+};
+
+static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
+ u32 pixelformat)
+{
+ const struct cal_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ctx->num_active_fmt; k++) {
+ fmt = ctx->active_fmt[k];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
+ u32 code)
+{
+ const struct cal_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ctx->num_active_fmt; k++) {
+ fmt = ctx->active_fmt[k];
+ if (fmt->code == code)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct cal_ctx, notifier);
+}
+
+static inline int get_field(u32 value, u32 mask)
+{
+ return (value & mask) >> __ffs(mask);
+}
+
+static inline void set_field(u32 *valp, u32 field, u32 mask)
+{
+ u32 val = *valp;
+
+ val &= ~mask;
+ val |= (field << __ffs(mask)) & mask;
+ *valp = val;
+}
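
Note: a worked example of the field helpers, for a hypothetical 4-bit field at
mask 0x00f0 (__ffs(0x00f0) == 4):

	u32 val = 0x0123;
	int f = get_field(val, 0x00f0);	/* (0x0123 & 0x00f0) >> 4 == 0x2 */

	set_field(&val, 0x7, 0x00f0);	/* val becomes 0x0173 */
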
+
+/*
+ * Control Module block access
+ */
+static struct cm_data *cm_create(struct cal_dev *dev)
+{
+ struct platform_device *pdev = dev->pdev;
+ struct cm_data *cm;
+
+ cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
+ if (!cm)
+ return ERR_PTR(-ENOMEM);
+
+ cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "camerrx_control");
+ cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
+ if (IS_ERR(cm->base)) {
+ cal_err(dev, "failed to ioremap\n");
+ return ERR_CAST(cm->base);
+ }
+
+ cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+ cm->res->name, &cm->res->start, &cm->res->end);
+
+ return cm;
+}
+
+static void camerarx_phy_enable(struct cal_ctx *ctx)
+{
+ u32 val;
+
+ if (!ctx->dev->cm->base) {
+ ctx_err(ctx, "cm not mapped\n");
+ return;
+ }
+
+ val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
+ if (ctx->csi2_port == 1) {
+ set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
+ set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
+ /* enable all lanes by default */
+ set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
+ set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
+ } else if (ctx->csi2_port == 2) {
+ set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
+ set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
+ /* enable all lanes by default */
+ set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
+ set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
+ }
+ reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+}
+
+static void camerarx_phy_disable(struct cal_ctx *ctx)
+{
+ u32 val;
+
+ if (!ctx->dev->cm->base) {
+ ctx_err(ctx, "cm not mapped\n");
+ return;
+ }
+
+ val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
+ if (ctx->csi2_port == 1)
+ set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
+ else if (ctx->csi2_port == 2)
+ set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
+ reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+}
+
+/*
+ * Camera Instance access block
+ */
+static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core)
+{
+ struct platform_device *pdev = dev->pdev;
+ struct cc_data *cc;
+
+ cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL);
+ if (!cc)
+ return ERR_PTR(-ENOMEM);
+
+ cc->res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ (core == 0) ?
+ "cal_rx_core0" :
+ "cal_rx_core1");
+ cc->base = devm_ioremap_resource(&pdev->dev, cc->res);
+ if (IS_ERR(cc->base)) {
+ cal_err(dev, "failed to ioremap\n");
+ return ERR_CAST(cc->base);
+ }
+
+ cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+ cc->res->name, &cc->res->start, &cc->res->end);
+
+ return cc;
+}
+
+/*
+ * Get Revision and HW info
+ */
+static void cal_get_hwinfo(struct cal_dev *dev)
+{
+ u32 revision = 0;
+ u32 hwinfo = 0;
+
+ revision = reg_read(dev, CAL_HL_REVISION);
+ cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n",
+ revision);
+
+ hwinfo = reg_read(dev, CAL_HL_HWINFO);
+ cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n",
+ hwinfo);
+}
+
+static inline int cal_runtime_get(struct cal_dev *dev)
+{
+ int r;
+
+ r = pm_runtime_get_sync(&dev->pdev->dev);
+
+ return r;
+}
+
+static inline void cal_runtime_put(struct cal_dev *dev)
+{
+ pm_runtime_put_sync(&dev->pdev->dev);
+}
+
+static void cal_quickdump_regs(struct cal_dev *dev)
+{
+ cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (__force const void *)dev->base,
+ resource_size(dev->res), false);
+
+ if (dev->ctx[0]) {
+ cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n",
+ &dev->ctx[0]->cc->res->start);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (__force const void *)dev->ctx[0]->cc->base,
+ resource_size(dev->ctx[0]->cc->res),
+ false);
+ }
+
+ if (dev->ctx[1]) {
+ cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n",
+ &dev->ctx[1]->cc->res->start);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (__force const void *)dev->ctx[1]->cc->base,
+ resource_size(dev->ctx[1]->cc->res),
+ false);
+ }
+
+ cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
+ &dev->cm->res->start);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (__force const void *)dev->cm->base,
+ resource_size(dev->cm->res), false);
+}
+
+/*
+ * Enable the expected IRQ sources
+ */
+static void enable_irqs(struct cal_ctx *ctx)
+{
+ /* Enable IRQ_WDMA_END 0/1 */
+ reg_write_field(ctx->dev,
+ CAL_HL_IRQENABLE_SET(2),
+ CAL_HL_IRQ_ENABLE,
+ CAL_HL_IRQ_MASK(ctx->csi2_port));
+ /* Enable IRQ_WDMA_START 0/1 */
+ reg_write_field(ctx->dev,
+ CAL_HL_IRQENABLE_SET(3),
+ CAL_HL_IRQ_ENABLE,
+ CAL_HL_IRQ_MASK(ctx->csi2_port));
+ /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
+ reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
+}
+
+static void disable_irqs(struct cal_ctx *ctx)
+{
+ /* Disable IRQ_WDMA_END 0/1 */
+ reg_write_field(ctx->dev,
+ CAL_HL_IRQENABLE_CLR(2),
+ CAL_HL_IRQ_CLEAR,
+ CAL_HL_IRQ_MASK(ctx->csi2_port));
+ /* Disable IRQ_WDMA_START 0/1 */
+ reg_write_field(ctx->dev,
+ CAL_HL_IRQENABLE_CLR(3),
+ CAL_HL_IRQ_CLEAR,
+ CAL_HL_IRQ_MASK(ctx->csi2_port));
+ /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
+ reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
+}
+
+static void csi2_init(struct cal_ctx *ctx)
+{
+ int i;
+ u32 val;
+
+ val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
+ set_field(&val, CAL_GEN_ENABLE,
+ CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
+ set_field(&val, CAL_GEN_ENABLE,
+ CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
+ set_field(&val, CAL_GEN_DISABLE,
+ CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
+ set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
+ reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
+
+ val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+ set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
+ CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
+ set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
+ CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
+ reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+ for (i = 0; i < 10; i++) {
+ if (reg_read_field(ctx->dev,
+ CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+ CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
+ CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
+ break;
+ usleep_range(1000, 1100);
+ }
+ ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
+
+ val = reg_read(ctx->dev, CAL_CTRL);
+ set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
+ set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
+ set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
+ CAL_CTRL_POSTED_WRITES_MASK);
+ set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
+ set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
+ reg_write(ctx->dev, CAL_CTRL, val);
+ ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
+}
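+
+/*
+ * Note on csi2_init() above: the power-status loop polls for up to
+ * ~10 ms (10 iterations of roughly 1 ms each) waiting for the complex
+ * IO to report the ON state, then proceeds regardless of the outcome.
+ */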
+
+static void csi2_lane_config(struct cal_ctx *ctx)
+{
+ u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+ u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
+ u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
+ struct v4l2_of_bus_mipi_csi2 *mipi_csi2 = &ctx->endpoint.bus.mipi_csi2;
+ int lane;
+
+ set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
+ set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
+ for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
+ /*
+ * Each lane is one nibble apart, starting with the clock
+ * lane followed by the data lanes, so shift the masks by 4
+ * for every lane.
+ */
+ lane_mask <<= 4;
+ polarity_mask <<= 4;
+ set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
+ set_field(&val, mipi_csi2->lane_polarities[lane + 1],
+ polarity_mask);
+ }
+
+ reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
+ ctx->csi2_port, val);
+}
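+
+/*
+ * For example, with a hypothetical endpoint using clock-lanes = <0> and
+ * data-lanes = <1 2>, the loop above programs position fields 1 (clock,
+ * bits [2:0]), 2 (data1, bits [6:4]) and 3 (data2, bits [10:8]); the
+ * register position fields are 1-based, with 0 meaning "lane not used".
+ */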
+
+static void csi2_ppi_enable(struct cal_ctx *ctx)
+{
+ reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
+ CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void csi2_ppi_disable(struct cal_ctx *ctx)
+{
+ reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
+ CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void csi2_ctx_config(struct cal_ctx *ctx)
+{
+ u32 val;
+
+ val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port));
+ set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK);
+ /*
+ * DT type: MIPI CSI-2 Specs
+ * 0x1: All - DT filter is disabled
+ * 0x24: RGB888 1 pixel = 3 bytes
+ * 0x2B: RAW10 4 pixels = 5 bytes
+ * 0x2A: RAW8 1 pixel = 1 byte
+ * 0x1E: YUV422 2 pixels = 4 bytes
+ */
+ set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
+ /* Virtual Channel from the CSI2 sensor, usually 0 */
+ set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
+ /* NUM_LINES_PER_FRAME => 0 means auto detect */
+ set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
+ set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
+ set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
+ CAL_CSI2_CTX_PACK_MODE_MASK);
+ reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)));
+}
+
+static void pix_proc_config(struct cal_ctx *ctx)
+{
+ u32 val;
+
+ val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
+ set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
+ set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
+ set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
+ set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
+ set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
+ set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
+ reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
+}
+
+static void cal_wr_dma_config(struct cal_ctx *ctx,
+ unsigned int width)
+{
+ u32 val;
+
+ val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
+ set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
+ set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
+ CAL_WR_DMA_CTRL_DTAG_MASK);
+ set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
+ CAL_WR_DMA_CTRL_MODE_MASK);
+ set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
+ CAL_WR_DMA_CTRL_PATTERN_MASK);
+ set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
+ reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));
+
+ /*
+ * Program the line offset as width / 16; the exact unit is not
+ * fully understood, but zero is known not to work correctly.
+ */
+ reg_write_field(ctx->dev,
+ CAL_WR_DMA_OFST(ctx->csi2_port),
+ (width / 16),
+ CAL_WR_DMA_OFST_MASK);
+ ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port)));
+
+ val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port));
+ /* XSKIP of zero: no skipping within the 64-bit word stream */
+ set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
+ /*
+ * width / 8 is the size of an entire line in 64-bit words;
+ * writing 0 instead means "transfer all data until the end of
+ * the line is detected automatically".
+ */
+ set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
+ reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
+ ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
+ reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
+}
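+
+/*
+ * Worked example, assuming a 1280-pixel-wide YUYV capture: width here
+ * is bytesperline = 2560, so CAL_WR_DMA_OFST is programmed with
+ * 2560 / 16 = 160 and CAL_WR_DMA_XSIZE with 2560 / 8 = 320 64-bit
+ * words per line.
+ */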
+
+static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
+{
+ reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr);
+}
+
+/*
+ * TCLK values are OK at their reset values
+ */
+#define TCLK_TERM 0
+#define TCLK_MISS 1
+#define TCLK_SETTLE 14
+#define THS_SETTLE 15
+
+static void csi2_phy_config(struct cal_ctx *ctx)
+{
+ unsigned int reg0, reg1;
+ unsigned int ths_term, ths_settle;
+ unsigned int ddrclkperiod_us;
+
+ /*
+ * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2.
+ * Note: despite its _us suffix, ddrclkperiod_us below ends up
+ * holding the DDR clock period in picoseconds.
+ */
+ ddrclkperiod_us = ctx->external_rate / 2000000;
+ ddrclkperiod_us = 1000000 / ddrclkperiod_us;
+ ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
+
+ ths_term = 20000 / ddrclkperiod_us;
+ ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
+ ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
+
+ /*
+ * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1.
+ * Since CtrlClk is fixed at 96 MHz we get
+ * ths_settle = floor(176.3 / 10.416) - 1 = 15
+ * If we ever switch to a dynamic clock then this code might be
+ * useful:
+ *
+ * unsigned int ctrlclkperiod_us;
+ * ctrlclkperiod_us = 96000000 / 1000000;
+ * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us;
+ * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us);
+ *
+ * ths_settle = 176300 / ctrlclkperiod_us;
+ * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle;
+ */
+
+ ths_settle = THS_SETTLE;
+ ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
+
+ reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
+ set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
+ CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
+ set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
+ set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);
+
+ ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0);
+ reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0);
+
+ reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1);
+ set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
+ set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
+ set_field(&reg1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
+ set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);
+
+ ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1);
+ reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1);
+}
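+
+/*
+ * Worked example, assuming a 192 MHz pixel rate: the DDR clock is
+ * 96 MHz, so its period is 1000000 / 96 ~= 10416 ps and
+ * ths_term = 20000 / 10416 = 1, which is below the -2 correction
+ * threshold and is therefore programmed unmodified.
+ */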
+
+static int cal_get_external_info(struct cal_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+
+ if (!ctx->sensor)
+ return -ENODEV;
+
+ ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl) {
+ ctx_err(ctx, "no pixel rate control in subdev: %s\n",
+ ctx->sensor->name);
+ return -EPIPE;
+ }
+
+ ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
+ ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate);
+
+ return 0;
+}
+
+static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
+{
+ struct cal_dmaqueue *dma_q = &ctx->vidq;
+ struct cal_buffer *buf;
+ unsigned long addr;
+
+ buf = list_entry(dma_q->active.next, struct cal_buffer, list);
+ ctx->next_frm = buf;
+ list_del(&buf->list);
+
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ cal_wr_dma_addr(ctx, addr);
+}
+
+static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
+{
+ ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
+ ctx->cur_frm->vb.field = ctx->m_fmt.field;
+ ctx->cur_frm->vb.sequence = ctx->sequence++;
+
+ vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ ctx->cur_frm = ctx->next_frm;
+}
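+
+/*
+ * Buffer hand-off scheme: cur_frm is the buffer the WDMA engine is
+ * currently writing, next_frm the one programmed for the following
+ * frame. When both point at the same buffer the queue has run dry and
+ * the hardware keeps rewriting that buffer; cal_irq() only completes
+ * cur_frm once the two pointers differ again.
+ */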
+
+#define isvcirqset(irq, vc, ff) (irq & \
+ (CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK))
+
+#define isportirqset(irq, port) (irq & CAL_HL_IRQ_MASK(port))
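+
+/*
+ * For example, isportirqset(irqst2, 1) expands to
+ * irqst2 & BIT_MASK(0), i.e. it tests the WDMA interrupt bit of CSI2
+ * port 1 (port numbers are 1-based throughout the driver).
+ */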
+
+static irqreturn_t cal_irq(int irq_cal, void *data)
+{
+ struct cal_dev *dev = (struct cal_dev *)data;
+ struct cal_ctx *ctx;
+ struct cal_dmaqueue *dma_q;
+ u32 irqst2, irqst3;
+
+ /* Check which DMA just finished */
+ irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
+ if (irqst2) {
+ /* Clear Interrupt status */
+ reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);
+
+ /* Need to check both ports */
+ if (isportirqset(irqst2, 1)) {
+ ctx = dev->ctx[0];
+
+ if (ctx->cur_frm != ctx->next_frm)
+ cal_process_buffer_complete(ctx);
+ }
+
+ if (isportirqset(irqst2, 2)) {
+ ctx = dev->ctx[1];
+
+ if (ctx->cur_frm != ctx->next_frm)
+ cal_process_buffer_complete(ctx);
+ }
+ }
+
+ /* Check which DMA just started */
+ irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
+ if (irqst3) {
+ /* Clear Interrupt status */
+ reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);
+
+ /* Need to check both ports */
+ if (isportirqset(irqst3, 1)) {
+ ctx = dev->ctx[0];
+ dma_q = &ctx->vidq;
+
+ spin_lock(&ctx->slock);
+ if (!list_empty(&dma_q->active) &&
+ ctx->cur_frm == ctx->next_frm)
+ cal_schedule_next_buffer(ctx);
+ spin_unlock(&ctx->slock);
+ }
+
+ if (isportirqset(irqst3, 2)) {
+ ctx = dev->ctx[1];
+ dma_q = &ctx->vidq;
+
+ spin_lock(&ctx->slock);
+ if (!list_empty(&dma_q->active) &&
+ ctx->cur_frm == ctx->next_frm)
+ cal_schedule_next_buffer(ctx);
+ spin_unlock(&ctx->slock);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int cal_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ strlcpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
+
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", ctx->v4l2_dev.name);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int cal_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_fmt *fmt = NULL;
+
+ if (f->index >= ctx->num_active_fmt)
+ return -EINVAL;
+
+ fmt = ctx->active_fmt[f->index];
+
+ f->pixelformat = fmt->fourcc;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ return 0;
+}
+
+static int __subdev_get_format(struct cal_ctx *ctx,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+ int ret;
+
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+
+ ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
+
+ *fmt = *mbus_fmt;
+
+ ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
+
+ return 0;
+}
+
+static int __subdev_set_format(struct cal_ctx *ctx,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+ int ret;
+
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ *mbus_fmt = *fmt;
+
+ ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
+
+ ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
+
+ return 0;
+}
+
+static int cal_calc_format_size(struct cal_ctx *ctx,
+ const struct cal_fmt *fmt,
+ struct v4l2_format *f)
+{
+ if (!fmt) {
+ ctx_dbg(3, ctx, "No cal_fmt provided!\n");
+ return -EINVAL;
+ }
+
+ v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
+ &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
+ f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
+ fmt->depth >> 3);
+ f->fmt.pix.sizeimage = f->fmt.pix.height *
+ f->fmt.pix.bytesperline;
+
+ ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
+ __func__, fourcc_to_str(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+
+ return 0;
+}
+
+static int cal_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ *f = ctx->v_fmt;
+
+ return 0;
+}
+
+static int cal_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret, found;
+
+ fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+ if (!fmt) {
+ ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
+ f->fmt.pix.pixelformat);
+
+ /* Just get the first one enumerated */
+ fmt = ctx->active_fmt[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
+
+ /* check for/find a valid width/height */
+ ret = 0;
+ found = false;
+ fse.pad = 0;
+ fse.code = fmt->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ for (fse.index = 0; ; fse.index++) {
+ ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ break;
+
+ if ((f->fmt.pix.width == fse.max_width) &&
+ (f->fmt.pix.height == fse.max_height)) {
+ found = true;
+ break;
+ } else if ((f->fmt.pix.width >= fse.min_width) &&
+ (f->fmt.pix.width <= fse.max_width) &&
+ (f->fmt.pix.height >= fse.min_height) &&
+ (f->fmt.pix.height <= fse.max_height)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* use existing values as default */
+ f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
+ f->fmt.pix.height = ctx->v_fmt.fmt.pix.height;
+ }
+
+ /*
+ * Use current colorspace for now, it will get
+ * updated properly during s_fmt
+ */
+ f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
+ return cal_calc_format_size(ctx, fmt, f);
+}
+
+static int cal_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ struct vb2_queue *q = &ctx->vb_vidq;
+ const struct cal_fmt *fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ if (vb2_is_busy(q)) {
+ ctx_dbg(3, ctx, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = cal_try_fmt_vid_cap(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+
+ v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
+
+ ret = __subdev_set_format(ctx, &mbus_fmt);
+ if (ret)
+ return ret;
+
+ /* Just double check nothing has gone wrong */
+ if (mbus_fmt.code != fmt->code) {
+ ctx_dbg(3, ctx,
+ "%s subdev changed format on us, this should not happen\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+ ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
+ cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
+ ctx->fmt = fmt;
+ ctx->m_fmt = mbus_fmt;
+ *f = ctx->v_fmt;
+
+ return 0;
+}
+
+static int cal_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret;
+
+ /* check for valid format */
+ fmt = find_format_by_pix(ctx, fsize->pixel_format);
+ if (!fmt) {
+ ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ fse.index = fsize->index;
+ fse.pad = 0;
+ fse.code = fmt->code;
+
+ ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
+ ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int cal_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index >= CAL_NUM_INPUT)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ sprintf(inp->name, "Camera %u", inp->index);
+ return 0;
+}
+
+static int cal_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ *i = ctx->input;
+ return 0;
+}
+
+static int cal_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+
+ if (i >= CAL_NUM_INPUT)
+ return -EINVAL;
+
+ ctx->input = i;
+ return 0;
+}
+
+/* timeperframe is arbitrary and continuous */
+static int cal_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ struct cal_ctx *ctx = video_drvdata(file);
+ const struct cal_fmt *fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ fmt = find_format_by_pix(ctx, fival->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fie.code = fmt->code;
+ ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_interval,
+ NULL, &fie);
+ if (ret)
+ return ret;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+/*
+ * Videobuf operations
+ */
+static int cal_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ unsigned size = ctx->v_fmt.fmt.pix.sizeimage;
+
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+ alloc_ctxs[0] = ctx->alloc_ctx;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
+
+ return 0;
+}
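+
+/*
+ * For example, a REQBUFS count of 1 on an empty queue is bumped to 3
+ * above, matching the q->min_buffers_needed = 3 set up in
+ * cal_complete_ctx(): presumably one buffer being written, one queued
+ * next and one available to userspace.
+ */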
+
+static int cal_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+ vb.vb2_buf);
+ unsigned long size;
+
+ if (WARN_ON(!ctx->fmt))
+ return -EINVAL;
+
+ size = ctx->v_fmt.fmt.pix.sizeimage;
+ if (vb2_plane_size(vb, 0) < size) {
+ ctx_err(ctx,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+ return 0;
+}
+
+static void cal_buffer_queue(struct vb2_buffer *vb)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+ vb.vb2_buf);
+ struct cal_dmaqueue *vidq = &ctx->vidq;
+ unsigned long flags = 0;
+
+ /* TODO: recheck locking */
+ spin_lock_irqsave(&ctx->slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cal_dmaqueue *dma_q = &ctx->vidq;
+ struct cal_buffer *buf, *tmp;
+ unsigned long addr = 0;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctx->slock, flags);
+ if (list_empty(&dma_q->active)) {
+ spin_unlock_irqrestore(&ctx->slock, flags);
+ ctx_dbg(3, ctx, "buffer queue is empty\n");
+ return -EIO;
+ }
+
+ buf = list_entry(dma_q->active.next, struct cal_buffer, list);
+ ctx->cur_frm = buf;
+ ctx->next_frm = buf;
+ list_del(&buf->list);
+ spin_unlock_irqrestore(&ctx->slock, flags);
+
+ addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
+ ctx->sequence = 0;
+
+ ret = cal_get_external_info(ctx);
+ if (ret < 0)
+ goto err;
+
+ cal_runtime_get(ctx->dev);
+
+ enable_irqs(ctx);
+ camerarx_phy_enable(ctx);
+ csi2_init(ctx);
+ csi2_phy_config(ctx);
+ csi2_lane_config(ctx);
+ csi2_ctx_config(ctx);
+ pix_proc_config(ctx);
+ cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
+ cal_wr_dma_addr(ctx, addr);
+ csi2_ppi_enable(ctx);
+
+ ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
+ if (ret) {
+ ctx_err(ctx, "stream on failed in subdev\n");
+ cal_runtime_put(ctx->dev);
+ goto err;
+ }
+
+ if (debug >= 4)
+ cal_quickdump_regs(ctx->dev);
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ return ret;
+}
+
+static void cal_stop_streaming(struct vb2_queue *vq)
+{
+ struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cal_dmaqueue *dma_q = &ctx->vidq;
+ struct cal_buffer *buf, *tmp;
+ unsigned long flags;
+
+ if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
+ ctx_err(ctx, "stream off failed in subdev\n");
+
+ csi2_ppi_disable(ctx);
+ disable_irqs(ctx);
+
+ /* Release all active buffers */
+ spin_lock_irqsave(&ctx->slock, flags);
+ list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ if (ctx->cur_frm == ctx->next_frm) {
+ vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ ctx->cur_frm = NULL;
+ ctx->next_frm = NULL;
+ spin_unlock_irqrestore(&ctx->slock, flags);
+
+ cal_runtime_put(ctx->dev);
+}
+
+static struct vb2_ops cal_video_qops = {
+ .queue_setup = cal_queue_setup,
+ .buf_prepare = cal_buffer_prepare,
+ .buf_queue = cal_buffer_queue,
+ .start_streaming = cal_start_streaming,
+ .stop_streaming = cal_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static const struct v4l2_file_operations cal_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops cal_ioctl_ops = {
+ .vidioc_querycap = cal_querycap,
+ .vidioc_enum_fmt_vid_cap = cal_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cal_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cal_s_fmt_vid_cap,
+ .vidioc_enum_framesizes = cal_enum_framesizes,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_enum_input = cal_enum_input,
+ .vidioc_g_input = cal_g_input,
+ .vidioc_s_input = cal_s_input,
+ .vidioc_enum_frameintervals = cal_enum_frameintervals,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static struct video_device cal_videodev = {
+ .name = CAL_MODULE_NAME,
+ .fops = &cal_fops,
+ .ioctl_ops = &cal_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+/* -----------------------------------------------------------------
+ * Initialization and module stuff
+ * ------------------------------------------------------------------
+ */
+static int cal_complete_ctx(struct cal_ctx *ctx);
+
+static int cal_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct cal_ctx *ctx = notifier_to_ctx(notifier);
+ struct v4l2_subdev_mbus_code_enum mbus_code;
+ int ret = 0;
+ int i, j, k;
+
+ if (ctx->sensor) {
+ ctx_info(ctx, "Rejecting subdev %s (already set)\n",
+ subdev->name);
+ return 0;
+ }
+
+ ctx->sensor = subdev;
+ ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name);
+
+ /* Enumerate sub device formats and enable all matching local formats */
+ ctx->num_active_fmt = 0;
+ for (j = 0, i = 0; ret != -EINVAL; ++j) {
+ struct cal_fmt *fmt;
+
+ memset(&mbus_code, 0, sizeof(mbus_code));
+ mbus_code.index = j;
+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code);
+ if (ret)
+ continue;
+
+ ctx_dbg(2, ctx,
+ "subdev %s: code: %04x idx: %d\n",
+ subdev->name, mbus_code.code, j);
+
+ for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
+ fmt = &cal_formats[k];
+
+ if (mbus_code.code == fmt->code) {
+ ctx->active_fmt[i] = fmt;
+ ctx_dbg(2, ctx,
+ "matched fourcc: %s: code: %04x idx: %d\n",
+ fourcc_to_str(fmt->fourcc),
+ fmt->code, i);
+ ctx->num_active_fmt = ++i;
+ }
+ }
+ }
+
+ if (i == 0) {
+ ctx_err(ctx, "No suitable format reported by subdev %s\n",
+ subdev->name);
+ return -EINVAL;
+ }
+
+ cal_complete_ctx(ctx);
+
+ return 0;
+}
+
+static int cal_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct cal_ctx *ctx = notifier_to_ctx(notifier);
+ const struct cal_fmt *fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ ret = __subdev_get_format(ctx, &mbus_fmt);
+ if (ret)
+ return ret;
+
+ fmt = find_format_by_code(ctx, mbus_fmt.code);
+ if (!fmt) {
+ ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
+ mbus_fmt.code);
+ return -EINVAL;
+ }
+
+ /* Save current subdev format */
+ v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+ ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
+ cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
+ ctx->fmt = fmt;
+ ctx->m_fmt = mbus_fmt;
+
+ return 0;
+}
+
+static int cal_complete_ctx(struct cal_ctx *ctx)
+{
+ struct video_device *vfd;
+ struct vb2_queue *q;
+ int ret;
+
+ ctx->timeperframe = tpf_default;
+ ctx->external_rate = 192000000;
+
+ /* initialize locks */
+ spin_lock_init(&ctx->slock);
+ mutex_init(&ctx->mutex);
+
+ /* initialize queue */
+ q = &ctx->vb_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = ctx;
+ q->buf_struct_size = sizeof(struct cal_buffer);
+ q->ops = &cal_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ctx->mutex;
+ q->min_buffers_needed = 3;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&ctx->vidq.active);
+
+ vfd = &ctx->vdev;
+ *vfd = cal_videodev;
+ vfd->v4l2_dev = &ctx->v4l2_dev;
+ vfd->queue = q;
+
+ /*
+ * Provide a mutex to v4l2 core. It will be used to protect
+ * all fops and v4l2 ioctls.
+ */
+ vfd->lock = &ctx->mutex;
+ video_set_drvdata(vfd, ctx);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
+ if (ret < 0)
+ return ret;
+
+ v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
+ video_device_node_name(vfd));
+
+ ctx->alloc_ctx = vb2_dma_contig_init_ctx(vfd->v4l2_dev->dev);
+ if (IS_ERR(ctx->alloc_ctx)) {
+ ctx_err(ctx, "Failed to alloc vb2 context\n");
+ ret = PTR_ERR(ctx->alloc_ctx);
+ goto vdev_unreg;
+ }
+
+ return 0;
+
+vdev_unreg:
+ video_unregister_device(vfd);
+ return ret;
+}
+
+static struct device_node *
+of_get_next_port(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *port = NULL;
+
+ if (!parent)
+ return NULL;
+
+ if (!prev) {
+ struct device_node *ports;
+ /*
+ * It's the first call, we have to find a port subnode
+ * within this node or within an optional 'ports' node.
+ */
+ ports = of_get_child_by_name(parent, "ports");
+ if (ports)
+ parent = ports;
+
+ port = of_get_child_by_name(parent, "port");
+
+ /* release the 'ports' node */
+ of_node_put(ports);
+ } else {
+ struct device_node *ports;
+
+ ports = of_get_parent(prev);
+ if (!ports)
+ return NULL;
+
+ do {
+ port = of_get_next_child(ports, prev);
+ if (!port) {
+ of_node_put(ports);
+ return NULL;
+ }
+ prev = port;
+ } while (of_node_cmp(port->name, "port") != 0);
+ }
+
+ return port;
+}
+
+static struct device_node *
+of_get_next_endpoint(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *ep = NULL;
+
+ if (!parent)
+ return NULL;
+
+ do {
+ ep = of_get_next_child(parent, prev);
+ if (!ep)
+ return NULL;
+ prev = ep;
+ } while (of_node_cmp(ep->name, "endpoint") != 0);
+
+ return ep;
+}
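+
+/*
+ * These two helpers walk a device tree layout along these lines (a
+ * hypothetical fragment, names illustrative only):
+ *
+ * cal: cal@4845b000 {
+ *     ports {
+ *         port@0 {
+ *             reg = <0>;
+ *             csi2_0: endpoint {
+ *                 remote-endpoint = <&camera_ep>;
+ *             };
+ *         };
+ *     };
+ * };
+ */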
+
+static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
+{
+ struct platform_device *pdev = ctx->dev->pdev;
+ struct device_node *ep_node, *port, *remote_ep,
+ *sensor_node, *parent;
+ struct v4l2_of_endpoint *endpoint;
+ struct v4l2_async_subdev *asd;
+ u32 regval = 0;
+ int ret, index, found_port = 0, lane;
+
+ parent = pdev->dev.of_node;
+
+ asd = &ctx->asd;
+ endpoint = &ctx->endpoint;
+
+ ep_node = NULL;
+ port = NULL;
+ remote_ep = NULL;
+ sensor_node = NULL;
+ ret = -EINVAL;
+
+ ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst);
+ for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) {
+ port = of_get_next_port(parent, port);
+ if (!port) {
+ ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n",
+ index);
+ goto cleanup_exit;
+ }
+
+ /* Match the port number with the <reg> property */
+ of_property_read_u32(port, "reg", &regval);
+ ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n",
+ index, inst, regval);
+ if ((regval == inst) && (index == inst)) {
+ found_port = 1;
+ break;
+ }
+ }
+
+ if (!found_port) {
+ ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n",
+ inst);
+ goto cleanup_exit;
+ }
+
+ ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n",
+ inst);
+
+ ep_node = of_get_next_endpoint(port, ep_node);
+ if (!ep_node) {
+ ctx_dbg(3, ctx, "can't get next endpoint\n");
+ goto cleanup_exit;
+ }
+
+ sensor_node = of_graph_get_remote_port_parent(ep_node);
+ if (!sensor_node) {
+ ctx_dbg(3, ctx, "can't get remote parent\n");
+ goto cleanup_exit;
+ }
+ asd->match_type = V4L2_ASYNC_MATCH_OF;
+ asd->match.of.node = sensor_node;
+
+ remote_ep = of_parse_phandle(ep_node, "remote-endpoint", 0);
+ if (!remote_ep) {
+ ctx_dbg(3, ctx, "can't get remote-endpoint\n");
+ goto cleanup_exit;
+ }
+ v4l2_of_parse_endpoint(remote_ep, endpoint);
+
+ if (endpoint->bus_type != V4L2_MBUS_CSI2) {
+ ctx_err(ctx, "Port:%d sub-device %s is not a CSI2 device\n",
+ inst, sensor_node->name);
+ goto cleanup_exit;
+ }
+
+ /* Store Virtual Channel number */
+ ctx->virtual_channel = endpoint->base.id;
+
+ ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst);
+ ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel);
+ ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags);
+ ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane);
+ ctx_dbg(3, ctx, "num_data_lanes=%d\n",
+ endpoint->bus.mipi_csi2.num_data_lanes);
+ ctx_dbg(3, ctx, "data_lanes= <\n");
+ for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++)
+ ctx_dbg(3, ctx, "\t%d\n",
+ endpoint->bus.mipi_csi2.data_lanes[lane]);
+ ctx_dbg(3, ctx, "\t>\n");
+
+ ctx_dbg(1, ctx, "Port: %d found sub-device %s\n",
+ inst, sensor_node->name);
+
+ ctx->asd_list[0] = asd;
+ ctx->notifier.subdevs = ctx->asd_list;
+ ctx->notifier.num_subdevs = 1;
+ ctx->notifier.bound = cal_async_bound;
+ ctx->notifier.complete = cal_async_complete;
+ ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
+ &ctx->notifier);
+ if (ret) {
+ ctx_err(ctx, "Error registering async notifier\n");
+ ret = -EINVAL;
+ }
+
+cleanup_exit:
+ if (remote_ep)
+ of_node_put(remote_ep);
+ if (sensor_node)
+ of_node_put(sensor_node);
+ if (ep_node)
+ of_node_put(ep_node);
+ if (port)
+ of_node_put(port);
+
+ return ret;
+}
+
+static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
+{
+ struct cal_ctx *ctx;
+ struct v4l2_ctrl_handler *hdl;
+ int ret;
+
+ ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ /* save the cal_dev * for future ref */
+ ctx->dev = dev;
+
+ snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name),
+ "%s-%03d", CAL_MODULE_NAME, inst);
+ ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev);
+ if (ret)
+ goto err_exit;
+
+ hdl = &ctx->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(hdl, 11);
+ if (ret) {
+ ctx_err(ctx, "Failed to init ctrl handler\n");
+ goto unreg_dev;
+ }
+ ctx->v4l2_dev.ctrl_handler = hdl;
+
+ /* Make sure Camera Core H/W register area is available */
+ ctx->cc = dev->cc[inst];
+
+ /* Store the instance id */
+ ctx->csi2_port = inst + 1;
+
+ ret = of_cal_create_instance(ctx, inst);
+ if (ret) {
+ ret = -EINVAL;
+ goto free_hdl;
+ }
+ return ctx;
+
+free_hdl:
+ v4l2_ctrl_handler_free(hdl);
+unreg_dev:
+ v4l2_device_unregister(&ctx->v4l2_dev);
+err_exit:
+ return NULL;
+}
+
+static int cal_probe(struct platform_device *pdev)
+{
+ struct cal_dev *dev;
+ int ret;
+ int irq;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ /* set pseudo v4l2 device name so we can use v4l2_printk */
+ strlcpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
+ sizeof(dev->v4l2_dev.name));
+
+ /* save pdev pointer */
+ dev->pdev = pdev;
+
+ dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cal_top");
+ dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
+
+ cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+ dev->res->name, &dev->res->start, &dev->res->end);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ cal_dbg(1, dev, "got irq# %d\n", irq);
+ ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
+ dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->cm = cm_create(dev);
+ if (IS_ERR(dev->cm))
+ return PTR_ERR(dev->cm);
+
+ dev->cc[0] = cc_create(dev, 0);
+ if (IS_ERR(dev->cc[0]))
+ return PTR_ERR(dev->cc[0]);
+
+ dev->cc[1] = cc_create(dev, 1);
+ if (IS_ERR(dev->cc[1]))
+ return PTR_ERR(dev->cc[1]);
+
+ dev->ctx[0] = NULL;
+ dev->ctx[1] = NULL;
+
+ dev->ctx[0] = cal_create_instance(dev, 0);
+ dev->ctx[1] = cal_create_instance(dev, 1);
+ if (!dev->ctx[0] && !dev->ctx[1]) {
+ cal_err(dev, "Neither port is configured, no point in staying up\n");
+ return -ENODEV;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = cal_runtime_get(dev);
+ if (ret)
+ goto runtime_disable;
+
+ /* Just check we can actually access the module */
+ cal_get_hwinfo(dev);
+
+ cal_runtime_put(dev);
+
+ return 0;
+
+runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int cal_remove(struct platform_device *pdev)
+{
+ struct cal_dev *dev =
+ (struct cal_dev *)platform_get_drvdata(pdev);
+ struct cal_ctx *ctx;
+ int i;
+
+ cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
+
+ cal_runtime_get(dev);
+
+ for (i = 0; i < CAL_NUM_CONTEXT; i++) {
+ ctx = dev->ctx[i];
+ if (ctx) {
+ ctx_dbg(1, ctx, "unregistering %s\n",
+ video_device_node_name(&ctx->vdev));
+ camerarx_phy_disable(ctx);
+ v4l2_async_notifier_unregister(&ctx->notifier);
+ vb2_dma_contig_cleanup_ctx(ctx->alloc_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_device_unregister(&ctx->v4l2_dev);
+ video_unregister_device(&ctx->vdev);
+ }
+ }
+
+ cal_runtime_put(dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id cal_of_match[] = {
+ { .compatible = "ti,dra72-cal", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cal_of_match);
+#endif
+
+static struct platform_driver cal_pdrv = {
+ .probe = cal_probe,
+ .remove = cal_remove,
+ .driver = {
+ .name = CAL_MODULE_NAME,
+ .of_match_table = of_match_ptr(cal_of_match),
+ },
+};
+
+module_platform_driver(cal_pdrv);
diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti-vpe/cal_regs.h
new file mode 100644
index 000000000000..82b3dcf87128
--- /dev/null
+++ b/drivers/media/platform/ti-vpe/cal_regs.h
@@ -0,0 +1,479 @@
+/*
+ * TI CAL camera interface driver
+ *
+ * Copyright (c) 2015 Texas Instruments Inc.
+ *
+ * Benoit Parrot, <bparrot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_CAL_REGS_H
+#define __TI_CAL_REGS_H
+
+#define CAL_NUM_CSI2_PORTS 2
+
+/* CAL register offsets */
+
+#define CAL_HL_REVISION 0x0000
+#define CAL_HL_HWINFO 0x0004
+#define CAL_HL_SYSCONFIG 0x0010
+#define CAL_HL_IRQ_EOI 0x001c
+#define CAL_HL_IRQSTATUS_RAW(m) (0x20U + ((m-1) * 0x10U))
+#define CAL_HL_IRQSTATUS(m) (0x24U + ((m-1) * 0x10U))
+#define CAL_HL_IRQENABLE_SET(m) (0x28U + ((m-1) * 0x10U))
+#define CAL_HL_IRQENABLE_CLR(m) (0x2cU + ((m-1) * 0x10U))
+#define CAL_PIX_PROC(m) (0xc0U + ((m-1) * 0x4U))
+#define CAL_CTRL 0x100
+#define CAL_CTRL1 0x104
+#define CAL_LINE_NUMBER_EVT 0x108
+#define CAL_VPORT_CTRL1 0x120
+#define CAL_VPORT_CTRL2 0x124
+#define CAL_BYS_CTRL1 0x130
+#define CAL_BYS_CTRL2 0x134
+#define CAL_RD_DMA_CTRL 0x140
+#define CAL_RD_DMA_PIX_ADDR 0x144
+#define CAL_RD_DMA_PIX_OFST 0x148
+#define CAL_RD_DMA_XSIZE 0x14c
+#define CAL_RD_DMA_YSIZE 0x150
+#define CAL_RD_DMA_INIT_ADDR 0x154
+#define CAL_RD_DMA_INIT_OFST 0x168
+#define CAL_RD_DMA_CTRL2 0x16c
+#define CAL_WR_DMA_CTRL(m) (0x200U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_ADDR(m) (0x204U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_OFST(m) (0x208U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_XSIZE(m) (0x20cU + ((m-1) * 0x10U))
+#define CAL_CSI2_PPI_CTRL(m) (0x300U + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_CFG(m) (0x304U + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_IRQSTATUS(m) (0x308U + ((m-1) * 0x80U))
+#define CAL_CSI2_SHORT_PACKET(m) (0x30cU + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_IRQENABLE(m) (0x310U + ((m-1) * 0x80U))
+#define CAL_CSI2_TIMING(m) (0x314U + ((m-1) * 0x80U))
+#define CAL_CSI2_VC_IRQENABLE(m) (0x318U + ((m-1) * 0x80U))
+#define CAL_CSI2_VC_IRQSTATUS(m) (0x328U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX0(m) (0x330U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX1(m) (0x334U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX2(m) (0x338U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX3(m) (0x33cU + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX4(m) (0x340U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX5(m) (0x344U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX6(m) (0x348U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX7(m) (0x34cU + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS0(m) (0x350U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS1(m) (0x354U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS2(m) (0x358U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS3(m) (0x35cU + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS4(m) (0x360U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS5(m) (0x364U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS6(m) (0x368U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS7(m) (0x36cU + ((m-1) * 0x80U))
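+
+/*
+ * The (m-1) scaling maps the 1-based port/context number used by the
+ * driver onto the register stride, e.g. CAL_CSI2_TIMING(1) = 0x314 and
+ * CAL_CSI2_TIMING(2) = 0x314 + 0x80 = 0x394.
+ */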
+
+/* CAL CSI2 PHY register offsets */
+#define CAL_CSI2_PHY_REG0 0x000
+#define CAL_CSI2_PHY_REG1 0x004
+#define CAL_CSI2_PHY_REG2 0x008
+
+/* CAL Control Module Core Camerrx Control register offsets */
+#define CM_CTRL_CORE_CAMERRX_CONTROL 0x000
+
+/*********************************************************************
+* Generic values used in various fields below
+*********************************************************************/
+
+#define CAL_GEN_DISABLE 0
+#define CAL_GEN_ENABLE 1
+#define CAL_GEN_FALSE 0
+#define CAL_GEN_TRUE 1
+
+/*********************************************************************
+* Field Definition Macros
+*********************************************************************/
+
+#define CAL_HL_REVISION_MINOR_MASK GENMASK(5, 0)
+#define CAL_HL_REVISION_CUSTOM_MASK GENMASK(7, 6)
+#define CAL_HL_REVISION_MAJOR_MASK GENMASK(10, 8)
+#define CAL_HL_REVISION_RTL_MASK GENMASK(15, 11)
+#define CAL_HL_REVISION_FUNC_MASK GENMASK(27, 16)
+#define CAL_HL_REVISION_SCHEME_MASK GENMASK(31, 30)
+#define CAL_HL_REVISION_SCHEME_H08 1
+#define CAL_HL_REVISION_SCHEME_LEGACY 0
+
+#define CAL_HL_HWINFO_WFIFO_MASK GENMASK(3, 0)
+#define CAL_HL_HWINFO_RFIFO_MASK GENMASK(7, 4)
+#define CAL_HL_HWINFO_PCTX_MASK GENMASK(12, 8)
+#define CAL_HL_HWINFO_WCTX_MASK GENMASK(18, 13)
+#define CAL_HL_HWINFO_VFIFO_MASK GENMASK(22, 19)
+#define CAL_HL_HWINFO_NCPORT_MASK GENMASK(27, 23)
+#define CAL_HL_HWINFO_NPPI_CTXS0_MASK GENMASK(29, 28)
+#define CAL_HL_HWINFO_NPPI_CTXS1_MASK GENMASK(31, 30)
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_ZERO 0
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_FOUR 1
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_EIGHT 2
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_RESERVED 3
+
+#define CAL_HL_SYSCONFIG_SOFTRESET_MASK BIT_MASK(0)
+#define CAL_HL_SYSCONFIG_SOFTRESET_DONE 0x0
+#define CAL_HL_SYSCONFIG_SOFTRESET_PENDING 0x1
+#define CAL_HL_SYSCONFIG_SOFTRESET_NOACTION 0x0
+#define CAL_HL_SYSCONFIG_SOFTRESET_RESET 0x1
+#define CAL_HL_SYSCONFIG_IDLE_MASK GENMASK(3, 2)
+#define CAL_HL_SYSCONFIG_IDLEMODE_FORCE 0
+#define CAL_HL_SYSCONFIG_IDLEMODE_NO 1
+#define CAL_HL_SYSCONFIG_IDLEMODE_SMART1 2
+#define CAL_HL_SYSCONFIG_IDLEMODE_SMART2 3
+
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK BIT_MASK(0)
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0 0
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0 0
+
+#define CAL_HL_IRQ_MASK(m) BIT_MASK(m-1)
+#define CAL_HL_IRQ_NOACTION 0x0
+#define CAL_HL_IRQ_ENABLE 0x1
+#define CAL_HL_IRQ_CLEAR 0x1
+#define CAL_HL_IRQ_DISABLED 0x0
+#define CAL_HL_IRQ_ENABLED 0x1
+#define CAL_HL_IRQ_PENDING 0x1
+
+#define CAL_PIX_PROC_EN_MASK BIT_MASK(0)
+#define CAL_PIX_PROC_EXTRACT_MASK GENMASK(4, 1)
+#define CAL_PIX_PROC_EXTRACT_B6 0x0
+#define CAL_PIX_PROC_EXTRACT_B7 0x1
+#define CAL_PIX_PROC_EXTRACT_B8 0x2
+#define CAL_PIX_PROC_EXTRACT_B10 0x3
+#define CAL_PIX_PROC_EXTRACT_B10_MIPI 0x4
+#define CAL_PIX_PROC_EXTRACT_B12 0x5
+#define CAL_PIX_PROC_EXTRACT_B12_MIPI 0x6
+#define CAL_PIX_PROC_EXTRACT_B14 0x7
+#define CAL_PIX_PROC_EXTRACT_B14_MIPI 0x8
+#define CAL_PIX_PROC_EXTRACT_B16_BE 0x9
+#define CAL_PIX_PROC_EXTRACT_B16_LE 0xa
+#define CAL_PIX_PROC_DPCMD_MASK GENMASK(9, 5)
+#define CAL_PIX_PROC_DPCMD_BYPASS 0x0
+#define CAL_PIX_PROC_DPCMD_DPCM_10_8_1 0x2
+#define CAL_PIX_PROC_DPCMD_DPCM_12_8_1 0x8
+#define CAL_PIX_PROC_DPCMD_DPCM_10_7_1 0x4
+#define CAL_PIX_PROC_DPCMD_DPCM_10_7_2 0x5
+#define CAL_PIX_PROC_DPCMD_DPCM_10_6_1 0x6
+#define CAL_PIX_PROC_DPCMD_DPCM_10_6_2 0x7
+#define CAL_PIX_PROC_DPCMD_DPCM_12_7_1 0xa
+#define CAL_PIX_PROC_DPCMD_DPCM_12_6_1 0xc
+#define CAL_PIX_PROC_DPCMD_DPCM_14_10 0xe
+#define CAL_PIX_PROC_DPCMD_DPCM_14_8_1 0x10
+#define CAL_PIX_PROC_DPCMD_DPCM_16_12_1 0x12
+#define CAL_PIX_PROC_DPCMD_DPCM_16_10_1 0x14
+#define CAL_PIX_PROC_DPCMD_DPCM_16_8_1 0x16
+#define CAL_PIX_PROC_DPCME_MASK GENMASK(15, 11)
+#define CAL_PIX_PROC_DPCME_BYPASS 0x0
+#define CAL_PIX_PROC_DPCME_DPCM_10_8_1 0x2
+#define CAL_PIX_PROC_DPCME_DPCM_12_8_1 0x8
+#define CAL_PIX_PROC_DPCME_DPCM_14_10 0xe
+#define CAL_PIX_PROC_DPCME_DPCM_14_8_1 0x10
+#define CAL_PIX_PROC_DPCME_DPCM_16_12_1 0x12
+#define CAL_PIX_PROC_DPCME_DPCM_16_10_1 0x14
+#define CAL_PIX_PROC_DPCME_DPCM_16_8_1 0x16
+#define CAL_PIX_PROC_PACK_MASK GENMASK(18, 16)
+#define CAL_PIX_PROC_PACK_B8 0x0
+#define CAL_PIX_PROC_PACK_B10_MIPI 0x2
+#define CAL_PIX_PROC_PACK_B12 0x3
+#define CAL_PIX_PROC_PACK_B12_MIPI 0x4
+#define CAL_PIX_PROC_PACK_B16 0x5
+#define CAL_PIX_PROC_PACK_ARGB 0x6
+#define CAL_PIX_PROC_CPORT_MASK GENMASK(23, 19)
+
+#define CAL_CTRL_POSTED_WRITES_MASK BIT_MASK(0)
+#define CAL_CTRL_POSTED_WRITES_NONPOSTED 0
+#define CAL_CTRL_POSTED_WRITES 1
+#define CAL_CTRL_TAGCNT_MASK GENMASK(4, 1)
+#define CAL_CTRL_BURSTSIZE_MASK GENMASK(6, 5)
+#define CAL_CTRL_BURSTSIZE_BURST16 0x0
+#define CAL_CTRL_BURSTSIZE_BURST32 0x1
+#define CAL_CTRL_BURSTSIZE_BURST64 0x2
+#define CAL_CTRL_BURSTSIZE_BURST128 0x3
+#define CAL_CTRL_LL_FORCE_STATE_MASK GENMASK(12, 7)
+#define CAL_CTRL_MFLAGL_MASK GENMASK(20, 13)
+#define CAL_CTRL_PWRSCPCLK_MASK BIT_MASK(21)
+#define CAL_CTRL_PWRSCPCLK_AUTO 0
+#define CAL_CTRL_PWRSCPCLK_FORCE 1
+#define CAL_CTRL_RD_DMA_STALL_MASK BIT_MASK(22)
+#define CAL_CTRL_MFLAGH_MASK GENMASK(31, 24)
+
+#define CAL_CTRL1_PPI_GROUPING_MASK GENMASK(1, 0)
+#define CAL_CTRL1_PPI_GROUPING_DISABLED 0
+#define CAL_CTRL1_PPI_GROUPING_RESERVED 1
+#define CAL_CTRL1_PPI_GROUPING_0 2
+#define CAL_CTRL1_PPI_GROUPING_1 3
+#define CAL_CTRL1_INTERLEAVE01_MASK GENMASK(3, 2)
+#define CAL_CTRL1_INTERLEAVE01_DISABLED 0
+#define CAL_CTRL1_INTERLEAVE01_PIX1 1
+#define CAL_CTRL1_INTERLEAVE01_PIX4 2
+#define CAL_CTRL1_INTERLEAVE01_RESERVED 3
+#define CAL_CTRL1_INTERLEAVE23_MASK GENMASK(5, 4)
+#define CAL_CTRL1_INTERLEAVE23_DISABLED 0
+#define CAL_CTRL1_INTERLEAVE23_PIX1 1
+#define CAL_CTRL1_INTERLEAVE23_PIX4 2
+#define CAL_CTRL1_INTERLEAVE23_RESERVED 3
+
+#define CAL_LINE_NUMBER_EVT_CPORT_MASK GENMASK(4, 0)
+#define CAL_LINE_NUMBER_EVT_MASK GENMASK(29, 16)
+
+#define CAL_VPORT_CTRL1_PCLK_MASK GENMASK(16, 0)
+#define CAL_VPORT_CTRL1_XBLK_MASK GENMASK(24, 17)
+#define CAL_VPORT_CTRL1_YBLK_MASK GENMASK(30, 25)
+#define CAL_VPORT_CTRL1_WIDTH_MASK BIT_MASK(31)
+#define CAL_VPORT_CTRL1_WIDTH_ONE 0
+#define CAL_VPORT_CTRL1_WIDTH_TWO 1
+
+#define CAL_VPORT_CTRL2_CPORT_MASK GENMASK(4, 0)
+#define CAL_VPORT_CTRL2_FREERUNNING_MASK BIT_MASK(15)
+#define CAL_VPORT_CTRL2_FREERUNNING_GATED 0
+#define CAL_VPORT_CTRL2_FREERUNNING_FREE 1
+#define CAL_VPORT_CTRL2_FS_RESETS_MASK BIT_MASK(16)
+#define CAL_VPORT_CTRL2_FS_RESETS_NO 0
+#define CAL_VPORT_CTRL2_FS_RESETS_YES 1
+#define CAL_VPORT_CTRL2_FSM_RESET_MASK BIT_MASK(17)
+#define CAL_VPORT_CTRL2_FSM_RESET_NOEFFECT 0
+#define CAL_VPORT_CTRL2_FSM_RESET 1
+#define CAL_VPORT_CTRL2_RDY_THR_MASK GENMASK(31, 18)
+
+#define CAL_BYS_CTRL1_PCLK_MASK GENMASK(16, 0)
+#define CAL_BYS_CTRL1_XBLK_MASK GENMASK(24, 17)
+#define CAL_BYS_CTRL1_YBLK_MASK GENMASK(30, 25)
+#define CAL_BYS_CTRL1_BYSINEN_MASK BIT_MASK(31)
+
+#define CAL_BYS_CTRL2_CPORTIN_MASK GENMASK(4, 0)
+#define CAL_BYS_CTRL2_CPORTOUT_MASK GENMASK(9, 5)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK BIT_MASK(10)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_NO 0
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_YES 1
+#define CAL_BYS_CTRL2_FREERUNNING_MASK BIT_MASK(11)
+#define CAL_BYS_CTRL2_FREERUNNING_NO 0
+#define CAL_BYS_CTRL2_FREERUNNING_YES 1
+
+#define CAL_RD_DMA_CTRL_GO_MASK BIT_MASK(0)
+#define CAL_RD_DMA_CTRL_GO_DIS 0
+#define CAL_RD_DMA_CTRL_GO_EN 1
+#define CAL_RD_DMA_CTRL_GO_IDLE 0
+#define CAL_RD_DMA_CTRL_GO_BUSY 1
+#define CAL_RD_DMA_CTRL_INIT_MASK BIT_MASK(1)
+#define CAL_RD_DMA_CTRL_BW_LIMITER_MASK GENMASK(10, 2)
+#define CAL_RD_DMA_CTRL_OCP_TAG_CNT_MASK GENMASK(14, 11)
+#define CAL_RD_DMA_CTRL_PCLK_MASK GENMASK(31, 15)
+
+#define CAL_RD_DMA_PIX_ADDR_MASK GENMASK(31, 3)
+
+#define CAL_RD_DMA_PIX_OFST_MASK GENMASK(31, 4)
+
+#define CAL_RD_DMA_XSIZE_MASK GENMASK(31, 19)
+
+#define CAL_RD_DMA_YSIZE_MASK GENMASK(29, 16)
+
+#define CAL_RD_DMA_INIT_ADDR_MASK GENMASK(31, 3)
+
+#define CAL_RD_DMA_INIT_OFST_MASK GENMASK(31, 3)
+
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_MASK GENMASK(2, 0)
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_DIS 0
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_ONE 1
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_FOUR 2
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTEEN 3
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTYFOUR 4
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_RESERVED 5
+#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK BIT_MASK(3)
+#define CAL_RD_DMA_CTRL2_PATTERN_MASK GENMASK(5, 4)
+#define CAL_RD_DMA_CTRL2_PATTERN_LINEAR 0
+#define CAL_RD_DMA_CTRL2_PATTERN_YUV420 1
+#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP2 2
+#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP4 3
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK BIT_MASK(6)
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_FREERUNNING 0
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_WAITFORBYSOUT 1
+#define CAL_RD_DMA_CTRL2_CIRC_SIZE_MASK GENMASK(29, 16)
+
+#define CAL_WR_DMA_CTRL_MODE_MASK GENMASK(2, 0)
+#define CAL_WR_DMA_CTRL_MODE_DIS 0
+#define CAL_WR_DMA_CTRL_MODE_SHD 1
+#define CAL_WR_DMA_CTRL_MODE_CNT 2
+#define CAL_WR_DMA_CTRL_MODE_CNT_INIT 3
+#define CAL_WR_DMA_CTRL_MODE_CONST 4
+#define CAL_WR_DMA_CTRL_MODE_RESERVED 5
+#define CAL_WR_DMA_CTRL_PATTERN_MASK GENMASK(4, 3)
+#define CAL_WR_DMA_CTRL_PATTERN_LINEAR 0
+#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP2 2
+#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP4 3
+#define CAL_WR_DMA_CTRL_PATTERN_RESERVED 1
+#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK BIT_MASK(5)
+#define CAL_WR_DMA_CTRL_DTAG_MASK GENMASK(8, 6)
+#define CAL_WR_DMA_CTRL_DTAG_ATT_HDR 0
+#define CAL_WR_DMA_CTRL_DTAG_ATT_DAT 1
+#define CAL_WR_DMA_CTRL_DTAG 2
+#define CAL_WR_DMA_CTRL_DTAG_PIX_HDR 3
+#define CAL_WR_DMA_CTRL_DTAG_PIX_DAT 4
+#define CAL_WR_DMA_CTRL_DTAG_D5 5
+#define CAL_WR_DMA_CTRL_DTAG_D6 6
+#define CAL_WR_DMA_CTRL_DTAG_D7 7
+#define CAL_WR_DMA_CTRL_CPORT_MASK GENMASK(13, 9)
+#define CAL_WR_DMA_CTRL_STALL_RD_MASK BIT_MASK(14)
+#define CAL_WR_DMA_CTRL_YSIZE_MASK GENMASK(31, 18)
+
+#define CAL_WR_DMA_ADDR_MASK GENMASK(31, 4)
+
+#define CAL_WR_DMA_OFST_MASK GENMASK(18, 4)
+#define CAL_WR_DMA_OFST_CIRC_MODE_MASK GENMASK(23, 22)
+#define CAL_WR_DMA_OFST_CIRC_MODE_ONE 1
+#define CAL_WR_DMA_OFST_CIRC_MODE_FOUR 2
+#define CAL_WR_DMA_OFST_CIRC_MODE_SIXTYFOUR 3
+#define CAL_WR_DMA_OFST_CIRC_MODE_DISABLED 0
+#define CAL_WR_DMA_OFST_CIRC_SIZE_MASK GENMASK(31, 24)
+
+#define CAL_WR_DMA_XSIZE_XSKIP_MASK GENMASK(15, 3)
+#define CAL_WR_DMA_XSIZE_MASK GENMASK(31, 19)
+
+#define CAL_CSI2_PPI_CTRL_IF_EN_MASK BIT_MASK(0)
+#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK BIT_MASK(2)
+#define CAL_CSI2_PPI_CTRL_FRAME_MASK BIT_MASK(3)
+#define CAL_CSI2_PPI_CTRL_FRAME_IMMEDIATE 0
+#define CAL_CSI2_PPI_CTRL_FRAME 1
+
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK GENMASK(2, 0)
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_5 5
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_4 4
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_3 3
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_2 2
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_1 1
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_NOT_USED 0
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_CFG_POL_PLUSMINUS 0
+#define CAL_CSI2_COMPLEXIO_CFG_POL_MINUSPLUS 1
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POSITION_MASK GENMASK(6, 4)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POSITION_MASK GENMASK(10, 8)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POSITION_MASK GENMASK(14, 12)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POSITION_MASK GENMASK(18, 16)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK BIT_MASK(19)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK GENMASK(26, 25)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF 0
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON 1
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ULP 2
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK GENMASK(28, 27)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF 0
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON 1
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ULP 2
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK BIT_MASK(29)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED 1
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING 0
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK BIT_MASK(30)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL 0
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL 1
+
+#define CAL_CSI2_SHORT_PACKET_MASK GENMASK(23, 0)
+
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK BIT_MASK(0)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK BIT_MASK(1)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK BIT_MASK(2)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK BIT_MASK(4)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK BIT_MASK(5)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK BIT_MASK(6)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK BIT_MASK(8)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK BIT_MASK(9)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK BIT_MASK(10)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK BIT_MASK(12)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK BIT_MASK(13)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK BIT_MASK(14)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK BIT_MASK(16)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK BIT_MASK(17)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK BIT_MASK(18)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK BIT_MASK(19)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK BIT_MASK(20)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK BIT_MASK(21)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK BIT_MASK(22)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK BIT_MASK(23)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK BIT_MASK(25)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK BIT_MASK(26)
+#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK BIT_MASK(27)
+#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK BIT_MASK(28)
+#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK BIT_MASK(30)
+
+#define CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK GENMASK(12, 0)
+#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK BIT_MASK(13)
+#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK BIT_MASK(14)
+#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK BIT_MASK(15)
+
+#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK BIT_MASK(0)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK BIT_MASK(1)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK BIT_MASK(2)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK BIT_MASK(3)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK BIT_MASK(4)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK BIT_MASK(5)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK BIT_MASK(8)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK BIT_MASK(9)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK BIT_MASK(10)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK BIT_MASK(11)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK BIT_MASK(12)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK BIT_MASK(13)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK BIT_MASK(16)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK BIT_MASK(17)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK BIT_MASK(18)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK BIT_MASK(19)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK BIT_MASK(20)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK BIT_MASK(21)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK BIT_MASK(24)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK BIT_MASK(25)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK BIT_MASK(26)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK BIT_MASK(27)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK BIT_MASK(28)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK BIT_MASK(29)
+
+#define CAL_CSI2_CTX_DT_MASK GENMASK(5, 0)
+#define CAL_CSI2_CTX_VC_MASK GENMASK(7, 6)
+#define CAL_CSI2_CTX_CPORT_MASK GENMASK(12, 8)
+#define CAL_CSI2_CTX_ATT_MASK BIT_MASK(13)
+#define CAL_CSI2_CTX_ATT_PIX 0
+#define CAL_CSI2_CTX_ATT 1
+#define CAL_CSI2_CTX_PACK_MODE_MASK BIT_MASK(14)
+#define CAL_CSI2_CTX_PACK_MODE_LINE 0
+#define CAL_CSI2_CTX_PACK_MODE_FRAME 1
+#define CAL_CSI2_CTX_LINES_MASK GENMASK(29, 16)
+
+#define CAL_CSI2_STATUS_FRAME_MASK GENMASK(15, 0)
+
+#define CAL_CSI2_PHY_REG0_THS_SETTLE_MASK GENMASK(7, 0)
+#define CAL_CSI2_PHY_REG0_THS_TERM_MASK GENMASK(15, 8)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK BIT_MASK(24)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE 1
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_ENABLE 0
+
+#define CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK GENMASK(7, 0)
+#define CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK GENMASK(9, 8)
+#define CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK GENMASK(17, 10)
+#define CAL_CSI2_PHY_REG1_TCLK_TERM_MASK GENMASK(24, 18)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK BIT_MASK(25)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_ERROR 1
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SUCCESS 0
+#define CAL_CSI2_PHY_REG1_RESET_DONE_STATUS_MASK GENMASK(29, 28)
+
+#define CAL_CSI2_PHY_REG2_CCP2_SYNC_PATTERN_MASK GENMASK(23, 0)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK GENMASK(25, 24)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK GENMASK(27, 26)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK GENMASK(29, 28)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK GENMASK(31, 30)
+
+#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK BIT_MASK(0)
+#define CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK GENMASK(2, 1)
+#define CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK GENMASK(4, 3)
+#define CM_CAMERRX_CTRL_CSI1_MODE_MASK BIT_MASK(5)
+#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK BIT_MASK(10)
+#define CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK GENMASK(12, 11)
+#define CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK GENMASK(16, 13)
+#define CM_CAMERRX_CTRL_CSI0_MODE_MASK BIT_MASK(17)
+
+#endif
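
For reference, a minimal sketch of how mask definitions like the ones above are typically consumed. The cal_set_field() helper and the example function are assumptions for illustration, not part of this patch:

#include <linux/bitops.h>

/* Hypothetical helper: insert a field value under a GENMASK()/BIT_MASK()
 * mask, using __ffs() to locate the field offset within the register.
 */
static inline void cal_set_field(u32 *valp, u32 field, u32 mask)
{
	u32 val = *valp;

	val &= ~mask;
	val |= (field << __ffs(mask)) & mask;
	*valp = val;
}

/* Example: program a CSI2 context for data type 0x2a on virtual channel 1,
 * packing by line.
 */
static void cal_csi2_ctx_example(u32 *ctx)
{
	cal_set_field(ctx, 0x2a, CAL_CSI2_CTX_DT_MASK);
	cal_set_field(ctx, 1, CAL_CSI2_CTX_VC_MASK);
	cal_set_field(ctx, CAL_CSI2_CTX_PACK_MODE_LINE,
		      CAL_CSI2_CTX_PACK_MODE_MASK);
}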
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 418113c99801..c4b5fab83666 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1074,7 +1074,7 @@ static int __init vim2m_init(void)
if (ret)
platform_device_unregister(&vim2m_pdev);
- return 0;
+ return ret;
}
module_init(vim2m_init);
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
index e15eef6a94e5..bdc380b14e0c 100644
--- a/drivers/media/platform/vivid/vivid-osd.c
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -360,7 +360,7 @@ void vivid_fb_release_buffers(struct vivid_dev *dev)
/* Release pseudo palette */
kfree(dev->fb_info.pseudo_palette);
- kfree((void *)dev->video_vbase);
+ kfree(dev->video_vbase);
}
/* Initialize the specified card */
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index 14256141f905..da862bb2e5f8 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -251,6 +251,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->planes = 3;
tpg->is_yuv = true;
break;
+ case V4L2_PIX_FMT_YUV422M:
+ case V4L2_PIX_FMT_YVU422M:
+ tpg->buffers = 3;
+ /* fall through */
case V4L2_PIX_FMT_YUV422P:
tpg->vdownsampling[1] = 1;
tpg->vdownsampling[2] = 1;
@@ -283,6 +287,16 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->planes = 2;
tpg->is_yuv = true;
break;
+ case V4L2_PIX_FMT_YUV444M:
+ case V4L2_PIX_FMT_YVU444M:
+ tpg->buffers = 3;
+ tpg->planes = 3;
+ tpg->vdownsampling[1] = 1;
+ tpg->vdownsampling[2] = 1;
+ tpg->hdownsampling[1] = 1;
+ tpg->hdownsampling[2] = 1;
+ tpg->is_yuv = true;
+ break;
case V4L2_PIX_FMT_NV24:
case V4L2_PIX_FMT_NV42:
tpg->vdownsampling[1] = 1;
@@ -368,6 +382,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->twopixelsize[0] = 4;
tpg->twopixelsize[1] = 4;
break;
+ case V4L2_PIX_FMT_YUV444M:
+ case V4L2_PIX_FMT_YVU444M:
+ case V4L2_PIX_FMT_YUV422M:
+ case V4L2_PIX_FMT_YVU422M:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
@@ -933,6 +951,7 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset] = r_y;
buf[0][offset+1] = r_y == 0xff ? r_y : 0;
break;
+ case V4L2_PIX_FMT_YUV422M:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YUV420M:
@@ -947,6 +966,7 @@ static void gen_twopix(struct tpg_data *tpg,
buf[1][0] = g_u;
buf[2][0] = b_v;
break;
+ case V4L2_PIX_FMT_YVU422M:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YVU420M:
buf[0][offset] = r_y;
@@ -988,6 +1008,18 @@ static void gen_twopix(struct tpg_data *tpg,
buf[1][1] = g_u;
break;
+ case V4L2_PIX_FMT_YUV444M:
+ buf[0][offset] = r_y;
+ buf[1][offset] = g_u;
+ buf[2][offset] = b_v;
+ break;
+
+ case V4L2_PIX_FMT_YVU444M:
+ buf[0][offset] = r_y;
+ buf[1][offset] = b_v;
+ buf[2][offset] = g_u;
+ break;
+
case V4L2_PIX_FMT_NV24:
buf[0][offset] = r_y;
buf[1][2 * offset] = g_u;
diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h
index 9baed6a10334..93fbaee69675 100644
--- a/drivers/media/platform/vivid/vivid-tpg.h
+++ b/drivers/media/platform/vivid/vivid-tpg.h
@@ -418,6 +418,8 @@ static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsi
tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
}
+ if (tpg_g_interleaved(tpg))
+ tpg->bytesperline[1] = tpg->bytesperline[0];
}
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 1678b730dba2..b0d4e3a0acf0 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -445,6 +445,9 @@ struct vivid_fmt vivid_formats[] = {
.planes = 1,
.buffers = 1,
},
+
+ /* Multiplanar formats */
+
{
.fourcc = V4L2_PIX_FMT_NV16M,
.vdownsampling = { 1, 1 },
@@ -495,10 +498,42 @@ struct vivid_fmt vivid_formats[] = {
.planes = 2,
.buffers = 2,
},
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 4, 4 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU422M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 4, 4 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV444M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 8, 8 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU444M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 8, 8 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
};
-/* There are 6 multiplanar formats in the list */
-#define VIVID_MPLANAR_FORMATS 6
+/* The number of multiplanar formats in the list */
+#define VIVID_MPLANAR_FORMATS 10
const struct vivid_fmt *vivid_get_format(struct vivid_dev *dev, u32 pixelformat)
{
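
The bit_depth entries above fold horizontal chroma subsampling into a per-plane average: the 4:2:2 planar formats carry 4 bits per pixel in each chroma plane, the 4:4:4 ones a full 8. A hedged sketch of how a per-plane line size falls out of these numbers (field names as in the table above; this helper is illustrative, not code from this patch):

static unsigned int plane_bytesperline(const struct vivid_fmt *fmt,
				       unsigned int plane, unsigned int width)
{
	/* 8 bits/pixel yields width bytes per line; the 4 bits/pixel chroma
	 * planes of YUV422M/YVU422M yield width / 2 bytes per line.
	 */
	return width * fmt->bit_depth[plane] / 8;
}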
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile
index 6a93f928dfde..95b3ac2ea7ef 100644
--- a/drivers/media/platform/vsp1/Makefile
+++ b/drivers/media/platform/vsp1/Makefile
@@ -1,4 +1,5 @@
-vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_video.o
+vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_pipe.o
+vsp1-y += vsp1_dl.o vsp1_drm.o vsp1_video.o
vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
vsp1-y += vsp1_hsit.o vsp1_lif.o vsp1_lut.o
vsp1-y += vsp1_bru.o vsp1_sru.o vsp1_uds.o
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 989e96f7e360..910d6b8e8b50 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -26,6 +26,9 @@
struct clk;
struct device;
+struct vsp1_dl;
+struct vsp1_drm;
+struct vsp1_entity;
struct vsp1_platform_data;
struct vsp1_bru;
struct vsp1_hsit;
@@ -42,17 +45,21 @@ struct vsp1_uds;
#define VSP1_HAS_LIF (1 << 0)
#define VSP1_HAS_LUT (1 << 1)
#define VSP1_HAS_SRU (1 << 2)
+#define VSP1_HAS_BRU (1 << 3)
-struct vsp1_platform_data {
+struct vsp1_device_info {
+ u32 version;
unsigned int features;
unsigned int rpf_count;
unsigned int uds_count;
unsigned int wpf_count;
+ unsigned int num_bru_inputs;
+ bool uapi;
};
struct vsp1_device {
struct device *dev;
- struct vsp1_platform_data pdata;
+ const struct vsp1_device_info *info;
void __iomem *mmio;
struct clk *clock;
@@ -71,14 +78,22 @@ struct vsp1_device {
struct vsp1_rwpf *wpf[VSP1_MAX_WPF];
struct list_head entities;
+ struct list_head videos;
struct v4l2_device v4l2_dev;
struct media_device media_dev;
+ struct media_entity_operations media_ops;
+
+ struct vsp1_drm *drm;
+
+ bool use_dl;
};
int vsp1_device_get(struct vsp1_device *vsp1);
void vsp1_device_put(struct vsp1_device *vsp1);
+int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index);
+
static inline u32 vsp1_read(struct vsp1_device *vsp1, u32 reg)
{
return ioread32(vsp1->mmio + reg);
@@ -89,4 +104,14 @@ static inline void vsp1_write(struct vsp1_device *vsp1, u32 reg, u32 data)
iowrite32(data, vsp1->mmio + reg);
}
+#include "vsp1_dl.h"
+
+static inline void vsp1_mod_write(struct vsp1_entity *e, u32 reg, u32 data)
+{
+ if (e->vsp1->use_dl)
+ vsp1_dl_add(e, reg, data);
+ else
+ vsp1_write(e->vsp1, reg, data);
+}
+
#endif /* __VSP1_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index 7dd763311c0f..cb0dbc15ddad 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -19,6 +19,7 @@
#include "vsp1.h"
#include "vsp1_bru.h"
#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
#define BRU_MIN_SIZE 1U
#define BRU_MAX_SIZE 8190U
@@ -27,14 +28,9 @@
* Device Access
*/
-static inline u32 vsp1_bru_read(struct vsp1_bru *bru, u32 reg)
-{
- return vsp1_read(bru->entity.vsp1, reg);
-}
-
static inline void vsp1_bru_write(struct vsp1_bru *bru, u32 reg, u32 data)
{
- vsp1_write(bru->entity.vsp1, reg, data);
+ vsp1_mod_write(&bru->entity, reg, data);
}
/* -----------------------------------------------------------------------------
@@ -83,7 +79,7 @@ static int bru_s_stream(struct v4l2_subdev *subdev, int enable)
if (!enable)
return 0;
- format = &bru->entity.formats[BRU_PAD_SOURCE];
+ format = &bru->entity.formats[bru->entity.source_pad];
/* The hardware is extremely flexible but we have no userspace API to
* expose all the parameters, nor is it clear whether we would have use
@@ -94,7 +90,7 @@ static int bru_s_stream(struct v4l2_subdev *subdev, int enable)
/* Disable dithering and enable color data normalization unless the
* format at the pipeline output is premultiplied.
*/
- flags = pipe->output ? pipe->output->video.format.flags : 0;
+ flags = pipe->output ? pipe->output->format.flags : 0;
vsp1_bru_write(bru, VI6_BRU_INCTRL,
flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA ?
0 : VI6_BRU_INCTRL_NRM);
@@ -113,7 +109,7 @@ static int bru_s_stream(struct v4l2_subdev *subdev, int enable)
VI6_BRU_ROP_CROP(VI6_ROP_NOP) |
VI6_BRU_ROP_AROP(VI6_ROP_NOP));
- for (i = 0; i < 4; ++i) {
+ for (i = 0; i < bru->entity.source_pad; ++i) {
bool premultiplied = false;
u32 ctrl = 0;
@@ -125,7 +121,7 @@ static int bru_s_stream(struct v4l2_subdev *subdev, int enable)
if (bru->inputs[i].rpf) {
ctrl |= VI6_BRU_CTRL_RBC;
- premultiplied = bru->inputs[i].rpf->video.format.flags
+ premultiplied = bru->inputs[i].rpf->format.flags
& V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
} else {
ctrl |= VI6_BRU_CTRL_CROP(VI6_ROP_NOP)
@@ -295,7 +291,7 @@ static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_con
*format = fmt->format;
/* Reset the compose rectangle */
- if (fmt->pad != BRU_PAD_SOURCE) {
+ if (fmt->pad != bru->entity.source_pad) {
struct v4l2_rect *compose;
compose = bru_get_compose(bru, cfg, fmt->pad, fmt->which);
@@ -309,7 +305,7 @@ static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_con
if (fmt->pad == BRU_PAD_SINK(0)) {
unsigned int i;
- for (i = 0; i <= BRU_PAD_SOURCE; ++i) {
+ for (i = 0; i <= bru->entity.source_pad; ++i) {
format = vsp1_entity_get_pad_format(&bru->entity, cfg,
i, fmt->which);
format->code = fmt->format.code;
@@ -325,7 +321,7 @@ static int bru_get_selection(struct v4l2_subdev *subdev,
{
struct vsp1_bru *bru = to_bru(subdev);
- if (sel->pad == BRU_PAD_SOURCE)
+ if (sel->pad == bru->entity.source_pad)
return -EINVAL;
switch (sel->target) {
@@ -353,7 +349,7 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *compose;
- if (sel->pad == BRU_PAD_SOURCE)
+ if (sel->pad == bru->entity.source_pad)
return -EINVAL;
if (sel->target != V4L2_SEL_TGT_COMPOSE)
@@ -362,8 +358,8 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
/* The compose rectangle top left corner must be inside the output
* frame.
*/
- format = vsp1_entity_get_pad_format(&bru->entity, cfg, BRU_PAD_SOURCE,
- sel->which);
+ format = vsp1_entity_get_pad_format(&bru->entity, cfg,
+ bru->entity.source_pad, sel->which);
sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
@@ -419,7 +415,8 @@ struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1)
bru->entity.type = VSP1_ENTITY_BRU;
- ret = vsp1_entity_init(vsp1, &bru->entity, 5);
+ ret = vsp1_entity_init(vsp1, &bru->entity,
+ vsp1->info->num_bru_inputs + 1);
if (ret < 0)
return ERR_PTR(ret);
@@ -427,7 +424,7 @@ struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1)
subdev = &bru->entity.subdev;
v4l2_subdev_init(subdev, &bru_ops);
- subdev->entity.ops = &vsp1_media_ops;
+ subdev->entity.ops = &vsp1->media_ops;
subdev->internal_ops = &vsp1_subdev_internal_ops;
snprintf(subdev->name, sizeof(subdev->name), "%s bru",
dev_name(vsp1->dev));
diff --git a/drivers/media/platform/vsp1/vsp1_bru.h b/drivers/media/platform/vsp1/vsp1_bru.h
index 16b1c6554911..dbac9686ea69 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.h
+++ b/drivers/media/platform/vsp1/vsp1_bru.h
@@ -23,7 +23,6 @@ struct vsp1_device;
struct vsp1_rwpf;
#define BRU_PAD_SINK(n) (n)
-#define BRU_PAD_SOURCE 4
struct vsp1_bru {
struct vsp1_entity entity;
@@ -33,7 +32,7 @@ struct vsp1_bru {
struct {
struct vsp1_rwpf *rpf;
struct v4l2_rect compose;
- } inputs[4];
+ } inputs[VSP1_MAX_RPF];
};
static inline struct vsp1_bru *to_bru(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
new file mode 100644
index 000000000000..1a9a58588f84
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -0,0 +1,305 @@
+/*
+ * vsp1_dl.c -- R-Car VSP1 Display List
+ *
+ * Copyright (C) 2015 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+
+/*
+ * Global resources
+ *
+ * - Display-related interrupts (can be used for vblank evasion?)
+ * - Display-list enable
+ * - Header-less for WPF0
+ * - DL swap
+ */
+
+#define VSP1_DL_BODY_SIZE (2 * 4 * 256)
+#define VSP1_DL_NUM_LISTS 3
+
+struct vsp1_dl_entry {
+ u32 addr;
+ u32 data;
+} __attribute__((__packed__));
+
+struct vsp1_dl_list {
+ size_t size;
+ int reg_count;
+
+ bool in_use;
+
+ struct vsp1_dl_entry *body;
+ dma_addr_t dma;
+};
+
+/**
+ * struct vsp1_dl - Display List manager
+ * @vsp1: the VSP1 device
+ * @lock: protects the active, queued and pending lists
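+ * @size: total size in bytes of the display list memory pool
+ * @dma: DMA address of the display list memory pool
+ * @mem: CPU virtual address of the display list memory pool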
+ * @lists.all: array of all allocated display lists
+ * @lists.active: list currently being processed (loaded) by hardware
+ * @lists.queued: list queued to the hardware (written to the DL registers)
+ * @lists.pending: list waiting to be queued to the hardware
+ * @lists.write: list being written to by software
+ */
+struct vsp1_dl {
+ struct vsp1_device *vsp1;
+
+ spinlock_t lock;
+
+ size_t size;
+ dma_addr_t dma;
+ void *mem;
+
+ struct {
+ struct vsp1_dl_list all[VSP1_DL_NUM_LISTS];
+
+ struct vsp1_dl_list *active;
+ struct vsp1_dl_list *queued;
+ struct vsp1_dl_list *pending;
+ struct vsp1_dl_list *write;
+ } lists;
+};
+
+/* -----------------------------------------------------------------------------
+ * Display List Transaction Management
+ */
+
+static void vsp1_dl_free_list(struct vsp1_dl_list *list)
+{
+ if (!list)
+ return;
+
+ list->in_use = false;
+}
+
+void vsp1_dl_reset(struct vsp1_dl *dl)
+{
+ unsigned int i;
+
+ dl->lists.active = NULL;
+ dl->lists.queued = NULL;
+ dl->lists.pending = NULL;
+ dl->lists.write = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i)
+ dl->lists.all[i].in_use = false;
+}
+
+void vsp1_dl_begin(struct vsp1_dl *dl)
+{
+ struct vsp1_dl_list *list = NULL;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&dl->lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) {
+ if (!dl->lists.all[i].in_use) {
+ list = &dl->lists.all[i];
+ break;
+ }
+ }
+
+ if (!list) {
+ list = dl->lists.pending;
+ dl->lists.pending = NULL;
+ }
+
+ spin_unlock_irqrestore(&dl->lock, flags);
+
+ dl->lists.write = list;
+
+ list->in_use = true;
+ list->reg_count = 0;
+}
+
+void vsp1_dl_add(struct vsp1_entity *e, u32 reg, u32 data)
+{
+ struct vsp1_pipeline *pipe = to_vsp1_pipeline(&e->subdev.entity);
+ struct vsp1_dl *dl = pipe->dl;
+ struct vsp1_dl_list *list = dl->lists.write;
+
+ list->body[list->reg_count].addr = reg;
+ list->body[list->reg_count].data = data;
+ list->reg_count++;
+}
+
+void vsp1_dl_commit(struct vsp1_dl *dl)
+{
+ struct vsp1_device *vsp1 = dl->vsp1;
+ struct vsp1_dl_list *list;
+ unsigned long flags;
+ bool update;
+
+ list = dl->lists.write;
+ dl->lists.write = NULL;
+
+ spin_lock_irqsave(&dl->lock, flags);
+
+ /* Once the UPD bit has been set the hardware can start processing the
+ * display list at any time and we can't touch the address and size
+ * registers. In that case, mark the update as pending; it will be
+ * queued to the hardware by the frame end interrupt handler.
+ */
+ update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
+ if (update) {
+ vsp1_dl_free_list(dl->lists.pending);
+ dl->lists.pending = list;
+ goto done;
+ }
+
+ /* Program the hardware with the display list body address and size.
+ * The UPD bit will be cleared by the device when the display list is
+ * processed.
+ */
+ vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma);
+ vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
+ (list->reg_count * 8));
+
+ vsp1_dl_free_list(dl->lists.queued);
+ dl->lists.queued = list;
+
+done:
+ spin_unlock_irqrestore(&dl->lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
+
+void vsp1_dl_irq_display_start(struct vsp1_dl *dl)
+{
+ spin_lock(&dl->lock);
+
+ /* The display start interrupt signals the end of the display list
+ * processing by the device. The active display list, if any, won't be
+ * accessed anymore and can be reused.
+ */
+ if (dl->lists.active) {
+ vsp1_dl_free_list(dl->lists.active);
+ dl->lists.active = NULL;
+ }
+
+ spin_unlock(&dl->lock);
+}
+
+void vsp1_dl_irq_frame_end(struct vsp1_dl *dl)
+{
+ struct vsp1_device *vsp1 = dl->vsp1;
+
+ spin_lock(&dl->lock);
+
+ /* The UPD bit set indicates that the commit operation raced with the
+ * interrupt and occurred after the frame end event and UPD clear but
+ * before interrupt processing. The hardware hasn't taken the update
+ * into account yet; we'll thus skip one frame and retry.
+ */
+ if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
+ goto done;
+
+ /* The device starts processing the queued display list right after the
+ * frame end interrupt. The display list thus becomes active.
+ */
+ if (dl->lists.queued) {
+ WARN_ON(dl->lists.active);
+ dl->lists.active = dl->lists.queued;
+ dl->lists.queued = NULL;
+ }
+
+ /* Now that the UPD bit has been cleared we can queue the next display
+ * list to the hardware if one has been prepared.
+ */
+ if (dl->lists.pending) {
+ struct vsp1_dl_list *list = dl->lists.pending;
+
+ vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma);
+ vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
+ (list->reg_count * 8));
+
+ dl->lists.queued = list;
+ dl->lists.pending = NULL;
+ }
+
+done:
+ spin_unlock(&dl->lock);
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware Setup
+ */
+
+void vsp1_dl_setup(struct vsp1_device *vsp1)
+{
+ u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
+ | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
+ | VI6_DL_CTRL_DLE;
+
+ /* The DRM pipeline operates with header-less display lists in
+ * Continuous Frame Mode.
+ */
+ if (vsp1->drm)
+ ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
+
+ vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
+ vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_dl *dl;
+ unsigned int i;
+
+ dl = kzalloc(sizeof(*dl), GFP_KERNEL);
+ if (!dl)
+ return NULL;
+
+ spin_lock_init(&dl->lock);
+
+ dl->vsp1 = vsp1;
+ dl->size = VSP1_DL_BODY_SIZE * ARRAY_SIZE(dl->lists.all);
+
+ dl->mem = dma_alloc_wc(vsp1->dev, dl->size, &dl->dma,
+ GFP_KERNEL);
+ if (!dl->mem) {
+ kfree(dl);
+ return NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) {
+ struct vsp1_dl_list *list = &dl->lists.all[i];
+
+ list->size = VSP1_DL_BODY_SIZE;
+ list->reg_count = 0;
+ list->in_use = false;
+ list->dma = dl->dma + VSP1_DL_BODY_SIZE * i;
+ list->body = dl->mem + VSP1_DL_BODY_SIZE * i;
+ }
+
+ return dl;
+}
+
+void vsp1_dl_destroy(struct vsp1_dl *dl)
+{
+ dma_free_wc(dl->vsp1->dev, dl->size, dl->mem, dl->dma);
+ kfree(dl);
+}
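
A hedged sketch of a display list transaction built on the functions above. Here entity is any vsp1_entity in the pipeline that owns dl, and the register/value pair is only an example; this is not code from the patch:

static void example_dl_transaction(struct vsp1_dl *dl,
				   struct vsp1_entity *entity)
{
	/* Claim a free list body (or reuse the pending one). */
	vsp1_dl_begin(dl);

	/* Register writes issued through vsp1_mod_write() land here when
	 * use_dl is set; VI6_DPR_NODE_UNUSED is just a placeholder value.
	 */
	vsp1_dl_add(entity, entity->route->reg, VI6_DPR_NODE_UNUSED);

	/* Program the body address and UPD, or defer to the frame end
	 * interrupt if the hardware is still processing the previous list.
	 */
	vsp1_dl_commit(dl);
}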
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h
new file mode 100644
index 000000000000..448c4250e54c
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_dl.h
@@ -0,0 +1,42 @@
+/*
+ * vsp1_dl.h -- R-Car VSP1 Display List
+ *
+ * Copyright (C) 2015 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_DL_H__
+#define __VSP1_DL_H__
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+struct vsp1_dl;
+
+struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1);
+void vsp1_dl_destroy(struct vsp1_dl *dl);
+
+void vsp1_dl_setup(struct vsp1_device *vsp1);
+
+void vsp1_dl_reset(struct vsp1_dl *dl);
+void vsp1_dl_begin(struct vsp1_dl *dl);
+void vsp1_dl_add(struct vsp1_entity *e, u32 reg, u32 data);
+void vsp1_dl_commit(struct vsp1_dl *dl);
+
+void vsp1_dl_irq_display_start(struct vsp1_dl *dl);
+void vsp1_dl_irq_frame_end(struct vsp1_dl *dl);
+
+static inline void vsp1_dl_mod_write(struct vsp1_entity *e, u32 reg, u32 data)
+{
+ if (e->vsp1->use_dl)
+ vsp1_dl_add(e, reg, data);
+ else
+ vsp1_write(e->vsp1, reg, data);
+}
+
+#endif /* __VSP1_DL_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
new file mode 100644
index 000000000000..021fe5778cd1
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -0,0 +1,597 @@
+/*
+ * vsp1_drm.c -- R-Car VSP1 DRM API
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/vsp1.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_bru.h"
+#include "vsp1_dl.h"
+#include "vsp1_drm.h"
+#include "vsp1_lif.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+
+/* -----------------------------------------------------------------------------
+ * Runtime Handling
+ */
+
+static void vsp1_drm_pipeline_frame_end(struct vsp1_pipeline *pipe)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (pipe->num_inputs)
+ vsp1_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * DU Driver API
+ */
+
+int vsp1_du_init(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ if (!vsp1)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_init);
+
+/**
+ * vsp1_du_setup_lif - Setup the output part of the VSP pipeline
+ * @dev: the VSP device
+ * @width: output frame width in pixels
+ * @height: output frame height in pixels
+ *
+ * Configure the output part of VSP DRM pipeline for the given frame @width and
+ * @height. This sets up formats on the BRU source pad, the WPF0 sink and source
+ * pads, and the LIF sink pad.
+ *
+ * As the media bus code on the BRU source pad is conditioned by the
+ * configuration of the BRU sink 0 pad, we also set up the formats on all BRU
+ * sinks, even if the configuration will be overwritten later by
+ * vsp1_du_atomic_update(). This ensures that the BRU configuration is set to
+ * a well-defined state.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+int vsp1_du_setup_lif(struct device *dev, unsigned int width,
+ unsigned int height)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
+ struct vsp1_bru *bru = vsp1->bru;
+ struct v4l2_subdev_format format;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
+ __func__, width, height);
+
+ if (width == 0 || height == 0) {
+ /* Zero width or height means the CRTC is being disabled, stop
+ * the pipeline and turn the light off.
+ */
+ ret = vsp1_pipeline_stop(pipe);
+ if (ret == -ETIMEDOUT)
+ dev_err(vsp1->dev, "DRM pipeline stop timeout\n");
+
+ media_entity_pipeline_stop(&pipe->output->entity.subdev.entity);
+
+ for (i = 0; i < bru->entity.source_pad; ++i) {
+ bru->inputs[i].rpf = NULL;
+ pipe->inputs[i] = NULL;
+ }
+
+ pipe->num_inputs = 0;
+
+ vsp1_device_put(vsp1);
+
+ dev_dbg(vsp1->dev, "%s: pipeline disabled\n", __func__);
+
+ return 0;
+ }
+
+ vsp1_dl_reset(vsp1->drm->dl);
+
+ /* Configure the format at the BRU sinks and propagate it through the
+ * pipeline.
+ */
+ memset(&format, 0, sizeof(format));
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ for (i = 0; i < bru->entity.source_pad; ++i) {
+ format.pad = i;
+
+ format.format.width = width;
+ format.format.height = height;
+ format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
+ format.format.field = V4L2_FIELD_NONE;
+
+ ret = v4l2_subdev_call(&bru->entity.subdev, pad,
+ set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, i);
+ }
+
+ format.pad = bru->entity.source_pad;
+ format.format.width = width;
+ format.format.height = height;
+ format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
+ format.format.field = V4L2_FIELD_NONE;
+
+ ret = v4l2_subdev_call(&bru->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, i);
+
+ format.pad = RWPF_PAD_SINK;
+ ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF0 sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code);
+
+ format.pad = RWPF_PAD_SOURCE;
+ ret = v4l2_subdev_call(&vsp1->wpf[0]->entity.subdev, pad, get_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF0 source\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code);
+
+ format.pad = LIF_PAD_SINK;
+ ret = v4l2_subdev_call(&vsp1->lif->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code);
+
+ /* Verify that the format at the output of the pipeline matches the
+ * requested frame size and media bus code.
+ */
+ if (format.format.width != width || format.format.height != height ||
+ format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) {
+ dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__);
+ return -EPIPE;
+ }
+
+ /* Mark the pipeline as streaming and enable the VSP1. This will store
+ * the pipeline pointer in all entities, which the s_stream handlers
+ * will need. We don't start the entities themselves right at this point
+ * as there's no plane configured yet, so we can't start processing
+ * buffers.
+ */
+ ret = vsp1_device_get(vsp1);
+ if (ret < 0)
+ return ret;
+
+ ret = media_entity_pipeline_start(&pipe->output->entity.subdev.entity,
+ &pipe->pipe);
+ if (ret < 0) {
+ dev_dbg(vsp1->dev, "%s: pipeline start failed\n", __func__);
+ vsp1_device_put(vsp1);
+ return ret;
+ }
+
+ dev_dbg(vsp1->dev, "%s: pipeline enabled\n", __func__);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_setup_lif);
+
+/**
+ * vsp1_du_atomic_begin - Prepare for an atomic update
+ * @dev: the VSP device
+ */
+void vsp1_du_atomic_begin(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ vsp1->drm->num_inputs = pipe->num_inputs;
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ /* Prepare the display list. */
+ vsp1_dl_begin(vsp1->drm->dl);
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
+
+/**
+ * vsp1_du_atomic_update - Setup one RPF input of the VSP pipeline
+ * @dev: the VSP device
+ * @rpf_index: index of the RPF to setup (0-based)
+ * @pixelformat: V4L2 pixel format for the RPF memory input
+ * @pitch: number of bytes per line in the image stored in memory
+ * @mem: DMA addresses of the memory buffers (one per plane)
+ * @src: the source crop rectangle for the RPF
+ * @dst: the destination compose rectangle for the BRU input
+ *
+ * Configure the VSP to perform composition of the image referenced by @mem
+ * through RPF @rpf_index, using the @src crop rectangle and the @dst
+ * composition rectangle. The Z-order is fixed with RPF 0 at the bottom.
+ *
+ * Image format as stored in memory is expressed as a V4L2 @pixelformat value.
+ * As a special case, setting the pixel format to 0 will disable the RPF. The
+ * @pitch, @mem, @src and @dst parameters are ignored in that case. Calling the
+ * function on a disabled RPF is allowed.
+ *
+ * The memory pitch is configurable to allow for padding at the end of lines,
+ * or simply for images that extend beyond the crop rectangle boundaries. The
+ * @pitch value is expressed in bytes and applies to all planes for multiplanar
+ * formats.
+ *
+ * The source memory buffer is referenced by the DMA address of its planes in
+ * the @mem array. Up to two planes are supported. The second plane DMA address
+ * is ignored for formats using a single plane.
+ *
+ * This function isn't reentrant; the caller needs to serialize calls.
+ *
+ * TODO: Implement Z-order control by decoupling the RPF index from the BRU
+ * input index.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
+ u32 pixelformat, unsigned int pitch,
+ dma_addr_t mem[2], const struct v4l2_rect *src,
+ const struct v4l2_rect *dst)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
+ const struct vsp1_format_info *fmtinfo;
+ struct v4l2_subdev_selection sel;
+ struct v4l2_subdev_format format;
+ struct vsp1_rwpf_memory memory;
+ struct vsp1_rwpf *rpf;
+ unsigned long flags;
+ int ret;
+
+ if (rpf_index >= vsp1->info->rpf_count)
+ return -EINVAL;
+
+ rpf = vsp1->rpf[rpf_index];
+
+ if (pixelformat == 0) {
+ dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__,
+ rpf_index);
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ if (pipe->inputs[rpf_index]) {
+ /* Remove the RPF from the pipeline if it was previously
+ * enabled.
+ */
+ vsp1->bru->inputs[rpf_index].rpf = NULL;
+ pipe->inputs[rpf_index] = NULL;
+
+ pipe->num_inputs--;
+ }
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ return 0;
+ }
+
+ dev_dbg(vsp1->dev,
+ "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad }\n",
+ __func__, rpf_index,
+ src->left, src->top, src->width, src->height,
+ dst->left, dst->top, dst->width, dst->height,
+ pixelformat, pitch, &mem[0], &mem[1]);
+
+ /* Set the stride at the RPF input. */
+ fmtinfo = vsp1_get_format_info(pixelformat);
+ if (!fmtinfo) {
+ dev_dbg(vsp1->dev, "Unsupport pixel format %08x for RPF\n",
+ pixelformat);
+ return -EINVAL;
+ }
+
+ rpf->fmtinfo = fmtinfo;
+ rpf->format.num_planes = fmtinfo->planes;
+ rpf->format.plane_fmt[0].bytesperline = pitch;
+ rpf->format.plane_fmt[1].bytesperline = pitch;
+
+ /* Configure the format on the RPF sink pad and propagate it up to the
+ * BRU sink pad.
+ */
+ memset(&format, 0, sizeof(format));
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.pad = RWPF_PAD_SINK;
+ format.format.width = src->width + src->left;
+ format.format.height = src->height + src->top;
+ format.format.code = fmtinfo->mbus;
+ format.format.field = V4L2_FIELD_NONE;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: set format %ux%u (%x) on RPF%u sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, rpf->entity.index);
+
+ memset(&sel, 0, sizeof(sel));
+ sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sel.pad = RWPF_PAD_SINK;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = *src;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_selection, NULL,
+ &sel);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: set selection (%u,%u)/%ux%u on RPF%u sink\n",
+ __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height,
+ rpf->entity.index);
+
+ /* RPF source, hardcode the format to ARGB8888 to turn on format
+ * conversion if needed.
+ */
+ format.pad = RWPF_PAD_SOURCE;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, get_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: got format %ux%u (%x) on RPF%u source\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, rpf->entity.index);
+
+ format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ /* BRU sink, propagate the format from the RPF source. */
+ format.pad = rpf->entity.index;
+
+ ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on BRU pad %u\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, format.pad);
+
+ sel.pad = rpf->entity.index;
+ sel.target = V4L2_SEL_TGT_COMPOSE;
+ sel.r = *dst;
+
+ ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_selection,
+ NULL, &sel);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: set selection (%u,%u)/%ux%u on BRU pad %u\n",
+ __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height,
+ sel.pad);
+
+ /* Store the compose rectangle coordinates in the RPF. */
+ rpf->location.left = dst->left;
+ rpf->location.top = dst->top;
+
+ /* Set the memory buffer address. */
+ memory.num_planes = fmtinfo->planes;
+ memory.addr[0] = mem[0];
+ memory.addr[1] = mem[1];
+
+ rpf->ops->set_memory(rpf, &memory);
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ /* If the RPF was previously stopped set the BRU input to the RPF and
+ * store the RPF in the pipeline inputs array.
+ */
+ if (!pipe->inputs[rpf->entity.index]) {
+ vsp1->bru->inputs[rpf_index].rpf = rpf;
+ pipe->inputs[rpf->entity.index] = rpf;
+ pipe->num_inputs++;
+ }
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_update);
+
+/**
+ * vsp1_du_atomic_flush - Commit an atomic update
+ * @dev: the VSP device
+ */
+void vsp1_du_atomic_flush(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
+ struct vsp1_entity *entity;
+ unsigned long flags;
+ bool stop = false;
+ int ret;
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ /* Disconnect unused RPFs from the pipeline. */
+ if (entity->type == VSP1_ENTITY_RPF) {
+ struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
+
+ if (!pipe->inputs[rpf->entity.index]) {
+ vsp1_mod_write(entity, entity->route->reg,
+ VI6_DPR_NODE_UNUSED);
+ continue;
+ }
+ }
+
+ vsp1_entity_route_setup(entity);
+
+ ret = v4l2_subdev_call(&entity->subdev, video,
+ s_stream, 1);
+ if (ret < 0) {
+ dev_err(vsp1->dev,
+ "DRM pipeline start failure on entity %s\n",
+ entity->subdev.name);
+ return;
+ }
+ }
+
+ vsp1_dl_commit(vsp1->drm->dl);
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ /* Start or stop the pipeline if needed. */
+ if (!vsp1->drm->num_inputs && pipe->num_inputs) {
+ vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0);
+ vsp1_write(vsp1, VI6_DISP_IRQ_ENB, VI6_DISP_IRQ_ENB_DSTE);
+ vsp1_pipeline_run(pipe);
+ } else if (vsp1->drm->num_inputs && !pipe->num_inputs) {
+ stop = true;
+ }
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ if (stop) {
+ vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0);
+ vsp1_pipeline_stop(pipe);
+ }
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_flush);
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+int vsp1_drm_create_links(struct vsp1_device *vsp1)
+{
+ const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
+ unsigned int i;
+ int ret;
+
+ /* VSPD instances require a BRU to perform composition and a LIF to
+ * output to the DU.
+ */
+ if (!vsp1->bru || !vsp1->lif)
+ return -ENXIO;
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf = vsp1->rpf[i];
+
+ ret = media_create_pad_link(&rpf->entity.subdev.entity,
+ RWPF_PAD_SOURCE,
+ &vsp1->bru->entity.subdev.entity,
+ i, flags);
+ if (ret < 0)
+ return ret;
+
+ rpf->entity.sink = &vsp1->bru->entity.subdev.entity;
+ rpf->entity.sink_pad = i;
+ }
+
+ ret = media_create_pad_link(&vsp1->bru->entity.subdev.entity,
+ vsp1->bru->entity.source_pad,
+ &vsp1->wpf[0]->entity.subdev.entity,
+ RWPF_PAD_SINK, flags);
+ if (ret < 0)
+ return ret;
+
+ vsp1->bru->entity.sink = &vsp1->wpf[0]->entity.subdev.entity;
+ vsp1->bru->entity.sink_pad = RWPF_PAD_SINK;
+
+ ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity,
+ RWPF_PAD_SOURCE,
+ &vsp1->lif->entity.subdev.entity,
+ LIF_PAD_SINK, flags);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int vsp1_drm_init(struct vsp1_device *vsp1)
+{
+ struct vsp1_pipeline *pipe;
+ unsigned int i;
+
+ vsp1->drm = devm_kzalloc(vsp1->dev, sizeof(*vsp1->drm), GFP_KERNEL);
+ if (!vsp1->drm)
+ return -ENOMEM;
+
+ vsp1->drm->dl = vsp1_dl_create(vsp1);
+ if (!vsp1->drm->dl)
+ return -ENOMEM;
+
+ pipe = &vsp1->drm->pipe;
+
+ vsp1_pipeline_init(pipe);
+ pipe->frame_end = vsp1_drm_pipeline_frame_end;
+
+ /* The DRM pipeline is static, add entities manually. */
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *input = vsp1->rpf[i];
+
+ list_add_tail(&input->entity.list_pipe, &pipe->entities);
+ }
+
+ list_add_tail(&vsp1->bru->entity.list_pipe, &pipe->entities);
+ list_add_tail(&vsp1->wpf[0]->entity.list_pipe, &pipe->entities);
+ list_add_tail(&vsp1->lif->entity.list_pipe, &pipe->entities);
+
+ pipe->bru = &vsp1->bru->entity;
+ pipe->lif = &vsp1->lif->entity;
+ pipe->output = vsp1->wpf[0];
+
+ pipe->dl = vsp1->drm->dl;
+
+ return 0;
+}
+
+void vsp1_drm_cleanup(struct vsp1_device *vsp1)
+{
+ vsp1_dl_destroy(vsp1->drm->dl);
+}
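
A hedged sketch of the call sequence a DU driver is expected to follow against the API exported above. Sizes, pixel format and DMA addresses are placeholders, and error handling is trimmed:

static int example_du_plane_commit(struct device *vsp_dev, dma_addr_t mem[2])
{
	struct v4l2_rect rect = {
		.left = 0, .top = 0, .width = 1920, .height = 1080,
	};
	int ret;

	/* Configure the BRU/WPF0/LIF output chain for the CRTC size. */
	ret = vsp1_du_setup_lif(vsp_dev, 1920, 1080);
	if (ret < 0)
		return ret;

	/* One atomic update: begin, configure RPF 0, then flush. */
	vsp1_du_atomic_begin(vsp_dev);
	ret = vsp1_du_atomic_update(vsp_dev, 0, V4L2_PIX_FMT_ARGB32,
				    1920 * 4, mem, &rect, &rect);
	vsp1_du_atomic_flush(vsp_dev);

	return ret;
}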
diff --git a/drivers/media/platform/vsp1/vsp1_drm.h b/drivers/media/platform/vsp1/vsp1_drm.h
new file mode 100644
index 000000000000..f68056838319
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_drm.h
@@ -0,0 +1,49 @@
+/*
+ * vsp1_drm.h -- R-Car VSP1 DRM/KMS Interface
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_DRM_H__
+#define __VSP1_DRM_H__
+
+#include "vsp1_pipe.h"
+
+struct vsp1_dl;
+
+/**
+ * vsp1_drm - State for the API exposed to the DRM driver
+ * @dl: display list for DRM pipeline operation
+ * @pipe: the VSP1 pipeline used for display
+ * @num_inputs: number of active pipeline inputs at the beginning of an update
+ * @update: the pipeline configuration has been updated
+ */
+struct vsp1_drm {
+ struct vsp1_dl *dl;
+ struct vsp1_pipeline pipe;
+ unsigned int num_inputs;
+ bool update;
+};
+
+int vsp1_drm_init(struct vsp1_device *vsp1);
+void vsp1_drm_cleanup(struct vsp1_device *vsp1);
+int vsp1_drm_create_links(struct vsp1_device *vsp1);
+
+int vsp1_du_init(struct device *dev);
+int vsp1_du_setup_lif(struct device *dev, unsigned int width,
+ unsigned int height);
+void vsp1_du_atomic_begin(struct device *dev);
+int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
+ u32 pixelformat, unsigned int pitch,
+ dma_addr_t mem[2], const struct v4l2_rect *src,
+ const struct v4l2_rect *dst);
+void vsp1_du_atomic_flush(struct device *dev);
+
+#endif /* __VSP1_DRM_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 533bc796391e..25750a0e4631 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -17,17 +17,23 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+
#include "vsp1.h"
#include "vsp1_bru.h"
+#include "vsp1_dl.h"
+#include "vsp1_drm.h"
#include "vsp1_hsit.h"
#include "vsp1_lif.h"
#include "vsp1_lut.h"
#include "vsp1_rwpf.h"
#include "vsp1_sru.h"
#include "vsp1_uds.h"
+#include "vsp1_video.h"
/* -----------------------------------------------------------------------------
* Interrupt Handling
@@ -39,11 +45,11 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
struct vsp1_device *vsp1 = data;
irqreturn_t ret = IRQ_NONE;
unsigned int i;
+ u32 status;
- for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
struct vsp1_rwpf *wpf = vsp1->wpf[i];
struct vsp1_pipeline *pipe;
- u32 status;
if (wpf == NULL)
continue;
@@ -58,6 +64,21 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
}
}
+ status = vsp1_read(vsp1, VI6_DISP_IRQ_STA);
+ vsp1_write(vsp1, VI6_DISP_IRQ_STA, ~status & VI6_DISP_IRQ_STA_DST);
+
+ if (status & VI6_DISP_IRQ_STA_DST) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[0];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf) {
+ pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ vsp1_pipeline_display_start(pipe);
+ }
+
+ ret = IRQ_HANDLED;
+ }
+
return ret;
}
@@ -66,7 +87,7 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
*/
/*
- * vsp1_create_links - Create links from all sources to the given sink
+ * vsp1_create_sink_links - Create links from all sources to the given sink
*
* This function creates media links from all valid sources to the given sink
* pad. Links that would be invalid according to the VSP1 hardware capabilities
@@ -75,7 +96,8 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
* - from a UDS to a UDS (UDS entities can't be chained)
* - from an entity to itself (no loops are allowed)
*/
-static int vsp1_create_links(struct vsp1_device *vsp1, struct vsp1_entity *sink)
+static int vsp1_create_sink_links(struct vsp1_device *vsp1,
+ struct vsp1_entity *sink)
{
struct media_entity *entity = &sink->subdev.entity;
struct vsp1_entity *source;
@@ -115,19 +137,86 @@ static int vsp1_create_links(struct vsp1_device *vsp1, struct vsp1_entity *sink)
return 0;
}
-static void vsp1_destroy_entities(struct vsp1_device *vsp1)
+static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
{
struct vsp1_entity *entity;
- struct vsp1_entity *next;
+ unsigned int i;
+ int ret;
+
+ list_for_each_entry(entity, &vsp1->entities, list_dev) {
+ if (entity->type == VSP1_ENTITY_LIF ||
+ entity->type == VSP1_ENTITY_RPF)
+ continue;
+
+ ret = vsp1_create_sink_links(vsp1, entity);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (vsp1->info->features & VSP1_HAS_LIF) {
+ ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity,
+ RWPF_PAD_SOURCE,
+ &vsp1->lif->entity.subdev.entity,
+ LIF_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf = vsp1->rpf[i];
- list_for_each_entry_safe(entity, next, &vsp1->entities, list_dev) {
+ ret = media_create_pad_link(&rpf->video->video.entity, 0,
+ &rpf->entity.subdev.entity,
+ RWPF_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ /* Connect the video device to the WPF. All connections are
+ * immutable except for the WPF0 source link if a LIF is
+ * present.
+ */
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ unsigned int flags = MEDIA_LNK_FL_ENABLED;
+
+ if (!(vsp1->info->features & VSP1_HAS_LIF) || i != 0)
+ flags |= MEDIA_LNK_FL_IMMUTABLE;
+
+ ret = media_create_pad_link(&wpf->entity.subdev.entity,
+ RWPF_PAD_SOURCE,
+ &wpf->video->video.entity, 0,
+ flags);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vsp1_destroy_entities(struct vsp1_device *vsp1)
+{
+ struct vsp1_entity *entity, *_entity;
+ struct vsp1_video *video, *_video;
+
+ list_for_each_entry_safe(entity, _entity, &vsp1->entities, list_dev) {
list_del(&entity->list_dev);
vsp1_entity_destroy(entity);
}
+ list_for_each_entry_safe(video, _video, &vsp1->videos, list) {
+ list_del(&video->list);
+ vsp1_video_cleanup(video);
+ }
+
v4l2_device_unregister(&vsp1->v4l2_dev);
media_device_unregister(&vsp1->media_dev);
media_device_cleanup(&vsp1->media_dev);
+
+ if (!vsp1->info->uapi)
+ vsp1_drm_cleanup(vsp1);
}
static int vsp1_create_entities(struct vsp1_device *vsp1)
@@ -144,6 +233,14 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
dev_name(mdev->dev));
media_device_init(mdev);
+ vsp1->media_ops.link_setup = vsp1_entity_link_setup;
+ /* Don't perform link validation when the userspace API is disabled as
+ * the pipeline is configured internally by the driver in that case, and
+ * its configuration can thus be trusted.
+ */
+ if (vsp1->info->uapi)
+ vsp1->media_ops.link_validate = v4l2_subdev_link_validate;
+
vdev->mdev = mdev;
ret = v4l2_device_register(vsp1->dev, vdev);
if (ret < 0) {
@@ -153,13 +250,15 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
/* Instantiate all the entities. */
- vsp1->bru = vsp1_bru_create(vsp1);
- if (IS_ERR(vsp1->bru)) {
- ret = PTR_ERR(vsp1->bru);
- goto done;
- }
+ if (vsp1->info->features & VSP1_HAS_BRU) {
+ vsp1->bru = vsp1_bru_create(vsp1);
+ if (IS_ERR(vsp1->bru)) {
+ ret = PTR_ERR(vsp1->bru);
+ goto done;
+ }
- list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
+ list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
+ }
vsp1->hsi = vsp1_hsit_create(vsp1, true);
if (IS_ERR(vsp1->hsi)) {
@@ -177,7 +276,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
- if (vsp1->pdata.features & VSP1_HAS_LIF) {
+ if (vsp1->info->features & VSP1_HAS_LIF) {
vsp1->lif = vsp1_lif_create(vsp1);
if (IS_ERR(vsp1->lif)) {
ret = PTR_ERR(vsp1->lif);
@@ -187,7 +286,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities);
}
- if (vsp1->pdata.features & VSP1_HAS_LUT) {
+ if (vsp1->info->features & VSP1_HAS_LUT) {
vsp1->lut = vsp1_lut_create(vsp1);
if (IS_ERR(vsp1->lut)) {
ret = PTR_ERR(vsp1->lut);
@@ -197,7 +296,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
}
- for (i = 0; i < vsp1->pdata.rpf_count; ++i) {
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
struct vsp1_rwpf *rpf;
rpf = vsp1_rpf_create(vsp1, i);
@@ -208,9 +307,20 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
vsp1->rpf[i] = rpf;
list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
+
+ if (vsp1->info->uapi) {
+ struct vsp1_video *video = vsp1_video_create(vsp1, rpf);
+
+ if (IS_ERR(video)) {
+ ret = PTR_ERR(video);
+ goto done;
+ }
+
+ list_add_tail(&video->list, &vsp1->videos);
+ }
}
- if (vsp1->pdata.features & VSP1_HAS_SRU) {
+ if (vsp1->info->features & VSP1_HAS_SRU) {
vsp1->sru = vsp1_sru_create(vsp1);
if (IS_ERR(vsp1->sru)) {
ret = PTR_ERR(vsp1->sru);
@@ -220,7 +330,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
}
- for (i = 0; i < vsp1->pdata.uds_count; ++i) {
+ for (i = 0; i < vsp1->info->uds_count; ++i) {
struct vsp1_uds *uds;
uds = vsp1_uds_create(vsp1, i);
@@ -233,7 +343,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&uds->entity.list_dev, &vsp1->entities);
}
- for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
struct vsp1_rwpf *wpf;
wpf = vsp1_wpf_create(vsp1, i);
@@ -244,6 +354,18 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
vsp1->wpf[i] = wpf;
list_add_tail(&wpf->entity.list_dev, &vsp1->entities);
+
+ if (vsp1->info->uapi) {
+ struct vsp1_video *video = vsp1_video_create(vsp1, wpf);
+
+ if (IS_ERR(video)) {
+ ret = PTR_ERR(video);
+ goto done;
+ }
+
+ list_add_tail(&video->list, &vsp1->videos);
+ wpf->entity.sink = &video->video.entity;
+ }
}
/* Register all subdevs. */
@@ -255,34 +377,23 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
}
/* Create links. */
- list_for_each_entry(entity, &vsp1->entities, list_dev) {
- if (entity->type == VSP1_ENTITY_WPF) {
- ret = vsp1_wpf_create_links(vsp1, entity);
- if (ret < 0)
- goto done;
- } else if (entity->type == VSP1_ENTITY_RPF) {
- ret = vsp1_rpf_create_links(vsp1, entity);
- if (ret < 0)
- goto done;
- }
-
- if (entity->type != VSP1_ENTITY_LIF &&
- entity->type != VSP1_ENTITY_RPF) {
- ret = vsp1_create_links(vsp1, entity);
- if (ret < 0)
- goto done;
- }
- }
+ if (vsp1->info->uapi)
+ ret = vsp1_uapi_create_links(vsp1);
+ else
+ ret = vsp1_drm_create_links(vsp1);
+ if (ret < 0)
+ goto done;
- if (vsp1->pdata.features & VSP1_HAS_LIF) {
- ret = media_create_pad_link(
- &vsp1->wpf[0]->entity.subdev.entity, RWPF_PAD_SOURCE,
- &vsp1->lif->entity.subdev.entity, LIF_PAD_SINK, 0);
- if (ret < 0)
- return ret;
+ /* Register subdev nodes if the userspace API is enabled or initialize
+ * the DRM pipeline otherwise.
+ */
+ if (vsp1->info->uapi) {
+ vsp1->use_dl = false;
+ ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
+ } else {
+ vsp1->use_dl = true;
+ ret = vsp1_drm_init(vsp1);
}
-
- ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
if (ret < 0)
goto done;
@@ -295,42 +406,51 @@ done:
return ret;
}
-static int vsp1_device_init(struct vsp1_device *vsp1)
+int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index)
{
- unsigned int i;
+ unsigned int timeout;
u32 status;
- /* Reset any channel that might be running. */
status = vsp1_read(vsp1, VI6_STATUS);
+ if (!(status & VI6_STATUS_SYS_ACT(index)))
+ return 0;
- for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
- unsigned int timeout;
+ vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(index));
+ for (timeout = 10; timeout > 0; --timeout) {
+ status = vsp1_read(vsp1, VI6_STATUS);
+ if (!(status & VI6_STATUS_SYS_ACT(index)))
+ break;
- if (!(status & VI6_STATUS_SYS_ACT(i)))
- continue;
+ usleep_range(1000, 2000);
+ }
- vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(i));
- for (timeout = 10; timeout > 0; --timeout) {
- status = vsp1_read(vsp1, VI6_STATUS);
- if (!(status & VI6_STATUS_SYS_ACT(i)))
- break;
+ if (!timeout) {
+ dev_err(vsp1->dev, "failed to reset wpf.%u\n", index);
+ return -ETIMEDOUT;
+ }
- usleep_range(1000, 2000);
- }
+ return 0;
+}
- if (!timeout) {
- dev_err(vsp1->dev, "failed to reset wpf.%u\n", i);
- return -ETIMEDOUT;
- }
+static int vsp1_device_init(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+ int ret;
+
+ /* Reset any channel that might be running. */
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ ret = vsp1_reset_wpf(vsp1, i);
+ if (ret < 0)
+ return ret;
}
vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << VI6_CLK_DCSWT_CSTPW_SHIFT) |
(8 << VI6_CLK_DCSWT_CSTRW_SHIFT));
- for (i = 0; i < vsp1->pdata.rpf_count; ++i)
+ for (i = 0; i < vsp1->info->rpf_count; ++i)
vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED);
- for (i = 0; i < vsp1->pdata.uds_count; ++i)
+ for (i = 0; i < vsp1->info->uds_count; ++i)
vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED);
vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED);
@@ -345,6 +465,9 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
(VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+ if (vsp1->use_dl)
+ vsp1_dl_setup(vsp1);
+
return 0;
}
@@ -444,48 +567,76 @@ static const struct dev_pm_ops vsp1_pm_ops = {
* Platform Driver
*/
-static int vsp1_parse_dt(struct vsp1_device *vsp1)
-{
- struct device_node *np = vsp1->dev->of_node;
- struct vsp1_platform_data *pdata = &vsp1->pdata;
-
- if (of_property_read_bool(np, "renesas,has-lif"))
- pdata->features |= VSP1_HAS_LIF;
- if (of_property_read_bool(np, "renesas,has-lut"))
- pdata->features |= VSP1_HAS_LUT;
- if (of_property_read_bool(np, "renesas,has-sru"))
- pdata->features |= VSP1_HAS_SRU;
-
- of_property_read_u32(np, "renesas,#rpf", &pdata->rpf_count);
- of_property_read_u32(np, "renesas,#uds", &pdata->uds_count);
- of_property_read_u32(np, "renesas,#wpf", &pdata->wpf_count);
-
- if (pdata->rpf_count <= 0 || pdata->rpf_count > VSP1_MAX_RPF) {
- dev_err(vsp1->dev, "invalid number of RPF (%u)\n",
- pdata->rpf_count);
- return -EINVAL;
- }
-
- if (pdata->uds_count <= 0 || pdata->uds_count > VSP1_MAX_UDS) {
- dev_err(vsp1->dev, "invalid number of UDS (%u)\n",
- pdata->uds_count);
- return -EINVAL;
- }
-
- if (pdata->wpf_count <= 0 || pdata->wpf_count > VSP1_MAX_WPF) {
- dev_err(vsp1->dev, "invalid number of WPF (%u)\n",
- pdata->wpf_count);
- return -EINVAL;
- }
-
- return 0;
-}
+static const struct vsp1_device_info vsp1_device_infos[] = {
+ {
+ .version = VI6_IP_VERSION_MODEL_VSPS_H2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU,
+ .rpf_count = 5,
+ .uds_count = 3,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPR_H2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_SRU,
+ .rpf_count = 5,
+ .uds_count = 1,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
+ .rpf_count = 4,
+ .uds_count = 1,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPS_M2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU,
+ .rpf_count = 5,
+ .uds_count = 3,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
+ .features = VSP1_HAS_LUT | VSP1_HAS_SRU,
+ .rpf_count = 1,
+ .uds_count = 1,
+ .wpf_count = 1,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
+ .features = VSP1_HAS_BRU,
+ .rpf_count = 5,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_LUT,
+ .rpf_count = 5,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
+ .rpf_count = 5,
+ .wpf_count = 2,
+ .num_bru_inputs = 5,
+ },
+};
static int vsp1_probe(struct platform_device *pdev)
{
struct vsp1_device *vsp1;
struct resource *irq;
struct resource *io;
+ unsigned int i;
+ u32 version;
int ret;
vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL);
@@ -495,10 +646,7 @@ static int vsp1_probe(struct platform_device *pdev)
vsp1->dev = &pdev->dev;
mutex_init(&vsp1->lock);
INIT_LIST_HEAD(&vsp1->entities);
-
- ret = vsp1_parse_dt(vsp1);
- if (ret < 0)
- return ret;
+ INIT_LIST_HEAD(&vsp1->videos);
/* I/O, IRQ and clock resources */
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -525,6 +673,29 @@ static int vsp1_probe(struct platform_device *pdev)
return ret;
}
+ /* Configure device parameters based on the version register. */
+ ret = clk_prepare_enable(vsp1->clock);
+ if (ret < 0)
+ return ret;
+
+ version = vsp1_read(vsp1, VI6_IP_VERSION);
+ clk_disable_unprepare(vsp1->clock);
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
+ if ((version & VI6_IP_VERSION_MODEL_MASK) ==
+ vsp1_device_infos[i].version) {
+ vsp1->info = &vsp1_device_infos[i];
+ break;
+ }
+ }
+
+ if (!vsp1->info) {
+ dev_err(&pdev->dev, "unsupported IP version 0x%08x\n", version);
+ return -ENXIO;
+ }
+
+ dev_dbg(&pdev->dev, "IP version 0x%08x\n", version);
+
/* Instantiate entities */
ret = vsp1_create_entities(vsp1);
if (ret < 0) {
@@ -548,6 +719,7 @@ static int vsp1_remove(struct platform_device *pdev)
static const struct of_device_id vsp1_of_match[] = {
{ .compatible = "renesas,vsp1" },
+ { .compatible = "renesas,vsp2" },
{ },
};
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index d7308530952f..20a78fbd3691 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -20,7 +20,6 @@
#include "vsp1.h"
#include "vsp1_entity.h"
-#include "vsp1_video.h"
bool vsp1_entity_is_streaming(struct vsp1_entity *entity)
{
@@ -46,7 +45,7 @@ int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming)
if (!streaming)
return 0;
- if (!entity->subdev.ctrl_handler)
+ if (!entity->vsp1->info->uapi || !entity->subdev.ctrl_handler)
return 0;
ret = v4l2_ctrl_handler_setup(entity->subdev.ctrl_handler);
@@ -59,6 +58,18 @@ int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming)
return ret;
}
+void vsp1_entity_route_setup(struct vsp1_entity *source)
+{
+ struct vsp1_entity *sink;
+
+ if (source->route->reg == 0)
+ return;
+
+ sink = container_of(source->sink, struct vsp1_entity, subdev.entity);
+ vsp1_mod_write(source, source->route->reg,
+ sink->route->inputs[source->sink_pad]);
+}
+
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Operations
*/
@@ -120,9 +131,9 @@ const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops = {
* Media Operations
*/
-static int vsp1_entity_link_setup(struct media_entity *entity,
- const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
+int vsp1_entity_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
{
struct vsp1_entity *source;
@@ -147,11 +158,6 @@ static int vsp1_entity_link_setup(struct media_entity *entity,
return 0;
}
-const struct media_entity_operations vsp1_media_ops = {
- .link_setup = vsp1_entity_link_setup,
- .link_validate = v4l2_subdev_link_validate,
-};
-
/* -----------------------------------------------------------------------------
* Initialization
*/
@@ -159,7 +165,8 @@ const struct media_entity_operations vsp1_media_ops = {
static const struct vsp1_route vsp1_routes[] = {
{ VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE,
{ VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1),
- VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3), } },
+ VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3),
+ VI6_DPR_NODE_BRU_IN(4) } },
{ VSP1_ENTITY_HSI, 0, VI6_DPR_HSI_ROUTE, { VI6_DPR_NODE_HSI, } },
{ VSP1_ENTITY_HST, 0, VI6_DPR_HST_ROUTE, { VI6_DPR_NODE_HST, } },
{ VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, } },
@@ -225,8 +232,6 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
void vsp1_entity_destroy(struct vsp1_entity *entity)
{
- if (entity->video)
- vsp1_video_cleanup(entity->video);
if (entity->subdev.ctrl_handler)
v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
media_entity_cleanup(&entity->subdev.entity);
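
The new vsp1_entity_route_setup() helper centralises DPR programming: a source entity's routing register receives the DPR node number of the sink pad it feeds, taken from the sink's own route table. A reduced model of that double indirection follows; the register addresses and node values are hypothetical stand-ins, and mod_write() stands in for vsp1_mod_write():

#include <stdio.h>

struct route {
	unsigned int reg;	/* routing register address, 0 if fixed */
	unsigned int inputs[5];	/* DPR node ID per sink pad */
};

struct entity {
	const struct route *route;
	const struct entity *sink;	/* connected downstream entity */
	unsigned int sink_pad;		/* pad index on the sink */
};

static void mod_write(unsigned int reg, unsigned int val)
{
	printf("write 0x%04x <- %u\n", reg, val);
}

static void route_setup(const struct entity *source)
{
	if (source->route->reg == 0)	/* some entities have no routing register */
		return;
	mod_write(source->route->reg,
		  source->sink->route->inputs[source->sink_pad]);
}

int main(void)
{
	/* Hypothetical numbers: a BRU sink whose input 1 is DPR node 24. */
	struct route bru_route = { .reg = 0x2050, .inputs = { 23, 24, 25, 26, 49 } };
	struct route rpf_route = { .reg = 0x2000 };
	struct entity bru = { .route = &bru_route };
	struct entity rpf = { .route = &rpf_route, .sink = &bru, .sink_pad = 1 };

	route_setup(&rpf);	/* writes node 24 into the RPF routing register */
	return 0;
}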
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index 8867a5787c28..83570dfde8ec 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -19,7 +19,6 @@
#include <media/v4l2-subdev.h>
struct vsp1_device;
-struct vsp1_video;
enum vsp1_entity_type {
VSP1_ENTITY_BRU,
@@ -33,6 +32,8 @@ enum vsp1_entity_type {
VSP1_ENTITY_WPF,
};
+#define VSP1_ENTITY_MAX_INPUTS 5 /* For the BRU */
+
/*
* struct vsp1_route - Entity routing configuration
* @type: Entity type this routing entry is associated with
@@ -49,7 +50,7 @@ struct vsp1_route {
enum vsp1_entity_type type;
unsigned int index;
unsigned int reg;
- unsigned int inputs[4];
+ unsigned int inputs[VSP1_ENTITY_MAX_INPUTS];
};
struct vsp1_entity {
@@ -71,8 +72,6 @@ struct vsp1_entity {
struct v4l2_subdev subdev;
struct v4l2_mbus_framefmt *formats;
- struct vsp1_video *video;
-
spinlock_t lock; /* Protects the streaming field */
bool streaming;
};
@@ -87,7 +86,10 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
void vsp1_entity_destroy(struct vsp1_entity *entity);
extern const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops;
-extern const struct media_entity_operations vsp1_media_ops;
+
+int vsp1_entity_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags);
struct v4l2_mbus_framefmt *
vsp1_entity_get_pad_format(struct vsp1_entity *entity,
@@ -99,4 +101,6 @@ void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
bool vsp1_entity_is_streaming(struct vsp1_entity *entity);
int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming);
+void vsp1_entity_route_setup(struct vsp1_entity *source);
+
#endif /* __VSP1_ENTITY_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index 8ffb817ae525..c1087cff31a0 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -203,7 +203,7 @@ struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
subdev = &hsit->entity.subdev;
v4l2_subdev_init(subdev, &hsit_ops);
- subdev->entity.ops = &vsp1_media_ops;
+ subdev->entity.ops = &vsp1->media_ops;
subdev->internal_ops = &vsp1_subdev_internal_ops;
snprintf(subdev->name, sizeof(subdev->name), "%s %s",
dev_name(vsp1->dev), inverse ? "hsi" : "hst");
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 39fa5ef20fbb..433853ce8dbf 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -26,14 +26,9 @@
* Device Access
*/
-static inline u32 vsp1_lif_read(struct vsp1_lif *lif, u32 reg)
-{
- return vsp1_read(lif->entity.vsp1, reg);
-}
-
static inline void vsp1_lif_write(struct vsp1_lif *lif, u32 reg, u32 data)
{
- vsp1_write(lif->entity.vsp1, reg, data);
+ vsp1_mod_write(&lif->entity, reg, data);
}
/* -----------------------------------------------------------------------------
@@ -49,7 +44,7 @@ static int lif_s_stream(struct v4l2_subdev *subdev, int enable)
unsigned int lbth = 200;
if (!enable) {
- vsp1_lif_write(lif, VI6_LIF_CTRL, 0);
+ vsp1_write(lif->entity.vsp1, VI6_LIF_CTRL, 0);
return 0;
}
@@ -228,7 +223,7 @@ struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1)
subdev = &lif->entity.subdev;
v4l2_subdev_init(subdev, &lif_ops);
- subdev->entity.ops = &vsp1_media_ops;
+ subdev->entity.ops = &vsp1->media_ops;
subdev->internal_ops = &vsp1_subdev_internal_ops;
snprintf(subdev->name, sizeof(subdev->name), "%s lif",
dev_name(vsp1->dev));
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index 656ec272a414..4b89095e7b5f 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -27,11 +27,6 @@
* Device Access
*/
-static inline u32 vsp1_lut_read(struct vsp1_lut *lut, u32 reg)
-{
- return vsp1_read(lut->entity.vsp1, reg);
-}
-
static inline void vsp1_lut_write(struct vsp1_lut *lut, u32 reg, u32 data)
{
vsp1_write(lut->entity.vsp1, reg, data);
@@ -242,7 +237,7 @@ struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
subdev = &lut->entity.subdev;
v4l2_subdev_init(subdev, &lut_ops);
- subdev->entity.ops = &vsp1_media_ops;
+ subdev->entity.ops = &vsp1->media_ops;
subdev->internal_ops = &vsp1_subdev_internal_ops;
snprintf(subdev->name, sizeof(subdev->name), "%s lut",
dev_name(vsp1->dev));
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
new file mode 100644
index 000000000000..6659f06b1643
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -0,0 +1,426 @@
+/*
+ * vsp1_pipe.c -- R-Car VSP1 Pipeline
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_bru.h"
+#include "vsp1_dl.h"
+#include "vsp1_entity.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_uds.h"
+
+/* -----------------------------------------------------------------------------
+ * Helper Functions
+ */
+
+static const struct vsp1_format_info vsp1_video_formats[] = {
+ { V4L2_PIX_FMT_RGB332, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 8, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XRGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_BGR24, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGB24, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ABGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XRGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, true, 2, 1, false },
+ { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, true, false, 2, 1, false },
+ { V4L2_PIX_FMT_YVYU, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, true, true, 2, 1, false },
+ { V4L2_PIX_FMT_NV12M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, false, 2, 2, false },
+ { V4L2_PIX_FMT_NV21M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, true, 2, 2, false },
+ { V4L2_PIX_FMT_NV16M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_NV61M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, true, 2, 1, false },
+ { V4L2_PIX_FMT_YUV420M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 2, 2, false },
+ { V4L2_PIX_FMT_YVU420M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, true, 2, 2, false },
+ { V4L2_PIX_FMT_YUV422M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_YVU422M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, true, 2, 1, false },
+ { V4L2_PIX_FMT_YUV444M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_YVU444M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, true, 1, 1, false },
+};
+
+/*
+ * vsp1_get_format_info - Retrieve format information for a 4CC
+ * @fourcc: the format 4CC
+ *
+ * Return a pointer to the format information structure corresponding to the
+ * given V4L2 format 4CC, or NULL if no corresponding format can be found.
+ */
+const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
+ const struct vsp1_format_info *info = &vsp1_video_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Management
+ */
+
+void vsp1_pipeline_reset(struct vsp1_pipeline *pipe)
+{
+ unsigned int i;
+
+ if (pipe->bru) {
+ struct vsp1_bru *bru = to_bru(&pipe->bru->subdev);
+
+ for (i = 0; i < ARRAY_SIZE(bru->inputs); ++i)
+ bru->inputs[i].rpf = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i)
+ pipe->inputs[i] = NULL;
+
+ INIT_LIST_HEAD(&pipe->entities);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+ pipe->buffers_ready = 0;
+ pipe->num_inputs = 0;
+ pipe->output = NULL;
+ pipe->bru = NULL;
+ pipe->lif = NULL;
+ pipe->uds = NULL;
+}
+
+void vsp1_pipeline_init(struct vsp1_pipeline *pipe)
+{
+ mutex_init(&pipe->lock);
+ spin_lock_init(&pipe->irqlock);
+ init_waitqueue_head(&pipe->wq);
+
+ INIT_LIST_HEAD(&pipe->entities);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+}
+
+void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+
+ if (pipe->state == VSP1_PIPELINE_STOPPED) {
+ vsp1_write(vsp1, VI6_CMD(pipe->output->entity.index),
+ VI6_CMD_STRCMD);
+ pipe->state = VSP1_PIPELINE_RUNNING;
+ }
+
+ pipe->buffers_ready = 0;
+}
+
+bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
+{
+ unsigned long flags;
+ bool stopped;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ stopped = pipe->state == VSP1_PIPELINE_STOPPED;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ return stopped;
+}
+
+int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_entity *entity;
+ unsigned long flags;
+ int ret;
+
+ if (pipe->dl) {
+ /* When using display lists in continuous frame mode the only
+ * way to stop the pipeline is to reset the hardware.
+ */
+ ret = vsp1_reset_wpf(pipe->output->entity.vsp1,
+ pipe->output->entity.index);
+ if (ret == 0) {
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+ }
+ } else {
+ /* Otherwise just request a stop and wait. */
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (pipe->state == VSP1_PIPELINE_RUNNING)
+ pipe->state = VSP1_PIPELINE_STOPPING;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
+ msecs_to_jiffies(500));
+ ret = ret == 0 ? -ETIMEDOUT : 0;
+ }
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ if (entity->route && entity->route->reg)
+ vsp1_write(entity->vsp1, entity->route->reg,
+ VI6_DPR_NODE_UNUSED);
+
+ v4l2_subdev_call(&entity->subdev, video, s_stream, 0);
+ }
+
+ return ret;
+}
+
+bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
+{
+ unsigned int mask;
+
+ mask = ((1 << pipe->num_inputs) - 1) << 1;
+ if (!pipe->lif)
+ mask |= 1 << 0;
+
+ return pipe->buffers_ready == mask;
+}
+
+void vsp1_pipeline_display_start(struct vsp1_pipeline *pipe)
+{
+ if (pipe->dl)
+ vsp1_dl_irq_display_start(pipe->dl);
+}
+
+void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
+{
+ enum vsp1_pipeline_state state;
+ unsigned long flags;
+
+ if (pipe == NULL)
+ return;
+
+ if (pipe->dl)
+ vsp1_dl_irq_frame_end(pipe->dl);
+
+ /* Signal frame end to the pipeline handler. */
+ pipe->frame_end(pipe);
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ state = pipe->state;
+
+ /* When using display lists in continuous frame mode the pipeline is
+ * automatically restarted by the hardware.
+ */
+ if (!pipe->dl)
+ pipe->state = VSP1_PIPELINE_STOPPED;
+
+ /* If a stop has been requested, wake up the waiting threads and
+ * return.
+ */
+ if (state == VSP1_PIPELINE_STOPPING) {
+ wake_up(&pipe->wq);
+ goto done;
+ }
+
+ /* Restart the pipeline if ready. */
+ if (vsp1_pipeline_ready(pipe))
+ vsp1_pipeline_run(pipe);
+
+done:
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+/*
+ * Propagate the alpha value through the pipeline.
+ *
+ * As the UDS has restricted scaling capabilities when the alpha component needs
+ * to be scaled, we disable alpha scaling when the UDS input has a fixed alpha
+ * value. The UDS then outputs a fixed alpha value which needs to be programmed
+ * from the input RPF alpha.
+ */
+void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
+ struct vsp1_entity *input,
+ unsigned int alpha)
+{
+ struct vsp1_entity *entity;
+ struct media_pad *pad;
+
+ pad = media_entity_remote_pad(&input->pads[RWPF_PAD_SOURCE]);
+
+ while (pad) {
+ if (!is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = to_vsp1_entity(media_entity_to_v4l2_subdev(pad->entity));
+
+ /* The BRU background color has a fixed alpha value set to 255,
+ * the output alpha value is thus always equal to 255.
+ */
+ if (entity->type == VSP1_ENTITY_BRU)
+ alpha = 255;
+
+ if (entity->type == VSP1_ENTITY_UDS) {
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+
+ vsp1_uds_set_alpha(uds, alpha);
+ break;
+ }
+
+ pad = &entity->pads[entity->source_pad];
+ pad = media_entity_remote_pad(pad);
+ }
+}
+
+void vsp1_pipelines_suspend(struct vsp1_device *vsp1)
+{
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
+ /* To avoid increasing the system suspend time needlessly, loop over the
+ * pipelines twice, first to set them all to the stopping state, and
+ * then to wait for the stop to complete.
+ */
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ if (pipe == NULL)
+ continue;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (pipe->state == VSP1_PIPELINE_RUNNING)
+ pipe->state = VSP1_PIPELINE_STOPPING;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+ }
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ if (pipe == NULL)
+ continue;
+
+ ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
+ wpf->entity.index);
+ }
+}
+
+void vsp1_pipelines_resume(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+
+ /* Resume all running pipelines. */
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ if (pipe == NULL)
+ continue;
+
+ if (vsp1_pipeline_ready(pipe))
+ vsp1_pipeline_run(pipe);
+ }
+}
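
vsp1_pipeline_ready() packs buffer availability into a bitmask: bit 0 stands for the WPF memory output (not required in LIF/display mode) and bits 1..num_inputs for the RPFs, matching each video node's pipe_index. A self-contained sketch of the mask arithmetic, with made-up values:

#include <stdbool.h>
#include <stdio.h>

static bool pipeline_ready(unsigned int buffers_ready,
			   unsigned int num_inputs, bool has_lif)
{
	/* One bit per input, starting at bit 1 (bit 0 is the output WPF). */
	unsigned int mask = ((1u << num_inputs) - 1) << 1;

	if (!has_lif)		/* memory output also needs a buffer */
		mask |= 1u << 0;

	return buffers_ready == mask;
}

int main(void)
{
	/* Two RPF inputs and a memory WPF: bits 1, 2 and 0 must all be set. */
	printf("%d\n", pipeline_ready(0x7, 2, false));	/* 1: ready */
	printf("%d\n", pipeline_ready(0x6, 2, false));	/* 0: output buffer missing */
	printf("%d\n", pipeline_ready(0x6, 2, true));	/* 1: LIF, no output needed */
	return 0;
}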
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
new file mode 100644
index 000000000000..b2f3a8a896c9
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -0,0 +1,134 @@
+/*
+ * vsp1_pipe.h -- R-Car VSP1 Pipeline
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_PIPE_H__
+#define __VSP1_PIPE_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+
+struct vsp1_dl;
+struct vsp1_rwpf;
+
+/*
+ * struct vsp1_format_info - VSP1 video format description
+ * @mbus: media bus format code
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @planes: number of planes
+ * @bpp: bits per pixel
+ * @hwfmt: VSP1 hardware format
+ * @swap: swap register control
+ * @swap_yc: the Y and C components are swapped (Y comes before C)
+ * @swap_uv: the U and V components are swapped (V comes before U)
+ * @hsub: horizontal subsampling factor
+ * @vsub: vertical subsampling factor
+ * @alpha: has an alpha channel
+ */
+struct vsp1_format_info {
+ u32 fourcc;
+ unsigned int mbus;
+ unsigned int hwfmt;
+ unsigned int swap;
+ unsigned int planes;
+ unsigned int bpp[3];
+ bool swap_yc;
+ bool swap_uv;
+ unsigned int hsub;
+ unsigned int vsub;
+ bool alpha;
+};
+
+enum vsp1_pipeline_state {
+ VSP1_PIPELINE_STOPPED,
+ VSP1_PIPELINE_RUNNING,
+ VSP1_PIPELINE_STOPPING,
+};
+
+/*
+ * struct vsp1_pipeline - A VSP1 hardware pipeline
+ * @pipe: the media pipeline
+ * @irqlock: protects the pipeline state
+ * @state: current state
+ * @wq: work queue to wait for state change completion
+ * @frame_end: frame end interrupt handler
+ * @lock: protects the pipeline use count and stream count
+ * @use_count: number of video nodes using the pipeline
+ * @stream_count: number of streaming video nodes
+ * @buffers_ready: bitmask of RPFs and WPFs with at least one buffer available
+ * @num_inputs: number of RPFs
+ * @inputs: array of RPFs in the pipeline (indexed by RPF index)
+ * @output: WPF at the output of the pipeline
+ * @bru: BRU entity, if present
+ * @lif: LIF entity, if present
+ * @uds: UDS entity, if present
+ * @uds_input: entity at the input of the UDS, if the UDS is present
+ * @entities: list of entities in the pipeline
+ * @dl: display list associated with the pipeline
+ */
+struct vsp1_pipeline {
+ struct media_pipeline pipe;
+
+ spinlock_t irqlock;
+ enum vsp1_pipeline_state state;
+ wait_queue_head_t wq;
+
+ void (*frame_end)(struct vsp1_pipeline *pipe);
+
+ struct mutex lock;
+ unsigned int use_count;
+ unsigned int stream_count;
+ unsigned int buffers_ready;
+
+ unsigned int num_inputs;
+ struct vsp1_rwpf *inputs[VSP1_MAX_RPF];
+ struct vsp1_rwpf *output;
+ struct vsp1_entity *bru;
+ struct vsp1_entity *lif;
+ struct vsp1_entity *uds;
+ struct vsp1_entity *uds_input;
+
+ struct list_head entities;
+
+ struct vsp1_dl *dl;
+};
+
+static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
+{
+ if (likely(e->pipe))
+ return container_of(e->pipe, struct vsp1_pipeline, pipe);
+ else
+ return NULL;
+}
+
+void vsp1_pipeline_reset(struct vsp1_pipeline *pipe);
+void vsp1_pipeline_init(struct vsp1_pipeline *pipe);
+
+void vsp1_pipeline_run(struct vsp1_pipeline *pipe);
+bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe);
+int vsp1_pipeline_stop(struct vsp1_pipeline *pipe);
+bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe);
+
+void vsp1_pipeline_display_start(struct vsp1_pipeline *pipe);
+void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe);
+
+void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
+ struct vsp1_entity *input,
+ unsigned int alpha);
+
+void vsp1_pipelines_suspend(struct vsp1_device *vsp1);
+void vsp1_pipelines_resume(struct vsp1_device *vsp1);
+
+const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc);
+
+#endif /* __VSP1_PIPE_H__ */
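
to_vsp1_pipeline() only works because struct vsp1_pipeline embeds struct media_pipeline by value, so container_of() can walk back from the embedded member to the enclosing pipeline. A minimal illustration of the pattern with simplified types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct media_pipeline { int dummy; };

struct pipeline {
	struct media_pipeline pipe;	/* embedded by value, not a pointer */
	int state;
};

int main(void)
{
	struct pipeline p = { .state = 42 };
	struct media_pipeline *mp = &p.pipe;	/* what entity->pipe points at */

	/* Walk back from the embedded member to the containing structure. */
	struct pipeline *back = container_of(mp, struct pipeline, pipe);

	printf("%d\n", back->state);	/* prints 42 */
	return 0;
}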
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 25b48738b147..069216f0eb44 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -46,7 +46,7 @@
#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << (n))
#define VI6_DISP_IRQ_STA 0x007c
-#define VI6_DISP_IRQ_STA_DSE (1 << 8)
+#define VI6_DISP_IRQ_STA_DST (1 << 8)
#define VI6_DISP_IRQ_STA_MAE (1 << 5)
#define VI6_DISP_IRQ_STA_LNE(n) (1 << (n))
@@ -322,7 +322,7 @@
#define VI6_DPR_NODE_SRU 16
#define VI6_DPR_NODE_UDS(n) (17 + (n))
#define VI6_DPR_NODE_LUT 22
-#define VI6_DPR_NODE_BRU_IN(n) (23 + (n))
+#define VI6_DPR_NODE_BRU_IN(n) (((n) <= 3) ? 23 + (n) : 49)
#define VI6_DPR_NODE_BRU_OUT 27
#define VI6_DPR_NODE_CLU 29
#define VI6_DPR_NODE_HST 30
@@ -504,12 +504,12 @@
#define VI6_BRU_VIRRPF_COL_BCB_MASK (0xff << 0)
#define VI6_BRU_VIRRPF_COL_BCB_SHIFT 0
-#define VI6_BRU_CTRL(n) (0x2c10 + (n) * 8)
+#define VI6_BRU_CTRL(n) (0x2c10 + (n) * 8 + ((n) <= 3 ? 0 : 4))
#define VI6_BRU_CTRL_RBC (1 << 31)
-#define VI6_BRU_CTRL_DSTSEL_BRUIN(n) ((n) << 20)
+#define VI6_BRU_CTRL_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20)
#define VI6_BRU_CTRL_DSTSEL_VRPF (4 << 20)
#define VI6_BRU_CTRL_DSTSEL_MASK (7 << 20)
-#define VI6_BRU_CTRL_SRCSEL_BRUIN(n) ((n) << 16)
+#define VI6_BRU_CTRL_SRCSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 16)
#define VI6_BRU_CTRL_SRCSEL_VRPF (4 << 16)
#define VI6_BRU_CTRL_SRCSEL_MASK (7 << 16)
#define VI6_BRU_CTRL_CROP(rop) ((rop) << 4)
@@ -517,7 +517,7 @@
#define VI6_BRU_CTRL_AROP(rop) ((rop) << 0)
#define VI6_BRU_CTRL_AROP_MASK (0xf << 0)
-#define VI6_BRU_BLD(n) (0x2c14 + (n) * 8)
+#define VI6_BRU_BLD(n) (0x2c14 + (n) * 8 + ((n) <= 3 ? 0 : 4))
#define VI6_BRU_BLD_CBES (1 << 31)
#define VI6_BRU_BLD_CCMDX_DST_A (0 << 28)
#define VI6_BRU_BLD_CCMDX_255_DST_A (1 << 28)
@@ -551,7 +551,7 @@
#define VI6_BRU_BLD_COEFY_SHIFT 0
#define VI6_BRU_ROP 0x2c30
-#define VI6_BRU_ROP_DSTSEL_BRUIN(n) ((n) << 20)
+#define VI6_BRU_ROP_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20)
#define VI6_BRU_ROP_DSTSEL_VRPF (4 << 20)
#define VI6_BRU_ROP_DSTSEL_MASK (7 << 20)
#define VI6_BRU_ROP_CROP(rop) ((rop) << 4)
@@ -625,6 +625,24 @@
#define VI6_SECURITY_CTRL1 0x3d04
/* -----------------------------------------------------------------------------
+ * IP Version Registers
+ */
+
+#define VI6_IP_VERSION 0x3f00
+#define VI6_IP_VERSION_MODEL_MASK (0xff << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_H2 (0x09 << 8)
+#define VI6_IP_VERSION_MODEL_VSPR_H2 (0x0a << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_GEN2 (0x0b << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_M2 (0x0c << 8)
+#define VI6_IP_VERSION_MODEL_VSPI_GEN3 (0x14 << 8)
+#define VI6_IP_VERSION_MODEL_VSPBD_GEN3 (0x15 << 8)
+#define VI6_IP_VERSION_MODEL_VSPBC_GEN3 (0x16 << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_GEN3 (0x17 << 8)
+#define VI6_IP_VERSION_SOC_MASK (0xff << 0)
+#define VI6_IP_VERSION_SOC_H (0x01 << 0)
+#define VI6_IP_VERSION_SOC_M (0x02 << 0)
+
+/* -----------------------------------------------------------------------------
* RPF CLUT Registers
*/
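
The conditional expressions above splice over a register gap: BRU inputs 0-3 have contiguous CTRL/BLD registers and DPR nodes 23-26, while input 4 sits after VI6_BRU_ROP (hence the extra 4-byte offset), uses DPR node 49, and is encoded as source/destination 5 because value 4 selects the virtual RPF. A quick sketch that prints the resulting addresses and nodes:

#include <stdio.h>

/* Same gap-splicing expressions as the macros above. */
static unsigned int bru_ctrl(unsigned int n)
{
	return 0x2c10 + n * 8 + (n <= 3 ? 0 : 4);
}

static unsigned int bru_in_node(unsigned int n)
{
	return n <= 3 ? 23 + n : 49;
}

int main(void)
{
	unsigned int n;

	/* Inputs 0-3 are contiguous; input 4 skips past VI6_BRU_ROP at 0x2c30. */
	for (n = 0; n < 5; ++n)
		printf("BRU input %u: CTRL at 0x%04x, DPR node %u\n",
		       n, bru_ctrl(n), bru_in_node(n));
	return 0;
}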
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 924538223d3e..5bc1d1574a43 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -26,16 +26,10 @@
* Device Access
*/
-static inline u32 vsp1_rpf_read(struct vsp1_rwpf *rpf, u32 reg)
-{
- return vsp1_read(rpf->entity.vsp1,
- reg + rpf->entity.index * VI6_RPF_OFFSET);
-}
-
static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf, u32 reg, u32 data)
{
- vsp1_write(rpf->entity.vsp1,
- reg + rpf->entity.index * VI6_RPF_OFFSET, data);
+ vsp1_mod_write(&rpf->entity, reg + rpf->entity.index * VI6_RPF_OFFSET,
+ data);
}
/* -----------------------------------------------------------------------------
@@ -74,9 +68,11 @@ static const struct v4l2_ctrl_ops rpf_ctrl_ops = {
static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
{
+ struct vsp1_pipeline *pipe = to_vsp1_pipeline(&subdev->entity);
struct vsp1_rwpf *rpf = to_rwpf(subdev);
- const struct vsp1_format_info *fmtinfo = rpf->video.fmtinfo;
- const struct v4l2_pix_format_mplane *format = &rpf->video.format;
+ struct vsp1_device *vsp1 = rpf->entity.vsp1;
+ const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
+ const struct v4l2_pix_format_mplane *format = &rpf->format;
const struct v4l2_rect *crop = &rpf->crop;
u32 pstride;
u32 infmt;
@@ -154,6 +150,15 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
vsp1_rpf_write(rpf, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_AEXT_EXT |
(fmtinfo->alpha ? VI6_RPF_ALPH_SEL_ASEL_PACKED
: VI6_RPF_ALPH_SEL_ASEL_FIXED));
+
+ if (vsp1->info->uapi)
+ mutex_lock(rpf->ctrls.lock);
+ vsp1_rpf_write(rpf, VI6_RPF_VRTCOL_SET,
+ rpf->alpha->cur.val << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
+ vsp1_pipeline_propagate_alpha(pipe, &rpf->entity, rpf->alpha->cur.val);
+ if (vsp1->info->uapi)
+ mutex_unlock(rpf->ctrls.lock);
+
vsp1_rpf_write(rpf, VI6_RPF_MSK_CTRL, 0);
vsp1_rpf_write(rpf, VI6_RPF_CKEY_CTRL, 0);
@@ -186,30 +191,28 @@ static struct v4l2_subdev_ops rpf_ops = {
* Video Device Operations
*/
-static void rpf_vdev_queue(struct vsp1_video *video,
- struct vsp1_video_buffer *buf)
+static void rpf_set_memory(struct vsp1_rwpf *rpf, struct vsp1_rwpf_memory *mem)
{
- struct vsp1_rwpf *rpf = container_of(video, struct vsp1_rwpf, video);
unsigned int i;
for (i = 0; i < 3; ++i)
- rpf->buf_addr[i] = buf->addr[i];
+ rpf->buf_addr[i] = mem->addr[i];
if (!vsp1_entity_is_streaming(&rpf->entity))
return;
vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
- buf->addr[0] + rpf->offsets[0]);
- if (buf->buf.vb2_buf.num_planes > 1)
+ mem->addr[0] + rpf->offsets[0]);
+ if (mem->num_planes > 1)
vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
- buf->addr[1] + rpf->offsets[1]);
- if (buf->buf.vb2_buf.num_planes > 2)
+ mem->addr[1] + rpf->offsets[1]);
+ if (mem->num_planes > 2)
vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
- buf->addr[2] + rpf->offsets[1]);
+ mem->addr[2] + rpf->offsets[1]);
}
-static const struct vsp1_video_operations rpf_vdev_ops = {
- .queue = rpf_vdev_queue,
+static const struct vsp1_rwpf_operations rpf_vdev_ops = {
+ .set_memory = rpf_set_memory,
};
/* -----------------------------------------------------------------------------
@@ -219,7 +222,6 @@ static const struct vsp1_video_operations rpf_vdev_ops = {
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
{
struct v4l2_subdev *subdev;
- struct vsp1_video *video;
struct vsp1_rwpf *rpf;
int ret;
@@ -227,6 +229,8 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
if (rpf == NULL)
return ERR_PTR(-ENOMEM);
+ rpf->ops = &rpf_vdev_ops;
+
rpf->max_width = RPF_MAX_WIDTH;
rpf->max_height = RPF_MAX_HEIGHT;
@@ -241,7 +245,7 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
subdev = &rpf->entity.subdev;
v4l2_subdev_init(subdev, &rpf_ops);
- subdev->entity.ops = &vsp1_media_ops;
+ subdev->entity.ops = &vsp1->media_ops;
subdev->internal_ops = &vsp1_subdev_internal_ops;
snprintf(subdev->name, sizeof(subdev->name), "%s rpf.%u",
dev_name(vsp1->dev), index);
@@ -252,8 +256,9 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
/* Initialize the control handler. */
v4l2_ctrl_handler_init(&rpf->ctrls, 1);
- v4l2_ctrl_new_std(&rpf->ctrls, &rpf_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
- 0, 255, 1, 255);
+ rpf->alpha = v4l2_ctrl_new_std(&rpf->ctrls, &rpf_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, 255, 1, 255);
rpf->entity.subdev.ctrl_handler = &rpf->ctrls;
@@ -264,42 +269,9 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
goto error;
}
- /* Initialize the video device. */
- video = &rpf->video;
-
- video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- video->vsp1 = vsp1;
- video->ops = &rpf_vdev_ops;
-
- ret = vsp1_video_init(video, &rpf->entity);
- if (ret < 0)
- goto error;
-
- rpf->entity.video = video;
-
return rpf;
error:
vsp1_entity_destroy(&rpf->entity);
return ERR_PTR(ret);
}
-
-/*
- * vsp1_rpf_create_links() - RPF pads links creation
- * @vsp1: Pointer to VSP1 device
- * @entity: Pointer to VSP1 entity
- *
- * return negative error code or zero on success
- */
-int vsp1_rpf_create_links(struct vsp1_device *vsp1,
- struct vsp1_entity *entity)
-{
- struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
-
- /* Connect the video device to the RPF. */
- return media_create_pad_link(&rpf->video.video.entity, 0,
- &rpf->entity.subdev.entity,
- RWPF_PAD_SINK,
- MEDIA_LNK_FL_ENABLED |
- MEDIA_LNK_FL_IMMUTABLE);
-}
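
Note the conditional locking introduced in rpf_s_stream(): the control handler mutex is only taken when the userspace API is enabled, since without it no concurrent control writes can race with the alpha readout. A compact pthreads sketch of that guarded-lock pattern (purely illustrative, the driver itself uses the V4L2 control handler lock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	bool uapi;		/* is a concurrent user-space writer possible? */
	pthread_mutex_t lock;
	int alpha;
};

static int read_alpha(struct ctx *c)
{
	int val;

	if (c->uapi)
		pthread_mutex_lock(&c->lock);
	val = c->alpha;		/* consistent snapshot while programming hw */
	if (c->uapi)
		pthread_mutex_unlock(&c->lock);
	return val;
}

int main(void)
{
	struct ctx c = { .uapi = true, .lock = PTHREAD_MUTEX_INITIALIZER,
			 .alpha = 255 };

	printf("%d\n", read_alpha(&c));
	return 0;
}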
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 731d36e5258d..8e8235682ada 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -19,19 +19,39 @@
#include "vsp1.h"
#include "vsp1_entity.h"
-#include "vsp1_video.h"
#define RWPF_PAD_SINK 0
#define RWPF_PAD_SOURCE 1
+struct v4l2_ctrl;
+struct vsp1_rwpf;
+struct vsp1_video;
+
+struct vsp1_rwpf_memory {
+ unsigned int num_planes;
+ dma_addr_t addr[3];
+ unsigned int length[3];
+};
+
+struct vsp1_rwpf_operations {
+ void (*set_memory)(struct vsp1_rwpf *rwpf,
+ struct vsp1_rwpf_memory *mem);
+};
+
struct vsp1_rwpf {
struct vsp1_entity entity;
- struct vsp1_video video;
struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *alpha;
+
+ struct vsp1_video *video;
+
+ const struct vsp1_rwpf_operations *ops;
unsigned int max_width;
unsigned int max_height;
+ struct v4l2_pix_format_mplane format;
+ const struct vsp1_format_info *fmtinfo;
struct {
unsigned int left;
unsigned int top;
@@ -50,11 +70,6 @@ static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
-int vsp1_rpf_create_links(struct vsp1_device *vsp1,
- struct vsp1_entity *entity);
-int vsp1_wpf_create_links(struct vsp1_device *vsp1,
- struct vsp1_entity *entity);
-
int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code);
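
The header changes invert the old dependency: the RPF/WPF no longer embeds a video node, the video node instead drives its rwpf through the new vsp1_rwpf_operations table, which lets the same entity code serve both the V4L2 and the upcoming DRM paths. A reduced model of the callback indirection, with simplified payload types:

#include <stdio.h>

struct rwpf_memory {
	unsigned int num_planes;
	unsigned long addr[3];
};

struct rwpf;

struct rwpf_operations {
	void (*set_memory)(struct rwpf *rwpf, struct rwpf_memory *mem);
};

struct rwpf {
	const struct rwpf_operations *ops;
	unsigned long buf_addr[3];
};

static void rpf_set_memory(struct rwpf *rwpf, struct rwpf_memory *mem)
{
	unsigned int i;

	for (i = 0; i < mem->num_planes; ++i)
		rwpf->buf_addr[i] = mem->addr[i];	/* cache; hw write elided */
}

static const struct rwpf_operations rpf_ops = { .set_memory = rpf_set_memory };

int main(void)
{
	struct rwpf rpf = { .ops = &rpf_ops };
	struct rwpf_memory mem = { .num_planes = 2, .addr = { 0x1000, 0x2000 } };

	/* The video layer only sees the ops table, not the RPF internals. */
	rpf.ops->set_memory(&rpf, &mem);
	printf("plane0 at 0x%lx\n", rpf.buf_addr[0]);
	return 0;
}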
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index 6310acab60e7..cc09efbfb24f 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -151,10 +151,13 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
/* Take the control handler lock to ensure that the CTRL0 value won't be
* changed behind our back by a set control operation.
*/
- mutex_lock(sru->ctrls.lock);
+ if (sru->entity.vsp1->info->uapi)
+ mutex_lock(sru->ctrls.lock);
ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
& (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
- mutex_unlock(sru->ctrls.lock);
+ vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
+ if (sru->entity.vsp1->info->uapi)
+ mutex_unlock(sru->ctrls.lock);
vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
@@ -360,7 +363,7 @@ struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
subdev = &sru->entity.subdev;
v4l2_subdev_init(subdev, &sru_ops);
- subdev->entity.ops = &vsp1_media_ops;
+ subdev->entity.ops = &vsp1->media_ops;
subdev->internal_ops = &vsp1_subdev_internal_ops;
snprintf(subdev->name, sizeof(subdev->name), "%s sru",
dev_name(vsp1->dev));
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index ccc8243e3493..bba67770cf95 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -29,12 +29,6 @@
* Device Access
*/
-static inline u32 vsp1_uds_read(struct vsp1_uds *uds, u32 reg)
-{
- return vsp1_read(uds->entity.vsp1,
- reg + uds->entity.index * VI6_UDS_OFFSET);
-}
-
static inline void vsp1_uds_write(struct vsp1_uds *uds, u32 reg, u32 data)
{
vsp1_write(uds->entity.vsp1,
@@ -344,7 +338,7 @@ struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index)
subdev = &uds->entity.subdev;
v4l2_subdev_init(subdev, &uds_ops);
- subdev->entity.ops = &vsp1_media_ops;
+ subdev->entity.ops = &vsp1->media_ops;
subdev->internal_ops = &vsp1_subdev_internal_ops;
snprintf(subdev->name, sizeof(subdev->name), "%s uds.%u",
dev_name(vsp1->dev), index);
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index b4dca57d1ae3..72cc7d3729f8 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -14,10 +14,10 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
+#include <linux/wait.h>
#include <media/media-entity.h>
#include <media/v4l2-dev.h>
@@ -30,6 +30,7 @@
#include "vsp1.h"
#include "vsp1_bru.h"
#include "vsp1_entity.h"
+#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
#include "vsp1_video.h"
@@ -47,113 +48,6 @@
* Helper functions
*/
-static const struct vsp1_format_info vsp1_video_formats[] = {
- { V4L2_PIX_FMT_RGB332, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 8, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_ARGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS,
- 1, { 16, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS,
- 1, { 16, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS,
- 1, { 16, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_XRGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS,
- 1, { 16, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS,
- 1, { 16, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_BGR24, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 24, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_RGB24, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 24, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_ABGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
- 1, { 32, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
- 1, { 32, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 32, 0, 0 }, false, false, 1, 1, true },
- { V4L2_PIX_FMT_XRGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
- VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 32, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 16, 0, 0 }, false, false, 2, 1, false },
- { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 16, 0, 0 }, false, true, 2, 1, false },
- { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 16, 0, 0 }, true, false, 2, 1, false },
- { V4L2_PIX_FMT_YVYU, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 16, 0, 0 }, true, true, 2, 1, false },
- { V4L2_PIX_FMT_NV12M, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 2, { 8, 16, 0 }, false, false, 2, 2, false },
- { V4L2_PIX_FMT_NV21M, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 2, { 8, 16, 0 }, false, true, 2, 2, false },
- { V4L2_PIX_FMT_NV16M, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 2, { 8, 16, 0 }, false, false, 2, 1, false },
- { V4L2_PIX_FMT_NV61M, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 2, { 8, 16, 0 }, false, true, 2, 1, false },
- { V4L2_PIX_FMT_YUV420M, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 3, { 8, 8, 8 }, false, false, 2, 2, false },
-};
-
-/*
- * vsp1_get_format_info - Retrieve format information for a 4CC
- * @fourcc: the format 4CC
- *
- * Return a pointer to the format information structure corresponding to the
- * given V4L2 format 4CC, or NULL if no corresponding format can be found.
- */
-static const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
- const struct vsp1_format_info *info = &vsp1_video_formats[i];
-
- if (info->fourcc == fourcc)
- return info;
- }
-
- return NULL;
-}
-
-
static struct v4l2_subdev *
vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
{
@@ -184,9 +78,9 @@ static int vsp1_video_verify_format(struct vsp1_video *video)
if (ret < 0)
return ret == -ENOIOCTLCMD ? -EINVAL : ret;
- if (video->fmtinfo->mbus != fmt.format.code ||
- video->format.height != fmt.format.height ||
- video->format.width != fmt.format.width)
+ if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
+ video->rwpf->format.height != fmt.format.height ||
+ video->rwpf->format.width != fmt.format.width)
return -EINVAL;
return 0;
@@ -277,9 +171,9 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
* Pipeline Management
*/
-static int vsp1_pipeline_validate_branch(struct vsp1_pipeline *pipe,
- struct vsp1_rwpf *input,
- struct vsp1_rwpf *output)
+static int vsp1_video_pipeline_validate_branch(struct vsp1_pipeline *pipe,
+ struct vsp1_rwpf *input,
+ struct vsp1_rwpf *output)
{
struct vsp1_entity *entity;
struct media_entity_enum ent_enum;
@@ -370,29 +264,8 @@ out:
return rval;
}
-static void __vsp1_pipeline_cleanup(struct vsp1_pipeline *pipe)
-{
- if (pipe->bru) {
- struct vsp1_bru *bru = to_bru(&pipe->bru->subdev);
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(bru->inputs); ++i)
- bru->inputs[i].rpf = NULL;
- }
-
- INIT_LIST_HEAD(&pipe->entities);
- pipe->state = VSP1_PIPELINE_STOPPED;
- pipe->buffers_ready = 0;
- pipe->num_video = 0;
- pipe->num_inputs = 0;
- pipe->output = NULL;
- pipe->bru = NULL;
- pipe->lif = NULL;
- pipe->uds = NULL;
-}
-
-static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe,
- struct vsp1_video *video)
+static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
{
struct media_entity_graph graph;
struct media_entity *entity = &video->video.entity;
@@ -416,10 +289,8 @@ static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe,
struct vsp1_rwpf *rwpf;
struct vsp1_entity *e;
- if (is_media_entity_v4l2_io(entity)) {
- pipe->num_video++;
+ if (!is_media_entity_v4l2_subdev(entity))
continue;
- }
subdev = media_entity_to_v4l2_subdev(entity);
e = to_vsp1_entity(subdev);
@@ -427,12 +298,12 @@ static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe,
if (e->type == VSP1_ENTITY_RPF) {
rwpf = to_rwpf(subdev);
- pipe->inputs[pipe->num_inputs++] = rwpf;
- rwpf->video.pipe_index = pipe->num_inputs;
+ pipe->inputs[rwpf->entity.index] = rwpf;
+ rwpf->video->pipe_index = ++pipe->num_inputs;
} else if (e->type == VSP1_ENTITY_WPF) {
rwpf = to_rwpf(subdev);
- pipe->output = to_rwpf(subdev);
- rwpf->video.pipe_index = 0;
+ pipe->output = rwpf;
+ rwpf->video->pipe_index = 0;
} else if (e->type == VSP1_ENTITY_LIF) {
pipe->lif = e;
} else if (e->type == VSP1_ENTITY_BRU) {
@@ -453,9 +324,12 @@ static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe,
/* Follow links downstream for each input and make sure the graph
* contains no loop and that all branches end at the output WPF.
*/
- for (i = 0; i < pipe->num_inputs; ++i) {
- ret = vsp1_pipeline_validate_branch(pipe, pipe->inputs[i],
- pipe->output);
+ for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
+ if (!pipe->inputs[i])
+ continue;
+
+ ret = vsp1_video_pipeline_validate_branch(pipe, pipe->inputs[i],
+ pipe->output);
if (ret < 0)
goto error;
}
@@ -463,12 +337,12 @@ static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe,
return 0;
error:
- __vsp1_pipeline_cleanup(pipe);
+ vsp1_pipeline_reset(pipe);
return ret;
}
-static int vsp1_pipeline_init(struct vsp1_pipeline *pipe,
- struct vsp1_video *video)
+static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
{
int ret;
@@ -476,7 +350,7 @@ static int vsp1_pipeline_init(struct vsp1_pipeline *pipe,
/* If we're the first user validate and initialize the pipeline. */
if (pipe->use_count == 0) {
- ret = vsp1_pipeline_validate(pipe, video);
+ ret = vsp1_video_pipeline_validate(pipe, video);
if (ret < 0)
goto done;
}
@@ -489,75 +363,17 @@ done:
return ret;
}
-static void vsp1_pipeline_cleanup(struct vsp1_pipeline *pipe)
+static void vsp1_video_pipeline_cleanup(struct vsp1_pipeline *pipe)
{
mutex_lock(&pipe->lock);
/* If we're the last user clean up the pipeline. */
if (--pipe->use_count == 0)
- __vsp1_pipeline_cleanup(pipe);
+ vsp1_pipeline_reset(pipe);
mutex_unlock(&pipe->lock);
}
-static void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
-{
- struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
-
- vsp1_write(vsp1, VI6_CMD(pipe->output->entity.index), VI6_CMD_STRCMD);
- pipe->state = VSP1_PIPELINE_RUNNING;
- pipe->buffers_ready = 0;
-}
-
-static bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
-{
- unsigned long flags;
- bool stopped;
-
- spin_lock_irqsave(&pipe->irqlock, flags);
- stopped = pipe->state == VSP1_PIPELINE_STOPPED;
- spin_unlock_irqrestore(&pipe->irqlock, flags);
-
- return stopped;
-}
-
-static int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
-{
- struct vsp1_entity *entity;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&pipe->irqlock, flags);
- if (pipe->state == VSP1_PIPELINE_RUNNING)
- pipe->state = VSP1_PIPELINE_STOPPING;
- spin_unlock_irqrestore(&pipe->irqlock, flags);
-
- ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
- msecs_to_jiffies(500));
- ret = ret == 0 ? -ETIMEDOUT : 0;
-
- list_for_each_entry(entity, &pipe->entities, list_pipe) {
- if (entity->route && entity->route->reg)
- vsp1_write(entity->vsp1, entity->route->reg,
- VI6_DPR_NODE_UNUSED);
-
- v4l2_subdev_call(&entity->subdev, video, s_stream, 0);
- }
-
- return ret;
-}
-
-static bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
-{
- unsigned int mask;
-
- mask = ((1 << pipe->num_inputs) - 1) << 1;
- if (!pipe->lif)
- mask |= 1 << 0;
-
- return pipe->buffers_ready == mask;
-}
-
/*
* vsp1_video_complete_buffer - Complete the current buffer
* @video: the video node
@@ -572,12 +388,12 @@ static bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
*
* Return the next queued buffer or NULL if the queue is empty.
*/
-static struct vsp1_video_buffer *
+static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
- struct vsp1_video_buffer *next = NULL;
- struct vsp1_video_buffer *done;
+ struct vsp1_vb2_buffer *next = NULL;
+ struct vsp1_vb2_buffer *done;
unsigned long flags;
unsigned int i;
@@ -589,7 +405,7 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
}
done = list_first_entry(&video->irqqueue,
- struct vsp1_video_buffer, queue);
+ struct vsp1_vb2_buffer, queue);
/* In DU output mode reuse the buffer if the list is singular. */
if (pipe->lif && list_is_singular(&video->irqqueue)) {
@@ -601,23 +417,25 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
if (!list_empty(&video->irqqueue))
next = list_first_entry(&video->irqqueue,
- struct vsp1_video_buffer, queue);
+ struct vsp1_vb2_buffer, queue);
spin_unlock_irqrestore(&video->irqlock, flags);
done->buf.sequence = video->sequence++;
done->buf.vb2_buf.timestamp = ktime_get_ns();
for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
- vb2_set_plane_payload(&done->buf.vb2_buf, i, done->length[i]);
+ vb2_set_plane_payload(&done->buf.vb2_buf, i,
+ done->mem.length[i]);
vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
return next;
}
static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
- struct vsp1_video *video)
+ struct vsp1_rwpf *rwpf)
{
- struct vsp1_video_buffer *buf;
+ struct vsp1_video *video = rwpf->video;
+ struct vsp1_vb2_buffer *buf;
unsigned long flags;
buf = vsp1_video_complete_buffer(video);
@@ -626,155 +444,27 @@ static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
spin_lock_irqsave(&pipe->irqlock, flags);
- video->ops->queue(video, buf);
+ video->rwpf->ops->set_memory(video->rwpf, &buf->mem);
pipe->buffers_ready |= 1 << video->pipe_index;
spin_unlock_irqrestore(&pipe->irqlock, flags);
}
-void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
+static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
- enum vsp1_pipeline_state state;
- unsigned long flags;
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
unsigned int i;
- if (pipe == NULL)
- return;
-
/* Complete buffers on all video nodes. */
- for (i = 0; i < pipe->num_inputs; ++i)
- vsp1_video_frame_end(pipe, &pipe->inputs[i]->video);
-
- if (!pipe->lif)
- vsp1_video_frame_end(pipe, &pipe->output->video);
-
- spin_lock_irqsave(&pipe->irqlock, flags);
-
- state = pipe->state;
- pipe->state = VSP1_PIPELINE_STOPPED;
-
- /* If a stop has been requested, mark the pipeline as stopped and
- * return.
- */
- if (state == VSP1_PIPELINE_STOPPING) {
- wake_up(&pipe->wq);
- goto done;
- }
-
- /* Restart the pipeline if ready. */
- if (vsp1_pipeline_ready(pipe))
- vsp1_pipeline_run(pipe);
-
-done:
- spin_unlock_irqrestore(&pipe->irqlock, flags);
-}
-
-/*
- * Propagate the alpha value through the pipeline.
- *
- * As the UDS has restricted scaling capabilities when the alpha component needs
- * to be scaled, we disable alpha scaling when the UDS input has a fixed alpha
- * value. The UDS then outputs a fixed alpha value which needs to be programmed
- * from the input RPF alpha.
- */
-void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
- struct vsp1_entity *input,
- unsigned int alpha)
-{
- struct vsp1_entity *entity;
- struct media_pad *pad;
-
- pad = media_entity_remote_pad(&input->pads[RWPF_PAD_SOURCE]);
-
- while (pad) {
- if (!is_media_entity_v4l2_subdev(pad->entity))
- break;
-
- entity = to_vsp1_entity(media_entity_to_v4l2_subdev(pad->entity));
-
- /* The BRU background color has a fixed alpha value set to 255,
- * the output alpha value is thus always equal to 255.
- */
- if (entity->type == VSP1_ENTITY_BRU)
- alpha = 255;
-
- if (entity->type == VSP1_ENTITY_UDS) {
- struct vsp1_uds *uds = to_uds(&entity->subdev);
-
- vsp1_uds_set_alpha(uds, alpha);
- break;
- }
-
- pad = &entity->pads[entity->source_pad];
- pad = media_entity_remote_pad(pad);
- }
-}
-
-void vsp1_pipelines_suspend(struct vsp1_device *vsp1)
-{
- unsigned long flags;
- unsigned int i;
- int ret;
-
- /* To avoid increasing the system suspend time needlessly, loop over the
- * pipelines twice, first to set them all to the stopping state, and then
- * to wait for the stop to complete.
- */
- for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
- struct vsp1_rwpf *wpf = vsp1->wpf[i];
- struct vsp1_pipeline *pipe;
-
- if (wpf == NULL)
- continue;
-
- pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
- if (pipe == NULL)
- continue;
-
- spin_lock_irqsave(&pipe->irqlock, flags);
- if (pipe->state == VSP1_PIPELINE_RUNNING)
- pipe->state = VSP1_PIPELINE_STOPPING;
- spin_unlock_irqrestore(&pipe->irqlock, flags);
- }
-
- for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
- struct vsp1_rwpf *wpf = vsp1->wpf[i];
- struct vsp1_pipeline *pipe;
-
- if (wpf == NULL)
- continue;
-
- pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
- if (pipe == NULL)
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ if (!pipe->inputs[i])
continue;
- ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
- msecs_to_jiffies(500));
- if (ret == 0)
- dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
- wpf->entity.index);
+ vsp1_video_frame_end(pipe, pipe->inputs[i]);
}
-}
-
-void vsp1_pipelines_resume(struct vsp1_device *vsp1)
-{
- unsigned int i;
-
- /* Resume pipeline all running pipelines. */
- for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
- struct vsp1_rwpf *wpf = vsp1->wpf[i];
- struct vsp1_pipeline *pipe;
- if (wpf == NULL)
- continue;
-
- pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
- if (pipe == NULL)
- continue;
-
- if (vsp1_pipeline_ready(pipe))
- vsp1_pipeline_run(pipe);
- }
+ if (!pipe->lif)
+ vsp1_video_frame_end(pipe, pipe->output);
}
/* -----------------------------------------------------------------------------
@@ -787,7 +477,7 @@ vsp1_video_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], void *alloc_ctxs[])
{
struct vsp1_video *video = vb2_get_drv_priv(vq);
- const struct v4l2_pix_format_mplane *format = &video->format;
+ const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
unsigned int i;
if (*nplanes) {
@@ -816,18 +506,20 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
- struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf);
- const struct v4l2_pix_format_mplane *format = &video->format;
+ struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
+ const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
unsigned int i;
if (vb->num_planes < format->num_planes)
return -EINVAL;
+ buf->mem.num_planes = vb->num_planes;
+
for (i = 0; i < vb->num_planes; ++i) {
- buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
- buf->length[i] = vb2_plane_size(vb, i);
+ buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ buf->mem.length[i] = vb2_plane_size(vb, i);
- if (buf->length[i] < format->plane_fmt[i].sizeimage)
+ if (buf->mem.length[i] < format->plane_fmt[i].sizeimage)
return -EINVAL;
}
@@ -839,7 +531,7 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
- struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vbuf);
+ struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
unsigned long flags;
bool empty;
@@ -853,7 +545,7 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&pipe->irqlock, flags);
- video->ops->queue(video, buf);
+ video->rwpf->ops->set_memory(video->rwpf, &buf->mem);
pipe->buffers_ready |= 1 << video->pipe_index;
if (vb2_is_streaming(&video->queue) &&
@@ -863,18 +555,6 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&pipe->irqlock, flags);
}
-static void vsp1_entity_route_setup(struct vsp1_entity *source)
-{
- struct vsp1_entity *sink;
-
- if (source->route->reg == 0)
- return;
-
- sink = container_of(source->sink, struct vsp1_entity, subdev.entity);
- vsp1_write(source->vsp1, source->route->reg,
- sink->route->inputs[source->sink_pad]);
-}
-
static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vsp1_video *video = vb2_get_drv_priv(vq);
@@ -884,7 +564,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
int ret;
mutex_lock(&pipe->lock);
- if (pipe->stream_count == pipe->num_video - 1) {
+ if (pipe->stream_count == pipe->num_inputs) {
if (pipe->uds) {
struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
@@ -900,7 +580,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
struct vsp1_rwpf *rpf =
to_rwpf(&pipe->uds_input->subdev);
- uds->scale_alpha = rpf->video.fmtinfo->alpha;
+ uds->scale_alpha = rpf->fmtinfo->alpha;
}
}
@@ -931,7 +611,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
struct vsp1_video *video = vb2_get_drv_priv(vq);
struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
- struct vsp1_video_buffer *buffer;
+ struct vsp1_vb2_buffer *buffer;
unsigned long flags;
int ret;
@@ -944,7 +624,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
}
mutex_unlock(&pipe->lock);
- vsp1_pipeline_cleanup(pipe);
+ vsp1_video_pipeline_cleanup(pipe);
media_entity_pipeline_stop(&video->video.entity);
/* Remove all buffers from the IRQ queue. */
@@ -1004,7 +684,7 @@ vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
return -EINVAL;
mutex_lock(&video->lock);
- format->fmt.pix_mp = video->format;
+ format->fmt.pix_mp = video->rwpf->format;
mutex_unlock(&video->lock);
return 0;
@@ -1044,8 +724,8 @@ vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
goto done;
}
- video->format = format->fmt.pix_mp;
- video->fmtinfo = info;
+ video->rwpf->format = format->fmt.pix_mp;
+ video->rwpf->fmtinfo = info;
done:
mutex_unlock(&video->lock);
@@ -1085,7 +765,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (ret < 0)
goto err_stop;
- ret = vsp1_pipeline_init(pipe, video);
+ ret = vsp1_video_pipeline_init(pipe, video);
if (ret < 0)
goto err_stop;
@@ -1097,7 +777,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
return 0;
err_cleanup:
- vsp1_pipeline_cleanup(pipe);
+ vsp1_video_pipeline_cleanup(pipe);
err_stop:
media_entity_pipeline_stop(&video->video.entity);
return ret;
@@ -1183,62 +863,64 @@ static struct v4l2_file_operations vsp1_video_fops = {
* Initialization and Cleanup
*/
-int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf)
+struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rwpf)
{
+ struct vsp1_video *video;
const char *direction;
int ret;
- switch (video->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- direction = "output";
- video->pad.flags = MEDIA_PAD_FL_SINK;
- break;
+ video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
+ if (!video)
+ return ERR_PTR(-ENOMEM);
+
+ rwpf->video = video;
+
+ video->vsp1 = vsp1;
+ video->rwpf = rwpf;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (rwpf->entity.type == VSP1_ENTITY_RPF) {
direction = "input";
+ video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
video->pad.flags = MEDIA_PAD_FL_SOURCE;
video->video.vfl_dir = VFL_DIR_TX;
- break;
-
- default:
- return -EINVAL;
+ } else {
+ direction = "output";
+ video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ video->video.vfl_dir = VFL_DIR_RX;
}
- video->rwpf = rwpf;
-
mutex_init(&video->lock);
spin_lock_init(&video->irqlock);
INIT_LIST_HEAD(&video->irqqueue);
- mutex_init(&video->pipe.lock);
- spin_lock_init(&video->pipe.irqlock);
- INIT_LIST_HEAD(&video->pipe.entities);
- init_waitqueue_head(&video->pipe.wq);
- video->pipe.state = VSP1_PIPELINE_STOPPED;
+ vsp1_pipeline_init(&video->pipe);
+ video->pipe.frame_end = vsp1_video_pipeline_frame_end;
/* Initialize the media entity... */
ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
if (ret < 0)
- return ret;
+ return ERR_PTR(ret);
/* ... and the format ... */
- video->fmtinfo = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT);
- video->format.pixelformat = video->fmtinfo->fourcc;
- video->format.colorspace = V4L2_COLORSPACE_SRGB;
- video->format.field = V4L2_FIELD_NONE;
- video->format.width = VSP1_VIDEO_DEF_WIDTH;
- video->format.height = VSP1_VIDEO_DEF_HEIGHT;
- video->format.num_planes = 1;
- video->format.plane_fmt[0].bytesperline =
- video->format.width * video->fmtinfo->bpp[0] / 8;
- video->format.plane_fmt[0].sizeimage =
- video->format.plane_fmt[0].bytesperline * video->format.height;
+ rwpf->fmtinfo = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT);
+ rwpf->format.pixelformat = rwpf->fmtinfo->fourcc;
+ rwpf->format.colorspace = V4L2_COLORSPACE_SRGB;
+ rwpf->format.field = V4L2_FIELD_NONE;
+ rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
+ rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
+ rwpf->format.num_planes = 1;
+ rwpf->format.plane_fmt[0].bytesperline =
+ rwpf->format.width * rwpf->fmtinfo->bpp[0] / 8;
+ rwpf->format.plane_fmt[0].sizeimage =
+ rwpf->format.plane_fmt[0].bytesperline * rwpf->format.height;
/* ... and the video node... */
video->video.v4l2_dev = &video->vsp1->v4l2_dev;
video->video.fops = &vsp1_video_fops;
snprintf(video->video.name, sizeof(video->video.name), "%s %s",
- rwpf->subdev.name, direction);
+ rwpf->entity.subdev.name, direction);
video->video.vfl_type = VFL_TYPE_GRABBER;
video->video.release = video_device_release_empty;
video->video.ioctl_ops = &vsp1_video_ioctl_ops;
@@ -1256,7 +938,7 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf)
video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
video->queue.lock = &video->lock;
video->queue.drv_priv = video;
- video->queue.buf_struct_size = sizeof(struct vsp1_video_buffer);
+ video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
video->queue.ops = &vsp1_video_queue_qops;
video->queue.mem_ops = &vb2_dma_contig_memops;
video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
@@ -1274,12 +956,12 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf)
goto error;
}
- return 0;
+ return video;
error:
vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
vsp1_video_cleanup(video);
- return ret;
+ return ERR_PTR(ret);
}
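
vsp1_video_init() becomes vsp1_video_create(): the object is now allocated inside the constructor and failures travel back as ERR_PTR()-encoded pointers rather than a separate int. A minimal sketch of that convention, with hypothetical names:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_obj {
	int id;
};

/* constructor: returns a valid pointer or an ERR_PTR()-encoded errno */
static struct demo_obj *demo_create(int id)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);
	obj->id = id;
	return obj;
}

static int demo_caller(void)
{
	struct demo_obj *obj = demo_create(42);

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* recover the negative errno */
	kfree(obj);
	return 0;
}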
void vsp1_video_cleanup(struct vsp1_video *video)
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
index a929aa81cdbf..64abd39ee1e7 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -15,115 +15,34 @@
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <media/media-entity.h>
#include <media/videobuf2-v4l2.h>
-struct vsp1_video;
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
-/*
- * struct vsp1_format_info - VSP1 video format description
- * @mbus: media bus format code
- * @fourcc: V4L2 pixel format FCC identifier
- * @planes: number of planes
- * @bpp: bits per pixel
- * @hwfmt: VSP1 hardware format
- * @swap_yc: the Y and C components are swapped (Y comes before C)
- * @swap_uv: the U and V components are swapped (V comes before U)
- * @hsub: horizontal subsampling factor
- * @vsub: vertical subsampling factor
- * @alpha: has an alpha channel
- */
-struct vsp1_format_info {
- u32 fourcc;
- unsigned int mbus;
- unsigned int hwfmt;
- unsigned int swap;
- unsigned int planes;
- unsigned int bpp[3];
- bool swap_yc;
- bool swap_uv;
- unsigned int hsub;
- unsigned int vsub;
- bool alpha;
-};
-
-enum vsp1_pipeline_state {
- VSP1_PIPELINE_STOPPED,
- VSP1_PIPELINE_RUNNING,
- VSP1_PIPELINE_STOPPING,
-};
-
-/*
- * struct vsp1_pipeline - A VSP1 hardware pipeline
- * @media: the media pipeline
- * @irqlock: protects the pipeline state
- * @lock: protects the pipeline use count and stream count
- */
-struct vsp1_pipeline {
- struct media_pipeline pipe;
-
- spinlock_t irqlock;
- enum vsp1_pipeline_state state;
- wait_queue_head_t wq;
-
- struct mutex lock;
- unsigned int use_count;
- unsigned int stream_count;
- unsigned int buffers_ready;
-
- unsigned int num_video;
- unsigned int num_inputs;
- struct vsp1_rwpf *inputs[VSP1_MAX_RPF];
- struct vsp1_rwpf *output;
- struct vsp1_entity *bru;
- struct vsp1_entity *lif;
- struct vsp1_entity *uds;
- struct vsp1_entity *uds_input;
-
- struct list_head entities;
-};
-
-static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
-{
- if (likely(e->pipe))
- return container_of(e->pipe, struct vsp1_pipeline, pipe);
- else
- return NULL;
-}
-
-struct vsp1_video_buffer {
+struct vsp1_vb2_buffer {
struct vb2_v4l2_buffer buf;
struct list_head queue;
-
- dma_addr_t addr[3];
- unsigned int length[3];
+ struct vsp1_rwpf_memory mem;
};
-static inline struct vsp1_video_buffer *
-to_vsp1_video_buffer(struct vb2_v4l2_buffer *vbuf)
+static inline struct vsp1_vb2_buffer *
+to_vsp1_vb2_buffer(struct vb2_v4l2_buffer *vbuf)
{
- return container_of(vbuf, struct vsp1_video_buffer, buf);
+ return container_of(vbuf, struct vsp1_vb2_buffer, buf);
}
-struct vsp1_video_operations {
- void (*queue)(struct vsp1_video *video, struct vsp1_video_buffer *buf);
-};
-
struct vsp1_video {
+ struct list_head list;
struct vsp1_device *vsp1;
- struct vsp1_entity *rwpf;
-
- const struct vsp1_video_operations *ops;
+ struct vsp1_rwpf *rwpf;
struct video_device video;
enum v4l2_buf_type type;
struct media_pad pad;
struct mutex lock;
- struct v4l2_pix_format_mplane format;
- const struct vsp1_format_info *fmtinfo;
struct vsp1_pipeline pipe;
unsigned int pipe_index;
@@ -140,16 +59,8 @@ static inline struct vsp1_video *to_vsp1_video(struct video_device *vdev)
return container_of(vdev, struct vsp1_video, video);
}
-int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf);
+struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rwpf);
void vsp1_video_cleanup(struct vsp1_video *video);
-void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe);
-
-void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
- struct vsp1_entity *input,
- unsigned int alpha);
-
-void vsp1_pipelines_suspend(struct vsp1_device *vsp1);
-void vsp1_pipelines_resume(struct vsp1_device *vsp1);
-
#endif /* __VSP1_VIDEO_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index cbf514a6582d..c78d4af50fcf 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -34,8 +34,8 @@ static inline u32 vsp1_wpf_read(struct vsp1_rwpf *wpf, u32 reg)
static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf, u32 reg, u32 data)
{
- vsp1_write(wpf->entity.vsp1,
- reg + wpf->entity.index * VI6_WPF_OFFSET, data);
+ vsp1_mod_write(&wpf->entity,
+ reg + wpf->entity.index * VI6_WPF_OFFSET, data);
}
/* -----------------------------------------------------------------------------
@@ -88,7 +88,8 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
if (!enable) {
vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
- vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, 0);
+ vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
+ VI6_WPF_SRCRPF, 0);
return 0;
}
@@ -97,9 +98,12 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
* inputs as sub-layers and select the virtual RPF as the master
* layer.
*/
- for (i = 0; i < pipe->num_inputs; ++i) {
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
struct vsp1_rwpf *input = pipe->inputs[i];
+ if (!input)
+ continue;
+
srcrpf |= (!pipe->bru && pipe->num_inputs == 1)
? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
: VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
@@ -112,7 +116,7 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
/* Destination stride. */
if (!pipe->lif) {
- struct v4l2_pix_format_mplane *format = &wpf->video.format;
+ struct v4l2_pix_format_mplane *format = &wpf->format;
vsp1_wpf_write(wpf, VI6_WPF_DSTM_STRIDE_Y,
format->plane_fmt[0].bytesperline);
@@ -130,7 +134,7 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
/* Format */
if (!pipe->lif) {
- const struct vsp1_format_info *fmtinfo = wpf->video.fmtinfo;
+ const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT;
@@ -151,15 +155,17 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
/* Take the control handler lock to ensure that the PDV value won't be
* changed behind our back by a set control operation.
*/
- mutex_lock(wpf->ctrls.lock);
- outfmt |= vsp1_wpf_read(wpf, VI6_WPF_OUTFMT) & VI6_WPF_OUTFMT_PDV_MASK;
+ if (vsp1->info->uapi)
+ mutex_lock(wpf->ctrls.lock);
+ outfmt |= wpf->alpha->cur.val << VI6_WPF_OUTFMT_PDV_SHIFT;
vsp1_wpf_write(wpf, VI6_WPF_OUTFMT, outfmt);
- mutex_unlock(wpf->ctrls.lock);
+ if (vsp1->info->uapi)
+ mutex_unlock(wpf->ctrls.lock);
- vsp1_write(vsp1, VI6_DPR_WPF_FPORCH(wpf->entity.index),
- VI6_DPR_WPF_FPORCH_FP_WPFN);
+ vsp1_mod_write(&wpf->entity, VI6_DPR_WPF_FPORCH(wpf->entity.index),
+ VI6_DPR_WPF_FPORCH_FP_WPFN);
- vsp1_write(vsp1, VI6_WPF_WRBCK_CTRL, 0);
+ vsp1_mod_write(&wpf->entity, VI6_WPF_WRBCK_CTRL, 0);
/* Enable interrupts */
vsp1_write(vsp1, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
@@ -195,20 +201,17 @@ static struct v4l2_subdev_ops wpf_ops = {
* Video Device Operations
*/
-static void wpf_vdev_queue(struct vsp1_video *video,
- struct vsp1_video_buffer *buf)
+static void wpf_set_memory(struct vsp1_rwpf *wpf, struct vsp1_rwpf_memory *mem)
{
- struct vsp1_rwpf *wpf = container_of(video, struct vsp1_rwpf, video);
-
- vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, buf->addr[0]);
- if (buf->buf.vb2_buf.num_planes > 1)
- vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, buf->addr[1]);
- if (buf->buf.vb2_buf.num_planes > 2)
- vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, buf->addr[2]);
+ vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, mem->addr[0]);
+ if (mem->num_planes > 1)
+ vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, mem->addr[1]);
+ if (mem->num_planes > 2)
+ vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, mem->addr[2]);
}
-static const struct vsp1_video_operations wpf_vdev_ops = {
- .queue = wpf_vdev_queue,
+static const struct vsp1_rwpf_operations wpf_vdev_ops = {
+ .set_memory = wpf_set_memory,
};
/* -----------------------------------------------------------------------------
@@ -218,7 +221,6 @@ static const struct vsp1_video_operations wpf_vdev_ops = {
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
{
struct v4l2_subdev *subdev;
- struct vsp1_video *video;
struct vsp1_rwpf *wpf;
int ret;
@@ -226,6 +228,8 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
if (wpf == NULL)
return ERR_PTR(-ENOMEM);
+ wpf->ops = &wpf_vdev_ops;
+
wpf->max_width = WPF_MAX_WIDTH;
wpf->max_height = WPF_MAX_HEIGHT;
@@ -240,7 +244,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
subdev = &wpf->entity.subdev;
v4l2_subdev_init(subdev, &wpf_ops);
- subdev->entity.ops = &vsp1_media_ops;
+ subdev->entity.ops = &vsp1->media_ops;
subdev->internal_ops = &vsp1_subdev_internal_ops;
snprintf(subdev->name, sizeof(subdev->name), "%s wpf.%u",
dev_name(vsp1->dev), index);
@@ -251,8 +255,9 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
/* Initialize the control handler. */
v4l2_ctrl_handler_init(&wpf->ctrls, 1);
- v4l2_ctrl_new_std(&wpf->ctrls, &wpf_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
- 0, 255, 1, 255);
+ wpf->alpha = v4l2_ctrl_new_std(&wpf->ctrls, &wpf_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, 255, 1, 255);
wpf->entity.subdev.ctrl_handler = &wpf->ctrls;
@@ -263,48 +268,9 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
goto error;
}
- /* Initialize the video device. */
- video = &wpf->video;
-
- video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- video->vsp1 = vsp1;
- video->ops = &wpf_vdev_ops;
-
- ret = vsp1_video_init(video, &wpf->entity);
- if (ret < 0)
- goto error;
-
- wpf->entity.video = video;
- wpf->entity.sink = &wpf->video.video.entity;
-
return wpf;
error:
vsp1_entity_destroy(&wpf->entity);
return ERR_PTR(ret);
}
-
-/*
- * vsp1_wpf_create_links() - RPF pads links creation
- * @vsp1: Pointer to VSP1 device
- * @entity: Pointer to VSP1 entity
- *
- * return negative error code or zero on success
- */
-int vsp1_wpf_create_links(struct vsp1_device *vsp1,
- struct vsp1_entity *entity)
-{
- struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
- unsigned int flags;
-
- /* Connect the video device to the WPF. All connections are immutable
- * except for the WPF0 source link if a LIF is present.
- */
- flags = MEDIA_LNK_FL_ENABLED;
- if (!(vsp1->pdata.features & VSP1_HAS_LIF) || entity->index != 0)
- flags |= MEDIA_LNK_FL_IMMUTABLE;
-
- return media_create_pad_link(&wpf->entity.subdev.entity,
- RWPF_PAD_SOURCE,
- &wpf->video.video.entity, 0, flags);
-}
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 859f0c08ee05..271f725b17e8 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -1530,11 +1530,11 @@ static int si476x_radio_probe(struct platform_device *pdev)
if (si476x_core_has_diversity(radio->core)) {
si476x_ctrls[SI476X_IDX_DIVERSITY_MODE].def =
si476x_phase_diversity_mode_to_idx(radio->core->diversity_mode);
- si476x_radio_add_new_custom(radio, SI476X_IDX_DIVERSITY_MODE);
+ rval = si476x_radio_add_new_custom(radio, SI476X_IDX_DIVERSITY_MODE);
if (rval < 0)
goto exit;
- si476x_radio_add_new_custom(radio, SI476X_IDX_INTERCHIP_LINK);
+ rval = si476x_radio_add_new_custom(radio, SI476X_IDX_INTERCHIP_LINK);
if (rval < 0)
goto exit;
}
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
index 3e08475af579..4dc2067bce14 100644
--- a/drivers/media/radio/tea575x.c
+++ b/drivers/media/radio/tea575x.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/delay.h>
@@ -226,6 +222,7 @@ void snd_tea575x_set_freq(struct snd_tea575x *tea)
snd_tea575x_write(tea, tea->val);
tea->freq = snd_tea575x_val_to_freq(tea, tea->val);
}
+EXPORT_SYMBOL(snd_tea575x_set_freq);
/*
* Linux Video interface
@@ -582,25 +579,11 @@ int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
return 0;
}
+EXPORT_SYMBOL(snd_tea575x_init);
void snd_tea575x_exit(struct snd_tea575x *tea)
{
video_unregister_device(&tea->vd);
v4l2_ctrl_handler_free(tea->vd.ctrl_handler);
}
-
-static int __init alsa_tea575x_module_init(void)
-{
- return 0;
-}
-
-static void __exit alsa_tea575x_module_exit(void)
-{
-}
-
-module_init(alsa_tea575x_module_init)
-module_exit(alsa_tea575x_module_exit)
-
-EXPORT_SYMBOL(snd_tea575x_init);
EXPORT_SYMBOL(snd_tea575x_exit);
-EXPORT_SYMBOL(snd_tea575x_set_freq);
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index ebc73b034249..3f9e6df7d837 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -68,7 +68,7 @@ MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");
/* RDS buffer blocks */
static u32 default_rds_buf = 300;
module_param(default_rds_buf, uint, 0444);
-MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
+MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");
/* Radio Nr */
static u32 radio_nr = -1;
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index a35631891cc0..3f61d77d4147 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -443,6 +443,21 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd,
return retval;
}
+struct accel_times {
+ const char value;
+ unsigned int msecs;
+};
+
+static const struct accel_times accel[] = {
+ { 1, 125 },
+ { 2, 250 },
+ { 4, 500 },
+ { 6, 1000 },
+ { 9, 1500 },
+ { 13, 2000 },
+ { 20, 0 },
+};
+
/*
* ati_remote_compute_accel
*
@@ -454,30 +469,22 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd,
*/
static int ati_remote_compute_accel(struct ati_remote *ati_remote)
{
- static const char accel[] = { 1, 2, 4, 6, 9, 13, 20 };
- unsigned long now = jiffies;
- int acc;
+ unsigned long now = jiffies, reset_time;
+ int i;
+
+ reset_time = msecs_to_jiffies(250);
- if (time_after(now, ati_remote->old_jiffies + msecs_to_jiffies(250))) {
- acc = 1;
+ if (time_after(now, ati_remote->old_jiffies + reset_time)) {
ati_remote->acc_jiffies = now;
+ return 1;
}
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(125)))
- acc = accel[0];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(250)))
- acc = accel[1];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(500)))
- acc = accel[2];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(1000)))
- acc = accel[3];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(1500)))
- acc = accel[4];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(2000)))
- acc = accel[5];
- else
- acc = accel[6];
+ for (i = 0; i < ARRAY_SIZE(accel) - 1; i++) {
+ unsigned long timeout = msecs_to_jiffies(accel[i].msecs);
- return acc;
+ if (time_before(now, ati_remote->acc_jiffies + timeout))
+ return accel[i].value;
+ }
+ return accel[i].value;
}
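
The rewrite above turns a ladder of time_before() checks into a walk over { value, msecs } pairs, with a terminating entry whose timeout never matches. A stand-alone sketch of the same lookup (user-space, plain milliseconds instead of jiffies, thresholds copied from the table above):

#include <stdio.h>

struct accel_times {
	char value;
	unsigned int msecs;
};

static const struct accel_times accel[] = {
	{ 1, 125 }, { 2, 250 }, { 4, 500 }, { 6, 1000 },
	{ 9, 1500 }, { 13, 2000 }, { 20, 0 },
};

/* Acceleration factor for an event arriving `elapsed` ms after the
 * burst started; the last entry never matches and acts as the cap. */
static int compute_accel(unsigned int elapsed)
{
	unsigned int i;

	for (i = 0; i < sizeof(accel) / sizeof(accel[0]) - 1; i++)
		if (elapsed < accel[i].msecs)
			return accel[i].value;
	return accel[i].value;
}

int main(void)
{
	printf("%d %d %d\n", compute_accel(100),	/* 1 */
	       compute_accel(700),			/* 6 */
	       compute_accel(5000));			/* 20 */
	return 0;
}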
/*
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index b36e51576f8e..e0c531fa01da 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -152,7 +152,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep;
struct igorplugusb *ir;
struct rc_dev *rc;
- int ret;
+ int ret = -ENOMEM;
udev = interface_to_usbdev(intf);
idesc = intf->cur_altsetting;
@@ -182,7 +182,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
ir->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ir->urb)
- return -ENOMEM;
+ goto fail;
usb_fill_control_urb(ir->urb, udev,
usb_rcvctrlpipe(udev, 0), (uint8_t *)&ir->request,
@@ -191,6 +191,9 @@ static int igorplugusb_probe(struct usb_interface *intf,
usb_make_path(udev, ir->phys, sizeof(ir->phys));
rc = rc_allocate_device();
+ if (!rc)
+ goto fail;
+
rc->input_name = DRIVER_DESC;
rc->input_phys = ir->phys;
usb_to_input_id(udev, &rc->input_id);
@@ -214,9 +217,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
ret = rc_register_device(rc);
if (ret) {
dev_err(&intf->dev, "failed to register rc device: %d", ret);
- rc_free_device(rc);
- usb_free_urb(ir->urb);
- return ret;
+ goto fail;
}
usb_set_intfdata(intf, ir);
@@ -224,6 +225,12 @@ static int igorplugusb_probe(struct usb_interface *intf,
igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
return 0;
+fail:
+ rc_free_device(ir->rc);
+ usb_free_urb(ir->urb);
+ del_timer(&ir->timer);
+
+ return ret;
}
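
All probe failures now funnel through one fail: label that releases the rc device, the URB and the timer; because rc_free_device(NULL) and usb_free_urb(NULL) are no-ops, a single label can cover every allocation stage. A condensed sketch of the pattern with plain allocations (names hypothetical):

#include <linux/slab.h>

static int demo_probe(void)
{
	void *a, *b = NULL;
	int ret = -ENOMEM;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		goto fail;

	b = kzalloc(16, GFP_KERNEL);
	if (!b)
		goto fail;

	return 0;	/* on success the buffers stay owned by the device */

fail:
	/* kfree(NULL) is a no-op, so one label covers every stage */
	kfree(b);
	kfree(a);
	return ret;
}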
static void igorplugusb_disconnect(struct usb_interface *intf)
diff --git a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
index 8344bcc595be..2583400ca1b4 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
@@ -23,35 +23,35 @@
/* Initial keytable is from Jose Alberto Reguero <jareguero@telefonica.net>
and Felipe Morales Moreno <felipe.morales.moreno@gmail.com> */
-/* FIXME: mappings are not 100% correct? */
+/* Keytable fixed by Philippe Valembois <lephilousophe@users.sourceforge.net> */
static struct rc_map_table avermedia_rm_ks[] = {
- { 0x0501, KEY_POWER2 },
- { 0x0502, KEY_CHANNELUP },
- { 0x0503, KEY_CHANNELDOWN },
- { 0x0504, KEY_VOLUMEUP },
- { 0x0505, KEY_VOLUMEDOWN },
- { 0x0506, KEY_MUTE },
- { 0x0507, KEY_RIGHT },
- { 0x0508, KEY_RED },
- { 0x0509, KEY_1 },
- { 0x050a, KEY_2 },
- { 0x050b, KEY_3 },
- { 0x050c, KEY_4 },
- { 0x050d, KEY_5 },
- { 0x050e, KEY_6 },
- { 0x050f, KEY_7 },
- { 0x0510, KEY_8 },
- { 0x0511, KEY_9 },
- { 0x0512, KEY_0 },
- { 0x0513, KEY_AUDIO },
- { 0x0515, KEY_EPG },
- { 0x0516, KEY_PLAY },
- { 0x0517, KEY_RECORD },
- { 0x0518, KEY_STOP },
- { 0x051c, KEY_BACK },
- { 0x051d, KEY_FORWARD },
- { 0x054d, KEY_LEFT },
- { 0x0556, KEY_ZOOM },
+ { 0x0501, KEY_POWER2 }, /* Power (RED POWER BUTTON) */
+ { 0x0502, KEY_CHANNELUP }, /* Channel+ */
+ { 0x0503, KEY_CHANNELDOWN }, /* Channel- */
+ { 0x0504, KEY_VOLUMEUP }, /* Volume+ */
+ { 0x0505, KEY_VOLUMEDOWN }, /* Volume- */
+ { 0x0506, KEY_MUTE }, /* Mute */
+ { 0x0507, KEY_AGAIN }, /* Recall */
+ { 0x0508, KEY_VIDEO }, /* Source */
+ { 0x0509, KEY_1 }, /* 1 */
+ { 0x050a, KEY_2 }, /* 2 */
+ { 0x050b, KEY_3 }, /* 3 */
+ { 0x050c, KEY_4 }, /* 4 */
+ { 0x050d, KEY_5 }, /* 5 */
+ { 0x050e, KEY_6 }, /* 6 */
+ { 0x050f, KEY_7 }, /* 7 */
+ { 0x0510, KEY_8 }, /* 8 */
+ { 0x0511, KEY_9 }, /* 9 */
+ { 0x0512, KEY_0 }, /* 0 */
+ { 0x0513, KEY_AUDIO }, /* Audio */
+ { 0x0515, KEY_EPG }, /* EPG */
+ { 0x0516, KEY_PLAYPAUSE }, /* Play/Pause */
+ { 0x0517, KEY_RECORD }, /* Record */
+ { 0x0518, KEY_STOP }, /* Stop */
+ { 0x051c, KEY_BACK }, /* << */
+ { 0x051d, KEY_FORWARD }, /* >> */
+ { 0x054d, KEY_INFO }, /* Display information */
+ { 0x0556, KEY_ZOOM }, /* Fullscreen */
};
static struct rc_map_list avermedia_rm_ks_map = {
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 4de0e85af805..92ae1903c010 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -506,6 +506,7 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
struct irctl *ir = irctls[iminor(inode)];
struct cdev *cdev;
+ int ret;
if (!ir) {
printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
@@ -516,7 +517,8 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);
- WARN_ON(mutex_lock_killable(&lirc_dev_lock));
+ ret = mutex_lock_killable(&lirc_dev_lock);
+ WARN_ON(ret);
rc_close(ir->d.rdev);
@@ -532,7 +534,8 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
kfree(ir);
}
- mutex_unlock(&lirc_dev_lock);
+ if (!ret)
+ mutex_unlock(&lirc_dev_lock);
return 0;
}
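
The fix above stops discarding the return value of mutex_lock_killable(): if the caller received a fatal signal before the lock was taken, the unlock must be skipped or we would release a mutex we never owned. A minimal sketch of the pattern, outside lirc:

#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int demo_close(void)
{
	int ret = mutex_lock_killable(&demo_lock);

	WARN_ON(ret);	/* unexpected, but must not deadlock on it */

	/* ... tear-down that is safe even without the lock ... */

	if (!ret)
		mutex_unlock(&demo_lock);	/* only unlock what we own */
	return 0;
}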
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 2cdb740cde48..35155ae500c7 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -587,9 +587,8 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
if (len == 2)
dev_dbg(dev, "Get hw/sw rev?");
else
- dev_dbg(dev, "hw/sw rev 0x%02x 0x%02x 0x%02x 0x%02x",
- data1, data2,
- buf[start + 4], buf[start + 5]);
+ dev_dbg(dev, "hw/sw rev %*ph",
+ 4, &buf[start + 2]);
break;
case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested");
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 18adf580f502..99b303b702ac 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -39,6 +39,8 @@
#include "nuvoton-cir.h"
+static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);
+
static const struct nvt_chip nvt_chips[] = {
{ "w83667hg", NVT_W83667HG },
{ "NCT6775F", NVT_6775F },
@@ -80,17 +82,24 @@ static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
}
/* enter extended function mode */
-static inline void nvt_efm_enable(struct nvt_dev *nvt)
+static inline int nvt_efm_enable(struct nvt_dev *nvt)
{
+ if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
+ return -EBUSY;
+
/* Enabling Extended Function Mode explicitly requires writing 2x */
outb(EFER_EFM_ENABLE, nvt->cr_efir);
outb(EFER_EFM_ENABLE, nvt->cr_efir);
+
+ return 0;
}
/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
outb(EFER_EFM_DISABLE, nvt->cr_efir);
+
+ release_region(nvt->cr_efir, 2);
}
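
nvt_efm_enable() now claims the two Super I/O config ports with request_muxed_region(), which sleeps until any other driver muxing the same ports releases them, and nvt_efm_disable() drops the claim with release_region(). A minimal sketch of the claim/use/release pattern (the port number and magic values are illustrative, not this chip's):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

#define DEMO_EFIR 0x2e	/* illustrative Super I/O index port */

static int demo_sio_access(void)
{
	/* Sleeps until the muxed region is free; fails only if the
	 * range is held by a non-muxed owner. */
	if (!request_muxed_region(DEMO_EFIR, 2, "demo-sio"))
		return -EBUSY;

	outb(0x87, DEMO_EFIR);	/* enter config mode (illustrative magic) */
	outb(0x87, DEMO_EFIR);
	/* ... read/write config registers via the index/data pair ... */
	outb(0xaa, DEMO_EFIR);	/* leave config mode */

	release_region(DEMO_EFIR, 2);
	return 0;
}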
/*
@@ -100,8 +109,25 @@ static inline void nvt_efm_disable(struct nvt_dev *nvt)
*/
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
- outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
- outb(ldev, nvt->cr_efdr);
+ nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
+}
+
+/* select and enable logical device, wrapped in EFM mode */
+static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
+{
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, ldev);
+ nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+ nvt_efm_disable(nvt);
+}
+
+/* select and disable logical device, wrapped in EFM mode */
+static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
+{
+ nvt_efm_enable(nvt);
+ nvt_select_logical_dev(nvt, ldev);
+ nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
+ nvt_efm_disable(nvt);
}
/* write val to cir config register */
@@ -137,6 +163,120 @@ static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
return val;
}
+/* don't override io address if one is set already */
+static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
+{
+ unsigned long old_addr;
+
+ old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
+ old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);
+
+ if (old_addr) {
+ *ioaddr = old_addr;
+ } else {
+ nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
+ nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
+ }
+}
+
+static ssize_t wakeup_data_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rc_dev *rc_dev = to_rc_dev(dev);
+ struct nvt_dev *nvt = rc_dev->priv;
+ int fifo_len, duration;
+ unsigned long flags;
+ ssize_t buf_len = 0;
+ int i;
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+ fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
+ fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);
+
+ /* go to first element to be read */
+ while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
+
+ for (i = 0; i < fifo_len; i++) {
+ duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
+ duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
+ buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len,
+ "%d ", duration);
+ }
+ buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");
+
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ return buf_len;
+}
+
+static ssize_t wakeup_data_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct rc_dev *rc_dev = to_rc_dev(dev);
+ struct nvt_dev *nvt = rc_dev->priv;
+ unsigned long flags;
+ u8 tolerance, config, wake_buf[WAKEUP_MAX_SIZE];
+ char **argv;
+ int i, count;
+ unsigned int val;
+ ssize_t ret;
+
+ argv = argv_split(GFP_KERNEL, buf, &count);
+ if (!argv)
+ return -ENOMEM;
+ if (!count || count > WAKEUP_MAX_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = kstrtouint(argv[i], 10, &val);
+ if (ret)
+ goto out;
+ val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
+ if (!val || val > 0x7f) {
+ ret = -EINVAL;
+ goto out;
+ }
+ wake_buf[i] = val;
+ /* sequence must start with a pulse */
+ if (i % 2 == 0)
+ wake_buf[i] |= BUF_PULSE_BIT;
+ }
+
+ /* hardcode the tolerance to 10% */
+ tolerance = DIV_ROUND_UP(count, 10);
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+ nvt_clear_cir_wake_fifo(nvt);
+ nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
+ nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);
+
+ config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
+
+ /* enable writes to wake fifo */
+ nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
+ CIR_WAKE_IRCON);
+
+ for (i = 0; i < count; i++)
+ nvt_cir_wake_reg_write(nvt, wake_buf[i], CIR_WAKE_WR_FIFO_DATA);
+
+ nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
+
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ ret = len;
+out:
+ argv_free(argv);
+ return ret;
+}
+static DEVICE_ATTR_RW(wakeup_data);
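
The new wakeup_data attribute takes a space-separated list of pulse/space durations, parses it with argv_split()/kstrtouint(), rounds each value to the hardware sample period, and tags even-indexed entries as pulses. A user-space sketch of the intended use (the sysfs path and the microsecond unit are assumptions read off the parsing code above):

#include <stdio.h>

/* Hypothetical sysfs path; the attribute hangs off the rc device. */
#define WAKEUP_ATTR "/sys/class/rc/rc0/wakeup_data"

int main(void)
{
	FILE *f = fopen(WAKEUP_ATTR, "w");

	if (!f)
		return 1;
	/* Alternating pulse/space durations; the driver rounds each to
	 * its sample period and rejects sequences longer than
	 * WAKEUP_MAX_SIZE entries or values outside 1..0x7f periods. */
	fprintf(f, "1000 500 500 500 500 1500\n");
	return fclose(f) ? 1 : 0;
}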
+
/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
@@ -251,7 +391,7 @@ static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
/* detect hardware features */
-static void nvt_hw_detect(struct nvt_dev *nvt)
+static int nvt_hw_detect(struct nvt_dev *nvt)
{
const char *chip_name;
int chip_id;
@@ -266,10 +406,17 @@ static void nvt_hw_detect(struct nvt_dev *nvt)
nvt_efm_enable(nvt);
nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
}
-
nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
+ nvt_efm_disable(nvt);
+
chip_id = nvt->chip_major << 8 | nvt->chip_minor;
+ if (chip_id == NVT_INVALID) {
+ dev_err(&nvt->pdev->dev,
+ "No device found on either EFM port\n");
+ return -ENODEV;
+ }
+
chip_name = nvt_find_chip(nvt, chip_id);
/* warn, but still let the driver load, if we don't know this chip */
@@ -282,7 +429,7 @@ static void nvt_hw_detect(struct nvt_dev *nvt)
"found %s or compatible: chip id: 0x%02x 0x%02x",
chip_name, nvt->chip_major, nvt->chip_minor);
- nvt_efm_disable(nvt);
+ return 0;
}
static void nvt_cir_ldev_init(struct nvt_dev *nvt)
@@ -305,12 +452,10 @@ static void nvt_cir_ldev_init(struct nvt_dev *nvt)
val |= psval;
nvt_cr_write(nvt, val, psreg);
- /* Select CIR logical device and enable */
+ /* Select CIR logical device */
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
- nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
- nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
- nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+ nvt_set_ioaddr(nvt, &nvt->cir_addr);
nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);
@@ -320,7 +465,7 @@ static void nvt_cir_ldev_init(struct nvt_dev *nvt)
static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
- /* Select ACPI logical device, enable it and CIR Wake */
+ /* Select ACPI logical device and enable it */
nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
@@ -330,12 +475,10 @@ static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
/* enable pme interrupt of cir wakeup event */
nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
- /* Select CIR Wake logical device and enable */
+ /* Select CIR Wake logical device */
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
- nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
- nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
- nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+ nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);
nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);
@@ -355,11 +498,19 @@ static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
- u8 val;
+ u8 val, config;
+
+ config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
+
+ /* clearing wake fifo works in learning mode only */
+ nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
+ CIR_WAKE_IRCON);
val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
CIR_WAKE_FIFOCON);
+
+ nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
}
/* clear out the hardware's cir tx fifo */
@@ -408,6 +559,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt)
/* and finally, enable interrupts */
nvt_set_cir_iren(nvt);
+
+ /* enable the CIR logical device */
+ nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
}
static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
@@ -442,10 +596,15 @@ static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
/* clear any and all stray interrupts */
nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
+
+ /* enable the CIR WAKE logical device */
+ nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
}
static void nvt_enable_wake(struct nvt_dev *nvt)
{
+ unsigned long flags;
+
nvt_efm_enable(nvt);
nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
@@ -457,12 +616,16 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
nvt_efm_disable(nvt);
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
CIR_WAKE_IRCON);
nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
+
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
#if 0 /* Currently unused */
@@ -670,7 +833,6 @@ static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
- unsigned long flags;
u8 fifocount, val;
unsigned int b_idx;
bool overrun = false;
@@ -689,8 +851,6 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
-
b_idx = nvt->pkts;
/* This should never happen, but lets check anyway... */
@@ -712,8 +872,6 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
if (overrun)
nvt_handle_rx_fifo_overrun(nvt);
-
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
static void nvt_cir_log_irqs(u8 status, u8 iren)
@@ -736,16 +894,13 @@ static void nvt_cir_log_irqs(u8 status, u8 iren)
static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
{
unsigned long flags;
- bool tx_inactive;
u8 tx_state;
spin_lock_irqsave(&nvt->tx.lock, flags);
tx_state = nvt->tx.tx_state;
spin_unlock_irqrestore(&nvt->tx.lock, flags);
- tx_inactive = (tx_state == ST_TX_NONE);
-
- return tx_inactive;
+ return tx_state == ST_TX_NONE;
}
/* interrupt service routine for incoming and outgoing CIR data */
@@ -757,9 +912,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
nvt_dbg_verbose("%s firing", __func__);
- nvt_efm_enable(nvt);
- nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
- nvt_efm_disable(nvt);
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
/*
* Get IR Status register contents. Write 1 to ack/clear
@@ -775,9 +928,14 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
* 0: CIR_IRSTS_GH - Min Length Detected
*/
status = nvt_cir_reg_read(nvt, CIR_IRSTS);
- if (!status) {
+ iren = nvt_cir_reg_read(nvt, CIR_IREN);
+
+ /* The IRQ may be shared with CIR WAKE, so check each status
+ * bit against whether its interrupt source is enabled
+ */
+ if (!(status & iren)) {
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
- nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
return IRQ_NONE;
}
@@ -785,13 +943,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
nvt_cir_reg_write(nvt, status, CIR_IRSTS);
nvt_cir_reg_write(nvt, 0, CIR_IRSTS);
- /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
- iren = nvt_cir_reg_read(nvt, CIR_IREN);
- if (!iren) {
- nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
- return IRQ_NONE;
- }
-
nvt_cir_log_irqs(status, iren);
if (status & CIR_IRSTS_RTR) {
@@ -805,16 +956,14 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
if (nvt_cir_tx_inactive(nvt))
nvt_get_rx_ir_data(nvt);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
-
cur_state = nvt->study_state;
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
if (cur_state == ST_STUDY_NONE)
nvt_clear_cir_fifo(nvt);
}
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
if (status & CIR_IRSTS_TE)
nvt_clear_tx_fifo(nvt);
@@ -863,9 +1012,18 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
nvt_dbg_wake("%s firing", __func__);
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
- if (!status)
+ iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
+
+ /* The IRQ may be shared with CIR, so check each status
+ * bit against whether its interrupt source is enabled
+ */
+ if (!(status & iren)) {
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
return IRQ_NONE;
+ }
if (status & CIR_WAKE_IRSTS_IR_PENDING)
nvt_clear_cir_wake_fifo(nvt);
@@ -873,13 +1031,6 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);
- /* Interrupt may be shared with CIR, bail if Wake not enabled */
- iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
- if (!iren) {
- nvt_dbg_wake("%s exiting, wake not enabled", __func__);
- return IRQ_HANDLED;
- }
-
if ((status & CIR_WAKE_IRSTS_PE) &&
(nvt->wake_state == ST_WAKE_START)) {
while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
@@ -888,39 +1039,21 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
}
nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
- spin_lock_irqsave(&nvt->nvt_lock, flags);
nvt->wake_state = ST_WAKE_FINISH;
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
nvt_dbg_wake("%s done", __func__);
return IRQ_HANDLED;
}
-static void nvt_enable_cir(struct nvt_dev *nvt)
+static void nvt_disable_cir(struct nvt_dev *nvt)
{
- /* set function enable flags */
- nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
- CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
- CIR_IRCON);
-
- nvt_efm_enable(nvt);
-
- /* enable the CIR logical device */
- nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
- nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
-
- nvt_efm_disable(nvt);
-
- /* clear all pending interrupts */
- nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+ unsigned long flags;
- /* enable interrupts */
- nvt_set_cir_iren(nvt);
-}
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
-static void nvt_disable_cir(struct nvt_dev *nvt)
-{
/* disable CIR interrupts */
nvt_cir_reg_write(nvt, 0, CIR_IREN);
@@ -934,13 +1067,10 @@ static void nvt_disable_cir(struct nvt_dev *nvt)
nvt_clear_cir_fifo(nvt);
nvt_clear_tx_fifo(nvt);
- nvt_efm_enable(nvt);
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
/* disable the CIR logical device */
- nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
- nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
-
- nvt_efm_disable(nvt);
+ nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
}
static int nvt_open(struct rc_dev *dev)
@@ -949,20 +1079,31 @@ static int nvt_open(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&nvt->nvt_lock, flags);
- nvt_enable_cir(nvt);
+
+ /* set function enable flags */
+ nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
+ CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
+ CIR_IRCON);
+
+ /* clear all pending interrupts */
+ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+
+ /* enable interrupts */
+ nvt_set_cir_iren(nvt);
+
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ /* enable the CIR logical device */
+ nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
+
return 0;
}
static void nvt_close(struct rc_dev *dev)
{
struct nvt_dev *nvt = dev->priv;
- unsigned long flags;
- spin_lock_irqsave(&nvt->nvt_lock, flags);
nvt_disable_cir(nvt);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
/* Allocate memory, probe hardware, and initialize everything */
@@ -1024,7 +1165,9 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
init_waitqueue_head(&nvt->tx.queue);
- nvt_hw_detect(nvt);
+ ret = nvt_hw_detect(nvt);
+ if (ret)
+ goto exit_free_dev_rdev;
/* Initialize CIR & CIR Wake Logical Devices */
nvt_efm_enable(nvt);
@@ -1032,7 +1175,10 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
nvt_cir_wake_ldev_init(nvt);
nvt_efm_disable(nvt);
- /* Initialize CIR & CIR Wake Config Registers */
+ /*
+ * Initialize CIR & CIR Wake Config Registers
+ * and enable logical devices
+ */
nvt_cir_regs_init(nvt);
nvt_cir_wake_regs_init(nvt);
@@ -1079,12 +1225,16 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
goto exit_unregister_device;
if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
- CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+ CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
goto exit_unregister_device;
if (devm_request_irq(&pdev->dev, nvt->cir_wake_irq,
nvt_cir_wake_isr, IRQF_SHARED,
- NVT_DRIVER_NAME, (void *)nvt))
+ NVT_DRIVER_NAME "-wake", (void *)nvt))
+ goto exit_unregister_device;
+
+ ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
+ if (ret)
goto exit_unregister_device;
device_init_wakeup(&pdev->dev, true);
@@ -1109,15 +1259,13 @@ exit_free_dev_rdev:
static void nvt_remove(struct pnp_dev *pdev)
{
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
- unsigned long flags;
- spin_lock_irqsave(&nvt->nvt_lock, flags);
- /* disable CIR */
- nvt_cir_reg_write(nvt, 0, CIR_IREN);
+ device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);
+
nvt_disable_cir(nvt);
+
/* enable CIR Wake (for IR power-on) */
nvt_enable_wake(nvt);
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
rc_unregister_device(nvt->rdev);
}
@@ -1129,26 +1277,23 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
nvt_dbg("%s called", __func__);
- /* zero out misc state tracking */
- spin_lock_irqsave(&nvt->nvt_lock, flags);
- nvt->study_state = ST_STUDY_NONE;
- nvt->wake_state = ST_WAKE_NONE;
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
spin_lock_irqsave(&nvt->tx.lock, flags);
nvt->tx.tx_state = ST_TX_NONE;
spin_unlock_irqrestore(&nvt->tx.lock, flags);
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+ /* zero out misc state tracking */
+ nvt->study_state = ST_STUDY_NONE;
+ nvt->wake_state = ST_WAKE_NONE;
+
/* disable all CIR interrupts */
nvt_cir_reg_write(nvt, 0, CIR_IREN);
- nvt_efm_enable(nvt);
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
/* disable cir logical dev */
- nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
- nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
-
- nvt_efm_disable(nvt);
+ nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
/* make sure wake is enabled */
nvt_enable_wake(nvt);
@@ -1162,16 +1307,6 @@ static int nvt_resume(struct pnp_dev *pdev)
nvt_dbg("%s called", __func__);
- /* open interrupt */
- nvt_set_cir_iren(nvt);
-
- /* Enable CIR logical device */
- nvt_efm_enable(nvt);
- nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
- nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
-
- nvt_efm_disable(nvt);
-
nvt_cir_regs_init(nvt);
nvt_cir_wake_regs_init(nvt);
@@ -1181,6 +1316,7 @@ static int nvt_resume(struct pnp_dev *pdev)
static void nvt_shutdown(struct pnp_dev *pdev)
{
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+
nvt_enable_wake(nvt);
}
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 0ad15d34e9c9..c9c98ebb19ee 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -68,7 +68,8 @@ enum nvt_chip_ver {
NVT_W83667HG = 0xa510,
NVT_6775F = 0xb470,
NVT_6776F = 0xc330,
- NVT_6779D = 0xc560
+ NVT_6779D = 0xc560,
+ NVT_INVALID = 0xffff,
};
struct nvt_chip {
@@ -157,8 +158,8 @@ struct nvt_dev {
/* total length of CIR and CIR WAKE */
#define CIR_IOREG_LENGTH 0x0f
-/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL (0x7d0 = 2000) */
-#define CIR_RX_LIMIT_COUNT 0x7d0
+/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL */
+#define CIR_RX_LIMIT_COUNT (IR_DEFAULT_TIMEOUT / US_TO_NS(SAMPLE_PERIOD))
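
Worked out with the values in use here at the time (on the assumption that IR_DEFAULT_TIMEOUT is rc-core's 125 ms default and SAMPLE_PERIOD is this driver's 50 us): 125 000 000 ns / 50 000 ns = 2500 samples, so the RX limit now tracks the rc-core timeout instead of the old fixed 0x7d0 (2000 samples, i.e. 100 ms).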
/* CIR Regs */
#define CIR_IRCON 0x00
@@ -292,10 +293,7 @@ struct nvt_dev {
#define CIR_WAKE_IREN_RTR 0x40
#define CIR_WAKE_IREN_PE 0x20
#define CIR_WAKE_IREN_RFO 0x10
-#define CIR_WAKE_IREN_TE 0x08
-#define CIR_WAKE_IREN_TTR 0x04
-#define CIR_WAKE_IREN_TFU 0x02
-#define CIR_WAKE_IREN_GH 0x01
+#define CIR_WAKE_IREN_GH 0x08
/* CIR WAKE FIFOCON settings */
#define CIR_WAKE_FIFOCON_RXFIFOCLR 0x08
@@ -419,3 +417,6 @@ struct nvt_dev {
/* as VISTA MCE definition, valid carrier value */
#define MAX_CARRIER 60000
#define MIN_CARRIER 30000
+
+/* max wakeup sequence length */
+#define WAKEUP_MAX_SIZE 65
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 7359f3d03b64..585d5e52118d 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -16,6 +16,9 @@
#ifndef _RC_CORE_PRIV
#define _RC_CORE_PRIV
+/* Define the max number of pulse/space transitions to buffer */
+#define MAX_IR_EVENT_SIZE 512
+
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <media/rc-core.h>
@@ -35,7 +38,8 @@ struct ir_raw_event_ctrl {
struct list_head list; /* to keep track of raw clients */
struct task_struct *thread;
spinlock_t lock;
- struct kfifo_rec_ptr_1 kfifo; /* fifo for the pulse/space durations */
+ /* fifo for the pulse/space durations */
+ DECLARE_KFIFO(kfifo, struct ir_raw_event, MAX_IR_EVENT_SIZE);
ktime_t last_event; /* when last event occurred */
enum raw_event_type last_type; /* last event type */
struct rc_dev *dev; /* pointer to the parent rc_dev */
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index c69807fe2fef..144304c94606 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -20,9 +20,6 @@
#include <linux/freezer.h>
#include "rc-core-priv.h"
-/* Define the max number of pulse/space transitions to buffer */
-#define MAX_IR_EVENT_SIZE 512
-
/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);
@@ -36,14 +33,12 @@ static int ir_raw_event_thread(void *data)
struct ir_raw_event ev;
struct ir_raw_handler *handler;
struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
- int retval;
while (!kthread_should_stop()) {
spin_lock_irq(&raw->lock);
- retval = kfifo_len(&raw->kfifo);
- if (retval < sizeof(ev)) {
+ if (!kfifo_len(&raw->kfifo)) {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
@@ -54,7 +49,8 @@ static int ir_raw_event_thread(void *data)
continue;
}
- retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
+ if (!kfifo_out(&raw->kfifo, &ev, 1))
+ dev_err(&raw->dev->dev, "IR event FIFO is empty!\n");
spin_unlock_irq(&raw->lock);
mutex_lock(&ir_raw_handler_lock);
@@ -87,8 +83,10 @@ int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
IR_dprintk(2, "sample: (%05dus %s)\n",
TO_US(ev->duration), TO_STR(ev->pulse));
- if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
- return -ENOMEM;
+ if (!kfifo_put(&dev->raw->kfifo, *ev)) {
+ dev_err(&dev->dev, "IR event FIFO is full!\n");
+ return -ENOSPC;
+ }
return 0;
}
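
The FIFO is now declared with a fixed element type and size (DECLARE_KFIFO/INIT_KFIFO), so kfifo_put()/kfifo_out() move whole records and the kfifo_alloc()/kfifo_free() pair plus the sizeof bookkeeping disappear. A minimal sketch of the typed-kfifo API with a hypothetical element:

#include <linux/kernel.h>
#include <linux/kfifo.h>

struct demo_event {
	u32 duration;
	bool pulse;
};

#define DEMO_FIFO_SIZE 512	/* must be a power of two */

static DECLARE_KFIFO(demo_fifo, struct demo_event, DEMO_FIFO_SIZE);

static void demo(void)
{
	struct demo_event in = { .duration = 100, .pulse = true };
	struct demo_event out;

	INIT_KFIFO(demo_fifo);

	if (!kfifo_put(&demo_fifo, in))		/* false only when full */
		pr_err("fifo full\n");

	if (kfifo_out(&demo_fifo, &out, 1))	/* elements copied out */
		pr_info("duration %u\n", out.duration);
}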
@@ -273,11 +271,7 @@ int ir_raw_event_register(struct rc_dev *dev)
dev->raw->dev = dev;
dev->change_protocol = change_protocol;
- rc = kfifo_alloc(&dev->raw->kfifo,
- sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
- GFP_KERNEL);
- if (rc < 0)
- goto out;
+ INIT_KFIFO(dev->raw->kfifo);
spin_lock_init(&dev->raw->lock);
dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
@@ -319,7 +313,6 @@ void ir_raw_event_unregister(struct rc_dev *dev)
handler->raw_unregister(dev);
mutex_unlock(&ir_raw_handler_lock);
- kfifo_free(&dev->raw->kfifo);
kfree(dev->raw);
dev->raw = NULL;
}
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 1042fa331a07..4e9bbe735ae9 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -13,6 +13,7 @@
*/
#include <media/rc-core.h>
+#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/input.h>
@@ -723,6 +724,7 @@ int rc_open(struct rc_dev *rdev)
return -EINVAL;
mutex_lock(&rdev->lock);
+
if (!rdev->users++ && rdev->open != NULL)
rval = rdev->open(rdev);
@@ -873,6 +875,9 @@ static ssize_t show_protocols(struct device *device,
if (!dev)
return -EINVAL;
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
mutex_lock(&dev->lock);
if (fattr->type == RC_FILTER_NORMAL) {
@@ -1054,6 +1059,9 @@ static ssize_t store_protocols(struct device *device,
if (!dev)
return -EINVAL;
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
if (fattr->type == RC_FILTER_NORMAL) {
IR_dprintk(1, "Normal protocol change requested\n");
current_protocols = &dev->enabled_protocols;
@@ -1154,12 +1162,16 @@ static ssize_t show_filter(struct device *device,
if (!dev)
return -EINVAL;
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
+ mutex_lock(&dev->lock);
+
if (fattr->type == RC_FILTER_NORMAL)
filter = &dev->scancode_filter;
else
filter = &dev->scancode_wakeup_filter;
- mutex_lock(&dev->lock);
if (fattr->mask)
val = filter->mask;
else
@@ -1204,6 +1216,9 @@ static ssize_t store_filter(struct device *device,
if (!dev)
return -EINVAL;
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
ret = kstrtoul(buf, 0, &val);
if (ret < 0)
return ret;
@@ -1408,6 +1423,7 @@ int rc_register_device(struct rc_dev *dev)
dev->minor = minor;
dev_set_name(&dev->dev, "rc%u", dev->minor);
dev_set_drvdata(&dev->dev, dev);
+ atomic_set(&dev->initialized, 0);
dev->dev.groups = dev->sysfs_groups;
dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
@@ -1419,14 +1435,6 @@ int rc_register_device(struct rc_dev *dev)
dev->sysfs_groups[attr++] = &rc_dev_wakeup_protocol_attr_grp;
dev->sysfs_groups[attr++] = NULL;
- /*
- * Take the lock here, as the device sysfs node will appear
- * when device_add() is called, which may trigger an ir-keytable udev
- * rule, which will in turn call show_protocols and access
- * dev->enabled_protocols before it has been initialized.
- */
- mutex_lock(&dev->lock);
-
rc = device_add(&dev->dev);
if (rc)
goto out_unlock;
@@ -1440,16 +1448,6 @@ int rc_register_device(struct rc_dev *dev)
dev->input_dev->phys = dev->input_phys;
dev->input_dev->name = dev->input_name;
- /* input_register_device can call ir_open, so unlock mutex here */
- mutex_unlock(&dev->lock);
-
- rc = input_register_device(dev->input_dev);
-
- mutex_lock(&dev->lock);
-
- if (rc)
- goto out_table;
-
/*
* Default delay of 250ms is too short for some protocols, especially
* since the timeout is currently set to 250ms. Increase it to 500ms,
@@ -1465,6 +1463,11 @@ int rc_register_device(struct rc_dev *dev)
*/
dev->input_dev->rep[REP_PERIOD] = 125;
+ /* rc_open will be called here */
+ rc = input_register_device(dev->input_dev);
+ if (rc)
+ goto out_table;
+
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
dev_info(&dev->dev, "%s as %s\n",
dev->input_name ?: "Unspecified device", path ?: "N/A");
@@ -1475,10 +1478,7 @@ int rc_register_device(struct rc_dev *dev)
request_module_nowait("ir-lirc-codec");
raw_init = true;
}
- /* calls ir_register_device so unlock mutex here*/
- mutex_unlock(&dev->lock);
rc = ir_raw_event_register(dev);
- mutex_lock(&dev->lock);
if (rc < 0)
goto out_input;
}
@@ -1491,6 +1491,9 @@ int rc_register_device(struct rc_dev *dev)
dev->enabled_protocols = rc_type;
}
+ /* Allow the RC sysfs nodes to be accessible */
+ mutex_lock(&dev->lock);
+ atomic_set(&dev->initialized, 1);
mutex_unlock(&dev->lock);
IR_dprintk(1, "Registered rc%u (driver: %s, remote: %s, mode %s)\n",
@@ -1512,7 +1515,6 @@ out_table:
out_dev:
device_del(&dev->dev);
out_unlock:
- mutex_unlock(&dev->lock);
ida_simple_remove(&rc_ida, minor);
return rc;
}
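
Instead of holding dev->lock across device_add() and input_register_device() (which may call back into rc_open), registration now runs unlocked and the sysfs handlers bail out until an atomic initialized flag flips at the end. A condensed sketch of that gate, with hypothetical names:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_dev {
	atomic_t initialized;
	struct mutex lock;
};

static ssize_t demo_show(struct demo_dev *dev, char *buf)
{
	/* the sysfs node may be visible before registration is done */
	if (!atomic_read(&dev->initialized))
		return -ERESTARTSYS;

	mutex_lock(&dev->lock);
	/* ... snapshot device state into buf ... */
	mutex_unlock(&dev->lock);
	return 0;
}

static void demo_registration_done(struct demo_dev *dev)
{
	mutex_lock(&dev->lock);
	atomic_set(&dev->initialized, 1);	/* open the gate last */
	mutex_unlock(&dev->lock);
}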
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 40f77685cc4a..eaadc081760a 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -326,6 +326,7 @@ static const struct of_device_id sunxi_ir_match[] = {
{ .compatible = "allwinner,sun5i-a13-ir", },
{},
};
+MODULE_DEVICE_TABLE(of, sunxi_ir_match);
static struct platform_driver sunxi_ir_driver = {
.probe = sunxi_ir_probe,
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index 504bfbc4027a..9f3e0fd4cad9 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -461,13 +461,12 @@ static int m88rs6000t_sleep(struct dvb_frontend *fe)
dev_dbg(&dev->client->dev, "%s:\n", __func__);
ret = regmap_write(dev->regmap, 0x07, 0x6d);
- if (ret)
- goto err;
- usleep_range(5000, 10000);
-err:
- if (ret)
+ if (ret) {
dev_dbg(&dev->client->dev, "failed=%d\n", ret);
- return ret;
+ return ret;
+ }
+ usleep_range(5000, 10000);
+ return 0;
}
static int m88rs6000t_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index a7a8452e99d2..6ab35e315fe7 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -1295,7 +1295,7 @@ static int generic_set_freq(struct dvb_frontend *fe,
v4l2_std_id std, u32 delsys)
{
struct r820t_priv *priv = fe->tuner_priv;
- int rc = -EINVAL;
+ int rc;
u32 lo_freq;
tuner_dbg("should set frequency to %d kHz, bw %d MHz\n",
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 0e1ca2b00e61..243ac3816028 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -364,8 +364,8 @@ static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops si2157_ops = {
.info = {
.name = "Silicon Labs Si2146/2147/2148/2157/2158",
- .frequency_min = 55000000,
- .frequency_max = 862000000,
+ .frequency_min = 42000000,
+ .frequency_max = 870000000,
},
.init = si2157_init,
@@ -403,7 +403,7 @@ err:
}
static int si2157_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct si2157_config *cfg = client->dev.platform_data;
struct dvb_frontend *fe = cfg->fe;
@@ -438,6 +438,31 @@ static int si2157_probe(struct i2c_client *client,
memcpy(&fe->ops.tuner_ops, &si2157_ops, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = client;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ if (cfg->mdev) {
+ dev->mdev = cfg->mdev;
+
+ dev->ent.name = KBUILD_MODNAME;
+ dev->ent.function = MEDIA_ENT_F_TUNER;
+
+ dev->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ dev->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+ dev->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&dev->ent, TUNER_NUM_PADS,
+ &dev->pad[0]);
+
+ if (ret)
+ goto err_kfree;
+
+ ret = media_device_register_entity(cfg->mdev, &dev->ent);
+ if (ret) {
+ media_entity_cleanup(&dev->ent);
+ goto err_kfree;
+ }
+ }
+#endif
+
dev_info(&client->dev, "Silicon Labs %s successfully attached\n",
dev->chiptype == SI2157_CHIPTYPE_SI2146 ?
"Si2146" : "Si2147/2148/2157/2158");
@@ -458,6 +483,14 @@ static int si2157_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
+ /* stop statistics polling */
+ cancel_delayed_work_sync(&dev->stat_work);
+
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (dev->mdev)
+ media_device_unregister_entity(&dev->ent);
+#endif
+
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
kfree(dev);
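
The si2157 probe hunk registers the tuner with the media controller whenever the bridge driver passes a media_device through the config. Condensed to its essentials, as a sketch using the same pad layout (error unwinding trimmed to the two calls that matter):

	#ifdef CONFIG_MEDIA_CONTROLLER
	static int tuner_mc_register(struct media_device *mdev,
				     struct media_entity *ent,
				     struct media_pad *pads)
	{
		int ret;

		ent->name = KBUILD_MODNAME;
		ent->function = MEDIA_ENT_F_TUNER;

		pads[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
		pads[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
		pads[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;

		ret = media_entity_pads_init(ent, TUNER_NUM_PADS, pads);
		if (ret)
			return ret;

		ret = media_device_register_entity(mdev, ent);
		if (ret)
			media_entity_cleanup(ent);	/* undo pads_init */
		return ret;
	}
	#endif
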
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index 4db97ab744d6..5f1a60bf7ced 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -18,6 +18,7 @@
#define SI2157_H
#include <linux/kconfig.h>
+#include <media/media-device.h>
#include "dvb_frontend.h"
/*
@@ -30,6 +31,10 @@ struct si2157_config {
*/
struct dvb_frontend *fe;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_device *mdev;
+#endif
+
/*
* Spectral Inversion
*/
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index ecc463db8f69..589d558d381c 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -18,6 +18,7 @@
#define SI2157_PRIV_H
#include <linux/firmware.h>
+#include <media/v4l2-mc.h>
#include "si2157.h"
/* state struct */
@@ -31,6 +32,13 @@ struct si2157_dev {
u8 if_port;
u32 if_frequency;
struct delayed_work stat_work;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_device *mdev;
+ struct media_entity ent;
+ struct media_pad pad[TUNER_NUM_PADS];
+#endif
+
};
#define SI2157_CHIPTYPE_SI2157 0
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 4e941f00b600..317ef63ee789 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -1403,11 +1403,14 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
* in order to avoid troubles during device release.
*/
kfree(priv->ctrl.fname);
+ priv->ctrl.fname = NULL;
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
if (p->fname) {
priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
- if (priv->ctrl.fname == NULL)
+ if (priv->ctrl.fname == NULL) {
rc = -ENOMEM;
+ goto unlock;
+ }
}
/*
@@ -1439,6 +1442,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
} else
priv->state = XC2028_WAITING_FIRMWARE;
}
+unlock:
mutex_unlock(&priv->lock);
return rc;
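
The xc2028 fix closes two holes in one place: the old fname copy is freed and cleared before memcpy() copies the caller's config over the struct, and an allocation failure now leaves through an unlock label instead of returning with priv->lock held. The shape of the fixed path in isolation (fields as in the hunk):

	kfree(priv->ctrl.fname);		/* drop our previous copy */
	priv->ctrl.fname = NULL;
	memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
	if (p->fname) {
		priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
		if (priv->ctrl.fname == NULL) {
			rc = -ENOMEM;
			goto unlock;	/* must not return with the lock held */
		}
	}
	/* ... */
	unlock:
	mutex_unlock(&priv->lock);
	return rc;
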
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 219ebafae70f..d95c7e082ccf 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1508,7 +1508,7 @@ static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
if (value >= 0x2000) {
value = 0;
} else {
- value = ~value << 3;
+ value = (~value << 3) & 0xffff;
}
goto ret;
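
value is a plain int here, so ~value is negative for the readings this branch handles and the left shift keeps the sign bits set; since the strength is ultimately reported through a u16, masking with 0xffff pins the result into range. A two-line illustration:

	int value = 0x1000;
	u16 strength = (~value << 3) & 0xffff;	/* 0x7ff8; unmasked, the
						 * intermediate int is negative */
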
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 0d4ac5947f3a..87c12930416f 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -104,9 +104,8 @@ struct airspy_frame_buf {
};
struct airspy {
-#define POWER_ON (1 << 1)
-#define URB_BUF (1 << 2)
-#define USB_STATE_URB_BUF (1 << 3)
+#define POWER_ON 1
+#define USB_STATE_URB_BUF 2
unsigned long flags;
struct device *dev;
@@ -359,7 +358,7 @@ static int airspy_submit_urbs(struct airspy *s)
static int airspy_free_stream_bufs(struct airspy *s)
{
- if (s->flags & USB_STATE_URB_BUF) {
+ if (test_bit(USB_STATE_URB_BUF, &s->flags)) {
while (s->buf_num) {
s->buf_num--;
dev_dbg(s->dev, "free buf=%d\n", s->buf_num);
@@ -368,7 +367,7 @@ static int airspy_free_stream_bufs(struct airspy *s)
s->dma_addr[s->buf_num]);
}
}
- s->flags &= ~USB_STATE_URB_BUF;
+ clear_bit(USB_STATE_URB_BUF, &s->flags);
return 0;
}
@@ -394,7 +393,7 @@ static int airspy_alloc_stream_bufs(struct airspy *s)
dev_dbg(s->dev, "alloc buf=%d %p (dma %llu)\n", s->buf_num,
s->buf_list[s->buf_num],
(long long)s->dma_addr[s->buf_num]);
- s->flags |= USB_STATE_URB_BUF;
+ set_bit(USB_STATE_URB_BUF, &s->flags);
}
return 0;
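
The airspy conversion swaps open-coded |= and &= flag updates for the atomic bitops, which also changes the flag macros from masks to bit numbers. A minimal sketch of the idiom:

	#include <linux/bitops.h>

	#define POWER_ON		1	/* bit numbers, not masks */
	#define USB_STATE_URB_BUF	2

	struct foo_state {
		unsigned long flags;		/* bitops need unsigned long */
	};

	static void foo_flags(struct foo_state *s)
	{
		set_bit(USB_STATE_URB_BUF, &s->flags);	/* atomic RMW */
		if (test_bit(USB_STATE_URB_BUF, &s->flags))
			clear_bit(USB_STATE_URB_BUF, &s->flags);
	}
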
diff --git a/drivers/media/usb/as102/as102_drv.h b/drivers/media/usb/as102/as102_drv.h
index aee2d76e8dfc..8def19d9ab92 100644
--- a/drivers/media/usb/as102/as102_drv.h
+++ b/drivers/media/usb/as102/as102_drv.h
@@ -52,7 +52,7 @@ struct as10x_bus_adapter_t {
struct as10x_cmd_t *cmd, *rsp;
/* bus adapter private ops callback */
- struct as102_priv_ops_t *ops;
+ const struct as102_priv_ops_t *ops;
};
struct as102_dev_t {
diff --git a/drivers/media/usb/as102/as102_usb_drv.c b/drivers/media/usb/as102/as102_usb_drv.c
index 3f669066ccf6..0e8030c071b8 100644
--- a/drivers/media/usb/as102/as102_usb_drv.c
+++ b/drivers/media/usb/as102/as102_usb_drv.c
@@ -189,7 +189,7 @@ static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap,
return actual_len;
}
-static struct as102_priv_ops_t as102_priv_ops = {
+static const struct as102_priv_ops_t as102_priv_ops = {
.upload_fw_pkt = as102_send_ep1,
.xfer_cmd = as102_usb_xfer_cmd,
.as102_read_ep2 = as102_read_ep2,
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index ca861aea68a5..6b469e8c4c6e 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -228,10 +228,6 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev)
"au8522", 0x8e >> 1, NULL);
if (sd == NULL)
pr_err("analog subdev registration failed\n");
-#ifdef CONFIG_MEDIA_CONTROLLER
- if (sd)
- dev->decoder = &sd->entity;
-#endif
}
/* Setup tuners */
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 9e29e70a78d7..cc22b32776ad 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -20,6 +20,7 @@
*/
#include "au0828.h"
+#include "au8522.h"
#include <linux/module.h>
#include <linux/slab.h>
@@ -134,7 +135,13 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
{
#ifdef CONFIG_MEDIA_CONTROLLER
- if (dev->media_dev) {
+ if (dev->media_dev &&
+ media_devnode_is_registered(&dev->media_dev->devnode)) {
+ /* clear enable_source, disable_source */
+ dev->media_dev->source_priv = NULL;
+ dev->media_dev->enable_source = NULL;
+ dev->media_dev->disable_source = NULL;
+
media_device_unregister(dev->media_dev);
media_device_cleanup(dev->media_dev);
kfree(dev->media_dev);
@@ -143,7 +150,7 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
#endif
}
-static void au0828_usb_release(struct au0828_dev *dev)
+void au0828_usb_release(struct au0828_dev *dev)
{
au0828_unregister_media_device(dev);
@@ -153,33 +160,6 @@ static void au0828_usb_release(struct au0828_dev *dev)
kfree(dev);
}
-#ifdef CONFIG_VIDEO_AU0828_V4L2
-
-static void au0828_usb_v4l2_media_release(struct au0828_dev *dev)
-{
-#ifdef CONFIG_MEDIA_CONTROLLER
- int i;
-
- for (i = 0; i < AU0828_MAX_INPUT; i++) {
- if (AUVI_INPUT(i).type == AU0828_VMUX_UNDEFINED)
- return;
- media_device_unregister_entity(&dev->input_ent[i]);
- }
-#endif
-}
-
-static void au0828_usb_v4l2_release(struct v4l2_device *v4l2_dev)
-{
- struct au0828_dev *dev =
- container_of(v4l2_dev, struct au0828_dev, v4l2_dev);
-
- v4l2_ctrl_handler_free(&dev->v4l2_ctrl_hdl);
- v4l2_device_unregister(&dev->v4l2_dev);
- au0828_usb_v4l2_media_release(dev);
- au0828_usb_release(dev);
-}
-#endif
-
static void au0828_usb_disconnect(struct usb_interface *interface)
{
struct au0828_dev *dev = usb_get_intfdata(interface);
@@ -192,7 +172,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
Set the status so poll routines can check and avoid
access after disconnect.
*/
- dev->dev_state = DEV_DISCONNECTED;
+ set_bit(DEV_DISCONNECTED, &dev->dev_state);
au0828_rc_unregister(dev);
/* Digital TV */
@@ -202,18 +182,13 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
mutex_lock(&dev->mutex);
dev->usbdev = NULL;
mutex_unlock(&dev->mutex);
-#ifdef CONFIG_VIDEO_AU0828_V4L2
- if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED) {
- au0828_analog_unregister(dev);
- v4l2_device_disconnect(&dev->v4l2_dev);
- v4l2_device_put(&dev->v4l2_dev);
+ if (au0828_analog_unregister(dev)) {
/*
* No need to call au0828_usb_release() if V4L2 is enabled,
* as this is already called via au0828_usb_v4l2_release()
*/
return;
}
-#endif
au0828_usb_release(dev);
}
@@ -227,99 +202,342 @@ static int au0828_media_device_init(struct au0828_dev *dev,
if (!mdev)
return -ENOMEM;
- mdev->dev = &udev->dev;
-
- if (!dev->board.name)
- strlcpy(mdev->model, "unknown au0828", sizeof(mdev->model));
- else
- strlcpy(mdev->model, dev->board.name, sizeof(mdev->model));
- if (udev->serial)
- strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
- strcpy(mdev->bus_info, udev->devpath);
- mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- mdev->driver_version = LINUX_VERSION_CODE;
-
- media_device_init(mdev);
+ /* check if media device is already initialized */
+ if (!mdev->dev)
+ media_device_usb_init(mdev, udev, udev->product);
dev->media_dev = mdev;
#endif
return 0;
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+static void au0828_media_graph_notify(struct media_entity *new,
+ void *notify_data)
+{
+ struct au0828_dev *dev = (struct au0828_dev *) notify_data;
+ int ret;
+ struct media_entity *entity, *mixer = NULL, *decoder = NULL;
+
+ if (!new) {
+ /*
+ * Called during au0828 probe time to connect
+ * entities that were created prior to registering
+ * the notify handler. Find mixer and decoder.
+ */
+ media_device_for_each_entity(entity, dev->media_dev) {
+ if (entity->function == MEDIA_ENT_F_AUDIO_MIXER)
+ mixer = entity;
+ else if (entity->function == MEDIA_ENT_F_ATV_DECODER)
+ decoder = entity;
+ }
+ goto create_link;
+ }
+
+ switch (new->function) {
+ case MEDIA_ENT_F_AUDIO_MIXER:
+ mixer = new;
+ if (dev->decoder)
+ decoder = dev->decoder;
+ break;
+ case MEDIA_ENT_F_ATV_DECODER:
+ /* In case the mixer is added first, find it and create the link */
+ media_device_for_each_entity(entity, dev->media_dev) {
+ if (entity->function == MEDIA_ENT_F_AUDIO_MIXER)
+ mixer = entity;
+ }
+ decoder = new;
+ break;
+ default:
+ break;
+ }
+
+create_link:
+ if (decoder && mixer) {
+ ret = media_create_pad_link(decoder,
+ DEMOD_PAD_AUDIO_OUT,
+ mixer, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ dev_err(&dev->usbdev->dev,
+ "Mixer Pad Link Create Error: %d\n", ret);
+ }
+}
-static int au0828_create_media_graph(struct au0828_dev *dev)
+static int au0828_enable_source(struct media_entity *entity,
+ struct media_pipeline *pipe)
{
-#ifdef CONFIG_MEDIA_CONTROLLER
- struct media_device *mdev = dev->media_dev;
- struct media_entity *entity;
- struct media_entity *tuner = NULL, *decoder = NULL;
- int i, ret;
+ struct media_entity *source, *find_source;
+ struct media_entity *sink;
+ struct media_link *link, *found_link = NULL;
+ int ret = 0;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct au0828_dev *dev;
if (!mdev)
- return 0;
+ return -ENODEV;
- media_device_for_each_entity(entity, mdev) {
- switch (entity->function) {
- case MEDIA_ENT_F_TUNER:
- tuner = entity;
- break;
- case MEDIA_ENT_F_ATV_DECODER:
- decoder = entity;
+ mutex_lock(&mdev->graph_mutex);
+
+ dev = mdev->source_priv;
+
+ /*
+ * For Audio and V4L2 entity, find the link to which decoder
+ * is the sink. Look for an active link between decoder and
+ * source (tuner/s-video/Composite), if one exists, nothing
+ * to do. If not, look for any active links between source
+ * and any other entity. If one exists, source is busy. If
+ * source is free, setup link and start pipeline from source.
+ * For DVB FE entity, the source for the link is the tuner.
+ * Check if tuner is available and setup link and start
+ * pipeline.
+ */
+ if (entity->function == MEDIA_ENT_F_DTV_DEMOD) {
+ sink = entity;
+ find_source = dev->tuner;
+ } else {
+ /* Analog isn't configured or registration failed */
+ if (!dev->decoder) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ sink = dev->decoder;
+
+ /*
+ * Default input is tuner and default input_type
+ * is AU0828_VMUX_TELEVISION.
+ * FIXME:
+ * There is a problem when s_input is called to
+ * change the default input. s_input will try to
+ * enable_source before attempting to change the
+ * input on the device, and will end up enabling
+ * default source which is tuner.
+ *
+ * Additional logic is necessary in au0828
+ * to detect that the input has changed and
+ * enable the right source.
+ */
+
+ if (dev->input_type == AU0828_VMUX_TELEVISION)
+ find_source = dev->tuner;
+ else if (dev->input_type == AU0828_VMUX_SVIDEO ||
+ dev->input_type == AU0828_VMUX_COMPOSITE)
+ find_source = &dev->input_ent[dev->input_type];
+ else {
+ /* unknown input - let user select input */
+ ret = 0;
+ goto end;
+ }
+ }
+
+ /* Is there an active link between sink and source? */
+ if (dev->active_link) {
+ /*
+ * If DVB is using the tuner and calling entity is
+ * audio/video, the following check will be false,
+ * since sink is different. Result is Busy.
+ */
+ if (dev->active_link->sink->entity == sink &&
+ dev->active_link->source->entity == find_source) {
+ /*
+ * Either ALSA or Video own tuner. sink is
+ * the same for both. Prevent Video stepping
+ * on ALSA when ALSA owns the source.
+ */
+ if (dev->active_link_owner != entity &&
+ dev->active_link_owner->function ==
+ MEDIA_ENT_F_AUDIO_CAPTURE) {
+ pr_debug("ALSA has the tuner\n");
+ ret = -EBUSY;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+ } else {
+ ret = -EBUSY;
+ goto end;
+ }
+ }
+
+ list_for_each_entry(link, &sink->links, list) {
+ /* Check sink, and source */
+ if (link->sink->entity == sink &&
+ link->source->entity == find_source) {
+ found_link = link;
break;
}
}
- /* Analog setup, using tuner as a link */
+ if (!found_link) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ /* activate link between source and sink and start pipeline */
+ source = found_link->source->entity;
+ ret = __media_entity_setup_link(found_link, MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ pr_err("Activate tuner link %s->%s. Error %d\n",
+ source->name, sink->name, ret);
+ goto end;
+ }
- /* Something bad happened! */
- if (!decoder)
- return -EINVAL;
+ ret = __media_entity_pipeline_start(entity, pipe);
+ if (ret) {
+ pr_err("Start Pipeline: %s->%s Error %d\n",
+ source->name, entity->name, ret);
+ ret = __media_entity_setup_link(found_link, 0);
+ pr_err("Deactivate link Error %d\n", ret);
+ goto end;
+ }
+ /*
+ * save active link and active link owner to avoid audio
+ * deactivating video owned link from disable_source and
+ * vice versa
+ */
+ dev->active_link = found_link;
+ dev->active_link_owner = entity;
+ dev->active_source = source;
+ dev->active_sink = sink;
+
+ pr_debug("Enabled Source: %s->%s->%s Ret %d\n",
+ dev->active_source->name, dev->active_sink->name,
+ dev->active_link_owner->name, ret);
+end:
+ mutex_unlock(&mdev->graph_mutex);
+ pr_debug("au0828_enable_source() end %s %d %d\n",
+ entity->name, entity->function, ret);
+ return ret;
+}
- if (tuner) {
- ret = media_create_pad_link(tuner, TUNER_PAD_IF_OUTPUT,
- decoder, 0,
- MEDIA_LNK_FL_ENABLED);
+static void au0828_disable_source(struct media_entity *entity)
+{
+ int ret = 0;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct au0828_dev *dev;
+
+ if (!mdev)
+ return;
+
+ mutex_lock(&mdev->graph_mutex);
+ dev = mdev->source_priv;
+
+ if (!dev->active_link) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ /* link is active - stop pipeline from source (tuner) */
+ if (dev->active_link->sink->entity == dev->active_sink &&
+ dev->active_link->source->entity == dev->active_source) {
+ /*
+ * prevent video from deactivating link when audio
+ * has active pipeline
+ */
+ if (dev->active_link_owner != entity)
+ goto end;
+ __media_entity_pipeline_stop(entity);
+ ret = __media_entity_setup_link(dev->active_link, 0);
if (ret)
- return ret;
+ pr_err("Deactivate link Error %d\n", ret);
+
+ pr_debug("Disabled Source: %s->%s->%s Ret %d\n",
+ dev->active_source->name, dev->active_sink->name,
+ dev->active_link_owner->name, ret);
+
+ dev->active_link = NULL;
+ dev->active_link_owner = NULL;
+ dev->active_source = NULL;
+ dev->active_sink = NULL;
}
- ret = media_create_pad_link(decoder, 1, &dev->vdev.entity, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
- return ret;
- ret = media_create_pad_link(decoder, 2, &dev->vbi_dev.entity, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
- return ret;
- for (i = 0; i < AU0828_MAX_INPUT; i++) {
- struct media_entity *ent = &dev->input_ent[i];
+end:
+ mutex_unlock(&mdev->graph_mutex);
+}
+#endif
- if (AUVI_INPUT(i).type == AU0828_VMUX_UNDEFINED)
- break;
+static int au0828_media_device_register(struct au0828_dev *dev,
+ struct usb_device *udev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int ret;
+ struct media_entity *entity, *demod = NULL;
+ struct media_link *link;
+
+ if (!dev->media_dev)
+ return 0;
- switch (AUVI_INPUT(i).type) {
- case AU0828_VMUX_CABLE:
- case AU0828_VMUX_TELEVISION:
- case AU0828_VMUX_DVB:
- if (!tuner)
- break;
-
- ret = media_create_pad_link(ent, 0, tuner,
- TUNER_PAD_RF_INPUT,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
- return ret;
+ if (!media_devnode_is_registered(&dev->media_dev->devnode)) {
+
+ /* register media device */
+ ret = media_device_register(dev->media_dev);
+ if (ret) {
+ dev_err(&udev->dev,
+ "Media Device Register Error: %d\n", ret);
+ return ret;
+ }
+ } else {
+ /*
+ * Call au0828_media_graph_notify() to connect
+ * audio graph to our graph. In this case, audio
+ * driver registered the device and there is no
+ * entity_notify to be called when new entities
+ * are added. Invoke it now.
+ */
+ au0828_media_graph_notify(NULL, (void *) dev);
+ }
+
+ /*
+ * Find tuner, decoder and demod.
+ *
+ * The tuner and decoder should be cached, as they'll be used by
+ * au0828_enable_source.
+ *
+ * It also needs to disable the link between tuner and
+ * decoder/demod, to avoid an extra disable step when the tuner is
+ * requested by video or audio. Note that this step can't be done
+ * until the dvb graph is created during dvb register.
+ */
+ media_device_for_each_entity(entity, dev->media_dev) {
+ switch (entity->function) {
+ case MEDIA_ENT_F_TUNER:
+ dev->tuner = entity;
+ break;
+ case MEDIA_ENT_F_ATV_DECODER:
+ dev->decoder = entity;
break;
- case AU0828_VMUX_COMPOSITE:
- case AU0828_VMUX_SVIDEO:
- default: /* AU0828_VMUX_DEBUG */
- /* FIXME: fix the decoder PAD */
- ret = media_create_pad_link(ent, 0, decoder, 0, 0);
- if (ret)
- return ret;
+ case MEDIA_ENT_F_DTV_DEMOD:
+ demod = entity;
break;
}
}
+
+ /* Disable link between tuner->demod and/or tuner->decoder */
+ if (dev->tuner) {
+ list_for_each_entry(link, &dev->tuner->links, list) {
+ if (demod && link->sink->entity == demod)
+ media_entity_setup_link(link, 0);
+ if (dev->decoder && link->sink->entity == dev->decoder)
+ media_entity_setup_link(link, 0);
+ }
+ }
+
+ /* register entity_notify callback */
+ dev->entity_notify.notify_data = (void *) dev;
+ dev->entity_notify.notify = (void *) au0828_media_graph_notify;
+ ret = media_device_register_entity_notify(dev->media_dev,
+ &dev->entity_notify);
+ if (ret) {
+ dev_err(&udev->dev,
+ "Media Device register entity_notify Error: %d\n",
+ ret);
+ return ret;
+ }
+ /* set enable_source */
+ dev->media_dev->source_priv = (void *) dev;
+ dev->media_dev->enable_source = au0828_enable_source;
+ dev->media_dev->disable_source = au0828_disable_source;
#endif
return 0;
}
@@ -378,32 +596,13 @@ static int au0828_usb_probe(struct usb_interface *interface,
return retval;
}
-#ifdef CONFIG_VIDEO_AU0828_V4L2
- dev->v4l2_dev.release = au0828_usb_v4l2_release;
-
- /* Create the v4l2_device */
-#ifdef CONFIG_MEDIA_CONTROLLER
- dev->v4l2_dev.mdev = dev->media_dev;
-#endif
- retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
- if (retval) {
- pr_err("%s() v4l2_device_register failed\n",
- __func__);
- mutex_unlock(&dev->lock);
- kfree(dev);
- return retval;
- }
- /* This control handler will inherit the controls from au8522 */
- retval = v4l2_ctrl_handler_init(&dev->v4l2_ctrl_hdl, 4);
+ retval = au0828_v4l2_device_register(interface, dev);
if (retval) {
- pr_err("%s() v4l2_ctrl_handler_init failed\n",
- __func__);
+ au0828_usb_v4l2_media_release(dev);
mutex_unlock(&dev->lock);
kfree(dev);
return retval;
}
- dev->v4l2_dev.ctrl_handler = &dev->v4l2_ctrl_hdl;
-#endif
/* Power Up the bridge */
au0828_write(dev, REG_600, 1 << 4);
@@ -417,11 +616,13 @@ static int au0828_usb_probe(struct usb_interface *interface,
/* Setup */
au0828_card_setup(dev);
-#ifdef CONFIG_VIDEO_AU0828_V4L2
/* Analog TV */
- if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED)
- au0828_analog_register(dev, interface);
-#endif
+ retval = au0828_analog_register(dev, interface);
+ if (retval) {
+ pr_err("%s() au0282_dev_register failed to register on V4L2\n",
+ __func__);
+ goto done;
+ }
/* Digital TV */
retval = au0828_dvb_register(dev);
@@ -443,16 +644,7 @@ static int au0828_usb_probe(struct usb_interface *interface,
mutex_unlock(&dev->lock);
- retval = au0828_create_media_graph(dev);
- if (retval) {
- pr_err("%s() au0282_dev_register failed to create graph\n",
- __func__);
- goto done;
- }
-
-#ifdef CONFIG_MEDIA_CONTROLLER
- retval = media_device_register(dev->media_dev);
-#endif
+ retval = au0828_media_device_register(dev, usbdev);
done:
if (retval < 0)
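
The heart of the new au0828 enable_source handler is the ownership check: when the requested pipeline is already active, a second caller gets 0 if it owns the link (or shares it safely) and -EBUSY if ALSA does. Reduced to its essentials (locking and bookkeeping omitted):

	if (dev->active_link) {
		if (dev->active_link->sink->entity == sink &&
		    dev->active_link->source->entity == find_source) {
			/* same pipeline; only ALSA ownership blocks video */
			if (dev->active_link_owner != entity &&
			    dev->active_link_owner->function ==
			    MEDIA_ENT_F_AUDIO_CAPTURE)
				return -EBUSY;
			return 0;
		}
		return -EBUSY;	/* source busy with a different pipeline */
	}
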
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 94363a3ba400..0e174e860614 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -181,7 +181,7 @@ static int stop_urb_transfer(struct au0828_dev *dev)
static int start_urb_transfer(struct au0828_dev *dev)
{
struct urb *purb;
- int i, ret = -ENOMEM;
+ int i, ret;
dprintk(2, "%s()\n", __func__);
@@ -194,7 +194,7 @@ static int start_urb_transfer(struct au0828_dev *dev)
dev->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urbs[i])
- goto err;
+ return -ENOMEM;
purb = dev->urbs[i];
@@ -207,9 +207,10 @@ static int start_urb_transfer(struct au0828_dev *dev)
if (!purb->transfer_buffer) {
usb_free_urb(purb);
dev->urbs[i] = NULL;
+ ret = -ENOMEM;
pr_err("%s: failed big buffer allocation, err = %d\n",
__func__, ret);
- goto err;
+ return ret;
}
purb->status = -EINPROGRESS;
@@ -235,10 +236,7 @@ static int start_urb_transfer(struct au0828_dev *dev)
}
dev->urb_streaming = true;
- ret = 0;
-
-err:
- return ret;
+ return 0;
}
static void au0828_start_transport(struct au0828_dev *dev)
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f067971979..3d6687f0407d 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
bool first = true;
/* do nothing if device is disconnected */
- if (ir->dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
return 0;
/* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
/* do nothing if device is disconnected */
- if (ir->dev->dev_state != DEV_DISCONNECTED) {
+ if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
/* Disable IR */
au8522_rc_clear(ir, 0xe0, 1 << 4);
}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index a13625722848..32d7db96479c 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -28,12 +28,14 @@
*/
#include "au0828.h"
+#include "au8522.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-mc.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include <media/tuner.h>
@@ -104,14 +106,13 @@ static inline void print_err_status(struct au0828_dev *dev,
static int check_dev(struct au0828_dev *dev)
{
- if (dev->dev_state & DEV_DISCONNECTED) {
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
pr_info("v4l2 ioctl: device not present\n");
return -ENODEV;
}
- if (dev->dev_state & DEV_MISCONFIGURED) {
- pr_info("v4l2 ioctl: device is misconfigured; "
- "close and open it again\n");
+ if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
+ pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
return -EIO;
}
return 0;
@@ -519,8 +520,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->dev_state & DEV_DISCONNECTED) ||
- (dev->dev_state & DEV_MISCONFIGURED))
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
+ test_bit(DEV_MISCONFIGURED, &dev->dev_state))
return 0;
if (urb->status < 0) {
@@ -638,61 +639,64 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
return rc;
}
-static int au0828_enable_analog_tuner(struct au0828_dev *dev)
+void au0828_usb_v4l2_media_release(struct au0828_dev *dev)
{
#ifdef CONFIG_MEDIA_CONTROLLER
- struct media_device *mdev = dev->media_dev;
- struct media_entity *source;
- struct media_link *link, *found_link = NULL;
- int ret, active_links = 0;
-
- if (!mdev || !dev->decoder)
- return 0;
+ int i;
- /*
- * This will find the tuner that is connected into the decoder.
- * Technically, this is not 100% correct, as the device may be
- * using an analog input instead of the tuner. However, as we can't
- * do DVB streaming while the DMA engine is being used for V4L2,
- * this should be enough for the actual needs.
- */
- list_for_each_entry(link, &dev->decoder->links, list) {
- if (link->sink->entity == dev->decoder) {
- found_link = link;
- if (link->flags & MEDIA_LNK_FL_ENABLED)
- active_links++;
- break;
- }
+ for (i = 0; i < AU0828_MAX_INPUT; i++) {
+ if (AUVI_INPUT(i).type == AU0828_VMUX_UNDEFINED)
+ return;
+ media_device_unregister_entity(&dev->input_ent[i]);
}
+#endif
+}
- if (active_links == 1 || !found_link)
- return 0;
+static void au0828_usb_v4l2_release(struct v4l2_device *v4l2_dev)
+{
+ struct au0828_dev *dev =
+ container_of(v4l2_dev, struct au0828_dev, v4l2_dev);
+
+ v4l2_ctrl_handler_free(&dev->v4l2_ctrl_hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ au0828_usb_v4l2_media_release(dev);
+ au0828_usb_release(dev);
+}
- source = found_link->source->entity;
- list_for_each_entry(link, &source->links, list) {
- struct media_entity *sink;
- int flags = 0;
+int au0828_v4l2_device_register(struct usb_interface *interface,
+ struct au0828_dev *dev)
+{
+ int retval;
- sink = link->sink->entity;
+ if (AUVI_INPUT(0).type == AU0828_VMUX_UNDEFINED)
+ return 0;
- if (sink == dev->decoder)
- flags = MEDIA_LNK_FL_ENABLED;
+ /* Create the v4l2_device */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->v4l2_dev.mdev = dev->media_dev;
+#endif
+ retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
+ if (retval) {
+ pr_err("%s() v4l2_device_register failed\n",
+ __func__);
+ mutex_unlock(&dev->lock);
+ kfree(dev);
+ return retval;
+ }
- ret = media_entity_setup_link(link, flags);
- if (ret) {
- pr_err(
- "Couldn't change link %s->%s to %s. Error %d\n",
- source->name, sink->name,
- flags ? "enabled" : "disabled",
- ret);
- return ret;
- } else
- au0828_isocdbg(
- "link %s->%s was %s\n",
- source->name, sink->name,
- flags ? "ENABLED" : "disabled");
+ dev->v4l2_dev.release = au0828_usb_v4l2_release;
+
+ /* This control handler will inherit the controls from au8522 */
+ retval = v4l2_ctrl_handler_init(&dev->v4l2_ctrl_hdl, 4);
+ if (retval) {
+ pr_err("%s() v4l2_ctrl_handler_init failed\n",
+ __func__);
+ mutex_unlock(&dev->lock);
+ kfree(dev);
+ return retval;
}
-#endif
+ dev->v4l2_dev.ctrl_handler = &dev->v4l2_ctrl_hdl;
+
return 0;
}
@@ -707,9 +711,6 @@ static int queue_setup(struct vb2_queue *vq,
return sizes[0] < size ? -EINVAL : 0;
*nplanes = 1;
sizes[0] = size;
-
- au0828_enable_analog_tuner(dev);
-
return 0;
}
@@ -822,10 +823,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
int ret = 0;
dev->stream_state = STREAM_INTERRUPT;
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
return -ENODEV;
else if (ret) {
- dev->dev_state = DEV_MISCONFIGURED;
+ set_bit(DEV_MISCONFIGURED, &dev->dev_state);
dprintk(1, "%s device is misconfigured!\n", __func__);
return ret;
}
@@ -949,13 +950,23 @@ static struct vb2_ops au0828_video_qops = {
* au0828_analog_unregister
* unregister v4l2 devices
*/
-void au0828_analog_unregister(struct au0828_dev *dev)
+int au0828_analog_unregister(struct au0828_dev *dev)
{
dprintk(1, "au0828_analog_unregister called\n");
+
+ /* No analog TV */
+ if (AUVI_INPUT(0).type == AU0828_VMUX_UNDEFINED)
+ return 0;
+
mutex_lock(&au0828_sysfs_lock);
video_unregister_device(&dev->vdev);
video_unregister_device(&dev->vbi_dev);
mutex_unlock(&au0828_sysfs_lock);
+
+ v4l2_device_disconnect(&dev->v4l2_dev);
+ v4l2_device_put(&dev->v4l2_dev);
+
+ return 1;
}
/* This function ensures that video frames continue to be delivered even if
@@ -1014,7 +1025,7 @@ static int au0828_v4l2_open(struct file *filp)
int ret;
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -1033,7 +1044,7 @@ static int au0828_v4l2_open(struct file *filp)
au0828_analog_stream_enable(dev);
au0828_analog_stream_reset(dev);
dev->stream_state = STREAM_OFF;
- dev->dev_state |= DEV_INITIALIZED;
+ set_bit(DEV_INITIALIZED, &dev->dev_state);
}
dev->users++;
mutex_unlock(&dev->lock);
@@ -1047,7 +1058,7 @@ static int au0828_v4l2_close(struct file *filp)
struct video_device *vdev = video_devdata(filp);
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -1063,12 +1074,43 @@ static int au0828_v4l2_close(struct file *filp)
del_timer_sync(&dev->vbi_timeout);
}
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
goto end;
if (dev->users == 1) {
- /* Save some power by putting tuner to sleep */
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
+ /*
+ * Avoid putting tuner in sleep if DVB or ALSA are
+ * streaming.
+ *
+ * On most USB devices like au0828 the tuner can
+ * be safely put in sleep state here if ALSA isn't
+ * streaming. Exceptions are some very old USB tuner
+ * models such as em28xx-based WinTV USB2 which have
+ * a separate audio output jack. The devices that have
+ * a separate audio output jack have analog tuners,
+ * like Philips FM1236. Those devices are always on,
+ * so the s_power callback is silently ignored.
+ * So, the current logic here does the following:
+ * Disable (put the tuner to sleep) when
+ * - ALSA and DVB aren't streaming;
+ * - the last V4L2 file handler is closed.
+ *
+ * FIXME:
+ *
+ * Additionally, this logic could be improved to
+ * disable the media source if the above conditions
+ * are met and if the device:
+ * - doesn't have a separate audio out plug, or
+ * - doesn't use a silicon tuner like xc2028/3028/4000/5000.
+ *
+ * Once this additional logic is in place, a callback
+ * is needed to enable the media source and power on
+ * the tuner, for radio to work.
+ */
+ ret = v4l_enable_media_source(vdev);
+ if (ret == 0)
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core,
+ s_power, 0);
dev->std_set_in_tuner_core = 0;
/* When close the device, set the usb intf0 into alt0 to free
@@ -1092,7 +1134,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
.type = V4L2_TUNER_ANALOG_TV,
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (dev->std_set_in_tuner_core)
@@ -1164,7 +1206,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1207,7 +1249,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
f->fmt.pix.width = dev->width;
@@ -1226,7 +1268,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1238,7 +1280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct au0828_dev *dev = video_drvdata(file);
int rc;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
rc = check_dev(dev);
@@ -1260,7 +1302,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (norm == dev->std)
@@ -1292,7 +1334,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*norm = dev->std;
@@ -1312,10 +1354,9 @@ static int vidioc_enum_input(struct file *file, void *priv,
[AU0828_VMUX_CABLE] = "Cable TV",
[AU0828_VMUX_TELEVISION] = "Television",
[AU0828_VMUX_DVB] = "DVB",
- [AU0828_VMUX_DEBUG] = "tv debug"
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
tmp = input->index;
@@ -1345,7 +1386,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*i = dev->ctrl_input;
@@ -1356,7 +1397,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
{
int i;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
switch (AUVI_INPUT(index).type) {
@@ -1375,9 +1416,11 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
default:
dprintk(1, "unknown input type set [%d]\n",
AUVI_INPUT(index).type);
- break;
+ return;
}
+ dev->ctrl_input = index;
+
v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
AUVI_INPUT(index).vmux, 0, 0);
@@ -1409,6 +1452,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
{
struct au0828_dev *dev = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
dprintk(1, "VIDIOC_S_INPUT in function %s, input=%d\n", __func__,
index);
@@ -1416,9 +1460,19 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
return -EINVAL;
if (AUVI_INPUT(index).type == 0)
return -EINVAL;
- dev->ctrl_input = index;
+
+ if (dev->ctrl_input == index)
+ return 0;
+
au0828_s_input(dev, index);
- return 0;
+
+ /*
+ * Input has been changed. Disable the media source
+ * associated with the old input and enable source
+ * for the newly set input
+ */
+ v4l_disable_media_source(vfd);
+ return v4l_enable_media_source(vfd);
}
static int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *a)
@@ -1441,7 +1495,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
a->index = dev->ctrl_ainput;
@@ -1461,7 +1515,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
if (a->index != dev->ctrl_ainput)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return 0;
}
@@ -1469,11 +1523,17 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct au0828_dev *dev = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ int ret;
if (t->index != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strcpy(t->name, "Auvitek tuner");
@@ -1493,7 +1553,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
if (t->index != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1515,7 +1575,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
freq->frequency = dev->ctrl_freq;
return 0;
@@ -1530,7 +1590,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1556,7 +1616,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1582,7 +1642,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
cc->bounds.left = 0;
@@ -1604,7 +1664,7 @@ static int vidioc_g_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
reg->val = au0828_read(dev, reg->reg);
@@ -1617,7 +1677,7 @@ static int vidioc_s_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_writereg(dev, reg->reg, reg->val);
@@ -1804,7 +1864,6 @@ static void au0828_analog_create_entities(struct au0828_dev *dev)
[AU0828_VMUX_CABLE] = "Cable TV",
[AU0828_VMUX_TELEVISION] = "Television",
[AU0828_VMUX_DVB] = "DVB",
- [AU0828_VMUX_DEBUG] = "tv debug"
};
int ret, i;
@@ -1840,10 +1899,9 @@ static void au0828_analog_create_entities(struct au0828_dev *dev)
case AU0828_VMUX_CABLE:
case AU0828_VMUX_TELEVISION:
case AU0828_VMUX_DVB:
+ default: /* Just to shut up a warning */
ent->function = MEDIA_ENT_F_CONN_RF;
break;
- default: /* AU0828_VMUX_DEBUG */
- continue;
}
ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
@@ -1870,6 +1928,10 @@ int au0828_analog_register(struct au0828_dev *dev,
dprintk(1, "au0828_analog_register called for intf#%d!\n",
interface->cur_altsetting->desc.bInterfaceNumber);
+ /* No analog TV */
+ if (AUVI_INPUT(0).type == AU0828_VMUX_UNDEFINED)
+ return 0;
+
/* set au0828 usb interface0 to as5 */
retval = usb_set_interface(dev->usbdev,
interface->cur_altsetting->desc.bInterfaceNumber, 5);
@@ -1924,6 +1986,7 @@ int au0828_analog_register(struct au0828_dev *dev,
dev->ctrl_ainput = 0;
dev->ctrl_freq = 960;
dev->std = V4L2_STD_NTSC_M;
+ /* Default input is TV Tuner */
au0828_s_input(dev, 0);
mutex_init(&dev->vb_queue_lock);
@@ -1976,6 +2039,16 @@ int au0828_analog_register(struct au0828_dev *dev,
goto err_reg_vbi_dev;
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+ retval = v4l2_mc_create_media_graph(dev->media_dev);
+ if (retval) {
+ pr_err("%s() au0282_dev_register failed to create graph\n",
+ __func__);
+ ret = -ENODEV;
+ goto err_reg_vbi_dev;
+ }
+#endif
+
dprintk(1, "%s completed!\n", __func__);
return 0;
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 8276072bc55a..87f32846f1c0 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -76,7 +77,6 @@ enum au0828_itype {
AU0828_VMUX_CABLE,
AU0828_VMUX_TELEVISION,
AU0828_VMUX_DVB,
- AU0828_VMUX_DEBUG
};
struct au0828_input {
@@ -122,9 +122,9 @@ enum au0828_stream_state {
/* device state */
enum au0828_dev_state {
- DEV_INITIALIZED = 0x01,
- DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04
+ DEV_INITIALIZED = 0,
+ DEV_DISCONNECTED = 1,
+ DEV_MISCONFIGURED = 2
};
struct au0828_dev;
@@ -248,7 +248,7 @@ struct au0828_dev {
int input_type;
int std_set_in_tuner_core;
unsigned int ctrl_input;
- enum au0828_dev_state dev_state;
+ unsigned long dev_state; /* defined by enum au0828_dev_state */
enum au0828_stream_state stream_state;
wait_queue_head_t open;
@@ -283,6 +283,12 @@ struct au0828_dev {
struct media_entity *decoder;
struct media_entity input_ent[AU0828_MAX_INPUT];
struct media_pad input_pad[AU0828_MAX_INPUT];
+ struct media_entity_notify entity_notify;
+ struct media_entity *tuner;
+ struct media_link *active_link;
+ struct media_entity *active_link_owner;
+ struct media_entity *active_source;
+ struct media_entity *active_sink;
#endif
};
@@ -301,6 +307,7 @@ struct au0828_dev {
/* au0828-core.c */
extern u32 au0828_read(struct au0828_dev *dev, u16 reg);
extern u32 au0828_write(struct au0828_dev *dev, u16 reg, u32 val);
+extern void au0828_usb_release(struct au0828_dev *dev);
extern int au0828_debug;
/* ----------------------------------------------------------- */
@@ -319,16 +326,29 @@ extern int au0828_i2c_unregister(struct au0828_dev *dev);
/* ----------------------------------------------------------- */
/* au0828-video.c */
-extern int au0828_analog_register(struct au0828_dev *dev,
- struct usb_interface *interface);
-extern void au0828_analog_unregister(struct au0828_dev *dev);
extern int au0828_start_analog_streaming(struct vb2_queue *vq,
unsigned int count);
extern void au0828_stop_vbi_streaming(struct vb2_queue *vq);
#ifdef CONFIG_VIDEO_AU0828_V4L2
+extern int au0828_v4l2_device_register(struct usb_interface *interface,
+ struct au0828_dev *dev);
+
+extern int au0828_analog_register(struct au0828_dev *dev,
+ struct usb_interface *interface);
+extern int au0828_analog_unregister(struct au0828_dev *dev);
+extern void au0828_usb_v4l2_media_release(struct au0828_dev *dev);
extern void au0828_v4l2_suspend(struct au0828_dev *dev);
extern void au0828_v4l2_resume(struct au0828_dev *dev);
#else
+static inline int au0828_v4l2_device_register(struct usb_interface *interface,
+ struct au0828_dev *dev)
+{ return 0; };
+static inline int au0828_analog_register(struct au0828_dev *dev,
+ struct usb_interface *interface)
+{ return 0; };
+static inline int au0828_analog_unregister(struct au0828_dev *dev)
+{ return 0; };
+static inline void au0828_usb_v4l2_media_release(struct au0828_dev *dev) { };
static inline void au0828_v4l2_suspend(struct au0828_dev *dev) { };
static inline void au0828_v4l2_resume(struct au0828_dev *dev) { };
#endif
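
The au0828.h change is what makes the set_bit()/test_bit() conversion earlier in this series correct: the enum now holds bit numbers instead of masks, and dev_state becomes the unsigned long that the bitops expect. It also retires an equality test that silently broke once two bits were set:

	/* old: mask values and an exact-match test */
	dev->dev_state |= DEV_INITIALIZED;		/* 0x01 */
	if (dev->dev_state == DEV_DISCONNECTED)		/* false for 0x03 */
		return;

	/* new: bit numbers and atomic per-bit tests */
	set_bit(DEV_INITIALIZED, &dev->dev_state);
	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
		return;
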
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 0bd969063392..d4bdba60b0f7 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -10,7 +10,7 @@
/* Version information */
#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV USB Driver"
-#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
+#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
/* debug */
#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
index 187012ce444b..0310fd6ed103 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/media/usb/cpia2/cpia2_core.c
@@ -923,7 +923,7 @@ static int apply_vp_patch(struct camera_data *cam)
/* ... followed by the data payload */
for (i = 2; i < fw->size; i += 64) {
cmd.start = 0x0C; /* Data */
- cmd.reg_count = min_t(int, 64, fw->size - i);
+ cmd.reg_count = min_t(uint, 64, fw->size - i);
memcpy(cmd.buffer.block_data, &fw->data[i], cmd.reg_count);
cpia2_send_command(cam, &cmd);
}
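
min_t() casts both operands to the named type before comparing; since fw->size is a size_t, the unsigned variant avoids a signed/unsigned mixup in the clamp. In isolation (names as in the hunk):

	size_t remaining = fw->size - i;	/* the loop keeps i < fw->size */
	unsigned int chunk = min_t(uint, 64, remaining);
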
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 48643b94e694..c9320d6c6131 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1382,6 +1382,8 @@ static int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
buffer_size = urb->actual_length;
buffer = kmalloc(buffer_size, GFP_ATOMIC);
+ if (!buffer)
+ return -ENOMEM;
memcpy(buffer, dma_q->ps_head, 3);
memcpy(buffer+3, p_buffer, buffer_size-3);
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index de4ae5eb4830..a6a9508418f8 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -499,6 +499,11 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
}
dev->adev.users--;
+ if (substream->runtime->dma_area) {
+ dev_dbg(dev->dev, "freeing\n");
+ vfree(substream->runtime->dma_area);
+ substream->runtime->dma_area = NULL;
+ }
mutex_unlock(&dev->lock);
if (dev->adev.users == 0 && dev->adev.shutdown == 1) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 620b83d03f75..c63248a18823 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1216,66 +1216,13 @@ static int cx231xx_media_device_init(struct cx231xx *dev,
if (!mdev)
return -ENOMEM;
- mdev->dev = dev->dev;
- strlcpy(mdev->model, dev->board.name, sizeof(mdev->model));
- if (udev->serial)
- strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
- strcpy(mdev->bus_info, udev->devpath);
- mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- mdev->driver_version = LINUX_VERSION_CODE;
-
- media_device_init(mdev);
+ media_device_usb_init(mdev, udev, dev->board.name);
dev->media_dev = mdev;
#endif
return 0;
}
-static int cx231xx_create_media_graph(struct cx231xx *dev)
-{
-#ifdef CONFIG_MEDIA_CONTROLLER
- struct media_device *mdev = dev->media_dev;
- struct media_entity *entity;
- struct media_entity *tuner = NULL, *decoder = NULL;
- int ret;
-
- if (!mdev)
- return 0;
-
- media_device_for_each_entity(entity, mdev) {
- switch (entity->function) {
- case MEDIA_ENT_F_TUNER:
- tuner = entity;
- break;
- case MEDIA_ENT_F_ATV_DECODER:
- decoder = entity;
- break;
- }
- }
-
- /* Analog setup, using tuner as a link */
-
- if (!decoder)
- return 0;
-
- if (tuner) {
- ret = media_create_pad_link(tuner, TUNER_PAD_IF_OUTPUT, decoder, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret < 0)
- return ret;
- }
- ret = media_create_pad_link(decoder, 1, &dev->vdev.entity, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret < 0)
- return ret;
- ret = media_create_pad_link(decoder, 2, &dev->vbi_dev.entity, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret < 0)
- return ret;
-#endif
- return 0;
-}
-
/*
* cx231xx_init_dev()
* allocates and inits the device structs, registers i2c bus and v4l device
@@ -1739,15 +1686,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
/* load other modules required */
request_modules(dev);
- retval = cx231xx_create_media_graph(dev);
- if (retval < 0)
- goto done;
-
#ifdef CONFIG_MEDIA_CONTROLLER
- retval = media_device_register(dev->media_dev);
-#endif
+ /* Init entities at the Media Controller */
+ cx231xx_v4l2_create_entities(dev);
-done:
+ retval = v4l2_mc_create_media_graph(dev->media_dev);
+ if (!retval)
+ retval = media_device_register(dev->media_dev);
+#endif
if (retval < 0)
cx231xx_release_resources(dev);
return retval;
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index b8d5b2be9293..ab2fb9fa0cd1 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -25,6 +25,7 @@
#include <media/v4l2-common.h>
#include <media/videobuf-vmalloc.h>
+#include <media/tuner.h>
#include "xc5000.h"
#include "s5h1432.h"
@@ -551,7 +552,8 @@ static int register_dvb(struct cx231xx_dvb *dvb,
/* register network adapter */
dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
- result = dvb_create_media_graph(&dvb->adapter, false);
+ result = dvb_create_media_graph(&dvb->adapter,
+ dev->tuner_type == TUNER_ABSENT);
if (result < 0)
goto fail_create_graph;
@@ -801,6 +803,9 @@ static int dvb_init(struct cx231xx *dev)
/* attach tuner */
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = dev->dvb->frontend;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ si2157_config.mdev = dev->media_dev;
+#endif
si2157_config.if_port = 1;
si2157_config.inversion = true;
strlcpy(info.type, "si2157", I2C_NAME_SIZE);
@@ -857,6 +862,9 @@ static int dvb_init(struct cx231xx *dev)
/* attach tuner */
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = dev->dvb->frontend;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ si2157_config.mdev = dev->media_dev;
+#endif
si2157_config.if_port = 1;
si2157_config.inversion = true;
strlcpy(info.type, "si2157", I2C_NAME_SIZE);
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 9b88cd8127ac..6414188ffdfa 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1103,9 +1103,54 @@ static const char *iname[] = {
[CX231XX_VMUX_TELEVISION] = "Television",
[CX231XX_VMUX_CABLE] = "Cable TV",
[CX231XX_VMUX_DVB] = "DVB",
- [CX231XX_VMUX_DEBUG] = "for debug only",
};
+void cx231xx_v4l2_create_entities(struct cx231xx *dev)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ int ret, i;
+
+ /* Create entities for each input connector */
+ for (i = 0; i < MAX_CX231XX_INPUT; i++) {
+ struct media_entity *ent = &dev->input_ent[i];
+
+ if (!INPUT(i)->type)
+ break;
+
+ ent->name = iname[INPUT(i)->type];
+ ent->flags = MEDIA_ENT_FL_CONNECTOR;
+ dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ switch (INPUT(i)->type) {
+ case CX231XX_VMUX_COMPOSITE1:
+ ent->function = MEDIA_ENT_F_CONN_COMPOSITE;
+ break;
+ case CX231XX_VMUX_SVIDEO:
+ ent->function = MEDIA_ENT_F_CONN_SVIDEO;
+ break;
+ case CX231XX_VMUX_TELEVISION:
+ case CX231XX_VMUX_CABLE:
+ case CX231XX_VMUX_DVB:
+ /* The DVB core will handle it */
+ if (dev->tuner_type == TUNER_ABSENT)
+ continue;
+ /* fall through */
+ default: /* just to shut up a gcc warning */
+ ent->function = MEDIA_ENT_F_CONN_RF;
+ break;
+ }
+
+ ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
+ if (ret < 0)
+ pr_err("failed to initialize input pad[%d]!\n", i);
+
+ ret = media_device_register_entity(dev->media_dev, ent);
+ if (ret < 0)
+ pr_err("failed to register input entity %d!\n", i);
+ }
+#endif
+}
+
int cx231xx_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index ec6d3f5bc36d..69f6d20870f5 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -281,7 +281,6 @@ enum cx231xx_itype {
CX231XX_VMUX_CABLE,
CX231XX_RADIO,
CX231XX_VMUX_DVB,
- CX231XX_VMUX_DEBUG
};
enum cx231xx_v_input {
@@ -663,6 +662,8 @@ struct cx231xx {
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_device *media_dev;
struct media_pad video_pad, vbi_pad;
+ struct media_entity input_ent[MAX_CX231XX_INPUT];
+ struct media_pad input_pad[MAX_CX231XX_INPUT];
#endif
unsigned char eedata[256];
@@ -943,6 +944,7 @@ int cx231xx_register_extension(struct cx231xx_ops *dev);
void cx231xx_unregister_extension(struct cx231xx_ops *dev);
void cx231xx_init_extension(struct cx231xx *dev);
void cx231xx_close_extension(struct cx231xx *dev);
+void cx231xx_v4l2_create_entities(struct cx231xx *dev);
int cx231xx_querycap(struct file *file, void *priv,
struct v4l2_capability *cap);
int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 6e02a15d39ce..2638e3251f2a 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -684,7 +684,7 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
if (ret < 0)
goto err;
- if (tmp == 1 || tmp == 3) {
+ if (tmp == 1 || tmp == 3 || tmp == 5) {
/* configure gpioh1, reset & power slave demod */
ret = af9035_wr_reg_mask(d, 0x00d8b0, 0x01, 0x01);
if (ret < 0)
@@ -823,7 +823,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
if (ret < 0)
goto err;
- if (tmp == 1 || tmp == 3)
+ if (tmp == 1 || tmp == 3 || tmp == 5)
state->dual_mode = true;
dev_dbg(&d->udev->dev, "%s: ts mode=%d dual mode=%d\n", __func__,
@@ -2053,6 +2053,8 @@ static const struct usb_device_id af9035_id_table[] = {
&af9035_props, "Avermedia A835B(3835)", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_4835,
&af9035_props, "Avermedia A835B(4835)", RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD110,
+ &af9035_props, "Avermedia AverTV Volar HD 2 (TD110)", RC_MAP_AVERMEDIA_RM_KS) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
&af9035_props, "Avermedia H335", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB499_2T_T09,
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index 416a97f05ec8..df22001f9e41 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -112,9 +112,10 @@ static const u32 clock_lut_it9135[] = {
* 0 TS
* 1 DCA + PIP
* 3 PIP
+ * 5 DCA + PIP
* n DCA
*
- * Values 0 and 3 are seen to this day. 0 for single TS and 3 for dual TS.
+ * Values 0, 3 and 5 are seen to this day. 0 for single TS and 3/5 for dual TS.
*/
#define EEPROM_BASE_AF9035 0x42fd
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 023d91f7e654..35f27e2e4e28 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -1,7 +1,7 @@
/*
* DVB USB framework
*
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
* Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_common.h b/drivers/media/usb/dvb-usb-v2/dvb_usb_common.h
index 45f07090d431..a1622bda2a5e 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_common.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_common.h
@@ -1,7 +1,7 @@
/*
* DVB USB framework
*
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
* Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index f0565bf3673e..3fbb2cd19f5e 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -1,7 +1,7 @@
/*
* DVB USB framework
*
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
* Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
@@ -20,6 +20,7 @@
*/
#include "dvb_usb_common.h"
+#include <media/media-device.h>
static int dvb_usbv2_disable_rc_polling;
module_param_named(disable_rc_polling, dvb_usbv2_disable_rc_polling, int, 0644);
@@ -411,15 +412,7 @@ static int dvb_usbv2_media_device_init(struct dvb_usb_adapter *adap)
if (!mdev)
return -ENOMEM;
- mdev->dev = &udev->dev;
- strlcpy(mdev->model, d->name, sizeof(mdev->model));
- if (udev->serial)
- strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
- strcpy(mdev->bus_info, udev->devpath);
- mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- mdev->driver_version = LINUX_VERSION_CODE;
-
- media_device_init(mdev);
+ media_device_usb_init(mdev, udev, d->name);
dvb_register_media_controller(&adap->dvb_adap, mdev);
@@ -1129,7 +1122,7 @@ int dvb_usbv2_reset_resume(struct usb_interface *intf)
EXPORT_SYMBOL(dvb_usbv2_reset_resume);
MODULE_VERSION("2.0");
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("DVB USB common");
MODULE_LICENSE("GPL");
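Note: the open-coded block removed above is what the new media_device_usb_init() helper consolidates. Judging purely from the deleted lines, a field-for-field sketch looks like this (the helper's real body lives in the media core; strlcpy for bus_info is a defensive substitution for the original strcpy):

static void media_device_usb_init_sketch(struct media_device *mdev,
					 struct usb_device *udev,
					 const char *name)
{
	mdev->dev = &udev->dev;
	strlcpy(mdev->model, name, sizeof(mdev->model));
	if (udev->serial)
		strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
	strlcpy(mdev->bus_info, udev->devpath, sizeof(mdev->bus_info));
	mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
	mdev->driver_version = LINUX_VERSION_CODE;
	media_device_init(mdev);
}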
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
index 22bdce15ecf3..5bafeb6486be 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
@@ -1,7 +1,7 @@
/*
* DVB USB framework
*
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
* Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 1dd962535f97..02dbc6c45423 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -847,10 +847,17 @@ static const struct usb_device_id dvbsky_id_table[] = {
USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI,
&dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI",
RC_MAP_TT_1500) },
+ { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
+ USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2,
+ &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI v1.1",
+ RC_MAP_TT_1500) },
{ DVB_USB_DEVICE(USB_VID_TERRATEC,
USB_PID_TERRATEC_H7_3,
&dvbsky_t680c_props, "Terratec H7 Rev.4",
RC_MAP_TT_1500) },
+ { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R4,
+ &dvbsky_s960_props, "Terratec Cinergy S2 Rev.4",
+ RC_MAP_DVBSKY) },
{ }
};
MODULE_DEVICE_TABLE(usb, dvbsky_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index 84f6de6fa07d..047a32fe43ea 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -507,9 +507,9 @@ static int mxl111sf_demod_read_signal_strength(struct dvb_frontend *fe,
return 0;
}
-static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe)
+static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct mxl111sf_demod_state *state = fe->demodulator_priv;
mxl_dbg("()");
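Note: this is part of a tree-wide change to the .get_frontend demod op visible throughout this series: the DVB core now passes the property cache in as a parameter instead of each driver dereferencing fe->dtv_property_cache itself. Every converted driver follows the same shape (sketch; the state type is hypothetical):

static int example_get_frontend(struct dvb_frontend *fe,
				struct dtv_frontend_properties *p)
{
	struct example_state *state = fe->demodulator_priv; /* hypothetical */

	/* fill *p from cached/readback values rather than touching
	 * fe->dtv_property_cache directly */
	p->frequency = state->frequency;
	return 0;
}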
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 444579be0b77..7d16252dbb71 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -36,7 +36,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
struct mxl111sf_tuner_state {
struct mxl111sf_state *mxl_state;
- struct mxl111sf_tuner_config *cfg;
+ const struct mxl111sf_tuner_config *cfg;
enum mxl_if_freq if_freq;
@@ -489,8 +489,8 @@ static struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
};
struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
- struct mxl111sf_state *mxl_state,
- struct mxl111sf_tuner_config *cfg)
+ struct mxl111sf_state *mxl_state,
+ const struct mxl111sf_tuner_config *cfg)
{
struct mxl111sf_tuner_state *state = NULL;
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index e6caab21a197..509b55071218 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -63,13 +63,13 @@ struct mxl111sf_tuner_config {
#if IS_ENABLED(CONFIG_DVB_USB_MXL111SF)
extern
struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
- struct mxl111sf_state *mxl_state,
- struct mxl111sf_tuner_config *cfg);
+ struct mxl111sf_state *mxl_state,
+ const struct mxl111sf_tuner_config *cfg);
#else
static inline
struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
- struct mxl111sf_state *mxl_state,
- struct mxl111sf_tuner_config *cfg)
+ struct mxl111sf_state *mxl_state,
+ const struct mxl111sf_tuner_config *cfg)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index b669deccc34c..5d676b533a3a 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -856,7 +856,7 @@ static int mxl111sf_ant_hunt(struct dvb_frontend *fe)
return 0;
}
-static struct mxl111sf_tuner_config mxl_tuner_config = {
+static const struct mxl111sf_tuner_config mxl_tuner_config = {
.if_freq = MXL_IF_6_0, /* applies to external IF output, only */
.invert_spectrum = 0,
.read_reg = mxl111sf_read_reg,
@@ -888,7 +888,7 @@ static int mxl111sf_attach_tuner(struct dvb_usb_adapter *adap)
state->tuner.function = MEDIA_ENT_F_TUNER;
state->tuner.name = "mxl111sf tuner";
state->tuner_pads[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
- state->tuner_pads[TUNER_PAD_IF_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->tuner_pads[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&state->tuner,
TUNER_NUM_PADS, state->tuner_pads);
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index eb5787a3191e..fa72642d41f3 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -259,6 +259,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = -EOPNOTSUPP;
}
+ /* Retry failed I2C messages */
+ if (ret == -EPIPE)
+ ret = -EAGAIN;
+
err_mutex_unlock:
mutex_unlock(&d->i2c_mutex);
@@ -619,6 +623,10 @@ static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
}
dev_dbg(&d->intf->dev, "chip_id=%u\n", dev->chip_id);
+ /* Retry failed I2C messages */
+ d->i2c_adap.retries = 1;
+ d->i2c_adap.timeout = msecs_to_jiffies(10);
+
return WARM;
err:
dev_dbg(&d->intf->dev, "failed=%d\n", ret);
@@ -1563,19 +1571,19 @@ static int rtl28xxu_frontend_ctrl(struct dvb_frontend *fe, int onoff)
if (dev->chip_id == CHIP_ID_RTL2831U)
return 0;
- /* control internal demod ADC */
- if (fe->id == 0 && onoff)
- val = 0x48; /* enable ADC */
- else
- val = 0x00; /* disable ADC */
-
- ret = rtl28xxu_wr_reg_mask(d, SYS_DEMOD_CTL, val, 0x48);
- if (ret)
- goto err;
+ if (fe->id == 0) {
+ /* control internal demod ADC */
+ if (onoff)
+ val = 0x48; /* enable ADC */
+ else
+ val = 0x00; /* disable ADC */
- /* bypass slave demod TS through master demod */
- if (fe->id == 1 && onoff) {
- ret = pdata->enable_slave_ts(dev->i2c_client_demod);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_DEMOD_CTL, val, 0x48);
+ if (ret)
+ goto err;
+ } else if (fe->id == 1) {
+ /* bypass slave demod TS through master demod */
+ ret = pdata->slave_ts_ctrl(dev->i2c_client_demod, onoff);
if (ret)
goto err;
}
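Note: the two rtl28xxu hunks above cooperate. Mapping the bridge's -EPIPE (stalled control transfer) to -EAGAIN makes the I2C core's __i2c_transfer() re-issue the message, and identify_state bounds that with adap->retries = 1 and a 10 ms adap->timeout. A condensed sketch of the contract (the one-shot transfer helper is hypothetical):

static int example_master_xfer(struct i2c_adapter *adap,
			       struct i2c_msg *msgs, int num)
{
	int ret = example_i2c_xfer_once(adap, msgs, num); /* hypothetical */

	/* -EAGAIN asks the I2C core to retry, up to adap->retries
	 * times and for no longer than adap->timeout jiffies total */
	if (ret == -EPIPE)
		ret = -EAGAIN;
	return ret;
}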
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index ca8f3c2b1082..55136cde38f5 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -1,6 +1,6 @@
/* usb-urb.c is part of the DVB USB library.
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file keeps functions for initializing and handling the
diff --git a/drivers/media/usb/dvb-usb/a800.c b/drivers/media/usb/dvb-usb/a800.c
index 83684ed023cd..7ba975bea96a 100644
--- a/drivers/media/usb/dvb-usb/a800.c
+++ b/drivers/media/usb/dvb-usb/a800.c
@@ -1,7 +1,7 @@
/* DVB USB framework compliant Linux driver for the AVerMedia AverTV DVB-T
* USB2.0 (A800) DVB-T receiver.
*
- * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* Thanks to
* - AVerMedia who kindly provided information and
@@ -185,7 +185,7 @@ static struct usb_driver a800_driver = {
module_usb_driver(a800_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("AVerMedia AverTV DVB-T USB 2.0 (A800)");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c
index ac97075d75f7..09db3d02bd82 100644
--- a/drivers/media/usb/dvb-usb/af9005-fe.c
+++ b/drivers/media/usb/dvb-usb/af9005-fe.c
@@ -1227,9 +1227,9 @@ static int af9005_fe_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int af9005_fe_get_frontend(struct dvb_frontend *fe)
+static int af9005_fe_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *fep)
{
- struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct af9005_fe_state *state = fe->demodulator_priv;
int ret;
u8 temp;
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index ab7151181728..907ac01ae297 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -13,7 +13,7 @@
*
* TODO: Use the cx25840-driver for the analogue part
*
- * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
* Copyright (C) 2006 Michael Krufky (mkrufky@linuxtv.org)
* Copyright (C) 2006, 2007 Chris Pascoe (c.pascoe@itee.uq.edu.au)
*
@@ -2314,7 +2314,7 @@ static struct usb_driver cxusb_driver = {
module_usb_driver(cxusb_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
MODULE_DESCRIPTION("Driver for Conexant USB2.0 hybrid reference design");
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 0d248ce02a9b..c16f999b9d7c 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -881,7 +881,7 @@ static struct usb_driver dib0700_driver = {
module_usb_driver(dib0700_driver);
MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 7ed49646a699..ea0391e32d23 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -1736,8 +1736,13 @@ static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
struct dib0700_adapter_state *st = adap->priv;
struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
- if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
- return -ENODEV;
+ if (adap->id == 0) {
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+ return -ENODEV;
+ } else {
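+ /* note: both branches currently attach the same tuner config to fe_adap[0] */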
+ if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+ return -ENODEV;
+ }
st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override;
@@ -1773,6 +1778,20 @@ static int stk809x_frontend_attach(struct dvb_usb_adapter *adap)
return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
}
+static int stk809x_frontend1_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *state = adap->priv;
+
+ if (!dvb_attach(dib8000_attach, &state->dib8000_ops))
+ return -ENODEV;
+
+ state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x82, 0);
+
+ adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]);
+
+ return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
+}
+
static int nim8096md_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dib0700_adapter_state *st = adap->priv;
@@ -3794,6 +3813,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
/* 80 */{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT_2) },
{ USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_2002E) },
{ USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_2002E_SE) },
+ { USB_DEVICE(USB_VID_PCTV, USB_PID_DIBCOM_STK8096PVR) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -4959,6 +4979,59 @@ struct dvb_usb_device_properties dib0700_devices[] = {
RC_BIT_NEC,
.change_protocol = dib0700_change_protocol,
},
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 2,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk80xx_pid_filter,
+ .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
+ .frontend_attach = stk809x_frontend_attach,
+ .tuner_attach = dib809x_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ } },
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ }, {
+ .num_frontends = 1,
+ .fe = { {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk80xx_pid_filter,
+ .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
+ .frontend_attach = stk809x_frontend1_attach,
+ .tuner_attach = dib809x_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+ } },
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom STK8096-PVR reference design",
+ { &dib0700_usb_id_table[83], NULL },
+ { NULL },
+ },
+ },
+
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_BIT_RC5 |
+ RC_BIT_RC6_MCE |
+ RC_BIT_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
},
};
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index ef3a8f75f82e..35de6095926d 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -1,6 +1,6 @@
/* Common methods for dibusb-based-receivers.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c
index a4ac37e0e98b..a0057641cc86 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mb.c
@@ -1,10 +1,10 @@
/* DVB USB compliant linux driver for mobile DVB-T USB devices based on
* reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-B)
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* based on GPL code from DiBcom, which has
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -465,7 +465,7 @@ static struct usb_driver dibusb_driver = {
module_usb_driver(dibusb_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for DiBcom USB DVB-T devices (DiB3000M-B based)");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc.c b/drivers/media/usb/dvb-usb/dibusb-mc.c
index 9d1a59d09c52..08fb8a3f6e0c 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc.c
@@ -1,10 +1,10 @@
/* DVB USB compliant linux driver for mobile DVB-T USB devices based on
* reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-C/P)
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* based on GPL code from DiBcom, which has
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -143,7 +143,7 @@ static struct usb_driver dibusb_mc_driver = {
module_usb_driver(dibusb_mc_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for DiBcom USB2.0 DVB-T (DiB3000M-C/P based) devices");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h
index 32ab1392313f..3f82163d8ab8 100644
--- a/drivers/media/usb/dvb-usb/dibusb.h
+++ b/drivers/media/usb/dvb-usb/dibusb.h
@@ -1,6 +1,6 @@
/* Header file for all dibusb-based-receivers.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 772bde3c5020..63134335c994 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -1,7 +1,7 @@
/* DVB USB compliant linux driver for Nebula Electronics uDigiTV DVB-T USB2.0
* receiver
*
- * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* partly based on the SDK published by Nebula Electronics
*
@@ -348,7 +348,7 @@ static struct usb_driver digitv_driver = {
module_usb_driver(digitv_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for Nebula Electronics uDigiTV DVB-T USB2.0");
MODULE_VERSION("1.0-alpha");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index 8637ad1be6be..c09332bd99cb 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -1,7 +1,7 @@
/* Frontend part of the Linux driver for the WideView/ Yakumo/ Hama/
* Typhoon/ Yuan DVB-T USB2.0 receiver.
*
- * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -140,10 +140,11 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static int dtt200u_fe_get_frontend(struct dvb_frontend* fe)
+static int dtt200u_fe_get_frontend(struct dvb_frontend* fe,
+ struct dtv_frontend_properties *fep)
{
- struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dtt200u_fe_state *state = fe->demodulator_priv;
+
memcpy(fep, &state->fep, sizeof(struct dtv_frontend_properties));
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index c357fb3b0a88..ca3b69aa9688 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -1,7 +1,7 @@
/* DVB USB library compliant Linux driver for the WideView/ Yakumo/ Hama/
* Typhoon/ Yuan/ Miglia DVB-T USB2.0 receiver.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* Thanks to Steve Chang from WideView for providing support for the WT-220U.
*
@@ -362,7 +362,7 @@ static struct usb_driver dtt200u_usb_driver = {
module_usb_driver(dtt200u_usb_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for the WideView/Yakumo/Hama/Typhoon/Club3D/Miglia DVB-T USB2.0 devices");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/dtt200u.h b/drivers/media/usb/dvb-usb/dtt200u.h
index 005b0a7df358..efccc399b1cb 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.h
+++ b/drivers/media/usb/dvb-usb/dtt200u.h
@@ -1,7 +1,7 @@
/* Common header file of Linux driver for the WideView/ Yakumo/ Hama/
* Typhoon/ Yuan DVB-T USB2.0 receiver.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-common.h b/drivers/media/usb/dvb-usb/dvb-usb-common.h
index 6b7b2a89242e..7e619d638809 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-common.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb-common.h
@@ -1,6 +1,6 @@
/* dvb-usb-common.h is part of the DVB USB library.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* a header file containing prototypes and types for internal use of the dvb-usb-lib
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
index 9ddfcab268be..6477b04e95c7 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
@@ -1,12 +1,13 @@
/* dvb-usb-dvb.c is part of the DVB USB library.
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file contains functions for initializing and handling the
* linux-dvb API.
*/
#include "dvb-usb-common.h"
+#include <media/media-device.h>
/* does the complete input transfer handling */
static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
@@ -106,15 +107,7 @@ static int dvb_usb_media_device_init(struct dvb_usb_adapter *adap)
if (!mdev)
return -ENOMEM;
- mdev->dev = &udev->dev;
- strlcpy(mdev->model, d->desc->name, sizeof(mdev->model));
- if (udev->serial)
- strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
- strcpy(mdev->bus_info, udev->devpath);
- mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- mdev->driver_version = LINUX_VERSION_CODE;
-
- media_device_init(mdev);
+ media_device_usb_init(mdev, udev, d->desc->name);
dvb_register_media_controller(&adap->dvb_adap, mdev);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index 733a7ff7b207..dd048a7c461c 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -1,6 +1,6 @@
/* dvb-usb-firmware.c is part of the DVB USB library.
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file contains functions for downloading the firmware to Cypress FX 1 and 2 based devices.
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
index 88e4a62abc44..4f0b0adce7f5 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
@@ -1,6 +1,6 @@
/* dvb-usb-i2c.c is part of the DVB USB library.
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file contains functions for (de-)initializing an I2C adapter.
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 1adf325012f7..3896ba9a4179 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -3,7 +3,7 @@
*
* dvb-usb-init.c
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -299,6 +299,6 @@ void dvb_usb_device_exit(struct usb_interface *intf)
EXPORT_SYMBOL(dvb_usb_device_exit);
MODULE_VERSION("1.0");
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("A library module containing commonly used USB and DVB function USB DVB devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-remote.c b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
index 7b5dae3077f6..c259f9e43542 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
@@ -1,6 +1,6 @@
/* dvb-usb-remote.c is part of the DVB USB library.
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file contains functions for initializing the input-device and for handling remote-control-queries.
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-urb.c b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
index 5c8f651344fc..95f9097498cb 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
@@ -1,6 +1,6 @@
/* dvb-usb-urb.c is part of the DVB USB library.
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file keeps functions for initializing and handling the
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index ce4c4e3b58bb..639c4678c65b 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -1,6 +1,6 @@
/* dvb-usb.h is part of the DVB USB library.
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* the headerfile, all dvb-usb-drivers have to include.
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 14ef25dc6cd3..6d0dd859d684 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -1,9 +1,10 @@
/* DVB USB framework compliant Linux driver for the
* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
- * TeVii S600, S630, S650, S660, S480, S421, S632
+ * TeVii S421, S480, S482, S600, S630, S632, S650, S660, S662,
* Prof 1100, 7500,
* Geniatech SU3000, T220,
- * TechnoTrend S2-4600 Cards
+ * TechnoTrend S2-4600,
+ * Terratec Cinergy S2 cards
* Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify it
@@ -33,7 +34,6 @@
#include "tda18271.h"
#include "cxd2820r.h"
#include "m88ds3103.h"
-#include "ts2020.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
@@ -66,6 +66,10 @@
#define USB_PID_TEVII_S660 0xd660
#endif
+#ifndef USB_PID_TEVII_S662
+#define USB_PID_TEVII_S662 0xd662
+#endif
+
#ifndef USB_PID_TEVII_S480_1
#define USB_PID_TEVII_S480_1 0xd481
#endif
@@ -118,6 +122,7 @@
struct dw2102_state {
u8 initialized;
u8 last_lock;
+ struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
/* fe hook functions*/
@@ -1141,22 +1146,6 @@ static struct tda18271_config tda18271_config = {
.gate = TDA18271_GATE_DIGITAL,
};
-static const struct m88ds3103_config tt_s2_4600_m88ds3103_config = {
- .i2c_addr = 0x68,
- .clock = 27000000,
- .i2c_wr_max = 33,
- .ts_mode = M88DS3103_TS_CI,
- .ts_clk = 16000,
- .ts_clk_pol = 0,
- .spec_inv = 0,
- .agc_inv = 0,
- .clock_out = M88DS3103_CLOCK_OUT_ENABLED,
- .envelope_mode = 0,
- .agc = 0x99,
- .lnb_hv_pol = 1,
- .lnb_en_pol = 0,
-};
-
static u8 m88rs2000_inittab[] = {
DEMOD_WRITE, 0x9a, 0x30,
DEMOD_WRITE, 0x00, 0x01,
@@ -1509,7 +1498,8 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
u8 ibuf[] = { 0 };
struct i2c_adapter *i2c_adapter;
struct i2c_client *client;
- struct i2c_board_info info;
+ struct i2c_board_info board_info;
+ struct m88ds3103_platform_data m88ds3103_pdata = {};
struct ts2020_config ts2020_config = {};
if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
@@ -1542,22 +1532,44 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
err("command 0x51 transfer failed.");
- memset(&info, 0, sizeof(struct i2c_board_info));
-
- adap->fe_adap[0].fe = dvb_attach(m88ds3103_attach,
- &tt_s2_4600_m88ds3103_config,
- &d->i2c_adap,
- &i2c_adapter);
- if (adap->fe_adap[0].fe == NULL)
+ /* attach demod */
+ m88ds3103_pdata.clk = 27000000;
+ m88ds3103_pdata.i2c_wr_max = 33;
+ m88ds3103_pdata.ts_mode = M88DS3103_TS_CI;
+ m88ds3103_pdata.ts_clk = 16000;
+ m88ds3103_pdata.ts_clk_pol = 0;
+ m88ds3103_pdata.spec_inv = 0;
+ m88ds3103_pdata.agc = 0x99;
+ m88ds3103_pdata.agc_inv = 0;
+ m88ds3103_pdata.clk_out = M88DS3103_CLOCK_OUT_ENABLED;
+ m88ds3103_pdata.envelope_mode = 0;
+ m88ds3103_pdata.lnb_hv_pol = 1;
+ m88ds3103_pdata.lnb_en_pol = 0;
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "m88ds3103", I2C_NAME_SIZE);
+ board_info.addr = 0x68;
+ board_info.platform_data = &m88ds3103_pdata;
+ request_module("m88ds3103");
+ client = i2c_new_device(&d->i2c_adap, &board_info);
+ if (client == NULL || client->dev.driver == NULL)
return -ENODEV;
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ return -ENODEV;
+ }
+ adap->fe_adap[0].fe = m88ds3103_pdata.get_dvb_frontend(client);
+ i2c_adapter = m88ds3103_pdata.get_i2c_adapter(client);
+
+ state->i2c_client_demod = client;
/* attach tuner */
ts2020_config.fe = adap->fe_adap[0].fe;
- strlcpy(info.type, "ts2022", I2C_NAME_SIZE);
- info.addr = 0x60;
- info.platform_data = &ts2020_config;
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "ts2022", I2C_NAME_SIZE);
+ board_info.addr = 0x60;
+ board_info.platform_data = &ts2020_config;
request_module("ts2020");
- client = i2c_new_device(i2c_adapter, &info);
+ client = i2c_new_device(i2c_adapter, &board_info);
if (client == NULL || client->dev.driver == NULL) {
dvb_frontend_detach(adap->fe_adap[0].fe);
@@ -1688,6 +1700,8 @@ enum dw2102_table_entry {
TECHNOTREND_S2_4600,
TEVII_S482_1,
TEVII_S482_2,
+ TERRATEC_CINERGY_S2_BOX,
+ TEVII_S662
};
static struct usb_device_id dw2102_table[] = {
@@ -1702,19 +1716,21 @@ static struct usb_device_id dw2102_table[] = {
[TEVII_S660] = {USB_DEVICE(0x9022, USB_PID_TEVII_S660)},
[PROF_7500] = {USB_DEVICE(0x3034, 0x7500)},
[GENIATECH_SU3000] = {USB_DEVICE(0x1f4d, 0x3000)},
- [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00a8)},
+ [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R1)},
[TEVII_S480_1] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)},
[TEVII_S480_2] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)},
[X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)},
[TEVII_S421] = {USB_DEVICE(0x9022, USB_PID_TEVII_S421)},
[TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
- [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00b0)},
+ [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R2)},
[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
[GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
[TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND,
USB_PID_TECHNOTREND_CONNECT_S2_4600)},
[TEVII_S482_1] = {USB_DEVICE(0x9022, 0xd483)},
[TEVII_S482_2] = {USB_DEVICE(0x9022, 0xd484)},
+ [TERRATEC_CINERGY_S2_BOX] = {USB_DEVICE(USB_VID_TERRATEC, 0x0105)},
+ [TEVII_S662] = {USB_DEVICE(0x9022, USB_PID_TEVII_S662)},
{ }
};
@@ -2232,7 +2248,7 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
} },
}
},
- .num_device_descs = 3,
+ .num_device_descs = 5,
.devices = {
{ "TechnoTrend TT-connect S2-4600",
{ &dw2102_table[TECHNOTREND_S2_4600], NULL },
@@ -2246,6 +2262,14 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
{ &dw2102_table[TEVII_S482_2], NULL },
{ NULL },
},
+ { "Terratec Cinergy S2 USB BOX",
+ { &dw2102_table[TERRATEC_CINERGY_S2_BOX], NULL },
+ { NULL },
+ },
+ { "TeVii S662",
+ { &dw2102_table[TEVII_S662], NULL },
+ { NULL },
+ },
}
};
@@ -2344,6 +2368,13 @@ static void dw2102_disconnect(struct usb_interface *intf)
i2c_unregister_device(client);
}
+ /* remove I2C client for demodulator */
+ client = st->i2c_client_demod;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
dvb_usb_device_exit(intf);
}
@@ -2359,10 +2390,10 @@ module_usb_driver(dw2102_driver);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
" DVB-C 3101 USB2.0,"
- " TeVii S600, S630, S650, S660, S480, S421, S632"
- " Prof 1100, 7500 USB2.0,"
+ " TeVii S421, S480, S482, S600, S630, S632, S650,"
+ " TeVii S660, S662, Prof 1100, 7500 USB2.0,"
" Geniatech SU3000, T220,"
- " TechnoTrend S2-4600 devices");
+ " TechnoTrend S2-4600, Terratec Cinergy S2 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DW2101_FIRMWARE);
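Note: the converted attach path above replaces dvb_attach(m88ds3103_attach, ...) with the I2C client binding model: platform data carried in an i2c_board_info, request_module() to load the driver, i2c_new_device() to bind, try_module_get() to pin the module, and the frontend plus the mux adapter fetched through the platform-data callbacks (the ts2022 tuner sits behind the demod's I2C gate, hence the second adapter). The teardown added to dw2102_disconnect() releases in reverse. Condensed pairing, values from the hunks above, error handling trimmed:

/* bind */
request_module("m88ds3103");
client = i2c_new_device(&d->i2c_adap, &board_info);	/* addr 0x68 */
if (client == NULL || client->dev.driver == NULL)
	return -ENODEV;
if (!try_module_get(client->dev.driver->owner)) {	/* pin the module */
	i2c_unregister_device(client);
	return -ENODEV;
}
adap->fe_adap[0].fe = m88ds3103_pdata.get_dvb_frontend(client);
i2c_adapter = m88ds3103_pdata.get_i2c_adapter(client);

/* unbind, in dw2102_disconnect() */
module_put(client->dev.driver->owner);
i2c_unregister_device(client);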
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 8ec92fbeabad..979f05b4b87c 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -283,20 +283,6 @@ static int jdvbt90502_set_property(struct dvb_frontend *fe,
return r;
}
-static int jdvbt90502_get_frontend(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- p->inversion = INVERSION_AUTO;
- p->bandwidth_hz = 6000000;
- p->code_rate_HP = FEC_AUTO;
- p->code_rate_LP = FEC_AUTO;
- p->modulation = QAM_64;
- p->transmission_mode = TRANSMISSION_MODE_AUTO;
- p->guard_interval = GUARD_INTERVAL_AUTO;
- p->hierarchy = HIERARCHY_AUTO;
- return 0;
-}
-
static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
@@ -312,8 +298,16 @@ static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
deb_fe("%s: Freq:%d\n", __func__, p->frequency);
- /* for recovery from DTV_CLEAN */
- fe->dtv_property_cache.delivery_system = SYS_ISDBT;
+ /* This driver only works in auto mode */
+ p->inversion = INVERSION_AUTO;
+ p->bandwidth_hz = 6000000;
+ p->code_rate_HP = FEC_AUTO;
+ p->code_rate_LP = FEC_AUTO;
+ p->modulation = QAM_64;
+ p->transmission_mode = TRANSMISSION_MODE_AUTO;
+ p->guard_interval = GUARD_INTERVAL_AUTO;
+ p->hierarchy = HIERARCHY_AUTO;
+ p->delivery_system = SYS_ISDBT;
ret = jdvbt90502_pll_set_freq(state, p->frequency);
if (ret) {
@@ -466,7 +460,6 @@ static struct dvb_frontend_ops jdvbt90502_ops = {
.set_property = jdvbt90502_set_property,
.set_frontend = jdvbt90502_set_frontend,
- .get_frontend = jdvbt90502_get_frontend,
.read_status = jdvbt90502_read_status,
.read_signal_strength = jdvbt90502_read_signal_strength,
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index 6c55384e2fca..fc7569e2728d 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -1,7 +1,7 @@
/* DVB USB framework compliant Linux driver for the Hauppauge WinTV-NOVA-T usb2
* DVB-T receiver.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -227,7 +227,7 @@ static struct usb_driver nova_t_driver = {
module_usb_driver(nova_t_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Hauppauge WinTV-NOVA-T usb2");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 6c3c47722955..d9f3262bf071 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -60,6 +60,8 @@ struct technisat_usb2_state {
u8 power_state;
u16 last_scan_code;
+
+ u8 buf[64];
};
/* debug print helpers */
@@ -220,19 +222,19 @@ enum technisat_usb2_led_state {
TECH_LED_UNDEFINED
};
-static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum technisat_usb2_led_state state)
+static int technisat_usb2_set_led(struct dvb_usb_device *d, int red,
+ enum technisat_usb2_led_state st)
{
+ struct technisat_usb2_state *state = d->priv;
+ u8 *led = state->buf;
int ret;
- u8 led[8] = {
- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
- 0
- };
+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
- if (disable_led_control && state != TECH_LED_OFF)
+ if (disable_led_control && st != TECH_LED_OFF)
return 0;
- switch (state) {
+ switch (st) {
case TECH_LED_ON:
led[1] = 0x82;
break;
@@ -263,7 +265,7 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
USB_TYPE_VENDOR | USB_DIR_OUT,
0, 0,
- led, sizeof(led), 500);
+ led, 8, 500);
mutex_unlock(&d->i2c_mutex);
return ret;
@@ -271,8 +273,11 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
{
+ struct technisat_usb2_state *state = d->priv;
+ u8 *b = state->buf;
int ret;
- u8 b = 0;
+
+ b[0] = 0;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
@@ -281,7 +286,7 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
USB_TYPE_VENDOR | USB_DIR_OUT,
(red << 8) | green, 0,
- &b, 1, 500);
+ b, 1, 500);
mutex_unlock(&d->i2c_mutex);
@@ -328,7 +333,11 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
struct dvb_usb_device_description **desc, int *cold)
{
int ret;
- u8 version[3];
+ u8 *version;
+
+ version = kmalloc(3, GFP_KERNEL);
+ if (!version)
+ return -ENOMEM;
/* first select the interface */
if (usb_set_interface(udev, 0, 1) != 0)
@@ -342,7 +351,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
GET_VERSION_INFO_VENDOR_REQUEST,
USB_TYPE_VENDOR | USB_DIR_IN,
0, 0,
- version, sizeof(version), 500);
+ version, 3, 500);
if (ret < 0)
*cold = 1;
@@ -351,6 +360,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
*cold = 0;
}
+ kfree(version);
+
return 0;
}
@@ -512,7 +523,7 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
&a->dev->i2c_adap, STV090x_DEMODULATOR_0);
if (a->fe_adap[0].fe) {
- struct stv6110x_devctl *ctl;
+ const struct stv6110x_devctl *ctl;
ctl = dvb_attach(stv6110x_attach,
a->fe_adap[0].fe,
@@ -594,7 +605,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
static int technisat_usb2_get_ir(struct dvb_usb_device *d)
{
- u8 buf[62], *b;
+ struct technisat_usb2_state *state = d->priv;
+ u8 *buf = state->buf;
+ u8 *b;
int ret;
struct ir_raw_event ev;
@@ -620,7 +633,7 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
GET_IR_DATA_VENDOR_REQUEST,
USB_TYPE_VENDOR | USB_DIR_IN,
0x8080, 0,
- buf, sizeof(buf), 500);
+ buf, 62, 500);
unlock:
mutex_unlock(&d->i2c_mutex);
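Note: the technisat hunks above all serve one rule: a buffer handed to usb_control_msg() may be DMA-mapped, so it must come from kmalloc()/device state, never from the stack. That is why the fixed-size stack arrays move into the kmalloc'd state->buf and the version probe into a kmalloc'd temporary. Minimal sketch of the pattern (the request value 0x33 is hypothetical):

static int example_vendor_read(struct usb_device *udev)
{
	u8 *buf = kmalloc(3, GFP_KERNEL);	/* DMA-safe, heap-allocated */
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x33 /* hypothetical request */,
			      USB_TYPE_VENDOR | USB_DIR_IN,
			      0, 0, buf, 3, 500);
	kfree(buf);
	return ret < 0 ? ret : 0;
}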
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index f10717311e05..ecc207fbaf3c 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -820,7 +820,7 @@ static struct usb_driver ttusb2_driver = {
module_usb_driver(ttusb2_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for Pinnacle PCTV 400e DVB-S USB2.0");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/umt-010.c b/drivers/media/usb/dvb-usb/umt-010.c
index 9b042292e788..58ad5b4f856c 100644
--- a/drivers/media/usb/dvb-usb/umt-010.c
+++ b/drivers/media/usb/dvb-usb/umt-010.c
@@ -1,7 +1,7 @@
/* DVB USB framework compliant Linux driver for the HanfTek UMT-010 USB2.0
* DVB-T receiver.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -145,7 +145,7 @@ static struct usb_driver umt_driver = {
module_usb_driver(umt_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/usb-urb.c b/drivers/media/usb/dvb-usb/usb-urb.c
index d62ee0f5a165..89173603be67 100644
--- a/drivers/media/usb/dvb-usb/usb-urb.c
+++ b/drivers/media/usb/dvb-usb/usb-urb.c
@@ -1,6 +1,6 @@
/* usb-urb.c is part of the DVB USB library.
*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
*
* This file keeps functions for initializing and handling the
diff --git a/drivers/media/usb/dvb-usb/vp702x-fe.c b/drivers/media/usb/dvb-usb/vp702x-fe.c
index d361a72ca0fa..27398c08c69d 100644
--- a/drivers/media/usb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/usb/dvb-usb/vp702x-fe.c
@@ -4,7 +4,7 @@
* Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de>
* Metzler Brothers Systementwicklung GbR
*
- * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
*
* Thanks to Twinhan who kindly provided hardware and information.
*
diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
index ee1e19e36445..40de33de90a7 100644
--- a/drivers/media/usb/dvb-usb/vp702x.c
+++ b/drivers/media/usb/dvb-usb/vp702x.c
@@ -4,7 +4,7 @@
* Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de>
* Metzler Brothers Systementwicklung GbR
*
- * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
*
* Thanks to Twinhan who kindly provided hardware and information.
*
@@ -439,7 +439,7 @@ static struct usb_driver vp702x_usb_driver = {
module_usb_driver(vp702x_usb_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for Twinhan StarBox DVB-S USB2.0 and clones");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/vp7045-fe.c b/drivers/media/usb/dvb-usb/vp7045-fe.c
index e708afc6a57f..7765602ea658 100644
--- a/drivers/media/usb/dvb-usb/vp7045-fe.c
+++ b/drivers/media/usb/dvb-usb/vp7045-fe.c
@@ -1,7 +1,7 @@
/* DVB frontend part of the Linux driver for TwinhanDTV Alpha/MagicBoxII USB2.0
* DVB-T receiver.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* Thanks to Twinhan who kindly provided hardware and information.
*
diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c
index d750724132ee..13340af0d39c 100644
--- a/drivers/media/usb/dvb-usb/vp7045.c
+++ b/drivers/media/usb/dvb-usb/vp7045.c
@@ -2,7 +2,7 @@
* - TwinhanDTV Alpha/MagicBoxII USB2.0 DVB-T receiver
* - DigitalNow TinyUSB2 DVB-t receiver
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* Thanks to Twinhan who kindly provided hardware and information.
*
@@ -296,7 +296,7 @@ static struct usb_driver vp7045_usb_driver = {
module_usb_driver(vp7045_usb_driver);
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for Twinhan MagicBox/Alpha and DNTV tinyUSB2 DVB-T USB2.0");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/vp7045.h b/drivers/media/usb/dvb-usb/vp7045.h
index cf5ec46f8bb1..66499932ca76 100644
--- a/drivers/media/usb/dvb-usb/vp7045.h
+++ b/drivers/media/usb/dvb-usb/vp7045.h
@@ -1,7 +1,7 @@
/* Common header-file of the Linux driver for the TwinhanDTV Alpha/MagicBoxII
* USB2.0 DVB-T receiver.
*
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
*
* Thanks to Twinhan who kindly provided hardware and information.
*
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index b58acd3fcd99..72f3f4d50253 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -64,6 +64,8 @@ static int em28xx_initialize_mt9m111(struct em28xx *dev)
i2c_master_send(&dev->i2c_client[dev->def_i2c_bus],
&regs[i][0], 3);
+ /* FIXME: this does not yet create a sensor entity in the media graph */
+
return 0;
}
@@ -91,6 +93,8 @@ static int em28xx_initialize_mt9m001(struct em28xx *dev)
i2c_master_send(&dev->i2c_client[dev->def_i2c_bus],
&regs[i][0], 3);
+ /* FIXME: this does not yet create a sensor entity in the media graph */
+
return 0;
}
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index a1b6ef5894a6..930e3e3fc948 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -32,11 +32,12 @@
#include <media/tuner.h>
#include <media/drv-intf/msp3400.h>
#include <media/i2c/saa7115.h>
-#include <media/i2c/tvp5150.h>
+#include <dt-bindings/media/tvp5150.h>
#include <media/i2c/tvaudio.h>
#include <media/i2c-addr.h>
#include <media/tveeprom.h>
#include <media/v4l2-common.h>
+#include <sound/ac97_codec.h>
#include "em28xx.h"
@@ -560,6 +561,16 @@ static struct em28xx_led pctv_80e_leds[] = {
{-1, 0, 0, 0},
};
+static struct em28xx_led terratec_grabby_leds[] = {
+ {
+ .role = EM28XX_LED_ANALOG_CAPTURING,
+ .gpio_reg = EM2820_R08_GPIO_CTRL,
+ .gpio_mask = EM_GPIO_3,
+ .inverted = 1,
+ },
+ {-1, 0, 0, 0},
+};
+
/*
* Board definitions
*/
@@ -570,7 +581,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT,
.is_webcam = 1,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = 0,
.amux = EM28XX_AMUX_VIDEO,
.gpio = silvercrest_reg_seq,
@@ -583,7 +594,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_SAA711X,
.tuner_type = TUNER_ABSENT,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -605,7 +616,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT,
.is_webcam = 1,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = 0,
.amux = EM28XX_AMUX_VIDEO,
} },
@@ -616,7 +627,7 @@ struct em28xx_board em28xx_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.decoder = EM28XX_SAA711X,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -635,7 +646,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_LINE_IN,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -655,7 +666,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -675,7 +686,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -715,7 +726,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_LINE_IN,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -735,7 +746,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_LINE_IN,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -755,7 +766,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -775,7 +786,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -800,7 +811,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE4,
.amux = EM28XX_AMUX_AUX,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE5,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -819,7 +830,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT,
.is_webcam = 1,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = 0,
.amux = EM28XX_AMUX_VIDEO,
} },
@@ -829,7 +840,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT,
.is_webcam = 1,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = 0,
.amux = EM28XX_AMUX_VIDEO,
.gpio = silvercrest_reg_seq,
@@ -848,7 +859,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_LINE_IN,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_VIDEO,
}, {
@@ -863,7 +874,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT, /* Capture only device */
.decoder = EM28XX_SAA711X,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -879,7 +890,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT,
.is_webcam = 1,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = 0,
.amux = EM28XX_AMUX_VIDEO,
} },
@@ -889,7 +900,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_SAA711X,
.tuner_type = TUNER_ABSENT, /* Capture only device */
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -909,7 +920,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -930,7 +941,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -952,7 +963,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -974,7 +985,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = TVP5150_COMPOSITE0,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -992,7 +1003,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = TVP5150_COMPOSITE0,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1006,7 +1017,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT, /* Capture only device */
.decoder = EM28XX_TVP5150,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1029,7 +1040,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_LINE_IN,
.gpio = pinnacle_hybrid_pro_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = pinnacle_hybrid_pro_analog,
@@ -1100,7 +1111,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = terratec_cinergy_USB_XS_FR_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = terratec_cinergy_USB_XS_FR_analog,
@@ -1186,7 +1197,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -1213,7 +1224,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -1239,7 +1250,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -1265,7 +1276,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -1291,7 +1302,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -1317,7 +1328,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -1343,7 +1354,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = default_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = default_analog,
@@ -1368,7 +1379,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -1392,7 +1403,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE4,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1413,7 +1424,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1428,7 +1439,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_SAA711X,
.tuner_type = TUNER_ABSENT, /* capture only board */
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1443,7 +1454,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT, /* Capture-only board */
.decoder = EM28XX_SAA711X,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = vc211a_enable,
@@ -1465,7 +1476,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1485,7 +1496,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1500,7 +1511,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT, /* capture only board */
.decoder = EM28XX_SAA711X,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1520,7 +1531,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_COMPOSITE2,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1541,7 +1552,7 @@ struct em28xx_board em28xx_boards[] = {
.aout = EM28XX_AOUT_MONO | /* I2S */
EM28XX_AOUT_MASTER, /* Line out pin */
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1555,6 +1566,7 @@ struct em28xx_board em28xx_boards[] = {
.buttons = std_snapshot_button,
.tda9887_conf = TDA9887_PRESENT,
.tuner_type = TUNER_YMEC_TVF_5533MF,
+ .tuner_addr = 0x60,
.decoder = EM28XX_SAA711X,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -1563,7 +1575,7 @@ struct em28xx_board em28xx_boards[] = {
.aout = EM28XX_AOUT_MONO | /* I2S */
EM28XX_AOUT_MASTER, /* Line out pin */
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1581,7 +1593,7 @@ struct em28xx_board em28xx_boards[] = {
.type = EM28XX_VMUX_SVIDEO,
.vmux = SAA7115_SVIDEO3,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
} },
},
@@ -1610,7 +1622,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = em2880_msi_digivox_ad_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = em2880_msi_digivox_ad_analog,
@@ -1633,7 +1645,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = em2880_msi_digivox_ad_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = em2880_msi_digivox_ad_analog,
@@ -1654,7 +1666,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = TVP5150_COMPOSITE0,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1677,7 +1689,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = default_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = default_analog,
@@ -1708,7 +1720,7 @@ struct em28xx_board em28xx_boards[] = {
.gpio = em2882_kworld_315u_analog,
.aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = em2882_kworld_315u_analog1,
@@ -1735,7 +1747,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = default_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = default_analog,
@@ -1758,7 +1770,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = default_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = default_analog,
@@ -1782,7 +1794,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = pinnacle_hybrid_pro_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = pinnacle_hybrid_pro_analog,
@@ -1808,7 +1820,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -1834,7 +1846,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = TVP5150_COMPOSITE0,
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1859,7 +1871,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = hauppauge_wintv_hvr_900_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = hauppauge_wintv_hvr_900_analog,
@@ -1904,7 +1916,7 @@ struct em28xx_board em28xx_boards[] = {
.gpio = kworld_330u_analog,
.aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = kworld_330u_analog,
@@ -1951,7 +1963,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1970,7 +1982,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT,
.decoder = EM28XX_SAA711X,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -1990,7 +2002,7 @@ struct em28xx_board em28xx_boards[] = {
.vmux = TVP5150_COMPOSITE0,
.amux = EM28XX_AMUX_VIDEO,
}, { /* Composite has not been tested yet */
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_VIDEO,
}, { /* S-video has not been tested yet */
@@ -2006,7 +2018,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_SAA711X,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -2014,6 +2026,8 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_SVIDEO3,
.amux = EM28XX_AMUX_LINE_IN,
} },
+ .buttons = std_snapshot_button,
+ .leds = terratec_grabby_leds,
},
[EM2860_BOARD_TERRATEC_AV350] = {
.name = "Terratec AV350",
@@ -2023,7 +2037,7 @@ struct em28xx_board em28xx_boards[] = {
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.mute_gpio = terratec_av350_mute_gpio,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AUDIO_SRC_LINE,
.gpio = terratec_av350_unmute_gpio,
@@ -2041,7 +2055,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_SAA711X,
.tuner_type = TUNER_ABSENT, /* Capture only device */
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -2067,7 +2081,7 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_VIDEO,
.gpio = evga_indtube_analog,
}, {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
.gpio = evga_indtube_analog,
@@ -2125,7 +2139,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT,
.decoder = EM28XX_SAA711X,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = SAA7115_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -2238,7 +2252,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT,
.is_webcam = 1,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.amux = EM28XX_AMUX_VIDEO,
.gpio = speedlink_vad_laplace_reg_seq,
} },
@@ -2272,7 +2286,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_ABSENT, /* Capture only device */
.decoder = EM28XX_TVP5150,
.input = { {
- .type = EM28XX_VMUX_COMPOSITE1,
+ .type = EM28XX_VMUX_COMPOSITE,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
}, {
@@ -2550,6 +2564,36 @@ static inline void em28xx_set_model(struct em28xx *dev)
dev->def_i2c_bus = dev->board.def_i2c_bus;
}
+/* Wait until AC97_RESET reports the expected value reliably before proceeding.
+ * We also check that two unrelated register accesses don't return the same
+ * value, to avoid returning prematurely.
+ * This procedure helps to ensure that AC97 register accesses are reliable.
+ */
+static int em28xx_wait_until_ac97_features_equals(struct em28xx *dev,
+ int expected_feat)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(2000);
+ int feat, powerdown;
+
+ while (time_is_after_jiffies(timeout)) {
+ feat = em28xx_read_ac97(dev, AC97_RESET);
+ if (feat < 0)
+ return feat;
+
+ powerdown = em28xx_read_ac97(dev, AC97_POWERDOWN);
+ if (powerdown < 0)
+ return powerdown;
+
+ if (feat == expected_feat && feat != powerdown)
+ return 0;
+
+ msleep(50);
+ }
+
+ em28xx_warn("AC97 registers access is not reliable !\n");
+ return -ETIMEDOUT;
+}
+
/* Since em28xx_pre_card_setup() requires a proper dev->model,
* this won't work for boards with generic PCI IDs
*/
@@ -2655,6 +2699,13 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd);
msleep(70);
break;
+
+ case EM2860_BOARD_TERRATEC_GRABBY:
+ /* HACK?: Ensure AC97 register reading is reliable before
+ * proceeding. In practice, this will wait about 1.6 seconds.
+ */
+ em28xx_wait_until_ac97_features_equals(dev, 0x6a90);
+ break;
}
em28xx_gpio_set(dev, dev->board.tuner_gpio);
@@ -3012,6 +3063,41 @@ static void flush_request_modules(struct em28xx *dev)
flush_work(&dev->request_module_wk);
}
+static int em28xx_media_device_init(struct em28xx *dev,
+ struct usb_device *udev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device *mdev;
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ if (udev->product)
+ media_device_usb_init(mdev, udev, udev->product);
+ else if (udev->manufacturer)
+ media_device_usb_init(mdev, udev, udev->manufacturer);
+ else
+ media_device_usb_init(mdev, udev, dev->name);
+
+ dev->media_dev = mdev;
+#endif
+ return 0;
+}
+
+static void em28xx_unregister_media_device(struct em28xx *dev)
+{
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ if (dev->media_dev) {
+ media_device_unregister(dev->media_dev);
+ media_device_cleanup(dev->media_dev);
+ kfree(dev->media_dev);
+ dev->media_dev = NULL;
+ }
+#endif
+}
+
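
The two helpers above own the whole media controller lifetime for the USB device. A minimal sketch of the same allocate/init/register/teardown sequence, assuming the media_device_usb_init() API this series relies on and a hypothetical demo driver:

#ifdef CONFIG_MEDIA_CONTROLLER
#include <linux/slab.h>
#include <media/media-device.h>

/* Sketch only: the lifecycle em28xx follows, condensed into one function. */
static int demo_media_dev_lifecycle(struct usb_device *udev)
{
	struct media_device *mdev;
	int ret;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	/* Fills bus_info, model, serial, ... from the USB descriptors */
	media_device_usb_init(mdev, udev, "demo");

	ret = media_device_register(mdev);	/* exposes /dev/mediaN */
	if (ret)
		goto out_cleanup;

	/* ... normal driver operation ... */

	media_device_unregister(mdev);
out_cleanup:
	media_device_cleanup(mdev);
	kfree(mdev);
	return ret;
}
#endif
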
/*
* em28xx_release_resources()
* unregisters the v4l2,i2c and usb devices
@@ -3023,6 +3109,8 @@ static void em28xx_release_resources(struct em28xx *dev)
mutex_lock(&dev->lock);
+ em28xx_unregister_media_device(dev);
+
if (dev->def_i2c_bus)
em28xx_i2c_unregister(dev, 1);
em28xx_i2c_unregister(dev, 0);
@@ -3167,6 +3255,8 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
*/
snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
+ em28xx_media_device_init(dev, udev);
+
if (dev->is_audio_only) {
retval = em28xx_audio_setup(dev);
if (retval)
@@ -3467,7 +3557,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
- /* allocate device struct */
+ /* allocate device struct and check if the device is a webcam */
mutex_init(&dev->lock);
retval = em28xx_init_dev(dev, udev, interface, nr);
if (retval) {
@@ -3483,6 +3573,15 @@ static int em28xx_usb_probe(struct usb_interface *interface,
try_bulk = usb_xfer_mode > 0;
}
+ /* Disable V4L2 if the device doesn't have a decoder */
+ if (has_video &&
+ dev->board.decoder == EM28XX_NODECODER && !dev->board.is_webcam) {
+ printk(DRIVER_NAME
+ ": Currently, V4L2 is not supported on this model\n");
+ has_video = false;
+ dev->has_video = false;
+ }
+
/* Select USB transfer types to use */
if (has_video) {
if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
@@ -3501,9 +3600,14 @@ static int em28xx_usb_probe(struct usb_interface *interface,
request_modules(dev);
- /* Should be the last thing to do, to avoid newer udev's to
- open the device before fully initializing it
+ /*
+ * Do it at the end, to reduce dynamic configuration changes during
+ * device init. Still, as request_modules() can be asynchronous, the
+ * topology will likely change again once the em28xx sub-drivers load.
*/
+#ifdef CONFIG_MEDIA_CONTROLLER
+ retval = media_device_register(dev->media_dev);
+#endif
return 0;
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index bf5c24467c65..5d209c7c54d5 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -905,6 +905,7 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
struct em28xx *dev, struct device *device)
{
int result;
+ bool create_rf_connector = false;
mutex_init(&dvb->lock);
@@ -916,6 +917,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
dev->name, result);
goto fail_adapter;
}
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ dvb->adapter.mdev = dev->media_dev;
+#endif
/* Ensure all frontends negotiate bus access */
dvb->fe[0]->ops.ts_bus_ctrl = em28xx_dvb_bus_ctrl;
@@ -994,8 +998,19 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
/* register network adapter */
dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
+
+ /* If the analog part won't create RF connectors, DVB will do it */
+ if (!dev->has_video || (dev->tuner_type == TUNER_ABSENT))
+ create_rf_connector = true;
+
+ result = dvb_create_media_graph(&dvb->adapter, create_rf_connector);
+ if (result < 0)
+ goto fail_create_graph;
+
return 0;
+fail_create_graph:
+ dvb_net_release(&dvb->net);
fail_fe_conn:
dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
fail_fe_mem:
@@ -1656,6 +1671,9 @@ static int em28xx_dvb_init(struct em28xx *dev)
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = dvb->fe[0];
si2157_config.if_port = 1;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ si2157_config.mdev = dev->media_dev;
+#endif
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "si2157", I2C_NAME_SIZE);
info.addr = 0x60;
@@ -1717,6 +1735,9 @@ static int em28xx_dvb_init(struct em28xx *dev)
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = dvb->fe[0];
si2157_config.if_port = 0;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ si2157_config.mdev = dev->media_dev;
+#endif
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "si2146", I2C_NAME_SIZE);
info.addr = 0x60;
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 0e86ff423c49..44834b2eff55 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -196,7 +196,6 @@ static void em28xx_wake_i2c(struct em28xx *dev)
v4l2_device_call_all(v4l2_dev, 0, core, reset, 0);
v4l2_device_call_all(v4l2_dev, 0, video, s_routing,
INPUT(dev->ctl_input)->vmux, 0, 0);
- v4l2_device_call_all(v4l2_dev, 0, video, s_stream, 0);
}
static int em28xx_colorlevels_set_default(struct em28xx *dev)
@@ -867,6 +866,147 @@ static void res_free(struct em28xx *dev, enum v4l2_buf_type f_type)
em28xx_videodbg("res: put %d\n", res_type);
}
+static void em28xx_v4l2_media_release(struct em28xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int i;
+
+ for (i = 0; i < MAX_EM28XX_INPUT; i++) {
+ if (!INPUT(i)->type)
+ return;
+ media_device_unregister_entity(&dev->input_ent[i]);
+ }
+#endif
+}
+
+/*
+ * Media Controller helper functions
+ */
+
+static int em28xx_enable_analog_tuner(struct em28xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device *mdev = dev->media_dev;
+ struct em28xx_v4l2 *v4l2 = dev->v4l2;
+ struct media_entity *source;
+ struct media_link *link, *found_link = NULL;
+ int ret, active_links = 0;
+
+ if (!mdev || !v4l2->decoder)
+ return 0;
+
+ /*
+ * This will find the tuner that is connected into the decoder.
+ * Technically, this is not 100% correct, as the device may be
+ * using an analog input instead of the tuner. However, as we can't
+ * do DVB streaming while the DMA engine is being used for V4L2,
+ * this should be enough for the actual needs.
+ */
+ list_for_each_entry(link, &v4l2->decoder->links, list) {
+ if (link->sink->entity == v4l2->decoder) {
+ found_link = link;
+ if (link->flags & MEDIA_LNK_FL_ENABLED)
+ active_links++;
+ break;
+ }
+ }
+
+ if (active_links == 1 || !found_link)
+ return 0;
+
+ source = found_link->source->entity;
+ list_for_each_entry(link, &source->links, list) {
+ struct media_entity *sink;
+ int flags = 0;
+
+ sink = link->sink->entity;
+
+ if (sink == v4l2->decoder)
+ flags = MEDIA_LNK_FL_ENABLED;
+
+ ret = media_entity_setup_link(link, flags);
+ if (ret) {
+ pr_err("Couldn't change link %s->%s to %s. Error %d\n",
+ source->name, sink->name,
+ flags ? "enabled" : "disabled",
+ ret);
+ return ret;
+ } else
+ em28xx_videodbg("link %s->%s was %s\n",
+ source->name, sink->name,
+ flags ? "ENABLED" : "disabled");
+ }
+#endif
+ return 0;
+}
+
+static const char * const iname[] = {
+ [EM28XX_VMUX_COMPOSITE] = "Composite",
+ [EM28XX_VMUX_SVIDEO] = "S-Video",
+ [EM28XX_VMUX_TELEVISION] = "Television",
+ [EM28XX_RADIO] = "Radio",
+};
+
+static void em28xx_v4l2_create_entities(struct em28xx *dev)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct em28xx_v4l2 *v4l2 = dev->v4l2;
+ int ret, i;
+
+ /* Initialize Video, VBI and Radio pads */
+ v4l2->video_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&v4l2->vdev.entity, 1, &v4l2->video_pad);
+ if (ret < 0)
+ pr_err("failed to initialize video media entity!\n");
+
+ if (em28xx_vbi_supported(dev)) {
+ v4l2->vbi_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&v4l2->vbi_dev.entity, 1,
+ &v4l2->vbi_pad);
+ if (ret < 0)
+ pr_err("failed to initialize vbi media entity!\n");
+ }
+
+ /* Webcams don't have input connectors */
+ if (dev->board.is_webcam)
+ return;
+
+ /* Create entities for each input connector */
+ for (i = 0; i < MAX_EM28XX_INPUT; i++) {
+ struct media_entity *ent = &dev->input_ent[i];
+
+ if (!INPUT(i)->type)
+ break;
+
+ ent->name = iname[INPUT(i)->type];
+ ent->flags = MEDIA_ENT_FL_CONNECTOR;
+ dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ switch (INPUT(i)->type) {
+ case EM28XX_VMUX_COMPOSITE:
+ ent->function = MEDIA_ENT_F_CONN_COMPOSITE;
+ break;
+ case EM28XX_VMUX_SVIDEO:
+ ent->function = MEDIA_ENT_F_CONN_SVIDEO;
+ break;
+ default: /* EM28XX_VMUX_TELEVISION or EM28XX_RADIO */
+ if (dev->tuner_type != TUNER_ABSENT)
+ ent->function = MEDIA_ENT_F_CONN_RF;
+ break;
+ }
+
+ ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
+ if (ret < 0)
+ pr_err("failed to initialize input pad[%d]!\n", i);
+
+ ret = media_device_register_entity(dev->media_dev, ent);
+ if (ret < 0)
+ pr_err("failed to register input entity %d!\n", i);
+ }
+#endif
+}
+
/* ------------------------------------------------------------------
Videobuf2 operations
------------------------------------------------------------------*/
@@ -884,6 +1024,9 @@ static int queue_setup(struct vb2_queue *vq,
return sizes[0] < size ? -EINVAL : 0;
*nplanes = 1;
sizes[0] = size;
+
+ em28xx_enable_analog_tuner(dev);
+
return 0;
}
@@ -962,6 +1105,9 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
f.type = V4L2_TUNER_ANALOG_TV;
v4l2_device_call_all(&v4l2->v4l2_dev,
0, tuner, s_frequency, &f);
+
+ /* Enable video stream at TV decoder */
+ v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 1);
}
v4l2->streaming_users++;
@@ -981,6 +1127,9 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
res_free(dev, vq->type);
if (v4l2->streaming_users-- == 1) {
+ /* Disable video stream at TV decoder */
+ v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 0);
+
/* Last active user, so shutdown all the URBS */
em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
}
@@ -1013,6 +1162,9 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
res_free(dev, vq->type);
if (v4l2->streaming_users-- == 1) {
+ /* Disable video stream at TV decoder */
+ v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 0);
+
/* Last active user, so shutdown all the URBS */
em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
}
@@ -1224,6 +1376,12 @@ static void scale_to_size(struct em28xx *dev,
*width = (((unsigned long)maxw) << 12) / (hscale + 4096L);
*height = (((unsigned long)maxh) << 12) / (vscale + 4096L);
+
+ /* Don't let width or height be zero */
+ if (*width < 1)
+ *width = 1;
+ if (*height < 1)
+ *height = 1;
}
/* ------------------------------------------------------------------
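
The clamps above (and the matching guard in vidioc_try_fmt_vid_cap() just below) close a division-by-zero path: size_to_scale() divides maxw << 12 by the requested width, so a width or height of 0 must be bumped to 1 first. A standalone sketch of the round trip, using the driver's formula with maxw fixed at 720 for illustration (a sketch, not the in-tree code):

#include <stdio.h>

#define MAXW 720	/* stands in for norm_maxw() */

static unsigned int size_to_scale(unsigned int width)
{
	return ((unsigned long)MAXW << 12) / width - 4096;	/* width >= 1! */
}

static unsigned int scale_to_size(unsigned int hscale)
{
	unsigned int width = ((unsigned long)MAXW << 12) / (hscale + 4096);

	return width < 1 ? 1 : width;	/* don't let width be zero */
}

int main(void)
{
	unsigned int width = 0;	/* e.g. a zero width from userspace */

	/* the vidioc_try_fmt_vid_cap() clamp: 0 would divide by zero */
	if (width < 1)
		width = 1;

	printf("hscale=%u width=%u\n", size_to_scale(width),
	       scale_to_size(size_to_scale(width)));
	return 0;
}
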
@@ -1299,6 +1457,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh,
1, 0);
}
+ /* Avoid division by zero at size_to_scale */
+ if (width < 1)
+ width = 1;
+ if (height < 1)
+ height = 1;
size_to_scale(dev, width, height, &hscale, &vscale);
scale_to_size(dev, hscale, vscale, &width, &height);
@@ -1434,18 +1597,6 @@ static int vidioc_s_parm(struct file *file, void *priv,
0, video, s_parm, p);
}
-static const char *iname[] = {
- [EM28XX_VMUX_COMPOSITE1] = "Composite1",
- [EM28XX_VMUX_COMPOSITE2] = "Composite2",
- [EM28XX_VMUX_COMPOSITE3] = "Composite3",
- [EM28XX_VMUX_COMPOSITE4] = "Composite4",
- [EM28XX_VMUX_SVIDEO] = "S-Video",
- [EM28XX_VMUX_TELEVISION] = "Television",
- [EM28XX_VMUX_CABLE] = "Cable TV",
- [EM28XX_VMUX_DVB] = "DVB",
- [EM28XX_VMUX_DEBUG] = "for debug only",
-};
-
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
@@ -1463,8 +1614,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
strcpy(i->name, iname[INPUT(n)->type]);
- if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) ||
- (EM28XX_VMUX_CABLE == INPUT(n)->type))
+ if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type))
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = dev->v4l2->vdev.tvnorms;
@@ -1961,6 +2111,8 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
+ em28xx_v4l2_media_release(dev);
+
if (video_is_registered(&v4l2->radio_dev)) {
em28xx_info("V4L2 device %s deregistered\n",
video_device_node_name(&v4l2->radio_dev));
@@ -2284,6 +2436,9 @@ static int em28xx_v4l2_init(struct em28xx *dev)
v4l2->dev = dev;
dev->v4l2 = v4l2;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ v4l2->v4l2_dev.mdev = dev->media_dev;
+#endif
ret = v4l2_device_register(&dev->udev->dev, &v4l2->v4l2_dev);
if (ret < 0) {
em28xx_errdev("Call to v4l2_device_register() failed!\n");
@@ -2556,6 +2711,18 @@ static int em28xx_v4l2_init(struct em28xx *dev)
video_device_node_name(&v4l2->radio_dev));
}
+ /* Init entities at the Media Controller */
+ em28xx_v4l2_create_entities(dev);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ ret = v4l2_mc_create_media_graph(dev->media_dev);
+ if (ret) {
+ em28xx_errdev("failed to create media graph\n");
+ em28xx_v4l2_media_release(dev);
+ goto unregister_dev;
+ }
+#endif
+
em28xx_info("V4L2 video device registered as %s\n",
video_device_node_name(&v4l2->vdev));
@@ -2577,6 +2744,22 @@ static int em28xx_v4l2_init(struct em28xx *dev)
return 0;
unregister_dev:
+ if (video_is_registered(&v4l2->radio_dev)) {
+ em28xx_info("V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->radio_dev));
+ video_unregister_device(&v4l2->radio_dev);
+ }
+ if (video_is_registered(&v4l2->vbi_dev)) {
+ em28xx_info("V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vbi_dev));
+ video_unregister_device(&v4l2->vbi_dev);
+ }
+ if (video_is_registered(&v4l2->vdev)) {
+ em28xx_info("V4L2 device %s deregistered\n",
+ video_device_node_name(&v4l2->vdev));
+ video_unregister_device(&v4l2->vdev);
+ }
+
v4l2_ctrl_handler_free(&v4l2->ctrl_handler);
v4l2_device_unregister(&v4l2->v4l2_dev);
err:
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 8ff066c977d9..267444961775 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -26,7 +26,7 @@
#ifndef _EM28XX_H
#define _EM28XX_H
-#define EM28XX_VERSION "0.2.1"
+#define EM28XX_VERSION "0.2.2"
#define DRIVER_DESC "Empia em28xx device driver"
#include <linux/workqueue.h>
@@ -291,15 +291,9 @@ struct em28xx_dmaqueue {
#define MAX_EM28XX_INPUT 4
enum em28xx_itype {
- EM28XX_VMUX_COMPOSITE1 = 1,
- EM28XX_VMUX_COMPOSITE2,
- EM28XX_VMUX_COMPOSITE3,
- EM28XX_VMUX_COMPOSITE4,
+ EM28XX_VMUX_COMPOSITE = 1,
EM28XX_VMUX_SVIDEO,
EM28XX_VMUX_TELEVISION,
- EM28XX_VMUX_CABLE,
- EM28XX_VMUX_DVB,
- EM28XX_VMUX_DEBUG,
EM28XX_RADIO,
};
@@ -558,6 +552,11 @@ struct em28xx_v4l2 {
bool top_field;
int vbi_read;
unsigned int field_count;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_pad video_pad, vbi_pad;
+ struct media_entity *decoder;
+#endif
};
struct em28xx_audio {
@@ -718,6 +717,12 @@ struct em28xx {
/* Snapshot button input device */
char snapshot_button_path[30]; /* path of the input dev */
struct input_dev *sbutton_input_dev;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device *media_dev;
+ struct media_entity input_ent[MAX_EM28XX_INPUT];
+ struct media_pad input_pad[MAX_EM28XX_INPUT];
+#endif
};
#define kref_to_dev(d) container_of(d, struct em28xx, ref)
diff --git a/drivers/media/usb/go7007/go7007-priv.h b/drivers/media/usb/go7007/go7007-priv.h
index 745185eb060b..bebee8ca9981 100644
--- a/drivers/media/usb/go7007/go7007-priv.h
+++ b/drivers/media/usb/go7007/go7007-priv.h
@@ -250,7 +250,7 @@ struct go7007 {
struct i2c_adapter i2c_adapter;
/* HPI driver */
- struct go7007_hpi_ops *hpi_ops;
+ const struct go7007_hpi_ops *hpi_ops;
void *hpi_context;
int interrupt_available;
wait_queue_head_t interrupt_waitq;
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index 3dbf14c85c5c..14d3f8c1ce4a 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -932,7 +932,7 @@ static void go7007_usb_release(struct go7007 *go)
kfree(go->hpi_context);
}
-static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
+static const struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
.interface_reset = go7007_usb_interface_reset,
.write_interrupt = go7007_usb_ezusb_write_interrupt,
.read_interrupt = go7007_usb_read_interrupt,
@@ -942,7 +942,7 @@ static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
.release = go7007_usb_release,
};
-static struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
+static const struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
.interface_reset = go7007_usb_interface_reset,
.write_interrupt = go7007_usb_onboard_write_interrupt,
.read_interrupt = go7007_usb_read_interrupt,
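
Constifying the go7007_hpi_ops tables lets them live in read-only memory and turns any accidental write into a compile error. A tiny userspace sketch of the pattern, with hypothetical names:

#include <stdio.h>

/* An ops table: a struct of function pointers bound once per device type. */
struct demo_ops {
	int (*reset)(void);
};

static int demo_reset(void) { return 0; }

/* const places the table in .rodata; the compiler rejects writes to it. */
static const struct demo_ops demo_usb_ops = {
	.reset = demo_reset,
};

int main(void)
{
	const struct demo_ops *ops = &demo_usb_ops;

	printf("reset -> %d\n", ops->reset());
	return 0;
}
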
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index c95f32a0c02b..965372a5ff2f 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -360,40 +360,6 @@ static const struct v4l2_pix_format ov511_sif_mode[] = {
.priv = 0},
};
-static const struct v4l2_pix_format ovfx2_vga_mode[] = {
- {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 320,
- .sizeimage = 320 * 240,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 1},
- {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 640,
- .sizeimage = 640 * 480,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 0},
-};
-static const struct v4l2_pix_format ovfx2_cif_mode[] = {
- {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 160,
- .sizeimage = 160 * 120,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 3},
- {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 176,
- .sizeimage = 176 * 144,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 1},
- {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 320,
- .sizeimage = 320 * 240,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 2},
- {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 352,
- .sizeimage = 352 * 288,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 0},
-};
static const struct v4l2_pix_format ovfx2_ov2610_mode[] = {
{800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 800,
@@ -2042,6 +2008,9 @@ static void reg_w(struct sd *sd, u16 index, u16 value)
if (sd->gspca_dev.usb_err < 0)
return;
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
+
switch (sd->bridge) {
case BRIDGE_OV511:
case BRIDGE_OV511PLUS:
@@ -2103,6 +2072,8 @@ static int reg_r(struct sd *sd, u16 index)
req = 1;
}
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
ret = usb_control_msg(sd->gspca_dev.dev,
usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
req,
@@ -2131,6 +2102,8 @@ static int reg_r8(struct sd *sd,
if (sd->gspca_dev.usb_err < 0)
return -1;
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
ret = usb_control_msg(sd->gspca_dev.dev,
usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
1, /* REQ_IO */
@@ -2187,6 +2160,8 @@ static void ov518_reg_w32(struct sd *sd, u16 index, u32 value, int n)
*((__le32 *) sd->gspca_dev.usb_buf) = __cpu_to_le32(value);
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
ret = usb_control_msg(sd->gspca_dev.dev,
usb_sndctrlpipe(sd->gspca_dev.dev, 0),
1 /* REG_IO */,
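
The same 150 µs guard is pasted in front of every control transfer here and again in w996Xcf.c below. A hedged sketch of what a shared wrapper could look like (hypothetical helper name; the in-tree patch keeps the per-call-site udelay(150)):

#include <linux/delay.h>
#include <linux/usb.h>

/*
 * Sketch only: some OV51x/W996x bridges drop transfers when an xHCI host
 * issues them back to back, so space every control message out by 150 us.
 */
static int demo_ctrl_msg_throttled(struct usb_device *udev, unsigned int pipe,
				   u8 req, u8 reqtype, u16 value, u16 index,
				   void *buf, u16 len, int timeout)
{
	udelay(150);
	return usb_control_msg(udev, pipe, req, reqtype, value, index,
			       buf, len, timeout);
}
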
diff --git a/drivers/media/usb/gspca/touptek.c b/drivers/media/usb/gspca/touptek.c
index 7bac6bc96063..b8af4370d27c 100644
--- a/drivers/media/usb/gspca/touptek.c
+++ b/drivers/media/usb/gspca/touptek.c
@@ -203,7 +203,7 @@ static int val_reply(struct gspca_dev *gspca_dev, const char *reply, int rc)
return -EIO;
}
if (reply[0] != 0x08) {
- PERR("Bad reply 0x%02X", reply[0]);
+ PERR("Bad reply 0x%02x", (int)reply[0]);
return -EIO;
}
return 0;
@@ -211,7 +211,7 @@ static int val_reply(struct gspca_dev *gspca_dev, const char *reply, int rc)
static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
{
- char buff[1];
+ char *buff = gspca_dev->usb_buf;
int rc;
PDEBUG(D_USBO,
@@ -219,7 +219,7 @@ static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
value, index);
rc = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0),
0x0B, 0xC0, value, index, buff, 1, 500);
- PDEBUG(D_USBO, "rc=%d, ret={0x%02X}", rc, buff[0]);
+ PDEBUG(D_USBO, "rc=%d, ret={0x%02x}", rc, (int)buff[0]);
if (rc < 0) {
PERR("Failed reg_w(0x0B, 0xC0, 0x%04X, 0x%04X) w/ rc %d\n",
value, index, rc);
@@ -438,7 +438,7 @@ static void configure_encrypted(struct gspca_dev *gspca_dev)
static int configure(struct gspca_dev *gspca_dev)
{
int rc;
- uint8_t buff[4];
+ char *buff = gspca_dev->usb_buf;
PDEBUG(D_STREAM, "configure()\n");
diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
index fb9fe2ef3a6f..896f1b2b9179 100644
--- a/drivers/media/usb/gspca/w996Xcf.c
+++ b/drivers/media/usb/gspca/w996Xcf.c
@@ -79,6 +79,8 @@ static void w9968cf_write_fsb(struct sd *sd, u16* data)
value = *data++;
memcpy(sd->gspca_dev.usb_buf, data, 6);
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
value, 0x06, sd->gspca_dev.usb_buf, 6, 500);
@@ -99,6 +101,9 @@ static void w9968cf_write_sb(struct sd *sd, u16 value)
if (sd->gspca_dev.usb_err < 0)
return;
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
+
/* We don't use reg_w here, as that would cause all writes when
bitbanging i2c to be logged, making the logs impossible to read */
ret = usb_control_msg(sd->gspca_dev.dev,
@@ -126,6 +131,9 @@ static int w9968cf_read_sb(struct sd *sd)
if (sd->gspca_dev.usb_err < 0)
return -1;
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
+
/* We don't use reg_r here, as the w9968cf is special and has 16
bit registers instead of 8 bit */
ret = usb_control_msg(sd->gspca_dev.dev,
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 3fc64197b4e6..08f0ca7aa012 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -273,7 +273,9 @@ static int hdpvr_probe(struct usb_interface *interface,
struct hdpvr_device *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
+#if IS_ENABLED(CONFIG_I2C)
struct i2c_client *client;
+#endif
size_t buffer_size;
int i;
int retval = -ENOMEM;
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 7dee22deebf3..ba7f02270c83 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -462,10 +462,8 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
}
if (wait_event_interruptible(dev->wait_data,
- buf->status == BUFSTAT_READY)) {
- ret = -ERESTARTSYS;
- goto err;
- }
+ buf->status == BUFSTAT_READY))
+ return -ERESTARTSYS;
}
if (buf->status != BUFSTAT_READY)
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index c104315fdc17..2d33033682af 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -839,8 +839,6 @@ static int msi2500_set_usb_adc(struct msi2500_dev *dev)
goto err;
ret = msi2500_ctrl_msg(dev, CMD_WREG, reg3);
- if (ret)
- goto err;
err:
return ret;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
index fd888a604462..c45f30715dcd 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
@@ -196,7 +196,7 @@ int pvr2_context_global_init(void)
pvr2_context_thread_ptr = kthread_run(pvr2_context_thread_func,
NULL,
"pvrusb2-context");
- return (pvr2_context_thread_ptr ? 0 : -ENOMEM);
+ return IS_ERR(pvr2_context_thread_ptr) ? -ENOMEM : 0;
}
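
kthread_run() reports failure with an ERR_PTR()-encoded pointer, never NULL, so the old NULL test could not trigger. A minimal sketch of the corrected idiom (the fix above collapses every error to -ENOMEM; the general form propagates PTR_ERR()):

#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *demo_task;

static int demo_start_thread(int (*threadfn)(void *), void *data)
{
	demo_task = kthread_run(threadfn, data, "demo-thread");
	if (IS_ERR(demo_task)) {
		int err = PTR_ERR(demo_task);	/* e.g. -ENOMEM or -EINTR */

		demo_task = NULL;
		return err;
	}
	return 0;
}
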
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 0533ef20decf..1a093e5953fd 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -4903,6 +4903,9 @@ static void pvr2_hdw_state_log_state(struct pvr2_hdw *hdw)
printk(KERN_INFO "%s %.*s\n",hdw->name,ccnt,buf);
}
ccnt = pvr2_hdw_report_clients(hdw, buf, sizeof(buf));
+ if (ccnt >= sizeof(buf))
+ ccnt = sizeof(buf);
+
ucnt = 0;
while (ucnt < ccnt) {
lcnt = 0;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-io.c b/drivers/media/usb/pvrusb2/pvrusb2-io.c
index d860344de84e..e68ce24f27e3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-io.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-io.c
@@ -473,7 +473,7 @@ static void buffer_complete(struct urb *urb)
}
spin_unlock_irqrestore(&sp->list_lock,irq_flags);
pvr2_buffer_set_ready(bp);
- if (sp && sp->callback_func) {
+ if (sp->callback_func) {
sp->callback_func(sp->callback_data);
}
}
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 086cf1c7bd7d..18aed5dd325e 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
{ USB_DEVICE(0x0471, 0x0312) },
{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
+ { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
@@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
name = "Philips SPC 900NC webcam";
type_id = 740;
break;
+ case 0x032C:
+ PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
+ name = "Philips SPC 880NC webcam";
+ type_id = 740;
+ break;
default:
return -ENODEV;
break;
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 8abbd3cc8eba..c2e25876e93b 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -27,6 +27,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <media/media-device.h>
#include "sms-cards.h"
#include "smsendian.h"
@@ -51,6 +52,9 @@ struct smsusb_urb_t {
struct smsusb_device_t *dev;
struct urb urb;
+
+ /* For the bottom half */
+ struct work_struct wq;
};
struct smsusb_device_t {
@@ -71,6 +75,18 @@ static int smsusb_submit_urb(struct smsusb_device_t *dev,
struct smsusb_urb_t *surb);
/**
+ * Completing URB's callback handler - bottom half (process context)
+ * submits the URB prepared in smsusb_onresponse()
+ */
+static void do_submit_urb(struct work_struct *work)
+{
+ struct smsusb_urb_t *surb = container_of(work, struct smsusb_urb_t, wq);
+ struct smsusb_device_t *dev = surb->dev;
+
+ smsusb_submit_urb(dev, surb);
+}
+
+/**
* Completing URB's callback handler - top half (interrupt context)
* adds the completed sms urb to the global surbs list and activates the
* worker thread to resubmit the surb
@@ -138,13 +154,15 @@ static void smsusb_onresponse(struct urb *urb)
exit_and_resubmit:
- smsusb_submit_urb(dev, surb);
+ INIT_WORK(&surb->wq, do_submit_urb);
+ schedule_work(&surb->wq);
}
static int smsusb_submit_urb(struct smsusb_device_t *dev,
struct smsusb_urb_t *surb)
{
if (!surb->cb) {
+ /* This function can sleep */
surb->cb = smscore_getbuffer(dev->coredev);
if (!surb->cb) {
pr_err("smscore_getbuffer(...) returned NULL\n");
@@ -353,15 +371,7 @@ static void *siano_media_device_register(struct smsusb_device_t *dev,
if (!mdev)
return NULL;
- mdev->dev = &udev->dev;
- strlcpy(mdev->model, board->name, sizeof(mdev->model));
- if (udev->serial)
- strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
- strcpy(mdev->bus_info, udev->devpath);
- mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- mdev->driver_version = LINUX_VERSION_CODE;
-
- media_device_init(mdev);
+ media_device_usb_init(mdev, udev, board->name);
ret = media_device_register(mdev);
if (ret) {
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 46191d5262eb..6ecb0b48423f 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -98,7 +98,6 @@ void stk1160_buffer_done(struct stk1160 *dev)
buf->vb.sequence = dev->sequence++;
buf->vb.field = V4L2_FIELD_INTERLACED;
- buf->vb.vb2_buf.planes[0].bytesused = buf->bytesused;
buf->vb.vb2_buf.timestamp = ktime_get_ns();
vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->bytesused);
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 4ebb33943f9a..f6cfad46547e 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -312,20 +312,24 @@ static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
usbtv->chunks_done++;
- /* Last chunk in a frame, signalling an end */
- if (odd && chunk_no == usbtv->n_chunks-1) {
- int size = vb2_plane_size(&buf->vb.vb2_buf, 0);
- enum vb2_buffer_state state = usbtv->chunks_done ==
- usbtv->n_chunks ?
- VB2_BUF_STATE_DONE :
- VB2_BUF_STATE_ERROR;
-
- buf->vb.field = V4L2_FIELD_INTERLACED;
- buf->vb.sequence = usbtv->sequence++;
- buf->vb.vb2_buf.timestamp = ktime_get_ns();
- vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
- vb2_buffer_done(&buf->vb.vb2_buf, state);
- list_del(&buf->list);
+ /* Last chunk in a field */
+ if (chunk_no == usbtv->n_chunks-1) {
+ /* Last chunk in a frame, signalling an end */
+ if (odd && !usbtv->last_odd) {
+ int size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+ enum vb2_buffer_state state = usbtv->chunks_done ==
+ usbtv->n_chunks ?
+ VB2_BUF_STATE_DONE :
+ VB2_BUF_STATE_ERROR;
+
+ buf->vb.field = V4L2_FIELD_INTERLACED;
+ buf->vb.sequence = usbtv->sequence++;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+ usbtv->last_odd = odd;
}
spin_unlock_irqrestore(&usbtv->buflock, flags);
@@ -389,6 +393,10 @@ static struct urb *usbtv_setup_iso_transfer(struct usbtv *usbtv)
ip->transfer_flags = URB_ISO_ASAP;
ip->transfer_buffer = kzalloc(size * USBTV_ISOC_PACKETS,
GFP_KERNEL);
+ if (!ip->transfer_buffer) {
+ usb_free_urb(ip);
+ return NULL;
+ }
ip->complete = usbtv_iso_cb;
ip->number_of_packets = USBTV_ISOC_PACKETS;
ip->transfer_buffer_length = size * USBTV_ISOC_PACKETS;
@@ -639,6 +647,7 @@ static int usbtv_start_streaming(struct vb2_queue *vq, unsigned int count)
if (usbtv->udev == NULL)
return -ENODEV;
+ usbtv->last_odd = 1;
usbtv->sequence = 0;
return usbtv_start(usbtv);
}
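
The tearing fix above only completes a frame when an odd field directly follows an even one, tracked through the new last_odd flag. A pure-function sketch of that pairing rule (userspace C, hypothetical names, modeled on the driver logic shown above):

#include <stdbool.h>
#include <stdio.h>

/*
 * Returns true when the field that just finished ends a frame: it must be
 * the odd field, and the previously finished field must have been even.
 */
static bool frame_done(bool odd, bool *last_odd)
{
	bool done = odd && !*last_odd;

	*last_odd = odd;
	return done;
}

int main(void)
{
	bool last_odd = true;	/* matches usbtv->last_odd = 1 at start */
	bool fields[] = { false, true, true, false, true };

	for (unsigned int i = 0; i < sizeof(fields) / sizeof(fields[0]); i++)
		printf("field %s -> %s\n", fields[i] ? "odd" : "even",
		       frame_done(fields[i], &last_odd) ? "frame" : "wait");
	return 0;
}

Two odd fields in a row complete only one frame, which is exactly the duplicate-field case the patch guards against.
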
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
index 19cb8bf7c4e9..161b38d5cfa0 100644
--- a/drivers/media/usb/usbtv/usbtv.h
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -95,6 +95,7 @@ struct usbtv {
int width, height;
int n_chunks;
int iso_size;
+ int last_odd;
unsigned int sequence;
struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index de9ff3bb8edd..ad2f3d27b266 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -162,8 +162,7 @@ MODULE_ALIAS(DRIVER_ALIAS);
static inline struct usb_usbvision *cd_to_usbvision(struct device *cd)
{
- struct video_device *vdev =
- container_of(cd, struct video_device, dev);
+ struct video_device *vdev = to_video_device(cd);
return video_get_drvdata(vdev);
}
@@ -177,8 +176,7 @@ static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static ssize_t show_model(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct video_device *vdev =
- container_of(cd, struct video_device, dev);
+ struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
return sprintf(buf, "%s\n",
usbvision_device_data[usbvision->dev_model].model_string);
@@ -188,8 +186,7 @@ static DEVICE_ATTR(model, S_IRUGO, show_model, NULL);
static ssize_t show_hue(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct video_device *vdev =
- container_of(cd, struct video_device, dev);
+ struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
struct v4l2_control ctrl;
ctrl.id = V4L2_CID_HUE;
@@ -203,8 +200,7 @@ static DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL);
static ssize_t show_contrast(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct video_device *vdev =
- container_of(cd, struct video_device, dev);
+ struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
struct v4l2_control ctrl;
ctrl.id = V4L2_CID_CONTRAST;
@@ -218,8 +214,7 @@ static DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL);
static ssize_t show_brightness(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct video_device *vdev =
- container_of(cd, struct video_device, dev);
+ struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
struct v4l2_control ctrl;
ctrl.id = V4L2_CID_BRIGHTNESS;
@@ -233,8 +228,7 @@ static DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL);
static ssize_t show_saturation(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct video_device *vdev =
- container_of(cd, struct video_device, dev);
+ struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
struct v4l2_control ctrl;
ctrl.id = V4L2_CID_SATURATION;
@@ -248,8 +242,7 @@ static DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL);
static ssize_t show_streaming(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct video_device *vdev =
- container_of(cd, struct video_device, dev);
+ struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
return sprintf(buf, "%s\n",
YES_NO(usbvision->streaming == stream_on ? 1 : 0));
@@ -259,8 +252,7 @@ static DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL);
static ssize_t show_compression(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct video_device *vdev =
- container_of(cd, struct video_device, dev);
+ struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
return sprintf(buf, "%s\n",
YES_NO(usbvision->isoc_mode == ISOC_MODE_COMPRESS));
@@ -270,8 +262,7 @@ static DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL);
static ssize_t show_device_bridge(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct video_device *vdev =
- container_of(cd, struct video_device, dev);
+ struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
return sprintf(buf, "%d\n", usbvision->bridge_type);
}
@@ -1156,6 +1147,7 @@ static int usbvision_radio_close(struct file *file)
usbvision_audio_off(usbvision);
usbvision->radio = 0;
usbvision->user--;
+ mutex_unlock(&usbvision->v4l2_lock);
if (usbvision->remove_pending) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
@@ -1164,7 +1156,6 @@ static int usbvision_radio_close(struct file *file)
return 0;
}
- mutex_unlock(&usbvision->v4l2_lock);
PDEBUG(DBG_IO, "success");
return v4l2_fh_release(file);
}
@@ -1461,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf,
printk(KERN_INFO "%s: %s found\n", __func__,
usbvision_device_data[model].model_string);
- /*
- * this is a security check.
- * an exploit using an incorrect bInterfaceNumber is known
- */
- if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
- return -ENODEV;
-
if (usbvision_device_data[model].interface >= 0)
interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 4e7148815a78..451e84e962e2 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -148,6 +148,26 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_H264,
.fcc = V4L2_PIX_FMT_H264,
},
+ {
+ .name = "Greyscale 8 L/R (Y8I)",
+ .guid = UVC_GUID_FORMAT_Y8I,
+ .fcc = V4L2_PIX_FMT_Y8I,
+ },
+ {
+ .name = "Greyscale 12 L/R (Y12I)",
+ .guid = UVC_GUID_FORMAT_Y12I,
+ .fcc = V4L2_PIX_FMT_Y12I,
+ },
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_Z16,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Bayer 10-bit (SRGGB10P)",
+ .guid = UVC_GUID_FORMAT_RW10,
+ .fcc = V4L2_PIX_FMT_SRGGB10P,
+ },
};
/* ------------------------------------------------------------------------
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index f0f2391e1b43..7e4d3eea371b 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -119,6 +119,18 @@
#define UVC_GUID_FORMAT_H264 \
{ 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8I \
+ { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12I \
+ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Z16 \
+ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RW10 \
+ { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
/* ------------------------------------------------------------------------
* Driver specific constants.
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 9beece00869b..29b3436d0910 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -37,7 +37,6 @@ config VIDEO_PCI_SKELETON
# Used by drivers that need tuner.ko
config VIDEO_TUNER
tristate
- depends on MEDIA_TUNER
# Used by drivers that need v4l2-mem2mem.ko
config V4L2_MEM2MEM_DEV
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 1dc8bba2b198..795a5352761d 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -16,6 +16,7 @@ endif
ifeq ($(CONFIG_TRACEPOINTS),y)
videodev-objs += vb2-trace.o v4l2-trace.o
endif
+videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o
obj-$(CONFIG_VIDEO_V4L2) += videodev.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 76496fd282aa..731487be5baa 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -696,16 +696,32 @@ static int tuner_probe(struct i2c_client *client,
/* Should be just before return */
register_client:
#if defined(CONFIG_MEDIA_CONTROLLER)
- t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
- t->pad[TUNER_PAD_IF_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
- t->sd.entity.function = MEDIA_ENT_F_TUNER;
t->sd.entity.name = t->name;
+ /*
+ * Handle the special case where the tuner actually has
+ * two stages: the PLL that tunes to a frequency and the
+ * IF-PLL demodulator (tda988x).
+ */
+ if (t->type == TUNER_TDA9887) {
+ t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&t->sd.entity,
+ IF_VID_DEC_PAD_NUM_PADS,
+ &t->pad[0]);
+ t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER;
+ } else {
+ t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS,
+ &t->pad[0]);
+ t->sd.entity.function = MEDIA_ENT_F_TUNER;
+ }
- ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS, &t->pad[0]);
if (ret < 0) {
tuner_err("failed to initialize media entity!\n");
kfree(t);
- return -ENODEV;
+ return ret;
}
#endif
/* Sets a default mode */
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 5bada202b2d3..a4b224d92572 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -119,6 +119,13 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
return ret;
}
+ ret = v4l2_subdev_call(sd, core, registered_async);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ if (notifier->unbind)
+ notifier->unbind(notifier, sd, asd);
+ return ret;
+ }
+
if (list_empty(&notifier->waiting) && notifier->complete)
return notifier->complete(notifier);
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8fd84a67478a..019644ff627d 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
get_user(kp->index, &up->index) ||
get_user(kp->type, &up->type) ||
get_user(kp->flags, &up->flags) ||
- get_user(kp->memory, &up->memory))
+ get_user(kp->memory, &up->memory) ||
+ get_user(kp->length, &up->length))
return -EFAULT;
if (V4L2_TYPE_IS_OUTPUT(kp->type))
@@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- if (get_user(kp->length, &up->length))
- return -EFAULT;
-
num_planes = kp->length;
if (num_planes == 0) {
kp->m.planes = NULL;
@@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
} else {
switch (kp->memory) {
case V4L2_MEMORY_MMAP:
- if (get_user(kp->length, &up->length) ||
- get_user(kp->m.offset, &up->m.offset))
+ if (get_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
{
compat_long_t tmp;
- if (get_user(kp->length, &up->length) ||
- get_user(tmp, &up->m.userptr))
+ if (get_user(tmp, &up->m.userptr))
return -EFAULT;
kp->m.userptr = (unsigned long)compat_ptr(tmp);
@@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
put_user(kp->sequence, &up->sequence) ||
put_user(kp->reserved2, &up->reserved2) ||
- put_user(kp->reserved, &up->reserved))
+ put_user(kp->reserved, &up->reserved) ||
+ put_user(kp->length, &up->length))
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
@@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
} else {
switch (kp->memory) {
case V4L2_MEMORY_MMAP:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.offset, &up->m.offset))
+ if (put_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.userptr, &up->m.userptr))
+ if (put_user(kp->m.userptr, &up->m.userptr))
return -EFAULT;
break;
case V4L2_MEMORY_OVERLAY:
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index c9d5537b6af7..8b321e0aae62 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -462,6 +462,14 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"RGB full range (0-255)",
NULL,
};
+ static const char * const dv_it_content_type[] = {
+ "Graphics",
+ "Photo",
+ "Cinema",
+ "Game",
+ "No IT Content",
+ NULL,
+ };
static const char * const detect_md_mode[] = {
"Disabled",
"Global",
@@ -560,6 +568,9 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
case V4L2_CID_DV_TX_RGB_RANGE:
case V4L2_CID_DV_RX_RGB_RANGE:
return dv_rgb_range;
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ return dv_it_content_type;
case V4L2_CID_DETECT_MD_MODE:
return detect_md_mode;
@@ -747,6 +758,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range";
case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
/* VPX controls */
case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
@@ -881,8 +893,10 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present";
case V4L2_CID_DV_TX_MODE: return "Transmit Mode";
case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range";
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type";
case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present";
case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range";
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type";
case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
@@ -985,6 +999,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
case V4L2_CID_PAN_RESET:
case V4L2_CID_TILT_RESET:
case V4L2_CID_FLASH_STROBE:
@@ -1038,7 +1053,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_SCENE_MODE:
case V4L2_CID_DV_TX_MODE:
case V4L2_CID_DV_TX_RGB_RANGE:
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
case V4L2_CID_DV_RX_RGB_RANGE:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
case V4L2_CID_TEST_PATTERN:
case V4L2_CID_TUNE_DEEMPHASIS:
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
@@ -1185,6 +1202,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_DV_TX_RXSENSE:
case V4L2_CID_DV_TX_EDID_PRESENT:
case V4L2_CID_DV_RX_POWER_PRESENT:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
case V4L2_CID_RDS_RX_PTY:
case V4L2_CID_RDS_RX_PS_NAME:
case V4L2_CID_RDS_RX_RADIO_TEXT:
@@ -2211,22 +2229,6 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
}
EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
-/* Add a control from another handler to this handler */
-struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl *ctrl)
-{
- if (hdl == NULL || hdl->error)
- return NULL;
- if (ctrl == NULL) {
- handler_set_err(hdl, -EINVAL);
- return NULL;
- }
- if (ctrl->handler == hdl)
- return ctrl;
- return handler_new_ref(hdl, ctrl) ? NULL : ctrl;
-}
-EXPORT_SYMBOL(v4l2_ctrl_add_ctrl);
-
/* Add the controls from another handler to our own. */
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *add,
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index ec258b73001a..889de0a32152 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -165,7 +165,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
bt->width > cap->max_width ||
bt->pixelclock < cap->min_pixelclock ||
bt->pixelclock > cap->max_pixelclock ||
- (cap->standards && bt->standards &&
+ (!(caps & V4L2_DV_BT_CAP_CUSTOM) &&
+ cap->standards && bt->standards &&
!(bt->standards & cap->standards)) ||
(bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
(!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index c97067a25bd2..c183f0996fa1 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -29,6 +29,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
{
@@ -92,6 +93,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
{
if (fh->vdev == NULL)
return;
+ v4l_disable_media_source(fh->vdev);
v4l2_event_unsubscribe_all(fh);
fh->vdev = NULL;
}
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 8a018c6dd16a..170dd68d27f4 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -27,6 +27,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mc.h>
#include <trace/events/v4l2.h>
@@ -1041,6 +1042,12 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
}
@@ -1165,7 +1172,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YVYU: descr = "YVYU 4:2:2"; break;
case V4L2_PIX_FMT_UYVY: descr = "UYVY 4:2:2"; break;
case V4L2_PIX_FMT_VYUY: descr = "VYUY 4:2:2"; break;
- case V4L2_PIX_FMT_YUV422P: descr = "Planar YVU 4:2:2"; break;
+ case V4L2_PIX_FMT_YUV422P: descr = "Planar YUV 4:2:2"; break;
case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break;
case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break;
case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break;
@@ -1191,6 +1198,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/CbCr 4:2:0 (16x16 MB, N-C)"; break;
case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break;
+ case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break;
case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break;
@@ -1448,6 +1459,9 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
v4l_sanitize_format(p);
switch (p->type) {
@@ -1637,7 +1651,11 @@ static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
return ops->vidioc_s_tuner(file, fh, p);
@@ -1691,7 +1709,11 @@ static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
const struct v4l2_frequency *p = arg;
enum v4l2_tuner_type type;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
if (vfd->vfl_type == VFL_TYPE_SDR) {
if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
return -EINVAL;
@@ -1746,7 +1768,11 @@ static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id id = *(v4l2_std_id *)arg, norm;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
norm = id & vfd->tvnorms;
if (vfd->tvnorms && !norm) /* Check if std is supported */
return -EINVAL;
@@ -1760,7 +1786,11 @@ static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id *p = arg;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
/*
* If no signal is detected, then the driver should return
* V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with
@@ -1779,7 +1809,11 @@ static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
struct v4l2_hw_freq_seek *p = arg;
enum v4l2_tuner_type type;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
/* s_hw_freq_seek is not supported for SDR for now */
if (vfd->vfl_type == VFL_TYPE_SDR)
return -EINVAL;
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
new file mode 100644
index 000000000000..2228cd3a846e
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -0,0 +1,403 @@
+/*
+ * Media Controller ancillary functions
+ *
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
+ * Copyright (C) 2006-2010 Nokia Corporation
+ * Copyright (c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/media-device.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-core.h>
+
+int v4l2_mc_create_media_graph(struct media_device *mdev)
+{
+ struct media_entity *entity;
+ struct media_entity *if_vid = NULL, *if_aud = NULL;
+ struct media_entity *tuner = NULL, *decoder = NULL;
+ struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
+ bool is_webcam = false;
+ u32 flags;
+ int ret;
+
+ if (!mdev)
+ return 0;
+
+ media_device_for_each_entity(entity, mdev) {
+ switch (entity->function) {
+ case MEDIA_ENT_F_IF_VID_DECODER:
+ if_vid = entity;
+ break;
+ case MEDIA_ENT_F_IF_AUD_DECODER:
+ if_aud = entity;
+ break;
+ case MEDIA_ENT_F_TUNER:
+ tuner = entity;
+ break;
+ case MEDIA_ENT_F_ATV_DECODER:
+ decoder = entity;
+ break;
+ case MEDIA_ENT_F_IO_V4L:
+ io_v4l = entity;
+ break;
+ case MEDIA_ENT_F_IO_VBI:
+ io_vbi = entity;
+ break;
+ case MEDIA_ENT_F_IO_SWRADIO:
+ io_swradio = entity;
+ break;
+ case MEDIA_ENT_F_CAM_SENSOR:
+ is_webcam = true;
+ break;
+ }
+ }
+
+ /* It should have at least one I/O entity */
+ if (!io_v4l && !io_vbi && !io_swradio)
+ return -EINVAL;
+
+ /*
+	 * Here, webcams are modelled in a very simple way: the sensor is
+	 * connected directly to the I/O entity. All the dirty details, like
+	 * scaler and crop hardware, are hidden. While such a mapping is not
+	 * enough for MC-centric hardware, it is enough for hardware aimed at
+	 * V4L2-interface-centric consumer PCs.
+ */
+ if (is_webcam) {
+ if (!io_v4l)
+ return -EINVAL;
+
+ media_device_for_each_entity(entity, mdev) {
+ if (entity->function != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+ ret = media_create_pad_link(entity, 0,
+ io_v4l, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ }
+ if (!decoder)
+ return 0;
+ }
+
+	/* The device isn't a webcam, so it should have a decoder */
+ if (!decoder)
+ return -EINVAL;
+
+ /* Link the tuner and IF video output pads */
+ if (tuner) {
+ if (if_vid) {
+ ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
+ if_vid,
+ IF_VID_DEC_PAD_IF_INPUT,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ ret = media_create_pad_link(if_vid, IF_VID_DEC_PAD_OUT,
+ decoder, DEMOD_PAD_IF_INPUT,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ } else {
+ ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
+ decoder, DEMOD_PAD_IF_INPUT,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ }
+
+ if (if_aud) {
+ ret = media_create_pad_link(tuner, TUNER_PAD_AUD_OUT,
+ if_aud,
+ IF_AUD_DEC_PAD_IF_INPUT,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ } else {
+ if_aud = tuner;
+ }
+
+ }
+
+ /* Create demod to V4L, VBI and SDR radio links */
+ if (io_v4l) {
+ ret = media_create_pad_link(decoder, DEMOD_PAD_VID_OUT,
+ io_v4l, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ }
+
+ if (io_swradio) {
+ ret = media_create_pad_link(decoder, DEMOD_PAD_VID_OUT,
+ io_swradio, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ }
+
+ if (io_vbi) {
+ ret = media_create_pad_link(decoder, DEMOD_PAD_VBI_OUT,
+ io_vbi, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ }
+
+ /* Create links for the media connectors */
+ flags = MEDIA_LNK_FL_ENABLED;
+ media_device_for_each_entity(entity, mdev) {
+ switch (entity->function) {
+ case MEDIA_ENT_F_CONN_RF:
+ if (!tuner)
+ continue;
+
+ ret = media_create_pad_link(entity, 0, tuner,
+ TUNER_PAD_RF_INPUT,
+ flags);
+ break;
+ case MEDIA_ENT_F_CONN_SVIDEO:
+ case MEDIA_ENT_F_CONN_COMPOSITE:
+ ret = media_create_pad_link(entity, 0, decoder,
+ DEMOD_PAD_IF_INPUT,
+ flags);
+ break;
+ default:
+ continue;
+ }
+ if (ret)
+ return ret;
+
+ flags = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph);
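
A bridge driver would normally call this helper once, after all of its
entities have been registered with the media device. A minimal sketch of such
a caller, where mydrv_media_complete() and struct mydrv are hypothetical
names:

    /* Hedged sketch: create the default graph once entity registration is done. */
    static int mydrv_media_complete(struct mydrv *dev)
    {
            int ret;

            ret = v4l2_mc_create_media_graph(dev->mdev);
            if (ret)
                    dev_err(dev->dev, "failed to create media graph\n");
            return ret;
    }
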
+
+int v4l_enable_media_source(struct video_device *vdev)
+{
+ struct media_device *mdev = vdev->entity.graph_obj.mdev;
+ int ret;
+
+ if (!mdev || !mdev->enable_source)
+ return 0;
+ ret = mdev->enable_source(&vdev->entity, &vdev->pipe);
+ if (ret)
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l_enable_media_source);
+
+void v4l_disable_media_source(struct video_device *vdev)
+{
+ struct media_device *mdev = vdev->entity.graph_obj.mdev;
+
+ if (mdev && mdev->disable_source)
+ mdev->disable_source(&vdev->entity);
+}
+EXPORT_SYMBOL_GPL(v4l_disable_media_source);
+
+int v4l_vb2q_enable_media_source(struct vb2_queue *q)
+{
+ struct v4l2_fh *fh = q->owner;
+
+ if (fh && fh->vdev)
+ return v4l_enable_media_source(fh->vdev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source);
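
The three helpers above only forward to hooks on the media device; the owner
of the shared source (typically a tuner) is expected to provide them. A
sketch of that wiring, under the assumption of this kernel generation's
media_device fields, with the mydrv_* names being hypothetical:

    /* Hedged sketch: hooks invoked by v4l_enable_media_source() and
     * v4l_disable_media_source() above. */
    static int mydrv_enable_source(struct media_entity *entity,
                                   struct media_pipeline *pipe)
    {
            /* Find and reserve the tuner/decoder path feeding @entity. */
            return 0;
    }

    static void mydrv_disable_source(struct media_entity *entity)
    {
            /* Release whatever mydrv_enable_source() reserved. */
    }

    /* ...at media device setup time: */
    mdev->enable_source = mydrv_enable_source;
    mdev->disable_source = mydrv_disable_source;
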
+
+/* -----------------------------------------------------------------------------
+ * Pipeline power management
+ *
+ * Entities must be powered up when part of a pipeline that contains at least
+ * one open video device node.
+ *
+ * To achieve this, the entity use_count field tracks the number of users. For
+ * entities corresponding to video device nodes, the use_count field stores the
+ * use count of the node. For entities corresponding to subdevs, it stores the
+ * total number of users of all video device nodes in the pipeline.
+ *
+ * The v4l2_pipeline_pm_use() function must be called in the open() and
+ * close() handlers of video device nodes. It increments or decrements the use
+ * count of all subdev entities in the pipeline.
+ *
+ * To react to link management on powered pipelines, the link setup notification
+ * callback updates the use count of all entities on the source and sink sides
+ * of the link.
+ */
+
+/*
+ * pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int pipeline_pm_use_count(struct media_entity *entity,
+ struct media_entity_graph *graph)
+{
+ int use = 0;
+
+ media_entity_graph_walk_start(graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(graph))) {
+ if (is_media_entity_v4l2_io(entity))
+ use += entity->use_count;
+ }
+
+ return use;
+}
+
+/*
+ * pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev, update its
+ * power state by calling the core::s_power operation when the use count goes
+ * from 0 to != 0 or from != 0 to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = is_media_entity_v4l2_subdev(entity)
+ ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+ if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ if (entity->use_count == 0 && change < 0 && subdev != NULL)
+ v4l2_subdev_call(subdev, core, s_power, 0);
+
+ return 0;
+}
+
+/*
+ * pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int pipeline_pm_power(struct media_entity *entity, int change,
+ struct media_entity_graph *graph)
+{
+ struct media_entity *first = entity;
+ int ret = 0;
+
+ if (!change)
+ return 0;
+
+ media_entity_graph_walk_start(graph, entity);
+
+ while (!ret && (entity = media_entity_graph_walk_next(graph)))
+ if (is_media_entity_v4l2_subdev(entity))
+ ret = pipeline_pm_power_one(entity, change);
+
+ if (!ret)
+ return ret;
+
+ media_entity_graph_walk_start(graph, first);
+
+ while ((first = media_entity_graph_walk_next(graph))
+ && first != entity)
+ if (is_media_entity_v4l2_subdev(first))
+ pipeline_pm_power_one(first, -change);
+
+ return ret;
+}
+
+int v4l2_pipeline_pm_use(struct media_entity *entity, int use)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ int change = use ? 1 : -1;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Apply use count to node. */
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ /* Apply power change to connected non-nodes. */
+ ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk);
+ if (ret < 0)
+ entity->use_count -= change;
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_use);
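
The block comment above prescribes the calling convention: bump the count on
open, drop it on close. A sketch of a conforming pair of file operations,
where mydrv_open() and mydrv_release() are hypothetical:

    static int mydrv_open(struct file *file)
    {
            struct video_device *vdev = video_devdata(file);

            return v4l2_pipeline_pm_use(&vdev->entity, 1);
    }

    static int mydrv_release(struct file *file)
    {
            struct video_device *vdev = video_devdata(file);

            v4l2_pipeline_pm_use(&vdev->entity, 0);
            return 0;
    }
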
+
+int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct media_entity_graph *graph = &link->graph_obj.mdev->pm_count_walk;
+ struct media_entity *source = link->source->entity;
+ struct media_entity *sink = link->sink->entity;
+ int source_use;
+ int sink_use;
+ int ret = 0;
+
+ source_use = pipeline_pm_use_count(source, graph);
+ sink_use = pipeline_pm_use_count(sink, graph);
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ /* Powering off entities is assumed to never fail. */
+ pipeline_pm_power(source, -sink_use, graph);
+ pipeline_pm_power(sink, -source_use, graph);
+ return 0;
+ }
+
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+
+ ret = pipeline_pm_power(source, sink_use, graph);
+ if (ret < 0)
+ return ret;
+
+ ret = pipeline_pm_power(sink, source_use, graph);
+ if (ret < 0)
+ pipeline_pm_power(source, -sink_use, graph);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_link_notify);
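
For the power accounting above to follow link changes, a driver points the
media device's link setup notification hook at this function. A one-line
sketch, assuming media_device exposes a link_notify callback matching this
signature, as it does in this kernel generation (mydrv is hypothetical):

    /* Hedged sketch: done once, before media_device_register(). */
    mydrv->media_dev.link_notify = v4l2_pipeline_link_notify;
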
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
index b27cbb1f5afe..93b33681776c 100644
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -146,7 +146,7 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node,
* variable without a low fixed limit. Please use
* v4l2_of_alloc_parse_endpoint() in new drivers instead.
*
- * Return: 0.
+ * Return: 0 on success or a negative error code on failure.
*/
int v4l2_of_parse_endpoint(const struct device_node *node,
struct v4l2_of_endpoint *endpoint)
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 6c02989ee33f..def84753c4c3 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -75,7 +75,8 @@ struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
-static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static int state_neither_active_nor_queued(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
{
unsigned long flags;
bool rc;
@@ -95,7 +96,7 @@ int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
if (non_blocking) {
- if (is_state_active_or_queued(q, vb))
+ if (state_neither_active_nor_queued(q, vb))
return 0;
return -EAGAIN;
}
@@ -107,9 +108,10 @@ int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
if (is_ext_locked)
mutex_unlock(q->ext_lock);
if (intr)
- ret = wait_event_interruptible(vb->done, is_state_active_or_queued(q, vb));
+ ret = wait_event_interruptible(vb->done,
+ state_neither_active_nor_queued(q, vb));
else
- wait_event(vb->done, is_state_active_or_queued(q, vb));
+ wait_event(vb->done, state_neither_active_nor_queued(q, vb));
/* Relock */
if (is_ext_locked)
mutex_lock(q->ext_lock);
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f669cedca8bd..f300f060b3f3 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -181,8 +181,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages(current, current->mm,
- data & PAGE_MASK, dma->nr_pages,
+ err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
rw == READ, 1, /* force */
dma->pages, NULL);
@@ -350,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
if (dma->pages) {
for (i = 0; i < dma->nr_pages; i++)
- page_cache_release(dma->pages[i]);
+ put_page(dma->pages[i]);
kfree(dma->pages);
dma->pages = NULL;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index ff8953ae52d1..9fbcb67a9ee6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <media/videobuf2-core.h>
+#include <media/v4l2-mc.h>
#include <trace/events/vb2.h>
@@ -1227,6 +1228,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
if (planes[plane].length < vb->planes[plane].min_length) {
dprintk(1, "invalid dmabuf length for plane %d\n",
plane);
+ dma_buf_put(dbuf);
ret = -EINVAL;
goto err;
}
@@ -1643,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* Will sleep if required for nonblocking == false.
*/
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- int nonblocking)
+ void *pb, int nonblocking)
{
unsigned long flags;
int ret;
@@ -1664,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
/*
* Only remove the buffer from done_list if v4l2_buffer can handle all
* the planes.
- * Verifying planes is NOT necessary since it already has been checked
- * before the buffer is queued/prepared. So it can never fail.
*/
- list_del(&(*vb)->done_entry);
+ ret = call_bufop(q, verify_planes_array, *vb, pb);
+ if (!ret)
+ list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
return ret;
@@ -1746,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
struct vb2_buffer *vb = NULL;
int ret;
- ret = __vb2_get_done_vb(q, &vb, nonblocking);
+ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
if (ret < 0)
return ret;
@@ -1886,6 +1888,9 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
* are available.
*/
if (q->queued_count >= q->min_buffers_needed) {
+ ret = v4l_vb2q_enable_media_source(q);
+ if (ret)
+ return ret;
ret = vb2_start_streaming(q);
if (ret) {
__vb2_queue_cancel(q);
@@ -2293,6 +2298,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
return POLLERR;
/*
+	 * If this quirk is set and QBUF hasn't been called yet, then
+	 * return POLLERR as well. This only affects capture queues; output
+ * queues will always initialize waiting_for_buffers to false.
+ * This quirk is set by V4L2 for backwards compatibility reasons.
+ */
+ if (q->quirk_poll_must_check_waiting_for_buffers &&
+ q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
+ return POLLERR;
+
+ /*
* For output streams you can call write() as long as there are fewer
* buffers queued than there are buffers available.
*/
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index c33127284cfe..5361197f3e57 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -23,13 +23,16 @@
struct vb2_dc_conf {
struct device *dev;
+ struct dma_attrs attrs;
};
struct vb2_dc_buf {
struct device *dev;
void *vaddr;
unsigned long size;
+ void *cookie;
dma_addr_t dma_addr;
+ struct dma_attrs attrs;
enum dma_data_direction dma_dir;
struct sg_table *dma_sgt;
struct frame_vector *vec;
@@ -131,7 +134,8 @@ static void vb2_dc_put(void *buf_priv)
sg_free_table(buf->sgt_base);
kfree(buf->sgt_base);
}
- dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+ dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
+ &buf->attrs);
put_device(buf->dev);
kfree(buf);
}
@@ -147,14 +151,18 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags);
- if (!buf->vaddr) {
+ buf->attrs = conf->attrs;
+ buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
+ GFP_KERNEL | gfp_flags, &buf->attrs);
+ if (!buf->cookie) {
dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
+ buf->vaddr = buf->cookie;
+
/* Prevent the device from being released while the buffer is used */
buf->dev = get_device(dev);
buf->size = size;
@@ -185,8 +193,8 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
*/
vma->vm_pgoff = 0;
- ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
- buf->dma_addr, buf->size);
+ ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
+ buf->dma_addr, buf->size, &buf->attrs);
if (ret) {
pr_err("Remapping memory failed, error: %d\n", ret);
@@ -329,7 +337,7 @@ static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
struct vb2_dc_buf *buf = dbuf->priv;
- return buf->vaddr + pgnum * PAGE_SIZE;
+ return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}
static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
@@ -368,8 +376,8 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
return NULL;
}
- ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
- buf->size);
+ ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
+ buf->size, &buf->attrs);
if (ret < 0) {
dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
kfree(sgt);
@@ -721,7 +729,8 @@ const struct vb2_mem_ops vb2_dma_contig_memops = {
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
-void *vb2_dma_contig_init_ctx(struct device *dev)
+void *vb2_dma_contig_init_ctx_attrs(struct device *dev,
+ struct dma_attrs *attrs)
{
struct vb2_dc_conf *conf;
@@ -730,10 +739,12 @@ void *vb2_dma_contig_init_ctx(struct device *dev)
return ERR_PTR(-ENOMEM);
conf->dev = dev;
+ if (attrs)
+ conf->attrs = *attrs;
return conf;
}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
+EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx_attrs);
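
The point of the _attrs variant is visible in the allocator above: a caller
can pass DMA_ATTR_NO_KERNEL_MAPPING and vb2 will skip the kernel virtual
mapping. A sketch of such a caller, assuming the pre-4.8 dma_attrs helpers
init_dma_attrs() and dma_set_attr():

    struct dma_attrs attrs;
    void *alloc_ctx;

    init_dma_attrs(&attrs);
    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
    /* dev is the DMA-capable struct device the buffers belong to. */
    alloc_ctx = vb2_dma_contig_init_ctx_attrs(dev, &attrs);
    if (IS_ERR(alloc_ctx))
            return PTR_ERR(alloc_ctx);
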
void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
diff --git a/drivers/media/v4l2-core/videobuf2-dvb.c b/drivers/media/v4l2-core/videobuf2-dvb.c
index d09269846b7e..9f38b4218c0d 100644
--- a/drivers/media/v4l2-core/videobuf2-dvb.c
+++ b/drivers/media/v4l2-core/videobuf2-dvb.c
@@ -77,6 +77,7 @@ static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe,
struct module *module,
void *adapter_priv,
struct device *device,
+ struct media_device *mdev,
char *adapter_name,
short *adapter_nr,
int mfe_shared)
@@ -94,7 +95,10 @@ static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe,
}
fe->adapter.priv = adapter_priv;
fe->adapter.mfe_shared = mfe_shared;
-
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (mdev)
+ fe->adapter.mdev = mdev;
+#endif
return result;
}
@@ -193,6 +197,7 @@ int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
struct module *module,
void *adapter_priv,
struct device *device,
+ struct media_device *mdev,
short *adapter_nr,
int mfe_shared)
{
@@ -207,7 +212,7 @@ int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
}
/* Bring up the adapter */
- res = vb2_dvb_register_adapter(f, module, adapter_priv, device,
+ res = vb2_dvb_register_adapter(f, module, adapter_priv, device, mdev,
fe->dvb.name, adapter_nr, mfe_shared);
if (res < 0) {
pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res);
@@ -224,7 +229,11 @@ int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
fe->dvb.name, res);
goto err;
}
+ res = dvb_create_media_graph(&f->adapter, false);
+ if (res < 0)
+ goto err;
}
+
mutex_unlock(&f->lock);
return 0;
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec5923fcf0..3c3b517f1d1c 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start, nr, write, 1, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 91f552124050..0b1b8c7b6ce5 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -765,6 +765,12 @@ int vb2_queue_init(struct vb2_queue *q)
q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
== V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ /*
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
+	 * return POLLERR as well. This only affects capture queues; output
+ * queues will always initialize waiting_for_buffers to false.
+ */
+ q->quirk_poll_must_check_waiting_for_buffers = true;
return vb2_core_queue_init(q);
}
@@ -818,14 +824,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
poll_wait(file, &fh->wait, wait);
}
- /*
- * For compatibility with vb1: if QBUF hasn't been called yet, then
- * return POLLERR as well. This only affects capture queues, output
- * queues will always initialize waiting_for_buffers to false.
- */
- if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
- return POLLERR;
-
return res | vb2_core_poll(q, file, wait);
}
EXPORT_SYMBOL_GPL(vb2_poll);
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 6f3154613dc7..51d5cd20c26a 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -114,6 +114,14 @@ config JZ4780_NEMC
the Ingenic JZ4780. This controller is used to handle external
memory devices such as NAND and SRAM.
+config MTK_SMI
+ bool
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+	  This driver is for the memory controller (SMI) module in MediaTek
+	  SoCs. It mainly helps to enable/disable the IOMMU and controls the
+	  power domain and clocks for each local arbiter.
+
source "drivers/memory/tegra/Kconfig"
endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 1c46af501610..890bdf402449 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
+obj-$(CONFIG_MTK_SMI) += mtk-smi.o
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index acd1460cf787..2a691da8c1c7 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -260,7 +260,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
/* get the Controller level irq */
fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
- if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
+ if (fsl_ifc_ctrl_dev->irq == 0) {
dev_err(&dev->dev, "failed to get irq resource "
"for IFC\n");
ret = -ENODEV;
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
new file mode 100644
index 000000000000..089091f5f890
--- /dev/null
+++ b/drivers/memory/mtk-smi.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#define SMI_LARB_MMU_EN 0xf00
+
+struct mtk_smi {
+ struct device *dev;
+ struct clk *clk_apb, *clk_smi;
+};
+
+struct mtk_smi_larb { /* larb: local arbiter */
+ struct mtk_smi smi;
+ void __iomem *base;
+ struct device *smi_common_dev;
+ u32 *mmu;
+};
+
+static int mtk_smi_enable(const struct mtk_smi *smi)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(smi->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(smi->clk_apb);
+ if (ret)
+ goto err_put_pm;
+
+ ret = clk_prepare_enable(smi->clk_smi);
+ if (ret)
+ goto err_disable_apb;
+
+ return 0;
+
+err_disable_apb:
+ clk_disable_unprepare(smi->clk_apb);
+err_put_pm:
+ pm_runtime_put_sync(smi->dev);
+ return ret;
+}
+
+static void mtk_smi_disable(const struct mtk_smi *smi)
+{
+ clk_disable_unprepare(smi->clk_smi);
+ clk_disable_unprepare(smi->clk_apb);
+ pm_runtime_put_sync(smi->dev);
+}
+
+int mtk_smi_larb_get(struct device *larbdev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
+ struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
+ int ret;
+
+ /* Enable the smi-common's power and clocks */
+ ret = mtk_smi_enable(common);
+ if (ret)
+ return ret;
+
+ /* Enable the larb's power and clocks */
+ ret = mtk_smi_enable(&larb->smi);
+ if (ret) {
+ mtk_smi_disable(common);
+ return ret;
+ }
+
+ /* Configure the iommu info for this larb */
+ writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
+
+ return 0;
+}
+
+void mtk_smi_larb_put(struct device *larbdev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
+ struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
+
+ /*
+ * Don't de-configure the iommu info for this larb since there may be
+ * several modules in this larb.
+ * The iommu info will be reset after power off.
+ */
+
+ mtk_smi_disable(&larb->smi);
+ mtk_smi_disable(common);
+}
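
Consumers bracket hardware activity with this get/put pair; the get also
powers up the parent smi-common, as the code above shows. A sketch of a
caller, with mydrv_run_job() hypothetical and larbdev being the larb's
device as passed to these helpers:

    /* Hedged sketch: power the larb (and smi-common) up around one job. */
    static int mydrv_run_job(struct device *larbdev)
    {
            int ret;

            ret = mtk_smi_larb_get(larbdev);
            if (ret)
                    return ret;

            /* ...program the multimedia block behind this larb... */

            mtk_smi_larb_put(larbdev);
            return 0;
    }
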
+
+static int
+mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ struct mtk_smi_iommu *smi_iommu = data;
+ unsigned int i;
+
+ for (i = 0; i < smi_iommu->larb_nr; i++) {
+ if (dev == smi_iommu->larb_imu[i].dev) {
+ /* The 'mmu' may be updated in iommu-attach/detach. */
+ larb->mmu = &smi_iommu->larb_imu[i].mmu;
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static void
+mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
+{
+ /* Do nothing as the iommu is always enabled. */
+}
+
+static const struct component_ops mtk_smi_larb_component_ops = {
+ .bind = mtk_smi_larb_bind,
+ .unbind = mtk_smi_larb_unbind,
+};
+
+static int mtk_smi_larb_probe(struct platform_device *pdev)
+{
+ struct mtk_smi_larb *larb;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *smi_node;
+ struct platform_device *smi_pdev;
+
+ if (!dev->pm_domain)
+ return -EPROBE_DEFER;
+
+ larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
+ if (!larb)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ larb->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(larb->base))
+ return PTR_ERR(larb->base);
+
+ larb->smi.clk_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(larb->smi.clk_apb))
+ return PTR_ERR(larb->smi.clk_apb);
+
+ larb->smi.clk_smi = devm_clk_get(dev, "smi");
+ if (IS_ERR(larb->smi.clk_smi))
+ return PTR_ERR(larb->smi.clk_smi);
+ larb->smi.dev = dev;
+
+ smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
+ if (!smi_node)
+ return -EINVAL;
+
+ smi_pdev = of_find_device_by_node(smi_node);
+ of_node_put(smi_node);
+ if (smi_pdev) {
+ larb->smi_common_dev = &smi_pdev->dev;
+ } else {
+ dev_err(dev, "Failed to get the smi_common device\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, larb);
+ return component_add(dev, &mtk_smi_larb_component_ops);
+}
+
+static int mtk_smi_larb_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
+ return 0;
+}
+
+static const struct of_device_id mtk_smi_larb_of_ids[] = {
+ { .compatible = "mediatek,mt8173-smi-larb",},
+ {}
+};
+
+static struct platform_driver mtk_smi_larb_driver = {
+ .probe = mtk_smi_larb_probe,
+ .remove = mtk_smi_larb_remove,
+ .driver = {
+ .name = "mtk-smi-larb",
+ .of_match_table = mtk_smi_larb_of_ids,
+ }
+};
+
+static int mtk_smi_common_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_smi *common;
+
+ if (!dev->pm_domain)
+ return -EPROBE_DEFER;
+
+ common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
+ if (!common)
+ return -ENOMEM;
+ common->dev = dev;
+
+ common->clk_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(common->clk_apb))
+ return PTR_ERR(common->clk_apb);
+
+ common->clk_smi = devm_clk_get(dev, "smi");
+ if (IS_ERR(common->clk_smi))
+ return PTR_ERR(common->clk_smi);
+
+ pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, common);
+ return 0;
+}
+
+static int mtk_smi_common_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id mtk_smi_common_of_ids[] = {
+ { .compatible = "mediatek,mt8173-smi-common", },
+ {}
+};
+
+static struct platform_driver mtk_smi_common_driver = {
+ .probe = mtk_smi_common_probe,
+ .remove = mtk_smi_common_remove,
+ .driver = {
+ .name = "mtk-smi-common",
+ .of_match_table = mtk_smi_common_of_ids,
+ }
+};
+
+static int __init mtk_smi_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mtk_smi_common_driver);
+ if (ret != 0) {
+ pr_err("Failed to register SMI driver\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&mtk_smi_larb_driver);
+ if (ret != 0) {
+ pr_err("Failed to register SMI-LARB driver\n");
+ goto err_unreg_smi;
+ }
+ return ret;
+
+err_unreg_smi:
+ platform_driver_unregister(&mtk_smi_common_driver);
+ return ret;
+}
+subsys_initcall(mtk_smi_init);
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 6515dfc2b805..21825ddce4a3 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -541,9 +541,20 @@ static void gpmc_cs_show_timings(int cs, const char *desc)
GPMC_GET_TICKS(GPMC_CS_CONFIG3, 0, 3, "adv-on-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG3, 8, 12, "adv-rd-off-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns");
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 4, 6, "adv-aad-mux-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 24, 26,
+ "adv-aad-mux-rd-off-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 28, 30,
+ "adv-aad-mux-wr-off-ns");
+ }
GPMC_GET_TICKS(GPMC_CS_CONFIG4, 0, 3, "oe-on-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG4, 8, 12, "oe-off-ns");
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 4, 6, "oe-aad-mux-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 13, 15, "oe-aad-mux-off-ns");
+ }
GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns");
@@ -734,9 +745,18 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on);
GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off);
GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_SET_ONE(GPMC_CS_CONFIG3, 4, 6, adv_aad_mux_on);
+ GPMC_SET_ONE(GPMC_CS_CONFIG3, 24, 26, adv_aad_mux_rd_off);
+ GPMC_SET_ONE(GPMC_CS_CONFIG3, 28, 30, adv_aad_mux_wr_off);
+ }
GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on);
GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off);
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_SET_ONE(GPMC_CS_CONFIG4, 4, 6, oe_aad_mux_on);
+ GPMC_SET_ONE(GPMC_CS_CONFIG4, 13, 15, oe_aad_mux_off);
+ }
GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);
@@ -1722,6 +1742,12 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on);
of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off);
of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off);
+ of_property_read_u32(np, "gpmc,adv-aad-mux-on-ns",
+ &gpmc_t->adv_aad_mux_on);
+ of_property_read_u32(np, "gpmc,adv-aad-mux-rd-off-ns",
+ &gpmc_t->adv_aad_mux_rd_off);
+ of_property_read_u32(np, "gpmc,adv-aad-mux-wr-off-ns",
+ &gpmc_t->adv_aad_mux_wr_off);
/* WE signal timings */
of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on);
@@ -1730,6 +1756,10 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
/* OE signal timings */
of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on);
of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off);
+ of_property_read_u32(np, "gpmc,oe-aad-mux-on-ns",
+ &gpmc_t->oe_aad_mux_on);
+ of_property_read_u32(np, "gpmc,oe-aad-mux-off-ns",
+ &gpmc_t->oe_aad_mux_off);
/* access and cycle timings */
of_property_read_u32(np, "gpmc,page-burst-access-ns",
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index ef09ba0289d7..d5cfb503b9d6 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -298,8 +298,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev)
sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
- if (sg_count != 1 ||
- (sg_dma_len(&dev->req->sg) < dev->req->sg.length)) {
+ if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) {
message("problem in dma_map_sg");
return -EIO;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5dcc0313c38a..5537f8df8512 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1801,8 +1801,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->pcidev = pdev;
if (mpt_mapresources(ioc)) {
- kfree(ioc);
- return r;
+ goto out_free_ioc;
}
/*
@@ -1871,9 +1870,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
- pci_release_selected_regions(pdev, ioc->bars);
- kfree(ioc);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto out_unmap_resources;
}
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
@@ -1995,16 +1993,27 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ioc->fw_event_lock);
snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+ if (!ioc->fw_event_q) {
+ printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
+ ioc->name);
+ r = -ENOMEM;
+ goto out_remove_ioc;
+ }
if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP)) != 0){
printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
ioc->name, r);
+ destroy_workqueue(ioc->fw_event_q);
+ ioc->fw_event_q = NULL;
+
list_del(&ioc->list);
if (ioc->alt_ioc)
ioc->alt_ioc->alt_ioc = NULL;
iounmap(ioc->memmap);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
if (r != -5)
pci_release_selected_regions(pdev, ioc->bars);
@@ -2012,7 +2021,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->reset_work_q = NULL;
kfree(ioc);
- pci_set_drvdata(pdev, NULL);
return r;
}
@@ -2040,6 +2048,24 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
msecs_to_jiffies(MPT_POLLING_INTERVAL));
return 0;
+
+out_remove_ioc:
+ list_del(&ioc->list);
+ if (ioc->alt_ioc)
+ ioc->alt_ioc->alt_ioc = NULL;
+
+ destroy_workqueue(ioc->reset_work_q);
+ ioc->reset_work_q = NULL;
+
+out_unmap_resources:
+ iounmap(ioc->memmap);
+ pci_disable_device(pdev);
+ pci_release_selected_regions(pdev, ioc->bars);
+
+out_free_ioc:
+ kfree(ioc);
+
+ return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -6229,7 +6255,7 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly));
memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer));
- out:
+out:
if (pbuf)
pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
@@ -6848,6 +6874,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
*size = y;
}
+#ifdef CONFIG_PROC_FS
static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan)
{
char expVer[32];
@@ -6879,6 +6906,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
seq_putc(m, '\n');
}
+#endif
/**
* mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 9ca66de0c1c1..eea61e349e26 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -18,6 +18,17 @@ config MFD_CS5535
This is the core driver for CS5535/CS5536 MFD functions. This is
necessary for using the board's GPIO and MFGPT functionality.
+config MFD_ACT8945A
+ tristate "Active-semi ACT8945A"
+ select MFD_CORE
+ select REGMAP_I2C
+ depends on I2C && OF
+ help
+ Support for the ACT8945A PMIC from Active-semi. This device
+ features three step-down DC/DC converters and four low-dropout
+ linear regulators, along with a complete ActivePath battery
+ charger.
+
config MFD_AS3711
bool "AMS AS3711"
select MFD_CORE
@@ -91,14 +102,29 @@ config MFD_BCM590XX
Support for the BCM590xx PMUs from Broadcom
config MFD_AXP20X
- bool "X-Powers AXP20X"
+ tristate
select MFD_CORE
- select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+
+config MFD_AXP20X_I2C
+ tristate "X-Powers AXP series PMICs with I2C"
+ select MFD_AXP20X
+ select REGMAP_I2C
+ depends on I2C
+ help
+ If you say Y here you get support for the X-Powers AXP series power
+ management ICs (PMICs) controlled with I2C.
+	  This driver includes only the core APIs. You have to select individual
+ components like regulators or the PEK (Power Enable Key) under the
+ corresponding menus.
+
+config MFD_AXP20X_RSB
+ tristate "X-Powers AXP series PMICs with RSB"
+ select MFD_AXP20X
+ depends on SUNXI_RSB
help
- If you say Y here you get support for the X-Powers AXP202, AXP209 and
- AXP288 power management IC (PMIC).
+ If you say Y here you get support for the X-Powers AXP series power
+ management ICs (PMICs) controlled with RSB.
	  This driver includes only the core APIs. You have to select individual
components like regulators or the PEK (Power Enable Key) under the
corresponding menus.
@@ -203,7 +229,7 @@ config MFD_DA9062
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+ depends on I2C
help
Say yes here for support for the Dialog Semiconductor DA9062 PMIC.
This includes the I2C driver and core APIs.
@@ -215,7 +241,7 @@ config MFD_DA9063
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+ depends on I2C
help
Say yes here for support for the Dialog Semiconductor DA9063 PMIC.
This includes the I2C driver and core APIs.
@@ -224,7 +250,7 @@ config MFD_DA9063
config MFD_DA9150
tristate "Dialog Semiconductor DA9150 Charger Fuel-Gauge chip"
- depends on I2C=y
+ depends on I2C
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -271,6 +297,15 @@ config MFD_MC13XXX_I2C
help
Select this if your MC13xxx is connected via an I2C bus.
+config MFD_MX25_TSADC
+ tristate "Freescale i.MX25 integrated Touchscreen and ADC unit"
+ select REGMAP_MMIO
+ depends on (SOC_IMX25 && OF) || COMPILE_TEST
+ help
+ Enable support for the integrated Touchscreen and ADC unit of the
+	  i.MX25 processors. It consists of a conversion queue for the general
+	  purpose ADC and a queue for touchscreens.
+
config MFD_HI6421_PMIC
tristate "HiSilicon Hi6421 PMU/Codec IC"
depends on OF
@@ -445,7 +480,7 @@ config MFD_KEMPLD
config MFD_88PM800
tristate "Marvell 88PM800"
- depends on I2C=y
+ depends on I2C
select REGMAP_I2C
select REGMAP_IRQ
select MFD_CORE
@@ -457,7 +492,7 @@ config MFD_88PM800
config MFD_88PM805
tristate "Marvell 88PM805"
- depends on I2C=y
+ depends on I2C
select REGMAP_I2C
select REGMAP_IRQ
select MFD_CORE
@@ -493,8 +528,8 @@ config MFD_MAX14577
of the device.
config MFD_MAX77686
- bool "Maxim Semiconductor MAX77686/802 PMIC Support"
- depends on I2C=y
+ tristate "Maxim Semiconductor MAX77686/802 PMIC Support"
+ depends on I2C
depends on OF
select MFD_CORE
select REGMAP_I2C
@@ -538,7 +573,7 @@ config MFD_MAX77843
config MFD_MAX8907
tristate "Maxim Semiconductor MAX8907 PMIC Support"
select MFD_CORE
- depends on I2C=y
+ depends on I2C
select REGMAP_I2C
select REGMAP_IRQ
help
@@ -743,7 +778,7 @@ config MFD_RTSX_PCI
config MFD_RT5033
tristate "Richtek RT5033 Power Management IC"
- depends on I2C=y
+ depends on I2C
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -1106,6 +1141,19 @@ config TPS6507X
This driver can also be built as a module. If so, the module
will be called tps6507x.
+config MFD_TPS65086
+ tristate "TI TPS65086 Power Management Integrated Chips (PMICs)"
+ select REGMAP
+ select REGMAP_IRQ
+ select REGMAP_I2C
+ depends on I2C
+ help
+ If you say yes here you get support for the TPS65086 series of
+ Power Management chips.
+	  This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config TPS65911_COMPARATOR
tristate
@@ -1181,27 +1229,25 @@ config MFD_TPS65910
Power Management chips.
config MFD_TPS65912
- bool "TI TPS65912 Power Management chip"
- depends on GPIOLIB
+ tristate
select MFD_CORE
- help
- If you say yes here you get support for the TPS65912 series of
- PM chips.
+ select REGMAP
+ select REGMAP_IRQ
config MFD_TPS65912_I2C
- bool "TI TPS65912 Power Management chip with I2C"
- select MFD_CORE
+ tristate "TI TPS65912 Power Management chip with I2C"
select MFD_TPS65912
- depends on I2C=y && GPIOLIB
+ select REGMAP_I2C
+ depends on I2C
help
If you say yes here you get support for the TPS65912 series of
PM chips with I2C interface.
config MFD_TPS65912_SPI
- bool "TI TPS65912 Power Management chip with SPI"
- select MFD_CORE
+ tristate "TI TPS65912 Power Management chip with SPI"
select MFD_TPS65912
- depends on SPI_MASTER && GPIOLIB
+ select REGMAP_SPI
+ depends on SPI_MASTER
help
If you say yes here you get support for the TPS65912 series of
PM chips with SPI interface.
@@ -1372,7 +1418,6 @@ config MFD_ARIZONA
config MFD_ARIZONA_I2C
tristate "Cirrus Logic/Wolfson Microelectronics Arizona platform with I2C"
select MFD_ARIZONA
- select MFD_CORE
select REGMAP_I2C
depends on I2C
help
@@ -1382,12 +1427,11 @@ config MFD_ARIZONA_I2C
config MFD_ARIZONA_SPI
tristate "Cirrus Logic/Wolfson Microelectronics Arizona platform with SPI"
select MFD_ARIZONA
- select MFD_CORE
select REGMAP_SPI
depends on SPI_MASTER
help
Support for the Cirrus Logic/Wolfson Microelectronics Arizona platform
- audio SoC core functionality controlled via I2C.
+ audio SoC core functionality controlled via SPI.
config MFD_CS47L24
bool "Cirrus Logic CS47L24 and WM1831"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 0f230a6103f8..5eaa6465d0a6 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
obj-$(CONFIG_MFD_88PM800) += 88pm800.o 88pm80x.o
obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
+obj-$(CONFIG_MFD_ACT8945A) += act8945a.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
@@ -70,11 +71,11 @@ obj-$(CONFIG_MFD_WM8994) += wm8994.o
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
+obj-$(CONFIG_MFD_TPS65086) += tps65086.o
obj-$(CONFIG_MFD_TPS65217) += tps65217.o
obj-$(CONFIG_MFD_TPS65218) += tps65218.o
obj-$(CONFIG_MFD_TPS65910) += tps65910.o
-tps65912-objs := tps65912-core.o tps65912-irq.o
-obj-$(CONFIG_MFD_TPS65912) += tps65912.o
+obj-$(CONFIG_MFD_TPS65912) += tps65912-core.o
obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
obj-$(CONFIG_MFD_TPS80031) += tps80031.o
@@ -85,6 +86,8 @@ obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_MFD_TWL4030_AUDIO) += twl4030-audio.o
obj-$(CONFIG_TWL6040_CORE) += twl6040.o
+obj-$(CONFIG_MFD_MX25_TSADC) += fsl-imx25-tsadc.o
+
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
@@ -111,6 +114,8 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
obj-$(CONFIG_MFD_AXP20X) += axp20x.o
+obj-$(CONFIG_MFD_AXP20X_I2C) += axp20x-i2c.o
+obj-$(CONFIG_MFD_AXP20X_RSB) += axp20x-rsb.o
obj-$(CONFIG_MFD_LP3943) += lp3943.o
obj-$(CONFIG_MFD_LP8788) += lp8788.o lp8788-irq.o
diff --git a/drivers/mfd/act8945a.c b/drivers/mfd/act8945a.c
new file mode 100644
index 000000000000..525b546ba42f
--- /dev/null
+++ b/drivers/mfd/act8945a.c
@@ -0,0 +1,102 @@
+/*
+ * MFD driver for Active-semi ACT8945a PMIC
+ *
+ * Copyright (C) 2015 Atmel Corporation.
+ *
+ * Author: Wenyou Yang <wenyou.yang@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+static const struct mfd_cell act8945a_devs[] = {
+ {
+ .name = "act8945a-regulator",
+ },
+ {
+ .name = "act8945a-charger",
+ },
+};
+
+static const struct regmap_config act8945a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int act8945a_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(i2c, &act8945a_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, regmap);
+
+ ret = mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE, act8945a_devs,
+ ARRAY_SIZE(act8945a_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to add sub devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int act8945a_i2c_remove(struct i2c_client *i2c)
+{
+ mfd_remove_devices(&i2c->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id act8945a_i2c_id[] = {
+ { "act8945a", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, act8945a_i2c_id);
+
+static const struct of_device_id act8945a_of_match[] = {
+ { .compatible = "active-semi,act8945a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, act8945a_of_match);
+
+static struct i2c_driver act8945a_i2c_driver = {
+ .driver = {
+ .name = "act8945a",
+ .of_match_table = of_match_ptr(act8945a_of_match),
+ },
+ .probe = act8945a_i2c_probe,
+ .remove = act8945a_i2c_remove,
+ .id_table = act8945a_i2c_id,
+};
+
+static int __init act8945a_i2c_init(void)
+{
+ return i2c_add_driver(&act8945a_i2c_driver);
+}
+subsys_initcall(act8945a_i2c_init);
+
+static void __exit act8945a_i2c_exit(void)
+{
+ i2c_del_driver(&act8945a_i2c_driver);
+}
+module_exit(act8945a_i2c_exit);
+
+MODULE_DESCRIPTION("ACT8945A PMIC multi-function driver");
+MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index 94d67a6e1eb7..09e1483b99bc 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -108,8 +108,8 @@ static const struct regmap_config as3711_regmap_config = {
.volatile_reg = as3711_volatile_reg,
.readable_reg = as3711_readable_reg,
.precious_reg = as3711_precious_reg,
- .max_register = AS3711_MAX_REGS,
- .num_reg_defaults_raw = AS3711_MAX_REGS,
+ .max_register = AS3711_MAX_REG,
+ .num_reg_defaults_raw = AS3711_NUM_REGS,
.cache_type = REGCACHE_RBTREE,
};
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
new file mode 100644
index 000000000000..b1b865822c07
--- /dev/null
+++ b/drivers/mfd/axp20x-i2c.c
@@ -0,0 +1,104 @@
+/*
+ * I2C driver for the X-Powers' Power Management ICs
+ *
+ * AXP20x typically comprises an adaptive USB-compatible PWM charger, BUCK DC-DC
+ * converters, LDOs, and multiple 12-bit ADCs for voltage, current and
+ * temperature, as well as configurable GPIOs.
+ *
+ * This driver supports the I2C variants.
+ *
+ * Copyright (C) 2014 Carlo Caione
+ *
+ * Author: Carlo Caione <carlo@caione.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static int axp20x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct axp20x_dev *axp20x;
+ int ret;
+
+ axp20x = devm_kzalloc(&i2c->dev, sizeof(*axp20x), GFP_KERNEL);
+ if (!axp20x)
+ return -ENOMEM;
+
+ axp20x->dev = &i2c->dev;
+ axp20x->irq = i2c->irq;
+ dev_set_drvdata(axp20x->dev, axp20x);
+
+ ret = axp20x_match_device(axp20x);
+ if (ret)
+ return ret;
+
+ axp20x->regmap = devm_regmap_init_i2c(i2c, axp20x->regmap_cfg);
+ if (IS_ERR(axp20x->regmap)) {
+ ret = PTR_ERR(axp20x->regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ return axp20x_device_probe(axp20x);
+}
+
+static int axp20x_i2c_remove(struct i2c_client *i2c)
+{
+ struct axp20x_dev *axp20x = i2c_get_clientdata(i2c);
+
+ return axp20x_device_remove(axp20x);
+}
+
+static const struct of_device_id axp20x_i2c_of_match[] = {
+ { .compatible = "x-powers,axp152", .data = (void *)AXP152_ID },
+ { .compatible = "x-powers,axp202", .data = (void *)AXP202_ID },
+ { .compatible = "x-powers,axp209", .data = (void *)AXP209_ID },
+ { .compatible = "x-powers,axp221", .data = (void *)AXP221_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(of, axp20x_i2c_of_match);
+
+/*
+ * This is useless for OF-enabled devices, but it is needed by the I2C
+ * subsystem
+ */
+static const struct i2c_device_id axp20x_i2c_id[] = {
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
+
+static const struct acpi_device_id axp20x_i2c_acpi_match[] = {
+ {
+ .id = "INT33F4",
+ .driver_data = AXP288_ID,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, axp20x_i2c_acpi_match);
+
+static struct i2c_driver axp20x_i2c_driver = {
+ .driver = {
+ .name = "axp20x-i2c",
+ .of_match_table = of_match_ptr(axp20x_i2c_of_match),
+ .acpi_match_table = ACPI_PTR(axp20x_i2c_acpi_match),
+ },
+ .probe = axp20x_i2c_probe,
+ .remove = axp20x_i2c_remove,
+ .id_table = axp20x_i2c_id,
+};
+
+module_i2c_driver(axp20x_i2c_driver);
+
+MODULE_DESCRIPTION("PMIC MFD I2C driver for AXP20X");
+MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/axp20x-rsb.c b/drivers/mfd/axp20x-rsb.c
new file mode 100644
index 000000000000..28c20247c112
--- /dev/null
+++ b/drivers/mfd/axp20x-rsb.c
@@ -0,0 +1,80 @@
+/*
+ * RSB driver for the X-Powers' Power Management ICs
+ *
+ * AXP20x typically comprises an adaptive USB-compatible PWM charger, BUCK
+ * DC-DC converters, LDOs, and multiple 12-bit ADCs for voltage, current and
+ * temperature measurement, as well as configurable GPIOs.
+ *
+ * This driver supports the RSB variants.
+ *
+ * Copyright (C) 2015 Chen-Yu Tsai
+ *
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/err.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sunxi-rsb.h>
+
+static int axp20x_rsb_probe(struct sunxi_rsb_device *rdev)
+{
+ struct axp20x_dev *axp20x;
+ int ret;
+
+ axp20x = devm_kzalloc(&rdev->dev, sizeof(*axp20x), GFP_KERNEL);
+ if (!axp20x)
+ return -ENOMEM;
+
+ axp20x->dev = &rdev->dev;
+ axp20x->irq = rdev->irq;
+ dev_set_drvdata(&rdev->dev, axp20x);
+
+ ret = axp20x_match_device(axp20x);
+ if (ret)
+ return ret;
+
+ axp20x->regmap = devm_regmap_init_sunxi_rsb(rdev, axp20x->regmap_cfg);
+ if (IS_ERR(axp20x->regmap)) {
+ ret = PTR_ERR(axp20x->regmap);
+ dev_err(&rdev->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ return axp20x_device_probe(axp20x);
+}
+
+static int axp20x_rsb_remove(struct sunxi_rsb_device *rdev)
+{
+ struct axp20x_dev *axp20x = sunxi_rsb_device_get_drvdata(rdev);
+
+ return axp20x_device_remove(axp20x);
+}
+
+static const struct of_device_id axp20x_rsb_of_match[] = {
+ { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(of, axp20x_rsb_of_match);
+
+static struct sunxi_rsb_driver axp20x_rsb_driver = {
+ .driver = {
+ .name = "axp20x-rsb",
+ .of_match_table = of_match_ptr(axp20x_rsb_of_match),
+ },
+ .probe = axp20x_rsb_probe,
+ .remove = axp20x_rsb_remove,
+};
+module_sunxi_rsb_driver(axp20x_rsb_driver);
+
+MODULE_DESCRIPTION("PMIC MFD sunXi RSB driver for AXP20X");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 9842199e2e6c..a57d6e940610 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -1,10 +1,14 @@
/*
- * axp20x.c - MFD core driver for the X-Powers' Power Management ICs
+ * MFD core driver for the X-Powers' Power Management ICs
*
* AXP20x typically comprises an adaptive USB-Compatible PWM charger, BUCK DC-DC
* converters, LDOs, multiple 12-bit ADCs of voltage, current and temperature
* as well as configurable GPIOs.
*
+ * This file contains the interface-independent core functions.
+ *
+ * Copyright (C) 2014 Carlo Caione
+ *
* Author: Carlo Caione <carlo@caione.org>
*
* This program is free software; you can redistribute it and/or modify
@@ -13,18 +17,15 @@
*/
#include <linux/err.h>
-#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/axp20x.h>
#include <linux/mfd/core.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/acpi.h>
#define AXP20X_OFF 0x80
@@ -34,6 +35,7 @@ static const char * const axp20x_model_names[] = {
"AXP202",
"AXP209",
"AXP221",
+ "AXP223",
"AXP288",
};
@@ -376,32 +378,6 @@ static const struct regmap_irq axp288_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP288, BC_USB_CHNG, 5, 1),
};
-static const struct of_device_id axp20x_of_match[] = {
- { .compatible = "x-powers,axp152", .data = (void *) AXP152_ID },
- { .compatible = "x-powers,axp202", .data = (void *) AXP202_ID },
- { .compatible = "x-powers,axp209", .data = (void *) AXP209_ID },
- { .compatible = "x-powers,axp221", .data = (void *) AXP221_ID },
- { },
-};
-MODULE_DEVICE_TABLE(of, axp20x_of_match);
-
-/*
- * This is useless for OF-enabled devices, but it is needed by I2C subsystem
- */
-static const struct i2c_device_id axp20x_i2c_id[] = {
- { },
-};
-MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
-
-static const struct acpi_device_id axp20x_acpi_match[] = {
- {
- .id = "INT33F4",
- .driver_data = AXP288_ID,
- },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, axp20x_acpi_match);
-
static const struct regmap_irq_chip axp152_regmap_irq_chip = {
.name = "axp152_irq_chip",
.status_base = AXP152_IRQ1_STATE,
@@ -606,25 +582,26 @@ static void axp20x_power_off(void)
AXP20X_OFF);
}
-static int axp20x_match_device(struct axp20x_dev *axp20x, struct device *dev)
+int axp20x_match_device(struct axp20x_dev *axp20x)
{
+ struct device *dev = axp20x->dev;
const struct acpi_device_id *acpi_id;
const struct of_device_id *of_id;
if (dev->of_node) {
- of_id = of_match_device(axp20x_of_match, dev);
+ of_id = of_match_device(dev->driver->of_match_table, dev);
if (!of_id) {
dev_err(dev, "Unable to match OF ID\n");
return -ENODEV;
}
- axp20x->variant = (long) of_id->data;
+ axp20x->variant = (long)of_id->data;
} else {
acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!acpi_id || !acpi_id->driver_data) {
dev_err(dev, "Unable to match ACPI ID and data\n");
return -ENODEV;
}
- axp20x->variant = (long) acpi_id->driver_data;
+ axp20x->variant = (long)acpi_id->driver_data;
}
switch (axp20x->variant) {
@@ -642,6 +619,7 @@ static int axp20x_match_device(struct axp20x_dev *axp20x, struct device *dev)
axp20x->regmap_irq_chip = &axp20x_regmap_irq_chip;
break;
case AXP221_ID:
+ case AXP223_ID:
axp20x->nr_cells = ARRAY_SIZE(axp22x_cells);
axp20x->cells = axp22x_cells;
axp20x->regmap_cfg = &axp22x_regmap_config;
@@ -658,51 +636,31 @@ static int axp20x_match_device(struct axp20x_dev *axp20x, struct device *dev)
return -EINVAL;
}
dev_info(dev, "AXP20x variant %s found\n",
- axp20x_model_names[axp20x->variant]);
+ axp20x_model_names[axp20x->variant]);
return 0;
}
+EXPORT_SYMBOL(axp20x_match_device);
-static int axp20x_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+int axp20x_device_probe(struct axp20x_dev *axp20x)
{
- struct axp20x_dev *axp20x;
int ret;
- axp20x = devm_kzalloc(&i2c->dev, sizeof(*axp20x), GFP_KERNEL);
- if (!axp20x)
- return -ENOMEM;
-
- ret = axp20x_match_device(axp20x, &i2c->dev);
- if (ret)
- return ret;
-
- axp20x->i2c_client = i2c;
- axp20x->dev = &i2c->dev;
- dev_set_drvdata(axp20x->dev, axp20x);
-
- axp20x->regmap = devm_regmap_init_i2c(i2c, axp20x->regmap_cfg);
- if (IS_ERR(axp20x->regmap)) {
- ret = PTR_ERR(axp20x->regmap);
- dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- ret = regmap_add_irq_chip(axp20x->regmap, i2c->irq,
+ ret = regmap_add_irq_chip(axp20x->regmap, axp20x->irq,
IRQF_ONESHOT | IRQF_SHARED, -1,
axp20x->regmap_irq_chip,
&axp20x->regmap_irqc);
if (ret) {
- dev_err(&i2c->dev, "failed to add irq chip: %d\n", ret);
+ dev_err(axp20x->dev, "failed to add irq chip: %d\n", ret);
return ret;
}
ret = mfd_add_devices(axp20x->dev, -1, axp20x->cells,
- axp20x->nr_cells, NULL, 0, NULL);
+ axp20x->nr_cells, NULL, 0, NULL);
if (ret) {
- dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret);
- regmap_del_irq_chip(i2c->irq, axp20x->regmap_irqc);
+ dev_err(axp20x->dev, "failed to add MFD devices: %d\n", ret);
+ regmap_del_irq_chip(axp20x->irq, axp20x->regmap_irqc);
return ret;
}
@@ -711,38 +669,25 @@ static int axp20x_i2c_probe(struct i2c_client *i2c,
pm_power_off = axp20x_power_off;
}
- dev_info(&i2c->dev, "AXP20X driver loaded\n");
+ dev_info(axp20x->dev, "AXP20X driver loaded\n");
return 0;
}
+EXPORT_SYMBOL(axp20x_device_probe);
-static int axp20x_i2c_remove(struct i2c_client *i2c)
+int axp20x_device_remove(struct axp20x_dev *axp20x)
{
- struct axp20x_dev *axp20x = i2c_get_clientdata(i2c);
-
if (axp20x == axp20x_pm_power_off) {
axp20x_pm_power_off = NULL;
pm_power_off = NULL;
}
mfd_remove_devices(axp20x->dev);
- regmap_del_irq_chip(axp20x->i2c_client->irq, axp20x->regmap_irqc);
+ regmap_del_irq_chip(axp20x->irq, axp20x->regmap_irqc);
return 0;
}
-
-static struct i2c_driver axp20x_i2c_driver = {
- .driver = {
- .name = "axp20x",
- .of_match_table = of_match_ptr(axp20x_of_match),
- .acpi_match_table = ACPI_PTR(axp20x_acpi_match),
- },
- .probe = axp20x_i2c_probe,
- .remove = axp20x_i2c_remove,
- .id_table = axp20x_i2c_id,
-};
-
-module_i2c_driver(axp20x_i2c_driver);
+EXPORT_SYMBOL(axp20x_device_remove);
MODULE_DESCRIPTION("PMIC MFD core driver for AXP20X");
MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
diff --git a/drivers/mfd/cs47l24-tables.c b/drivers/mfd/cs47l24-tables.c
index 870800657594..f6b78aafdb55 100644
--- a/drivers/mfd/cs47l24-tables.c
+++ b/drivers/mfd/cs47l24-tables.c
@@ -227,8 +227,6 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0006 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
- { 0x00000177, 0x0281 }, /* R375 - FLL1 Loop Filter Test 1 */
- { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000179, 0x0000 }, /* R376 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
@@ -245,8 +243,6 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
{ 0x00000195, 0x000C }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
- { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
- { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x00000199, 0x0000 }, /* R408 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
@@ -678,7 +674,7 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
{ 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
{ 0x00000C20, 0x0002 }, /* R3104 - Misc Pad Ctrl 1 */
- { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C21, 0x0000 }, /* R3105 - Misc Pad Ctrl 2 */
{ 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
{ 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
{ 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
@@ -858,8 +854,6 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_CONTROL_7:
- case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -876,8 +870,6 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_CONTROL_7:
- case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index a9ad024ec6b0..8f873866ea60 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -388,11 +388,32 @@ static const struct regmap_range da9062_aa_volatile_ranges[] = {
.range_min = DA9062AA_STATUS_D,
.range_max = DA9062AA_EVENT_C,
}, {
- .range_min = DA9062AA_CONTROL_F,
+ .range_min = DA9062AA_CONTROL_A,
+ .range_max = DA9062AA_CONTROL_B,
+ }, {
+ .range_min = DA9062AA_CONTROL_E,
.range_max = DA9062AA_CONTROL_F,
}, {
+ .range_min = DA9062AA_BUCK2_CONT,
+ .range_max = DA9062AA_BUCK4_CONT,
+ }, {
+ .range_min = DA9062AA_BUCK3_CONT,
+ .range_max = DA9062AA_BUCK3_CONT,
+ }, {
+ .range_min = DA9062AA_LDO1_CONT,
+ .range_max = DA9062AA_LDO4_CONT,
+ }, {
+ .range_min = DA9062AA_DVC_1,
+ .range_max = DA9062AA_DVC_1,
+ }, {
.range_min = DA9062AA_COUNT_S,
.range_max = DA9062AA_SECOND_D,
+ }, {
+ .range_min = DA9062AA_SEQ,
+ .range_max = DA9062AA_SEQ,
+ }, {
+ .range_min = DA9062AA_EN_32K,
+ .range_max = DA9062AA_EN_32K,
},
};
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 2d4e3e0f4e94..73901084945f 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -74,18 +74,30 @@ static const struct regmap_range da9063_ad_writeable_ranges[] = {
static const struct regmap_range da9063_ad_volatile_ranges[] = {
{
- .range_min = DA9063_REG_STATUS_A,
+ .range_min = DA9063_REG_PAGE_CON,
.range_max = DA9063_REG_EVENT_D,
}, {
- .range_min = DA9063_REG_CONTROL_F,
+ .range_min = DA9063_REG_CONTROL_A,
+ .range_max = DA9063_REG_CONTROL_B,
+ }, {
+ .range_min = DA9063_REG_CONTROL_E,
.range_max = DA9063_REG_CONTROL_F,
}, {
- .range_min = DA9063_REG_ADC_MAN,
+ .range_min = DA9063_REG_BCORE2_CONT,
+ .range_max = DA9063_REG_LDO11_CONT,
+ }, {
+ .range_min = DA9063_REG_DVC_1,
.range_max = DA9063_REG_ADC_MAN,
}, {
.range_min = DA9063_REG_ADC_RES_L,
.range_max = DA9063_AD_REG_SECOND_D,
}, {
+ .range_min = DA9063_REG_SEQ,
+ .range_max = DA9063_REG_SEQ,
+ }, {
+ .range_min = DA9063_REG_EN_32K,
+ .range_max = DA9063_REG_EN_32K,
+ }, {
.range_min = DA9063_AD_REG_MON_REG_5,
.range_max = DA9063_AD_REG_MON_REG_6,
},
@@ -152,18 +164,30 @@ static const struct regmap_range da9063_bb_writeable_ranges[] = {
static const struct regmap_range da9063_bb_volatile_ranges[] = {
{
- .range_min = DA9063_REG_STATUS_A,
+ .range_min = DA9063_REG_PAGE_CON,
.range_max = DA9063_REG_EVENT_D,
}, {
- .range_min = DA9063_REG_CONTROL_F,
+ .range_min = DA9063_REG_CONTROL_A,
+ .range_max = DA9063_REG_CONTROL_B,
+ }, {
+ .range_min = DA9063_REG_CONTROL_E,
.range_max = DA9063_REG_CONTROL_F,
}, {
- .range_min = DA9063_REG_ADC_MAN,
+ .range_min = DA9063_REG_BCORE2_CONT,
+ .range_max = DA9063_REG_LDO11_CONT,
+ }, {
+ .range_min = DA9063_REG_DVC_1,
.range_max = DA9063_REG_ADC_MAN,
}, {
.range_min = DA9063_REG_ADC_RES_L,
.range_max = DA9063_BB_REG_SECOND_D,
}, {
+ .range_min = DA9063_REG_SEQ,
+ .range_max = DA9063_REG_SEQ,
+ }, {
+ .range_min = DA9063_REG_EN_32K,
+ .range_max = DA9063_REG_EN_32K,
+ }, {
.range_min = DA9063_BB_REG_MON_REG_5,
.range_max = DA9063_BB_REG_MON_REG_6,
},
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 12099b09a9a7..c0a86aeb1733 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -739,20 +739,17 @@ int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
if (!div && !requests[clkout])
return -EINVAL;
- switch (clkout) {
- case 0:
+ if (clkout == 0) {
div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
(div << PRCM_CLKOCR_CLKODIV0_SHIFT));
- break;
- case 1:
+ } else {
div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
PRCM_CLKOCR_CLK1TYPE);
bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
(div << PRCM_CLKOCR_CLKODIV1_SHIFT));
- break;
}
bits &= mask;
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
new file mode 100644
index 000000000000..77b2675cf8f5
--- /dev/null
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/mfd/imx25-tsadc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct regmap_config mx25_tsadc_regmap_config = {
+ .fast_io = true,
+ .max_register = 8,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static void mx25_tsadc_irq_handler(struct irq_desc *desc)
+{
+ struct mx25_tsadc *tsadc = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+
+ regmap_read(tsadc->regs, MX25_TSC_TGSR, &status);
+
+ if (status & MX25_TGSR_GCQ_INT)
+ generic_handle_irq(irq_find_mapping(tsadc->domain, 1));
+
+ if (status & MX25_TGSR_TCQ_INT)
+ generic_handle_irq(irq_find_mapping(tsadc->domain, 0));
+
+ chained_irq_exit(chip, desc);
+}
+
+static int mx25_tsadc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct mx25_tsadc *tsadc = d->host_data;
+
+ irq_set_chip_data(irq, tsadc);
+ irq_set_chip_and_handler(irq, &dummy_irq_chip,
+ handle_level_irq);
+ irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops mx25_tsadc_domain_ops = {
+ .map = mx25_tsadc_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int mx25_tsadc_setup_irq(struct platform_device *pdev,
+ struct mx25_tsadc *tsadc)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int irq;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "Failed to get irq\n");
+		/* 0 is not a valid IRQ number here; never report it as success */
+		return irq ? irq : -ENXIO;
+	}
+
+ tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops,
+ tsadc);
+ if (!tsadc->domain) {
+ dev_err(dev, "Failed to add irq domain\n");
+ return -ENOMEM;
+ }
+
+ irq_set_chained_handler(irq, mx25_tsadc_irq_handler);
+ irq_set_handler_data(irq, tsadc);
+
+ return 0;
+}
+
+static void mx25_tsadc_setup_clk(struct platform_device *pdev,
+ struct mx25_tsadc *tsadc)
+{
+ unsigned clk_div;
+
+ /*
+ * According to the datasheet the ADC clock should never
+	 * exceed 1.75 MHz. The base clock is the IPG clock, and the ADC
+	 * unit uses a funny clock divider. To keep the ADC conversion time
+	 * constant, adapt the ADC internal clock divider to the IPG clock
+	 * rate.
+ */
+
+ dev_dbg(&pdev->dev, "Found master clock at %lu Hz\n",
+ clk_get_rate(tsadc->clk));
+
+ clk_div = DIV_ROUND_UP(clk_get_rate(tsadc->clk), 1750000);
+ dev_dbg(&pdev->dev, "Setting up ADC clock divider to %u\n", clk_div);
+
+ /* adc clock = IPG clock / (2 * div + 2) */
+ clk_div -= 2;
+ clk_div /= 2;
+
+ /*
+ * the ADC clock divider changes its behaviour when values below 4
+ * are used: it is fixed to "/ 10" in this case
+ */
+ clk_div = max_t(unsigned, 4, clk_div);
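+
+	/*
+	 * Worked example (a hypothetical 66.5 MHz IPG rate, not a value
+	 * taken from a datasheet): DIV_ROUND_UP(66500000, 1750000) = 38,
+	 * the register divider becomes (38 - 2) / 2 = 18, and the ADC
+	 * then runs at 66.5 MHz / (2 * 18 + 2) = exactly 1.75 MHz.
+	 */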
+
+ dev_dbg(&pdev->dev, "Resulting ADC conversion clock at %lu Hz\n",
+ clk_get_rate(tsadc->clk) / (2 * clk_div + 2));
+
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR,
+ MX25_TGCR_ADCCLKCFG(0x1f),
+ MX25_TGCR_ADCCLKCFG(clk_div));
+}
+
+static int mx25_tsadc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mx25_tsadc *tsadc;
+ struct resource *res;
+ int ret;
+ void __iomem *iomem;
+
+ tsadc = devm_kzalloc(dev, sizeof(*tsadc), GFP_KERNEL);
+ if (!tsadc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+
+ tsadc->regs = devm_regmap_init_mmio(dev, iomem,
+ &mx25_tsadc_regmap_config);
+ if (IS_ERR(tsadc->regs)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ return PTR_ERR(tsadc->regs);
+ }
+
+ tsadc->clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(tsadc->clk)) {
+ dev_err(dev, "Failed to get ipg clock\n");
+ return PTR_ERR(tsadc->clk);
+ }
+
+ /* setup clock according to the datasheet */
+ mx25_tsadc_setup_clk(pdev, tsadc);
+
+ /* Enable clock and reset the component */
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR, MX25_TGCR_CLK_EN,
+ MX25_TGCR_CLK_EN);
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR, MX25_TGCR_TSC_RST,
+ MX25_TGCR_TSC_RST);
+
+ /* Setup powersaving mode, but enable internal reference voltage */
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR, MX25_TGCR_POWERMODE_MASK,
+ MX25_TGCR_POWERMODE_SAVE);
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR, MX25_TGCR_INTREFEN,
+ MX25_TGCR_INTREFEN);
+
+ ret = mx25_tsadc_setup_irq(pdev, tsadc);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, tsadc);
+
+ of_platform_populate(np, NULL, NULL, dev);
+
+ return 0;
+}
+
+static const struct of_device_id mx25_tsadc_ids[] = {
+ { .compatible = "fsl,imx25-tsadc" },
+ { /* Sentinel */ }
+};
+
+static struct platform_driver mx25_tsadc_driver = {
+ .driver = {
+ .name = "mx25-tsadc",
+ .of_match_table = of_match_ptr(mx25_tsadc_ids),
+ },
+ .probe = mx25_tsadc_probe,
+};
+module_platform_driver(mx25_tsadc_driver);
+
+MODULE_DESCRIPTION("MFD for ADC/TSC for Freescale mx25");
+MODULE_AUTHOR("Markus Pargmann <mpa@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mx25-tsadc");
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 06f00d60be46..5a8d9c766633 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -44,8 +44,20 @@ static const struct intel_lpss_platform_info bxt_info = {
.clk_rate = 100000000,
};
+static struct property_entry bxt_i2c_properties[] = {
+ PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 42),
+ PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
+ PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 208),
+ { },
+};
+
+static struct property_set bxt_i2c_pset = {
+ .properties = bxt_i2c_properties,
+};
+
static const struct intel_lpss_platform_info bxt_i2c_info = {
.clk_rate = 133000000,
+ .pset = &bxt_i2c_pset,
};
static const struct acpi_device_id intel_lpss_acpi_ids[] = {
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index a7136c7ae9fb..a19e57118641 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -107,12 +107,24 @@ static const struct intel_lpss_platform_info bxt_uart_info = {
.pset = &uart_pset,
};
+static struct property_entry bxt_i2c_properties[] = {
+ PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 42),
+ PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
+ PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 208),
+ { },
+};
+
+static struct property_set bxt_i2c_pset = {
+ .properties = bxt_i2c_properties,
+};
+
static const struct intel_lpss_platform_info bxt_i2c_info = {
.clk_rate = 133000000,
+ .pset = &bxt_i2c_pset,
};
static const struct pci_device_id intel_lpss_pci_ids[] = {
- /* BXT */
+ /* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0ab0), (kernel_ulong_t)&bxt_i2c_info },
@@ -128,6 +140,23 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0ac4), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x0ac6), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x0aee), (kernel_ulong_t)&bxt_uart_info },
+ /* BXT B-Step */
+ { PCI_VDEVICE(INTEL, 0x1aac), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1aae), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab0), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab2), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab4), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab6), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1aba), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1abc), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x1abe), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x1ac0), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x1ac2), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
+
/* APL */
{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&bxt_i2c_info },
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 1743788f1595..1bbbe877ba7e 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -453,6 +453,7 @@ int intel_lpss_probe(struct device *dev,
err_remove_ltr:
intel_lpss_debugfs_remove(lpss);
intel_lpss_ltr_hide(lpss);
+ intel_lpss_unregister_clock(lpss);
err_clk_register:
ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 042137465300..bdc5e27222c0 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -52,8 +52,6 @@
/* The Quark I2C controller source clock */
#define INTEL_QUARK_I2C_CLK_HZ 33000000
-#define INTEL_QUARK_I2C_NCLK 1
-
struct intel_quark_mfd {
struct pci_dev *pdev;
struct clk *i2c_clk;
@@ -128,30 +126,24 @@ MODULE_DEVICE_TABLE(pci, intel_quark_mfd_ids);
static int intel_quark_register_i2c_clk(struct intel_quark_mfd *quark_mfd)
{
struct pci_dev *pdev = quark_mfd->pdev;
- struct clk_lookup *i2c_clk_lookup;
struct clk *i2c_clk;
- int ret;
-
- i2c_clk_lookup = devm_kcalloc(&pdev->dev, INTEL_QUARK_I2C_NCLK,
- sizeof(*i2c_clk_lookup), GFP_KERNEL);
- if (!i2c_clk_lookup)
- return -ENOMEM;
-
- i2c_clk_lookup[0].dev_id = INTEL_QUARK_I2C_CONTROLLER_CLK;
i2c_clk = clk_register_fixed_rate(&pdev->dev,
INTEL_QUARK_I2C_CONTROLLER_CLK, NULL,
CLK_IS_ROOT, INTEL_QUARK_I2C_CLK_HZ);
+ if (IS_ERR(i2c_clk))
+ return PTR_ERR(i2c_clk);
- quark_mfd->i2c_clk_lookup = i2c_clk_lookup;
quark_mfd->i2c_clk = i2c_clk;
+ quark_mfd->i2c_clk_lookup = clkdev_create(i2c_clk, NULL,
+ INTEL_QUARK_I2C_CONTROLLER_CLK);
- ret = clk_register_clkdevs(i2c_clk, i2c_clk_lookup,
- INTEL_QUARK_I2C_NCLK);
- if (ret)
- dev_err(&pdev->dev, "Fixed clk register failed: %d\n", ret);
+	if (!quark_mfd->i2c_clk_lookup) {
+		/* don't leak the fixed-rate clock registered above */
+		clk_unregister_fixed_rate(i2c_clk);
+		dev_err(&pdev->dev, "Fixed clk register failed\n");
+		return -ENOMEM;
+	}
- return ret;
+ return 0;
}
static void intel_quark_unregister_i2c_clk(struct pci_dev *pdev)
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index a41859c55bda..df16fd1df68b 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -376,7 +376,7 @@ static const struct mfd_cell micro_cells[] = {
{ .name = "ipaq-micro-leds", },
};
-static int micro_resume(struct device *dev)
+static int __maybe_unused micro_resume(struct device *dev)
{
struct ipaq_micro *micro = dev_get_drvdata(dev);
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index d959ebbb2194..c1aff46e89d9 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -35,8 +35,6 @@
#include <linux/err.h>
#include <linux/of.h>
-#define I2C_ADDR_RTC (0x0C >> 1)
-
static const struct mfd_cell max77686_devs[] = {
{ .name = "max77686-pmic", },
{ .name = "max77686-rtc", },
@@ -116,11 +114,6 @@ static const struct regmap_config max77686_regmap_config = {
.val_bits = 8,
};
-static const struct regmap_config max77686_rtc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
static const struct regmap_config max77802_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -156,25 +149,6 @@ static const struct regmap_irq_chip max77686_irq_chip = {
.num_irqs = ARRAY_SIZE(max77686_irqs),
};
-static const struct regmap_irq max77686_rtc_irqs[] = {
- /* RTC interrupts */
- { .reg_offset = 0, .mask = MAX77686_RTCINT_RTC60S_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA1_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA2_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_SMPL_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_RTC1S_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_WTSR_MSK, },
-};
-
-static const struct regmap_irq_chip max77686_rtc_irq_chip = {
- .name = "max77686-rtc",
- .status_base = MAX77686_RTC_INT,
- .mask_base = MAX77686_RTC_INTM,
- .num_regs = 1,
- .irqs = max77686_rtc_irqs,
- .num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
-};
-
static const struct regmap_irq_chip max77802_irq_chip = {
.name = "max77802-pmic",
.status_base = MAX77802_REG_INT1,
@@ -184,15 +158,6 @@ static const struct regmap_irq_chip max77802_irq_chip = {
.num_irqs = ARRAY_SIZE(max77686_irqs),
};
-static const struct regmap_irq_chip max77802_rtc_irq_chip = {
- .name = "max77802-rtc",
- .status_base = MAX77802_RTC_INT,
- .mask_base = MAX77802_RTC_INTM,
- .num_regs = 1,
- .irqs = max77686_rtc_irqs, /* same masks as 77686 */
- .num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
-};
-
static const struct of_device_id max77686_pmic_dt_match[] = {
{
.compatible = "maxim,max77686",
@@ -204,6 +169,7 @@ static const struct of_device_id max77686_pmic_dt_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, max77686_pmic_dt_match);
static int max77686_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
@@ -214,8 +180,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
int ret = 0;
const struct regmap_config *config;
const struct regmap_irq_chip *irq_chip;
- const struct regmap_irq_chip *rtc_irq_chip;
- struct regmap **rtc_regmap;
const struct mfd_cell *cells;
int n_devs;
@@ -242,15 +206,11 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
if (max77686->type == TYPE_MAX77686) {
config = &max77686_regmap_config;
irq_chip = &max77686_irq_chip;
- rtc_irq_chip = &max77686_rtc_irq_chip;
- rtc_regmap = &max77686->rtc_regmap;
cells = max77686_devs;
n_devs = ARRAY_SIZE(max77686_devs);
} else {
config = &max77802_regmap_config;
irq_chip = &max77802_irq_chip;
- rtc_irq_chip = &max77802_rtc_irq_chip;
- rtc_regmap = &max77686->regmap;
cells = max77802_devs;
n_devs = ARRAY_SIZE(max77802_devs);
}
@@ -270,60 +230,25 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
return -ENODEV;
}
- if (max77686->type == TYPE_MAX77686) {
- max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
- if (!max77686->rtc) {
- dev_err(max77686->dev,
- "Failed to allocate I2C device for RTC\n");
- return -ENODEV;
- }
- i2c_set_clientdata(max77686->rtc, max77686);
-
- max77686->rtc_regmap =
- devm_regmap_init_i2c(max77686->rtc,
- &max77686_rtc_regmap_config);
- if (IS_ERR(max77686->rtc_regmap)) {
- ret = PTR_ERR(max77686->rtc_regmap);
- dev_err(max77686->dev,
- "failed to allocate RTC regmap: %d\n",
- ret);
- goto err_unregister_i2c;
- }
- }
-
ret = regmap_add_irq_chip(max77686->regmap, max77686->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
IRQF_SHARED, 0, irq_chip,
&max77686->irq_data);
- if (ret) {
+ if (ret < 0) {
dev_err(&i2c->dev, "failed to add PMIC irq chip: %d\n", ret);
- goto err_unregister_i2c;
- }
-
- ret = regmap_add_irq_chip(*rtc_regmap, max77686->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
- IRQF_SHARED, 0, rtc_irq_chip,
- &max77686->rtc_irq_data);
- if (ret) {
- dev_err(&i2c->dev, "failed to add RTC irq chip: %d\n", ret);
- goto err_del_irqc;
+ return ret;
}
ret = mfd_add_devices(max77686->dev, -1, cells, n_devs, NULL, 0, NULL);
if (ret < 0) {
dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret);
- goto err_del_rtc_irqc;
+ goto err_del_irqc;
}
return 0;
-err_del_rtc_irqc:
- regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data);
err_del_irqc:
regmap_del_irq_chip(max77686->irq, max77686->irq_data);
-err_unregister_i2c:
- if (max77686->type == TYPE_MAX77686)
- i2c_unregister_device(max77686->rtc);
return ret;
}
@@ -334,17 +259,14 @@ static int max77686_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(max77686->dev);
- regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data);
regmap_del_irq_chip(max77686->irq, max77686->irq_data);
- if (max77686->type == TYPE_MAX77686)
- i2c_unregister_device(max77686->rtc);
-
return 0;
}
static const struct i2c_device_id max77686_i2c_id[] = {
{ "max77686", TYPE_MAX77686 },
+ { "max77802", TYPE_MAX77802 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max77686_i2c_id);
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 3ac36f5ccd3e..a4a8f1ec3fb6 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -42,10 +42,10 @@
#include <linux/bcd.h>
#include <linux/slab.h>
#include <linux/mfd/menelaus.h>
+#include <linux/gpio.h>
#include <asm/mach/irq.h>
-#include <asm/gpio.h>
#define DRIVER_NAME "menelaus"
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 1749c1c9f405..8e8d93249c09 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -19,11 +19,17 @@
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6397/registers.h>
+#include <linux/mfd/mt6323/registers.h>
#define MT6397_RTC_BASE 0xe000
#define MT6397_RTC_SIZE 0x3e
+#define MT6323_CID_CODE 0x23
+#define MT6391_CID_CODE 0x91
+#define MT6397_CID_CODE 0x97
+
static const struct resource mt6397_rtc_resources[] = {
{
.start = MT6397_RTC_BASE,
@@ -37,6 +43,13 @@ static const struct resource mt6397_rtc_resources[] = {
},
};
+static const struct mfd_cell mt6323_devs[] = {
+ {
+ .name = "mt6323-regulator",
+ .of_compatible = "mediatek,mt6323-regulator"
+ },
+};
+
static const struct mfd_cell mt6397_devs[] = {
{
.name = "mt6397-rtc",
@@ -69,8 +82,10 @@ static void mt6397_irq_sync_unlock(struct irq_data *data)
{
struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
- regmap_write(mt6397->regmap, MT6397_INT_CON0, mt6397->irq_masks_cur[0]);
- regmap_write(mt6397->regmap, MT6397_INT_CON1, mt6397->irq_masks_cur[1]);
+ regmap_write(mt6397->regmap, mt6397->int_con[0],
+ mt6397->irq_masks_cur[0]);
+ regmap_write(mt6397->regmap, mt6397->int_con[1],
+ mt6397->irq_masks_cur[1]);
mutex_unlock(&mt6397->irqlock);
}
@@ -147,8 +162,8 @@ static irqreturn_t mt6397_irq_thread(int irq, void *data)
{
struct mt6397_chip *mt6397 = data;
- mt6397_irq_handle_reg(mt6397, MT6397_INT_STATUS0, 0);
- mt6397_irq_handle_reg(mt6397, MT6397_INT_STATUS1, 16);
+ mt6397_irq_handle_reg(mt6397, mt6397->int_status[0], 0);
+ mt6397_irq_handle_reg(mt6397, mt6397->int_status[1], 16);
return IRQ_HANDLED;
}
@@ -177,8 +192,8 @@ static int mt6397_irq_init(struct mt6397_chip *mt6397)
mutex_init(&mt6397->irqlock);
/* Mask all interrupt sources */
- regmap_write(mt6397->regmap, MT6397_INT_CON0, 0x0);
- regmap_write(mt6397->regmap, MT6397_INT_CON1, 0x0);
+ regmap_write(mt6397->regmap, mt6397->int_con[0], 0x0);
+ regmap_write(mt6397->regmap, mt6397->int_con[1], 0x0);
mt6397->irq_domain = irq_domain_add_linear(mt6397->dev->of_node,
MT6397_IRQ_NR, &mt6397_irq_domain_ops, mt6397);
@@ -203,8 +218,8 @@ static int mt6397_irq_suspend(struct device *dev)
{
struct mt6397_chip *chip = dev_get_drvdata(dev);
- regmap_write(chip->regmap, MT6397_INT_CON0, chip->wake_mask[0]);
- regmap_write(chip->regmap, MT6397_INT_CON1, chip->wake_mask[1]);
+ regmap_write(chip->regmap, chip->int_con[0], chip->wake_mask[0]);
+ regmap_write(chip->regmap, chip->int_con[1], chip->wake_mask[1]);
enable_irq_wake(chip->irq);
@@ -215,8 +230,8 @@ static int mt6397_irq_resume(struct device *dev)
{
struct mt6397_chip *chip = dev_get_drvdata(dev);
- regmap_write(chip->regmap, MT6397_INT_CON0, chip->irq_masks_cur[0]);
- regmap_write(chip->regmap, MT6397_INT_CON1, chip->irq_masks_cur[1]);
+ regmap_write(chip->regmap, chip->int_con[0], chip->irq_masks_cur[0]);
+ regmap_write(chip->regmap, chip->int_con[1], chip->irq_masks_cur[1]);
disable_irq_wake(chip->irq);
@@ -230,34 +245,69 @@ static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
static int mt6397_probe(struct platform_device *pdev)
{
int ret;
- struct mt6397_chip *mt6397;
+ unsigned int id;
+ struct mt6397_chip *pmic;
- mt6397 = devm_kzalloc(&pdev->dev, sizeof(*mt6397), GFP_KERNEL);
- if (!mt6397)
+ pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
return -ENOMEM;
- mt6397->dev = &pdev->dev;
+ pmic->dev = &pdev->dev;
+
/*
* mt6397 MFD is child device of soc pmic wrapper.
* Regmap is set from its parent.
*/
- mt6397->regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!mt6397->regmap)
+ pmic->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!pmic->regmap)
return -ENODEV;
- platform_set_drvdata(pdev, mt6397);
+ platform_set_drvdata(pdev, pmic);
- mt6397->irq = platform_get_irq(pdev, 0);
- if (mt6397->irq > 0) {
- ret = mt6397_irq_init(mt6397);
+ ret = regmap_read(pmic->regmap, MT6397_CID, &id);
+ if (ret) {
+ dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
+ goto fail_irq;
+ }
+
+ switch (id & 0xff) {
+ case MT6323_CID_CODE:
+ pmic->int_con[0] = MT6323_INT_CON0;
+ pmic->int_con[1] = MT6323_INT_CON1;
+ pmic->int_status[0] = MT6323_INT_STATUS0;
+ pmic->int_status[1] = MT6323_INT_STATUS1;
+ ret = mfd_add_devices(&pdev->dev, -1, mt6323_devs,
+ ARRAY_SIZE(mt6323_devs), NULL, 0, NULL);
+ break;
+
+ case MT6397_CID_CODE:
+ case MT6391_CID_CODE:
+ pmic->int_con[0] = MT6397_INT_CON0;
+ pmic->int_con[1] = MT6397_INT_CON1;
+ pmic->int_status[0] = MT6397_INT_STATUS0;
+ pmic->int_status[1] = MT6397_INT_STATUS1;
+ ret = mfd_add_devices(&pdev->dev, -1, mt6397_devs,
+ ARRAY_SIZE(mt6397_devs), NULL, 0, NULL);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "unsupported chip: %d\n", id);
+ ret = -ENODEV;
+ break;
+ }
+
+ pmic->irq = platform_get_irq(pdev, 0);
+ if (pmic->irq > 0) {
+ ret = mt6397_irq_init(pmic);
if (ret)
return ret;
}
- ret = mfd_add_devices(&pdev->dev, -1, mt6397_devs,
- ARRAY_SIZE(mt6397_devs), NULL, 0, NULL);
- if (ret)
+fail_irq:
+	if (ret) {
+		/* the domain only exists once mt6397_irq_init() has run */
+		if (pmic->irq_domain)
+			irq_domain_remove(pmic->irq_domain);
+		dev_err(&pdev->dev, "failed to add child devices: %d\n", ret);
+	}
return ret;
}
@@ -271,10 +321,17 @@ static int mt6397_remove(struct platform_device *pdev)
static const struct of_device_id mt6397_of_match[] = {
{ .compatible = "mediatek,mt6397" },
+ { .compatible = "mediatek,mt6323" },
{ }
};
MODULE_DEVICE_TABLE(of, mt6397_of_match);
+static const struct platform_device_id mt6397_id[] = {
+ { "mt6397", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, mt6397_id);
+
static struct platform_driver mt6397_driver = {
.probe = mt6397_probe,
.remove = mt6397_remove,
@@ -283,6 +340,7 @@ static struct platform_driver mt6397_driver = {
.of_match_table = of_match_ptr(mt6397_of_match),
.pm = &mt6397_pm_ops,
},
+ .id_table = mt6397_id,
};
module_platform_driver(mt6397_driver);
@@ -290,4 +348,3 @@ module_platform_driver(mt6397_driver);
MODULE_AUTHOR("Flora Fu, MediaTek");
MODULE_DESCRIPTION("Driver for MediaTek MT6397 PMIC");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:mt6397");
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index e10f02f5d551..fc2b2d93f354 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -241,8 +241,8 @@ static const struct regmap_config rc5t583_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_reg = volatile_reg,
- .max_register = RC5T583_MAX_REGS,
- .num_reg_defaults_raw = RC5T583_MAX_REGS,
+ .max_register = RC5T583_MAX_REG,
+ .num_reg_defaults_raw = RC5T583_NUM_REGS,
.cache_type = REGCACHE_RBTREE,
};
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 8222e374e4b1..fb8f9e8b75df 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -334,6 +334,31 @@ static const struct mfd_cell stmpe_keypad_cell = {
};
/*
+ * PWM (1601, 2401, 2403)
+ */
+static struct resource stmpe_pwm_resources[] = {
+ {
+ .name = "PWM0",
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "PWM1",
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "PWM2",
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const struct mfd_cell stmpe_pwm_cell = {
+ .name = "stmpe-pwm",
+ .of_compatible = "st,stmpe-pwm",
+ .resources = stmpe_pwm_resources,
+ .num_resources = ARRAY_SIZE(stmpe_pwm_resources),
+};
+
+/*
* STMPE801
*/
static const u8 stmpe801_regs[] = {
@@ -537,6 +562,11 @@ static struct stmpe_variant_block stmpe1601_blocks[] = {
.irq = STMPE1601_IRQ_KEYPAD,
.block = STMPE_BLOCK_KEYPAD,
},
+ {
+ .cell = &stmpe_pwm_cell,
+ .irq = STMPE1601_IRQ_PWM0,
+ .block = STMPE_BLOCK_PWM,
+ },
};
/* supported autosleep timeout delay (in msecs) */
@@ -771,6 +801,11 @@ static struct stmpe_variant_block stmpe24xx_blocks[] = {
.irq = STMPE24XX_IRQ_KEYPAD,
.block = STMPE_BLOCK_KEYPAD,
},
+ {
+ .cell = &stmpe_pwm_cell,
+ .irq = STMPE24XX_IRQ_PWM0,
+ .block = STMPE_BLOCK_PWM,
+ },
};
static int stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks,
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index b7aabeefab07..2f2225e845ef 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -36,7 +36,7 @@ struct syscon {
struct list_head list;
};
-static struct regmap_config syscon_regmap_config = {
+static const struct regmap_config syscon_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -50,6 +50,7 @@ static struct syscon *of_syscon_register(struct device_node *np)
u32 reg_io_width;
int ret;
struct regmap_config syscon_config = syscon_regmap_config;
+ struct resource res;
if (!of_device_is_compatible(np, "syscon"))
return ERR_PTR(-EINVAL);
@@ -58,7 +59,12 @@ static struct syscon *of_syscon_register(struct device_node *np)
if (!syscon)
return ERR_PTR(-ENOMEM);
- base = of_iomap(np, 0);
+ if (of_address_to_resource(np, 0, &res)) {
+ ret = -ENOMEM;
+ goto err_map;
+ }
+
+ base = ioremap(res.start, resource_size(&res));
if (!base) {
ret = -ENOMEM;
goto err_map;
@@ -81,6 +87,7 @@ static struct syscon *of_syscon_register(struct device_node *np)
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
+ syscon_config.max_register = resource_size(&res) - reg_io_width;
regmap = regmap_init_mmio(NULL, base, &syscon_config);
if (IS_ERR(regmap)) {
@@ -192,6 +199,7 @@ static int syscon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct syscon_platform_data *pdata = dev_get_platdata(dev);
struct syscon *syscon;
+ struct regmap_config syscon_config = syscon_regmap_config;
struct resource *res;
void __iomem *base;
@@ -207,11 +215,10 @@ static int syscon_probe(struct platform_device *pdev)
if (!base)
return -ENOMEM;
- syscon_regmap_config.max_register = res->end - res->start - 3;
+ syscon_config.max_register = res->end - res->start - 3;
if (pdata)
- syscon_regmap_config.name = pdata->label;
- syscon->regmap = devm_regmap_init_mmio(dev, base,
- &syscon_regmap_config);
+ syscon_config.name = pdata->label;
+ syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_config);
if (IS_ERR(syscon->regmap)) {
dev_err(dev, "regmap init failed\n");
return PTR_ERR(syscon->regmap);
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 83e615ed100a..495e4518fc29 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -1059,26 +1059,7 @@ EXPORT_SYMBOL(tps65013_set_low_pwr);
static int __init tps_init(void)
{
- u32 tries = 3;
- int status = -ENODEV;
-
- printk(KERN_INFO "%s: version %s\n", DRIVER_NAME, DRIVER_VERSION);
-
- /* some boards have startup glitches */
- while (tries--) {
- status = i2c_add_driver(&tps65010_driver);
- if (the_tps)
- break;
- i2c_del_driver(&tps65010_driver);
- if (!tries) {
- printk(KERN_ERR "%s: no chip?\n", DRIVER_NAME);
- return -ENODEV;
- }
- pr_debug("%s: re-probe ...\n", DRIVER_NAME);
- msleep(10);
- }
-
- return status;
+ return i2c_add_driver(&tps65010_driver);
}
/* NOTE: this MUST be initialized before the other parts of the system
* that rely on it ... but after the i2c bus on which this relies.
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
new file mode 100644
index 000000000000..43119a6867fe
--- /dev/null
+++ b/drivers/mfd/tps65086.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65912 driver
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+
+#include <linux/mfd/tps65086.h>
+
+static const struct mfd_cell tps65086_cells[] = {
+ { .name = "tps65086-regulator", },
+ { .name = "tps65086-gpio", },
+};
+
+static const struct regmap_range tps65086_yes_ranges[] = {
+ regmap_reg_range(TPS65086_IRQ, TPS65086_IRQ),
+ regmap_reg_range(TPS65086_PMICSTAT, TPS65086_SHUTDNSRC),
+ regmap_reg_range(TPS65086_GPOCTRL, TPS65086_GPOCTRL),
+ regmap_reg_range(TPS65086_PG_STATUS1, TPS65086_OC_STATUS),
+};
+
+static const struct regmap_access_table tps65086_volatile_table = {
+ .yes_ranges = tps65086_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65086_yes_ranges),
+};
+
+static const struct regmap_config tps65086_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &tps65086_volatile_table,
+};
+
+static const struct regmap_irq tps65086_irqs[] = {
+ REGMAP_IRQ_REG(TPS65086_IRQ_DIETEMP, 0, TPS65086_IRQ_DIETEMP_MASK),
+ REGMAP_IRQ_REG(TPS65086_IRQ_SHUTDN, 0, TPS65086_IRQ_SHUTDN_MASK),
+ REGMAP_IRQ_REG(TPS65086_IRQ_FAULT, 0, TPS65086_IRQ_FAULT_MASK),
+};
+
+static struct regmap_irq_chip tps65086_irq_chip = {
+ .name = "tps65086",
+ .status_base = TPS65086_IRQ,
+ .mask_base = TPS65086_IRQ_MASK,
+ .ack_base = TPS65086_IRQ,
+ .init_ack_masked = true,
+ .num_regs = 1,
+ .irqs = tps65086_irqs,
+ .num_irqs = ARRAY_SIZE(tps65086_irqs),
+};
+
+static const struct of_device_id tps65086_of_match_table[] = {
+ { .compatible = "ti,tps65086", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tps65086_of_match_table);
+
+static int tps65086_probe(struct i2c_client *client,
+ const struct i2c_device_id *ids)
+{
+ struct tps65086 *tps;
+ unsigned int version;
+ int ret;
+
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, tps);
+ tps->dev = &client->dev;
+ tps->irq = client->irq;
+
+ tps->regmap = devm_regmap_init_i2c(client, &tps65086_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ dev_err(tps->dev, "Failed to initialize register map\n");
+ return PTR_ERR(tps->regmap);
+ }
+
+ ret = regmap_read(tps->regmap, TPS65086_DEVICEID, &version);
+ if (ret) {
+ dev_err(tps->dev, "Failed to read revision register\n");
+ return ret;
+ }
+
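+	/*
+	 * Decoding sketch (the field positions are assumptions; the mask
+	 * definitions are not part of this diff): if OTP occupies bits 5:4
+	 * and REV bits 7:6, a raw DEVICEID of 0x52 reads back as part 2,
+	 * OTP 'B', revision 1.
+	 */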
+	dev_info(tps->dev, "Device: TPS65086%01X, OTP: %c, Rev: %u\n",
+		 (version & TPS65086_DEVICEID_PART_MASK),
+		 (char)((version & TPS65086_DEVICEID_OTP_MASK) >> 4) + 'A',
+		 (version & TPS65086_DEVICEID_REV_MASK) >> 6);
+
+ ret = regmap_add_irq_chip(tps->regmap, tps->irq, IRQF_ONESHOT, 0,
+ &tps65086_irq_chip, &tps->irq_data);
+ if (ret) {
+ dev_err(tps->dev, "Failed to register IRQ chip\n");
+ return ret;
+ }
+
+ ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65086_cells,
+ ARRAY_SIZE(tps65086_cells), NULL, 0,
+ regmap_irq_get_domain(tps->irq_data));
+ if (ret) {
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps65086_remove(struct i2c_client *client)
+{
+ struct tps65086 *tps = i2c_get_clientdata(client);
+
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps65086_id_table[] = {
+ { "tps65086", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, tps65086_id_table);
+
+static struct i2c_driver tps65086_driver = {
+ .driver = {
+ .name = "tps65086",
+ .of_match_table = tps65086_of_match_table,
+ },
+ .probe = tps65086_probe,
+ .remove = tps65086_remove,
+ .id_table = tps65086_id_table,
+};
+module_i2c_driver(tps65086_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65086 PMIC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index f88085ad9772..d7ec318c40c3 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -30,7 +30,6 @@
#include <linux/err.h>
#define NUM_INT_REG 2
-#define TOTAL_NUM_REG 0x18
#define TPS65090_INT1_MASK_VAC_STATUS_CHANGE 1
#define TPS65090_INT1_MASK_VSYS_STATUS_CHANGE 2
@@ -161,8 +160,8 @@ static bool is_volatile_reg(struct device *dev, unsigned int reg)
static const struct regmap_config tps65090_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = TOTAL_NUM_REG,
- .num_reg_defaults_raw = TOTAL_NUM_REG,
+ .max_register = TPS65090_MAX_REG,
+ .num_reg_defaults_raw = TPS65090_NUM_REGS,
.cache_type = REGCACHE_RBTREE,
.volatile_reg = is_volatile_reg,
};
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 1f82d60b1d0f..a88cfa80dbc4 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -1,175 +1,111 @@
/*
- * tps65912-core.c -- TI TPS65912x
+ * Core functions for TI TPS65912x PMICs
*
- * Copyright 2011 Texas Instruments Inc.
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
*
- * This driver is based on wm8350 implementation.
+ * Based on the TPS65218 driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/mfd/core.h>
+#include <linux/module.h>
+
#include <linux/mfd/tps65912.h>
-static const struct mfd_cell tps65912s[] = {
- {
- .name = "tps65912-pmic",
- },
+static const struct mfd_cell tps65912_cells[] = {
+ { .name = "tps65912-regulator", },
+ { .name = "tps65912-gpio", },
};
-int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
-{
- u8 data;
- int err;
-
- mutex_lock(&tps65912->io_mutex);
-
- err = tps65912->read(tps65912, reg, 1, &data);
- if (err) {
- dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
- goto out;
- }
-
- data |= mask;
- err = tps65912->write(tps65912, reg, 1, &data);
- if (err)
- dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
+static const struct regmap_irq tps65912_irqs[] = {
+ /* INT_STS IRQs */
+ REGMAP_IRQ_REG(TPS65912_IRQ_PWRHOLD_F, 0, TPS65912_INT_STS_PWRHOLD_F),
+ REGMAP_IRQ_REG(TPS65912_IRQ_VMON, 0, TPS65912_INT_STS_VMON),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PWRON, 0, TPS65912_INT_STS_PWRON),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PWRON_LP, 0, TPS65912_INT_STS_PWRON_LP),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PWRHOLD_R, 0, TPS65912_INT_STS_PWRHOLD_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_HOTDIE, 0, TPS65912_INT_STS_HOTDIE),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO1_R, 0, TPS65912_INT_STS_GPIO1_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO1_F, 0, TPS65912_INT_STS_GPIO1_F),
+ /* INT_STS2 IRQs */
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO2_R, 1, TPS65912_INT_STS2_GPIO2_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO2_F, 1, TPS65912_INT_STS2_GPIO2_F),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO3_R, 1, TPS65912_INT_STS2_GPIO3_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO3_F, 1, TPS65912_INT_STS2_GPIO3_F),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO4_R, 1, TPS65912_INT_STS2_GPIO4_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO4_F, 1, TPS65912_INT_STS2_GPIO4_F),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO5_R, 1, TPS65912_INT_STS2_GPIO5_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO5_F, 1, TPS65912_INT_STS2_GPIO5_F),
+ /* INT_STS3 IRQs */
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_DCDC1, 2, TPS65912_INT_STS3_PGOOD_DCDC1),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_DCDC2, 2, TPS65912_INT_STS3_PGOOD_DCDC2),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_DCDC3, 2, TPS65912_INT_STS3_PGOOD_DCDC3),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_DCDC4, 2, TPS65912_INT_STS3_PGOOD_DCDC4),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO1, 2, TPS65912_INT_STS3_PGOOD_LDO1),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO2, 2, TPS65912_INT_STS3_PGOOD_LDO2),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO3, 2, TPS65912_INT_STS3_PGOOD_LDO3),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO4, 2, TPS65912_INT_STS3_PGOOD_LDO4),
+ /* INT_STS4 IRQs */
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO5, 3, TPS65912_INT_STS4_PGOOD_LDO5),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO6, 3, TPS65912_INT_STS4_PGOOD_LDO6),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO7, 3, TPS65912_INT_STS4_PGOOD_LDO7),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO8, 3, TPS65912_INT_STS4_PGOOD_LDO8),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO9, 3, TPS65912_INT_STS4_PGOOD_LDO9),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO10, 3, TPS65912_INT_STS4_PGOOD_LDO10),
+};
-out:
- mutex_unlock(&tps65912->io_mutex);
- return err;
-}
-EXPORT_SYMBOL_GPL(tps65912_set_bits);
+static struct regmap_irq_chip tps65912_irq_chip = {
+ .name = "tps65912",
+ .irqs = tps65912_irqs,
+ .num_irqs = ARRAY_SIZE(tps65912_irqs),
+ .num_regs = 4,
+ .irq_reg_stride = 2,
+ .mask_base = TPS65912_INT_MSK,
+ .status_base = TPS65912_INT_STS,
+ .ack_base = TPS65912_INT_STS,
+ .init_ack_masked = true,
+};
-int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
+int tps65912_device_init(struct tps65912 *tps)
{
- u8 data;
- int err;
-
- mutex_lock(&tps65912->io_mutex);
- err = tps65912->read(tps65912, reg, 1, &data);
- if (err) {
- dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
- goto out;
+ int ret;
+
+ ret = regmap_add_irq_chip(tps->regmap, tps->irq, IRQF_ONESHOT, 0,
+ &tps65912_irq_chip, &tps->irq_data);
+ if (ret)
+ return ret;
+
+ ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65912_cells,
+ ARRAY_SIZE(tps65912_cells), NULL, 0,
+ regmap_irq_get_domain(tps->irq_data));
+ if (ret) {
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
+ return ret;
}
- data &= ~mask;
- err = tps65912->write(tps65912, reg, 1, &data);
- if (err)
- dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
-
-out:
- mutex_unlock(&tps65912->io_mutex);
- return err;
+ return 0;
}
-EXPORT_SYMBOL_GPL(tps65912_clear_bits);
+EXPORT_SYMBOL_GPL(tps65912_device_init);
-static inline int tps65912_read(struct tps65912 *tps65912, u8 reg)
+int tps65912_device_exit(struct tps65912 *tps)
{
- u8 val;
- int err;
-
- err = tps65912->read(tps65912, reg, 1, &val);
- if (err < 0)
- return err;
-
- return val;
-}
-
-static inline int tps65912_write(struct tps65912 *tps65912, u8 reg, u8 val)
-{
- return tps65912->write(tps65912, reg, 1, &val);
-}
-
-int tps65912_reg_read(struct tps65912 *tps65912, u8 reg)
-{
- int data;
-
- mutex_lock(&tps65912->io_mutex);
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
- data = tps65912_read(tps65912, reg);
- if (data < 0)
- dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
-
- mutex_unlock(&tps65912->io_mutex);
- return data;
-}
-EXPORT_SYMBOL_GPL(tps65912_reg_read);
-
-int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val)
-{
- int err;
-
- mutex_lock(&tps65912->io_mutex);
-
- err = tps65912_write(tps65912, reg, val);
- if (err < 0)
- dev_err(tps65912->dev, "Write for reg 0x%x failed\n", reg);
-
- mutex_unlock(&tps65912->io_mutex);
- return err;
-}
-EXPORT_SYMBOL_GPL(tps65912_reg_write);
-
-int tps65912_device_init(struct tps65912 *tps65912)
-{
- struct tps65912_board *pmic_plat_data = dev_get_platdata(tps65912->dev);
- struct tps65912_platform_data *init_data;
- int ret, dcdc_avs, value;
-
- init_data = kzalloc(sizeof(struct tps65912_platform_data), GFP_KERNEL);
- if (init_data == NULL)
- return -ENOMEM;
-
- mutex_init(&tps65912->io_mutex);
- dev_set_drvdata(tps65912->dev, tps65912);
-
- dcdc_avs = (pmic_plat_data->is_dcdc1_avs << 0 |
- pmic_plat_data->is_dcdc2_avs << 1 |
- pmic_plat_data->is_dcdc3_avs << 2 |
- pmic_plat_data->is_dcdc4_avs << 3);
- if (dcdc_avs) {
- tps65912->read(tps65912, TPS65912_I2C_SPI_CFG, 1, &value);
- dcdc_avs |= value;
- tps65912->write(tps65912, TPS65912_I2C_SPI_CFG, 1, &dcdc_avs);
- }
-
- ret = mfd_add_devices(tps65912->dev, -1,
- tps65912s, ARRAY_SIZE(tps65912s),
- NULL, 0, NULL);
- if (ret < 0)
- goto err;
-
- init_data->irq = pmic_plat_data->irq;
- init_data->irq_base = pmic_plat_data->irq_base;
- ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
- if (ret < 0)
- goto err;
-
- kfree(init_data);
- return ret;
-
-err:
- kfree(init_data);
- mfd_remove_devices(tps65912->dev);
- return ret;
-}
-
-void tps65912_device_exit(struct tps65912 *tps65912)
-{
- mfd_remove_devices(tps65912->dev);
- tps65912_irq_exit(tps65912);
+ return 0;
}
+EXPORT_SYMBOL_GPL(tps65912_device_exit);
-MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
-MODULE_DESCRIPTION("TPS65912x chip family multi-function driver");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65912x MFD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 7e55640b3ed5..45871403f995 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -1,139 +1,79 @@
/*
- * tps65912-i2c.c -- I2C access for TI TPS65912x PMIC
+ * I2C access driver for TI TPS65912x PMICs
*
- * Copyright 2011 Texas Instruments Inc.
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
*
- * This driver is based on wm8350 implementation.
+ * Based on the TPS65218 driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tps65912.h>
-
-static int tps65912_i2c_read(struct tps65912 *tps65912, u8 reg,
- int bytes, void *dest)
-{
- struct i2c_client *i2c = tps65912->control_data;
- struct i2c_msg xfer[2];
- int ret;
-
- /* Write register */
- xfer[0].addr = i2c->addr;
- xfer[0].flags = 0;
- xfer[0].len = 1;
- xfer[0].buf = &reg;
-
- /* Read data */
- xfer[1].addr = i2c->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = bytes;
- xfer[1].buf = dest;
-
- ret = i2c_transfer(i2c->adapter, xfer, 2);
- if (ret == 2)
- ret = 0;
- else if (ret >= 0)
- ret = -EIO;
- return ret;
-}
-
-static int tps65912_i2c_write(struct tps65912 *tps65912, u8 reg,
- int bytes, void *src)
-{
- struct i2c_client *i2c = tps65912->control_data;
- /* we add 1 byte for device register */
- u8 msg[TPS6591X_MAX_REGISTER + 1];
- int ret;
-
- if (bytes > TPS6591X_MAX_REGISTER)
- return -EINVAL;
-
- msg[0] = reg;
- memcpy(&msg[1], src, bytes);
+#include <linux/module.h>
+#include <linux/regmap.h>
- ret = i2c_master_send(i2c, msg, bytes + 1);
- if (ret < 0)
- return ret;
- if (ret != bytes + 1)
- return -EIO;
+#include <linux/mfd/tps65912.h>
- return 0;
-}
+static const struct of_device_id tps65912_i2c_of_match_table[] = {
+ { .compatible = "ti,tps65912", },
+ { /* sentinel */ }
+};
-static int tps65912_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int tps65912_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *ids)
{
- struct tps65912 *tps65912;
+ struct tps65912 *tps;
- tps65912 = devm_kzalloc(&i2c->dev,
- sizeof(struct tps65912), GFP_KERNEL);
- if (tps65912 == NULL)
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
return -ENOMEM;
- i2c_set_clientdata(i2c, tps65912);
- tps65912->dev = &i2c->dev;
- tps65912->control_data = i2c;
- tps65912->read = tps65912_i2c_read;
- tps65912->write = tps65912_i2c_write;
+ i2c_set_clientdata(client, tps);
+ tps->dev = &client->dev;
+ tps->irq = client->irq;
+
+ tps->regmap = devm_regmap_init_i2c(client, &tps65912_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ dev_err(tps->dev, "Failed to initialize register map\n");
+ return PTR_ERR(tps->regmap);
+ }
- return tps65912_device_init(tps65912);
+ return tps65912_device_init(tps);
}
-static int tps65912_i2c_remove(struct i2c_client *i2c)
+static int tps65912_i2c_remove(struct i2c_client *client)
{
- struct tps65912 *tps65912 = i2c_get_clientdata(i2c);
+ struct tps65912 *tps = i2c_get_clientdata(client);
- tps65912_device_exit(tps65912);
-
- return 0;
+ return tps65912_device_exit(tps);
}
-static const struct i2c_device_id tps65912_i2c_id[] = {
- {"tps65912", 0 },
- { }
+static const struct i2c_device_id tps65912_i2c_id_table[] = {
+ { "tps65912", 0 },
+ { /* sentinel */ }
};
-MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id);
+MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id_table);
static struct i2c_driver tps65912_i2c_driver = {
- .driver = {
- .name = "tps65912",
+ .driver = {
+ .name = "tps65912",
+ .of_match_table = tps65912_i2c_of_match_table,
},
- .probe = tps65912_i2c_probe,
- .remove = tps65912_i2c_remove,
- .id_table = tps65912_i2c_id,
+ .probe = tps65912_i2c_probe,
+ .remove = tps65912_i2c_remove,
+ .id_table = tps65912_i2c_id_table,
};
+module_i2c_driver(tps65912_i2c_driver);
-static int __init tps65912_i2c_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&tps65912_i2c_driver);
- if (ret != 0)
- pr_err("Failed to register TPS65912 I2C driver: %d\n", ret);
-
- return ret;
-}
-/* init early so consumer devices can complete system boot */
-subsys_initcall(tps65912_i2c_init);
-
-static void __exit tps65912_i2c_exit(void)
-{
- i2c_del_driver(&tps65912_i2c_driver);
-}
-module_exit(tps65912_i2c_exit);
-
-MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
-MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65912x I2C Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
deleted file mode 100644
index db2c29cb709b..000000000000
--- a/drivers/mfd/tps65912-irq.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * tps65912-irq.c -- TI TPS6591x
- *
- * Copyright 2011 Texas Instruments Inc.
- *
- * Author: Margarita Olaya <magi@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This driver is based on wm8350 implementation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/bug.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/mfd/tps65912.h>
-
-static inline int irq_to_tps65912_irq(struct tps65912 *tps65912,
- int irq)
-{
- return irq - tps65912->irq_base;
-}
-
-/*
- * This is a threaded IRQ handler so can access I2C/SPI. Since the
- * IRQ handler explicitly clears the IRQ it handles the IRQ line
- * will be reasserted and the physical IRQ will be handled again if
- * another interrupt is asserted while we run - in the normal course
- * of events this is a rare occurrence so we save I2C/SPI reads. We're
- * also assuming that it's rare to get lots of interrupts firing
- * simultaneously so try to minimise I/O.
- */
-static irqreturn_t tps65912_irq(int irq, void *irq_data)
-{
- struct tps65912 *tps65912 = irq_data;
- u32 irq_sts;
- u32 irq_mask;
- u8 reg;
- int i;
-
-
- tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
- irq_sts = reg;
- tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
- irq_sts |= reg << 8;
- tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
- irq_sts |= reg << 16;
- tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
- irq_sts |= reg << 24;
-
- tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
- irq_mask = reg;
- tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
- irq_mask |= reg << 8;
- tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
- irq_mask |= reg << 16;
- tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
- irq_mask |= reg << 24;
-
- irq_sts &= ~irq_mask;
- if (!irq_sts)
- return IRQ_NONE;
-
- for (i = 0; i < tps65912->irq_num; i++) {
- if (!(irq_sts & (1 << i)))
- continue;
-
- handle_nested_irq(tps65912->irq_base + i);
- }
-
- /* Write the STS register back to clear IRQs we handled */
- reg = irq_sts & 0xFF;
- irq_sts >>= 8;
- if (reg)
- tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
- reg = irq_sts & 0xFF;
- irq_sts >>= 8;
- if (reg)
- tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
- reg = irq_sts & 0xFF;
- irq_sts >>= 8;
- if (reg)
- tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
- reg = irq_sts & 0xFF;
- if (reg)
- tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
-
- return IRQ_HANDLED;
-}
-
-static void tps65912_irq_lock(struct irq_data *data)
-{
- struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&tps65912->irq_lock);
-}
-
-static void tps65912_irq_sync_unlock(struct irq_data *data)
-{
- struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
- u32 reg_mask;
- u8 reg;
-
- tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
- reg_mask = reg;
- tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
- reg_mask |= reg << 8;
- tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
- reg_mask |= reg << 16;
- tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
- reg_mask |= reg << 24;
-
- if (tps65912->irq_mask != reg_mask) {
- reg = tps65912->irq_mask & 0xFF;
- tps65912->write(tps65912, TPS65912_INT_MSK, 1, &reg);
- reg = tps65912->irq_mask >> 8 & 0xFF;
- tps65912->write(tps65912, TPS65912_INT_MSK2, 1, &reg);
- reg = tps65912->irq_mask >> 16 & 0xFF;
- tps65912->write(tps65912, TPS65912_INT_MSK3, 1, &reg);
- reg = tps65912->irq_mask >> 24 & 0xFF;
- tps65912->write(tps65912, TPS65912_INT_MSK4, 1, &reg);
- }
-
- mutex_unlock(&tps65912->irq_lock);
-}
-
-static void tps65912_irq_enable(struct irq_data *data)
-{
- struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
-
- tps65912->irq_mask &= ~(1 << irq_to_tps65912_irq(tps65912, data->irq));
-}
-
-static void tps65912_irq_disable(struct irq_data *data)
-{
- struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
-
- tps65912->irq_mask |= (1 << irq_to_tps65912_irq(tps65912, data->irq));
-}
-
-static struct irq_chip tps65912_irq_chip = {
- .name = "tps65912",
- .irq_bus_lock = tps65912_irq_lock,
- .irq_bus_sync_unlock = tps65912_irq_sync_unlock,
- .irq_disable = tps65912_irq_disable,
- .irq_enable = tps65912_irq_enable,
-};
-
-int tps65912_irq_init(struct tps65912 *tps65912, int irq,
- struct tps65912_platform_data *pdata)
-{
- int ret, cur_irq;
- int flags = IRQF_ONESHOT;
- u8 reg;
-
- if (!irq) {
- dev_warn(tps65912->dev, "No interrupt support, no core IRQ\n");
- return 0;
- }
-
- if (!pdata || !pdata->irq_base) {
- dev_warn(tps65912->dev, "No interrupt support, no IRQ base\n");
- return 0;
- }
-
- /* Clear unattended interrupts */
- tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
- tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
- tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
- tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
- tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
- tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
- tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
- tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
-
- /* Mask top level interrupts */
- tps65912->irq_mask = 0xFFFFFFFF;
-
- mutex_init(&tps65912->irq_lock);
- tps65912->chip_irq = irq;
- tps65912->irq_base = pdata->irq_base;
-
- tps65912->irq_num = TPS65912_NUM_IRQ;
-
- /* Register with genirq */
- for (cur_irq = tps65912->irq_base;
- cur_irq < tps65912->irq_num + tps65912->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, tps65912);
- irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
- irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
-
- ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
- "tps65912", tps65912);
-
- irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
- if (ret != 0)
- dev_err(tps65912->dev, "Failed to request IRQ: %d\n", ret);
-
- return ret;
-}
-
-int tps65912_irq_exit(struct tps65912 *tps65912)
-{
- free_irq(tps65912->chip_irq, tps65912);
- return 0;
-}
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index d59aa55b1495..4aeba9b6942a 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -1,140 +1,78 @@
/*
- * tps65912-spi.c -- SPI access for TI TPS65912x PMIC
+ * SPI access driver for TI TPS65912x PMICs
*
- * Copyright 2011 Texas Instruments Inc.
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
*
- * This driver is based on wm8350 implementation.
+ * Based on the TPS65218 driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tps65912.h>
-
-static int tps65912_spi_write(struct tps65912 *tps65912, u8 addr,
- int bytes, void *src)
-{
- struct spi_device *spi = tps65912->control_data;
- u8 *data = (u8 *) src;
- int ret;
- /* bit 23 is the read/write bit */
- unsigned long spi_data = 1 << 23 | addr << 15 | *data;
- struct spi_transfer xfer;
- struct spi_message msg;
- u32 tx_buf;
-
- tx_buf = spi_data;
-
- xfer.tx_buf = &tx_buf;
- xfer.rx_buf = NULL;
- xfer.len = sizeof(unsigned long);
- xfer.bits_per_word = 24;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- ret = spi_sync(spi, &msg);
- return ret;
-}
-
-static int tps65912_spi_read(struct tps65912 *tps65912, u8 addr,
- int bytes, void *dest)
-{
- struct spi_device *spi = tps65912->control_data;
- /* bit 23 is the read/write bit */
- unsigned long spi_data = 0 << 23 | addr << 15;
- struct spi_transfer xfer;
- struct spi_message msg;
- int ret;
- u8 *data = (u8 *) dest;
- u32 tx_buf, rx_buf;
-
- tx_buf = spi_data;
- rx_buf = 0;
- xfer.tx_buf = &tx_buf;
- xfer.rx_buf = &rx_buf;
- xfer.len = sizeof(unsigned long);
- xfer.bits_per_word = 24;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- if (spi == NULL)
- return 0;
+#include <linux/mfd/tps65912.h>
- ret = spi_sync(spi, &msg);
- if (ret == 0)
- *data = (u8) (rx_buf & 0xFF);
- return ret;
-}
+static const struct of_device_id tps65912_spi_of_match_table[] = {
+ { .compatible = "ti,tps65912", },
+ { /* sentinel */ }
+};
static int tps65912_spi_probe(struct spi_device *spi)
{
- struct tps65912 *tps65912;
+ struct tps65912 *tps;
- tps65912 = devm_kzalloc(&spi->dev,
- sizeof(struct tps65912), GFP_KERNEL);
- if (tps65912 == NULL)
+ tps = devm_kzalloc(&spi->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
return -ENOMEM;
- tps65912->dev = &spi->dev;
- tps65912->control_data = spi;
- tps65912->read = tps65912_spi_read;
- tps65912->write = tps65912_spi_write;
+ spi_set_drvdata(spi, tps);
+ tps->dev = &spi->dev;
+ tps->irq = spi->irq;
- spi_set_drvdata(spi, tps65912);
+ tps->regmap = devm_regmap_init_spi(spi, &tps65912_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ dev_err(tps->dev, "Failed to initialize register map\n");
+ return PTR_ERR(tps->regmap);
+ }
- return tps65912_device_init(tps65912);
+ return tps65912_device_init(tps);
}
-static int tps65912_spi_remove(struct spi_device *spi)
+static int tps65912_spi_remove(struct spi_device *client)
{
- struct tps65912 *tps65912 = spi_get_drvdata(spi);
+ struct tps65912 *tps = spi_get_drvdata(client);
- tps65912_device_exit(tps65912);
-
- return 0;
+ return tps65912_device_exit(tps);
}
+static const struct spi_device_id tps65912_spi_id_table[] = {
+ { "tps65912", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, tps65912_spi_id_table);
+
static struct spi_driver tps65912_spi_driver = {
- .driver = {
- .name = "tps65912",
+ .driver = {
+ .name = "tps65912",
+ .of_match_table = tps65912_spi_of_match_table,
},
- .probe = tps65912_spi_probe,
- .remove = tps65912_spi_remove,
+ .probe = tps65912_spi_probe,
+ .remove = tps65912_spi_remove,
+ .id_table = tps65912_spi_id_table,
};
+module_spi_driver(tps65912_spi_driver);
-static int __init tps65912_spi_init(void)
-{
- int ret;
-
- ret = spi_register_driver(&tps65912_spi_driver);
- if (ret != 0)
- pr_err("Failed to register TPS65912 SPI driver: %d\n", ret);
-
- return 0;
-}
-/* init early so consumer devices can complete system boot */
-subsys_initcall(tps65912_spi_init);
-
-static void __exit tps65912_spi_exit(void)
-{
- spi_unregister_driver(&tps65912_spi_driver);
-}
-module_exit(tps65912_spi_exit);
-
-MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
-MODULE_DESCRIPTION("SPI support for TPS65912 chip family mfd");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65912x SPI Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 0386eaf6be32..ab8b23b5bd22 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -297,7 +297,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
- { 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
{ 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
@@ -314,7 +313,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
{ 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
- { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
{ 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
@@ -338,7 +336,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
- { 0x000002A7, 0x372C }, /* R679 - Mic Detect Level 2 */
+ { 0x000002A7, 0x2C37 }, /* R679 - Mic Detect Level 2 */
{ 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
{ 0x000002A9, 0x030A }, /* R681 - Mic Detect Level 4 */
{ 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
@@ -402,7 +400,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
{ 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
- { 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
+ { 0x00000440, 0x0FFF }, /* R1088 - DRE Enable */
{ 0x00000442, 0x3F0A }, /* R1090 - DRE Control 2 */
{ 0x00000443, 0xDC1F }, /* R1090 - DRE Control 3 */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
@@ -863,7 +861,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
{ 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
{ 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
- { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C21, 0x0001 }, /* R3105 - Misc Pad Ctrl 2 */
{ 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
{ 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
{ 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
@@ -984,7 +982,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
- { 0x00000EE3, 0x0400 }, /* R3811 - ASRC_RATE2 */
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -1062,8 +1060,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_4:
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
- case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_CONTROL_7:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
@@ -1080,8 +1076,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_4:
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
- case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_CONTROL_7:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
@@ -1849,8 +1843,6 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_HAPTICS_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
- case ARIZONA_FLL1_NCO_TEST_0:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_DAC_COMP_1:
case ARIZONA_DAC_COMP_2:
case ARIZONA_DAC_COMP_3:
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index c18e11f42b3f..8e74e71507e7 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -676,8 +676,8 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
{ 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
{ 0x0000000A, 0x0001 }, /* R10 - Ctrl IF I2C2 CFG 1 */
- { 0x0000000B, 0x0036 }, /* R11 - Ctrl IF I2C1 CFG 2 */
- { 0x0000000C, 0x0036 }, /* R12 - Ctrl IF I2C2 CFG 2 */
+ { 0x0000000B, 0x001A }, /* R11 - Ctrl IF I2C1 CFG 2 */
+ { 0x0000000C, 0x001A }, /* R12 - Ctrl IF I2C2 CFG 2 */
{ 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
{ 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
{ 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
@@ -723,14 +723,12 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
{ 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
{ 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
- { 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
+ { 0x00000171, 0x0002 }, /* R369 - FLL1 Control 1 */
{ 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
{ 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0006 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
- { 0x00000177, 0x0281 }, /* R375 - FLL1 Loop Filter Test 1 */
- { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000179, 0x0000 }, /* R376 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
@@ -740,15 +738,13 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
{ 0x00000187, 0x0001 }, /* R390 - FLL1 Synchroniser 7 */
{ 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
- { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
- { 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
+ { 0x0000018A, 0x000C }, /* R394 - FLL1 GPIO Clock */
+ { 0x00000191, 0x0002 }, /* R401 - FLL2 Control 1 */
{ 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
{ 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
{ 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
{ 0x00000195, 0x000C }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
- { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
- { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x00000199, 0x0000 }, /* R408 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
@@ -758,7 +754,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
{ 0x000001A7, 0x0001 }, /* R422 - FLL2 Synchroniser 7 */
{ 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
- { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
+ { 0x000001AA, 0x000C }, /* R426 - FLL2 GPIO Clock */
{ 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
{ 0x00000210, 0x0184 }, /* R528 - LDO1 Control 1 */
{ 0x00000213, 0x03E4 }, /* R531 - LDO2 Control 1 */
@@ -771,9 +767,9 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
- { 0x000002A7, 0x372C }, /* R679 - Mic Detect Level 2 */
+ { 0x000002A7, 0x2C37 }, /* R679 - Mic Detect Level 2 */
{ 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
- { 0x000002A9, 0x300A }, /* R681 - Mic Detect Level 4 */
+ { 0x000002A9, 0x030A }, /* R681 - Mic Detect Level 4 */
{ 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
{ 0x000002CB, 0x0000 }, /* R715 - Isolation control */
{ 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
@@ -810,53 +806,53 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
{ 0x00000410, 0x0080 }, /* R1040 - Output Path Config 1L */
{ 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
- { 0x00000412, 0x0080 }, /* R1042 - DAC Volume Limit 1L */
+ { 0x00000412, 0x0081 }, /* R1042 - DAC Volume Limit 1L */
{ 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
{ 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
{ 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
- { 0x00000416, 0x0080 }, /* R1046 - DAC Volume Limit 1R */
+ { 0x00000416, 0x0081 }, /* R1046 - DAC Volume Limit 1R */
{ 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
{ 0x00000418, 0x0080 }, /* R1048 - Output Path Config 2L */
{ 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
- { 0x0000041A, 0x0080 }, /* R1050 - DAC Volume Limit 2L */
+ { 0x0000041A, 0x0081 }, /* R1050 - DAC Volume Limit 2L */
{ 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
{ 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
{ 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
- { 0x0000041E, 0x0080 }, /* R1054 - DAC Volume Limit 2R */
+ { 0x0000041E, 0x0081 }, /* R1054 - DAC Volume Limit 2R */
{ 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
{ 0x00000420, 0x0080 }, /* R1056 - Output Path Config 3L */
{ 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
- { 0x00000422, 0x0080 }, /* R1058 - DAC Volume Limit 3L */
+ { 0x00000422, 0x0081 }, /* R1058 - DAC Volume Limit 3L */
{ 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
{ 0x00000424, 0x0080 }, /* R1060 - Output Path Config 3R */
{ 0x00000425, 0x0180 }, /* R1061 - DAC Digital Volume 3R */
- { 0x00000426, 0x0080 }, /* R1062 - DAC Volume Limit 3R */
+ { 0x00000426, 0x0081 }, /* R1062 - DAC Volume Limit 3R */
{ 0x00000427, 0x0020 }, /* R1063 - Noise Gate Select 3R */
{ 0x00000428, 0x0000 }, /* R1064 - Output Path Config 4L */
{ 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
- { 0x0000042A, 0x0080 }, /* R1066 - Out Volume 4L */
+ { 0x0000042A, 0x0081 }, /* R1066 - Out Volume 4L */
{ 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
{ 0x0000042C, 0x0000 }, /* R1068 - Output Path Config 4R */
{ 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
- { 0x0000042E, 0x0080 }, /* R1070 - Out Volume 4R */
+ { 0x0000042E, 0x0081 }, /* R1070 - Out Volume 4R */
{ 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
{ 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
{ 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
- { 0x00000432, 0x0080 }, /* R1074 - DAC Volume Limit 5L */
+ { 0x00000432, 0x0081 }, /* R1074 - DAC Volume Limit 5L */
{ 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
{ 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */
{ 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
- { 0x00000436, 0x0080 }, /* R1078 - DAC Volume Limit 5R */
+ { 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
{ 0x00000438, 0x0000 }, /* R1080 - Output Path Config 6L */
{ 0x00000439, 0x0180 }, /* R1081 - DAC Digital Volume 6L */
- { 0x0000043A, 0x0080 }, /* R1082 - DAC Volume Limit 6L */
+ { 0x0000043A, 0x0081 }, /* R1082 - DAC Volume Limit 6L */
{ 0x0000043B, 0x0400 }, /* R1083 - Noise Gate Select 6L */
{ 0x0000043C, 0x0000 }, /* R1084 - Output Path Config 6R */
{ 0x0000043D, 0x0180 }, /* R1085 - DAC Digital Volume 6R */
- { 0x0000043E, 0x0080 }, /* R1086 - DAC Volume Limit 6R */
+ { 0x0000043E, 0x0081 }, /* R1086 - DAC Volume Limit 6R */
{ 0x0000043F, 0x0800 }, /* R1087 - Noise Gate Select 6R */
- { 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
+ { 0x00000440, 0x003F }, /* R1088 - DRE Enable */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
{ 0x00000458, 0x0000 }, /* R1112 - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
@@ -864,8 +860,8 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000492, 0x0069 }, /* R1170 - PDM SPK2 CTRL 1 */
{ 0x00000493, 0x0000 }, /* R1171 - PDM SPK2 CTRL 2 */
{ 0x000004A0, 0x3480 }, /* R1184 - HP1 Short Circuit Ctrl */
- { 0x000004A1, 0x3480 }, /* R1185 - HP2 Short Circuit Ctrl */
- { 0x000004A2, 0x3480 }, /* R1186 - HP3 Short Circuit Ctrl */
+ { 0x000004A1, 0x3400 }, /* R1185 - HP2 Short Circuit Ctrl */
+ { 0x000004A2, 0x3400 }, /* R1186 - HP3 Short Circuit Ctrl */
{ 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
{ 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
{ 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
@@ -1483,23 +1479,23 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
{ 0x00000C18, 0x0000 }, /* R3096 - GP Switch 1 */
{ 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
- { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C21, 0x0001 }, /* R3105 - Misc Pad Ctrl 2 */
{ 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
{ 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
{ 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
{ 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
- { 0x00000C30, 0x8282 }, /* R3120 - Misc Pad Ctrl 7 */
- { 0x00000C31, 0x0082 }, /* R3121 - Misc Pad Ctrl 8 */
- { 0x00000C32, 0x8282 }, /* R3122 - Misc Pad Ctrl 9 */
- { 0x00000C33, 0x8282 }, /* R3123 - Misc Pad Ctrl 10 */
- { 0x00000C34, 0x8282 }, /* R3124 - Misc Pad Ctrl 11 */
- { 0x00000C35, 0x8282 }, /* R3125 - Misc Pad Ctrl 12 */
- { 0x00000C36, 0x8282 }, /* R3126 - Misc Pad Ctrl 13 */
- { 0x00000C37, 0x8282 }, /* R3127 - Misc Pad Ctrl 14 */
- { 0x00000C38, 0x8282 }, /* R3128 - Misc Pad Ctrl 15 */
- { 0x00000C39, 0x8282 }, /* R3129 - Misc Pad Ctrl 16 */
- { 0x00000C3A, 0x8282 }, /* R3130 - Misc Pad Ctrl 17 */
- { 0x00000C3B, 0x8282 }, /* R3131 - Misc Pad Ctrl 18 */
+ { 0x00000C30, 0x0404 }, /* R3120 - Misc Pad Ctrl 7 */
+ { 0x00000C31, 0x0004 }, /* R3121 - Misc Pad Ctrl 8 */
+ { 0x00000C32, 0x0404 }, /* R3122 - Misc Pad Ctrl 9 */
+ { 0x00000C33, 0x0404 }, /* R3123 - Misc Pad Ctrl 10 */
+ { 0x00000C34, 0x0404 }, /* R3124 - Misc Pad Ctrl 11 */
+ { 0x00000C35, 0x0404 }, /* R3125 - Misc Pad Ctrl 12 */
+ { 0x00000C36, 0x0404 }, /* R3126 - Misc Pad Ctrl 13 */
+ { 0x00000C37, 0x0404 }, /* R3127 - Misc Pad Ctrl 14 */
+ { 0x00000C38, 0x0004 }, /* R3128 - Misc Pad Ctrl 15 */
+ { 0x00000C39, 0x0404 }, /* R3129 - Misc Pad Ctrl 16 */
+ { 0x00000C3A, 0x0404 }, /* R3130 - Misc Pad Ctrl 17 */
+ { 0x00000C3B, 0x0404 }, /* R3131 - Misc Pad Ctrl 18 */
{ 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
{ 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
{ 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
@@ -1641,7 +1637,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000F0D, 0x0000 }, /* R3853 - ANC Coefficient */
{ 0x00000F0E, 0x0000 }, /* R3854 - ANC Coefficient */
{ 0x00000F0F, 0x0000 }, /* R3855 - ANC Coefficient */
- { 0x00000F10, 0x0000 }, /* R3856 - ANC Coefficient */
+ { 0x00000F10, 0x0001 }, /* R3856 - ANC Coefficient */
{ 0x00000F11, 0x0000 }, /* R3857 - ANC Coefficient */
{ 0x00000F12, 0x0000 }, /* R3858 - ANC Coefficient */
{ 0x00000F15, 0x0000 }, /* R3861 - FCL Filter Control */
@@ -1947,8 +1943,6 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_CONTROL_7:
- case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -1965,8 +1959,6 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_CONTROL_7:
- case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
index 4c2dce77cdfc..a0de3002cdad 100644
--- a/drivers/mfd/wm8998-tables.c
+++ b/drivers/mfd/wm8998-tables.c
@@ -229,8 +229,6 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
- { 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
- { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
@@ -247,8 +245,6 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
{ 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
- { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
- { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
@@ -320,7 +316,7 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */
{ 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
- { 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
+ { 0x00000440, 0x002F }, /* R1088 - DRE Enable */
{ 0x00000441, 0xC759 }, /* R1089 - DRE Control 1 */
{ 0x00000442, 0x2A08 }, /* R1089 - DRE Control 2 */
{ 0x00000443, 0x5CFA }, /* R1089 - DRE Control 3 */
@@ -686,7 +682,7 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
{ 0x00000C18, 0x0000 }, /* R3096 - GP Switch 1 */
{ 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
- { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C21, 0x0001 }, /* R3105 - Misc Pad Ctrl 2 */
{ 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
{ 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
{ 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
@@ -888,8 +884,6 @@ static bool wm8998_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_CONTROL_7:
- case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -906,8 +900,6 @@ static bool wm8998_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_CONTROL_7:
- case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 054fc10cb3b6..a216b4667742 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -440,7 +440,7 @@ config ARM_CHARLCD
still useful.
config BMP085
- bool
+ tristate
depends on SYSFS
config BMP085_I2C
@@ -470,7 +470,7 @@ config BMP085_SPI
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
select GENERIC_NET_UTILS
- depends on PCI && (X86_32 || COMPILE_TEST)
+ depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
help
This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded
@@ -525,6 +525,284 @@ config VEXPRESS_SYSCFG
ARM Ltd. Versatile Express uses specialised platform configuration
bus. System Configuration interface is one of the possible means
of generating transactions on this bus.
+config PANEL
+ tristate "Parallel port LCD/Keypad Panel support"
+ depends on PARPORT
+ ---help---
+ Say Y here if you have an HD44780 or KS-0074 LCD connected to your
+ parallel port. This driver also features 4 and 6-key keypads. The LCD
+ is accessible through the /dev/lcd char device (10, 156), and the
+ keypad through /dev/keypad (10, 185). Both require misc device to be
+ enabled. This code can either be compiled as a module, or linked into
+ the kernel and started at boot. If you don't understand what all this
+ is about, say N.
+
+config PANEL_PARPORT
+ int "Default parallel port number (0=LPT1)"
+ depends on PANEL
+ range 0 255
+ default "0"
+ ---help---
+ This is the index of the parallel port the panel is connected to. One
+ driver instance only supports one parallel port, so if your keypad
+ and LCD are connected to two separate ports, you have to start two
+ modules with different arguments. Numbering starts with '0' for LPT1,
+ and so on.
+
+config PANEL_PROFILE
+ int "Default panel profile (0-5, 0=custom)"
+ depends on PANEL
+ range 0 5
+ default "5"
+ ---help---
+ To ease configuration, the driver supports different configuration
+ profiles for past and recent wirings. These profiles can also be
+ used to define an approximative configuration, completed by a few
+ other options. Here are the profiles :
+
+ 0 = custom (see further)
+ 1 = 2x16 parallel LCD, old keypad
+ 2 = 2x16 serial LCD (KS-0074), new keypad
+ 3 = 2x16 parallel LCD (Hantronix), no keypad
+ 4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad
+ 5 = 2x40 parallel LCD (old one), with old keypad
+
+ Custom configurations allow you to define how your display is
+ wired to the parallel port, and how it works. This is only intended
+ for experts.
+
+config PANEL_KEYPAD
+ depends on PANEL && PANEL_PROFILE="0"
+ int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
+ range 0 3
+ default 0
+ ---help---
+ This enables and configures a keypad connected to the parallel port.
+ The keys will be read from character device 10,185. Valid values are :
+
+ 0 : do not enable this driver
+ 1 : old 6 keys keypad
+ 2 : new 6 keys keypad, as used on the server at www.ant-computing.com
+ 3 : Nexcom NSA1045's 4 keys keypad
+
+ New profiles can be described in the driver source. The driver also
+ supports simultaneous keys pressed when the keypad supports them.
+
+config PANEL_LCD
+ depends on PANEL && PANEL_PROFILE="0"
+ int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
+ range 0 5
+ default 0
+ ---help---
+ This enables and configures an LCD connected to the parallel port.
+ The driver includes an interpreter for escape codes starting with
+ '\e[L' which are specific to the LCD, and a few ANSI codes. The
+ driver will be registered as character device 10,156, usually
+ under the name '/dev/lcd'. There are a total of 6 supported types :
+
+ 0 : do not enable the driver
+ 1 : custom configuration and wiring (see further)
+ 2 : 2x16 & 2x40 parallel LCD (old wiring)
+ 3 : 2x16 serial LCD (KS-0074 based)
+ 4 : 2x16 parallel LCD (Hantronix wiring)
+ 5 : 2x16 parallel LCD (Nexcom wiring)
+
+ When type '1' is specified, other options will appear to configure
+ more precise aspects (wiring, dimensions, protocol, ...). Please note
+ that those values changed from the 2.4 driver for better consistency.
+
+config PANEL_LCD_HEIGHT
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Number of lines on the LCD (1-2)"
+ range 1 2
+ default 2
+ ---help---
+ This is the number of visible character lines on the LCD in custom profile.
+ It can either be 1 or 2.
+
+config PANEL_LCD_WIDTH
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Number of characters per line on the LCD (1-40)"
+ range 1 40
+ default 40
+ ---help---
+ This is the number of characters per line on the LCD in custom profile.
+ Common values are 16,20,24,40.
+
+config PANEL_LCD_BWIDTH
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Internal LCD line width (1-40, 40 by default)"
+ range 1 40
+ default 40
+ ---help---
+ Most LCDs use a standard controller which supports hardware lines of 40
+ characters, although sometimes only 16, 20 or 24 of them are really wired
+ to the terminal. This results in some non-visible but addressable characters,
+ and is the case for most parallel LCDs. Other LCDs, and some serial ones,
+ however, use the same line width internally as what is visible. The KS0074
+ for example, uses 16 characters per line for 16 visible characters per line.
+
+ This option lets you configure the value used by your LCD in 'custom' profile.
+ If you don't know, put '40' here.
+
+config PANEL_LCD_HWIDTH
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Hardware LCD line width (1-64, 64 by default)"
+ range 1 64
+ default 64
+ ---help---
+ Most LCDs use a single address bit to differentiate line 0 and line 1. Since
+ some of them need to be able to address 40 chars with the lower bits, they
+ often use the immediately superior power of 2, which is 64, to address the
+ next line.
+
+ If you don't know what your LCD uses, in doubt let 16 here for a 2x16, and
+ 64 here for a 2x40.
+
+config PANEL_LCD_CHARSET
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "LCD character set (0=normal, 1=KS0074)"
+ range 0 1
+ default 0
+ ---help---
+ Some controllers such as the KS0074 use a somewhat strange character set
+ where many symbols are at unusual places. The driver knows how to map
+ 'standard' ASCII characters to the character sets used by these controllers.
+ Valid values are :
+
+ 0 : normal (untranslated) character set
+ 1 : KS0074 character set
+
+ If you don't know, use the normal one (0).
+
+config PANEL_LCD_PROTO
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "LCD communication mode (0=parallel 8 bits, 1=serial)"
+ range 0 1
+ default 0
+ ---help---
+ This driver now supports any serial or parallel LCD wired to a parallel
+ port. But before assigning signals, the driver needs to know if it will
+ be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires
+ (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals
+ (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for a 8 bits
+ parallel LCD, and 1 for a serial LCD.
+
+config PANEL_LCD_PIN_E
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
+ range -17 17
+ default 14
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'E'
+ signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'E' pin in custom profile is '14' (AUTOFEED).
+
+config PANEL_LCD_PIN_RS
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
+ range -17 17
+ default 17
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'RS'
+ signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'RS' pin in custom profile is '17' (SELECT IN).
+
+config PANEL_LCD_PIN_RW
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
+ range -17 17
+ default 16
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'RW'
+ signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'RW' pin in custom profile is '16' (INIT).
+
+config PANEL_LCD_PIN_SCL
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+ int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
+ range -17 17
+ default 1
+ ---help---
+ This describes the number of the parallel port pin to which the serial
+ LCD 'SCL' signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'SCL' pin in custom profile is '1' (STROBE).
+
+config PANEL_LCD_PIN_SDA
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+ int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
+ range -17 17
+ default 2
+ ---help---
+ This describes the number of the parallel port pin to which the serial
+ LCD 'SDA' signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'SDA' pin in custom profile is '2' (D0).
+
+config PANEL_LCD_PIN_BL
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
+ range -17 17
+ default 0
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'BL' signal
+ has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'BL' pin in custom profile is '0' (uncontrolled).
+
+config PANEL_CHANGE_MESSAGE
+ depends on PANEL
+ bool "Change LCD initialization message ?"
+ default "n"
+ ---help---
+ This allows you to replace the boot message indicating the kernel version
+ and the driver version with a custom message. This is useful on appliances
+ where a simple 'Starting system' message can be enough to stop a customer
+ from worrying.
+
+ If you say 'Y' here, you'll be able to choose a message yourself. Otherwise,
+ say 'N' and keep the default message with the version.
+
+config PANEL_BOOT_MESSAGE
+ depends on PANEL && PANEL_CHANGE_MESSAGE="y"
+ string "New initialization message"
+ default ""
+ ---help---
+ This allows you to replace the boot message indicating the kernel version
+ and the driver version with a custom message. This is useful on appliances
+ where a simple 'Starting system' message can be enough to stop a customer
+ from worrying.
+
+ An empty message will only clear the display at driver init time. Any other
+ printf()-formatted message is valid with newline and escape codes.
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 537d7f3b78da..b2fb6dbffcef 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -56,3 +56,4 @@ obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
+obj-$(CONFIG_PANEL) += panel.o
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 15e88078ba1e..fe1672747bc1 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
*/
value = swab16(value);
- if (dpot->uid == DPOT_UID(AD5271_ID))
+ if (dpot->uid == DPOT_UID(AD5274_ID))
value = value >> 2;
return value;
default:
@@ -452,7 +452,7 @@ static ssize_t sysfs_set_reg(struct device *dev,
int err;
if (reg & DPOT_ADDR_OTP_EN) {
- if (!strncmp(buf, "enabled", sizeof("enabled")))
+ if (sysfs_streq(buf, "enabled"))
set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
else
clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index a3e789b85cc8..dfb72ecfa604 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1215,7 +1215,7 @@ static int apds990x_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int apds990x_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
apds990x_chip_off(chip);
@@ -1224,7 +1224,7 @@ static int apds990x_suspend(struct device *dev)
static int apds990x_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
/*
@@ -1240,7 +1240,7 @@ static int apds990x_resume(struct device *dev)
#ifdef CONFIG_PM
static int apds990x_runtime_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
apds990x_chip_off(chip);
@@ -1249,7 +1249,7 @@ static int apds990x_runtime_suspend(struct device *dev)
static int apds990x_runtime_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
apds990x_chip_on(chip);
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index c65b5ea5d5ef..b3176ee92b90 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -8,7 +8,6 @@
* Author: Linus Walleij <triad@df.lth.se>
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -328,20 +327,6 @@ out_no_resource:
return ret;
}
-static int __exit charlcd_remove(struct platform_device *pdev)
-{
- struct charlcd *lcd = platform_get_drvdata(pdev);
-
- if (lcd) {
- free_irq(lcd->irq, lcd);
- iounmap(lcd->virtbase);
- release_mem_region(lcd->phybase, lcd->physize);
- kfree(lcd);
- }
-
- return 0;
-}
-
static int charlcd_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -376,13 +361,8 @@ static struct platform_driver charlcd_driver = {
.driver = {
.name = DRIVERNAME,
.pm = &charlcd_pm_ops,
+ .suppress_bind_attrs = true,
.of_match_table = of_match_ptr(charlcd_match),
},
- .remove = __exit_p(charlcd_remove),
};
-
-module_platform_driver_probe(charlcd_driver, charlcd_probe);
-
-MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>");
-MODULE_DESCRIPTION("ARM Character LCD Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(charlcd_driver, charlcd_probe);
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index e11a0bd6c66e..0516ecda54d3 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -34,6 +34,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
if (ssc->pdev->dev.of_node) {
if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
== ssc_num) {
+ ssc->pdev->id = ssc_num;
ssc_valid = 1;
break;
}
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 753d7ecdadaa..845466e45b95 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1323,7 +1323,7 @@ static int bh1770_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int bh1770_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_off(chip);
@@ -1333,7 +1333,7 @@ static int bh1770_suspend(struct device *dev)
static int bh1770_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
int ret = 0;
@@ -1361,7 +1361,7 @@ static int bh1770_resume(struct device *dev)
#ifdef CONFIG_PM
static int bh1770_runtime_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_off(chip);
@@ -1371,7 +1371,7 @@ static int bh1770_runtime_suspend(struct device *dev)
static int bh1770_runtime_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_on(chip);
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index cc8645b5369d..1922cb8f6b88 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -721,9 +721,7 @@ static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
- struct c2port_device *c2dev =
- dev_get_drvdata(container_of(kobj,
- struct device, kobj));
+ struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
ssize_t ret;
/* Check the device and flash access status */
@@ -838,9 +836,7 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
- struct c2port_device *c2dev =
- dev_get_drvdata(container_of(kobj,
- struct device, kobj));
+ struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
int ret;
/* Check the device access status */
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index be2ac5ce349f..8a55c1aa11aa 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
cxl-y += vphb.o api.o
+cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o
obj-$(CONFIG_CXL) += cxl.o
obj-$(CONFIG_CXL_BASE) += base.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index ea3eeb7011e1..2107c948406d 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -51,8 +51,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
if (rc)
goto err_mapping;
- cxl_assign_psn_space(ctx);
-
return ctx;
err_mapping:
@@ -78,7 +76,6 @@ struct device *cxl_get_phys_dev(struct pci_dev *dev)
return afu->adapter->dev.parent;
}
-EXPORT_SYMBOL_GPL(cxl_get_phys_dev);
int cxl_release_context(struct cxl_context *ctx)
{
@@ -91,28 +88,11 @@ int cxl_release_context(struct cxl_context *ctx)
}
EXPORT_SYMBOL_GPL(cxl_release_context);
-int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
-{
- if (num == 0)
- num = ctx->afu->pp_irqs;
- return afu_allocate_irqs(ctx, num);
-}
-EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
-
-void cxl_free_afu_irqs(struct cxl_context *ctx)
-{
- afu_irq_name_free(ctx);
- cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
-}
-EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
-
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
__u16 range;
int r;
- WARN_ON(num == 0);
-
for (r = 0; r < CXL_IRQ_RANGES; r++) {
range = ctx->irqs.range[r];
if (num < range) {
@@ -123,6 +103,44 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
return 0;
}
+int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
+{
+ int res;
+ irq_hw_number_t hwirq;
+
+ if (num == 0)
+ num = ctx->afu->pp_irqs;
+ res = afu_allocate_irqs(ctx, num);
+ if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) {
+ /* In a guest, the PSL interrupt is not multiplexed. It was
+ * allocated above, and we need to set its handler
+ */
+ hwirq = cxl_find_afu_irq(ctx, 0);
+ if (hwirq)
+ cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
+ }
+ return res;
+}
+EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
+
+void cxl_free_afu_irqs(struct cxl_context *ctx)
+{
+ irq_hw_number_t hwirq;
+ unsigned int virq;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE)) {
+ hwirq = cxl_find_afu_irq(ctx, 0);
+ if (hwirq) {
+ virq = irq_find_mapping(NULL, hwirq);
+ if (virq)
+ cxl_unmap_irq(virq, ctx);
+ }
+ }
+ afu_irq_name_free(ctx);
+ cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+}
+EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
+
int cxl_map_afu_irq(struct cxl_context *ctx, int num,
irq_handler_t handler, void *cookie, char *name)
{
@@ -178,7 +196,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
cxl_ctx_get();
- if ((rc = cxl_attach_process(ctx, kernel, wed , 0))) {
+ if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
put_pid(ctx->pid);
cxl_ctx_put();
goto out;
@@ -193,7 +211,7 @@ EXPORT_SYMBOL_GPL(cxl_start_context);
int cxl_process_element(struct cxl_context *ctx)
{
- return ctx->pe;
+ return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);
@@ -207,7 +225,6 @@ EXPORT_SYMBOL_GPL(cxl_stop_context);
void cxl_set_master(struct cxl_context *ctx)
{
ctx->master = true;
- cxl_assign_psn_space(ctx);
}
EXPORT_SYMBOL_GPL(cxl_set_master);
@@ -325,15 +342,11 @@ EXPORT_SYMBOL_GPL(cxl_start_work);
void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
- struct cxl_afu *afu = ctx->afu;
- int rc;
-
- rc = cxl_afu_check_and_enable(afu);
- if (rc)
+ if (ctx->status != STARTED)
return NULL;
pr_devel("%s: psn_phys%llx size:%llx\n",
- __func__, afu->psn_phys, afu->adapter->ps_size);
+ __func__, ctx->psn_phys, ctx->psn_size);
return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);
@@ -349,11 +362,11 @@ int cxl_afu_reset(struct cxl_context *ctx)
struct cxl_afu *afu = ctx->afu;
int rc;
- rc = __cxl_afu_reset(afu);
+ rc = cxl_ops->afu_reset(afu);
if (rc)
return rc;
- return cxl_afu_check_and_enable(afu);
+ return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);
@@ -363,3 +376,11 @@ void cxl_perst_reloads_same_image(struct cxl_afu *afu,
afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
+
+ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
+{
+ struct cxl_afu *afu = cxl_pci_to_afu(dev);
+
+ return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
+}
+EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
index a9f0dd3255a2..9b90ec6c07cd 100644
--- a/drivers/misc/cxl/base.c
+++ b/drivers/misc/cxl/base.c
@@ -11,6 +11,7 @@
#include <linux/rcupdate.h>
#include <asm/errno.h>
#include <misc/cxl-base.h>
+#include <linux/of_platform.h>
#include "cxl.h"
/* protected by rcu */
@@ -84,3 +85,34 @@ void unregister_cxl_calls(struct cxl_calls *calls)
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_cxl_calls);
+
+int cxl_update_properties(struct device_node *dn,
+ struct property *new_prop)
+{
+ return of_update_property(dn, new_prop);
+}
+EXPORT_SYMBOL_GPL(cxl_update_properties);
+
+static int __init cxl_base_init(void)
+{
+ struct device_node *np = NULL;
+ struct platform_device *dev;
+ int count = 0;
+
+ /*
+ * Scan for compatible devices in guest only
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+
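+	/*
+	 * Each node advertising "ibm,coherent-platform-facility" is given
+	 * a platform device, so the cxl OF driver can bind to it later.
+	 */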
+ while ((np = of_find_compatible_node(np, NULL,
+ "ibm,coherent-platform-facility"))) {
+ dev = of_platform_device_create(np, NULL, NULL);
+ if (dev)
+ count++;
+ }
+ pr_devel("Found %d cxl device(s)\n", count);
+ return 0;
+}
+
+module_init(cxl_base_init);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 262b88eac414..7edea9c19199 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -95,7 +95,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return i;
ctx->pe = i;
- ctx->elem = &ctx->afu->spa[i];
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ ctx->elem = &ctx->afu->native->spa[i];
+ ctx->external_pe = ctx->pe;
+ } else {
+ ctx->external_pe = -1; /* assigned when attaching */
+ }
ctx->pe_inserted = false;
/*
@@ -214,10 +219,17 @@ int __detach_context(struct cxl_context *ctx)
/* Only warn if we detached while the link was OK.
* If detach fails when hw is down, we don't care.
*/
- WARN_ON(cxl_detach_process(ctx) &&
- cxl_adapter_link_ok(ctx->afu->adapter));
+ WARN_ON(cxl_ops->detach_process(ctx) &&
+ cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ if (cxl_ops->irq_wait)
+ cxl_ops->irq_wait(ctx);
+
/* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
put_pid(ctx->glpid);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a521bc72cec2..73dc2a33da74 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
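+/* Mask of all DSISR fault/error conditions that need to be handled */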
+#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
@@ -324,6 +325,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_MODE_TIME_SLICED 0x4
#define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED)
+#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
+#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
+#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
+
enum cxl_context_status {
CLOSED,
OPENED,
@@ -336,6 +341,12 @@ enum prefault_modes {
CXL_PREFAULT_ALL,
};
+enum cxl_attrs {
+ CXL_ADAPTER_ATTRS,
+ CXL_AFU_MASTER_ATTRS,
+ CXL_AFU_ATTRS,
+};
+
struct cxl_sste {
__be64 esid_data;
__be64 vsid_data;
@@ -344,18 +355,46 @@ struct cxl_sste {
#define to_cxl_adapter(d) container_of(d, struct cxl, dev)
#define to_cxl_afu(d) container_of(d, struct cxl_afu, dev)
-struct cxl_afu {
+struct cxl_afu_native {
+ void __iomem *p1n_mmio;
+ void __iomem *afu_desc_mmio;
irq_hw_number_t psl_hwirq;
+ unsigned int psl_virq;
+ struct mutex spa_mutex;
+ /*
+ * Only the first part of the SPA is used for the process element
+ * linked list. The only other part that software needs to worry about
+ * is sw_command_status, which we store a separate pointer to.
+ * Everything else in the SPA is only used by hardware
+ */
+ struct cxl_process_element *spa;
+ __be64 *sw_command_status;
+ unsigned int spa_size;
+ int spa_order;
+ int spa_max_procs;
+ u64 pp_offset;
+};
+
+struct cxl_afu_guest {
+ u64 handle;
+ phys_addr_t p2n_phys;
+ u64 p2n_size;
+ int max_ints;
+ struct mutex recovery_lock;
+ int previous_state;
+};
+
+struct cxl_afu {
+ struct cxl_afu_native *native;
+ struct cxl_afu_guest *guest;
irq_hw_number_t serr_hwirq;
- char *err_irq_name;
- char *psl_irq_name;
unsigned int serr_virq;
- void __iomem *p1n_mmio;
+ char *psl_irq_name;
+ char *err_irq_name;
void __iomem *p2n_mmio;
phys_addr_t psn_phys;
- u64 pp_offset;
u64 pp_size;
- void __iomem *afu_desc_mmio;
+
struct cxl *adapter;
struct device dev;
struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d;
@@ -363,26 +402,12 @@ struct cxl_afu {
struct idr contexts_idr;
struct dentry *debugfs;
struct mutex contexts_lock;
- struct mutex spa_mutex;
spinlock_t afu_cntl_lock;
/* AFU error buffer fields and bin attribute for sysfs */
u64 eb_len, eb_offset;
struct bin_attribute attr_eb;
- /*
- * Only the first part of the SPA is used for the process element
- * linked list. The only other part that software needs to worry about
- * is sw_command_status, which we store a separate pointer to.
- * Everything else in the SPA is only used by hardware
- */
- struct cxl_process_element *spa;
- __be64 *sw_command_status;
- unsigned int spa_size;
- int spa_order;
- int spa_max_procs;
- unsigned int psl_virq;
-
/* pointer to the vphb */
struct pci_controller *phb;
@@ -421,6 +446,12 @@ struct cxl_irq_name {
char *name;
};
+struct irq_avail {
+ irq_hw_number_t offset;
+ irq_hw_number_t range;
+ unsigned long *bitmap;
+};
+
/*
* This is a cxl context. If the PSL is in dedicated mode, there will be one
* of these per AFU. If in AFU directed there can be lots of these.
@@ -476,7 +507,19 @@ struct cxl_context {
struct cxl_process_element *elem;
- int pe; /* process element handle */
+ /*
+ * pe is the process element handle, assigned by this driver when the
+ * context is initialized.
+ *
+ * external_pe is the PE shown outside of cxl.
+ * On bare-metal, pe=external_pe, because we decide what the handle is.
+ * In a guest, we only find out about the pe used by pHyp when the
+ * context is attached, and that's the value we want to report outside
+ * of cxl.
+ */
+ int pe;
+ int external_pe;
+
u32 irq_count;
bool pe_inserted;
bool master;
@@ -488,11 +531,34 @@ struct cxl_context {
struct rcu_head rcu;
};
-struct cxl {
+struct cxl_native {
+ u64 afu_desc_off;
+ u64 afu_desc_size;
void __iomem *p1_mmio;
void __iomem *p2_mmio;
irq_hw_number_t err_hwirq;
unsigned int err_virq;
+ u64 ps_off;
+};
+
+struct cxl_guest {
+ struct platform_device *pdev;
+ int irq_nranges;
+ struct cdev cdev;
+ irq_hw_number_t irq_base_offset;
+ struct irq_avail *irq_avail;
+ spinlock_t irq_alloc_lock;
+ u64 handle;
+ char *status;
+ u16 vendor;
+ u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem;
+};
+
+struct cxl {
+ struct cxl_native *native;
+ struct cxl_guest *guest;
spinlock_t afu_list_lock;
struct cxl_afu *afu[CXL_MAX_SLICES];
struct device dev;
@@ -503,9 +569,6 @@ struct cxl {
struct bin_attribute cxl_attr;
int adapter_num;
int user_irqs;
- u64 afu_desc_off;
- u64 afu_desc_size;
- u64 ps_off;
u64 ps_size;
u16 psl_rev;
u16 base_image;
@@ -519,13 +582,15 @@ struct cxl {
bool perst_same_image;
};
-int cxl_alloc_one_irq(struct cxl *adapter);
-void cxl_release_one_irq(struct cxl *adapter, int hwirq);
-int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
-void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
-int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
+int cxl_pci_alloc_one_irq(struct cxl *adapter);
+void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq);
+int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
+void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
+int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
int cxl_update_image_control(struct cxl *adapter);
-int cxl_reset(struct cxl *adapter);
+int cxl_pci_reset(struct cxl *adapter);
+void cxl_pci_release_afu(struct device *dev);
+ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
/* common == phyp + powernv */
struct cxl_process_element_common {
@@ -555,29 +620,32 @@ struct cxl_process_element {
__be32 software_state;
} __packed;
-static inline bool cxl_adapter_link_ok(struct cxl *cxl)
+static inline bool cxl_adapter_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
struct pci_dev *pdev;
- pdev = to_pci_dev(cxl->dev.parent);
- return !pci_channel_offline(pdev);
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ pdev = to_pci_dev(cxl->dev.parent);
+ return !pci_channel_offline(pdev);
+ }
+ return true;
}
static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
{
WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
- return cxl->p1_mmio + cxl_reg_off(reg);
+ return cxl->native->p1_mmio + cxl_reg_off(reg);
}
static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
{
- if (likely(cxl_adapter_link_ok(cxl)))
+ if (likely(cxl_adapter_link_ok(cxl, NULL)))
out_be64(_cxl_p1_addr(cxl, reg), val);
}
static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
{
- if (likely(cxl_adapter_link_ok(cxl)))
+ if (likely(cxl_adapter_link_ok(cxl, NULL)))
return in_be64(_cxl_p1_addr(cxl, reg));
else
return ~0ULL;
@@ -586,18 +654,18 @@ static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
{
WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
- return afu->p1n_mmio + cxl_reg_off(reg);
+ return afu->native->p1n_mmio + cxl_reg_off(reg);
}
static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
+ if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
out_be64(_cxl_p1n_addr(afu, reg), val);
}
static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg)
{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
+ if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
return in_be64(_cxl_p1n_addr(afu, reg));
else
return ~0ULL;
@@ -610,39 +678,19 @@ static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg
static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val)
{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
+ if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
out_be64(_cxl_p2n_addr(afu, reg), val);
}
static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
+ if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
return in_be64(_cxl_p2n_addr(afu, reg));
else
return ~0ULL;
}
-static inline u64 cxl_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
- return in_le64((afu)->afu_desc_mmio + (afu)->crs_offset +
- ((cr) * (afu)->crs_len) + (off));
- else
- return ~0ULL;
-}
-
-static inline u32 cxl_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
- return in_le32((afu)->afu_desc_mmio + (afu)->crs_offset +
- ((cr) * (afu)->crs_len) + (off));
- else
- return 0xffffffff;
-}
-u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
-u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
-
-ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
+ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count);
@@ -652,13 +700,14 @@ struct cxl_calls {
};
int register_cxl_calls(struct cxl_calls *calls);
void unregister_cxl_calls(struct cxl_calls *calls);
+int cxl_update_properties(struct device_node *dn, struct property *new_prop);
-int cxl_alloc_adapter_nr(struct cxl *adapter);
void cxl_remove_adapter_nr(struct cxl *adapter);
int cxl_alloc_spa(struct cxl_afu *afu);
void cxl_release_spa(struct cxl_afu *afu);
+dev_t cxl_get_dev(void);
int cxl_file_init(void);
void cxl_file_exit(void);
int cxl_register_adapter(struct cxl *adapter);
@@ -679,21 +728,19 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu);
int cxl_sysfs_afu_m_add(struct cxl_afu *afu);
void cxl_sysfs_afu_m_remove(struct cxl_afu *afu);
-int cxl_afu_activate_mode(struct cxl_afu *afu, int mode);
-int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode);
-int cxl_afu_deactivate_mode(struct cxl_afu *afu);
+struct cxl *cxl_alloc_adapter(void);
+struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice);
int cxl_afu_select_best_mode(struct cxl_afu *afu);
-int cxl_register_psl_irq(struct cxl_afu *afu);
-void cxl_release_psl_irq(struct cxl_afu *afu);
-int cxl_register_psl_err_irq(struct cxl *adapter);
-void cxl_release_psl_err_irq(struct cxl *adapter);
-int cxl_register_serr_irq(struct cxl_afu *afu);
-void cxl_release_serr_irq(struct cxl_afu *afu);
+int cxl_native_register_psl_irq(struct cxl_afu *afu);
+void cxl_native_release_psl_irq(struct cxl_afu *afu);
+int cxl_native_register_psl_err_irq(struct cxl *adapter);
+void cxl_native_release_psl_err_irq(struct cxl *adapter);
+int cxl_native_register_serr_irq(struct cxl_afu *afu);
+void cxl_native_release_serr_irq(struct cxl_afu *afu);
int afu_register_irqs(struct cxl_context *ctx, u32 count);
void afu_release_irqs(struct cxl_context *ctx, void *cookie);
void afu_irq_name_free(struct cxl_context *ctx);
-irqreturn_t cxl_slice_irq_err(int irq, void *data);
int cxl_debugfs_init(void);
void cxl_debugfs_exit(void);
@@ -707,6 +754,7 @@ void cxl_prefault(struct cxl_context *ctx, u64 wed);
struct cxl *get_cxl_adapter(int num);
int cxl_alloc_sst(struct cxl_context *ctx);
+void cxl_dump_debug_buffer(void *addr, size_t size);
void init_cxl_native(void);
@@ -720,40 +768,54 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
void cxl_unmap_irq(unsigned int virq, void *cookie);
int __detach_context(struct cxl_context *ctx);
-/* This matches the layout of the H_COLLECT_CA_INT_INFO retbuf */
+/*
+ * This must match the layout of the H_COLLECT_CA_INT_INFO retbuf defined
+ * in PAPR.
+ * A word about endianness: a pointer to this structure is passed when
+ * calling the hcall. However, it is not a block of memory filled up by
+ * the hypervisor. The return values are found in registers, and copied
+ * one by one when returning from the hcall. See the end of the call to
+ * plpar_hcall9() in hvCall.S
+ * As a consequence:
+ * - we don't need to do any endianness conversion
+ * - the pid and tid are an exception. They are 32-bit values returned in
+ * the same 64-bit register. So we do need to worry about byte ordering.
+ */
struct cxl_irq_info {
u64 dsisr;
u64 dar;
u64 dsr;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
u32 pid;
u32 tid;
+#else
+ u32 tid;
+ u32 pid;
+#endif
u64 afu_err;
u64 errstat;
- u64 padding[3]; /* to match the expected retbuf size for plpar_hcall9 */
+ u64 proc_handle;
+ u64 padding[2]; /* to match the expected retbuf size for plpar_hcall9 */
};
void cxl_assign_psn_space(struct cxl_context *ctx);
-int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed,
- u64 amr);
-int cxl_detach_process(struct cxl_context *ctx);
-
-int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info);
-int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
+int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler,
+ void *cookie, irq_hw_number_t *dest_hwirq,
+ unsigned int *dest_virq, const char *name);
int cxl_check_error(struct cxl_afu *afu);
int cxl_afu_slbia(struct cxl_afu *afu);
int cxl_tlb_slb_invalidate(struct cxl *adapter);
int cxl_afu_disable(struct cxl_afu *afu);
-int __cxl_afu_reset(struct cxl_afu *afu);
-int cxl_afu_check_and_enable(struct cxl_afu *afu);
int cxl_psl_purge(struct cxl_afu *afu);
void cxl_stop_trace(struct cxl *cxl);
int cxl_pci_vphb_add(struct cxl_afu *afu);
-void cxl_pci_vphb_reconfigure(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
extern struct pci_driver cxl_pci_driver;
+extern struct platform_driver cxl_of_driver;
int afu_allocate_irqs(struct cxl_context *ctx, u32 count);
int afu_open(struct inode *inode, struct file *file);
@@ -764,4 +826,62 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll);
ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
extern const struct file_operations afu_fops;
+struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *dev);
+void cxl_guest_remove_adapter(struct cxl *adapter);
+int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np);
+int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np);
+ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
+ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len);
+int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np);
+void cxl_guest_remove_afu(struct cxl_afu *afu);
+int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np);
+int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *afu_np);
+int cxl_guest_add_chardev(struct cxl *adapter);
+void cxl_guest_remove_chardev(struct cxl *adapter);
+void cxl_guest_reload_module(struct cxl *adapter);
+int cxl_of_probe(struct platform_device *pdev);
+
+struct cxl_backend_ops {
+ struct module *module;
+ int (*adapter_reset)(struct cxl *adapter);
+ int (*alloc_one_irq)(struct cxl *adapter);
+ void (*release_one_irq)(struct cxl *adapter, int hwirq);
+ int (*alloc_irq_ranges)(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter, unsigned int num);
+ void (*release_irq_ranges)(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter);
+ int (*setup_irq)(struct cxl *adapter, unsigned int hwirq,
+ unsigned int virq);
+ irqreturn_t (*handle_psl_slice_error)(struct cxl_context *ctx,
+ u64 dsisr, u64 errstat);
+ irqreturn_t (*psl_interrupt)(int irq, void *data);
+ int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+ void (*irq_wait)(struct cxl_context *ctx);
+ int (*attach_process)(struct cxl_context *ctx, bool kernel,
+ u64 wed, u64 amr);
+ int (*detach_process)(struct cxl_context *ctx);
+ bool (*support_attributes)(const char *attr_name, enum cxl_attrs type);
+ bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu);
+ void (*release_afu)(struct device *dev);
+ ssize_t (*afu_read_err_buffer)(struct cxl_afu *afu, char *buf,
+ loff_t off, size_t count);
+ int (*afu_check_and_enable)(struct cxl_afu *afu);
+ int (*afu_activate_mode)(struct cxl_afu *afu, int mode);
+ int (*afu_deactivate_mode)(struct cxl_afu *afu, int mode);
+ int (*afu_reset)(struct cxl_afu *afu);
+ int (*afu_cr_read8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 *val);
+ int (*afu_cr_read16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 *val);
+ int (*afu_cr_read32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 *val);
+ int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val);
+ int (*afu_cr_write8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 val);
+ int (*afu_cr_write16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 val);
+ int (*afu_cr_write32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 val);
+ ssize_t (*read_adapter_vpd)(struct cxl *adapter, void *buf, size_t count);
+};
+extern const struct cxl_backend_ops cxl_native_ops;
+extern const struct cxl_backend_ops cxl_guest_ops;
+extern const struct cxl_backend_ops *cxl_ops;
+
+/* check if the given pci_dev is on the cxl vphb bus */
+bool cxl_pci_is_vphb_device(struct pci_dev *dev);
#endif
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 18df6f44af2a..5751899e0c17 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -118,6 +118,10 @@ void cxl_debugfs_afu_remove(struct cxl_afu *afu)
int __init cxl_debugfs_init(void)
{
struct dentry *ent;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+
ent = debugfs_create_dir("cxl", NULL);
if (IS_ERR(ent))
return PTR_ERR(ent);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 81c3f75b7330..9a8650bcb042 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -101,7 +101,7 @@ static void cxl_ack_ae(struct cxl_context *ctx)
{
unsigned long flags;
- cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
spin_lock_irqsave(&ctx->lock, flags);
ctx->pending_fault = true;
@@ -125,7 +125,7 @@ static int cxl_handle_segment_miss(struct cxl_context *ctx,
else {
mb(); /* Order seg table write to TFC MMIO write */
- cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
return IRQ_HANDLED;
@@ -163,7 +163,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
local_irq_restore(flags);
pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
- cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
/*
@@ -254,14 +254,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
u64 dar = ctx->dar;
struct mm_struct *mm = NULL;
- if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
- cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
- cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
- /* Most likely explanation is harmless - a dedicated process
- * has detached and these were cleared by the PSL purge, but
- * warn about it just in case */
- dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
- return;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
+ cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
+ cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
+ /* Most likely explanation is harmless - a dedicated
+ * process has detached and these were cleared by the
+ * PSL purge, but warn about it just in case
+ */
+ dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
+ return;
+ }
}
/* Early return if the context is being / has been detached */
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 783337d22f36..eec468f1612f 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -26,9 +26,7 @@
#include "trace.h"
#define CXL_NUM_MINORS 256 /* Total to reserve */
-#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
-#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
@@ -36,7 +34,6 @@
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))
-#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)
#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
@@ -79,7 +76,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
if (!afu->current_mode)
goto err_put_afu;
- if (!cxl_adapter_link_ok(adapter)) {
+ if (!cxl_ops->link_ok(adapter, afu)) {
rc = -EIO;
goto err_put_afu;
}
@@ -210,8 +207,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
- if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
- amr))) {
+ if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
+ amr))) {
afu_release_irqs(ctx, ctx);
goto out;
}
@@ -222,12 +219,13 @@ out:
mutex_unlock(&ctx->status_mutex);
return rc;
}
+
static long afu_ioctl_process_element(struct cxl_context *ctx,
int __user *upe)
{
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
- if (copy_to_user(upe, &ctx->pe, sizeof(__u32)))
+ if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
return -EFAULT;
return 0;
@@ -259,7 +257,7 @@ long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (ctx->status == CLOSED)
return -EIO;
- if (!cxl_adapter_link_ok(ctx->afu->adapter))
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
pr_devel("afu_ioctl\n");
@@ -289,7 +287,7 @@ int afu_mmap(struct file *file, struct vm_area_struct *vm)
if (ctx->status != STARTED)
return -EIO;
- if (!cxl_adapter_link_ok(ctx->afu->adapter))
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
return cxl_context_iomap(ctx, vm);
@@ -336,7 +334,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
int rc;
DEFINE_WAIT(wait);
- if (!cxl_adapter_link_ok(ctx->afu->adapter))
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
if (count < CXL_READ_MIN_SIZE)
@@ -349,7 +347,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
if (ctx_event_pending(ctx))
break;
- if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
rc = -EIO;
goto out;
}
@@ -445,7 +443,8 @@ static const struct file_operations afu_master_fops = {
static char *cxl_devnode(struct device *dev, umode_t *mode)
{
- if (CXL_DEVT_IS_CARD(dev->devt)) {
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ CXL_DEVT_IS_CARD(dev->devt)) {
/*
* These minor numbers will eventually be used to program the
* PSL and AFUs once we have dynamic reprogramming support
@@ -546,6 +545,11 @@ int cxl_register_adapter(struct cxl *adapter)
return device_register(&adapter->dev);
}
+dev_t cxl_get_dev(void)
+{
+ return cxl_dev;
+}
+
int __init cxl_file_init(void)
{
int rc;
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
new file mode 100644
index 000000000000..68dd0b7da471
--- /dev/null
+++ b/drivers/misc/cxl/flash.c
@@ -0,0 +1,538 @@
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/rtas.h>
+
+#include "cxl.h"
+#include "hcalls.h"
+
+#define DOWNLOAD_IMAGE 1
+#define VALIDATE_IMAGE 2
+
+struct ai_header {
+ u16 version;
+ u8 reserved0[6];
+ u16 vendor;
+ u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem;
+ u64 image_offset;
+ u64 image_length;
+ u8 reserved1[96];
+};
+
+static struct semaphore sem;
+static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
+static struct sg_list *le;
+static u64 continue_token;
+static unsigned int transfer;
+
+struct update_props_workarea {
+ __be32 phandle;
+ __be32 state;
+ __be64 reserved;
+ __be32 nprops;
+} __packed;
+
+struct update_nodes_workarea {
+ __be32 state;
+ __be64 unit_address;
+ __be32 reserved;
+} __packed;
+
+#define DEVICE_SCOPE 3
+#define NODE_ACTION_MASK 0xff000000
+#define NODE_COUNT_MASK 0x00ffffff
+#define OPCODE_DELETE 0x01000000
+#define OPCODE_UPDATE 0x02000000
+#define OPCODE_ADD 0x03000000
+
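+/*
+ * RTAS operates on a single shared buffer (rtas_data_buf), so access is
+ * serialized and the caller's buffer is copied in and out around the call.
+ */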
+static int rcall(int token, char *buf, s32 scope)
+{
+ int rc;
+
+ spin_lock(&rtas_data_buf_lock);
+
+ memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
+ rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
+ memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
+
+ spin_unlock(&rtas_data_buf_lock);
+ return rc;
+}
+
+static int update_property(struct device_node *dn, const char *name,
+ u32 vd, char *value)
+{
+ struct property *new_prop;
+ u32 *val;
+ int rc;
+
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ return -ENOMEM;
+
+ new_prop->name = kstrdup(name, GFP_KERNEL);
+ if (!new_prop->name) {
+ kfree(new_prop);
+ return -ENOMEM;
+ }
+
+ new_prop->length = vd;
+ new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
+ if (!new_prop->value) {
+ kfree(new_prop->name);
+ kfree(new_prop);
+ return -ENOMEM;
+ }
+ memcpy(new_prop->value, value, vd);
+
+ val = (u32 *)new_prop->value;
+ rc = cxl_update_properties(dn, new_prop);
+ pr_devel("%s: update property (%s, length: %i, value: %#x)\n",
+ dn->name, name, vd, be32_to_cpu(*val));
+
+ if (rc) {
+ kfree(new_prop->name);
+ kfree(new_prop->value);
+ kfree(new_prop);
+ }
+ return rc;
+}
+
+static int update_node(__be32 phandle, s32 scope)
+{
+ struct update_props_workarea *upwa;
+ struct device_node *dn;
+ int i, rc, ret;
+ char *prop_data;
+ char *buf;
+ int token;
+ u32 nprops;
+ u32 vd;
+
+ token = rtas_token("ibm,update-properties");
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -EINVAL;
+
+ buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
+ if (!dn) {
+ kfree(buf);
+ return -ENOENT;
+ }
+
+ upwa = (struct update_props_workarea *)&buf[0];
+ upwa->phandle = phandle;
+ do {
+ rc = rcall(token, buf, scope);
+ if (rc < 0)
+ break;
+
+ prop_data = buf + sizeof(*upwa);
+ nprops = be32_to_cpu(upwa->nprops);
+
+ if (*prop_data == 0) {
+ prop_data++;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
+ prop_data += vd + sizeof(vd);
+ nprops--;
+ }
+
+ for (i = 0; i < nprops; i++) {
+ char *prop_name;
+
+ prop_name = prop_data;
+ prop_data += strlen(prop_name) + 1;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
+ prop_data += sizeof(vd);
+
+ if ((vd != 0x00000000) && (vd != 0x80000000)) {
+ ret = update_property(dn, prop_name, vd,
+ prop_data);
+ if (ret)
+ pr_err("cxl: Could not update property %s - %i\n",
+ prop_name, ret);
+
+ prop_data += vd;
+ }
+ }
+ } while (rc == 1);
+
+ of_node_put(dn);
+ kfree(buf);
+ return rc;
+}
+
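+/*
+ * Walk the ibm,update-nodes output after an image validation: updated
+ * nodes have their properties refreshed through ibm,update-properties;
+ * deleted and added nodes need no extra work here.
+ */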
+static int update_devicetree(struct cxl *adapter, s32 scope)
+{
+ struct update_nodes_workarea *unwa;
+ u32 action, node_count;
+ int token, rc, i;
+ __be32 *data, drc_index, phandle;
+ char *buf;
+
+ token = rtas_token("ibm,update-nodes");
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -EINVAL;
+
+ buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ unwa = (struct update_nodes_workarea *)&buf[0];
+ unwa->unit_address = cpu_to_be64(adapter->guest->handle);
+ do {
+ rc = rcall(token, buf, scope);
+ if (rc && rc != 1)
+ break;
+
+ data = (__be32 *)buf + 4;
+ while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
+ action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+ node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
+ pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
+ action, node_count);
+ data++;
+
+ for (i = 0; i < node_count; i++) {
+ phandle = *data++;
+
+ switch (action) {
+ case OPCODE_DELETE:
+ /* nothing to do */
+ break;
+ case OPCODE_UPDATE:
+ update_node(phandle, scope);
+ break;
+ case OPCODE_ADD:
+ /* nothing to do, just move pointer */
+ drc_index = *data++;
+ break;
+ }
+ }
+ }
+ } while (rc == 1);
+
+ kfree(buf);
+ return 0;
+}
+
+static int handle_image(struct cxl *adapter, int operation,
+ long (*fct)(u64, u64, u64, u64 *),
+ struct cxl_adapter_image *ai)
+{
+ size_t mod, s_copy, len_chunk = 0;
+ struct ai_header *header = NULL;
+ unsigned int entries = 0, i;
+ void *dest, *from;
+ int rc = 0, need_header;
+
+ /* base adapter image header */
+ need_header = (ai->flags & CXL_AI_NEED_HEADER);
+ if (need_header) {
+ header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
+ if (!header)
+ return -ENOMEM;
+ header->version = cpu_to_be16(1);
+ header->vendor = cpu_to_be16(adapter->guest->vendor);
+ header->device = cpu_to_be16(adapter->guest->device);
+ header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
+ header->subsystem = cpu_to_be16(adapter->guest->subsystem);
+ header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
+ header->image_length = cpu_to_be64(ai->len_image);
+ }
+
+ /* number of entries in the list */
+ len_chunk = ai->len_data;
+ if (need_header)
+ len_chunk += CXL_AI_HEADER_SIZE;
+
+ entries = len_chunk / CXL_AI_BUFFER_SIZE;
+ mod = len_chunk % CXL_AI_BUFFER_SIZE;
+ if (mod)
+ entries++;
+
+ if (entries > CXL_AI_MAX_ENTRIES) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+	/* <-- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
+ * chunk 0 ----------------------------------------------------
+ * | header | data |
+ * ----------------------------------------------------
+ * chunk 1 ----------------------------------------------------
+ * | data |
+ * ----------------------------------------------------
+ * ....
+ * chunk n ----------------------------------------------------
+ * | data |
+ * ----------------------------------------------------
+ */
+ from = (void *) ai->data;
+ for (i = 0; i < entries; i++) {
+ dest = buffer[i];
+ s_copy = CXL_AI_BUFFER_SIZE;
+
+ if ((need_header) && (i == 0)) {
+ /* add adapter image header */
+ memcpy(buffer[i], header, sizeof(struct ai_header));
+ s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
+ dest += CXL_AI_HEADER_SIZE; /* image offset */
+ }
+ if ((i == (entries - 1)) && mod)
+ s_copy = mod;
+
+		/* copy data; a faulting user buffer is reported as -EFAULT */
+		if (copy_from_user(dest, from, s_copy)) {
+			rc = -EFAULT;
+			goto err;
+		}
+
+ /* fill in the list */
+ le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
+ le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
+ if ((i == (entries - 1)) && mod)
+ le[i].len = cpu_to_be64(mod);
+ from += s_copy;
+ }
+ pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
+ __func__, operation, need_header, entries, continue_token);
+
+ /*
+ * download/validate the adapter image to the coherent
+ * platform facility
+ */
+ rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
+ &continue_token);
+ if (rc == 0) /* success of download/validation operation */
+ continue_token = 0;
+
+err:
+ kfree(header);
+
+ return rc;
+}
+
+static int transfer_image(struct cxl *adapter, int operation,
+ struct cxl_adapter_image *ai)
+{
+ int rc = 0;
+ int afu;
+
+ switch (operation) {
+ case DOWNLOAD_IMAGE:
+ rc = handle_image(adapter, operation,
+ &cxl_h_download_adapter_image, ai);
+ if (rc < 0) {
+ pr_devel("resetting adapter\n");
+ cxl_h_reset_adapter(adapter->guest->handle);
+ }
+ return rc;
+
+ case VALIDATE_IMAGE:
+ rc = handle_image(adapter, operation,
+ &cxl_h_validate_adapter_image, ai);
+ if (rc < 0) {
+ pr_devel("resetting adapter\n");
+ cxl_h_reset_adapter(adapter->guest->handle);
+ return rc;
+ }
+ if (rc == 0) {
+ pr_devel("remove curent afu\n");
+ for (afu = 0; afu < adapter->slices; afu++)
+ cxl_guest_remove_afu(adapter->afu[afu]);
+
+ pr_devel("resetting adapter\n");
+ cxl_h_reset_adapter(adapter->guest->handle);
+
+			/*
+			 * The whole image has now been downloaded and
+			 * validated successfully. The partition should
+			 * now call ibm,update-nodes and
+			 * ibm,update-properties to receive the current
+			 * configuration.
+			 */
+ rc = update_devicetree(adapter, DEVICE_SCOPE);
+ transfer = 1;
+ }
+ return rc;
+ }
+
+ return -EINVAL;
+}
+
+static long ioctl_transfer_image(struct cxl *adapter, int operation,
+ struct cxl_adapter_image __user *uai)
+{
+ struct cxl_adapter_image ai;
+
+ pr_devel("%s\n", __func__);
+
+ if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image)))
+ return -EFAULT;
+
+ /*
+ * Make sure reserved fields and bits are set to 0
+ */
+ if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 ||
+ (ai.flags & ~CXL_AI_ALL))
+ return -EINVAL;
+
+ return transfer_image(adapter, operation, &ai);
+}
+
+static int device_open(struct inode *inode, struct file *file)
+{
+ int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
+ struct cxl *adapter;
+ int rc = 0, i;
+
+ pr_devel("in %s\n", __func__);
+
+ BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);
+
+	/* Allow only one process at a time to open the device, using a semaphore */
+	if (down_interruptible(&sem) != 0)
+		return -EPERM;
+
+	if (!(adapter = get_cxl_adapter(adapter_num))) {
+		up(&sem);
+		return -ENODEV;
+	}
+
+ file->private_data = adapter;
+ continue_token = 0;
+ transfer = 0;
+
+ for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
+ buffer[i] = NULL;
+
+	/* aligned buffer containing list entries which describe up to
+ * 1 megabyte of data (256 entries of 4096 bytes each)
+ * Logical real address of buffer 0 - Buffer 0 length in bytes
+ * Logical real address of buffer 1 - Buffer 1 length in bytes
+ * Logical real address of buffer 2 - Buffer 2 length in bytes
+ * ....
+ * ....
+ * Logical real address of buffer N - Buffer N length in bytes
+ */
+ le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
+ if (!le) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
+ buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (!buffer[i]) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
+ if (buffer[i])
+ free_page((unsigned long) buffer[i]);
+ }
+
+ if (le)
+ free_page((unsigned long) le);
+err:
+	up(&sem);
+	put_device(&adapter->dev);
+
+ return rc;
+}
+
+static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct cxl *adapter = file->private_data;
+
+ pr_devel("in %s\n", __func__);
+
+ if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
+ return ioctl_transfer_image(adapter,
+ DOWNLOAD_IMAGE,
+ (struct cxl_adapter_image __user *)arg);
+ else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
+ return ioctl_transfer_image(adapter,
+ VALIDATE_IMAGE,
+ (struct cxl_adapter_image __user *)arg);
+ else
+ return -EINVAL;
+}
+
+static long device_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return device_ioctl(file, cmd, arg);
+}
+
+static int device_close(struct inode *inode, struct file *file)
+{
+ struct cxl *adapter = file->private_data;
+ int i;
+
+ pr_devel("in %s\n", __func__);
+
+ for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
+ if (buffer[i])
+ free_page((unsigned long) buffer[i]);
+ }
+
+ if (le)
+ free_page((unsigned long) le);
+
+ up(&sem);
+ put_device(&adapter->dev);
+ continue_token = 0;
+
+ /* reload the module */
+ if (transfer)
+ cxl_guest_reload_module(adapter);
+ else {
+ pr_devel("resetting adapter\n");
+ cxl_h_reset_adapter(adapter->guest->handle);
+ }
+
+ transfer = 0;
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = device_open,
+ .unlocked_ioctl = device_ioctl,
+ .compat_ioctl = device_compat_ioctl,
+ .release = device_close,
+};
+
+void cxl_guest_remove_chardev(struct cxl *adapter)
+{
+ cdev_del(&adapter->guest->cdev);
+}
+
+int cxl_guest_add_chardev(struct cxl *adapter)
+{
+ dev_t devt;
+ int rc;
+
+ devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
+ cdev_init(&adapter->guest->cdev, &fops);
+ if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
+ dev_err(&adapter->dev,
+ "Unable to add chardev on adapter (card%i): %i\n",
+ adapter->adapter_num, rc);
+ goto err;
+ }
+ adapter->dev.devt = devt;
+ sema_init(&sem, 1);
+err:
+ return rc;
+}
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
new file mode 100644
index 000000000000..8213372de2b7
--- /dev/null
+++ b/drivers/misc/cxl/guest.c
@@ -0,0 +1,1177 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+
+#include "cxl.h"
+#include "hcalls.h"
+#include "trace.h"
+
+#define CXL_ERROR_DETECTED_EVENT 1
+#define CXL_SLOT_RESET_EVENT 2
+#define CXL_RESUME_EVENT 3
+
+static void pci_error_handlers(struct cxl_afu *afu,
+ int bus_error_event,
+ pci_channel_state_t state)
+{
+ struct pci_dev *afu_dev;
+
+ if (afu->phb == NULL)
+ return;
+
+ list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+ if (!afu_dev->driver)
+ continue;
+
+ switch (bus_error_event) {
+ case CXL_ERROR_DETECTED_EVENT:
+ afu_dev->error_state = state;
+
+ if (afu_dev->driver->err_handler &&
+ afu_dev->driver->err_handler->error_detected)
+ afu_dev->driver->err_handler->error_detected(afu_dev, state);
+ break;
+ case CXL_SLOT_RESET_EVENT:
+ afu_dev->error_state = state;
+
+ if (afu_dev->driver->err_handler &&
+ afu_dev->driver->err_handler->slot_reset)
+ afu_dev->driver->err_handler->slot_reset(afu_dev);
+ break;
+ case CXL_RESUME_EVENT:
+ if (afu_dev->driver->err_handler &&
+ afu_dev->driver->err_handler->resume)
+ afu_dev->driver->err_handler->resume(afu_dev);
+ break;
+ }
+ }
+}
+
+static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
+ u64 errstat)
+{
+ pr_devel("in %s\n", __func__);
+ dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
+
+ return cxl_ops->ack_irq(ctx, 0, errstat);
+}
+
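+/*
+ * Collect VPD through the hypervisor: build a scatter/gather list of
+ * zeroed pages, pass its physical address to the hcall, then copy back
+ * as much of the returned data as fits in the caller's buffer.
+ */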
+static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
+ void *buf, size_t len)
+{
+ unsigned int entries, mod;
+ unsigned long **vpd_buf = NULL;
+ struct sg_list *le;
+ int rc = 0, i, tocopy;
+ u64 out = 0;
+
+ if (buf == NULL)
+ return -EINVAL;
+
+ /* number of entries in the list */
+ entries = len / SG_BUFFER_SIZE;
+ mod = len % SG_BUFFER_SIZE;
+ if (mod)
+ entries++;
+
+ if (entries > SG_MAX_ENTRIES) {
+ entries = SG_MAX_ENTRIES;
+ len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
+ mod = 0;
+ }
+
+ vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL);
+ if (!vpd_buf)
+ return -ENOMEM;
+
+ le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
+ if (!le) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ for (i = 0; i < entries; i++) {
+ vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (!vpd_buf[i]) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+ le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
+ le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
+ if ((i == (entries - 1)) && mod)
+ le[i].len = cpu_to_be64(mod);
+ }
+
+ if (adapter)
+ rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
+ virt_to_phys(le), entries, &out);
+ else
+ rc = cxl_h_collect_vpd(afu->guest->handle, 0,
+ virt_to_phys(le), entries, &out);
+ pr_devel("length of available (entries: %i), vpd: %#llx\n",
+ entries, out);
+
+ if (!rc) {
+ /*
+ * hcall returns in 'out' the size of available VPDs.
+ * It fills the buffer with as much data as possible.
+ */
+ if (out < len)
+ len = out;
+ rc = len;
+ if (out) {
+ for (i = 0; i < entries; i++) {
+ if (len < SG_BUFFER_SIZE)
+ tocopy = len;
+ else
+ tocopy = SG_BUFFER_SIZE;
+ memcpy(buf, vpd_buf[i], tocopy);
+ buf += tocopy;
+ len -= tocopy;
+ }
+ }
+ }
+err2:
+ for (i = 0; i < entries; i++) {
+ if (vpd_buf[i])
+ free_page((unsigned long) vpd_buf[i]);
+ }
+ free_page((unsigned long) le);
+err1:
+ kfree(vpd_buf);
+ return rc;
+}
+
+static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
+{
+ return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
+}
+
+static irqreturn_t guest_psl_irq(int irq, void *data)
+{
+ struct cxl_context *ctx = data;
+ struct cxl_irq_info irq_info;
+ int rc;
+
+ pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
+ rc = guest_get_irq_info(ctx, &irq_info);
+ if (rc) {
+ WARN(1, "Unable to get IRQ info: %i\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ rc = cxl_irq(irq, ctx, &irq_info);
+ return rc;
+}
+
+static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
+{
+ u64 state;
+ int rc = 0;
+
+ rc = cxl_h_read_error_state(afu->guest->handle, &state);
+ if (!rc) {
+ WARN_ON(state != H_STATE_NORMAL &&
+ state != H_STATE_DISABLE &&
+ state != H_STATE_TEMP_UNAVAILABLE &&
+ state != H_STATE_PERM_UNAVAILABLE);
+ *state_out = state & 0xffffffff;
+ }
+ return rc;
+}
+
+static irqreturn_t guest_slice_irq_err(int irq, void *data)
+{
+ struct cxl_afu *afu = data;
+ int rc;
+ u64 serr;
+
+ WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
+ rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
+ if (rc) {
+ dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
+ return IRQ_HANDLED;
+ }
+ dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
+
+ rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
+ if (rc)
+ dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
+ rc);
+
+ return IRQ_HANDLED;
+}
+
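+/*
+ * First-fit search for 'len' contiguous interrupts in the ranges
+ * donated by the hypervisor; returns the first hwirq in *irq.
+ */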
+static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
+{
+ int i, n;
+ struct irq_avail *cur;
+
+ for (i = 0; i < adapter->guest->irq_nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
+ 0, len, 0);
+ if (n < cur->range) {
+ bitmap_set(cur->bitmap, n, len);
+ *irq = cur->offset + n;
+ pr_devel("guest: allocate IRQs %#x->%#x\n",
+ *irq, *irq + len - 1);
+
+ return 0;
+ }
+ }
+ return -ENOSPC;
+}
+
+static int irq_free_range(struct cxl *adapter, int irq, int len)
+{
+ int i, n;
+ struct irq_avail *cur;
+
+ if (len == 0)
+ return -ENOENT;
+
+ for (i = 0; i < adapter->guest->irq_nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ if (irq >= cur->offset &&
+ (irq + len) <= (cur->offset + cur->range)) {
+ n = irq - cur->offset;
+ bitmap_clear(cur->bitmap, n, len);
+ pr_devel("guest: release IRQs %#x->%#x\n",
+ irq, irq + len - 1);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
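+/*
+ * Adapter reset follows an EEH-like sequence: report the error to AFU
+ * drivers, detach all contexts, reset through the hypervisor, then
+ * report slot reset and resume so drivers can recover.
+ */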
+static int guest_reset(struct cxl *adapter)
+{
+ struct cxl_afu *afu = NULL;
+ int i, rc;
+
+ pr_devel("Adapter reset request\n");
+ for (i = 0; i < adapter->slices; i++) {
+ if ((afu = adapter->afu[i])) {
+ pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
+ pci_channel_io_frozen);
+ cxl_context_detach_all(afu);
+ }
+ }
+
+ rc = cxl_h_reset_adapter(adapter->guest->handle);
+ for (i = 0; i < adapter->slices; i++) {
+ if (!rc && (afu = adapter->afu[i])) {
+ pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
+ pci_channel_io_normal);
+ pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
+ }
+ }
+ return rc;
+}
+
+static int guest_alloc_one_irq(struct cxl *adapter)
+{
+ int irq;
+
+ spin_lock(&adapter->guest->irq_alloc_lock);
+ if (irq_alloc_range(adapter, 1, &irq))
+ irq = -ENOSPC;
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+ return irq;
+}
+
+static void guest_release_one_irq(struct cxl *adapter, int irq)
+{
+ spin_lock(&adapter->guest->irq_alloc_lock);
+ irq_free_range(adapter, irq, 1);
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+}
+
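+/*
+ * Allocate 'num' interrupts over up to CXL_IRQ_RANGES ranges. When a
+ * contiguous block of the remaining size can't be found, halve the
+ * request and retry, spreading the allocation across several ranges.
+ */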
+static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter, unsigned int num)
+{
+ int i, try, irq;
+
+ memset(irqs, 0, sizeof(struct cxl_irq_ranges));
+
+ spin_lock(&adapter->guest->irq_alloc_lock);
+ for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
+ try = num;
+ while (try) {
+ if (irq_alloc_range(adapter, try, &irq) == 0)
+ break;
+ try /= 2;
+ }
+ if (!try)
+ goto error;
+ irqs->offset[i] = irq;
+ irqs->range[i] = try;
+ num -= try;
+ }
+ if (num)
+ goto error;
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+ return 0;
+
+error:
+ for (i = 0; i < CXL_IRQ_RANGES; i++)
+ irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+ return -ENOSPC;
+}
+
+static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter)
+{
+ int i;
+
+ spin_lock(&adapter->guest->irq_alloc_lock);
+ for (i = 0; i < CXL_IRQ_RANGES; i++)
+ irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+}
+
+static int guest_register_serr_irq(struct cxl_afu *afu)
+{
+ afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+ dev_name(&afu->dev));
+ if (!afu->err_irq_name)
+ return -ENOMEM;
+
+ if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
+ guest_slice_irq_err, afu, afu->err_irq_name))) {
+ kfree(afu->err_irq_name);
+ afu->err_irq_name = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void guest_release_serr_irq(struct cxl_afu *afu)
+{
+ cxl_unmap_irq(afu->serr_virq, afu);
+ cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
+ kfree(afu->err_irq_name);
+}
+
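+/*
+ * Only the upper 32 bits of the TFC value are passed to the
+ * hypervisor, along with whether a PSL reset is also required.
+ */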
+static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
+{
+ return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
+ tfc >> 32, (psl_reset_mask != 0));
+}
+
+static void disable_afu_irqs(struct cxl_context *ctx)
+{
+ irq_hw_number_t hwirq;
+ unsigned int virq;
+ int r, i;
+
+ pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ hwirq = ctx->irqs.offset[r];
+ for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+ virq = irq_find_mapping(NULL, hwirq);
+ disable_irq(virq);
+ }
+ }
+}
+
+static void enable_afu_irqs(struct cxl_context *ctx)
+{
+ irq_hw_number_t hwirq;
+ unsigned int virq;
+ int r, i;
+
+ pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ hwirq = ctx->irqs.offset[r];
+ for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+ virq = irq_find_mapping(NULL, hwirq);
+ enable_irq(virq);
+ }
+ }
+}
+
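+/*
+ * AFU configuration records are read through an hcall: the hypervisor
+ * copies 'sz' bytes of the record into a page we provide. Values are
+ * little-endian, as in PCI configuration space.
+ */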
+static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
+ u64 offset, u64 *val)
+{
+ unsigned long cr;
+ char c;
+ int rc = 0;
+
+ if (afu->crs_len < sz)
+ return -ENOENT;
+
+ if (unlikely(offset >= afu->crs_len))
+ return -ERANGE;
+
+ cr = get_zeroed_page(GFP_KERNEL);
+ if (!cr)
+ return -ENOMEM;
+
+ rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
+ virt_to_phys((void *)cr), sz);
+ if (rc)
+ goto err;
+
+ switch (sz) {
+ case 1:
+ c = *((char *) cr);
+ *val = c;
+ break;
+ case 2:
+ *val = in_le16((u16 *)cr);
+ break;
+ case 4:
+ *val = in_le32((unsigned *)cr);
+ break;
+ case 8:
+ *val = in_le64((u64 *)cr);
+ break;
+ default:
+ WARN_ON(1);
+ }
+err:
+ free_page(cr);
+ return rc;
+}
+
+static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
+ u32 *out)
+{
+ int rc;
+ u64 val;
+
+ rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
+ if (!rc)
+ *out = (u32) val;
+ return rc;
+}
+
+static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
+ u16 *out)
+{
+ int rc;
+ u64 val;
+
+ rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
+ if (!rc)
+ *out = (u16) val;
+ return rc;
+}
+
+static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
+ u8 *out)
+{
+ int rc;
+ u64 val;
+
+ rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
+ if (!rc)
+ *out = (u8) val;
+ return rc;
+}
+
+static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
+ u64 *out)
+{
+ return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
+}
+
+static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
+{
+ /* config record is not writable from guest */
+ return -EPERM;
+}
+
+static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
+{
+ /* config record is not writable from guest */
+ return -EPERM;
+}
+
+static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
+{
+ /* config record is not writable from guest */
+ return -EPERM;
+}
+
+static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
+{
+ struct cxl_process_element_hcall *elem;
+ struct cxl *adapter = ctx->afu->adapter;
+ const struct cred *cred;
+ u32 pid, idx;
+ int rc, r, i;
+ u64 mmio_addr, mmio_size;
+ __be64 flags = 0;
+
+ /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
+ if (!(elem = (struct cxl_process_element_hcall *)
+ get_zeroed_page(GFP_KERNEL)))
+ return -ENOMEM;
+
+ elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
+ if (ctx->kernel) {
+ pid = 0;
+ flags |= CXL_PE_TRANSLATION_ENABLED;
+ flags |= CXL_PE_PRIVILEGED_PROCESS;
+ if (mfmsr() & MSR_SF)
+ flags |= CXL_PE_64_BIT;
+ } else {
+ pid = current->pid;
+ flags |= CXL_PE_PROBLEM_STATE;
+ flags |= CXL_PE_TRANSLATION_ENABLED;
+ if (!test_tsk_thread_flag(current, TIF_32BIT))
+ flags |= CXL_PE_64_BIT;
+ cred = get_current_cred();
+ if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
+ flags |= CXL_PE_PRIVILEGED_PROCESS;
+ put_cred(cred);
+ }
+ elem->flags = cpu_to_be64(flags);
+ elem->common.tid = cpu_to_be32(0); /* Unused */
+ elem->common.pid = cpu_to_be32(pid);
+ elem->common.csrp = cpu_to_be64(0); /* disable */
+ elem->common.aurp0 = cpu_to_be64(0); /* disable */
+ elem->common.aurp1 = cpu_to_be64(0); /* disable */
+
+ cxl_prefault(ctx, wed);
+
+ elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
+ elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
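+	/*
+	 * The first interrupt of the first range is the multiplexed PSL
+	 * interrupt; all others are flagged in the virtual ISN bitmap,
+	 * indexed relative to the adapter's IRQ base offset.
+	 */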
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ for (i = 0; i < ctx->irqs.range[r]; i++) {
+ if (r == 0 && i == 0) {
+ elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
+ } else {
+ idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
+ elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
+ }
+ }
+ }
+ elem->common.amr = cpu_to_be64(amr);
+ elem->common.wed = cpu_to_be64(wed);
+
+ disable_afu_irqs(ctx);
+
+ rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
+ &ctx->process_token, &mmio_addr, &mmio_size);
+ if (rc == H_SUCCESS) {
+ if (ctx->master || !ctx->afu->pp_psa) {
+ ctx->psn_phys = ctx->afu->psn_phys;
+ ctx->psn_size = ctx->afu->adapter->ps_size;
+ } else {
+ ctx->psn_phys = mmio_addr;
+ ctx->psn_size = mmio_size;
+ }
+ if (ctx->afu->pp_psa && mmio_size &&
+ ctx->afu->pp_size == 0) {
+			/*
+			 * There's no property in the device tree to read the
+			 * pp_size. We only find out at the first attach.
+			 * Compared to bare-metal, that is too late and we
+			 * should really lock here. However, on PowerVM,
+			 * pp_size is only used for display in /sys.
+			 * Being discussed with pHyp for their next release.
+			 */
+ ctx->afu->pp_size = mmio_size;
+ }
+ /* from PAPR: process element is bytes 4-7 of process token */
+ ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
+		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx\n",
+			ctx->pe, ctx->external_pe, ctx->psn_size);
+ ctx->pe_inserted = true;
+ enable_afu_irqs(ctx);
+ }
+
+ free_page((u64)elem);
+ return rc;
+}
+
+static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
+{
+ pr_devel("in %s\n", __func__);
+
+ ctx->kernel = kernel;
+ if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
+ return attach_afu_directed(ctx, wed, amr);
+
+ /* dedicated mode not supported on FW840 */
+
+ return -EINVAL;
+}
+
+static int detach_afu_directed(struct cxl_context *ctx)
+{
+ if (!ctx->pe_inserted)
+ return 0;
+ if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
+ return -1;
+ return 0;
+}
+
+static int guest_detach_process(struct cxl_context *ctx)
+{
+ pr_devel("in %s\n", __func__);
+ trace_cxl_detach(ctx);
+
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
+ return -EIO;
+
+ if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
+ return detach_afu_directed(ctx);
+
+ return -EINVAL;
+}
+
+static void guest_release_afu(struct device *dev)
+{
+ struct cxl_afu *afu = to_cxl_afu(dev);
+
+ pr_devel("%s\n", __func__);
+
+ idr_destroy(&afu->contexts_idr);
+
+ kfree(afu->guest);
+ kfree(afu);
+}
+
+ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
+{
+ return guest_collect_vpd(NULL, afu, buf, len);
+}
+
+#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
+static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
+ loff_t off, size_t count)
+{
+ void *tbuf = NULL;
+ int rc = 0;
+
+ tbuf = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!tbuf)
+ return -ENOMEM;
+
+ rc = cxl_h_get_afu_err(afu->guest->handle,
+ off & 0x7,
+ virt_to_phys(tbuf),
+ count);
+ if (rc)
+ goto err;
+
+ if (count > ERR_BUFF_MAX_COPY_SIZE)
+ count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
+ memcpy(buf, tbuf, count);
+err:
+ free_page((u64)tbuf);
+
+ return rc;
+}
+
+static int guest_afu_check_and_enable(struct cxl_afu *afu)
+{
+ return 0;
+}
+
+static bool guest_support_attributes(const char *attr_name,
+ enum cxl_attrs type)
+{
+ switch (type) {
+ case CXL_ADAPTER_ATTRS:
+ if ((strcmp(attr_name, "base_image") == 0) ||
+ (strcmp(attr_name, "load_image_on_perst") == 0) ||
+ (strcmp(attr_name, "perst_reloads_same_image") == 0) ||
+ (strcmp(attr_name, "image_loaded") == 0))
+ return false;
+ break;
+ case CXL_AFU_MASTER_ATTRS:
+ if ((strcmp(attr_name, "pp_mmio_off") == 0))
+ return false;
+ break;
+ case CXL_AFU_ATTRS:
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static int activate_afu_directed(struct cxl_afu *afu)
+{
+ int rc;
+
+ dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);
+
+ afu->current_mode = CXL_MODE_DIRECTED;
+
+ afu->num_procs = afu->max_procs_virtualised;
+
+ if ((rc = cxl_chardev_m_afu_add(afu)))
+ return rc;
+
+ if ((rc = cxl_sysfs_afu_m_add(afu)))
+ goto err;
+
+ if ((rc = cxl_chardev_s_afu_add(afu)))
+ goto err1;
+
+ return 0;
+err1:
+ cxl_sysfs_afu_m_remove(afu);
+err:
+ cxl_chardev_afu_remove(afu);
+ return rc;
+}
+
+static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
+{
+ if (!mode)
+ return 0;
+ if (!(mode & afu->modes_supported))
+ return -EINVAL;
+
+ if (mode == CXL_MODE_DIRECTED)
+ return activate_afu_directed(afu);
+
+ if (mode == CXL_MODE_DEDICATED)
+ dev_err(&afu->dev, "Dedicated mode not supported\n");
+
+ return -EINVAL;
+}
+
+static int deactivate_afu_directed(struct cxl_afu *afu)
+{
+ dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);
+
+ afu->current_mode = 0;
+ afu->num_procs = 0;
+
+ cxl_sysfs_afu_m_remove(afu);
+ cxl_chardev_afu_remove(afu);
+
+ cxl_ops->afu_reset(afu);
+
+ return 0;
+}
+
+static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
+{
+ if (!mode)
+ return 0;
+ if (!(mode & afu->modes_supported))
+ return -EINVAL;
+
+ if (mode == CXL_MODE_DIRECTED)
+ return deactivate_afu_directed(afu);
+ return 0;
+}
+
+static int guest_afu_reset(struct cxl_afu *afu)
+{
+ pr_devel("AFU(%d) reset request\n", afu->slice);
+ return cxl_h_reset_afu(afu->guest->handle);
+}
+
+static int guest_map_slice_regs(struct cxl_afu *afu)
+{
+ if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
+ dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
+ afu->slice);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void guest_unmap_slice_regs(struct cxl_afu *afu)
+{
+ if (afu->p2n_mmio)
+ iounmap(afu->p2n_mmio);
+}
+
+static int afu_update_state(struct cxl_afu *afu)
+{
+ int rc, cur_state;
+
+ rc = afu_read_error_state(afu, &cur_state);
+ if (rc)
+ return rc;
+
+ if (afu->guest->previous_state == cur_state)
+ return 0;
+
+ pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);
+
+ switch (cur_state) {
+ case H_STATE_NORMAL:
+ afu->guest->previous_state = cur_state;
+ rc = 1;
+ break;
+
+ case H_STATE_DISABLE:
+ pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
+ pci_channel_io_frozen);
+
+ cxl_context_detach_all(afu);
+ if ((rc = cxl_ops->afu_reset(afu)))
+ pr_devel("reset hcall failed %d\n", rc);
+
+ rc = afu_read_error_state(afu, &cur_state);
+ if (!rc && cur_state == H_STATE_NORMAL) {
+ pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
+ pci_channel_io_normal);
+ pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
+ rc = 1;
+ }
+ afu->guest->previous_state = 0;
+ break;
+
+ case H_STATE_TEMP_UNAVAILABLE:
+ afu->guest->previous_state = cur_state;
+ break;
+
+ case H_STATE_PERM_UNAVAILABLE:
+ dev_err(&afu->dev, "AFU is in permanent error state\n");
+ pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
+ pci_channel_io_perm_failure);
+ afu->guest->previous_state = cur_state;
+ break;
+
+ default:
+ pr_err("Unexpected AFU(%d) error state: %#x\n",
+ afu->slice, cur_state);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int afu_do_recovery(struct cxl_afu *afu)
+{
+ int rc;
+
+	/* Many threads can arrive here, in case of detach_all for example.
+	 * Only one needs to drive the recovery.
+	 */
+ if (mutex_trylock(&afu->guest->recovery_lock)) {
+ rc = afu_update_state(afu);
+ mutex_unlock(&afu->guest->recovery_lock);
+ return rc;
+ }
+ return 0;
+}
+
+static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
+{
+ int state;
+
+ if (afu) {
+ if (afu_read_error_state(afu, &state) ||
+ state != H_STATE_NORMAL) {
+ if (afu_do_recovery(afu) > 0) {
+ /* check again in case we've just fixed it */
+ if (!afu_read_error_state(afu, &state) &&
+ state == H_STATE_NORMAL)
+ return true;
+ }
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int afu_properties_look_ok(struct cxl_afu *afu)
+{
+ if (afu->pp_irqs < 0) {
+ dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
+ return -EINVAL;
+ }
+
+ if (afu->max_procs_virtualised < 1) {
+ dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
+ return -EINVAL;
+ }
+
+ if (afu->crs_len < 0) {
+ dev_err(&afu->dev, "Unexpected configuration record size value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
+{
+ struct cxl_afu *afu;
+ bool free = true;
+ int rc;
+
+ pr_devel("in %s - AFU(%d)\n", __func__, slice);
+ if (!(afu = cxl_alloc_afu(adapter, slice)))
+ return -ENOMEM;
+
+ if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
+ kfree(afu);
+ return -ENOMEM;
+ }
+
+ mutex_init(&afu->guest->recovery_lock);
+
+ if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
+ adapter->adapter_num,
+ slice)))
+ goto err1;
+
+ adapter->slices++;
+
+ if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
+ goto err1;
+
+ if ((rc = cxl_ops->afu_reset(afu)))
+ goto err1;
+
+ if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
+ goto err1;
+
+ if ((rc = afu_properties_look_ok(afu)))
+ goto err1;
+
+ if ((rc = guest_map_slice_regs(afu)))
+ goto err1;
+
+ if ((rc = guest_register_serr_irq(afu)))
+ goto err2;
+
+ /*
+ * After we call this function we must not free the afu directly, even
+ * if it returns an error!
+ */
+ if ((rc = cxl_register_afu(afu)))
+ goto err_put1;
+
+ if ((rc = cxl_sysfs_afu_add(afu)))
+ goto err_put1;
+
+	/*
+	 * pHyp doesn't expose the programming models supported by the
+	 * AFU. pHyp currently only supports directed mode. If it adds
+	 * dedicated mode later, this version of cxl has no way to
+	 * detect it. So we'll initialize the driver, but the first
+	 * attach will fail.
+	 * This is being discussed with pHyp to do better (likely via a
+	 * new property).
+	 */
+ if (afu->max_procs_virtualised == 1)
+ afu->modes_supported = CXL_MODE_DEDICATED;
+ else
+ afu->modes_supported = CXL_MODE_DIRECTED;
+
+ if ((rc = cxl_afu_select_best_mode(afu)))
+ goto err_put2;
+
+ adapter->afu[afu->slice] = afu;
+
+ afu->enabled = true;
+
+ if ((rc = cxl_pci_vphb_add(afu)))
+ dev_info(&afu->dev, "Can't register vPHB\n");
+
+ return 0;
+
+err_put2:
+ cxl_sysfs_afu_remove(afu);
+err_put1:
+ device_unregister(&afu->dev);
+ free = false;
+ guest_release_serr_irq(afu);
+err2:
+ guest_unmap_slice_regs(afu);
+err1:
+ if (free) {
+ kfree(afu->guest);
+ kfree(afu);
+ }
+ return rc;
+}
+
+void cxl_guest_remove_afu(struct cxl_afu *afu)
+{
+ pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);
+
+ if (!afu)
+ return;
+
+ cxl_pci_vphb_remove(afu);
+ cxl_sysfs_afu_remove(afu);
+
+ spin_lock(&afu->adapter->afu_list_lock);
+ afu->adapter->afu[afu->slice] = NULL;
+ spin_unlock(&afu->adapter->afu_list_lock);
+
+ cxl_context_detach_all(afu);
+ cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
+ guest_release_serr_irq(afu);
+ guest_unmap_slice_regs(afu);
+
+ device_unregister(&afu->dev);
+}
+
+static void free_adapter(struct cxl *adapter)
+{
+ struct irq_avail *cur;
+ int i;
+
+ if (adapter->guest->irq_avail) {
+ for (i = 0; i < adapter->guest->irq_nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ kfree(cur->bitmap);
+ }
+ kfree(adapter->guest->irq_avail);
+ }
+ kfree(adapter->guest->status);
+ cxl_remove_adapter_nr(adapter);
+ kfree(adapter->guest);
+ kfree(adapter);
+}
+
+static int properties_look_ok(struct cxl *adapter)
+{
+ /* The absence of this property means that the operational
+ * status is unknown or okay
+ */
+ if (strlen(adapter->guest->status) &&
+ strcmp(adapter->guest->status, "okay")) {
+		pr_err("ABORTING: Bad operational status of the device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
+{
+ return guest_collect_vpd(adapter, NULL, buf, len);
+}
+
+void cxl_guest_remove_adapter(struct cxl *adapter)
+{
+ pr_devel("in %s\n", __func__);
+
+ cxl_sysfs_adapter_remove(adapter);
+
+ cxl_guest_remove_chardev(adapter);
+ device_unregister(&adapter->dev);
+}
+
+static void release_adapter(struct device *dev)
+{
+ free_adapter(to_cxl_adapter(dev));
+}
+
+struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
+{
+ struct cxl *adapter;
+ bool free = true;
+ int rc;
+
+ if (!(adapter = cxl_alloc_adapter()))
+ return ERR_PTR(-ENOMEM);
+
+ if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
+ free_adapter(adapter);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ adapter->slices = 0;
+ adapter->guest->pdev = pdev;
+ adapter->dev.parent = &pdev->dev;
+ adapter->dev.release = release_adapter;
+ dev_set_drvdata(&pdev->dev, adapter);
+
+ if ((rc = cxl_of_read_adapter_handle(adapter, np)))
+ goto err1;
+
+ if ((rc = cxl_of_read_adapter_properties(adapter, np)))
+ goto err1;
+
+ if ((rc = properties_look_ok(adapter)))
+ goto err1;
+
+ if ((rc = cxl_guest_add_chardev(adapter)))
+ goto err1;
+
+ /*
+ * After we call this function we must not free the adapter directly,
+ * even if it returns an error!
+ */
+ if ((rc = cxl_register_adapter(adapter)))
+ goto err_put1;
+
+ if ((rc = cxl_sysfs_adapter_add(adapter)))
+ goto err_put1;
+
+ return adapter;
+
+err_put1:
+ device_unregister(&adapter->dev);
+ free = false;
+ cxl_guest_remove_chardev(adapter);
+err1:
+ if (free)
+ free_adapter(adapter);
+ return ERR_PTR(rc);
+}
+
+void cxl_guest_reload_module(struct cxl *adapter)
+{
+ struct platform_device *pdev;
+
+ pdev = adapter->guest->pdev;
+ cxl_guest_remove_adapter(adapter);
+
+ cxl_of_probe(pdev);
+}
+
+const struct cxl_backend_ops cxl_guest_ops = {
+ .module = THIS_MODULE,
+ .adapter_reset = guest_reset,
+ .alloc_one_irq = guest_alloc_one_irq,
+ .release_one_irq = guest_release_one_irq,
+ .alloc_irq_ranges = guest_alloc_irq_ranges,
+ .release_irq_ranges = guest_release_irq_ranges,
+ .setup_irq = NULL,
+ .handle_psl_slice_error = guest_handle_psl_slice_error,
+ .psl_interrupt = guest_psl_irq,
+ .ack_irq = guest_ack_irq,
+ .attach_process = guest_attach_process,
+ .detach_process = guest_detach_process,
+ .support_attributes = guest_support_attributes,
+ .link_ok = guest_link_ok,
+ .release_afu = guest_release_afu,
+ .afu_read_err_buffer = guest_afu_read_err_buffer,
+ .afu_check_and_enable = guest_afu_check_and_enable,
+ .afu_activate_mode = guest_afu_activate_mode,
+ .afu_deactivate_mode = guest_afu_deactivate_mode,
+ .afu_reset = guest_afu_reset,
+ .afu_cr_read8 = guest_afu_cr_read8,
+ .afu_cr_read16 = guest_afu_cr_read16,
+ .afu_cr_read32 = guest_afu_cr_read32,
+ .afu_cr_read64 = guest_afu_cr_read64,
+ .afu_cr_write8 = guest_afu_cr_write8,
+ .afu_cr_write16 = guest_afu_cr_write16,
+ .afu_cr_write32 = guest_afu_cr_write32,
+ .read_adapter_vpd = cxl_guest_read_adapter_vpd,
+};
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c
new file mode 100644
index 000000000000..d6d11f4056d7
--- /dev/null
+++ b/drivers/misc/cxl/hcalls.c
@@ -0,0 +1,647 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include "hcalls.h"
+#include "trace.h"
+
+#define CXL_HCALL_TIMEOUT 60000
+#define CXL_HCALL_TIMEOUT_DOWNLOAD 120000
+
+#define H_ATTACH_CA_PROCESS 0x344
+#define H_CONTROL_CA_FUNCTION 0x348
+#define H_DETACH_CA_PROCESS 0x34C
+#define H_COLLECT_CA_INT_INFO 0x350
+#define H_CONTROL_CA_FAULTS 0x354
+#define H_DOWNLOAD_CA_FUNCTION 0x35C
+#define H_DOWNLOAD_CA_FACILITY 0x364
+#define H_CONTROL_CA_FACILITY 0x368
+
+#define H_CONTROL_CA_FUNCTION_RESET 1 /* perform a reset */
+#define H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS 2 /* suspend a process from being executed */
+#define H_CONTROL_CA_FUNCTION_RESUME_PROCESS 3 /* resume a process to be executed */
+#define H_CONTROL_CA_FUNCTION_READ_ERR_STATE 4 /* read the error state */
+#define H_CONTROL_CA_FUNCTION_GET_AFU_ERR 5 /* collect the AFU error buffer */
+#define H_CONTROL_CA_FUNCTION_GET_CONFIG 6 /* collect configuration record */
+#define H_CONTROL_CA_FUNCTION_GET_DOWNLOAD_STATE 7 /* query to return download status */
+#define H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS 8 /* terminate the process before completion */
+#define H_CONTROL_CA_FUNCTION_COLLECT_VPD 9 /* collect VPD */
+#define H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT 11 /* read the function-wide error data based on an interrupt */
+#define H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT 12 /* acknowledge function-wide error data based on an interrupt */
+#define H_CONTROL_CA_FUNCTION_GET_ERROR_LOG 13 /* retrieve the Platform Log ID (PLID) of an error log */
+
+#define H_CONTROL_CA_FAULTS_RESPOND_PSL 1
+#define H_CONTROL_CA_FAULTS_RESPOND_AFU 2
+
+#define H_CONTROL_CA_FACILITY_RESET 1 /* perform a reset */
+#define H_CONTROL_CA_FACILITY_COLLECT_VPD 2 /* collect VPD */
+
+#define H_DOWNLOAD_CA_FACILITY_DOWNLOAD 1 /* download adapter image */
+#define H_DOWNLOAD_CA_FACILITY_VALIDATE 2 /* validate adapter image */
+
+
+#define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...) \
+ { \
+ unsigned int delay, total_delay = 0; \
+ u64 token = 0; \
+ \
+ memset(retbuf, 0, sizeof(retbuf)); \
+ while (1) { \
+ rc = call(fn, retbuf, __VA_ARGS__, token); \
+ token = retbuf[0]; \
+ if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) \
+ break; \
+ \
+ if (rc == H_BUSY) \
+ delay = 10; \
+ else \
+ delay = get_longbusy_msecs(rc); \
+ \
+ total_delay += delay; \
+ if (total_delay > CXL_HCALL_TIMEOUT) { \
+ WARN(1, "Warning: Giving up waiting for CXL hcall " \
+ "%#x after %u msec\n", fn, total_delay); \
+ rc = H_BUSY; \
+ break; \
+ } \
+ msleep(delay); \
+ } \
+ }
+#define CXL_H_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall, __VA_ARGS__)
+#define CXL_H9_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall9, __VA_ARGS__)
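+
+/*
+ * Illustrative only: a call such as
+ *
+ *	CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS,
+ *			      unit_address, process_token);
+ *
+ * keeps reissuing plpar_hcall() with the continuation token returned
+ * in retbuf[0], sleeping 10ms on H_BUSY or get_longbusy_msecs(rc) on a
+ * long-busy return, and gives up with H_BUSY once the accumulated
+ * delay exceeds CXL_HCALL_TIMEOUT (60s).
+ */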
+
+#define _PRINT_MSG(rc, format, ...) \
+ { \
+ if ((rc != H_SUCCESS) && (rc != H_CONTINUE)) \
+ pr_err(format, __VA_ARGS__); \
+ else \
+ pr_devel(format, __VA_ARGS__); \
+ } \
+
+
+static char *afu_op_names[] = {
+ "UNKNOWN_OP", /* 0 undefined */
+ "RESET", /* 1 */
+ "SUSPEND_PROCESS", /* 2 */
+ "RESUME_PROCESS", /* 3 */
+ "READ_ERR_STATE", /* 4 */
+ "GET_AFU_ERR", /* 5 */
+ "GET_CONFIG", /* 6 */
+ "GET_DOWNLOAD_STATE", /* 7 */
+ "TERMINATE_PROCESS", /* 8 */
+ "COLLECT_VPD", /* 9 */
+ "UNKNOWN_OP", /* 10 undefined */
+ "GET_FUNCTION_ERR_INT", /* 11 */
+ "ACK_FUNCTION_ERR_INT", /* 12 */
+ "GET_ERROR_LOG", /* 13 */
+};
+
+static char *control_adapter_op_names[] = {
+ "UNKNOWN_OP", /* 0 undefined */
+ "RESET", /* 1 */
+ "COLLECT_VPD", /* 2 */
+};
+
+static char *download_op_names[] = {
+ "UNKNOWN_OP", /* 0 undefined */
+ "DOWNLOAD", /* 1 */
+ "VALIDATE", /* 2 */
+};
+
+static char *op_str(unsigned int op, char *name_array[], int array_len)
+{
+ if (op >= array_len)
+ return "UNKNOWN_OP";
+ return name_array[op];
+}
+
+#define OP_STR(op, name_array) op_str(op, name_array, ARRAY_SIZE(name_array))
+
+#define OP_STR_AFU(op) OP_STR(op, afu_op_names)
+#define OP_STR_CONTROL_ADAPTER(op) OP_STR(op, control_adapter_op_names)
+#define OP_STR_DOWNLOAD_ADAPTER(op) OP_STR(op, download_op_names)
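+
+/*
+ * For example, OP_STR_AFU(H_CONTROL_CA_FUNCTION_GET_AFU_ERR) yields
+ * "GET_AFU_ERR" (index 5), while an out-of-range opcode such as 14
+ * falls back to "UNKNOWN_OP" instead of indexing past the array.
+ */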
+
+
+long cxl_h_attach_process(u64 unit_address,
+ struct cxl_process_element_hcall *element,
+ u64 *process_token, u64 *mmio_addr, u64 *mmio_size)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_ATTACH_CA_PROCESS, unit_address, virt_to_phys(element));
+ _PRINT_MSG(rc, "cxl_h_attach_process(%#.16llx, %#.16lx): %li\n",
+ unit_address, virt_to_phys(element), rc);
+ trace_cxl_hcall_attach(unit_address, virt_to_phys(element), retbuf[0], retbuf[1], retbuf[2], rc);
+
+ pr_devel("token: 0x%.8lx mmio_addr: 0x%lx mmio_size: 0x%lx\nProcess Element Structure:\n",
+ retbuf[0], retbuf[1], retbuf[2]);
+ cxl_dump_debug_buffer(element, sizeof(*element));
+
+ switch (rc) {
+ case H_SUCCESS: /* The process info is attached to the coherent platform function */
+ *process_token = retbuf[0];
+ if (mmio_addr)
+ *mmio_addr = retbuf[1];
+ if (mmio_size)
+ *mmio_size = retbuf[2];
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ case H_FUNCTION: /* The function is not supported. */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The coherent platform function does not have enough additional resource to attach the process */
+ case H_HARDWARE: /* A hardware event prevented the attach operation */
+ case H_STATE: /* The coherent platform function is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
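+
+/*
+ * A minimal, hypothetical caller sketch: the process element must live
+ * in a pinned, zeroed page, since the hypervisor is handed its real
+ * address (attach_afu_directed() in guest.c is the actual user):
+ *
+ *	struct cxl_process_element_hcall *elem;
+ *	u64 token, addr, size;
+ *	long rc;
+ *
+ *	elem = (void *)get_zeroed_page(GFP_KERNEL);
+ *	if (!elem)
+ *		return -ENOMEM;
+ *	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
+ *	rc = cxl_h_attach_process(unit_address, elem, &token, &addr, &size);
+ *	free_page((u64)elem);
+ */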
+
+/**
+ * cxl_h_detach_process - Detach a process element from a coherent
+ * platform function.
+ */
+long cxl_h_detach_process(u64 unit_address, u64 process_token)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS, unit_address, process_token);
+ _PRINT_MSG(rc, "cxl_h_detach_process(%#.16llx, 0x%.8llx): %li\n", unit_address, process_token, rc);
+ trace_cxl_hcall_detach(unit_address, process_token, rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The process was detached from the coherent platform function */
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The function has page table mappings for MMIO */
+ case H_HARDWARE: /* A hardware event prevented the detach operation */
+ case H_STATE: /* The coherent platform function is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
+ * the partition to manipulate or query
+ * certain coherent platform function behaviors.
+ */
+static long cxl_h_control_function(u64 unit_address, u64 op,
+ u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
+{
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ long rc;
+
+ CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FUNCTION, unit_address, op, p1, p2, p3, p4);
+ _PRINT_MSG(rc, "cxl_h_control_function(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
+ unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
+ trace_cxl_hcall_control_function(unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The operation is completed for the coherent platform function */
+ if ((op == H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT ||
+ op == H_CONTROL_CA_FUNCTION_READ_ERR_STATE ||
+ op == H_CONTROL_CA_FUNCTION_COLLECT_VPD))
+ *out = retbuf[0];
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ case H_FUNCTION: /* The function is not supported. */
+ case H_NOT_FOUND: /* The operation supplied was not valid */
+ case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
+	case H_SG_LIST: /* A block list entry was invalid */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The function has page table mappings for MMIO */
+ case H_HARDWARE: /* A hardware event prevented the attach operation */
+ case H_STATE: /* The coherent platform function is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_reset_afu - Perform a reset to the coherent platform function.
+ */
+long cxl_h_reset_afu(u64 unit_address)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_RESET,
+ 0, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_suspend_process - Suspend a process from being executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_suspend_process(u64 unit_address, u64 process_token)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS,
+ process_token, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_resume_process - Resume a process to be executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_resume_process(u64 unit_address, u64 process_token)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_RESUME_PROCESS,
+ process_token, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_read_error_state - Checks the error state of the coherent
+ * platform function.
+ * R4 contains the error state
+ */
+long cxl_h_read_error_state(u64 unit_address, u64 *state)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_READ_ERR_STATE,
+ 0, 0, 0, 0,
+ state);
+}
+
+/**
+ * cxl_h_get_afu_err - collect the AFU error buffer
+ * Parameter1 = byte offset into error buffer to retrieve, valid values
+ * are between 0 and (ibm,error-buffer-size - 1)
+ * Parameter2 = 4K aligned real address of error buffer, to be filled in
+ * Parameter3 = length of error buffer, valid values are 4K or less
+ */
+long cxl_h_get_afu_err(u64 unit_address, u64 offset,
+ u64 buf_address, u64 len)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_GET_AFU_ERR,
+ offset, buf_address, len, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_get_config - collect configuration record for the
+ * coherent platform function
+ * Parameter1 = # of configuration record to retrieve, valid values are
+ * between 0 and (ibm,#config-records - 1)
+ * Parameter2 = byte offset into configuration record to retrieve,
+ * valid values are between 0 and (ibm,config-record-size - 1)
+ * Parameter3 = 4K aligned real address of configuration record buffer,
+ * to be filled in
+ * Parameter4 = length of configuration buffer, valid values are 4K or less
+ */
+long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
+ u64 buf_address, u64 len)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_GET_CONFIG,
+ cr_num, offset, buf_address, len,
+ NULL);
+}
+
+/**
+ * cxl_h_terminate_process - Terminate the process before completion
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_terminate_process(u64 unit_address, u64 process_token)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS,
+ process_token, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_collect_vpd - Collect VPD for the coherent platform function.
+ * Parameter1 = # of VPD record to retrieve, valid values are between 0
+ * and (ibm,#config-records - 1).
+ * Parameter2 = 4K naturally aligned real buffer containing block
+ * list entries
+ * Parameter3 = number of block list entries in the block list, valid
+ * values are between 0 and 256
+ */
+long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
+ u64 num, u64 *out)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_COLLECT_VPD,
+ record, list_address, num, 0,
+ out);
+}
+
+/**
+ * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
+ */
+long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT,
+ 0, 0, 0, 0, reg);
+}
+
+/**
+ * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
+ * based on an interrupt
+ * Parameter1 = value to write to the function-wide error interrupt register
+ */
+long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT,
+ value, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
+ * an error log
+ */
+long cxl_h_get_error_log(u64 unit_address, u64 value)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_GET_ERROR_LOG,
+ 0, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_collect_int_info - Collect interrupt info about a coherent
+ * platform function after an interrupt occurred.
+ */
+long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
+ struct cxl_irq_info *info)
+{
+ long rc;
+
+ BUG_ON(sizeof(*info) != sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE]));
+
+ rc = plpar_hcall9(H_COLLECT_CA_INT_INFO, (unsigned long *) info,
+ unit_address, process_token);
+ _PRINT_MSG(rc, "cxl_h_collect_int_info(%#.16llx, 0x%llx): %li\n",
+ unit_address, process_token, rc);
+ trace_cxl_hcall_collect_int_info(unit_address, process_token, rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The interrupt info is returned in return registers. */
+ pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid:%u, tid:%u, afu_err:%#llx, errstat:%#llx\n",
+ info->dsisr, info->dar, info->dsr, info->pid,
+ info->tid, info->afu_err, info->errstat);
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall. */
+ case H_HARDWARE: /* A hardware event prevented the collection of the interrupt info.*/
+ case H_STATE: /* The coherent platform function is not in a valid state to collect interrupt info. */
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
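+
+/*
+ * Note: struct cxl_irq_info doubles as the hcall return buffer above,
+ * which is why its size must match unsigned long[PLPAR_HCALL9_BUFSIZE].
+ * A compile-time equivalent of that check would be:
+ *
+ *	BUILD_BUG_ON(sizeof(struct cxl_irq_info) !=
+ *		     sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE]));
+ */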
+
+/**
+ * cxl_h_control_faults - Control the operation of a coherent platform
+ * function after a fault occurs.
+ *
+ * Parameters
+ * control-mask: value to control the faults
+ * looks like PSL_TFC_An shifted >> 32
+ * reset-mask: mask to control reset of function faults
+ * Set reset_mask = 1 to reset PSL errors
+ */
+long cxl_h_control_faults(u64 unit_address, u64 process_token,
+ u64 control_mask, u64 reset_mask)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ memset(retbuf, 0, sizeof(retbuf));
+
+ rc = plpar_hcall(H_CONTROL_CA_FAULTS, retbuf, unit_address,
+ H_CONTROL_CA_FAULTS_RESPOND_PSL, process_token,
+ control_mask, reset_mask);
+ _PRINT_MSG(rc, "cxl_h_control_faults(%#.16llx, 0x%llx, %#llx, %#llx): %li (%#lx)\n",
+ unit_address, process_token, control_mask, reset_mask,
+ rc, retbuf[0]);
+ trace_cxl_hcall_control_faults(unit_address, process_token,
+ control_mask, reset_mask, retbuf[0], rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* Faults were successfully controlled for the function. */
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ return -EINVAL;
+ case H_HARDWARE: /* A hardware event prevented the control of faults. */
+ case H_STATE: /* The function was in an invalid state. */
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall; the coherent platform facilities may need to be licensed. */
+ return -EBUSY;
+ case H_FUNCTION: /* The function is not supported */
+ case H_NOT_FOUND: /* The operation supplied was not valid */
+ return -EINVAL;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call
+ * allows the partition to manipulate or query
+ * certain coherent platform facility behaviors.
+ */
+static long cxl_h_control_facility(u64 unit_address, u64 op,
+ u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
+{
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ long rc;
+
+ CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FACILITY, unit_address, op, p1, p2, p3, p4);
+ _PRINT_MSG(rc, "cxl_h_control_facility(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
+ unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
+ trace_cxl_hcall_control_facility(unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The operation is completed for the coherent platform facility */
+ if (op == H_CONTROL_CA_FACILITY_COLLECT_VPD)
+ *out = retbuf[0];
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ case H_FUNCTION: /* The function is not supported. */
+ case H_NOT_FOUND: /* The operation supplied was not valid */
+ case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
+	case H_SG_LIST: /* A block list entry was invalid */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The function has page table mappings for MMIO */
+ case H_HARDWARE: /* A hardware event prevented the attach operation */
+ case H_STATE: /* The coherent platform facility is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
+ */
+long cxl_h_reset_adapter(u64 unit_address)
+{
+ return cxl_h_control_facility(unit_address,
+ H_CONTROL_CA_FACILITY_RESET,
+ 0, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
+ * Parameter1 = 4K naturally aligned real buffer containing block
+ * list entries
+ * Parameter2 = number of block list entries in the block list, valid
+ * values are between 0 and 256
+ */
+long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
+ u64 num, u64 *out)
+{
+ return cxl_h_control_facility(unit_address,
+ H_CONTROL_CA_FACILITY_COLLECT_VPD,
+ list_address, num, 0, 0,
+ out);
+}
+
+/**
+ * cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
+ *                    hypervisor call provides platform support for
+ * downloading a base adapter image to the coherent
+ * platform facility, and for validating the entire
+ * image after the download.
+ * Parameters
+ * op: operation to perform to the coherent platform function
+ * Download: operation = 1, the base image in the coherent platform
+ * facility is first erased, and then
+ * programmed using the image supplied
+ * in the scatter/gather list.
+ * Validate: operation = 2, the base image in the coherent platform
+ * facility is compared with the image
+ * supplied in the scatter/gather list.
+ * list_address: 4K naturally aligned real buffer containing
+ * scatter/gather list entries.
+ * num: number of block list entries in the scatter/gather list.
+ */
+static long cxl_h_download_facility(u64 unit_address, u64 op,
+ u64 list_address, u64 num,
+ u64 *out)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ unsigned int delay, total_delay = 0;
+ u64 token = 0;
+ long rc;
+
+ if (*out != 0)
+ token = *out;
+
+ memset(retbuf, 0, sizeof(retbuf));
+ while (1) {
+ rc = plpar_hcall(H_DOWNLOAD_CA_FACILITY, retbuf,
+ unit_address, op, list_address, num,
+ token);
+ token = retbuf[0];
+ if (rc != H_BUSY && !H_IS_LONG_BUSY(rc))
+ break;
+
+ if (rc != H_BUSY) {
+ delay = get_longbusy_msecs(rc);
+ total_delay += delay;
+ if (total_delay > CXL_HCALL_TIMEOUT_DOWNLOAD) {
+ WARN(1, "Warning: Giving up waiting for CXL hcall "
+ "%#x after %u msec\n",
+ H_DOWNLOAD_CA_FACILITY, total_delay);
+ rc = H_BUSY;
+ break;
+ }
+ msleep(delay);
+ }
+ }
+ _PRINT_MSG(rc, "cxl_h_download_facility(%#.16llx, %s(%#llx, %#llx), %#lx): %li\n",
+ unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
+ trace_cxl_hcall_download_facility(unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The operation is completed for the coherent platform facility */
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied */
+ case H_FUNCTION: /* The function is not supported. */
+	case H_SG_LIST: /* A block list entry was invalid */
+ case H_BAD_DATA: /* Image verification failed */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The function has page table mappings for MMIO */
+ case H_HARDWARE: /* A hardware event prevented the attach operation */
+ case H_STATE: /* The coherent platform facility is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ case H_CONTINUE:
+ *out = retbuf[0];
+ return 1; /* More data is needed for the complete image */
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
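+
+/*
+ * Sketch of the multi-call protocol (fill_next_sg_list() is a
+ * hypothetical helper): when the image does not fit in one
+ * scatter/gather list, the hcall returns 1 (H_CONTINUE) and stores a
+ * continuation token in *out, which must be passed back unchanged:
+ *
+ *	u64 token = 0;
+ *	long rc;
+ *
+ *	do {
+ *		fill_next_sg_list(list, &entries);
+ *		rc = cxl_h_download_adapter_image(unit_address,
+ *				virt_to_phys(list), entries, &token);
+ *	} while (rc == 1);
+ */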
+
+/**
+ * cxl_h_download_adapter_image - Download the base image to the coherent
+ * platform facility.
+ */
+long cxl_h_download_adapter_image(u64 unit_address,
+ u64 list_address, u64 num,
+ u64 *out)
+{
+ return cxl_h_download_facility(unit_address,
+ H_DOWNLOAD_CA_FACILITY_DOWNLOAD,
+ list_address, num, out);
+}
+
+/**
+ * cxl_h_validate_adapter_image - Validate the base image in the coherent
+ * platform facility.
+ */
+long cxl_h_validate_adapter_image(u64 unit_address,
+ u64 list_address, u64 num,
+ u64 *out)
+{
+ return cxl_h_download_facility(unit_address,
+ H_DOWNLOAD_CA_FACILITY_VALIDATE,
+ list_address, num, out);
+}
diff --git a/drivers/misc/cxl/hcalls.h b/drivers/misc/cxl/hcalls.h
new file mode 100644
index 000000000000..3e25522a5df6
--- /dev/null
+++ b/drivers/misc/cxl/hcalls.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _HCALLS_H
+#define _HCALLS_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/hvcall.h>
+#include "cxl.h"
+
+#define SG_BUFFER_SIZE 4096
+#define SG_MAX_ENTRIES 256
+
+struct sg_list {
+ u64 phys_addr;
+ u64 len;
+};
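+
+/*
+ * A block list is a 4K page holding up to SG_MAX_ENTRIES sg_list
+ * entries describing the buffers to fill. A hypothetical setup for a
+ * single-buffer cxl_h_collect_vpd() call, assuming big-endian fields
+ * as elsewhere in the hcall interface:
+ *
+ *	struct sg_list *list = (void *)get_zeroed_page(GFP_KERNEL);
+ *
+ *	list[0].phys_addr = cpu_to_be64(virt_to_phys(buf));
+ *	list[0].len = cpu_to_be64(SG_BUFFER_SIZE);
+ *	rc = cxl_h_collect_vpd(unit_address, 0, virt_to_phys(list), 1, &out);
+ */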
+
+/*
+ * This is straight out of PAPR, but replacing some of the compound fields with
+ * a single field, where they were identical to the register layout.
+ *
+ * The 'flags' parameter regroups the various bit-fields
+ */
+#define CXL_PE_CSRP_VALID (1ULL << 63)
+#define CXL_PE_PROBLEM_STATE (1ULL << 62)
+#define CXL_PE_SECONDARY_SEGMENT_TBL_SRCH (1ULL << 61)
+#define CXL_PE_TAGS_ACTIVE (1ULL << 60)
+#define CXL_PE_USER_STATE (1ULL << 59)
+#define CXL_PE_TRANSLATION_ENABLED (1ULL << 58)
+#define CXL_PE_64_BIT (1ULL << 57)
+#define CXL_PE_PRIVILEGED_PROCESS (1ULL << 56)
+
+#define CXL_PROCESS_ELEMENT_VERSION 1
+struct cxl_process_element_hcall {
+ __be64 version;
+ __be64 flags;
+ u8 reserved0[12];
+ __be32 pslVirtualIsn;
+ u8 applicationVirtualIsnBitmap[256];
+ u8 reserved1[144];
+ struct cxl_process_element_common common;
+ u8 reserved4[12];
+} __packed;
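+
+/*
+ * With __packed, the PAPR-dictated offsets work out to: version at 0,
+ * flags at 8, pslVirtualIsn at 28, the application ISN bitmap at 32
+ * (256 bytes, i.e. room for 2048 interrupts), reserved1 at 288 and the
+ * common block at 432.
+ */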
+
+#define H_STATE_NORMAL 1
+#define H_STATE_DISABLE 2
+#define H_STATE_TEMP_UNAVAILABLE 3
+#define H_STATE_PERM_UNAVAILABLE 4
+
+/* NOTE: element must be a logical real address, and must be pinned */
+long cxl_h_attach_process(u64 unit_address, struct cxl_process_element_hcall *element,
+ u64 *process_token, u64 *mmio_addr, u64 *mmio_size);
+
+/**
+ * cxl_h_detach_process - Detach a process element from a coherent
+ * platform function.
+ */
+long cxl_h_detach_process(u64 unit_address, u64 process_token);
+
+/**
+ * cxl_h_reset_afu - Perform a reset to the coherent platform function.
+ */
+long cxl_h_reset_afu(u64 unit_address);
+
+/**
+ * cxl_h_suspend_process - Suspend a process from being executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_suspend_process(u64 unit_address, u64 process_token);
+
+/**
+ * cxl_h_resume_process - Resume a process to be executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_resume_process(u64 unit_address, u64 process_token);
+
+/**
+ * cxl_h_read_error_state - Reads the error state of the coherent
+ * platform function.
+ * R4 contains the error state
+ */
+long cxl_h_read_error_state(u64 unit_address, u64 *state);
+
+/**
+ * cxl_h_get_afu_err - collect the AFU error buffer
+ * Parameter1 = byte offset into error buffer to retrieve, valid values
+ * are between 0 and (ibm,error-buffer-size - 1)
+ * Parameter2 = 4K aligned real address of error buffer, to be filled in
+ * Parameter3 = length of error buffer, valid values are 4K or less
+ */
+long cxl_h_get_afu_err(u64 unit_address, u64 offset, u64 buf_address, u64 len);
+
+/**
+ * cxl_h_get_config - collect configuration record for the
+ * coherent platform function
+ * Parameter1 = # of configuration record to retrieve, valid values are
+ * between 0 and (ibm,#config-records - 1)
+ * Parameter2 = byte offset into configuration record to retrieve,
+ * valid values are between 0 and (ibm,config-record-size - 1)
+ * Parameter3 = 4K aligned real address of configuration record buffer,
+ * to be filled in
+ * Parameter4 = length of configuration buffer, valid values are 4K or less
+ */
+long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
+ u64 buf_address, u64 len);
+
+/**
+ * cxl_h_terminate_process - Terminate the process before completion
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_terminate_process(u64 unit_address, u64 process_token);
+
+/**
+ * cxl_h_collect_vpd - Collect VPD for the coherent platform function.
+ * Parameter1 = # of VPD record to retrieve, valid values are between 0
+ * and (ibm,#config-records - 1).
+ * Parameter2 = 4K naturally aligned real buffer containing block
+ * list entries
+ * Parameter3 = number of block list entries in the block list, valid
+ * values are between 0 and 256
+ */
+long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
+ u64 num, u64 *out);
+
+/**
+ * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
+ */
+long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg);
+
+/**
+ * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
+ * based on an interrupt
+ * Parameter1 = value to write to the function-wide error interrupt register
+ */
+long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value);
+
+/**
+ * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
+ * an error log
+ */
+long cxl_h_get_error_log(u64 unit_address, u64 value);
+
+/**
+ * cxl_h_collect_int_info - Collect interrupt info about a coherent
+ * platform function after an interrupt occurred.
+ */
+long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
+ struct cxl_irq_info *info);
+
+/**
+ * cxl_h_control_faults - Control the operation of a coherent platform
+ * function after a fault occurs.
+ *
+ * Parameters
+ * control-mask: value to control the faults
+ * looks like PSL_TFC_An shifted >> 32
+ * reset-mask: mask to control reset of function faults
+ * Set reset_mask = 1 to reset PSL errors
+ */
+long cxl_h_control_faults(u64 unit_address, u64 process_token,
+ u64 control_mask, u64 reset_mask);
+
+/**
+ * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
+ */
+long cxl_h_reset_adapter(u64 unit_address);
+
+/**
+ * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
+ * Parameter1 = 4K naturally aligned real buffer containing block
+ * list entries
+ * Parameter2 = number of block list entries in the block list, valid
+ * values are between 0 and 256
+ */
+long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
+ u64 num, u64 *out);
+
+/**
+ * cxl_h_download_adapter_image - Download the base image to the coherent
+ * platform facility.
+ */
+long cxl_h_download_adapter_image(u64 unit_address,
+ u64 list_address, u64 num,
+ u64 *out);
+
+/**
+ * cxl_h_validate_adapter_image - Validate the base image in the coherent
+ * platform facility.
+ */
+long cxl_h_validate_adapter_image(u64 unit_address,
+ u64 list_address, u64 num,
+ u64 *out);
+#endif /* _HCALLS_H */
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 09a406058c46..8def4553acba 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -19,70 +19,11 @@
#include "cxl.h"
#include "trace.h"
-/* XXX: This is implementation specific */
-static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
+static int afu_irq_range_start(void)
{
- u64 fir1, fir2, fir_slice, serr, afu_debug;
-
- fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
- fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
- serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
- afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
-
- dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
- dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
- dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
- dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
- dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
- dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
-
- dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
- cxl_stop_trace(ctx->afu->adapter);
-
- return cxl_ack_irq(ctx, 0, errstat);
-}
-
-irqreturn_t cxl_slice_irq_err(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- u64 fir_slice, errstat, serr, afu_debug;
-
- WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
-
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
- errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
- dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
- dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
- dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
- dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
-
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cxl_irq_err(int irq, void *data)
-{
- struct cxl *adapter = data;
- u64 fir1, fir2, err_ivte;
-
- WARN(1, "CXL ERROR interrupt %i\n", irq);
-
- err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
- dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
-
- dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
- cxl_stop_trace(adapter);
-
- fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
-
- dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
-
- return IRQ_HANDLED;
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ return 1;
+ return 0;
}
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
@@ -93,9 +34,8 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da
return IRQ_HANDLED;
}
-static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
+irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
- struct cxl_context *ctx = data;
u64 dsisr, dar;
dsisr = irq_info->dsisr;
@@ -145,7 +85,8 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
if (dsisr & CXL_PSL_DSISR_An_UR)
pr_devel("CXL interrupt: AURP PTE not found\n");
if (dsisr & CXL_PSL_DSISR_An_PE)
- return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
+ return cxl_ops->handle_psl_slice_error(ctx, dsisr,
+ irq_info->errstat);
if (dsisr & CXL_PSL_DSISR_An_AE) {
pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
@@ -169,7 +110,7 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
wake_up_all(&ctx->wq);
}
- cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
return IRQ_HANDLED;
}
if (dsisr & CXL_PSL_DSISR_An_OC)
@@ -179,54 +120,27 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
return IRQ_HANDLED;
}
-static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
-{
- if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- else
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- struct cxl_context *ctx;
- struct cxl_irq_info irq_info;
- int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
- int ret;
-
- if ((ret = cxl_get_irq(afu, &irq_info))) {
- WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
- return fail_psl_irq(afu, &irq_info);
- }
-
- rcu_read_lock();
- ctx = idr_find(&afu->contexts_idr, ph);
- if (ctx) {
- ret = cxl_irq(irq, ctx, &irq_info);
- rcu_read_unlock();
- return ret;
- }
- rcu_read_unlock();
-
- WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
- " %016llx\n(Possible AFU HW issue - was a term/remove acked"
- " with outstanding transactions?)\n", ph, irq_info.dsisr,
- irq_info.dar);
- return fail_psl_irq(afu, &irq_info);
-}
-
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
struct cxl_context *ctx = data;
irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
- int irq_off, afu_irq = 1;
+ int irq_off, afu_irq = 0;
__u16 range;
int r;
- for (r = 1; r < CXL_IRQ_RANGES; r++) {
+	/*
+	 * Look for the interrupt number.
+	 * On bare-metal, we know range 0 only contains the PSL
+	 * interrupt, so we could start counting at range 1 and
+	 * initialize afu_irq at 1.
+	 * In a guest, range 0 also contains AFU interrupts, so it must
+	 * be accounted for. We therefore initialize afu_irq at 0 to
+	 * take the PSL interrupt into account.
+	 *
+	 * For code readability, it just seems easier to go over all
+	 * the ranges on bare-metal and in a guest. The end result is
+	 * the same.
+	 */
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
irq_off = hwirq - ctx->irqs.offset[r];
range = ctx->irqs.range[r];
if (irq_off >= 0 && irq_off < range) {
@@ -236,7 +150,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
afu_irq += range;
}
if (unlikely(r >= CXL_IRQ_RANGES)) {
- WARN(1, "Recieved AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
+ WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
ctx->pe, irq, hwirq);
return IRQ_HANDLED;
}
@@ -246,7 +160,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
afu_irq, ctx->pe, irq, hwirq);
if (unlikely(!ctx->irq_bitmap)) {
- WARN(1, "Recieved AFU IRQ for context with no IRQ bitmap\n");
+ WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
return IRQ_HANDLED;
}
spin_lock(&ctx->lock);
@@ -272,7 +186,8 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
return 0;
}
- cxl_setup_irq(adapter, hwirq, virq);
+ if (cxl_ops->setup_irq)
+ cxl_ops->setup_irq(adapter, hwirq, virq);
pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
@@ -288,19 +203,18 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
free_irq(virq, cookie);
- irq_dispose_mapping(virq);
}
-static int cxl_register_one_irq(struct cxl *adapter,
- irq_handler_t handler,
- void *cookie,
- irq_hw_number_t *dest_hwirq,
- unsigned int *dest_virq,
- const char *name)
+int cxl_register_one_irq(struct cxl *adapter,
+ irq_handler_t handler,
+ void *cookie,
+ irq_hw_number_t *dest_hwirq,
+ unsigned int *dest_virq,
+ const char *name)
{
int hwirq, virq;
- if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
+ if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
return hwirq;
if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
@@ -312,108 +226,10 @@ static int cxl_register_one_irq(struct cxl *adapter,
return 0;
err:
- cxl_release_one_irq(adapter, hwirq);
+ cxl_ops->release_one_irq(adapter, hwirq);
return -ENOMEM;
}
-int cxl_register_psl_err_irq(struct cxl *adapter)
-{
- int rc;
-
- adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&adapter->dev));
- if (!adapter->irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
- &adapter->err_hwirq,
- &adapter->err_virq,
- adapter->irq_name))) {
- kfree(adapter->irq_name);
- adapter->irq_name = NULL;
- return rc;
- }
-
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);
-
- return 0;
-}
-
-void cxl_release_psl_err_irq(struct cxl *adapter)
-{
- if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
- return;
-
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
- cxl_unmap_irq(adapter->err_virq, adapter);
- cxl_release_one_irq(adapter, adapter->err_hwirq);
- kfree(adapter->irq_name);
-}
-
-int cxl_register_serr_irq(struct cxl_afu *afu)
-{
- u64 serr;
- int rc;
-
- afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&afu->dev));
- if (!afu->err_irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
- &afu->serr_hwirq,
- &afu->serr_virq, afu->err_irq_name))) {
- kfree(afu->err_irq_name);
- afu->err_irq_name = NULL;
- return rc;
- }
-
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
-
- return 0;
-}
-
-void cxl_release_serr_irq(struct cxl_afu *afu)
-{
- if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
- return;
-
- cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
- cxl_unmap_irq(afu->serr_virq, afu);
- cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
- kfree(afu->err_irq_name);
-}
-
-int cxl_register_psl_irq(struct cxl_afu *afu)
-{
- int rc;
-
- afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
- dev_name(&afu->dev));
- if (!afu->psl_irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
- &afu->psl_hwirq, &afu->psl_virq,
- afu->psl_irq_name))) {
- kfree(afu->psl_irq_name);
- afu->psl_irq_name = NULL;
- }
- return rc;
-}
-
-void cxl_release_psl_irq(struct cxl_afu *afu)
-{
- if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
- return;
-
- cxl_unmap_irq(afu->psl_virq, afu);
- cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
- kfree(afu->psl_irq_name);
-}
-
void afu_irq_name_free(struct cxl_context *ctx)
{
struct cxl_irq_name *irq_name, *tmp;
@@ -429,16 +245,33 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
int rc, r, i, j = 1;
struct cxl_irq_name *irq_name;
+ int alloc_count;
+
+ /*
+ * In native mode, range 0 is reserved for the multiplexed
+	 * PSL interrupt. It was allocated when the AFU was initialized.
+	 *
+	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
+ * and is the first interrupt from range 0. It still needs to be
+ * allocated, so bump the count by one.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ alloc_count = count;
+ else
+ alloc_count = count + 1;
/* Initialize the list head to hold irq names */
INIT_LIST_HEAD(&ctx->irq_names);
- if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
+ if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
+ alloc_count)))
return rc;
- /* Multiplexed PSL Interrupt */
- ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
- ctx->irqs.range[0] = 1;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ /* Multiplexed PSL Interrupt */
+ ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
+ ctx->irqs.range[0] = 1;
+ }
ctx->irq_count = count;
ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
@@ -450,7 +283,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
* Allocate names first. If any fail, bail out before allocating
* actual hardware IRQs.
*/
- for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
for (i = 0; i < ctx->irqs.range[r]; i++) {
irq_name = kmalloc(sizeof(struct cxl_irq_name),
GFP_KERNEL);
@@ -471,7 +304,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
return 0;
out:
- cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+ cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
afu_irq_name_free(ctx);
return -ENOMEM;
}
@@ -480,15 +313,30 @@ static void afu_register_hwirqs(struct cxl_context *ctx)
{
irq_hw_number_t hwirq;
struct cxl_irq_name *irq_name;
- int r,i;
+ int r, i;
+ irqreturn_t (*handler)(int irq, void *data);
/* We've allocated all memory now, so let's do the irq allocations */
irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
- for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
hwirq = ctx->irqs.offset[r];
for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- cxl_map_irq(ctx->afu->adapter, hwirq,
- cxl_irq_afu, ctx, irq_name->name);
+ if (r == 0 && i == 0)
+ /*
+ * The very first interrupt of range 0 is
+ * always the PSL interrupt, but we only
+ * need to connect a handler for guests,
+ * because there's one PSL interrupt per
+ * context.
+ * On bare-metal, the PSL interrupt is
+				 * multiplexed and was set up when the AFU
+ * was configured.
+ */
+ handler = cxl_ops->psl_interrupt;
+ else
+ handler = cxl_irq_afu;
+ cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
+ irq_name->name);
irq_name = list_next_entry(irq_name, list);
}
}
@@ -504,7 +352,7 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
afu_register_hwirqs(ctx);
return 0;
- }
+}
void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
@@ -512,7 +360,7 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
unsigned int virq;
int r, i;
- for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
hwirq = ctx->irqs.offset[r];
for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
virq = irq_find_mapping(NULL, hwirq);
@@ -522,7 +370,7 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
}
afu_irq_name_free(ctx);
- cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+ cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
ctx->irq_count = 0;
}
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 9fde75ed4fac..ae68c3201156 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -32,6 +32,29 @@ uint cxl_verbose;
module_param_named(verbose, cxl_verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
+const struct cxl_backend_ops *cxl_ops;
+
+int cxl_afu_slbia(struct cxl_afu *afu)
+{
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+
+ pr_devel("cxl_afu_slbia issuing SLBIA command\n");
+ cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
+ while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
+ return -EBUSY;
+ }
+ /* If the adapter has gone down, we can assume that we
+ * will PERST it and that will invalidate everything.
+ */
+ if (!cxl_ops->link_ok(afu->adapter, afu))
+ return -EIO;
+ cpu_relax();
+ }
+ return 0;
+}
+
static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
{
struct task_struct *task;
@@ -139,6 +162,32 @@ int cxl_alloc_sst(struct cxl_context *ctx)
return 0;
}
+/* print buffer content as integers when debugging */
+void cxl_dump_debug_buffer(void *buf, size_t buf_len)
+{
+#ifdef DEBUG
+ int i, *ptr;
+
+ /*
+	 * We want to group up to 4 integers per line, which means they
+	 * need to be in the same pr_devel() statement
+ */
+ ptr = (int *) buf;
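+	/* i indexes 32-bit words, so i * 4 is the byte offset into buf */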
+ for (i = 0; i * 4 < buf_len; i += 4) {
+ if ((i + 3) * 4 < buf_len)
+ pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
+ ptr[i + 2], ptr[i + 3]);
+ else if ((i + 2) * 4 < buf_len)
+ pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
+ ptr[i + 2]);
+ else if ((i + 1) * 4 < buf_len)
+ pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]);
+ else
+ pr_devel("%.8x\n", ptr[i]);
+ }
+#endif /* DEBUG */
+}
+
/* Find a CXL adapter by its number and increase its refcount */
struct cxl *get_cxl_adapter(int num)
{
@@ -152,7 +201,7 @@ struct cxl *get_cxl_adapter(int num)
return adapter;
}
-int cxl_alloc_adapter_nr(struct cxl *adapter)
+static int cxl_alloc_adapter_nr(struct cxl *adapter)
{
int i;
@@ -174,13 +223,58 @@ void cxl_remove_adapter_nr(struct cxl *adapter)
idr_remove(&cxl_adapter_idr, adapter->adapter_num);
}
+struct cxl *cxl_alloc_adapter(void)
+{
+ struct cxl *adapter;
+
+ if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
+ return NULL;
+
+ spin_lock_init(&adapter->afu_list_lock);
+
+ if (cxl_alloc_adapter_nr(adapter))
+ goto err1;
+
+ if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
+ goto err2;
+
+ return adapter;
+
+err2:
+ cxl_remove_adapter_nr(adapter);
+err1:
+ kfree(adapter);
+ return NULL;
+}
+
+struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
+{
+ struct cxl_afu *afu;
+
+ if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
+ return NULL;
+
+ afu->adapter = adapter;
+ afu->dev.parent = &adapter->dev;
+ afu->dev.release = cxl_ops->release_afu;
+ afu->slice = slice;
+ idr_init(&afu->contexts_idr);
+ mutex_init(&afu->contexts_lock);
+ spin_lock_init(&afu->afu_cntl_lock);
+
+ afu->prefault_mode = CXL_PREFAULT_NONE;
+ afu->irqs_max = afu->adapter->user_irqs;
+
+ return afu;
+}
+
int cxl_afu_select_best_mode(struct cxl_afu *afu)
{
if (afu->modes_supported & CXL_MODE_DIRECTED)
- return cxl_afu_activate_mode(afu, CXL_MODE_DIRECTED);
+ return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED);
if (afu->modes_supported & CXL_MODE_DEDICATED)
- return cxl_afu_activate_mode(afu, CXL_MODE_DEDICATED);
+ return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED);
dev_warn(&afu->dev, "No supported programming modes available\n");
/* We don't fail this so the user can inspect sysfs */
@@ -191,9 +285,6 @@ static int __init init_cxl(void)
{
int rc = 0;
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return -EPERM;
-
if ((rc = cxl_file_init()))
return rc;
@@ -202,7 +293,17 @@ static int __init init_cxl(void)
if ((rc = register_cxl_calls(&cxl_calls)))
goto err;
- if ((rc = pci_register_driver(&cxl_pci_driver)))
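+	/*
+	 * Bare metal uses the native ops and the PCI driver; a pseries
+	 * guest uses the guest ops and the cxl_of platform driver.
+	 */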
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ cxl_ops = &cxl_native_ops;
+ rc = pci_register_driver(&cxl_pci_driver);
+ }
+#ifdef CONFIG_PPC_PSERIES
+ else {
+ cxl_ops = &cxl_guest_ops;
+ rc = platform_driver_register(&cxl_of_driver);
+ }
+#endif
+ if (rc)
goto err1;
return 0;
@@ -217,7 +318,12 @@ err:
static void exit_cxl(void)
{
- pci_unregister_driver(&cxl_pci_driver);
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ pci_unregister_driver(&cxl_pci_driver);
+#ifdef CONFIG_PPC_PSERIES
+ else
+ platform_driver_unregister(&cxl_of_driver);
+#endif
cxl_debugfs_exit();
cxl_file_exit();
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index f40909793490..ecf7557cd657 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>
@@ -42,7 +43,7 @@ static int afu_control(struct cxl_afu *afu, u64 command,
goto out;
}
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
afu->enabled = enabled;
rc = -EIO;
goto out;
@@ -80,7 +81,7 @@ int cxl_afu_disable(struct cxl_afu *afu)
}
/* This will disable as well as reset */
-int __cxl_afu_reset(struct cxl_afu *afu)
+static int native_afu_reset(struct cxl_afu *afu)
{
pr_devel("AFU reset request\n");
@@ -90,9 +91,9 @@ int __cxl_afu_reset(struct cxl_afu *afu)
false);
}
-int cxl_afu_check_and_enable(struct cxl_afu *afu)
+static int native_afu_check_and_enable(struct cxl_afu *afu)
{
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
WARN(1, "Refusing to enable afu while link down!\n");
return -EIO;
}
@@ -114,7 +115,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
pr_devel("PSL purge request\n");
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
rc = -EIO;
goto out;
@@ -136,7 +137,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
rc = -EBUSY;
goto out;
}
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
rc = -EIO;
goto out;
}
@@ -186,22 +187,22 @@ static int spa_max_procs(int spa_size)
int cxl_alloc_spa(struct cxl_afu *afu)
{
/* Work out how many pages to allocate */
- afu->spa_order = 0;
+ afu->native->spa_order = 0;
do {
- afu->spa_order++;
- afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE;
- afu->spa_max_procs = spa_max_procs(afu->spa_size);
- } while (afu->spa_max_procs < afu->num_procs);
+ afu->native->spa_order++;
+ afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+ afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
+ } while (afu->native->spa_max_procs < afu->num_procs);
- WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */
+ WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */
- if (!(afu->spa = (struct cxl_process_element *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) {
+ if (!(afu->native->spa = (struct cxl_process_element *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
return -ENOMEM;
}
pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
- 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
+ 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);
return 0;
}
@@ -210,13 +211,15 @@ static void attach_spa(struct cxl_afu *afu)
{
u64 spap;
- afu->sw_command_status = (__be64 *)((char *)afu->spa +
- ((afu->spa_max_procs + 3) * 128));
+ afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
+ ((afu->native->spa_max_procs + 3) * 128));
- spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr;
- spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
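+	/* SPAP_An packs the SPA address with its encoded size and a valid bit */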
+ spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
+ spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
spap |= CXL_PSL_SPAP_V;
- pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
+ pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
+ afu->native->spa, afu->native->spa_max_procs,
+ afu->native->sw_command_status, spap);
cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}
@@ -227,9 +230,10 @@ static inline void detach_spa(struct cxl_afu *afu)
void cxl_release_spa(struct cxl_afu *afu)
{
- if (afu->spa) {
- free_pages((unsigned long) afu->spa, afu->spa_order);
- afu->spa = NULL;
+ if (afu->native->spa) {
+ free_pages((unsigned long) afu->native->spa,
+ afu->native->spa_order);
+ afu->native->spa = NULL;
}
}
@@ -247,7 +251,7 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter)
dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
return -EBUSY;
}
- if (!cxl_adapter_link_ok(adapter))
+ if (!cxl_ops->link_ok(adapter, NULL))
return -EIO;
cpu_relax();
}
@@ -258,28 +262,7 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter)
dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
return -EBUSY;
}
- if (!cxl_adapter_link_ok(adapter))
- return -EIO;
- cpu_relax();
- }
- return 0;
-}
-
-int cxl_afu_slbia(struct cxl_afu *afu)
-{
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
-
- pr_devel("cxl_afu_slbia issuing SLBIA command\n");
- cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
- while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
- return -EBUSY;
- }
- /* If the adapter has gone down, we can assume that we
- * will PERST it and that will invalidate everything.
- */
- if (!cxl_adapter_link_ok(afu->adapter))
+ if (!cxl_ops->link_ok(adapter, NULL))
return -EIO;
cpu_relax();
}
@@ -312,7 +295,7 @@ static void slb_invalid(struct cxl_context *ctx)
struct cxl *adapter = ctx->afu->adapter;
u64 slbia;
- WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex));
+ WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));
cxl_p1_write(adapter, CXL_PSL_LBISEL,
((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
@@ -320,7 +303,7 @@ static void slb_invalid(struct cxl_context *ctx)
cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
while (1) {
- if (!cxl_adapter_link_ok(adapter))
+ if (!cxl_ops->link_ok(adapter, NULL))
break;
slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
if (!(slbia & CXL_TLB_SLB_P))
@@ -342,7 +325,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
ctx->elem->software_state = cpu_to_be32(pe_state);
smp_wmb();
- *(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
+ *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
smp_mb();
cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
while (1) {
@@ -351,12 +334,12 @@ static int do_process_element_cmd(struct cxl_context *ctx,
rc = -EBUSY;
goto out;
}
- if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
rc = -EIO;
goto out;
}
- state = be64_to_cpup(ctx->afu->sw_command_status);
+ state = be64_to_cpup(ctx->afu->native->sw_command_status);
if (state == ~0ULL) {
pr_err("cxl: Error adding process element to AFU\n");
rc = -1;
@@ -384,12 +367,12 @@ static int add_process_element(struct cxl_context *ctx)
{
int rc = 0;
- mutex_lock(&ctx->afu->spa_mutex);
+ mutex_lock(&ctx->afu->native->spa_mutex);
pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
ctx->pe_inserted = true;
pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->spa_mutex);
+ mutex_unlock(&ctx->afu->native->spa_mutex);
return rc;
}
@@ -401,18 +384,18 @@ static int terminate_process_element(struct cxl_context *ctx)
if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
return rc;
- mutex_lock(&ctx->afu->spa_mutex);
+ mutex_lock(&ctx->afu->native->spa_mutex);
pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
/* We could be asked to terminate when the hw is down. That
* should always succeed: it's not running if the hw has gone
* away and is being reset.
*/
- if (cxl_adapter_link_ok(ctx->afu->adapter))
+ if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
ctx->elem->software_state = 0; /* Remove Valid bit */
pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->spa_mutex);
+ mutex_unlock(&ctx->afu->native->spa_mutex);
return rc;
}
@@ -420,20 +403,20 @@ static int remove_process_element(struct cxl_context *ctx)
{
int rc = 0;
- mutex_lock(&ctx->afu->spa_mutex);
+ mutex_lock(&ctx->afu->native->spa_mutex);
pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
/* We could be asked to remove when the hw is down. Again, if
* the hw is down, the PE is gone, so we succeed.
*/
- if (cxl_adapter_link_ok(ctx->afu->adapter))
+ if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
if (!rc)
ctx->pe_inserted = false;
slb_invalid(ctx);
pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->spa_mutex);
+ mutex_unlock(&ctx->afu->native->spa_mutex);
return rc;
}
@@ -446,7 +429,7 @@ void cxl_assign_psn_space(struct cxl_context *ctx)
ctx->psn_size = ctx->afu->adapter->ps_size;
} else {
ctx->psn_phys = ctx->afu->psn_phys +
- (ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe);
+ (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
ctx->psn_size = ctx->afu->pp_size;
}
}
@@ -458,7 +441,7 @@ static int activate_afu_directed(struct cxl_afu *afu)
dev_info(&afu->dev, "Activating AFU directed mode\n");
afu->num_procs = afu->max_procs_virtualised;
- if (afu->spa == NULL) {
+ if (afu->native->spa == NULL) {
if (cxl_alloc_spa(afu))
return -ENOMEM;
}
@@ -552,7 +535,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
ctx->elem->common.wed = cpu_to_be64(wed);
/* first guy needs to enable */
- if ((result = cxl_afu_check_and_enable(ctx->afu)))
+ if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
return result;
return add_process_element(ctx);
@@ -568,7 +551,7 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
cxl_sysfs_afu_m_remove(afu);
cxl_chardev_afu_remove(afu);
- __cxl_afu_reset(afu);
+ cxl_ops->afu_reset(afu);
cxl_afu_disable(afu);
cxl_psl_purge(afu);
@@ -632,7 +615,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
/* master only context for dedicated */
cxl_assign_psn_space(ctx);
- if ((rc = __cxl_afu_reset(afu)))
+ if ((rc = cxl_ops->afu_reset(afu)))
return rc;
cxl_p2n_write(afu, CXL_PSL_WED_An, wed);
@@ -652,7 +635,7 @@ static int deactivate_dedicated_process(struct cxl_afu *afu)
return 0;
}
-int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode)
+static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
if (mode == CXL_MODE_DIRECTED)
return deactivate_afu_directed(afu);
@@ -661,19 +644,14 @@ int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode)
return 0;
}
-int cxl_afu_deactivate_mode(struct cxl_afu *afu)
-{
- return _cxl_afu_deactivate_mode(afu, afu->current_mode);
-}
-
-int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
+static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
if (!mode)
return 0;
if (!(mode & afu->modes_supported))
return -EINVAL;
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
WARN(1, "Device link is down, refusing to activate!\n");
return -EIO;
}
@@ -686,9 +664,10 @@ int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
return -EINVAL;
}
-int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
+static int native_attach_process(struct cxl_context *ctx, bool kernel,
+ u64 wed, u64 amr)
{
- if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
WARN(1, "Device link is down, refusing to attach process!\n");
return -EIO;
}
@@ -705,7 +684,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
- __cxl_afu_reset(ctx->afu);
+ cxl_ops->afu_reset(ctx->afu);
cxl_afu_disable(ctx->afu);
cxl_psl_purge(ctx->afu);
return 0;
@@ -723,7 +702,7 @@ static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
return 0;
}
-int cxl_detach_process(struct cxl_context *ctx)
+static int native_detach_process(struct cxl_context *ctx)
{
trace_cxl_detach(ctx);
@@ -733,14 +712,14 @@ int cxl_detach_process(struct cxl_context *ctx)
return detach_process_native_afu_directed(ctx);
}
-int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
+static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
u64 pidtid;
/* If the adapter has gone away, we can't get any meaningful
* information.
*/
- if (!cxl_adapter_link_ok(afu->adapter))
+ if (!cxl_ops->link_ok(afu->adapter, afu))
return -EIO;
info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
@@ -751,10 +730,243 @@ int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
info->tid = pidtid & 0xffffffff;
info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
+ info->proc_handle = 0;
+
+ return 0;
+}
+
+static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
+ u64 dsisr, u64 errstat)
+{
+ u64 fir1, fir2, fir_slice, serr, afu_debug;
+
+ fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
+ fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
+ fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
+ serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
+ afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
+
+ dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
+ dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
+ dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
+ dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+ dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+
+ dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
+ cxl_stop_trace(ctx->afu->adapter);
+
+ return cxl_ops->ack_irq(ctx, 0, errstat);
+}
+
+static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
+{
+ if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+ else
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t native_irq_multiplexed(int irq, void *data)
+{
+ struct cxl_afu *afu = data;
+ struct cxl_context *ctx;
+ struct cxl_irq_info irq_info;
+ int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
+ int ret;
+
+ if ((ret = native_get_irq_info(afu, &irq_info))) {
+ WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
+ return fail_psl_irq(afu, &irq_info);
+ }
+
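+	/* The PE handle read above identifies the context this interrupt
+	 * was presented for; look it up and hand the interrupt to cxl_irq().
+	 */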
+ rcu_read_lock();
+ ctx = idr_find(&afu->contexts_idr, ph);
+ if (ctx) {
+ ret = cxl_irq(irq, ctx, &irq_info);
+ rcu_read_unlock();
+ return ret;
+ }
+ rcu_read_unlock();
+
+ WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
+ " %016llx\n(Possible AFU HW issue - was a term/remove acked"
+ " with outstanding transactions?)\n", ph, irq_info.dsisr,
+ irq_info.dar);
+ return fail_psl_irq(afu, &irq_info);
+}
+
+static void native_irq_wait(struct cxl_context *ctx)
+{
+ u64 dsisr;
+ int timeout = 1000;
+ int ph;
+
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ while (timeout--) {
+ ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
+ if (ph != ctx->pe)
+ return;
+ dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
+ if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
+ return;
+ /*
+ * We are waiting for the workqueue to process our
+		 * irq, so we need to let that run here.
+ */
+ msleep(1);
+ }
+
+ dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
+ " DSISR %016llx!\n", ph, dsisr);
+ return;
+}
+
+static irqreturn_t native_slice_irq_err(int irq, void *data)
+{
+ struct cxl_afu *afu = data;
+ u64 fir_slice, errstat, serr, afu_debug;
+
+ WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
+
+ serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
+ errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
+ afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
+ dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+ dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
+ dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t native_irq_err(int irq, void *data)
+{
+ struct cxl *adapter = data;
+ u64 fir1, fir2, err_ivte;
+
+ WARN(1, "CXL ERROR interrupt %i\n", irq);
+
+ err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
+ dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
+
+ dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
+ cxl_stop_trace(adapter);
+
+ fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
+ fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+
+ dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+
+ return IRQ_HANDLED;
+}
+
+int cxl_native_register_psl_err_irq(struct cxl *adapter)
+{
+ int rc;
+
+ adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+ dev_name(&adapter->dev));
+ if (!adapter->irq_name)
+ return -ENOMEM;
+
+ if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
+ &adapter->native->err_hwirq,
+ &adapter->native->err_virq,
+ adapter->irq_name))) {
+ kfree(adapter->irq_name);
+ adapter->irq_name = NULL;
+ return rc;
+ }
+
+ cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
return 0;
}
+void cxl_native_release_psl_err_irq(struct cxl *adapter)
+{
+ if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+ return;
+
+ cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
+ cxl_unmap_irq(adapter->native->err_virq, adapter);
+ cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
+ kfree(adapter->irq_name);
+}
+
+int cxl_native_register_serr_irq(struct cxl_afu *afu)
+{
+ u64 serr;
+ int rc;
+
+ afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+ dev_name(&afu->dev));
+ if (!afu->err_irq_name)
+ return -ENOMEM;
+
+ if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
+ &afu->serr_hwirq,
+ &afu->serr_virq, afu->err_irq_name))) {
+ kfree(afu->err_irq_name);
+ afu->err_irq_name = NULL;
+ return rc;
+ }
+
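+	/* Route slice error interrupts to the hwirq we just registered */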
+ serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+
+ return 0;
+}
+
+void cxl_native_release_serr_irq(struct cxl_afu *afu)
+{
+ if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+ return;
+
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
+ cxl_unmap_irq(afu->serr_virq, afu);
+ cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
+ kfree(afu->err_irq_name);
+}
+
+int cxl_native_register_psl_irq(struct cxl_afu *afu)
+{
+ int rc;
+
+ afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
+ dev_name(&afu->dev));
+ if (!afu->psl_irq_name)
+ return -ENOMEM;
+
+ if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
+ afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
+ afu->psl_irq_name))) {
+ kfree(afu->psl_irq_name);
+ afu->psl_irq_name = NULL;
+ }
+ return rc;
+}
+
+void cxl_native_release_psl_irq(struct cxl_afu *afu)
+{
+ if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+ return;
+
+ cxl_unmap_irq(afu->native->psl_virq, afu);
+ cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
+ kfree(afu->psl_irq_name);
+}
+
static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
u64 dsisr;
@@ -769,7 +981,7 @@ static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}
-int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
+static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
trace_cxl_psl_irq_ack(ctx, tfc);
if (tfc)
@@ -784,3 +996,133 @@ int cxl_check_error(struct cxl_afu *afu)
{
return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}
+
+static bool native_support_attributes(const char *attr_name,
+ enum cxl_attrs type)
+{
+ return true;
+}
+
+static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
+{
+ if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
+ return -EIO;
+ if (unlikely(off >= afu->crs_len))
+ return -ERANGE;
+ *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
+ (cr * afu->crs_len) + off);
+ return 0;
+}
+
+static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
+{
+ if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
+ return -EIO;
+ if (unlikely(off >= afu->crs_len))
+ return -ERANGE;
+ *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
+ (cr * afu->crs_len) + off);
+ return 0;
+}
+
+static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val;
+ int rc;
+
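+	/* Do an aligned 32-bit read, then extract the 16-bit field at off */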
+ rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
+ if (!rc)
+ *out = (val >> ((off & 0x3) * 8)) & 0xffff;
+ return rc;
+}
+
+static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val;
+ int rc;
+
+ rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
+ if (!rc)
+ *out = (val >> ((off & 0x3) * 8)) & 0xff;
+ return rc;
+}
+
+static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
+{
+ if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
+ return -EIO;
+ if (unlikely(off >= afu->crs_len))
+ return -ERANGE;
+ out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
+ (cr * afu->crs_len) + off, in);
+ return 0;
+}
+
+static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val32, mask, shift;
+ int rc;
+
+ rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
+ if (rc)
+ return rc;
+ shift = (off & 0x3) * 8;
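+	/* shift == 24 would mean a 16-bit access straddling two words */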
+ WARN_ON(shift == 24);
+ mask = 0xffff << shift;
+ val32 = (val32 & ~mask) | (in << shift);
+
+ rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
+ return rc;
+}
+
+static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val32, mask, shift;
+ int rc;
+
+ rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
+ if (rc)
+ return rc;
+ shift = (off & 0x3) * 8;
+ mask = 0xff << shift;
+ val32 = (val32 & ~mask) | (in << shift);
+
+ rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
+ return rc;
+}
+
+const struct cxl_backend_ops cxl_native_ops = {
+ .module = THIS_MODULE,
+ .adapter_reset = cxl_pci_reset,
+ .alloc_one_irq = cxl_pci_alloc_one_irq,
+ .release_one_irq = cxl_pci_release_one_irq,
+ .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
+ .release_irq_ranges = cxl_pci_release_irq_ranges,
+ .setup_irq = cxl_pci_setup_irq,
+ .handle_psl_slice_error = native_handle_psl_slice_error,
+ .psl_interrupt = NULL,
+ .ack_irq = native_ack_irq,
+ .irq_wait = native_irq_wait,
+ .attach_process = native_attach_process,
+ .detach_process = native_detach_process,
+ .support_attributes = native_support_attributes,
+ .link_ok = cxl_adapter_link_ok,
+ .release_afu = cxl_pci_release_afu,
+ .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
+ .afu_check_and_enable = native_afu_check_and_enable,
+ .afu_activate_mode = native_afu_activate_mode,
+ .afu_deactivate_mode = native_afu_deactivate_mode,
+ .afu_reset = native_afu_reset,
+ .afu_cr_read8 = native_afu_cr_read8,
+ .afu_cr_read16 = native_afu_cr_read16,
+ .afu_cr_read32 = native_afu_cr_read32,
+ .afu_cr_read64 = native_afu_cr_read64,
+ .afu_cr_write8 = native_afu_cr_write8,
+ .afu_cr_write16 = native_afu_cr_write16,
+ .afu_cr_write32 = native_afu_cr_write32,
+ .read_adapter_vpd = cxl_pci_read_adapter_vpd,
+};
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
new file mode 100644
index 000000000000..edc458395f68
--- /dev/null
+++ b/drivers/misc/cxl/of.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include "cxl.h"
+
+static const __be32 *read_prop_string(const struct device_node *np,
+ const char *prop_name)
+{
+ const __be32 *prop;
+
+ prop = of_get_property(np, prop_name, NULL);
+ if (cxl_verbose && prop)
+ pr_info("%s: %s\n", prop_name, (char *) prop);
+ return prop;
+}
+
+static const __be32 *read_prop_dword(const struct device_node *np,
+ const char *prop_name, u32 *val)
+{
+ const __be32 *prop;
+
+ prop = of_get_property(np, prop_name, NULL);
+ if (prop)
+ *val = be32_to_cpu(prop[0]);
+ if (cxl_verbose && prop)
+ pr_info("%s: %#x (%u)\n", prop_name, *val, *val);
+ return prop;
+}
+
+static const __be64 *read_prop64_dword(const struct device_node *np,
+ const char *prop_name, u64 *val)
+{
+ const __be64 *prop;
+
+ prop = of_get_property(np, prop_name, NULL);
+ if (prop)
+ *val = be64_to_cpu(prop[0]);
+ if (cxl_verbose && prop)
+ pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val);
+ return prop;
+}
+
+static int read_handle(struct device_node *np, u64 *handle)
+{
+ const __be32 *prop;
+ u64 size;
+
+ /* Get address and size of the node */
+ prop = of_get_address(np, 0, &size, NULL);
+	if (!prop || size)
+		return -EINVAL;
+
+ /* Helper to read a big number; size is in cells (not bytes) */
+ *handle = of_read_number(prop, of_n_addr_cells(np));
+ return 0;
+}
+
+static int read_phys_addr(struct device_node *np, char *prop_name,
+ struct cxl_afu *afu)
+{
+ int i, len, entry_size, naddr, nsize, type;
+ u64 addr, size;
+ const __be32 *prop;
+
+ naddr = of_n_addr_cells(np);
+ nsize = of_n_size_cells(np);
+
+ prop = of_get_property(np, prop_name, &len);
+ if (prop) {
+ entry_size = naddr + nsize;
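+		/* len is in bytes; each entry is (naddr + nsize) cells */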
+ for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) {
+ type = be32_to_cpu(prop[0]);
+ addr = of_read_number(prop, naddr);
+ size = of_read_number(&prop[naddr], nsize);
+ switch (type) {
+ case 0: /* unit address */
+ afu->guest->handle = addr;
+ break;
+ case 1: /* p2 area */
+ afu->guest->p2n_phys += addr;
+ afu->guest->p2n_size = size;
+ break;
+ case 2: /* problem state area */
+ afu->psn_phys += addr;
+ afu->adapter->ps_size = size;
+ break;
+ default:
+ pr_err("Invalid address type %d found in %s property of AFU\n",
+ type, prop_name);
+ return -EINVAL;
+ }
+ if (cxl_verbose)
+ pr_info("%s: %#x %#llx (size %#llx)\n",
+ prop_name, type, addr, size);
+ }
+ }
+ return 0;
+}
+
+static int read_vpd(struct cxl *adapter, struct cxl_afu *afu)
+{
+ char vpd[256];
+ int rc;
+ size_t len = sizeof(vpd);
+
+ memset(vpd, 0, len);
+
+ if (adapter)
+ rc = cxl_guest_read_adapter_vpd(adapter, vpd, len);
+ else
+ rc = cxl_guest_read_afu_vpd(afu, vpd, len);
+
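+	/* A positive return code is the number of VPD bytes read */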
+ if (rc > 0) {
+ cxl_dump_debug_buffer(vpd, rc);
+ rc = 0;
+ }
+ return rc;
+}
+
+int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np)
+{
+ if (read_handle(afu_np, &afu->guest->handle))
+ return -EINVAL;
+ pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle);
+
+ return 0;
+}
+
+int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
+{
+ int i, len, rc;
+ char *p;
+ const __be32 *prop;
+ u16 device_id, vendor_id;
+ u32 val = 0, class_code;
+
+ /* Properties are read in the same order as listed in PAPR */
+
+ if (cxl_verbose) {
+ pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n");
+
+ prop = of_get_property(np, "compatible", &len);
+ i = 0;
+ while (i < len) {
+ p = (char *) prop + i;
+ pr_info("compatible: %s\n", p);
+ i += strlen(p) + 1;
+ }
+ read_prop_string(np, "name");
+ }
+
+ rc = read_phys_addr(np, "reg", afu);
+ if (rc)
+ return rc;
+
+ rc = read_phys_addr(np, "assigned-addresses", afu);
+ if (rc)
+ return rc;
+
+	afu->psa = (afu->psn_phys != 0);
+
+ if (cxl_verbose) {
+ read_prop_string(np, "ibm,loc-code");
+ read_prop_string(np, "device_type");
+ }
+
+ read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised);
+
+ if (cxl_verbose) {
+ read_prop_dword(np, "ibm,scratchpad-size", &val);
+ read_prop_dword(np, "ibm,programmable", &val);
+ read_prop_string(np, "ibm,phandle");
+ read_vpd(NULL, afu);
+ }
+
+ read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
+ afu->irqs_max = afu->guest->max_ints;
+
+ prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs);
+ if (prop) {
+ /* One extra interrupt for the PSL interrupt is already
+ * included. Remove it now to keep only AFU interrupts and
+ * match the native case.
+ */
+ afu->pp_irqs--;
+ }
+
+ if (cxl_verbose) {
+ read_prop_dword(np, "ibm,max-ints", &val);
+ read_prop_dword(np, "ibm,vpd-size", &val);
+ }
+
+ read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len);
+ afu->eb_offset = 0;
+
+ if (cxl_verbose)
+ read_prop_dword(np, "ibm,config-record-type", &val);
+
+ read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len);
+ afu->crs_offset = 0;
+
+ read_prop_dword(np, "ibm,#config-records", &afu->crs_num);
+
+ if (cxl_verbose) {
+ for (i = 0; i < afu->crs_num; i++) {
+ rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID,
+ &device_id);
+ if (!rc)
+ pr_info("record %d - device-id: %#x\n",
+ i, device_id);
+ rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID,
+ &vendor_id);
+ if (!rc)
+ pr_info("record %d - vendor-id: %#x\n",
+ i, vendor_id);
+ rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION,
+ &class_code);
+ if (!rc) {
+ class_code >>= 8;
+ pr_info("record %d - class-code: %#x\n",
+ i, class_code);
+ }
+ }
+
+ read_prop_dword(np, "ibm,function-number", &val);
+ read_prop_dword(np, "ibm,privileged-function", &val);
+ read_prop_dword(np, "vendor-id", &val);
+ read_prop_dword(np, "device-id", &val);
+ read_prop_dword(np, "revision-id", &val);
+ read_prop_dword(np, "class-code", &val);
+ read_prop_dword(np, "subsystem-vendor-id", &val);
+ read_prop_dword(np, "subsystem-id", &val);
+ }
+ /*
+ * if "ibm,process-mmio" doesn't exist then per-process mmio is
+ * not supported
+ */
+ val = 0;
+ prop = read_prop_dword(np, "ibm,process-mmio", &val);
+	afu->pp_psa = (prop && val == 1);
+
+ if (cxl_verbose) {
+ read_prop_dword(np, "ibm,supports-aur", &val);
+ read_prop_dword(np, "ibm,supports-csrp", &val);
+ read_prop_dword(np, "ibm,supports-prr", &val);
+ }
+
+ prop = read_prop_dword(np, "ibm,function-error-interrupt", &val);
+ if (prop)
+ afu->serr_hwirq = val;
+
+ pr_devel("AFU handle: %#llx\n", afu->guest->handle);
+ pr_devel("p2n_phys: %#llx (size %#llx)\n",
+ afu->guest->p2n_phys, afu->guest->p2n_size);
+ pr_devel("psn_phys: %#llx (size %#llx)\n",
+ afu->psn_phys, afu->adapter->ps_size);
+ pr_devel("Max number of processes virtualised=%i\n",
+ afu->max_procs_virtualised);
+ pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs,
+ afu->irqs_max);
+ pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq);
+
+ return 0;
+}
+
+static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np)
+{
+ const __be32 *ranges;
+ int len, nranges, i;
+ struct irq_avail *cur;
+
+ ranges = of_get_property(np, "interrupt-ranges", &len);
+ if (ranges == NULL || len < (2 * sizeof(int)))
+ return -EINVAL;
+
+ /*
+ * encoded array of two cells per entry, each cell encoded as
+ * with encode-int
+ */
+ nranges = len / (2 * sizeof(int));
+ if (nranges == 0 || (nranges * 2 * sizeof(int)) != len)
+ return -EINVAL;
+
+ adapter->guest->irq_avail = kzalloc(nranges * sizeof(struct irq_avail),
+ GFP_KERNEL);
+ if (adapter->guest->irq_avail == NULL)
+ return -ENOMEM;
+
+ adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]);
+ for (i = 0; i < nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ cur->offset = be32_to_cpu(ranges[i * 2]);
+ cur->range = be32_to_cpu(ranges[i * 2 + 1]);
+ cur->bitmap = kcalloc(BITS_TO_LONGS(cur->range),
+ sizeof(*cur->bitmap), GFP_KERNEL);
+ if (cur->bitmap == NULL)
+ goto err;
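+		/* Track the lowest offset seen across all the ranges */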
+ if (cur->offset < adapter->guest->irq_base_offset)
+ adapter->guest->irq_base_offset = cur->offset;
+ if (cxl_verbose)
+ pr_info("available IRQ range: %#lx-%#lx (%lu)\n",
+ cur->offset, cur->offset + cur->range - 1,
+ cur->range);
+ }
+ adapter->guest->irq_nranges = nranges;
+ spin_lock_init(&adapter->guest->irq_alloc_lock);
+
+ return 0;
+err:
+ for (i--; i >= 0; i--) {
+ cur = &adapter->guest->irq_avail[i];
+ kfree(cur->bitmap);
+ }
+ kfree(adapter->guest->irq_avail);
+ adapter->guest->irq_avail = NULL;
+ return -ENOMEM;
+}
+
+int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np)
+{
+ if (read_handle(np, &adapter->guest->handle))
+ return -EINVAL;
+ pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle);
+
+ return 0;
+}
+
+int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
+{
+ int rc, len, naddr, i;
+ char *p;
+ const __be32 *prop;
+ u32 val = 0;
+
+ /* Properties are read in the same order as listed in PAPR */
+
+ naddr = of_n_addr_cells(np);
+
+ if (cxl_verbose) {
+ pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n");
+
+ read_prop_dword(np, "#address-cells", &val);
+ read_prop_dword(np, "#size-cells", &val);
+
+ prop = of_get_property(np, "compatible", &len);
+ i = 0;
+ while (i < len) {
+ p = (char *) prop + i;
+ pr_info("compatible: %s\n", p);
+ i += strlen(p) + 1;
+ }
+ read_prop_string(np, "name");
+ read_prop_string(np, "model");
+
+ prop = of_get_property(np, "reg", NULL);
+ if (prop) {
+ pr_info("reg: addr:%#llx size:%#x\n",
+ of_read_number(prop, naddr),
+ be32_to_cpu(prop[naddr]));
+ }
+
+ read_prop_string(np, "ibm,loc-code");
+ }
+
+ if ((rc = read_adapter_irq_config(adapter, np)))
+ return rc;
+
+ if (cxl_verbose) {
+ read_prop_string(np, "device_type");
+ read_prop_string(np, "ibm,phandle");
+ }
+
+ prop = read_prop_dword(np, "ibm,caia-version", &val);
+ if (prop) {
+ adapter->caia_major = (val & 0xFF00) >> 8;
+ adapter->caia_minor = val & 0xFF;
+ }
+
+ prop = read_prop_dword(np, "ibm,psl-revision", &val);
+ if (prop)
+ adapter->psl_rev = val;
+
+ prop = read_prop_string(np, "status");
+ if (prop) {
+ adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop);
+ if (adapter->guest->status == NULL)
+ return -ENOMEM;
+ }
+
+ prop = read_prop_dword(np, "vendor-id", &val);
+ if (prop)
+ adapter->guest->vendor = val;
+
+ prop = read_prop_dword(np, "device-id", &val);
+ if (prop)
+ adapter->guest->device = val;
+
+ if (cxl_verbose) {
+ read_prop_dword(np, "ibm,privileged-facility", &val);
+ read_prop_dword(np, "revision-id", &val);
+ read_prop_dword(np, "class-code", &val);
+ }
+
+ prop = read_prop_dword(np, "subsystem-vendor-id", &val);
+ if (prop)
+ adapter->guest->subsystem_vendor = val;
+
+ prop = read_prop_dword(np, "subsystem-id", &val);
+ if (prop)
+ adapter->guest->subsystem = val;
+
+ if (cxl_verbose)
+ read_vpd(adapter, NULL);
+
+ return 0;
+}
+
+static int cxl_of_remove(struct platform_device *pdev)
+{
+ struct cxl *adapter;
+ int afu;
+
+ adapter = dev_get_drvdata(&pdev->dev);
+ for (afu = 0; afu < adapter->slices; afu++)
+ cxl_guest_remove_afu(adapter->afu[afu]);
+
+ cxl_guest_remove_adapter(adapter);
+ return 0;
+}
+
+static void cxl_of_shutdown(struct platform_device *pdev)
+{
+ cxl_of_remove(pdev);
+}
+
+int cxl_of_probe(struct platform_device *pdev)
+{
+ struct device_node *np = NULL;
+ struct device_node *afu_np = NULL;
+ struct cxl *adapter = NULL;
+ int ret;
+ int slice, slice_ok;
+
+ pr_devel("in %s\n", __func__);
+
+ np = pdev->dev.of_node;
+ if (np == NULL)
+ return -ENODEV;
+
+ /* init adapter */
+ adapter = cxl_guest_init_adapter(np, pdev);
+ if (IS_ERR(adapter)) {
+ dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter));
+ return PTR_ERR(adapter);
+ }
+
+ /* init afu */
+ slice_ok = 0;
+ for (afu_np = NULL, slice = 0; (afu_np = of_get_next_child(np, afu_np)); slice++) {
+ if ((ret = cxl_guest_init_afu(adapter, slice, afu_np)))
+ dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n",
+ slice, ret);
+ else
+ slice_ok++;
+ }
+
+ if (slice_ok == 0) {
+		dev_info(&pdev->dev, "No active AFU\n");
+ adapter->slices = 0;
+ }
+
+ if (afu_np)
+ of_node_put(afu_np);
+ return 0;
+}
+
+static const struct of_device_id cxl_of_match[] = {
+ { .compatible = "ibm,coherent-platform-facility",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, cxl_of_match);
+
+struct platform_driver cxl_of_driver = {
+ .driver = {
+ .name = "cxl_of",
+ .of_match_table = cxl_of_match,
+ .owner = THIS_MODULE
+ },
+ .probe = cxl_of_probe,
+ .remove = cxl_of_remove,
+ .shutdown = cxl_of_shutdown,
+};
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 0c6c17a1c59e..2844e975bf79 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -19,7 +19,6 @@
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
-#include <asm/pci-bridge.h> /* for struct pci_controller */
#include <asm/pnv-pci.h>
#include <asm/io.h>
@@ -90,8 +89,8 @@
/* This works a little different than the p1/p2 register accesses to make it
* easier to pull out individual fields */
-#define AFUD_READ(afu, off) in_be64(afu->afu_desc_mmio + off)
-#define AFUD_READ_LE(afu, off) in_le64(afu->afu_desc_mmio + off)
+#define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off)
+#define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
@@ -116,24 +115,6 @@
#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
-u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val;
-
- val = cxl_afu_cr_read32(afu, cr, aligned_off);
- return (val >> ((off & 0x2) * 8)) & 0xffff;
-}
-
-u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val;
-
- val = cxl_afu_cr_read32(afu, cr, aligned_off);
- return (val >> ((off & 0x3) * 8)) & 0xff;
-}
-
static const struct pci_device_id cxl_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
@@ -433,8 +414,8 @@ static int init_implementation_afu_regs(struct cxl_afu *afu)
return 0;
}
-int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
- unsigned int virq)
+int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
+ unsigned int virq)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
@@ -476,28 +457,30 @@ int cxl_update_image_control(struct cxl *adapter)
return 0;
}
-int cxl_alloc_one_irq(struct cxl *adapter)
+int cxl_pci_alloc_one_irq(struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_alloc_hwirqs(dev, 1);
}
-void cxl_release_one_irq(struct cxl *adapter, int hwirq)
+void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_release_hwirqs(dev, hwirq, 1);
}
-int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num)
+int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter, unsigned int num)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}
-void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter)
+void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
@@ -558,7 +541,7 @@ static int switch_card_to_cxl(struct pci_dev *dev)
return 0;
}
-static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
+static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
u64 p1n_base, p2n_base, afu_desc;
const u64 p1n_size = 0x100;
@@ -566,15 +549,15 @@ static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p
p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
p2n_base = p2_base(dev) + (afu->slice * p2n_size);
- afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
- afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);
+ afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
+ afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);
- if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
+ if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
goto err;
if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
goto err1;
if (afu_desc) {
- if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
+ if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
goto err2;
}
@@ -582,62 +565,41 @@ static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p
err2:
iounmap(afu->p2n_mmio);
err1:
- iounmap(afu->p1n_mmio);
+ iounmap(afu->native->p1n_mmio);
err:
dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
return -ENOMEM;
}
-static void cxl_unmap_slice_regs(struct cxl_afu *afu)
+static void pci_unmap_slice_regs(struct cxl_afu *afu)
{
if (afu->p2n_mmio) {
iounmap(afu->p2n_mmio);
afu->p2n_mmio = NULL;
}
- if (afu->p1n_mmio) {
- iounmap(afu->p1n_mmio);
- afu->p1n_mmio = NULL;
+ if (afu->native->p1n_mmio) {
+ iounmap(afu->native->p1n_mmio);
+ afu->native->p1n_mmio = NULL;
}
- if (afu->afu_desc_mmio) {
- iounmap(afu->afu_desc_mmio);
- afu->afu_desc_mmio = NULL;
+ if (afu->native->afu_desc_mmio) {
+ iounmap(afu->native->afu_desc_mmio);
+ afu->native->afu_desc_mmio = NULL;
}
}
-static void cxl_release_afu(struct device *dev)
+void cxl_pci_release_afu(struct device *dev)
{
struct cxl_afu *afu = to_cxl_afu(dev);
- pr_devel("cxl_release_afu\n");
+ pr_devel("%s\n", __func__);
idr_destroy(&afu->contexts_idr);
cxl_release_spa(afu);
+ kfree(afu->native);
kfree(afu);
}
-static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
-{
- struct cxl_afu *afu;
-
- if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
- return NULL;
-
- afu->adapter = adapter;
- afu->dev.parent = &adapter->dev;
- afu->dev.release = cxl_release_afu;
- afu->slice = slice;
- idr_init(&afu->contexts_idr);
- mutex_init(&afu->contexts_lock);
- spin_lock_init(&afu->afu_cntl_lock);
- mutex_init(&afu->spa_mutex);
-
- afu->prefault_mode = CXL_PREFAULT_NONE;
- afu->irqs_max = afu->adapter->user_irqs;
-
- return afu;
-}
-
/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
@@ -659,7 +621,7 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
afu->psa = AFUD_PPPSA_PSA(val);
if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
- afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);
+ afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);
val = AFUD_READ_CR(afu);
afu->crs_len = AFUD_CR_LEN(val) * 256;
@@ -686,10 +648,11 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
- int i;
+ int i, rc;
+ u32 val;
if (afu->psa && afu->adapter->ps_size <
- (afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
+ (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
return -ENODEV;
}
@@ -698,7 +661,8 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");
for (i = 0; i < afu->crs_num; i++) {
- if ((cxl_afu_cr_read32(afu, i, 0) == 0)) {
+ rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
+ if (rc || val == 0) {
dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
return -EINVAL;
}
@@ -719,7 +683,7 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
- if (__cxl_afu_reset(afu))
+ if (cxl_ops->afu_reset(afu))
return -EIO;
if (cxl_afu_disable(afu))
return -EIO;
@@ -767,13 +731,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
 * 4/8 byte aligned accesses. So in case the requested offset/count aren't
 * 8 byte aligned, the function uses a bounce buffer, which can be at most PAGE_SIZE.
*/
-ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
+ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count)
{
loff_t aligned_start, aligned_end;
size_t aligned_length;
void *tbuf;
- const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;
+ const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;
if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
return 0;
@@ -804,18 +768,18 @@ ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
return count;
}
-static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
+static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
int rc;
- if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
+ if ((rc = pci_map_slice_regs(afu, adapter, dev)))
return rc;
if ((rc = sanitise_afu_regs(afu)))
goto err1;
/* We need to reset the AFU before we can read the AFU descriptor */
- if ((rc = __cxl_afu_reset(afu)))
+ if ((rc = cxl_ops->afu_reset(afu)))
goto err1;
if (cxl_verbose)
@@ -830,44 +794,50 @@ static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
if ((rc = init_implementation_afu_regs(afu)))
goto err1;
- if ((rc = cxl_register_serr_irq(afu)))
+ if ((rc = cxl_native_register_serr_irq(afu)))
goto err1;
- if ((rc = cxl_register_psl_irq(afu)))
+ if ((rc = cxl_native_register_psl_irq(afu)))
goto err2;
return 0;
err2:
- cxl_release_serr_irq(afu);
+ cxl_native_release_serr_irq(afu);
err1:
- cxl_unmap_slice_regs(afu);
+ pci_unmap_slice_regs(afu);
return rc;
}
-static void cxl_deconfigure_afu(struct cxl_afu *afu)
+static void pci_deconfigure_afu(struct cxl_afu *afu)
{
- cxl_release_psl_irq(afu);
- cxl_release_serr_irq(afu);
- cxl_unmap_slice_regs(afu);
+ cxl_native_release_psl_irq(afu);
+ cxl_native_release_serr_irq(afu);
+ pci_unmap_slice_regs(afu);
}
-static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
struct cxl_afu *afu;
- int rc;
+ int rc = -ENOMEM;
afu = cxl_alloc_afu(adapter, slice);
if (!afu)
return -ENOMEM;
+ afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
+ if (!afu->native)
+ goto err_free_afu;
+
+ mutex_init(&afu->native->spa_mutex);
+
rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
if (rc)
- goto err_free;
+ goto err_free_native;
- rc = cxl_configure_afu(afu, adapter, dev);
+ rc = pci_configure_afu(afu, adapter, dev);
if (rc)
- goto err_free;
+ goto err_free_native;
/* Don't care if this fails */
cxl_debugfs_afu_add(afu);
@@ -890,24 +860,27 @@ static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
return 0;
err_put1:
- cxl_deconfigure_afu(afu);
+ pci_deconfigure_afu(afu);
cxl_debugfs_afu_remove(afu);
device_unregister(&afu->dev);
return rc;
-err_free:
+err_free_native:
+ kfree(afu->native);
+err_free_afu:
kfree(afu);
return rc;
}
-static void cxl_remove_afu(struct cxl_afu *afu)
+static void cxl_pci_remove_afu(struct cxl_afu *afu)
{
- pr_devel("cxl_remove_afu\n");
+ pr_devel("%s\n", __func__);
if (!afu)
return;
+ cxl_pci_vphb_remove(afu);
cxl_sysfs_afu_remove(afu);
cxl_debugfs_afu_remove(afu);
@@ -916,13 +889,13 @@ static void cxl_remove_afu(struct cxl_afu *afu)
spin_unlock(&afu->adapter->afu_list_lock);
cxl_context_detach_all(afu);
- cxl_afu_deactivate_mode(afu);
+ cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
- cxl_deconfigure_afu(afu);
+ pci_deconfigure_afu(afu);
device_unregister(&afu->dev);
}
-int cxl_reset(struct cxl *adapter)
+int cxl_pci_reset(struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
int rc;
@@ -956,17 +929,17 @@ static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
- if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
+ if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
goto err3;
- if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
+ if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
goto err4;
return 0;
err4:
- iounmap(adapter->p1_mmio);
- adapter->p1_mmio = NULL;
+ iounmap(adapter->native->p1_mmio);
+ adapter->native->p1_mmio = NULL;
err3:
pci_release_region(dev, 0);
err2:
@@ -977,14 +950,14 @@ err1:
static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
- if (adapter->p1_mmio) {
- iounmap(adapter->p1_mmio);
- adapter->p1_mmio = NULL;
+ if (adapter->native->p1_mmio) {
+ iounmap(adapter->native->p1_mmio);
+ adapter->native->p1_mmio = NULL;
pci_release_region(to_pci_dev(adapter->dev.parent), 2);
}
- if (adapter->p2_mmio) {
- iounmap(adapter->p2_mmio);
- adapter->p2_mmio = NULL;
+ if (adapter->native->p2_mmio) {
+ iounmap(adapter->native->p2_mmio);
+ adapter->native->p2_mmio = NULL;
pci_release_region(to_pci_dev(adapter->dev.parent), 0);
}
}
@@ -1025,10 +998,10 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
/* Convert everything to bytes, because there is NO WAY I'd look at the
* code a month later and forget what units these are in ;-) */
- adapter->ps_off = ps_off * 64 * 1024;
+ adapter->native->ps_off = ps_off * 64 * 1024;
adapter->ps_size = ps_size * 64 * 1024;
- adapter->afu_desc_off = afu_desc_off * 64 * 1024;
- adapter->afu_desc_size = afu_desc_size *64 * 1024;
+ adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
+ adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;
/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;
@@ -1079,21 +1052,26 @@ static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
return -EINVAL;
}
- if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
+ if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
return -EINVAL;
}
- if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
+ if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
dev_err(&dev->dev, "ABORTING: Problem state size larger than "
"available in BAR2: 0x%llx > 0x%llx\n",
- adapter->ps_size, p2_size(dev) - adapter->ps_off);
+ adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
return -EINVAL;
}
return 0;
}
+ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
+{
+ return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
+}
+
static void cxl_release_adapter(struct device *dev)
{
struct cxl *adapter = to_cxl_adapter(dev);
@@ -1102,33 +1080,10 @@ static void cxl_release_adapter(struct device *dev)
cxl_remove_adapter_nr(adapter);
+ kfree(adapter->native);
kfree(adapter);
}
-static struct cxl *cxl_alloc_adapter(void)
-{
- struct cxl *adapter;
-
- if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
- return NULL;
-
- spin_lock_init(&adapter->afu_list_lock);
-
- if (cxl_alloc_adapter_nr(adapter))
- goto err1;
-
- if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
- goto err2;
-
- return adapter;
-
-err2:
- cxl_remove_adapter_nr(adapter);
-err1:
- kfree(adapter);
- return NULL;
-}
-
#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
static int sanitise_adapter_regs(struct cxl *adapter)
@@ -1192,7 +1147,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = cxl_setup_psl_timebase(adapter, dev)))
goto err;
- if ((rc = cxl_register_psl_err_irq(adapter)))
+ if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
return 0;
@@ -1207,13 +1162,13 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
{
struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
- cxl_release_psl_err_irq(adapter);
+ cxl_native_release_psl_err_irq(adapter);
cxl_unmap_adapter_regs(adapter);
pci_disable_device(pdev);
}
-static struct cxl *cxl_init_adapter(struct pci_dev *dev)
+static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
struct cxl *adapter;
int rc;
@@ -1222,6 +1177,12 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
if (!adapter)
return ERR_PTR(-ENOMEM);
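+	/* cxl_native holds the bare-metal only fields; freed in cxl_release_adapter() */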
+ adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
+ if (!adapter->native) {
+ rc = -ENOMEM;
+ goto err_release;
+ }
+
/* Set defaults for parameters which need to persist over
* configure/reconfigure
*/
@@ -1231,8 +1192,7 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
rc = cxl_configure_adapter(adapter, dev);
if (rc) {
pci_disable_device(dev);
- cxl_release_adapter(&adapter->dev);
- return ERR_PTR(rc);
+ goto err_release;
}
/* Don't care if this one fails: */
@@ -1258,9 +1218,13 @@ err_put1:
cxl_deconfigure_adapter(adapter);
device_unregister(&adapter->dev);
return ERR_PTR(rc);
+
+err_release:
+ cxl_release_adapter(&adapter->dev);
+ return ERR_PTR(rc);
}
-static void cxl_remove_adapter(struct cxl *adapter)
+static void cxl_pci_remove_adapter(struct cxl *adapter)
{
pr_devel("cxl_remove_adapter\n");
@@ -1278,17 +1242,22 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
int slice;
int rc;
+ if (cxl_pci_is_vphb_device(dev)) {
+ dev_dbg(&dev->dev, "cxl_probe: Ignoring cxl vphb device\n");
+ return -ENODEV;
+ }
+
if (cxl_verbose)
dump_cxl_config_space(dev);
- adapter = cxl_init_adapter(dev);
+ adapter = cxl_pci_init_adapter(dev);
if (IS_ERR(adapter)) {
dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
return PTR_ERR(adapter);
}
for (slice = 0; slice < adapter->slices; slice++) {
- if ((rc = cxl_init_afu(adapter, slice, dev))) {
+ if ((rc = pci_init_afu(adapter, slice, dev))) {
dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
continue;
}
@@ -1313,10 +1282,9 @@ static void cxl_remove(struct pci_dev *dev)
*/
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
- cxl_pci_vphb_remove(afu);
- cxl_remove_afu(afu);
+ cxl_pci_remove_afu(afu);
}
- cxl_remove_adapter(adapter);
+ cxl_pci_remove_adapter(adapter);
}
static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
@@ -1462,8 +1430,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
return result;
cxl_context_detach_all(afu);
- cxl_afu_deactivate_mode(afu);
- cxl_deconfigure_afu(afu);
+ cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
+ pci_deconfigure_afu(afu);
}
cxl_deconfigure_adapter(adapter);
@@ -1486,14 +1454,12 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
- if (cxl_configure_afu(afu, adapter, pdev))
+ if (pci_configure_afu(afu, adapter, pdev))
goto err;
if (cxl_afu_select_best_mode(afu))
goto err;
- cxl_pci_vphb_reconfigure(afu);
-
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
/* Reset the device context.
* TODO: make this less disruptive
@@ -1509,7 +1475,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
afu_dev->dev.archdata.cxl_ctx = ctx;
- if (cxl_afu_check_and_enable(afu))
+ if (cxl_ops->afu_check_and_enable(afu))
goto err;
afu_dev->error_state = pci_channel_io_normal;
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 02006f7109a8..25913c08794c 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -69,7 +69,7 @@ static ssize_t reset_adapter_store(struct device *device,
if ((rc != 1) || (val != 1))
return -EINVAL;
- if ((rc = cxl_reset(adapter)))
+ if ((rc = cxl_ops->adapter_reset(adapter)))
return rc;
return count;
}
@@ -165,7 +165,7 @@ static ssize_t pp_mmio_off_show(struct device *device,
{
struct cxl_afu *afu = to_afu_chardev_m(device);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_offset);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}
static ssize_t pp_mmio_len_show(struct device *device,
@@ -211,7 +211,7 @@ static ssize_t reset_store_afu(struct device *device,
goto err;
}
- if ((rc = __cxl_afu_reset(afu)))
+ if ((rc = cxl_ops->afu_reset(afu)))
goto err;
rc = count;
@@ -253,8 +253,14 @@ static ssize_t irqs_max_store(struct device *device,
if (irqs_max < afu->pp_irqs)
return -EINVAL;
- if (irqs_max > afu->adapter->user_irqs)
- return -EINVAL;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (irqs_max > afu->adapter->user_irqs)
+ return -EINVAL;
+ } else {
+ /* pHyp sets a per-AFU limit */
+ if (irqs_max > afu->guest->max_ints)
+ return -EINVAL;
+ }
afu->irqs_max = irqs_max;
return count;
@@ -348,7 +354,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
}
/*
- * cxl_afu_deactivate_mode needs to be done outside the lock, prevent
+ * afu_deactivate_mode needs to be done outside the lock, prevent
* other contexts coming in before we are ready:
*/
old_mode = afu->current_mode;
@@ -357,9 +363,9 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
mutex_unlock(&afu->contexts_lock);
- if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
+ if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
return rc;
- if ((rc = cxl_afu_activate_mode(afu, mode)))
+ if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
return rc;
return count;
@@ -386,10 +392,9 @@ static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct cxl_afu *afu = to_cxl_afu(container_of(kobj,
- struct device, kobj));
+ struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
- return cxl_afu_read_err_buffer(afu, buf, off, count);
+ return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}
static struct device_attribute afu_attrs[] = {
@@ -406,24 +411,39 @@ static struct device_attribute afu_attrs[] = {
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
+ struct device_attribute *dev_attr;
int i, rc;
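+	/* Only create the attributes that the active backend (bare-metal or guest) supports */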
for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
- if ((rc = device_create_file(&adapter->dev, &adapter_attrs[i])))
- goto err;
+ dev_attr = &adapter_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_ADAPTER_ATTRS)) {
+ if ((rc = device_create_file(&adapter->dev, dev_attr)))
+ goto err;
+ }
}
return 0;
err:
- for (i--; i >= 0; i--)
- device_remove_file(&adapter->dev, &adapter_attrs[i]);
+ for (i--; i >= 0; i--) {
+ dev_attr = &adapter_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_ADAPTER_ATTRS))
+ device_remove_file(&adapter->dev, dev_attr);
+ }
return rc;
}
+
void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
+ struct device_attribute *dev_attr;
int i;
- for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++)
- device_remove_file(&adapter->dev, &adapter_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
+ dev_attr = &adapter_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_ADAPTER_ATTRS))
+ device_remove_file(&adapter->dev, dev_attr);
+ }
}
struct afu_config_record {
@@ -467,12 +487,14 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
loff_t off, size_t count)
{
struct afu_config_record *cr = to_cr(kobj);
- struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
+ struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
- u64 i, j, val;
+ u64 i, j, val, rc;
for (i = 0; i < count;) {
- val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7);
+ rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
+ if (rc)
+ val = ~0ULL;
for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
buf[i] = (val >> (j * 8)) & 0xff;
}
@@ -517,14 +539,22 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
return ERR_PTR(-ENOMEM);
cr->cr = cr_idx;
- cr->device = cxl_afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID);
- cr->vendor = cxl_afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID);
- cr->class = cxl_afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION) >> 8;
+
+ rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
+ if (rc)
+ goto err;
+ rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
+ if (rc)
+ goto err;
+ rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
+ if (rc)
+ goto err;
+ cr->class >>= 8;
/*
* Export raw AFU PCIe-like config record. For now this is read only by
* root - we can expand that later to be readable by non-root and maybe
- * even writable provided we have a good use-case. Once we suport
+ * even writable provided we have a good use-case. Once we support
* exposing AFUs through a virtual PHB they will get that for free from
* Linux' PCI infrastructure, but until then it's not clear that we
* need it for anything since the main use case is just identifying
@@ -562,6 +592,7 @@ err:
void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
+ struct device_attribute *dev_attr;
struct afu_config_record *cr, *tmp;
int i;
@@ -569,8 +600,12 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu)
if (afu->eb_len)
device_remove_bin_file(&afu->dev, &afu->attr_eb);
- for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
- device_remove_file(&afu->dev, &afu_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
+ dev_attr = &afu_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_ATTRS))
+ device_remove_file(&afu->dev, &afu_attrs[i]);
+ }
list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
@@ -580,14 +615,19 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu)
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
+ struct device_attribute *dev_attr;
struct afu_config_record *cr;
int i, rc;
INIT_LIST_HEAD(&afu->crs);
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
- if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
- goto err;
+ dev_attr = &afu_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_ATTRS)) {
+ if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
+ goto err;
+ }
}
/* conditionally create and add the binary file for the error info buffer */
@@ -626,32 +666,50 @@ err:
/* reset the eb_len as we haven't created the bin attr */
afu->eb_len = 0;
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
+ dev_attr = &afu_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_ATTRS))
device_remove_file(&afu->dev, &afu_attrs[i]);
+ }
return rc;
}
int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
+ struct device_attribute *dev_attr;
int i, rc;
for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
- if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
- goto err;
+ dev_attr = &afu_master_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_MASTER_ATTRS)) {
+ if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
+ goto err;
+ }
}
return 0;
err:
- for (i--; i >= 0; i--)
- device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+ for (i--; i >= 0; i--) {
+ dev_attr = &afu_master_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_MASTER_ATTRS))
+ device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+ }
return rc;
}
void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
+ struct device_attribute *dev_attr;
int i;
- for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++)
- device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
+ dev_attr = &afu_master_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_MASTER_ATTRS))
+ device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+ }
}
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
index 6e1e2adfba8e..751d6119683e 100644
--- a/drivers/misc/cxl/trace.h
+++ b/drivers/misc/cxl/trace.h
@@ -450,6 +450,199 @@ DEFINE_EVENT(cxl_pe_class, cxl_slbia,
TP_ARGS(ctx)
);
+TRACE_EVENT(cxl_hcall,
+ TP_PROTO(u64 unit_address, u64 process_token, long rc),
+
+ TP_ARGS(unit_address, process_token, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(u64, process_token)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->process_token = process_token;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("unit_address=0x%016llx process_token=0x%016llx rc=%li",
+ __entry->unit_address,
+ __entry->process_token,
+ __entry->rc
+ )
+);
+
+TRACE_EVENT(cxl_hcall_control,
+ TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
+ u64 p4, unsigned long r4, long rc),
+
+ TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(char *, fct)
+ __field(u64, p1)
+ __field(u64, p2)
+ __field(u64, p3)
+ __field(u64, p4)
+ __field(unsigned long, r4)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->fct = fct;
+ __entry->p1 = p1;
+ __entry->p2 = p2;
+ __entry->p3 = p3;
+ __entry->p4 = p4;
+ __entry->r4 = r4;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("unit_address=%#.16llx %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li",
+ __entry->unit_address,
+ __entry->fct,
+ __entry->p1,
+ __entry->p2,
+ __entry->p3,
+ __entry->p4,
+ __entry->r4,
+ __entry->rc
+ )
+);
+
+TRACE_EVENT(cxl_hcall_attach,
+ TP_PROTO(u64 unit_address, u64 phys_addr, unsigned long process_token,
+ unsigned long mmio_addr, unsigned long mmio_size, long rc),
+
+ TP_ARGS(unit_address, phys_addr, process_token,
+ mmio_addr, mmio_size, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(u64, phys_addr)
+ __field(unsigned long, process_token)
+ __field(unsigned long, mmio_addr)
+ __field(unsigned long, mmio_size)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->phys_addr = phys_addr;
+ __entry->process_token = process_token;
+ __entry->mmio_addr = mmio_addr;
+ __entry->mmio_size = mmio_size;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("unit_address=0x%016llx phys_addr=0x%016llx "
+ "token=0x%.8lx mmio_addr=0x%lx mmio_size=0x%lx rc=%li",
+ __entry->unit_address,
+ __entry->phys_addr,
+ __entry->process_token,
+ __entry->mmio_addr,
+ __entry->mmio_size,
+ __entry->rc
+ )
+);
+
+DEFINE_EVENT(cxl_hcall, cxl_hcall_detach,
+ TP_PROTO(u64 unit_address, u64 process_token, long rc),
+ TP_ARGS(unit_address, process_token, rc)
+);
+
+DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_function,
+ TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
+ u64 p4, unsigned long r4, long rc),
+ TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
+);
+
+DEFINE_EVENT(cxl_hcall, cxl_hcall_collect_int_info,
+ TP_PROTO(u64 unit_address, u64 process_token, long rc),
+ TP_ARGS(unit_address, process_token, rc)
+);
+
+TRACE_EVENT(cxl_hcall_control_faults,
+ TP_PROTO(u64 unit_address, u64 process_token,
+ u64 control_mask, u64 reset_mask, unsigned long r4,
+ long rc),
+
+ TP_ARGS(unit_address, process_token,
+ control_mask, reset_mask, r4, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(u64, process_token)
+ __field(u64, control_mask)
+ __field(u64, reset_mask)
+ __field(unsigned long, r4)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->process_token = process_token;
+ __entry->control_mask = control_mask;
+ __entry->reset_mask = reset_mask;
+ __entry->r4 = r4;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("unit_address=0x%016llx process_token=0x%llx "
+ "control_mask=%#llx reset_mask=%#llx r4=%#lx rc=%li",
+ __entry->unit_address,
+ __entry->process_token,
+ __entry->control_mask,
+ __entry->reset_mask,
+ __entry->r4,
+ __entry->rc
+ )
+);
+
+DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_facility,
+ TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
+ u64 p4, unsigned long r4, long rc),
+ TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
+);
+
+TRACE_EVENT(cxl_hcall_download_facility,
+ TP_PROTO(u64 unit_address, char *fct, u64 list_address, u64 num,
+ unsigned long r4, long rc),
+
+ TP_ARGS(unit_address, fct, list_address, num, r4, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(char *, fct)
+ __field(u64, list_address)
+ __field(u64, num)
+ __field(unsigned long, r4)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->fct = fct;
+ __entry->list_address = list_address;
+ __entry->num = num;
+ __entry->r4 = r4;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("%#.16llx, %s(%#llx, %#llx), %#lx): %li",
+ __entry->unit_address,
+ __entry->fct,
+ __entry->list_address,
+ __entry->num,
+ __entry->r4,
+ __entry->rc
+ )
+);
+
#endif /* _CXL_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index cbd4331fb45c..cdc7723b845d 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -49,7 +49,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
phb = pci_bus_to_host(dev->bus);
afu = (struct cxl_afu *)phb->private_data;
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
return false;
}
@@ -66,7 +66,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
return false;
dev->dev.archdata.cxl_ctx = ctx;
- return (cxl_afu_check_and_enable(afu) == 0);
+ return (cxl_ops->afu_check_and_enable(afu) == 0);
}
static void cxl_pci_disable_device(struct pci_dev *dev)
@@ -99,113 +99,90 @@ static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
return (bus << 8) + devfn;
}
-static unsigned long cxl_pcie_cfg_addr(struct pci_controller* phb,
- u8 bus, u8 devfn, int offset)
-{
- int record = cxl_pcie_cfg_record(bus, devfn);
-
- return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset;
-}
-
-
static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
- int offset, int len,
- volatile void __iomem **ioaddr,
- u32 *mask, int *shift)
+ struct cxl_afu **_afu, int *_record)
{
struct pci_controller *phb;
struct cxl_afu *afu;
- unsigned long addr;
+ int record;
phb = pci_bus_to_host(bus);
if (phb == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
- afu = (struct cxl_afu *)phb->private_data;
- if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
+ afu = (struct cxl_afu *)phb->private_data;
+ record = cxl_pcie_cfg_record(bus->number, devfn);
+ if (record > afu->crs_num)
return PCIBIOS_DEVICE_NOT_FOUND;
- if (offset >= (unsigned long)phb->cfg_data)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset);
- *ioaddr = (void *)(addr & ~0x3ULL);
- *shift = ((addr & 0x3) * 8);
- switch (len) {
- case 1:
- *mask = 0xff;
- break;
- case 2:
- *mask = 0xffff;
- break;
- default:
- *mask = 0xffffffff;
- break;
- }
+ *_afu = afu;
+ *_record = record;
return 0;
}
-
-static inline bool cxl_config_link_ok(struct pci_bus *bus)
-{
- struct pci_controller *phb;
- struct cxl_afu *afu;
-
- /* Config space IO is based on phb->cfg_addr, which is based on
- * afu_desc_mmio. This isn't safe to read/write when the link
- * goes down, as EEH tears down MMIO space.
- *
- * Check if the link is OK before proceeding.
- */
-
- phb = pci_bus_to_host(bus);
- if (phb == NULL)
- return false;
- afu = (struct cxl_afu *)phb->private_data;
- return cxl_adapter_link_ok(afu->adapter);
-}
-
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
- volatile void __iomem *ioaddr;
- int shift, rc;
- u32 mask;
+ int rc, record;
+ struct cxl_afu *afu;
+ u8 val8;
+ u16 val16;
+ u32 val32;
- rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
- &mask, &shift);
+ rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
if (rc)
return rc;
- if (!cxl_config_link_ok(bus))
+ switch (len) {
+ case 1:
+ rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
+ *val = val8;
+ break;
+ case 2:
+ rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
+ *val = val16;
+ break;
+ case 4:
+ rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
+ *val = val32;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ if (rc)
return PCIBIOS_DEVICE_NOT_FOUND;
- /* Can only read 32 bits */
- *val = (in_le32(ioaddr) >> shift) & mask;
return PCIBIOS_SUCCESSFUL;
}
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
- volatile void __iomem *ioaddr;
- u32 v, mask;
- int shift, rc;
+ int rc, record;
+ struct cxl_afu *afu;
- rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
- &mask, &shift);
+ rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
if (rc)
return rc;
- if (!cxl_config_link_ok(bus))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /* Can only write 32 bits so do read-modify-write */
- mask <<= shift;
- val <<= shift;
+ switch (len) {
+ case 1:
+ rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
+ break;
+ case 2:
+ rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
+ break;
+ case 4:
+ rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
+ break;
+ default:
+ WARN_ON(1);
+ }
- v = (in_le32(ioaddr) & ~mask) | (val & mask);
+ if (rc)
+ return PCIBIOS_SET_FAILED;
- out_le32(ioaddr, v);
return PCIBIOS_SUCCESSFUL;
}
@@ -233,23 +210,31 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
{
struct pci_dev *phys_dev;
struct pci_controller *phb, *phys_phb;
-
- phys_dev = to_pci_dev(afu->adapter->dev.parent);
- phys_phb = pci_bus_to_host(phys_dev->bus);
+ struct device_node *vphb_dn;
+ struct device *parent;
+
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ phys_dev = to_pci_dev(afu->adapter->dev.parent);
+ phys_phb = pci_bus_to_host(phys_dev->bus);
+ vphb_dn = phys_phb->dn;
+ parent = &phys_dev->dev;
+ } else {
+ vphb_dn = afu->adapter->dev.parent->of_node;
+ parent = afu->adapter->dev.parent;
+ }
/* Alloc and setup PHB data structure */
- phb = pcibios_alloc_controller(phys_phb->dn);
-
+ phb = pcibios_alloc_controller(vphb_dn);
if (!phb)
return -ENODEV;
/* Setup parent in sysfs */
- phb->parent = &phys_dev->dev;
+ phb->parent = parent;
/* Setup the PHB using arch provided callback */
phb->ops = &cxl_pcie_pci_ops;
- phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
- phb->cfg_data = (void *)(u64)afu->crs_len;
+ phb->cfg_addr = NULL;
+ phb->cfg_data = 0;
phb->private_data = afu;
phb->controller_ops = cxl_pci_controller_ops;
@@ -272,15 +257,6 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
return 0;
}
-void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
-{
- /* When we are reconfigured, the AFU's MMIO space is unmapped
- * and remapped. We need to reflect this in the PHB's view of
- * the world.
- */
- afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
-}
-
void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
struct pci_controller *phb;
@@ -296,6 +272,15 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
pcibios_free_controller(phb);
}
+bool cxl_pci_is_vphb_device(struct pci_dev *dev)
+{
+ struct pci_controller *phb;
+
+ phb = pci_bus_to_host(dev->bus);
+
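+	/* Devices hanging off a cxl vPHB use cxl_pcie_pci_ops; the physical card does not */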
+ return (phb->ops == &cxl_pcie_pci_ops);
+}
+
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
struct pci_controller *phb;
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 04f2e1fa9dd1..cfc493c2e30a 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -3,6 +3,8 @@ menu "EEPROM support"
config EEPROM_AT24
tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
depends on I2C && SYSFS
+ select REGMAP
+ select NVMEM
help
Enable this driver to get read/write support to most I2C EEPROMs
and compatible devices like FRAMs, SRAMs, ROMs etc. After you
@@ -30,6 +32,8 @@ config EEPROM_AT24
config EEPROM_AT25
tristate "SPI EEPROMs from most vendors"
depends on SPI && SYSFS
+ select REGMAP
+ select NVMEM
help
Enable this driver to get read/write support to most SPI EEPROMs,
after you configure the board init code to know about each eeprom
@@ -74,6 +78,8 @@ config EEPROM_93CX6
config EEPROM_93XX46
tristate "Microwire EEPROM 93XX46 support"
depends on SPI && SYSFS
+ select REGMAP
+ select NVMEM
help
Driver for the microwire EEPROM chipsets 93xx46x. The driver
supports both read and write commands and also the command to
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 5d7c0900fa1b..089d6943f68a 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
-#include <linux/sysfs.h>
#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/bitops.h>
@@ -23,6 +22,8 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/i2c.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
#include <linux/platform_data/at24.h>
/*
@@ -55,7 +56,6 @@
struct at24_data {
struct at24_platform_data chip;
- struct memory_accessor macc;
int use_smbus;
int use_smbus_write;
@@ -64,12 +64,15 @@ struct at24_data {
* but not from changes by other I2C masters.
*/
struct mutex lock;
- struct bin_attribute bin;
u8 *writebuf;
unsigned write_max;
unsigned num_addresses;
+ struct regmap_config regmap_config;
+ struct nvmem_config nvmem_config;
+ struct nvmem_device *nvmem;
+
/*
* Some chips tie up multiple I2C addresses; dummy devices reserve
* them for us, and we'll use them with SMBus calls.
@@ -283,17 +286,6 @@ static ssize_t at24_read(struct at24_data *at24,
return retval;
}
-static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- struct at24_data *at24;
-
- at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
- return at24_read(at24, buf, off, count);
-}
-
-
/*
* Note that if the hardware write-protect pin is pulled high, the whole
* chip is normally write protected. But there are plenty of product
@@ -414,40 +406,49 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
return retval;
}
-static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- struct at24_data *at24;
-
- at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
- return at24_write(at24, buf, off, count);
-}
-
/*-------------------------------------------------------------------------*/
/*
- * This lets other kernel code access the eeprom data. For example, it
- * might hold a board's Ethernet address, or board-specific calibration
- * data generated on the manufacturing floor.
- */
-
-static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
- off_t offset, size_t count)
+ * Provide a regmap interface, which is registered with the NVMEM
+ * framework
+ */
+static int at24_regmap_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
- struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+ struct at24_data *at24 = context;
+ off_t offset = *(u32 *)reg;
+ int err;
- return at24_read(at24, buf, offset, count);
+ err = at24_read(at24, val, offset, val_size);
+ if (err)
+ return err;
+ return 0;
}
-static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
- off_t offset, size_t count)
+static int at24_regmap_write(void *context, const void *data, size_t count)
{
- struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+ struct at24_data *at24 = context;
+ const char *buf;
+ u32 offset;
+ size_t len;
+ int err;
- return at24_write(at24, buf, offset, count);
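+	/* The regmap bus prepends the 32-bit register address (reg_bits = 32) to the payload */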
+ memcpy(&offset, data, sizeof(offset));
+ buf = (const char *)data + sizeof(offset);
+ len = count - sizeof(offset);
+
+ err = at24_write(at24, buf, offset, len);
+ if (err)
+ return err;
+ return 0;
}
+static const struct regmap_bus at24_regmap_bus = {
+ .read = at24_regmap_read,
+ .write = at24_regmap_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_OF
@@ -481,6 +482,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct at24_data *at24;
int err;
unsigned i, num_addresses;
+ struct regmap *regmap;
if (client->dev.platform_data) {
chip = *(struct at24_platform_data *)client->dev.platform_data;
@@ -573,29 +575,12 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->chip = chip;
at24->num_addresses = num_addresses;
- /*
- * Export the EEPROM bytes through sysfs, since that's convenient.
- * By default, only root should see the data (maybe passwords etc)
- */
- sysfs_bin_attr_init(&at24->bin);
- at24->bin.attr.name = "eeprom";
- at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR;
- at24->bin.read = at24_bin_read;
- at24->bin.size = chip.byte_len;
-
- at24->macc.read = at24_macc_read;
-
writable = !(chip.flags & AT24_FLAG_READONLY);
if (writable) {
if (!use_smbus || use_smbus_write) {
unsigned write_max = chip.page_size;
- at24->macc.write = at24_macc_write;
-
- at24->bin.write = at24_bin_write;
- at24->bin.attr.mode |= S_IWUSR;
-
if (write_max > io_limit)
write_max = io_limit;
if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX)
@@ -627,14 +612,38 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
- err = sysfs_create_bin_file(&client->dev.kobj, &at24->bin);
- if (err)
+ at24->regmap_config.reg_bits = 32;
+ at24->regmap_config.val_bits = 8;
+ at24->regmap_config.reg_stride = 1;
+ at24->regmap_config.max_register = chip.byte_len - 1;
+
+ regmap = devm_regmap_init(&client->dev, &at24_regmap_bus, at24,
+ &at24->regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "regmap init failed\n");
+ err = PTR_ERR(regmap);
+ goto err_clients;
+ }
+
+ at24->nvmem_config.name = dev_name(&client->dev);
+ at24->nvmem_config.dev = &client->dev;
+ at24->nvmem_config.read_only = !writable;
+ at24->nvmem_config.root_only = true;
+ at24->nvmem_config.owner = THIS_MODULE;
+ at24->nvmem_config.compat = true;
+ at24->nvmem_config.base_dev = &client->dev;
+
+ at24->nvmem = nvmem_register(&at24->nvmem_config);
+
+ if (IS_ERR(at24->nvmem)) {
+ err = PTR_ERR(at24->nvmem);
goto err_clients;
+ }
i2c_set_clientdata(client, at24);
- dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
- at24->bin.size, client->name,
+ dev_info(&client->dev, "%u byte %s EEPROM, %s, %u bytes/write\n",
+ chip.byte_len, client->name,
writable ? "writable" : "read-only", at24->write_max);
if (use_smbus == I2C_SMBUS_WORD_DATA ||
use_smbus == I2C_SMBUS_BYTE_DATA) {
@@ -645,7 +654,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* export data to kernel code */
if (chip.setup)
- chip.setup(&at24->macc, chip.context);
+ chip.setup(at24->nvmem, chip.context);
return 0;
@@ -663,7 +672,8 @@ static int at24_remove(struct i2c_client *client)
int i;
at24 = i2c_get_clientdata(client);
- sysfs_remove_bin_file(&client->dev.kobj, &at24->bin);
+
+ nvmem_unregister(at24->nvmem);
for (i = 1; i < at24->num_addresses; i++)
i2c_unregister_device(at24->client[i]);
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index f850ef556bcc..fa36a6e37084 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -16,6 +16,8 @@
#include <linux/device.h>
#include <linux/sched.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <linux/property.h>
@@ -29,11 +31,12 @@
struct at25_data {
struct spi_device *spi;
- struct memory_accessor mem;
struct mutex lock;
struct spi_eeprom chip;
- struct bin_attribute bin;
unsigned addrlen;
+ struct regmap_config regmap_config;
+ struct nvmem_config nvmem_config;
+ struct nvmem_device *nvmem;
};
#define AT25_WREN 0x06 /* latch the write enable */
@@ -77,10 +80,10 @@ at25_ee_read(
struct spi_message m;
u8 instr;
- if (unlikely(offset >= at25->bin.size))
+ if (unlikely(offset >= at25->chip.byte_len))
return 0;
- if ((offset + count) > at25->bin.size)
- count = at25->bin.size - offset;
+ if ((offset + count) > at25->chip.byte_len)
+ count = at25->chip.byte_len - offset;
if (unlikely(!count))
return count;
@@ -131,21 +134,19 @@ at25_ee_read(
return status ? status : count;
}
-static ssize_t
-at25_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static int at25_regmap_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
- struct device *dev;
- struct at25_data *at25;
+ struct at25_data *at25 = context;
+ off_t offset = *(u32 *)reg;
+ int err;
- dev = container_of(kobj, struct device, kobj);
- at25 = dev_get_drvdata(dev);
-
- return at25_ee_read(at25, buf, off, count);
+ err = at25_ee_read(at25, val, offset, val_size);
+ if (err)
+ return err;
+ return 0;
}
-
static ssize_t
at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
size_t count)
@@ -155,10 +156,10 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
unsigned buf_size;
u8 *bounce;
- if (unlikely(off >= at25->bin.size))
+ if (unlikely(off >= at25->chip.byte_len))
return -EFBIG;
- if ((off + count) > at25->bin.size)
- count = at25->bin.size - off;
+ if ((off + count) > at25->chip.byte_len)
+ count = at25->chip.byte_len - off;
if (unlikely(!count))
return count;
@@ -265,39 +266,29 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
return written ? written : status;
}
-static ssize_t
-at25_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static int at25_regmap_write(void *context, const void *data, size_t count)
{
- struct device *dev;
- struct at25_data *at25;
-
- dev = container_of(kobj, struct device, kobj);
- at25 = dev_get_drvdata(dev);
-
- return at25_ee_write(at25, buf, off, count);
-}
+ struct at25_data *at25 = context;
+ const char *buf;
+ u32 offset;
+ size_t len;
+ int err;
-/*-------------------------------------------------------------------------*/
-
-/* Let in-kernel code access the eeprom data. */
-
-static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
- off_t offset, size_t count)
-{
- struct at25_data *at25 = container_of(mem, struct at25_data, mem);
+ memcpy(&offset, data, sizeof(offset));
+ buf = (const char *)data + sizeof(offset);
+ len = count - sizeof(offset);
- return at25_ee_read(at25, buf, offset, count);
+ err = at25_ee_write(at25, buf, offset, len);
+ if (err)
+ return err;
+ return 0;
}
-static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
- off_t offset, size_t count)
-{
- struct at25_data *at25 = container_of(mem, struct at25_data, mem);
-
- return at25_ee_write(at25, buf, offset, count);
-}
+static const struct regmap_bus at25_regmap_bus = {
+ .read = at25_regmap_read,
+ .write = at25_regmap_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
/*-------------------------------------------------------------------------*/
@@ -358,6 +349,7 @@ static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
struct spi_eeprom chip;
+ struct regmap *regmap;
int err;
int sr;
int addrlen;
@@ -402,40 +394,35 @@ static int at25_probe(struct spi_device *spi)
spi_set_drvdata(spi, at25);
at25->addrlen = addrlen;
- /* Export the EEPROM bytes through sysfs, since that's convenient.
- * And maybe to other kernel code; it might hold a board's Ethernet
- * address, or board-specific calibration data generated on the
- * manufacturing floor.
- *
- * Default to root-only access to the data; EEPROMs often hold data
- * that's sensitive for read and/or write, like ethernet addresses,
- * security codes, board-specific manufacturing calibrations, etc.
- */
- sysfs_bin_attr_init(&at25->bin);
- at25->bin.attr.name = "eeprom";
- at25->bin.attr.mode = S_IRUSR;
- at25->bin.read = at25_bin_read;
- at25->mem.read = at25_mem_read;
-
- at25->bin.size = at25->chip.byte_len;
- if (!(chip.flags & EE_READONLY)) {
- at25->bin.write = at25_bin_write;
- at25->bin.attr.mode |= S_IWUSR;
- at25->mem.write = at25_mem_write;
- }
+ at25->regmap_config.reg_bits = 32;
+ at25->regmap_config.val_bits = 8;
+ at25->regmap_config.reg_stride = 1;
+ at25->regmap_config.max_register = chip.byte_len - 1;
- err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin);
- if (err)
- return err;
-
- if (chip.setup)
- chip.setup(&at25->mem, chip.context);
+ regmap = devm_regmap_init(&spi->dev, &at25_regmap_bus, at25,
+ &at25->regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "regmap init failed\n");
+ return PTR_ERR(regmap);
+ }
- dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n",
- (at25->bin.size < 1024)
- ? at25->bin.size
- : (at25->bin.size / 1024),
- (at25->bin.size < 1024) ? "Byte" : "KByte",
+ at25->nvmem_config.name = dev_name(&spi->dev);
+ at25->nvmem_config.dev = &spi->dev;
+ at25->nvmem_config.read_only = chip.flags & EE_READONLY;
+ at25->nvmem_config.root_only = true;
+ at25->nvmem_config.owner = THIS_MODULE;
+ at25->nvmem_config.compat = true;
+ at25->nvmem_config.base_dev = &spi->dev;
+
+ at25->nvmem = nvmem_register(&at25->nvmem_config);
+ if (IS_ERR(at25->nvmem))
+ return PTR_ERR(at25->nvmem);
+
+ dev_info(&spi->dev, "%d %s %s eeprom%s, pagesize %u\n",
+ (chip.byte_len < 1024)
+ ? chip.byte_len
+ : (chip.byte_len / 1024),
+ (chip.byte_len < 1024) ? "Byte" : "KByte",
at25->chip.name,
(chip.flags & EE_READONLY) ? " (readonly)" : "",
at25->chip.page_size);
@@ -447,7 +434,8 @@ static int at25_remove(struct spi_device *spi)
struct at25_data *at25;
at25 = spi_get_drvdata(spi);
- sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin);
+ nvmem_unregister(at25->nvmem);
+
return 0;
}
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 7342fd637031..3d1d55157e5f 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -84,7 +84,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
+ struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
struct eeprom_data *data = i2c_get_clientdata(client);
u8 slice;
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index ff63f05edc76..426fe2fd5238 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -10,12 +10,17 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
#include <linux/eeprom_93xx46.h>
#define OP_START 0x4
@@ -25,73 +30,111 @@
#define ADDR_ERAL 0x20
#define ADDR_EWEN 0x30
+struct eeprom_93xx46_devtype_data {
+ unsigned int quirks;
+};
+
+static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
+ .quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
+ EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
+};
+
struct eeprom_93xx46_dev {
struct spi_device *spi;
struct eeprom_93xx46_platform_data *pdata;
- struct bin_attribute bin;
struct mutex lock;
+ struct regmap_config regmap_config;
+ struct nvmem_config nvmem_config;
+ struct nvmem_device *nvmem;
int addrlen;
+ int size;
};
+static inline bool has_quirk_single_word_read(struct eeprom_93xx46_dev *edev)
+{
+ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_SINGLE_WORD_READ;
+}
+
+static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
+{
+ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
+}
+
static ssize_t
-eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
+ unsigned off, size_t count)
{
- struct eeprom_93xx46_dev *edev;
- struct device *dev;
- struct spi_message m;
- struct spi_transfer t[2];
- int bits, ret;
- u16 cmd_addr;
+ ssize_t ret = 0;
- dev = container_of(kobj, struct device, kobj);
- edev = dev_get_drvdata(dev);
+ if (unlikely(off >= edev->size))
+ return 0;
+ if ((off + count) > edev->size)
+ count = edev->size - off;
+ if (unlikely(!count))
+ return count;
- cmd_addr = OP_READ << edev->addrlen;
+ mutex_lock(&edev->lock);
- if (edev->addrlen == 7) {
- cmd_addr |= off & 0x7f;
- bits = 10;
- } else {
- cmd_addr |= off & 0x3f;
- bits = 9;
- }
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
- dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
- cmd_addr, edev->spi->max_speed_hz);
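+	/* With the single-word-read quirk only one word arrives per READ, so transfer word by word */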
+ while (count) {
+ struct spi_message m;
+ struct spi_transfer t[2] = { { 0 } };
+ u16 cmd_addr = OP_READ << edev->addrlen;
+ size_t nbytes = count;
+ int bits;
+ int err;
+
+ if (edev->addrlen == 7) {
+ cmd_addr |= off & 0x7f;
+ bits = 10;
+ if (has_quirk_single_word_read(edev))
+ nbytes = 1;
+ } else {
+ cmd_addr |= (off >> 1) & 0x3f;
+ bits = 9;
+ if (has_quirk_single_word_read(edev))
+ nbytes = 2;
+ }
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
+ dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+ cmd_addr, edev->spi->max_speed_hz);
- t[0].tx_buf = (char *)&cmd_addr;
- t[0].len = 2;
- t[0].bits_per_word = bits;
- spi_message_add_tail(&t[0], &m);
+ spi_message_init(&m);
- t[1].rx_buf = buf;
- t[1].len = count;
- t[1].bits_per_word = 8;
- spi_message_add_tail(&t[1], &m);
+ t[0].tx_buf = (char *)&cmd_addr;
+ t[0].len = 2;
+ t[0].bits_per_word = bits;
+ spi_message_add_tail(&t[0], &m);
- mutex_lock(&edev->lock);
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ t[1].bits_per_word = 8;
+ spi_message_add_tail(&t[1], &m);
- if (edev->pdata->prepare)
- edev->pdata->prepare(edev);
+ err = spi_sync(edev->spi, &m);
+ /* have to wait at least Tcsl ns */
+ ndelay(250);
- ret = spi_sync(edev->spi, &m);
- /* have to wait at least Tcsl ns */
- ndelay(250);
- if (ret) {
- dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
- count, (int)off, ret);
+ if (err) {
+ dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
+ nbytes, (int)off, err);
+ ret = err;
+ break;
+ }
+
+ buf += nbytes;
+ off += nbytes;
+ count -= nbytes;
+ ret += nbytes;
}
if (edev->pdata->finish)
edev->pdata->finish(edev);
mutex_unlock(&edev->lock);
- return ret ? : count;
+ return ret;
}
static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
@@ -110,7 +153,13 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
bits = 9;
}
- dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr);
+ if (has_quirk_instruction_length(edev)) {
+ cmd_addr <<= 2;
+ bits += 2;
+ }
+
+ dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n",
+ is_on ? "en" : "ds", cmd_addr, bits);
spi_message_init(&m);
memset(&t, 0, sizeof(t));
@@ -155,7 +204,7 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
bits = 10;
data_len = 1;
} else {
- cmd_addr |= off & 0x3f;
+ cmd_addr |= (off >> 1) & 0x3f;
bits = 9;
data_len = 2;
}
@@ -182,16 +231,17 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
}
static ssize_t
-eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf,
+ loff_t off, size_t count)
{
- struct eeprom_93xx46_dev *edev;
- struct device *dev;
int i, ret, step = 1;
- dev = container_of(kobj, struct device, kobj);
- edev = dev_get_drvdata(dev);
+ if (unlikely(off >= edev->size))
+ return -EFBIG;
+ if ((off + count) > edev->size)
+ count = edev->size - off;
+ if (unlikely(!count))
+ return count;
/* only write even number of bytes on 16-bit devices */
if (edev->addrlen == 6) {
@@ -228,6 +278,49 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
return ret ? : count;
}
+/*
+ * Provide a regmap interface, which is registered with the NVMEM
+ * framework
+ */
+static int eeprom_93xx46_regmap_read(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct eeprom_93xx46_dev *eeprom_93xx46 = context;
+ off_t offset = *(u32 *)reg;
+ int err;
+
+ err = eeprom_93xx46_read(eeprom_93xx46, val, offset, val_size);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int eeprom_93xx46_regmap_write(void *context, const void *data,
+ size_t count)
+{
+ struct eeprom_93xx46_dev *eeprom_93xx46 = context;
+ const char *buf;
+ u32 offset;
+ size_t len;
+ int err;
+
+ memcpy(&offset, data, sizeof(offset));
+ buf = (const char *)data + sizeof(offset);
+ len = count - sizeof(offset);
+
+ err = eeprom_93xx46_write(eeprom_93xx46, buf, offset, len);
+ if (err)
+ return err;
+ return 0;
+}
+
+static const struct regmap_bus eeprom_93xx46_regmap_bus = {
+ .read = eeprom_93xx46_regmap_read,
+ .write = eeprom_93xx46_regmap_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
{
struct eeprom_93xx46_platform_data *pd = edev->pdata;
@@ -245,6 +338,13 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
bits = 9;
}
+ if (has_quirk_instruction_length(edev)) {
+ cmd_addr <<= 2;
+ bits += 2;
+ }
+
+ dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits);
+
spi_message_init(&m);
memset(&t, 0, sizeof(t));
@@ -294,12 +394,101 @@ static ssize_t eeprom_93xx46_store_erase(struct device *dev,
}
static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
+static void select_assert(void *context)
+{
+ struct eeprom_93xx46_dev *edev = context;
+
+ gpiod_set_value_cansleep(edev->pdata->select, 1);
+}
+
+static void select_deassert(void *context)
+{
+ struct eeprom_93xx46_dev *edev = context;
+
+ gpiod_set_value_cansleep(edev->pdata->select, 0);
+}
+
+static const struct of_device_id eeprom_93xx46_of_table[] = {
+ { .compatible = "eeprom-93xx46", },
+ { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
+
+static int eeprom_93xx46_probe_dt(struct spi_device *spi)
+{
+ const struct of_device_id *of_id =
+ of_match_device(eeprom_93xx46_of_table, &spi->dev);
+ struct device_node *np = spi->dev.of_node;
+ struct eeprom_93xx46_platform_data *pd;
+ u32 tmp;
+ int gpio;
+ enum of_gpio_flags of_flags;
+ int ret;
+
+ pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "data-size", &tmp);
+ if (ret < 0) {
+ dev_err(&spi->dev, "data-size property not found\n");
+ return ret;
+ }
+
+ if (tmp == 8) {
+ pd->flags |= EE_ADDR8;
+ } else if (tmp == 16) {
+ pd->flags |= EE_ADDR16;
+ } else {
+ dev_err(&spi->dev, "invalid data-size (%d)\n", tmp);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(np, "read-only"))
+ pd->flags |= EE_READONLY;
+
+ gpio = of_get_named_gpio_flags(np, "select-gpios", 0, &of_flags);
+ if (gpio_is_valid(gpio)) {
+ unsigned long flags =
+ of_flags == OF_GPIO_ACTIVE_LOW ? GPIOF_ACTIVE_LOW : 0;
+
+ ret = devm_gpio_request_one(&spi->dev, gpio, flags,
+ "eeprom_93xx46_select");
+ if (ret)
+ return ret;
+
+ pd->select = gpio_to_desc(gpio);
+ pd->prepare = select_assert;
+ pd->finish = select_deassert;
+
+ gpiod_direction_output(pd->select, 0);
+ }
+
+ if (of_id->data) {
+ const struct eeprom_93xx46_devtype_data *data = of_id->data;
+
+ pd->quirks = data->quirks;
+ }
+
+ spi->dev.platform_data = pd;
+
+ return 0;
+}
+
static int eeprom_93xx46_probe(struct spi_device *spi)
{
struct eeprom_93xx46_platform_data *pd;
struct eeprom_93xx46_dev *edev;
+ struct regmap *regmap;
int err;
+ if (spi->dev.of_node) {
+ err = eeprom_93xx46_probe_dt(spi);
+ if (err < 0)
+ return err;
+ }
+
pd = spi->dev.platform_data;
if (!pd) {
dev_err(&spi->dev, "missing platform data\n");
@@ -325,19 +514,34 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->spi = spi_dev_get(spi);
edev->pdata = pd;
- sysfs_bin_attr_init(&edev->bin);
- edev->bin.attr.name = "eeprom";
- edev->bin.attr.mode = S_IRUSR;
- edev->bin.read = eeprom_93xx46_bin_read;
- edev->bin.size = 128;
- if (!(pd->flags & EE_READONLY)) {
- edev->bin.write = eeprom_93xx46_bin_write;
- edev->bin.attr.mode |= S_IWUSR;
+ edev->size = 128;
+
+ edev->regmap_config.reg_bits = 32;
+ edev->regmap_config.val_bits = 8;
+ edev->regmap_config.reg_stride = 1;
+ edev->regmap_config.max_register = edev->size - 1;
+
+ regmap = devm_regmap_init(&spi->dev, &eeprom_93xx46_regmap_bus, edev,
+ &edev->regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "regmap init failed\n");
+ err = PTR_ERR(regmap);
+ goto fail;
}
- err = sysfs_create_bin_file(&spi->dev.kobj, &edev->bin);
- if (err)
+ edev->nvmem_config.name = dev_name(&spi->dev);
+ edev->nvmem_config.dev = &spi->dev;
+ edev->nvmem_config.read_only = pd->flags & EE_READONLY;
+ edev->nvmem_config.root_only = true;
+ edev->nvmem_config.owner = THIS_MODULE;
+ edev->nvmem_config.compat = true;
+ edev->nvmem_config.base_dev = &spi->dev;
+
+ edev->nvmem = nvmem_register(&edev->nvmem_config);
+ if (IS_ERR(edev->nvmem)) {
+ err = PTR_ERR(edev->nvmem);
goto fail;
+ }
dev_info(&spi->dev, "%d-bit eeprom %s\n",
(pd->flags & EE_ADDR8) ? 8 : 16,
@@ -359,10 +563,11 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
{
struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
+ nvmem_unregister(edev->nvmem);
+
if (!(edev->pdata->flags & EE_READONLY))
device_remove_file(&spi->dev, &dev_attr_erase);
- sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin);
kfree(edev);
return 0;
}
@@ -370,6 +575,7 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
static struct spi_driver eeprom_93xx46_driver = {
.driver = {
.name = "93xx46",
+ .of_match_table = of_match_ptr(eeprom_93xx46_of_table),
},
.probe = eeprom_93xx46_probe,
.remove = eeprom_93xx46_remove,
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
index 6ab31eff0536..c24c9b7c1dd3 100644
--- a/drivers/misc/genwqe/card_sysfs.c
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -278,7 +278,7 @@ static umode_t genwqe_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
unsigned int j;
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct genwqe_dev *cd = dev_get_drvdata(dev);
umode_t mode = attr->mode;
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 5bd127727d8e..9fea49d2e15b 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -34,6 +34,7 @@
#include <linux/kref.h>
#include <linux/device.h>
#include <linux/input.h>
+#include <linux/time64.h>
/* Driver identification */
#define DRIVER_NAME "ibmasm"
@@ -53,9 +54,11 @@ extern int ibmasm_debug;
static inline char *get_timestamp(char *buf)
{
- struct timeval now;
- do_gettimeofday(&now);
- sprintf(buf, "%lu.%lu", now.tv_sec, now.tv_usec);
+ struct timespec64 now;
+
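+	/* timespec64 keeps tv_sec 64-bit, avoiding the 2038 overflow of the old timeval */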
+ ktime_get_real_ts64(&now);
+ sprintf(buf, "%llu.%.06lu", (long long)now.tv_sec,
+ now.tv_nsec / NSEC_PER_USEC);
return buf;
}
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b933111e0d..9c677f3f3c26 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
{
struct inode *root;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = IBMASMFS_MAGIC;
sb->s_op = &ibmasmfs_s_ops;
sb->s_time_gran = 1;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 0c3bb7e3ee80..14b7d539fed6 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -209,7 +209,7 @@ static int lis3lv02d_i2c_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int lis3lv02d_i2c_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -219,7 +219,7 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
static int lis3lv02d_i2c_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
/*
@@ -238,7 +238,7 @@ static int lis3lv02d_i2c_resume(struct device *dev)
#ifdef CONFIG_PM
static int lis3_i2c_runtime_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
lis3lv02d_poweroff(lis3);
@@ -247,7 +247,7 @@ static int lis3_i2c_runtime_suspend(struct device *dev)
static int lis3_i2c_runtime_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
lis3lv02d_poweron(lis3);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 11fdadc68e53..0a5cbbe12452 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -92,6 +92,9 @@ enum ctype {
CT_UNALIGNED_LOAD_STORE_WRITE,
CT_OVERWRITE_ALLOCATION,
CT_WRITE_AFTER_FREE,
+ CT_READ_AFTER_FREE,
+ CT_WRITE_BUDDY_AFTER_FREE,
+ CT_READ_BUDDY_AFTER_FREE,
CT_SOFTLOCKUP,
CT_HARDLOCKUP,
CT_SPINLOCKUP,
@@ -103,7 +106,9 @@ enum ctype {
CT_EXEC_USERSPACE,
CT_ACCESS_USERSPACE,
CT_WRITE_RO,
+ CT_WRITE_RO_AFTER_INIT,
CT_WRITE_KERN,
+ CT_WRAP_ATOMIC
};
static char* cp_name[] = {
@@ -129,6 +134,9 @@ static char* cp_type[] = {
"UNALIGNED_LOAD_STORE_WRITE",
"OVERWRITE_ALLOCATION",
"WRITE_AFTER_FREE",
+ "READ_AFTER_FREE",
+ "WRITE_BUDDY_AFTER_FREE",
+ "READ_BUDDY_AFTER_FREE",
"SOFTLOCKUP",
"HARDLOCKUP",
"SPINLOCKUP",
@@ -140,7 +148,9 @@ static char* cp_type[] = {
"EXEC_USERSPACE",
"ACCESS_USERSPACE",
"WRITE_RO",
+ "WRITE_RO_AFTER_INIT",
"WRITE_KERN",
+ "WRAP_ATOMIC"
};
static struct jprobe lkdtm;
@@ -162,6 +172,7 @@ static DEFINE_SPINLOCK(lock_me_up);
static u8 data_area[EXEC_SIZE];
static const unsigned long rodata = 0xAA55AA55;
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
@@ -335,7 +346,7 @@ static noinline void corrupt_stack(void)
memset((void *)data, 0, 64);
}
-static void execute_location(void *dst)
+static noinline void execute_location(void *dst)
{
void (*func)(void) = dst;
@@ -409,12 +420,114 @@ static void lkdtm_do_action(enum ctype which)
break;
}
case CT_WRITE_AFTER_FREE: {
+ int *base, *again;
size_t len = 1024;
- u32 *data = kmalloc(len, GFP_KERNEL);
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+ pr_info("Attempting bad write to freed memory at %p\n",
+ &base[offset]);
+ kfree(base);
+ base[offset] = 0x0abcdef0;
+ /* Attempt to notice the overwrite. */
+ again = kmalloc(len, GFP_KERNEL);
+ kfree(again);
+ if (again != base)
+ pr_info("Hmm, didn't get the same memory range.\n");
- kfree(data);
+ break;
+ }
+ case CT_READ_AFTER_FREE: {
+ int *base, *val, saw;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base)
+ break;
+
+ val = kmalloc(len, GFP_KERNEL);
+ if (!val) {
+ kfree(base);
+ break;
+ }
+
+ *val = 0x12345678;
+ base[offset] = *val;
+ pr_info("Value in memory before free: %x\n", base[offset]);
+
+ kfree(base);
+
+ pr_info("Attempting bad read from freed memory\n");
+ saw = base[offset];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Memory was not poisoned\n");
+
+ kfree(val);
+ break;
+ }
+ case CT_WRITE_BUDDY_AFTER_FREE: {
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ if (!p)
+ break;
+ pr_info("Writing to the buddy page before free\n");
+ memset((void *)p, 0x3, PAGE_SIZE);
+ free_page(p);
schedule();
- memset(data, 0x78, len);
+ pr_info("Attempting bad write to the buddy page after free\n");
+ memset((void *)p, 0x78, PAGE_SIZE);
+ /* Attempt to notice the overwrite. */
+ p = __get_free_page(GFP_KERNEL);
+ free_page(p);
+ schedule();
+
+ break;
+ }
+ case CT_READ_BUDDY_AFTER_FREE: {
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ int saw, *val;
+ int *base;
+
+ if (!p)
+ break;
+
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ free_page(p);
+ break;
+ }
+
+ base = (int *)p;
+
+ *val = 0x12345678;
+ base[0] = *val;
+ pr_info("Value in memory before free: %x\n", base[0]);
+ free_page(p);
+ pr_info("Attempting to read from freed memory\n");
+ saw = base[0];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Buddy page was not poisoned\n");
+
+ kfree(val);
break;
}
case CT_SOFTLOCKUP:
@@ -503,11 +616,28 @@ static void lkdtm_do_action(enum ctype which)
break;
}
case CT_WRITE_RO: {
- unsigned long *ptr;
+ /* Explicitly cast away "const" for the test. */
+ unsigned long *ptr = (unsigned long *)&rodata;
- ptr = (unsigned long *)&rodata;
+ pr_info("attempting bad rodata write at %p\n", ptr);
+ *ptr ^= 0xabcd1234;
- pr_info("attempting bad write at %p\n", ptr);
+ break;
+ }
+ case CT_WRITE_RO_AFTER_INIT: {
+ unsigned long *ptr = &ro_after_init;
+
+ /*
+ * Verify we were written to during init. Since an Oops
+ * is considered a "success", on failure we just skip the
+ * real test.
+ */
+ if ((*ptr & 0xAA) != 0xAA) {
+ pr_info("%p was NOT written during init!?\n", ptr);
+ break;
+ }
+
+ pr_info("attempting bad ro_after_init write at %p\n", ptr);
*ptr ^= 0xabcd1234;
break;
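For context, the __ro_after_init contract this case exercises, as a hedged kernel-style sketch (demo_key and demo_init are illustrative names, not part of the patch):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/cache.h>        /* __ro_after_init */

    static unsigned long demo_key __ro_after_init;

    static int __init demo_init(void)
    {
        demo_key = 0xdeadbeef;      /* legal: init memory still writable */
        return 0;
    }
    module_init(demo_init);

    /* Once init completes, the section holding demo_key is remapped
     * read-only; any later store faults, which lkdtm counts as a win.
     */
    MODULE_LICENSE("GPL");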
@@ -528,6 +658,17 @@ static void lkdtm_do_action(enum ctype which)
do_overwritten();
break;
}
+ case CT_WRAP_ATOMIC: {
+ atomic_t under = ATOMIC_INIT(INT_MIN);
+ atomic_t over = ATOMIC_INIT(INT_MAX);
+
+ pr_info("attempting atomic underflow\n");
+ atomic_dec(&under);
+ pr_info("attempting atomic overflow\n");
+ atomic_inc(&over);
+
+ return;
+ }
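What the two atomic operations do on overflow, as a small userspace sketch; kernel atomic_inc()/atomic_dec() wrap the same way unless a hardening feature traps the wrap:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        int over = INT_MAX, under = INT_MIN;

        /* go through unsigned to get defined wraparound; the final
         * conversion back is implementation-defined in ISO C but
         * wraps on two's-complement targets
         */
        over  = (int)((unsigned int)over + 1u);   /* INT_MAX -> INT_MIN */
        under = (int)((unsigned int)under - 1u);  /* INT_MIN -> INT_MAX */
        printf("over = %d, under = %d\n", over, under);
        return 0;
    }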
case CT_NONE:
default:
break;
@@ -817,6 +958,9 @@ static int __init lkdtm_module_init(void)
int n_debugfs_entries = 1; /* Assume only the direct entry */
int i;
+ /* Make sure we can write to __ro_after_init values during __init */
+ ro_after_init |= 0xAA;
+
/* Register debugfs interface */
lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
if (!lkdtm_debugfs_root) {
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index d23384dde73b..c49e1d2269af 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,6 +1,6 @@
config INTEL_MEI
tristate "Intel Management Engine Interface"
- depends on X86 && PCI && WATCHDOG_CORE
+ depends on X86 && PCI
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for systems containing Intel chipsets.
@@ -12,7 +12,7 @@ config INTEL_MEI
config INTEL_MEI_ME
tristate "ME Enabled Intel Chipsets"
select INTEL_MEI
- depends on X86 && PCI && WATCHDOG_CORE
+ depends on X86 && PCI
help
MEI support for ME Enabled Intel chipsets.
@@ -37,7 +37,7 @@ config INTEL_MEI_ME
config INTEL_MEI_TXE
tristate "Intel Trusted Execution Environment with ME Interface"
select INTEL_MEI
- depends on X86 && PCI && WATCHDOG_CORE
+ depends on X86 && PCI
help
MEI Support for Trusted Execution Environment device on Intel SoCs
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 01447ca21c26..59e6b0aede34 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -9,7 +9,6 @@ mei-objs += interrupt.o
mei-objs += client.o
mei-objs += main.o
mei-objs += amthif.o
-mei-objs += wd.o
mei-objs += bus.o
mei-objs += bus-fixup.o
mei-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index cd0403f09267..194360a5f782 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -50,7 +50,6 @@ void mei_amthif_reset_params(struct mei_device *dev)
dev->iamthif_current_cb = NULL;
dev->iamthif_canceled = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_timer = 0;
dev->iamthif_stall_timer = 0;
dev->iamthif_open_count = 0;
}
@@ -68,11 +67,14 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
struct mei_cl *cl = &dev->iamthif_cl;
int ret;
+ if (mei_cl_is_connected(cl))
+ return 0;
+
dev->iamthif_state = MEI_IAMTHIF_IDLE;
mei_cl_init(cl, dev);
- ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+ ret = mei_cl_link(cl);
if (ret < 0) {
dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
return ret;
@@ -80,32 +82,10 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
ret = mei_cl_connect(cl, me_cl, NULL);
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
-
return ret;
}
/**
- * mei_amthif_find_read_list_entry - finds a amthilist entry for current file
- *
- * @dev: the device structure
- * @file: pointer to file object
- *
- * Return: returned a list entry on success, NULL on failure.
- */
-struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
- struct file *file)
-{
- struct mei_cl_cb *cb;
-
- list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list)
- if (cb->file_object == file)
- return cb;
- return NULL;
-}
-
-
-/**
* mei_amthif_read - read data from AMTHIF client
*
* @dev: the device structure
@@ -126,18 +106,11 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
{
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *cb;
- unsigned long timeout;
int rets;
int wait_ret;
- /* Only possible if we are in timeout */
- if (!cl) {
- dev_err(dev->dev, "bad file ext.\n");
- return -ETIME;
- }
-
dev_dbg(dev->dev, "checking amthif data\n");
- cb = mei_amthif_find_read_list_entry(dev, file);
+ cb = mei_cl_read_cb(cl, file);
/* Check if we can block or not */
if (cb == NULL && file->f_flags & O_NONBLOCK)
@@ -149,8 +122,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
/* unlock the Mutex */
mutex_unlock(&dev->device_lock);
- wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
- (cb = mei_amthif_find_read_list_entry(dev, file)));
+ wait_ret = wait_event_interruptible(cl->rx_wait,
+ !list_empty(&cl->rd_completed) ||
+ !mei_cl_is_connected(cl));
/* Locking again the Mutex */
mutex_lock(&dev->device_lock);
@@ -158,7 +132,12 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
if (wait_ret)
return -ERESTARTSYS;
- dev_dbg(dev->dev, "woke up from sleep\n");
+ if (!mei_cl_is_connected(cl)) {
+ rets = -EBUSY;
+ goto out;
+ }
+
+ cb = mei_cl_read_cb(cl, file);
}
if (cb->status) {
@@ -168,24 +147,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
}
dev_dbg(dev->dev, "Got amthif data\n");
- dev->iamthif_timer = 0;
-
- timeout = cb->read_time +
- mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
- dev_dbg(dev->dev, "amthif timeout = %lud\n",
- timeout);
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev->dev, "amthif Time out\n");
- /* 15 sec for the message has expired */
- list_del_init(&cb->list);
- rets = -ETIME;
- goto free;
- }
/* if the whole message will fit remove it from the list */
if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
list_del_init(&cb->list);
- else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
+ else if (cb->buf_idx <= *offset) {
/* end of the message has been reached */
list_del_init(&cb->list);
rets = 0;
@@ -195,9 +160,8 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
* remove message from deletion list
*/
- dev_dbg(dev->dev, "amthif cb->buf size - %d\n",
- cb->buf.size);
- dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
+ dev_dbg(dev->dev, "amthif cb->buf.size - %zu cb->buf_idx - %zu\n",
+ cb->buf.size, cb->buf_idx);
/* length is being truncated to PAGE_SIZE, however,
* the buf_idx may point beyond it */
@@ -229,7 +193,7 @@ out:
*
* Return: 0 on success, <0 on failure.
*/
-static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
+static int mei_amthif_read_start(struct mei_cl *cl, const struct file *file)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
@@ -248,7 +212,7 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
dev->iamthif_state = MEI_IAMTHIF_READING;
- dev->iamthif_file_object = cb->file_object;
+ dev->iamthif_fp = cb->fp;
dev->iamthif_current_cb = cb;
return 0;
@@ -277,7 +241,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
dev->iamthif_state = MEI_IAMTHIF_WRITING;
dev->iamthif_current_cb = cb;
- dev->iamthif_file_object = cb->file_object;
+ dev->iamthif_fp = cb->fp;
dev->iamthif_canceled = false;
ret = mei_cl_write(cl, cb, false);
@@ -285,7 +249,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
return ret;
if (cb->completed)
- cb->status = mei_amthif_read_start(cl, cb->file_object);
+ cb->status = mei_amthif_read_start(cl, cb->fp);
return 0;
}
@@ -304,8 +268,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
dev->iamthif_canceled = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_timer = 0;
- dev->iamthif_file_object = NULL;
+ dev->iamthif_fp = NULL;
dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
@@ -329,17 +292,17 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
- struct mei_device *dev;
-
- if (WARN_ON(!cl || !cl->dev))
- return -ENODEV;
+ struct mei_device *dev = cl->dev;
- if (WARN_ON(!cb))
- return -EINVAL;
+ list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
- dev = cl->dev;
+ /*
+ * The previous request is still being processed; queue this one.
+ */
+ if (dev->iamthif_state > MEI_IAMTHIF_IDLE &&
+ dev->iamthif_state < MEI_IAMTHIF_READ_COMPLETE)
+ return 0;
- list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
return mei_amthif_run_next_cmd(dev);
}
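The reordering in mei_amthif_write() — enqueue first, start processing only when idle — is a standard single-consumer queue pattern. A self-contained sketch with hypothetical names (engine, submit, run_next):

    #include <stdio.h>

    enum state { IDLE, BUSY };
    struct engine { enum state state; int queued; };

    static void run_next(struct engine *e)
    {
        e->state = BUSY;        /* mei_amthif_run_next_cmd() equivalent */
        printf("running next command, %d queued\n", e->queued);
    }

    static int submit(struct engine *e)
    {
        e->queued++;            /* always enqueue first */
        if (e->state != IDLE)
            return 0;           /* previous request still in flight */
        run_next(e);
        return 0;
    }

    int main(void)
    {
        struct engine e = { IDLE, 0 };
        submit(&e);             /* idle: kicks processing immediately */
        submit(&e);             /* busy: only queues */
        return 0;
    }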
@@ -360,10 +323,10 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
{
unsigned int mask = 0;
- poll_wait(file, &dev->iamthif_cl.wait, wait);
+ poll_wait(file, &dev->iamthif_cl.rx_wait, wait);
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
- dev->iamthif_file_object == file) {
+ dev->iamthif_fp == file) {
mask |= POLLIN | POLLRDNORM;
mei_amthif_run_next_cmd(dev);
@@ -393,7 +356,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
if (cb->completed)
- cb->status = mei_amthif_read_start(cl, cb->file_object);
+ cb->status = mei_amthif_read_start(cl, cb->fp);
return 0;
}
@@ -437,11 +400,12 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
/**
* mei_amthif_complete - complete amthif callback.
*
- * @dev: the device structure.
+ * @cl: host client
* @cb: callback block.
*/
-void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
+void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
+ struct mei_device *dev = cl->dev;
if (cb->fop_type == MEI_FOP_WRITE) {
if (!cb->status) {
@@ -453,25 +417,22 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
* in case of error enqueue the write cb to complete read list
* so it can be propagated to the reader
*/
- list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
- wake_up_interruptible(&dev->iamthif_cl.wait);
+ list_add_tail(&cb->list, &cl->rd_completed);
+ wake_up_interruptible(&cl->rx_wait);
return;
}
if (!dev->iamthif_canceled) {
dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
dev->iamthif_stall_timer = 0;
- list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
+ list_add_tail(&cb->list, &cl->rd_completed);
dev_dbg(dev->dev, "amthif read completed\n");
- dev->iamthif_timer = jiffies;
- dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
- dev->iamthif_timer);
} else {
mei_amthif_run_next_cmd(dev);
}
dev_dbg(dev->dev, "completing amthif call back.\n");
- wake_up_interruptible(&dev->iamthif_cl.wait);
+ wake_up_interruptible(&cl->rx_wait);
}
/**
@@ -497,7 +458,7 @@ static bool mei_clear_list(struct mei_device *dev,
/* list all list member */
list_for_each_entry_safe(cb, next, mei_cb_list, list) {
/* check if list member associated with a file */
- if (file == cb->file_object) {
+ if (file == cb->fp) {
/* check if cb equal to current iamthif cb */
if (dev->iamthif_current_cb == cb) {
dev->iamthif_current_cb = NULL;
@@ -523,13 +484,14 @@ static bool mei_clear_list(struct mei_device *dev,
*
* Return: true if callback removed from the list, false otherwise
*/
-static bool mei_clear_lists(struct mei_device *dev, struct file *file)
+static bool mei_clear_lists(struct mei_device *dev, const struct file *file)
{
bool removed = false;
+ struct mei_cl *cl = &dev->iamthif_cl;
/* remove callbacks associated with a file */
mei_clear_list(dev, file, &dev->amthif_cmd_list.list);
- if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list))
+ if (mei_clear_list(dev, file, &cl->rd_completed))
removed = true;
mei_clear_list(dev, file, &dev->ctrl_rd_list.list);
@@ -546,7 +508,7 @@ static bool mei_clear_lists(struct mei_device *dev, struct file *file)
/* check if iamthif_current_cb not NULL */
if (dev->iamthif_current_cb && !removed) {
/* check file and iamthif current cb association */
- if (dev->iamthif_current_cb->file_object == file) {
+ if (dev->iamthif_current_cb->fp == file) {
/* remove cb */
mei_io_cb_free(dev->iamthif_current_cb);
dev->iamthif_current_cb = NULL;
@@ -569,7 +531,7 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
if (dev->iamthif_open_count > 0)
dev->iamthif_open_count--;
- if (dev->iamthif_file_object == file &&
+ if (dev->iamthif_fp == file &&
dev->iamthif_state != MEI_IAMTHIF_IDLE) {
dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 020de5919c21..e9e6ea3ab73c 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -35,6 +35,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
#define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
+#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
+ 0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+
#define MEI_UUID_ANY NULL_UUID_LE
/**
@@ -48,8 +51,7 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
*/
static void number_of_connections(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
- __func__, mei_me_cl_uuid(cldev->me_cl));
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
if (cldev->me_cl->props.max_number_of_connections > 1)
cldev->do_match = 0;
@@ -62,11 +64,36 @@ static void number_of_connections(struct mei_cl_device *cldev)
*/
static void blacklist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
- __func__, mei_me_cl_uuid(cldev->me_cl));
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+
cldev->do_match = 0;
}
+/**
+ * mei_wd - wd client is on the bus; change the protocol version
+ * as the API has changed.
+ *
+ * @cldev: me clients device
+ */
+#if IS_ENABLED(CONFIG_INTEL_MEI_ME)
+#include <linux/pci.h>
+#include "hw-me-regs.h"
+static void mei_wd(struct mei_cl_device *cldev)
+{
+ struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
+
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+ if (pdev->device == MEI_DEV_ID_WPT_LP ||
+ pdev->device == MEI_DEV_ID_SPT ||
+ pdev->device == MEI_DEV_ID_SPT_H)
+ cldev->me_cl->props.protocol_version = 0x2;
+
+ cldev->do_match = 1;
+}
+#else
+static inline void mei_wd(struct mei_cl_device *cldev) {}
+#endif /* CONFIG_INTEL_MEI_ME */
+
struct mei_nfc_cmd {
u8 command;
u8 status;
@@ -208,12 +235,11 @@ static void mei_nfc(struct mei_cl_device *cldev)
bus = cldev->bus;
- dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n",
- __func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match);
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
mutex_lock(&bus->device_lock);
/* we need to connect to INFO GUID */
- cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+ cl = mei_cl_alloc_linked(bus);
if (IS_ERR(cl)) {
ret = PTR_ERR(cl);
cl = NULL;
@@ -282,6 +308,7 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
+ MEI_FIXUP(MEI_UUID_WD, mei_wd),
};
/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0b05aa938799..5d5996e39a67 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -44,7 +44,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bool blocking)
{
struct mei_device *bus;
- struct mei_cl_cb *cb = NULL;
+ struct mei_cl_cb *cb;
ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
+
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
@@ -81,8 +86,6 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
out:
mutex_unlock(&bus->device_lock);
- if (rets < 0)
- mei_io_cb_free(cb);
return rets;
}
@@ -109,6 +112,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
cb = mei_cl_read_cb(cl, NULL);
if (cb)
@@ -230,45 +237,55 @@ static void mei_cl_bus_event_work(struct work_struct *work)
* mei_cl_bus_notify_event - schedule notify cb on bus client
*
* @cl: host client
+ *
+ * Return: true if event was scheduled
+ * false if the client is not waiting for the event
*/
-void mei_cl_bus_notify_event(struct mei_cl *cl)
+bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
if (!cldev || !cldev->event_cb)
- return;
+ return false;
if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
- return;
+ return false;
if (!cl->notify_ev)
- return;
+ return false;
set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
schedule_work(&cldev->event_work);
cl->notify_ev = false;
+
+ return true;
}
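Both hooks now report whether a bus consumer claimed the event, which lets the completion paths later in this patch fall back to waking sleeping readers, e.g. (taken from mei_cl_complete() below):

    if (!mei_cl_bus_rx_event(cl))
        wake_up_interruptible(&cl->rx_wait);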
/**
- * mei_cl_bus_rx_event - schedule rx evenet
+ * mei_cl_bus_rx_event - schedule rx event
*
* @cl: host client
+ *
+ * Return: true if event was scheduled
+ * false if the client is not waiting for the event
*/
-void mei_cl_bus_rx_event(struct mei_cl *cl)
+bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
if (!cldev || !cldev->event_cb)
- return;
+ return false;
if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
- return;
+ return false;
set_bit(MEI_CL_EVENT_RX, &cldev->events);
schedule_work(&cldev->event_work);
+
+ return true;
}
/**
@@ -398,7 +415,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
if (!cl) {
mutex_lock(&bus->device_lock);
- cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+ cl = mei_cl_alloc_linked(bus);
mutex_unlock(&bus->device_lock);
if (IS_ERR(cl))
return PTR_ERR(cl);
@@ -958,6 +975,22 @@ void mei_cl_bus_rescan(struct mei_device *bus)
dev_dbg(bus->dev, "rescan end");
}
+void mei_cl_bus_rescan_work(struct work_struct *work)
+{
+ struct mei_device *bus =
+ container_of(work, struct mei_device, bus_rescan_work);
+ struct mei_me_client *me_cl;
+
+ mutex_lock(&bus->device_lock);
+ me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid);
+ if (me_cl)
+ mei_amthif_host_init(bus, me_cl);
+ mei_me_cl_put(me_cl);
+ mutex_unlock(&bus->device_lock);
+
+ mei_cl_bus_rescan(bus);
+}
+
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
struct module *owner)
{
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index a6c87c713193..bab17e4197b6 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -359,7 +359,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb)
* Return: mei_cl_cb pointer or NULL;
*/
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
- struct file *fp)
+ const struct file *fp)
{
struct mei_cl_cb *cb;
@@ -368,7 +368,7 @@ struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
return NULL;
INIT_LIST_HEAD(&cb->list);
- cb->file_object = fp;
+ cb->fp = fp;
cb->cl = cl;
cb->buf_idx = 0;
cb->fop_type = type;
@@ -455,7 +455,8 @@ int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
* Return: cb on success and NULL on failure
*/
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
- enum mei_cb_file_ops type, struct file *fp)
+ enum mei_cb_file_ops type,
+ const struct file *fp)
{
struct mei_cl_cb *cb;
@@ -485,7 +486,7 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
struct mei_cl_cb *cb;
list_for_each_entry(cb, &cl->rd_completed, list)
- if (!fp || fp == cb->file_object)
+ if (!fp || fp == cb->fp)
return cb;
return NULL;
@@ -503,12 +504,12 @@ void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
- if (!fp || fp == cb->file_object)
+ if (!fp || fp == cb->fp)
mei_io_cb_free(cb);
list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
- if (!fp || fp == cb->file_object)
+ if (!fp || fp == cb->fp)
mei_io_cb_free(cb);
}
@@ -535,7 +536,6 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
- mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
mei_cl_read_cb_flush(cl, fp);
@@ -587,27 +587,23 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
* mei_cl_link - allocate host id in the host map
*
* @cl: host client
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
*
* Return: 0 on success
* -EINVAL on incorrect values
* -EMFILE if open count exceeded.
*/
-int mei_cl_link(struct mei_cl *cl, int id)
+int mei_cl_link(struct mei_cl *cl)
{
struct mei_device *dev;
long open_handle_count;
+ int id;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
- /* If Id is not assigned get one*/
- if (id == MEI_HOST_CLIENT_ID_ANY)
- id = find_first_zero_bit(dev->host_clients_map,
- MEI_CLIENTS_MAX);
-
+ id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
if (id >= MEI_CLIENTS_MAX) {
dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
return -EMFILE;
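mei_cl_link() now always draws the host id from the bitmap instead of accepting a caller-chosen one. A self-contained sketch of the same first-fit scheme, simplified to a single 32-bit word of ids:

    #include <stdio.h>

    #define MAX_IDS 32

    /* First-fit id allocation over a bitmap, mirroring what
     * find_first_zero_bit() + set_bit() do in mei_cl_link();
     * returns -1 when the map is full (-EMFILE in the driver).
     */
    static int alloc_id(unsigned long *map)
    {
        int id;

        for (id = 0; id < MAX_IDS; id++) {
            if (!(*map & (1UL << id))) {
                *map |= 1UL << id;
                return id;
            }
        }
        return -1;
    }

    int main(void)
    {
        unsigned long map = 0;
        int a = alloc_id(&map);
        int b = alloc_id(&map);

        printf("ids: %d, %d\n", a, b);      /* ids: 0, 1 */
        return 0;
    }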
@@ -648,7 +644,7 @@ int mei_cl_unlink(struct mei_cl *cl)
if (!cl)
return 0;
- /* wd and amthif might not be initialized */
+ /* amthif might not be initialized */
if (!cl->dev)
return 0;
@@ -670,31 +666,12 @@ int mei_cl_unlink(struct mei_cl *cl)
return 0;
}
-
-void mei_host_client_init(struct work_struct *work)
+void mei_host_client_init(struct mei_device *dev)
{
- struct mei_device *dev =
- container_of(work, struct mei_device, init_work);
- struct mei_me_client *me_cl;
-
- mutex_lock(&dev->device_lock);
-
-
- me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
- if (me_cl)
- mei_amthif_host_init(dev, me_cl);
- mei_me_cl_put(me_cl);
-
- me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
- if (me_cl)
- mei_wd_host_init(dev, me_cl);
- mei_me_cl_put(me_cl);
-
dev->dev_state = MEI_DEV_ENABLED;
dev->reset_count = 0;
- mutex_unlock(&dev->device_lock);
- mei_cl_bus_rescan(dev);
+ schedule_work(&dev->bus_rescan_work);
pm_runtime_mark_last_busy(dev->dev);
dev_dbg(dev->dev, "rpm: autosuspend\n");
@@ -726,6 +703,33 @@ bool mei_hbuf_acquire(struct mei_device *dev)
}
/**
+ * mei_cl_wake_all - wake up readers, writers and event waiters so
+ * they can be interrupted
+ *
+ * @cl: host client
+ */
+static void mei_cl_wake_all(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+
+ /* synchronized under device mutex */
+ if (waitqueue_active(&cl->rx_wait)) {
+ cl_dbg(dev, cl, "Waking up reading client!\n");
+ wake_up_interruptible(&cl->rx_wait);
+ }
+ /* synchronized under device mutex */
+ if (waitqueue_active(&cl->tx_wait)) {
+ cl_dbg(dev, cl, "Waking up writing client!\n");
+ wake_up_interruptible(&cl->tx_wait);
+ }
+ /* synchronized under device mutex */
+ if (waitqueue_active(&cl->ev_wait)) {
+ cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
+ wake_up_interruptible(&cl->ev_wait);
+ }
+}
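The repeated "synchronized under device mutex" comments are load-bearing: waitqueue_active() is only a safe fast-path check when waker and sleeper serialize on the same lock; otherwise a wakeup can be lost between the sleeper's condition check and its enqueue. The safe shape, as a fragment using only locks this driver already holds:

    /* Without a common lock, the waker can observe an empty queue
     * just before the sleeper enqueues itself, and the wakeup is
     * lost. Holding device_lock on both sides closes the race.
     */
    mutex_lock(&dev->device_lock);
    if (waitqueue_active(&cl->rx_wait))       /* cheap check, now safe */
        wake_up_interruptible(&cl->rx_wait);
    mutex_unlock(&dev->device_lock);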
+
+/**
* mei_cl_set_disconnected - set disconnected state and clear
* associated states and resources
*
@@ -740,8 +744,11 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
return;
cl->state = MEI_FILE_DISCONNECTED;
+ mei_io_list_free(&dev->write_list, cl);
+ mei_io_list_free(&dev->write_waiting_list, cl);
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ mei_cl_wake_all(cl);
cl->mei_flow_ctrl_creds = 0;
cl->timer_count = 0;
@@ -1034,7 +1041,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
* Return: 0 on success, <0 on failure.
*/
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
- struct file *file)
+ const struct file *file)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
@@ -1119,11 +1126,10 @@ nortpm:
* mei_cl_alloc_linked - allocate and link host client
*
* @dev: the device structure
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
*
* Return: cl on success ERR_PTR on failure
*/
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
struct mei_cl *cl;
int ret;
@@ -1134,7 +1140,7 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
goto err;
}
- ret = mei_cl_link(cl, id);
+ ret = mei_cl_link(cl);
if (ret)
goto err;
@@ -1149,11 +1155,12 @@ err:
/**
* mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
*
- * @cl: private data of the file object
+ * @cl: host client
+ * @fp: the file pointer associated with the client
*
* Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
*/
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_creds(struct mei_cl *cl, const struct file *fp)
{
int rets;
@@ -1164,7 +1171,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
return 1;
if (mei_cl_is_fixed_address(cl)) {
- rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL);
+ rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
if (rets && rets != -EBUSY)
return rets;
return 1;
@@ -1186,7 +1193,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
* 0 on success
* -EINVAL when ctrl credits are <= 0
*/
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
@@ -1283,7 +1290,8 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
*
* Return: 0 on success, error otherwise.
*/
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
+int mei_cl_notify_request(struct mei_cl *cl,
+ const struct file *file, u8 request)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
@@ -1368,12 +1376,12 @@ void mei_cl_notify(struct mei_cl *cl)
cl_dbg(dev, cl, "notify event");
cl->notify_ev = true;
- wake_up_interruptible_all(&cl->ev_wait);
+ if (!mei_cl_bus_notify_event(cl))
+ wake_up_interruptible(&cl->ev_wait);
if (cl->ev_async)
kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
- mei_cl_bus_notify_event(cl);
}
/**
@@ -1422,6 +1430,25 @@ out:
}
/**
+ * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control
+ * for given host client
+ *
+ * @cl: host client
+ *
+ * Return: true, if found at least one cb.
+ */
+static bool mei_cl_is_read_fc_cb(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+ struct mei_cl_cb *cb;
+
+ list_for_each_entry(cb, &dev->ctrl_wr_list.list, list)
+ if (cb->fop_type == MEI_FOP_READ && cb->cl == cl)
+ return true;
+ return false;
+}
+
+/**
* mei_cl_read_start - the start read client message function.
*
* @cl: host client
@@ -1430,7 +1457,7 @@ out:
*
* Return: 0 on success, <0 on failure.
*/
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
@@ -1445,7 +1472,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
return -ENODEV;
/* HW currently supports only one pending read */
- if (!list_empty(&cl->rd_pending))
+ if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl))
return -EBUSY;
if (!mei_me_cl_is_active(cl->me_cl)) {
@@ -1524,7 +1551,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
first_chunk = cb->buf_idx == 0;
- rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1;
+ rets = first_chunk ? mei_cl_flow_ctrl_creds(cl, cb->fp) : 1;
if (rets < 0)
return rets;
@@ -1556,7 +1583,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
- cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
+ cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
cb->buf.size, cb->buf_idx);
rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
@@ -1618,7 +1645,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
- return rets;
+ goto free;
}
cb->buf_idx = 0;
@@ -1630,7 +1657,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
mei_hdr.msg_complete = 0;
mei_hdr.internal = cb->internal;
- rets = mei_cl_flow_ctrl_creds(cl);
+ rets = mei_cl_flow_ctrl_creds(cl, cb->fp);
if (rets < 0)
goto err;
@@ -1677,7 +1704,8 @@ out:
mutex_unlock(&dev->device_lock);
rets = wait_event_interruptible(cl->tx_wait,
- cl->writing_state == MEI_WRITE_COMPLETE);
+ cl->writing_state == MEI_WRITE_COMPLETE ||
+ (!mei_cl_is_connected(cl)));
mutex_lock(&dev->device_lock);
/* wait_event_interruptible returns -ERESTARTSYS */
if (rets) {
@@ -1685,6 +1713,10 @@ out:
rets = -EINTR;
goto err;
}
+ if (cl->writing_state != MEI_WRITE_COMPLETE) {
+ rets = -EFAULT;
+ goto err;
+ }
}
rets = size;
@@ -1692,6 +1724,8 @@ err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
+free:
+ mei_io_cb_free(cb);
return rets;
}
@@ -1721,10 +1755,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
case MEI_FOP_READ:
list_add_tail(&cb->list, &cl->rd_completed);
- if (waitqueue_active(&cl->rx_wait))
- wake_up_interruptible_all(&cl->rx_wait);
- else
- mei_cl_bus_rx_event(cl);
+ if (!mei_cl_bus_rx_event(cl))
+ wake_up_interruptible(&cl->rx_wait);
break;
case MEI_FOP_CONNECT:
@@ -1753,44 +1785,3 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry(cl, &dev->file_list, link)
mei_cl_set_disconnected(cl);
}
-
-
-/**
- * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
- *
- * @dev: mei device
- */
-void mei_cl_all_wakeup(struct mei_device *dev)
-{
- struct mei_cl *cl;
-
- list_for_each_entry(cl, &dev->file_list, link) {
- if (waitqueue_active(&cl->rx_wait)) {
- cl_dbg(dev, cl, "Waking up reading client!\n");
- wake_up_interruptible(&cl->rx_wait);
- }
- if (waitqueue_active(&cl->tx_wait)) {
- cl_dbg(dev, cl, "Waking up writing client!\n");
- wake_up_interruptible(&cl->tx_wait);
- }
-
- /* synchronized under device mutex */
- if (waitqueue_active(&cl->ev_wait)) {
- cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
- wake_up_interruptible(&cl->ev_wait);
- }
- }
-}
-
-/**
- * mei_cl_all_write_clear - clear all pending writes
- *
- * @dev: mei device
- */
-void mei_cl_all_write_clear(struct mei_device *dev)
-{
- mei_io_list_free(&dev->write_list, NULL);
- mei_io_list_free(&dev->write_waiting_list, NULL);
-}
-
-
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 04e1aa39243f..0d7a3a1fef78 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -18,7 +18,6 @@
#define _MEI_CLIENT_H_
#include <linux/types.h>
-#include <linux/watchdog.h>
#include <linux/poll.h>
#include <linux/mei.h>
@@ -84,7 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
* MEI IO Functions
*/
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
- struct file *fp);
+ const struct file *fp);
void mei_io_cb_free(struct mei_cl_cb *priv_cb);
int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length);
@@ -108,21 +107,19 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev);
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
-int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_link(struct mei_cl *cl);
int mei_cl_unlink(struct mei_cl *cl);
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id);
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
const struct file *fp);
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
- enum mei_cb_file_ops type, struct file *fp);
+ enum mei_cb_file_ops type,
+ const struct file *fp);
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
-
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
/*
* MEI input output function prototype
*/
@@ -217,10 +214,10 @@ void mei_cl_set_disconnected(struct mei_cl *cl);
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
- struct file *file);
+ const struct file *file);
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
struct mei_cl_cb *cmpl_list);
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
@@ -229,19 +226,18 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
-void mei_host_client_init(struct work_struct *work);
+void mei_host_client_init(struct mei_device *dev);
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop);
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request);
+int mei_cl_notify_request(struct mei_cl *cl,
+ const struct file *file, u8 request);
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
void mei_cl_notify(struct mei_cl *cl);
void mei_cl_all_disconnect(struct mei_device *dev);
-void mei_cl_all_wakeup(struct mei_device *dev);
-void mei_cl_all_write_clear(struct mei_device *dev);
#define MEI_CL_FMT "cl:host=%02d me=%02d "
#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
@@ -249,6 +245,9 @@ void mei_cl_all_write_clear(struct mei_device *dev);
#define cl_dbg(dev, cl, format, arg...) \
dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+#define cl_warn(dev, cl, format, arg...) \
+ dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+
#define cl_err(dev, cl, format, arg...) \
dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index a138d8a27ab5..c6c051b52f55 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -50,6 +50,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
}
pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
@@ -90,23 +91,37 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
{
struct mei_device *dev = fp->private_data;
struct mei_cl *cl;
- const size_t bufsz = 1024;
+ size_t bufsz = 1;
char *buf;
int i = 0;
int pos = 0;
int ret;
+#define HDR " |me|host|state|rd|wr|\n"
+
if (!dev)
return -ENODEV;
+ mutex_lock(&dev->device_lock);
+
+ /*
+ * If the driver is not enabled the list won't be consistent;
+ * output an empty table.
+ */
+ if (dev->dev_state == MEI_DEV_ENABLED)
+ list_for_each_entry(cl, &dev->file_list, link)
+ bufsz++;
+
+ bufsz *= sizeof(HDR) + 1;
+
buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ mutex_unlock(&dev->device_lock);
return -ENOMEM;
+ }
- pos += scnprintf(buf + pos, bufsz - pos,
- " |me|host|state|rd|wr|\n");
-
- mutex_lock(&dev->device_lock);
+ pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
@@ -115,7 +130,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
list_for_each_entry(cl, &dev->file_list, link) {
pos += scnprintf(buf + pos, bufsz - pos,
- "%2d|%2d|%4d|%5d|%2d|%2d|\n",
+ "%3d|%2d|%4d|%5d|%2d|%2d|\n",
i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
!list_empty(&cl->rd_completed), cl->writing_state);
i++;
@@ -150,16 +165,21 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n",
mei_hbm_state_str(dev->hbm_state));
- if (dev->hbm_state == MEI_HBM_STARTED) {
+ if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS &&
+ dev->hbm_state <= MEI_HBM_STARTED) {
pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
dev->hbm_f_pg_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
dev->hbm_f_dc_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tIE: %01d\n",
+ dev->hbm_f_ie_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
dev->hbm_f_dot_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
dev->hbm_f_ev_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
+ dev->hbm_f_fa_supported);
}
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
@@ -175,6 +195,30 @@ static const struct file_operations mei_dbgfs_fops_devstate = {
.llseek = generic_file_llseek,
};
+static ssize_t mei_dbgfs_write_allow_fa(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_device *dev;
+ int ret;
+
+ dev = container_of(file->private_data,
+ struct mei_device, allow_fixed_address);
+
+ ret = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (ret < 0)
+ return ret;
+ dev->override_fixed_address = true;
+ return ret;
+}
+
+static const struct file_operations mei_dbgfs_fops_allow_fa = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = mei_dbgfs_write_allow_fa,
+ .llseek = generic_file_llseek,
+};
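The dedicated write hook exists so the driver can distinguish "user explicitly wrote a value" from "file never touched". The connect path further down combines the two flags; a simplified sketch of the resulting policy:

    /* simplified from mei_ioctl_connect_client() below */
    static bool fixed_address_allowed(struct mei_device *dev)
    {
        if (dev->override_fixed_address)        /* user wrote the file  */
            return dev->allow_fixed_address;    /* user's value wins    */
        return dev->hbm_f_fa_supported;         /* else ask the FW cap  */
    }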
+
/**
* mei_dbgfs_deregister - Remove the debugfs files and directories
*
@@ -224,8 +268,9 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
dev_err(dev->dev, "devstate: registration failed\n");
goto err;
}
- f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
- &dev->allow_fixed_address);
+ f = debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
+ &dev->allow_fixed_address,
+ &mei_dbgfs_fops_allow_fa);
if (!f) {
dev_err(dev->dev, "allow_fixed_address: registration failed\n");
goto err;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index e7b7aad0999b..5e305d2605f3 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -301,7 +301,10 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
memset(enum_req, 0, len);
enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
- enum_req->allow_add = dev->hbm_f_dc_supported;
+ enum_req->flags |= dev->hbm_f_dc_supported ?
+ MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+ enum_req->flags |= dev->hbm_f_ie_supported ?
+ MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
if (ret) {
@@ -401,6 +404,9 @@ static int mei_hbm_fw_add_cl_req(struct mei_device *dev,
if (ret)
status = !MEI_HBMS_SUCCESS;
+ if (dev->dev_state == MEI_DEV_ENABLED)
+ schedule_work(&dev->bus_rescan_work);
+
return mei_hbm_add_cl_resp(dev, req->me_addr, status);
}
@@ -543,7 +549,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
/* We got all client properties */
if (next_client_index == MEI_CLIENTS_MAX) {
dev->hbm_state = MEI_HBM_STARTED;
- schedule_work(&dev->init_work);
+ mei_host_client_init(dev);
return 0;
}
@@ -789,8 +795,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
cl->state = MEI_FILE_CONNECTED;
else {
cl->state = MEI_FILE_DISCONNECT_REPLY;
- if (rs->status == MEI_CL_CONN_NOT_FOUND)
+ if (rs->status == MEI_CL_CONN_NOT_FOUND) {
mei_me_cl_del(dev, cl->me_cl);
+ if (dev->dev_state == MEI_DEV_ENABLED)
+ schedule_work(&dev->bus_rescan_work);
+ }
}
cl->status = mei_cl_conn_status_to_errno(rs->status);
}
@@ -866,7 +875,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
if (cl) {
- cl_dbg(dev, cl, "fw disconnect request received\n");
+ cl_warn(dev, cl, "fw disconnect request received\n");
cl->state = MEI_FILE_DISCONNECTING;
cl->timer_count = 0;
@@ -972,6 +981,9 @@ static void mei_hbm_config_features(struct mei_device *dev)
if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
dev->hbm_f_dc_supported = 1;
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
+ dev->hbm_f_ie_supported = 1;
+
/* disconnect on connect timeout instead of link reset */
if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
dev->hbm_f_dot_supported = 1;
@@ -979,6 +991,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
/* Notification Event Support */
if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
dev->hbm_f_ev_supported = 1;
+
+ /* Fixed Address Client Support */
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
+ dev->hbm_f_fa_supported = 1;
}
/**
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index a8a68acd3267..0dcb854b4bfc 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -121,6 +121,10 @@
#define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */
#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
+
+#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
+#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 25b1997a62cb..e2fb44cc5c37 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -189,8 +189,11 @@ static int mei_me_fw_status(struct mei_device *dev,
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
- ret = pci_read_config_dword(pdev,
- fw_src->status[i], &fw_status->status[i]);
+ ret = pci_read_config_dword(pdev, fw_src->status[i],
+ &fw_status->status[i]);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ fw_src->status[i],
+ fw_status->status[i]);
if (ret)
return ret;
}
@@ -215,6 +218,7 @@ static void mei_me_hw_config(struct mei_device *dev)
reg = 0;
pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
hw->d0i3_supported =
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
@@ -1248,6 +1252,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
u32 reg;
pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+ trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
return (reg & 0x600) == 0x200;
}
@@ -1260,6 +1265,7 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
u32 reg;
/* Read ME FW Status check for SPS Firmware */
pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
/* if bits [19:16] = 15, running SPS Firmware */
return (reg & 0xf0000) == 0xf0000;
}
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index bae680c648ff..4a6c1b85f11e 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -28,6 +28,9 @@
#include "client.h"
#include "hbm.h"
+#include "mei-trace.h"
+
+
/**
* mei_txe_reg_read - Reads 32bit data from the txe device
*
@@ -640,8 +643,11 @@ static int mei_txe_fw_status(struct mei_device *dev,
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
- ret = pci_read_config_dword(pdev,
- fw_src->status[i], &fw_status->status[i]);
+ ret = pci_read_config_dword(pdev, fw_src->status[i],
+ &fw_status->status[i]);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ fw_src->status[i],
+ fw_status->status[i]);
if (ret)
return ret;
}
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 4cebde85924f..9daf3f9aed25 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -29,7 +29,6 @@
#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
-#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
@@ -54,6 +53,12 @@
#define HBM_MAJOR_VERSION_DC 2
/*
+ * MEI version with immediate reply to enum request support
+ */
+#define HBM_MINOR_VERSION_IE 0
+#define HBM_MAJOR_VERSION_IE 2
+
+/*
* MEI version with disconnect on connection timeout support
*/
#define HBM_MINOR_VERSION_DOT 0
@@ -65,6 +70,12 @@
#define HBM_MINOR_VERSION_EV 0
#define HBM_MAJOR_VERSION_EV 2
+/*
+ * MEI version with fixed address client support
+ */
+#define HBM_MINOR_VERSION_FA 0
+#define HBM_MAJOR_VERSION_FA 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -241,15 +252,26 @@ struct hbm_me_stop_request {
} __packed;
/**
- * struct hbm_host_enum_request - enumeration request from host to fw
+ * enum hbm_host_enum_flags - enumeration request flags (HBM version >= 2.0)
*
- * @hbm_cmd: bus message command header
- * @allow_add: allow dynamic clients add HBM version >= 2.0
+ * @MEI_HBM_ENUM_F_ALLOW_ADD: allow dynamic clients add
+ * @MEI_HBM_ENUM_F_IMMEDIATE_ENUM: allow FW to send answer immediately
+ */
+enum hbm_host_enum_flags {
+ MEI_HBM_ENUM_F_ALLOW_ADD = BIT(0),
+ MEI_HBM_ENUM_F_IMMEDIATE_ENUM = BIT(1),
+};
+
+/**
+ * struct hbm_host_enum_request - enumeration request from host to fw
+ *
+ * @hbm_cmd : bus message command header
+ * @flags : request flags
* @reserved: reserved
*/
struct hbm_host_enum_request {
u8 hbm_cmd;
- u8 allow_add;
+ u8 flags;
u8 reserved[2];
} __packed;
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 3edafc8d3ad4..f7c8dfdb6a12 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -91,8 +91,8 @@ EXPORT_SYMBOL_GPL(mei_fw_status2str);
*/
void mei_cancel_work(struct mei_device *dev)
{
- cancel_work_sync(&dev->init_work);
cancel_work_sync(&dev->reset_work);
+ cancel_work_sync(&dev->bus_rescan_work);
cancel_delayed_work(&dev->timer_work);
}
@@ -148,16 +148,10 @@ int mei_reset(struct mei_device *dev)
state != MEI_DEV_POWER_UP) {
/* remove all waiting requests */
- mei_cl_all_write_clear(dev);
-
mei_cl_all_disconnect(dev);
- /* wake up all readers and writers so they can be interrupted */
- mei_cl_all_wakeup(dev);
-
/* remove entry if already in list */
- dev_dbg(dev->dev, "remove iamthif and wd from the file list.\n");
- mei_cl_unlink(&dev->wd_cl);
+ dev_dbg(dev->dev, "remove iamthif from the file list.\n");
mei_cl_unlink(&dev->iamthif_cl);
mei_amthif_reset_params(dev);
}
@@ -165,7 +159,6 @@ int mei_reset(struct mei_device *dev)
mei_hbm_reset(dev);
dev->rd_msg_hdr = 0;
- dev->wd_pending = false;
if (ret) {
dev_err(dev->dev, "hw_reset failed ret = %d\n", ret);
@@ -335,16 +328,12 @@ void mei_stop(struct mei_device *dev)
mutex_lock(&dev->device_lock);
- mei_wd_stop(dev);
-
dev->dev_state = MEI_DEV_POWER_DOWN;
mei_reset(dev);
/* move device to disabled state unconditionally */
dev->dev_state = MEI_DEV_DISABLED;
mutex_unlock(&dev->device_lock);
-
- mei_watchdog_unregister(dev);
}
EXPORT_SYMBOL_GPL(mei_stop);
@@ -394,7 +383,6 @@ void mei_device_init(struct mei_device *dev,
init_waitqueue_head(&dev->wait_hw_ready);
init_waitqueue_head(&dev->wait_pg);
init_waitqueue_head(&dev->wait_hbm_start);
- init_waitqueue_head(&dev->wait_stop_wd);
dev->dev_state = MEI_DEV_INITIALIZING;
dev->reset_count = 0;
@@ -404,13 +392,11 @@ void mei_device_init(struct mei_device *dev,
mei_io_list_init(&dev->ctrl_rd_list);
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- INIT_WORK(&dev->init_work, mei_host_client_init);
INIT_WORK(&dev->reset_work, mei_reset_work);
+ INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);
- INIT_LIST_HEAD(&dev->wd_cl.link);
INIT_LIST_HEAD(&dev->iamthif_cl.link);
mei_io_list_init(&dev->amthif_cmd_list);
- mei_io_list_init(&dev->amthif_rd_complete_list);
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 64b568a0268d..1e5cb1f704f8 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -48,7 +48,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
dev_dbg(dev->dev, "completing call back.\n");
if (cl == &dev->iamthif_cl)
- mei_amthif_complete(dev, cb);
+ mei_amthif_complete(cl, cb);
else
mei_cl_complete(cl, cb);
}
@@ -104,6 +104,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
unsigned char *buffer = NULL;
+ size_t buf_sz;
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
if (!cb) {
@@ -124,11 +125,21 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
goto out;
}
- if (cb->buf.size < mei_hdr->length + cb->buf_idx) {
- cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
+ buf_sz = mei_hdr->length + cb->buf_idx;
+ /* catch for integer overflow */
+ if (buf_sz < cb->buf_idx) {
+ cl_err(dev, cl, "message is too big len %d idx %zu\n",
+ mei_hdr->length, cb->buf_idx);
+
+ list_move_tail(&cb->list, &complete_list->list);
+ cb->status = -EMSGSIZE;
+ goto out;
+ }
+
+ if (cb->buf.size < buf_sz) {
+ cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
cb->buf.size, mei_hdr->length, cb->buf_idx);
- buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx,
- GFP_KERNEL);
+ buffer = krealloc(cb->buf.data, buf_sz, GFP_KERNEL);
if (!buffer) {
cb->status = -ENOMEM;
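The new wrap check is the standard unsigned-overflow idiom; in isolation (names illustrative):

    /* for unsigned a + b, the sum wrapped around iff the result is
     * smaller than either operand, so checking one operand suffices
     */
    size_t buf_sz = hdr_len + buf_idx;
    if (buf_sz < buf_idx)
        return -EMSGSIZE;       /* would have exceeded SIZE_MAX */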
@@ -136,7 +147,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
goto out;
}
cb->buf.data = buffer;
- cb->buf.size = mei_hdr->length + cb->buf_idx;
+ cb->buf.size = buf_sz;
}
buffer = cb->buf.data + cb->buf_idx;
@@ -145,8 +156,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
cb->buf_idx += mei_hdr->length;
if (mei_hdr->msg_complete) {
- cb->read_time = jiffies;
- cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx);
+ cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
list_move_tail(&cb->list, &complete_list->list);
} else {
pm_runtime_mark_last_busy(dev->dev);
@@ -229,6 +239,16 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
+static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
+{
+ return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
+}
+
+static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
+{
+ return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
+}
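These predicates encode the MEI addressing convention: host and ME address both zero is bus-management (HBM) traffic; host address zero with a non-zero ME address is a fixed-address client that has no host-side peer. A routing sketch — dispatch_hbm(), client_found() and deliver() are hypothetical helpers, while mei_irq_discard_msg() is real and used below:

    if (hdr_is_hbm(mei_hdr)) {
        dispatch_hbm(dev, mei_hdr);          /* bus management message */
    } else if (client_found(dev, mei_hdr)) {
        deliver(dev, mei_hdr);               /* connected host client  */
    } else if (hdr_is_fixed(mei_hdr)) {
        mei_irq_discard_msg(dev, mei_hdr);   /* silently drop          */
    } else {
        /* no recipient: treat as corrupted header */
    }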
+
/**
* mei_irq_read_handler - bottom half read routine after ISR to
* handle the read processing.
@@ -270,7 +290,7 @@ int mei_irq_read_handler(struct mei_device *dev,
}
/* HBM message */
- if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
+ if (hdr_is_hbm(mei_hdr)) {
ret = mei_hbm_dispatch(dev, mei_hdr);
if (ret) {
dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
@@ -290,6 +310,14 @@ int mei_irq_read_handler(struct mei_device *dev,
/* if no recipient cl was found we assume corrupted header */
if (&cl->link == &dev->file_list) {
+ /* A message for a fixed address client that is not connected
+ * should be silently discarded
+ */
+ if (hdr_is_fixed(mei_hdr)) {
+ mei_irq_discard_msg(dev, mei_hdr);
+ ret = 0;
+ goto reset_slots;
+ }
dev_err(dev->dev, "no destination client found 0x%08X\n",
dev->rd_msg_hdr);
ret = -EBADMSG;
@@ -360,21 +388,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
list_move_tail(&cb->list, &cmpl_list->list);
}
- if (dev->wd_state == MEI_WD_STOPPING) {
- dev->wd_state = MEI_WD_IDLE;
- wake_up(&dev->wait_stop_wd);
- }
-
- if (mei_cl_is_connected(&dev->wd_cl)) {
- if (dev->wd_pending &&
- mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
- ret = mei_wd_send(dev);
- if (ret)
- return ret;
- dev->wd_pending = false;
- }
- }
-
/* complete control write list CB */
dev_dbg(dev->dev, "complete control write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
@@ -462,7 +475,6 @@ static void mei_connect_timeout(struct mei_cl *cl)
*/
void mei_timer(struct work_struct *work)
{
- unsigned long timeout;
struct mei_cl *cl;
struct mei_device *dev = container_of(work,
@@ -508,45 +520,15 @@ void mei_timer(struct work_struct *work)
mei_reset(dev);
dev->iamthif_canceled = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_timer = 0;
mei_io_cb_free(dev->iamthif_current_cb);
dev->iamthif_current_cb = NULL;
- dev->iamthif_file_object = NULL;
+ dev->iamthif_fp = NULL;
mei_amthif_run_next_cmd(dev);
}
}
- if (dev->iamthif_timer) {
-
- timeout = dev->iamthif_timer +
- mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-
- dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
- dev->iamthif_timer);
- dev_dbg(dev->dev, "timeout = %ld\n", timeout);
- dev_dbg(dev->dev, "jiffies = %ld\n", jiffies);
- if (time_after(jiffies, timeout)) {
- /*
- * User didn't read the AMTHI data on time (15sec)
- * freeing AMTHI for other requests
- */
-
- dev_dbg(dev->dev, "freeing AMTHI for other requests\n");
-
- mei_io_list_flush(&dev->amthif_rd_complete_list,
- &dev->iamthif_cl);
- mei_io_cb_free(dev->iamthif_current_cb);
- dev->iamthif_current_cb = NULL;
-
- dev->iamthif_file_object->private_data = NULL;
- dev->iamthif_file_object = NULL;
- dev->iamthif_timer = 0;
- mei_amthif_run_next_cmd(dev);
-
- }
- }
out:
if (dev->dev_state != MEI_DEV_DISABLED)
schedule_delayed_work(&dev->timer_work, 2 * HZ);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 80f9afcb1382..52635b063873 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -65,7 +65,7 @@ static int mei_open(struct inode *inode, struct file *file)
goto err_unlock;
}
- cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
+ cl = mei_cl_alloc_linked(dev);
if (IS_ERR(cl)) {
err = PTR_ERR(cl);
goto err_unlock;
@@ -159,27 +159,22 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
+ if (ubuf == NULL) {
+ rets = -EMSGSIZE;
+ goto out;
+ }
+
if (cl == &dev->iamthif_cl) {
rets = mei_amthif_read(dev, file, ubuf, length, offset);
goto out;
}
cb = mei_cl_read_cb(cl, file);
- if (cb) {
- /* read what left */
- if (cb->buf_idx > *offset)
- goto copy_buffer;
- /* offset is beyond buf_idx we have no more data return 0 */
- if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
- rets = 0;
- goto free;
- }
- /* Offset needs to be cleaned for contiguous reads*/
- if (cb->buf_idx == 0 && *offset > 0)
- *offset = 0;
- } else if (*offset > 0) {
+ if (cb)
+ goto copy_buffer;
+
+ if (*offset > 0)
*offset = 0;
- }
err = mei_cl_read_start(cl, length, file);
if (err && err != -EBUSY) {
@@ -214,11 +209,6 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
cb = mei_cl_read_cb(cl, file);
if (!cb) {
- if (mei_cl_is_fixed_address(cl) && dev->allow_fixed_address) {
- cb = mei_cl_read_cb(cl, NULL);
- if (cb)
- goto copy_buffer;
- }
rets = 0;
goto out;
}
@@ -231,10 +221,10 @@ copy_buffer:
goto free;
}
- cl_dbg(dev, cl, "buf.size = %d buf.idx = %ld\n",
- cb->buf.size, cb->buf_idx);
- if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
- rets = -EMSGSIZE;
+ cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
+ cb->buf.size, cb->buf_idx, *offset);
+ if (*offset >= cb->buf_idx) {
+ rets = 0;
goto free;
}
@@ -250,11 +240,13 @@ copy_buffer:
rets = length;
*offset += length;
- if ((unsigned long)*offset < cb->buf_idx)
+ /* not all data was read, keep the cb */
+ if (*offset < cb->buf_idx)
goto out;
free:
mei_io_cb_free(cb);
+ *offset = 0;
out:
cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
@@ -275,9 +267,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
- struct mei_cl_cb *write_cb = NULL;
+ struct mei_cl_cb *cb;
struct mei_device *dev;
- unsigned long timeout = 0;
int rets;
if (WARN_ON(!cl || !cl->dev))
@@ -313,52 +304,31 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out;
}
- if (cl == &dev->iamthif_cl) {
- write_cb = mei_amthif_find_read_list_entry(dev, file);
-
- if (write_cb) {
- timeout = write_cb->read_time +
- mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-
- if (time_after(jiffies, timeout)) {
- *offset = 0;
- mei_io_cb_free(write_cb);
- write_cb = NULL;
- }
- }
- }
-
*offset = 0;
- write_cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
- if (!write_cb) {
+ cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
+ if (!cb) {
rets = -ENOMEM;
goto out;
}
- rets = copy_from_user(write_cb->buf.data, ubuf, length);
+ rets = copy_from_user(cb->buf.data, ubuf, length);
if (rets) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
+ mei_io_cb_free(cb);
goto out;
}
if (cl == &dev->iamthif_cl) {
- rets = mei_amthif_write(cl, write_cb);
-
- if (rets) {
- dev_err(dev->dev,
- "amthif write failed with status = %d\n", rets);
- goto out;
- }
- mutex_unlock(&dev->device_lock);
- return length;
+ rets = mei_amthif_write(cl, cb);
+ if (!rets)
+ rets = length;
+ goto out;
}
- rets = mei_cl_write(cl, write_cb, false);
+ rets = mei_cl_write(cl, cb, false);
out:
mutex_unlock(&dev->device_lock);
- if (rets < 0)
- mei_io_cb_free(write_cb);
return rets;
}
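
Worth noting is the ownership rule the rewritten write path settles on: the callback is the function's to free only until it is handed to the transmit machinery, so the copy_from_user() failure path now frees it inline and the old tail-of-function cleanup disappears. A compact sketch of that rule under those assumptions; all names are illustrative (the real driver uses mei_cl_alloc_cb()/mei_io_cb_free()):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct tx_cb {				/* stand-in for struct mei_cl_cb */
	unsigned char *data;
	size_t length;
};

static struct tx_cb *alloc_tx_cb(size_t length)
{
	struct tx_cb *cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;
	cb->data = kmalloc(length, GFP_KERNEL);
	if (!cb->data) {
		kfree(cb);
		return NULL;
	}
	cb->length = length;
	return cb;
}

static void free_tx_cb(struct tx_cb *cb)
{
	kfree(cb->data);
	kfree(cb);
}

/* stand-in for the transmit machinery; takes ownership of cb and a
 * completion path would free it later */
static ssize_t submit_tx_cb(struct tx_cb *cb)
{
	return cb->length;	/* pretend everything was queued */
}

static ssize_t dev_write(const char __user *ubuf, size_t length)
{
	struct tx_cb *cb;
	ssize_t rets;

	cb = alloc_tx_cb(length);
	if (!cb)
		return -ENOMEM;

	if (copy_from_user(cb->data, ubuf, length)) {
		free_tx_cb(cb);		/* still ours: free it here */
		return -EFAULT;
	}

	rets = submit_tx_cb(cb);	/* ownership passes on submit */
	return rets < 0 ? rets : length;
}
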
@@ -393,12 +363,22 @@ static int mei_ioctl_connect_client(struct file *file,
/* find ME client we're trying to connect to */
me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
- if (!me_cl ||
- (me_cl->props.fixed_address && !dev->allow_fixed_address)) {
+ if (!me_cl) {
dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
&data->in_client_uuid);
- mei_me_cl_put(me_cl);
- return -ENOTTY;
+ rets = -ENOTTY;
+ goto end;
+ }
+
+ if (me_cl->props.fixed_address) {
+ bool forbidden = dev->override_fixed_address ?
+ !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
+ if (forbidden) {
+ dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
+ &data->in_client_uuid);
+ rets = -ENOTTY;
+ goto end;
+ }
}
dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
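
The new connect-time policy deserves a second look: a fixed-address client is now gated either by the user (when a module parameter overrides the default) or by the firmware capability bit negotiated over HBM. Factored into a helper the decision reads as below; the helper name is invented, while the fields are the ones this patch adds to struct mei_device further down:

/* illustrative helper; not part of the patch */
static bool fixed_address_forbidden(const struct mei_device *dev)
{
	if (dev->override_fixed_address)
		return !dev->allow_fixed_address;	/* user decides */
	return !dev->hbm_f_fa_supported;		/* firmware decides */
}
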
@@ -454,7 +434,7 @@ end:
*
* Return: 0 on success , <0 on error
*/
-static int mei_ioctl_client_notify_request(struct file *file, u32 request)
+static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
{
struct mei_cl *cl = file->private_data;
@@ -473,7 +453,7 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
*
* Return: 0 on success , <0 on error
*/
-static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get)
+static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
{
struct mei_cl *cl = file->private_data;
bool notify_ev;
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
index 388efb519138..e19e6acb191b 100644
--- a/drivers/misc/mei/mei-trace.c
+++ b/drivers/misc/mei/mei-trace.c
@@ -22,4 +22,6 @@
EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
+EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read);
+EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write);
#endif /* __CHECKER__ */
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
index 47e1bc6551d4..7d2d5d4a1624 100644
--- a/drivers/misc/mei/mei-trace.h
+++ b/drivers/misc/mei/mei-trace.h
@@ -60,7 +60,45 @@ TRACE_EVENT(mei_reg_write,
__entry->offs = offs;
__entry->val = val;
),
- TP_printk("[%s] write %s[%#x] = %#x)",
+ TP_printk("[%s] write %s[%#x] = %#x",
+ __get_str(dev), __entry->reg, __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(mei_pci_cfg_read,
+ TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+ TP_ARGS(dev, reg, offs, val),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(const char *, reg)
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev))
+ __entry->reg = reg;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] pci cfg read %s:[%#x] = %#x",
+ __get_str(dev), __entry->reg, __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(mei_pci_cfg_write,
+ TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+ TP_ARGS(dev, reg, offs, val),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(const char *, reg)
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev))
+ __entry->reg = reg;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] pci cfg write %s[%#x] = %#x",
__get_str(dev), __entry->reg, __entry->offs, __entry->val)
);
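
TRACE_EVENT() generates a trace_mei_pci_cfg_read()/trace_mei_pci_cfg_write() pair from the definitions above. A hedged example of how a hardware layer would fire the read-side tracepoint around a PCI config access — the wrapper itself is illustrative; only the tracepoint call follows directly from this header:

#include <linux/pci.h>
#include "mei-trace.h"

static u32 traced_cfg_read(struct pci_dev *pdev, u32 offs)
{
	u32 val = 0;

	pci_read_config_dword(pdev, offs, &val);
	trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG", offs, val);
	return val;
}
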
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 4250555d5e72..db78e6d99456 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -18,7 +18,7 @@
#define _MEI_DEV_H_
#include <linux/types.h>
-#include <linux/watchdog.h>
+#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
@@ -26,33 +26,13 @@
#include "hw.h"
#include "hbm.h"
-/*
- * watch dog definition
- */
-#define MEI_WD_HDR_SIZE 4
-#define MEI_WD_STOP_MSG_SIZE MEI_WD_HDR_SIZE
-#define MEI_WD_START_MSG_SIZE (MEI_WD_HDR_SIZE + 16)
-
-#define MEI_WD_DEFAULT_TIMEOUT 120 /* seconds */
-#define MEI_WD_MIN_TIMEOUT 120 /* seconds */
-#define MEI_WD_MAX_TIMEOUT 65535 /* seconds */
-
-#define MEI_WD_STOP_TIMEOUT 10 /* msecs */
-
-#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0)
-
-#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
-
/*
* AMTHI Client UUID
*/
extern const uuid_le mei_amthif_guid;
-/*
- * Watchdog Client UUID
- */
-extern const uuid_le mei_wd_guid;
+#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
/*
* Number of Maximum MEI Clients
@@ -73,15 +53,6 @@ extern const uuid_le mei_wd_guid;
*/
#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
-/*
- * Internal Clients Number
- */
-#define MEI_HOST_CLIENT_ID_ANY (-1)
-#define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */
-#define MEI_WD_HOST_CLIENT_ID 1
-#define MEI_IAMTHIF_HOST_CLIENT_ID 2
-
-
/* File state */
enum file_state {
MEI_FILE_INITIALIZING = 0,
@@ -123,12 +94,6 @@ enum mei_file_transaction_states {
MEI_READ_COMPLETE
};
-enum mei_wd_states {
- MEI_WD_IDLE,
- MEI_WD_RUNNING,
- MEI_WD_STOPPING,
-};
-
/**
* enum mei_cb_file_ops - file operation associated with the callback
* @MEI_FOP_READ: read
@@ -153,7 +118,7 @@ enum mei_cb_file_ops {
* Intel MEI message data struct
*/
struct mei_msg_data {
- u32 size;
+ size_t size;
unsigned char *data;
};
@@ -206,8 +171,7 @@ struct mei_cl;
* @fop_type: file operation type
* @buf: buffer for data associated with the callback
* @buf_idx: last read index
- * @read_time: last read operation time stamp (iamthif)
- * @file_object: pointer to file structure
+ * @fp: pointer to file structure
* @status: io status of the cb
* @internal: communication between driver and FW flag
* @completed: the transfer or reception has completed
@@ -217,9 +181,8 @@ struct mei_cl_cb {
struct mei_cl *cl;
enum mei_cb_file_ops fop_type;
struct mei_msg_data buf;
- unsigned long buf_idx;
- unsigned long read_time;
- struct file *file_object;
+ size_t buf_idx;
+ const struct file *fp;
int status;
u32 internal:1;
u32 completed:1;
@@ -341,12 +304,13 @@ struct mei_hw_ops {
/* MEI bus API*/
void mei_cl_bus_rescan(struct mei_device *bus);
+void mei_cl_bus_rescan_work(struct work_struct *work);
void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bool blocking);
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
-void mei_cl_bus_rx_event(struct mei_cl *cl);
-void mei_cl_bus_notify_event(struct mei_cl *cl);
+bool mei_cl_bus_rx_event(struct mei_cl *cl);
+bool mei_cl_bus_notify_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *bus);
int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
@@ -404,7 +368,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
 * @wait_hw_ready : wait queue for receive HW ready message from FW
* @wait_pg : wait queue for receive PG message from FW
* @wait_hbm_start : wait queue for receive HBM start message from FW
- * @wait_stop_wd : wait queue for receive WD stop message from FW
*
* @reset_count : number of consecutive resets
* @dev_state : device state
@@ -426,6 +389,8 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @hbm_f_dc_supported : hbm feature dynamic clients
* @hbm_f_dot_supported : hbm feature disconnect on timeout
* @hbm_f_ev_supported : hbm feature event notification
+ * @hbm_f_fa_supported : hbm feature fixed address client
+ * @hbm_f_ie_supported : hbm feature immediate reply to enum request
*
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
@@ -434,26 +399,19 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @me_client_index : last FW client index in enumeration
*
* @allow_fixed_address: allow user space to connect a fixed client
- *
- * @wd_cl : watchdog client
- * @wd_state : watchdog client state
- * @wd_pending : watchdog command is pending
- * @wd_timeout : watchdog expiration timeout
- * @wd_data : watchdog message buffer
+ * @override_fixed_address: force allow fixed address behavior
*
* @amthif_cmd_list : amthif list for cmd waiting
- * @amthif_rd_complete_list : amthif list for reading completed cmd data
- * @iamthif_file_object : file for current amthif operation
+ * @iamthif_fp : file for current amthif operation
* @iamthif_cl : amthif host client
* @iamthif_current_cb : amthif current operation callback
* @iamthif_open_count : number of opened amthif connections
- * @iamthif_timer : time stamp of current amthif command completion
* @iamthif_stall_timer : timer to detect amthif hang
* @iamthif_state : amthif processor state
* @iamthif_canceled : current amthif command is canceled
*
- * @init_work : work item for the device init
* @reset_work : work item for the device reset
+ * @bus_rescan_work : work item for the bus rescan
*
* @device_list : mei client bus list
* @cl_bus_lock : client bus list lock
@@ -486,7 +444,6 @@ struct mei_device {
wait_queue_head_t wait_hw_ready;
wait_queue_head_t wait_pg;
wait_queue_head_t wait_hbm_start;
- wait_queue_head_t wait_stop_wd;
/*
* mei device states
@@ -522,6 +479,8 @@ struct mei_device {
unsigned int hbm_f_dc_supported:1;
unsigned int hbm_f_dot_supported:1;
unsigned int hbm_f_ev_supported:1;
+ unsigned int hbm_f_fa_supported:1;
+ unsigned int hbm_f_ie_supported:1;
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
@@ -530,29 +489,21 @@ struct mei_device {
unsigned long me_client_index;
bool allow_fixed_address;
-
- struct mei_cl wd_cl;
- enum mei_wd_states wd_state;
- bool wd_pending;
- u16 wd_timeout;
- unsigned char wd_data[MEI_WD_START_MSG_SIZE];
-
+ bool override_fixed_address;
/* amthif list for cmd waiting */
struct mei_cl_cb amthif_cmd_list;
/* driver managed amthif list for reading completed amthif cmd data */
- struct mei_cl_cb amthif_rd_complete_list;
- struct file *iamthif_file_object;
+ const struct file *iamthif_fp;
struct mei_cl iamthif_cl;
struct mei_cl_cb *iamthif_current_cb;
long iamthif_open_count;
- unsigned long iamthif_timer;
u32 iamthif_stall_timer;
enum iamthif_states iamthif_state;
bool iamthif_canceled;
- struct work_struct init_work;
struct work_struct reset_work;
+ struct work_struct bus_rescan_work;
/* List of bus devices */
struct list_head device_list;
@@ -635,47 +586,18 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
int mei_amthif_release(struct mei_device *dev, struct file *file);
-struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
- struct file *file);
-
int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_amthif_run_next_cmd(struct mei_device *dev);
int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
-void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
+void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_amthif_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
struct mei_cl_cb *complete_list);
int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
/*
- * NFC functions
- */
-int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-void mei_nfc_host_exit(struct mei_device *dev);
-
-/*
- * NFC Client UUID
- */
-extern const uuid_le mei_nfc_guid;
-
-int mei_wd_send(struct mei_device *dev);
-int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-/*
- * mei_watchdog_register - Registering watchdog interface
- * once we got connection to the WD Client
- * @dev: mei device
- */
-int mei_watchdog_register(struct mei_device *dev);
-/*
- * mei_watchdog_unregister - Unregistering watchdog interface
- * @dev: mei device
- */
-void mei_watchdog_unregister(struct mei_device *dev);
-
-/*
* Register Access Function
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 75fc9c688df8..64e64da6da44 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -88,6 +88,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
+
/* required last entry */
{0, }
};
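
The two new IDs follow the standard PCI device-table idiom: each MEI_PCI_DEVICE() entry expands into a struct pci_device_id row, and the zeroed sentinel terminates the table for the PCI core. A generic sketch of the same idiom; the device ID below is hypothetical:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },	/* hypothetical ID */
	{ 0, }						/* required last entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);
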
@@ -210,7 +213,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = mei_register(dev, &pdev->dev);
if (err)
- goto release_irq;
+ goto stop;
pci_set_drvdata(pdev, dev);
@@ -231,6 +234,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+stop:
+ mei_stop(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
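
The fix in both probe functions is about unwind ordering: once mei_start() has run, a later failure (here, mei_register()) must stop the device state machine before tearing down the interrupt, so a new stop: label is threaded in above release_irq:. The general shape of that goto-based unwind, with illustrative stub names:

#include <linux/errno.h>

static int setup_irq(void)        { return 0; }
static void teardown_irq(void)    { }
static int start_device(void)     { return 0; }
static void stop_device(void)     { }
static int register_chardev(void) { return 0; }

static int demo_probe(void)
{
	int err;

	err = setup_irq();
	if (err)
		return err;

	err = start_device();		/* state machine now running */
	if (err)
		goto release_irq;

	err = register_chardev();	/* the step this patch fixes */
	if (err)
		goto stop;

	return 0;
stop:
	stop_device();			/* undo start_device() first */
release_irq:
	teardown_irq();			/* undo setup_irq() */
	return err;
}
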
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 71f8a7475717..30cc30683c07 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -154,7 +154,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = mei_register(dev, &pdev->dev);
if (err)
- goto release_irq;
+ goto stop;
pci_set_drvdata(pdev, dev);
@@ -170,6 +170,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+stop:
+ mei_stop(dev);
release_irq:
mei_cancel_work(dev);
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
deleted file mode 100644
index b346638833b0..000000000000
--- a/drivers/misc/mei/wd.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-#include <linux/watchdog.h>
-
-#include <linux/mei.h>
-
-#include "mei_dev.h"
-#include "hbm.h"
-#include "client.h"
-
-static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
-static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
-
-/*
- * AMT Watchdog Device
- */
-#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
-
-/* UUIDs for AMT F/W clients */
-const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89,
- 0x9D, 0xA9, 0x15, 0x14, 0xCB,
- 0x32, 0xAB);
-
-static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
-{
- dev_dbg(dev->dev, "wd: set timeout=%d.\n", timeout);
- memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE);
- memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16));
-}
-
-/**
- * mei_wd_host_init - connect to the watchdog client
- *
- * @dev: the device structure
- * @me_cl: me client
- *
- * Return: -ENOTTY if wd client cannot be found
- * -EIO if write has failed
- * 0 on success
- */
-int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
-{
- struct mei_cl *cl = &dev->wd_cl;
- int ret;
-
- mei_cl_init(cl, dev);
-
- dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
- dev->wd_state = MEI_WD_IDLE;
-
- ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
- if (ret < 0) {
- dev_info(dev->dev, "wd: failed link client\n");
- return ret;
- }
-
- ret = mei_cl_connect(cl, me_cl, NULL);
- if (ret) {
- dev_err(dev->dev, "wd: failed to connect = %d\n", ret);
- mei_cl_unlink(cl);
- return ret;
- }
-
- ret = mei_watchdog_register(dev);
- if (ret) {
- mei_cl_disconnect(cl);
- mei_cl_unlink(cl);
- }
- return ret;
-}
-
-/**
- * mei_wd_send - sends watch dog message to fw.
- *
- * @dev: the device structure
- *
- * Return: 0 if success,
- * -EIO when message send fails
- * -EINVAL when invalid message is to be sent
- * -ENODEV on flow control failure
- */
-int mei_wd_send(struct mei_device *dev)
-{
- struct mei_cl *cl = &dev->wd_cl;
- struct mei_msg_hdr hdr;
- int ret;
-
- hdr.host_addr = cl->host_client_id;
- hdr.me_addr = mei_cl_me_id(cl);
- hdr.msg_complete = 1;
- hdr.reserved = 0;
- hdr.internal = 0;
-
- if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
- hdr.length = MEI_WD_START_MSG_SIZE;
- else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
- hdr.length = MEI_WD_STOP_MSG_SIZE;
- else {
- dev_err(dev->dev, "wd: invalid message is to be sent, aborting\n");
- return -EINVAL;
- }
-
- ret = mei_write_message(dev, &hdr, dev->wd_data);
- if (ret) {
- dev_err(dev->dev, "wd: write message failed\n");
- return ret;
- }
-
- ret = mei_cl_flow_ctrl_reduce(cl);
- if (ret) {
- dev_err(dev->dev, "wd: flow_ctrl_reduce failed.\n");
- return ret;
- }
-
- return 0;
-}
-
-/**
- * mei_wd_stop - sends watchdog stop message to fw.
- *
- * @dev: the device structure
- *
- * Return: 0 if success
- * on error:
- * -EIO when message send fails
- * -EINVAL when invalid message is to be sent
- * -ETIME on message timeout
- */
-int mei_wd_stop(struct mei_device *dev)
-{
- struct mei_cl *cl = &dev->wd_cl;
- int ret;
-
- if (!mei_cl_is_connected(cl) ||
- dev->wd_state != MEI_WD_RUNNING)
- return 0;
-
- memcpy(dev->wd_data, mei_stop_wd_params, MEI_WD_STOP_MSG_SIZE);
-
- dev->wd_state = MEI_WD_STOPPING;
-
- ret = mei_cl_flow_ctrl_creds(cl);
- if (ret < 0)
- goto err;
-
- if (ret && mei_hbuf_acquire(dev)) {
- ret = mei_wd_send(dev);
- if (ret)
- goto err;
- dev->wd_pending = false;
- } else {
- dev->wd_pending = true;
- }
-
- mutex_unlock(&dev->device_lock);
-
- ret = wait_event_timeout(dev->wait_stop_wd,
- dev->wd_state == MEI_WD_IDLE,
- msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));
- mutex_lock(&dev->device_lock);
- if (dev->wd_state != MEI_WD_IDLE) {
- /* timeout */
- ret = -ETIME;
- dev_warn(dev->dev, "wd: stop failed to complete ret=%d\n", ret);
- goto err;
- }
- dev_dbg(dev->dev, "wd: stop completed after %u msec\n",
- MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret));
- return 0;
-err:
- return ret;
-}
-
-/**
- * mei_wd_ops_start - wd start command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_start(struct watchdog_device *wd_dev)
-{
- struct mei_device *dev;
- struct mei_cl *cl;
- int err = -ENODEV;
-
- dev = watchdog_get_drvdata(wd_dev);
- if (!dev)
- return -ENODEV;
-
- cl = &dev->wd_cl;
-
- mutex_lock(&dev->device_lock);
-
- if (dev->dev_state != MEI_DEV_ENABLED) {
- dev_dbg(dev->dev, "wd: dev_state != MEI_DEV_ENABLED dev_state = %s\n",
- mei_dev_state_str(dev->dev_state));
- goto end_unlock;
- }
-
- if (!mei_cl_is_connected(cl)) {
- cl_dbg(dev, cl, "MEI Driver is not connected to Watchdog Client\n");
- goto end_unlock;
- }
-
- mei_wd_set_start_timeout(dev, dev->wd_timeout);
-
- err = 0;
-end_unlock:
- mutex_unlock(&dev->device_lock);
- return err;
-}
-
-/**
- * mei_wd_ops_stop - wd stop command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
-{
- struct mei_device *dev;
-
- dev = watchdog_get_drvdata(wd_dev);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->device_lock);
- mei_wd_stop(dev);
- mutex_unlock(&dev->device_lock);
-
- return 0;
-}
-
-/**
- * mei_wd_ops_ping - wd ping command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
-{
- struct mei_device *dev;
- struct mei_cl *cl;
- int ret;
-
- dev = watchdog_get_drvdata(wd_dev);
- if (!dev)
- return -ENODEV;
-
- cl = &dev->wd_cl;
-
- mutex_lock(&dev->device_lock);
-
- if (!mei_cl_is_connected(cl)) {
- cl_err(dev, cl, "wd: not connected.\n");
- ret = -ENODEV;
- goto end;
- }
-
- dev->wd_state = MEI_WD_RUNNING;
-
- ret = mei_cl_flow_ctrl_creds(cl);
- if (ret < 0)
- goto end;
-
- /* Check if we can send the ping to HW*/
- if (ret && mei_hbuf_acquire(dev)) {
- dev_dbg(dev->dev, "wd: sending ping\n");
-
- ret = mei_wd_send(dev);
- if (ret)
- goto end;
- dev->wd_pending = false;
- } else {
- dev->wd_pending = true;
- }
-
-end:
- mutex_unlock(&dev->device_lock);
- return ret;
-}
-
-/**
- * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- * @timeout: timeout value to set
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev,
- unsigned int timeout)
-{
- struct mei_device *dev;
-
- dev = watchdog_get_drvdata(wd_dev);
- if (!dev)
- return -ENODEV;
-
- /* Check Timeout value */
- if (timeout < MEI_WD_MIN_TIMEOUT || timeout > MEI_WD_MAX_TIMEOUT)
- return -EINVAL;
-
- mutex_lock(&dev->device_lock);
-
- dev->wd_timeout = timeout;
- wd_dev->timeout = timeout;
- mei_wd_set_start_timeout(dev, dev->wd_timeout);
-
- mutex_unlock(&dev->device_lock);
-
- return 0;
-}
-
-/*
- * Watchdog Device structs
- */
-static const struct watchdog_ops wd_ops = {
- .owner = THIS_MODULE,
- .start = mei_wd_ops_start,
- .stop = mei_wd_ops_stop,
- .ping = mei_wd_ops_ping,
- .set_timeout = mei_wd_ops_set_timeout,
-};
-static const struct watchdog_info wd_info = {
- .identity = INTEL_AMT_WATCHDOG_ID,
- .options = WDIOF_KEEPALIVEPING |
- WDIOF_SETTIMEOUT |
- WDIOF_ALARMONLY,
-};
-
-static struct watchdog_device amt_wd_dev = {
- .info = &wd_info,
- .ops = &wd_ops,
- .timeout = MEI_WD_DEFAULT_TIMEOUT,
- .min_timeout = MEI_WD_MIN_TIMEOUT,
- .max_timeout = MEI_WD_MAX_TIMEOUT,
-};
-
-
-int mei_watchdog_register(struct mei_device *dev)
-{
-
- int ret;
-
- amt_wd_dev.parent = dev->dev;
- /* unlock to perserve correct locking order */
- mutex_unlock(&dev->device_lock);
- ret = watchdog_register_device(&amt_wd_dev);
- mutex_lock(&dev->device_lock);
- if (ret) {
- dev_err(dev->dev, "wd: unable to register watchdog device = %d.\n",
- ret);
- return ret;
- }
-
- dev_dbg(dev->dev, "wd: successfully register watchdog interface.\n");
- watchdog_set_drvdata(&amt_wd_dev, dev);
- return 0;
-}
-
-void mei_watchdog_unregister(struct mei_device *dev)
-{
- if (watchdog_get_drvdata(&amt_wd_dev) == NULL)
- return;
-
- watchdog_set_drvdata(&amt_wd_dev, NULL);
- watchdog_unregister_device(&amt_wd_dev);
-}
-
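
With wd.c gone, the core driver no longer talks to the watchdog core at all; the iAMT watchdog is reworked elsewhere in this series as a separate client driver on the MEI bus. For reference, the watchdog-core registration pattern the deleted file implemented boils down to the following sketch, with demo_* names as illustrative stand-ins:

#include <linux/module.h>
#include <linux/watchdog.h>

static int demo_start(struct watchdog_device *wdd) { return 0; }
static int demo_stop(struct watchdog_device *wdd)  { return 0; }

static const struct watchdog_ops demo_ops = {
	.owner = THIS_MODULE,
	.start = demo_start,
	.stop  = demo_stop,
};

static const struct watchdog_info demo_info = {
	.identity = "demo_wdt",
	.options  = WDIOF_KEEPALIVEPING,
};

static struct watchdog_device demo_wdd = {
	.info    = &demo_info,
	.ops     = &demo_ops,
	.timeout = 120,
};

static int __init demo_init(void)
{
	return watchdog_register_device(&demo_wdd);
}

static void __exit demo_exit(void)
{
	watchdog_unregister_device(&demo_wdd);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
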
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 40677df7f996..2e4f3ba75c8e 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -32,12 +32,29 @@ config SCIF_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
+comment "VOP Bus Driver"
+
+config VOP_BUS
+ tristate "VOP Bus Driver"
+ depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+ help
+ This option is selected by any driver which registers a
+ device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
+ and CONFIG_INTEL_MIC_CARD.
+
+ If you are building a host/card kernel with an Intel MIC device
+ then say M (recommended) or Y, else say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
+
comment "Intel MIC Host Driver"
config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
- depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
- select VHOST_RING
+ depends on 64BIT && PCI && X86
+ depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
help
This enables Host Driver support for the Intel Many Integrated
Core (MIC) family of PCIe form factor coprocessor devices that
@@ -56,7 +73,8 @@ comment "Intel MIC Card Driver"
config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
- depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
+ depends on 64BIT && X86
+ depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
select VIRTIO
help
This enables card driver support for the Intel Many Integrated
@@ -107,3 +125,23 @@ config MIC_COSM
More information about the Intel MIC family as well as the Linux
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
+
+comment "VOP Driver"
+
+config VOP
+ tristate "VOP Driver"
+ depends on 64BIT && PCI && X86 && VOP_BUS
+ select VHOST_RING
+ help
+ This enables VOP (Virtio over PCIe) Driver support for the Intel
+ Many Integrated Core (MIC) family of PCIe form factor coprocessor
+ devices. The VOP driver allows virtio drivers, e.g. net, console
+ and block drivers, on the card to connect to user space virtio
+ devices on the host.
+
+ If you are building a host kernel with an Intel MIC device then
+ say M (recommended) or Y, else say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
index e288a1106738..f2b1323ff96c 100644
--- a/drivers/misc/mic/Makefile
+++ b/drivers/misc/mic/Makefile
@@ -8,3 +8,4 @@ obj-y += bus/
obj-$(CONFIG_SCIF) += scif/
obj-$(CONFIG_MIC_COSM) += cosm/
obj-$(CONFIG_MIC_COSM) += cosm_client/
+obj-$(CONFIG_VOP) += vop/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
index 761842b0d0bb..8758a7daa52c 100644
--- a/drivers/misc/mic/bus/Makefile
+++ b/drivers/misc/mic/bus/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
obj-$(CONFIG_SCIF_BUS) += scif_bus.o
obj-$(CONFIG_MIC_COSM) += cosm_bus.o
+obj-$(CONFIG_VOP_BUS) += vop_bus.o
diff --git a/drivers/misc/mic/bus/cosm_bus.h b/drivers/misc/mic/bus/cosm_bus.h
index f7c57f266916..8b6341855dc3 100644
--- a/drivers/misc/mic/bus/cosm_bus.h
+++ b/drivers/misc/mic/bus/cosm_bus.h
@@ -30,6 +30,7 @@
* @attr_group: Pointer to list of sysfs attribute groups.
* @sdev: Device for sysfs entries.
* @state: MIC state.
+ * @prev_state: MIC state previous to MIC_RESETTING
* @shutdown_status: MIC status reported by card for shutdown/crashes.
* @shutdown_status_int: Internal shutdown status maintained by the driver
* @cosm_mutex: Mutex for synchronizing access to data structures.
@@ -55,6 +56,7 @@ struct cosm_device {
const struct attribute_group **attr_group;
struct device *sdev;
u8 state;
+ u8 prev_state;
u8 shutdown_status;
u8 shutdown_status_int;
struct mutex cosm_mutex;
diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c
new file mode 100644
index 000000000000..303da222f5b6
--- /dev/null
+++ b/drivers/misc/mic/bus/vop_bus.c
@@ -0,0 +1,203 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) Bus driver.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/dma-mapping.h>
+
+#include "vop_bus.h"
+
+static ssize_t device_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct vop_device *dev = dev_to_vop(d);
+
+ return sprintf(buf, "0x%04x\n", dev->id.device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t vendor_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct vop_device *dev = dev_to_vop(d);
+
+ return sprintf(buf, "0x%04x\n", dev->id.vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t modalias_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct vop_device *dev = dev_to_vop(d);
+
+ return sprintf(buf, "vop:d%08Xv%08X\n",
+ dev->id.device, dev->id.vendor);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *vop_dev_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vop_dev);
+
+static inline int vop_id_match(const struct vop_device *dev,
+ const struct vop_device_id *id)
+{
+ if (id->device != dev->id.device && id->device != VOP_DEV_ANY_ID)
+ return 0;
+
+ return id->vendor == VOP_DEV_ANY_ID || id->vendor == dev->id.vendor;
+}
+
+/*
+ * This looks through all the IDs a driver claims to support. If any of them
+ * match, we return 1 and the kernel will call vop_dev_probe().
+ */
+static int vop_dev_match(struct device *dv, struct device_driver *dr)
+{
+ unsigned int i;
+ struct vop_device *dev = dev_to_vop(dv);
+ const struct vop_device_id *ids;
+
+ ids = drv_to_vop(dr)->id_table;
+ for (i = 0; ids[i].device; i++)
+ if (vop_id_match(dev, &ids[i]))
+ return 1;
+ return 0;
+}
+
+static int vop_uevent(struct device *dv, struct kobj_uevent_env *env)
+{
+ struct vop_device *dev = dev_to_vop(dv);
+
+ return add_uevent_var(env, "MODALIAS=vop:d%08Xv%08X",
+ dev->id.device, dev->id.vendor);
+}
+
+static int vop_dev_probe(struct device *d)
+{
+ struct vop_device *dev = dev_to_vop(d);
+ struct vop_driver *drv = drv_to_vop(dev->dev.driver);
+
+ return drv->probe(dev);
+}
+
+static int vop_dev_remove(struct device *d)
+{
+ struct vop_device *dev = dev_to_vop(d);
+ struct vop_driver *drv = drv_to_vop(dev->dev.driver);
+
+ drv->remove(dev);
+ return 0;
+}
+
+static struct bus_type vop_bus = {
+ .name = "vop_bus",
+ .match = vop_dev_match,
+ .dev_groups = vop_dev_groups,
+ .uevent = vop_uevent,
+ .probe = vop_dev_probe,
+ .remove = vop_dev_remove,
+};
+
+int vop_register_driver(struct vop_driver *driver)
+{
+ driver->driver.bus = &vop_bus;
+ return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(vop_register_driver);
+
+void vop_unregister_driver(struct vop_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(vop_unregister_driver);
+
+static void vop_release_dev(struct device *d)
+{
+ put_device(d);
+}
+
+struct vop_device *
+vop_register_device(struct device *pdev, int id,
+ const struct dma_map_ops *dma_ops,
+ struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
+ struct dma_chan *chan)
+{
+ int ret;
+ struct vop_device *vdev;
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev)
+ return ERR_PTR(-ENOMEM);
+
+ vdev->dev.parent = pdev;
+ vdev->id.device = id;
+ vdev->id.vendor = VOP_DEV_ANY_ID;
+ vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops;
+ vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
+ dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
+ vdev->dev.release = vop_release_dev;
+ vdev->hw_ops = hw_ops;
+ vdev->dev.bus = &vop_bus;
+ vdev->dnode = dnode;
+ vdev->aper = aper;
+ vdev->dma_ch = chan;
+ vdev->index = dnode - 1;
+ dev_set_name(&vdev->dev, "vop-dev%u", vdev->index);
+ /*
+ * device_register() causes the bus infrastructure to look for a
+ * matching driver.
+ */
+ ret = device_register(&vdev->dev);
+ if (ret)
+ goto free_vdev;
+ return vdev;
+free_vdev:
+ kfree(vdev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vop_register_device);
+
+void vop_unregister_device(struct vop_device *dev)
+{
+ device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(vop_unregister_device);
+
+static int __init vop_init(void)
+{
+ return bus_register(&vop_bus);
+}
+
+static void __exit vop_exit(void)
+{
+ bus_unregister(&vop_bus);
+}
+
+core_initcall(vop_init);
+module_exit(vop_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) VOP Bus driver");
+MODULE_LICENSE("GPL v2");
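
Matching on the new bus follows the convention visible in vop_dev_match() above: a driver's table is scanned until an entry with a zero device field, and VOP_DEV_ANY_ID acts as a wildcard for either field. The table a client driver would declare looks like this, grounded in the definitions from vop_bus.h below:

/* assumes the declarations from vop_bus.h */
static const struct vop_device_id demo_id_table[] = {
	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },	/* the transport device, any vendor */
	{ 0 },	/* device == 0 ends the scan in vop_dev_match() */
};
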
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h
new file mode 100644
index 000000000000..fff7a865d721
--- /dev/null
+++ b/drivers/misc/mic/bus/vop_bus.h
@@ -0,0 +1,140 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio over PCIe Bus driver.
+ */
+#ifndef _VOP_BUS_H_
+#define _VOP_BUS_H_
+/*
+ * Everything a vop driver needs to work with any particular vop
+ * implementation.
+ */
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "../common/mic_dev.h"
+
+struct vop_device_id {
+ u32 device;
+ u32 vendor;
+};
+
+#define VOP_DEV_TRNSP 1
+#define VOP_DEV_ANY_ID 0xffffffff
+/*
+ * Size of the internal buffer used during DMAs as an intermediate buffer
+ * for copy to/from user. Must be an integral number of pages.
+ */
+#define VOP_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
+
+/**
+ * vop_device - representation of a device using vop
+ * @hw_ops: the hardware ops supported by this device.
+ * @id: the device type identification (used to match it with a driver).
+ * @dev: underlying device.
+ * @dnode: The destination node which this device will communicate with.
+ * @aper: Aperture memory window
+ * @dma_ch: DMA channel
+ * @index: unique position on the vop bus
+ */
+struct vop_device {
+ struct vop_hw_ops *hw_ops;
+ struct vop_device_id id;
+ struct device dev;
+ u8 dnode;
+ struct mic_mw *aper;
+ struct dma_chan *dma_ch;
+ int index;
+};
+
+/**
+ * vop_driver - operations for a vop I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct vop_driver {
+ struct device_driver driver;
+ const struct vop_device_id *id_table;
+ int (*probe)(struct vop_device *dev);
+ void (*remove)(struct vop_device *dev);
+};
+
+/**
+ * vop_hw_ops - Hardware operations for accessing a VOP device on the VOP bus.
+ *
+ * @next_db: Obtain the next available doorbell.
+ * @request_irq: Request an interrupt on a particular doorbell.
+ * @free_irq: Free an interrupt requested previously.
+ * @ack_interrupt: acknowledge an interrupt in the ISR.
+ * @get_remote_dp: Get access to the virtio device page used by the remote
+ * node to add/remove/configure virtio devices.
+ * @get_dp: Get access to the virtio device page used by the self
+ * node to add/remove/configure virtio devices.
+ * @send_intr: Send an interrupt to the peer node on a specified doorbell.
+ * @ioremap: Map a buffer with the specified DMA address and length.
+ * @iounmap: Unmap a buffer previously mapped.
+ * @dma_filter: The DMA filter function to use for obtaining access to
+ * a DMA channel on the peer node.
+ */
+struct vop_hw_ops {
+ int (*next_db)(struct vop_device *vpdev);
+ struct mic_irq *(*request_irq)(struct vop_device *vpdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data,
+ int intr_src);
+ void (*free_irq)(struct vop_device *vpdev,
+ struct mic_irq *cookie, void *data);
+ void (*ack_interrupt)(struct vop_device *vpdev, int num);
+ void __iomem * (*get_remote_dp)(struct vop_device *vpdev);
+ void * (*get_dp)(struct vop_device *vpdev);
+ void (*send_intr)(struct vop_device *vpdev, int db);
+ void __iomem * (*ioremap)(struct vop_device *vpdev,
+ dma_addr_t pa, size_t len);
+ void (*iounmap)(struct vop_device *vpdev, void __iomem *va);
+};
+
+struct vop_device *
+vop_register_device(struct device *pdev, int id,
+ const struct dma_map_ops *dma_ops,
+ struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
+ struct dma_chan *chan);
+void vop_unregister_device(struct vop_device *dev);
+int vop_register_driver(struct vop_driver *drv);
+void vop_unregister_driver(struct vop_driver *drv);
+
+/*
+ * module_vop_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_vop_driver(__vop_driver) \
+ module_driver(__vop_driver, vop_register_driver, \
+ vop_unregister_driver)
+
+static inline struct vop_device *dev_to_vop(struct device *dev)
+{
+ return container_of(dev, struct vop_device, dev);
+}
+
+static inline struct vop_driver *drv_to_vop(struct device_driver *drv)
+{
+ return container_of(drv, struct vop_driver, driver);
+}
+#endif /* _VOP_BUS_H_ */
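
Putting the pieces of this header together, a minimal client driver reduces to an id table plus probe/remove, with module_vop_driver() supplying the init/exit boilerplate. A hedged skeleton under those assumptions; the demo_* names are illustrative:

#include <linux/device.h>
#include <linux/module.h>
#include "vop_bus.h"

static int demo_probe(struct vop_device *vpdev)
{
	dev_info(&vpdev->dev, "bound to vop-dev%d\n", vpdev->index);
	return 0;
}

static void demo_remove(struct vop_device *vpdev)
{
	dev_info(&vpdev->dev, "unbound\n");
}

static const struct vop_device_id demo_ids[] = {
	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
	{ 0 },
};

static struct vop_driver demo_driver = {
	.driver = {
		.name  = "demo_vop",
		.owner = THIS_MODULE,
	},
	.id_table = demo_ids,
	.probe    = demo_probe,
	.remove   = demo_remove,
};

module_vop_driver(demo_driver);

MODULE_DESCRIPTION("Illustrative VOP bus client");
MODULE_LICENSE("GPL v2");
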
diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile
index 69d58bef92ce..6e9675e12a09 100644
--- a/drivers/misc/mic/card/Makefile
+++ b/drivers/misc/mic/card/Makefile
@@ -8,4 +8,3 @@ obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o
mic_card-y += mic_x100.o
mic_card-y += mic_device.o
mic_card-y += mic_debugfs.o
-mic_card-y += mic_virtio.o
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index d0edaf7e0cd5..e749af48f736 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -34,7 +34,6 @@
#include <linux/mic_common.h>
#include "../common/mic_dev.h"
#include "mic_device.h"
-#include "mic_virtio.h"
static struct mic_driver *g_drv;
@@ -250,12 +249,82 @@ static struct scif_hw_ops scif_hw_ops = {
.iounmap = ___mic_iounmap,
};
+static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev)
+{
+ return dev_get_drvdata(vpdev->dev.parent);
+}
+
+static struct mic_irq *
+__mic_request_irq(struct vop_device *vpdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data, int intr_src)
+{
+ return mic_request_card_irq(func, NULL, name, data, intr_src);
+}
+
+static void __mic_free_irq(struct vop_device *vpdev,
+ struct mic_irq *cookie, void *data)
+{
+ return mic_free_card_irq(cookie, data);
+}
+
+static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ mic_ack_interrupt(&mdrv->mdev);
+}
+
+static int __mic_next_db(struct vop_device *vpdev)
+{
+ return mic_next_card_db();
+}
+
+static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ return mdrv->dp;
+}
+
+static void __mic_send_intr(struct vop_device *vpdev, int db)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ mic_send_intr(&mdrv->mdev, db);
+}
+
+static void __iomem *__mic_ioremap(struct vop_device *vpdev,
+ dma_addr_t pa, size_t len)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ return mic_card_map(&mdrv->mdev, pa, len);
+}
+
+static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ mic_card_unmap(&mdrv->mdev, va);
+}
+
+static struct vop_hw_ops vop_hw_ops = {
+ .request_irq = __mic_request_irq,
+ .free_irq = __mic_free_irq,
+ .ack_interrupt = __mic_ack_interrupt,
+ .next_db = __mic_next_db,
+ .get_remote_dp = __mic_get_remote_dp,
+ .send_intr = __mic_send_intr,
+ .ioremap = __mic_ioremap,
+ .iounmap = __mic_iounmap,
+};
+
static int mic_request_dma_chans(struct mic_driver *mdrv)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
- request_module("mic_x100_dma");
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -309,9 +378,13 @@ int __init mic_driver_init(struct mic_driver *mdrv)
rc = -ENODEV;
goto irq_uninit;
}
- rc = mic_devices_init(mdrv);
- if (rc)
+ mdrv->vpdev = vop_register_device(mdrv->dev, VOP_DEV_TRNSP,
+ NULL, &vop_hw_ops, 0,
+ NULL, mdrv->dma_ch[0]);
+ if (IS_ERR(mdrv->vpdev)) {
+ rc = PTR_ERR(mdrv->vpdev);
goto dma_free;
+ }
bootparam = mdrv->dp;
node_id = ioread8(&bootparam->node_id);
mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV,
@@ -321,13 +394,13 @@ int __init mic_driver_init(struct mic_driver *mdrv)
mdrv->num_dma_ch, true);
if (IS_ERR(mdrv->scdev)) {
rc = PTR_ERR(mdrv->scdev);
- goto device_uninit;
+ goto vop_remove;
}
mic_create_card_debug_dir(mdrv);
done:
return rc;
-device_uninit:
- mic_devices_uninit(mdrv);
+vop_remove:
+ vop_unregister_device(mdrv->vpdev);
dma_free:
mic_free_dma_chans(mdrv);
irq_uninit:
@@ -348,7 +421,7 @@ void mic_driver_uninit(struct mic_driver *mdrv)
{
mic_delete_card_debug_dir(mdrv);
scif_unregister_device(mdrv->scdev);
- mic_devices_uninit(mdrv);
+ vop_unregister_device(mdrv->vpdev);
mic_free_dma_chans(mdrv);
mic_uninit_irq();
mic_dp_uninit();
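
The card side now exposes the same vop_hw_ops indirection the header defines, which is what lets bus-generic VOP code run unchanged on host and card. A hedged sketch of how a consumer stays hardware-agnostic; demo_setup_doorbell is illustrative, while the hw_ops members are the ones declared in vop_bus.h:

#include <linux/err.h>
#include <linux/interrupt.h>
#include "../bus/vop_bus.h"

/* bus-generic code never touches MMIO directly; it goes through the
 * vop_hw_ops the adapter (host or card) registered */
static int demo_setup_doorbell(struct vop_device *vpdev,
			       irqreturn_t (*handler)(int irq, void *data),
			       void *data)
{
	struct mic_irq *cookie;
	int db = vpdev->hw_ops->next_db(vpdev);

	cookie = vpdev->hw_ops->request_irq(vpdev, handler, "demo-db",
					    data, db);
	if (IS_ERR(cookie))
		return PTR_ERR(cookie);

	vpdev->hw_ops->send_intr(vpdev, db);	/* kick the peer node */
	return db;
}
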
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
index 1dbf83c41289..333dbed972f6 100644
--- a/drivers/misc/mic/card/mic_device.h
+++ b/drivers/misc/mic/card/mic_device.h
@@ -32,6 +32,7 @@
#include <linux/interrupt.h>
#include <linux/mic_bus.h>
#include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
/**
* struct mic_intr_info - Contains h/w specific interrupt sources info
@@ -76,6 +77,7 @@ struct mic_device {
* @dma_ch - Array of DMA channels
* @num_dma_ch - Number of DMA channels available
* @scdev: SCIF device on the SCIF virtual bus.
+ * @vpdev: Virtio over PCIe device on the VOP virtual bus.
*/
struct mic_driver {
char name[20];
@@ -90,6 +92,7 @@ struct mic_driver {
struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
int num_dma_ch;
struct scif_hw_dev *scdev;
+ struct vop_device *vpdev;
};
/**
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
deleted file mode 100644
index f6ed57d3125c..000000000000
--- a/drivers/misc/mic/card/mic_virtio.c
+++ /dev/null
@@ -1,634 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Adapted from:
- *
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * Intel MIC Card driver.
- *
- */
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/virtio_config.h>
-
-#include "../common/mic_dev.h"
-#include "mic_virtio.h"
-
-#define VIRTIO_SUBCODE_64 0x0D00
-
-#define MIC_MAX_VRINGS 4
-struct mic_vdev {
- struct virtio_device vdev;
- struct mic_device_desc __iomem *desc;
- struct mic_device_ctrl __iomem *dc;
- struct mic_device *mdev;
- void __iomem *vr[MIC_MAX_VRINGS];
- int used_size[MIC_MAX_VRINGS];
- struct completion reset_done;
- struct mic_irq *virtio_cookie;
- int c2h_vdev_db;
-};
-
-static struct mic_irq *virtio_config_cookie;
-#define to_micvdev(vd) container_of(vd, struct mic_vdev, vdev)
-
-/* Helper API to obtain the parent of the virtio device */
-static inline struct device *mic_dev(struct mic_vdev *mvdev)
-{
- return mvdev->vdev.dev.parent;
-}
-
-/* This gets the device's feature bits. */
-static u64 mic_get_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- u32 features = 0;
- struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
- u8 __iomem *in_features = mic_vq_features(desc);
- int feature_len = ioread8(&desc->feature_len);
-
- bits = min_t(unsigned, feature_len, sizeof(features)) * 8;
- for (i = 0; i < bits; i++)
- if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
- features |= BIT(i);
-
- return features;
-}
-
-static int mic_finalize_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
- u8 feature_len = ioread8(&desc->feature_len);
- /* Second half of bitmap is features we accept. */
- u8 __iomem *out_features =
- mic_vq_features(desc) + feature_len;
-
- /* Give virtio_ring a chance to accept features. */
- vring_transport_features(vdev);
-
- /* Make sure we don't have any features > 32 bits! */
- BUG_ON((u32)vdev->features != vdev->features);
-
- memset_io(out_features, 0, feature_len);
- bits = min_t(unsigned, feature_len,
- sizeof(vdev->features)) * 8;
- for (i = 0; i < bits; i++) {
- if (__virtio_test_bit(vdev, i))
- iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
- &out_features[i / 8]);
- }
-
- return 0;
-}
-
-/*
- * Reading and writing elements in config space
- */
-static void mic_get(struct virtio_device *vdev, unsigned int offset,
- void *buf, unsigned len)
-{
- struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-
- if (offset + len > ioread8(&desc->config_len))
- return;
- memcpy_fromio(buf, mic_vq_configspace(desc) + offset, len);
-}
-
-static void mic_set(struct virtio_device *vdev, unsigned int offset,
- const void *buf, unsigned len)
-{
- struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-
- if (offset + len > ioread8(&desc->config_len))
- return;
- memcpy_toio(mic_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access the status
- * field of the device descriptor. set_status also interrupts the host
- * to tell about status changes.
- */
-static u8 mic_get_status(struct virtio_device *vdev)
-{
- return ioread8(&to_micvdev(vdev)->desc->status);
-}
-
-static void mic_set_status(struct virtio_device *vdev, u8 status)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- if (!status)
- return;
- iowrite8(status, &mvdev->desc->status);
- mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-}
-
-/* Inform host on a virtio device reset and wait for ack from host */
-static void mic_reset_inform_host(struct virtio_device *vdev)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- struct mic_device_ctrl __iomem *dc = mvdev->dc;
- int retry;
-
- iowrite8(0, &dc->host_ack);
- iowrite8(1, &dc->vdev_reset);
- mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-
- /* Wait till host completes all card accesses and acks the reset */
- for (retry = 100; retry--;) {
- if (ioread8(&dc->host_ack))
- break;
- msleep(100);
- };
-
- dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
-
- /* Reset status to 0 in case we timed out */
- iowrite8(0, &mvdev->desc->status);
-}
-
-static void mic_reset(struct virtio_device *vdev)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
-
- dev_dbg(mic_dev(mvdev), "%s: virtio id %d\n",
- __func__, vdev->id.device);
-
- mic_reset_inform_host(vdev);
- complete_all(&mvdev->reset_done);
-}
-
-/*
- * The virtio_ring code calls this API when it wants to notify the Host.
- */
-static bool mic_notify(struct virtqueue *vq)
-{
- struct mic_vdev *mvdev = vq->priv;
-
- mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
- return true;
-}
-
-static void mic_del_vq(struct virtqueue *vq, int n)
-{
- struct mic_vdev *mvdev = to_micvdev(vq->vdev);
- struct vring *vr = (struct vring *)(vq + 1);
-
- free_pages((unsigned long) vr->used, get_order(mvdev->used_size[n]));
- vring_del_virtqueue(vq);
- mic_card_unmap(mvdev->mdev, mvdev->vr[n]);
- mvdev->vr[n] = NULL;
-}
-
-static void mic_del_vqs(struct virtio_device *vdev)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- struct virtqueue *vq, *n;
- int idx = 0;
-
- dev_dbg(mic_dev(mvdev), "%s\n", __func__);
-
- list_for_each_entry_safe(vq, n, &vdev->vqs, list)
- mic_del_vq(vq, idx++);
-}
-
-/*
- * This routine will assign vring's allocated in host/io memory. Code in
- * virtio_ring.c however continues to access this io memory as if it were local
- * memory without io accessors.
- */
-static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- struct mic_vqconfig __iomem *vqconfig;
- struct mic_vqconfig config;
- struct virtqueue *vq;
- void __iomem *va;
- struct _mic_vring_info __iomem *info;
- void *used;
- int vr_size, _vr_size, err, magic;
- struct vring *vr;
- u8 type = ioread8(&mvdev->desc->type);
-
- if (index >= ioread8(&mvdev->desc->num_vq))
- return ERR_PTR(-ENOENT);
-
- if (!name)
- return ERR_PTR(-ENOENT);
-
- /* First assign the vring's allocated in host memory */
- vqconfig = mic_vq_config(mvdev->desc) + index;
- memcpy_fromio(&config, vqconfig, sizeof(config));
- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
- vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
- va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
- if (!va)
- return ERR_PTR(-ENOMEM);
- mvdev->vr[index] = va;
- memset_io(va, 0x0, _vr_size);
- vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
- MIC_VIRTIO_RING_ALIGN, vdev, false,
- (void __force *)va, mic_notify, callback,
- name);
- if (!vq) {
- err = -ENOMEM;
- goto unmap;
- }
- info = va + _vr_size;
- magic = ioread32(&info->magic);
-
- if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
- err = -EIO;
- goto unmap;
- }
-
- /* Allocate and reassign used ring now */
- mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
- sizeof(struct vring_used_elem) *
- le16_to_cpu(config.num));
- used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(mvdev->used_size[index]));
- if (!used) {
- err = -ENOMEM;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto del_vq;
- }
- iowrite64(virt_to_phys(used), &vqconfig->used_address);
-
- /*
- * To reassign the used ring here we are directly accessing
- * struct vring_virtqueue which is a private data structure
- * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
- * vring_new_virtqueue() would ensure that
- * (&vq->vring == (struct vring *) (&vq->vq + 1));
- */
- vr = (struct vring *)(vq + 1);
- vr->used = used;
-
- vq->priv = mvdev;
- return vq;
-del_vq:
- vring_del_virtqueue(vq);
-unmap:
- mic_card_unmap(mvdev->mdev, mvdev->vr[index]);
- return ERR_PTR(err);
-}
-
-static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[])
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- struct mic_device_ctrl __iomem *dc = mvdev->dc;
- int i, err, retry;
-
- /* We must have this many virtqueues. */
- if (nvqs > ioread8(&mvdev->desc->num_vq))
- return -ENOENT;
-
- for (i = 0; i < nvqs; ++i) {
- dev_dbg(mic_dev(mvdev), "%s: %d: %s\n",
- __func__, i, names[i]);
- vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]);
- if (IS_ERR(vqs[i])) {
- err = PTR_ERR(vqs[i]);
- goto error;
- }
- }
-
- iowrite8(1, &dc->used_address_updated);
- /*
- * Send an interrupt to the host to inform it that used
- * rings have been re-assigned.
- */
- mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
- for (retry = 100; retry--;) {
- if (!ioread8(&dc->used_address_updated))
- break;
- msleep(100);
- };
-
- dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
- if (!retry) {
- err = -ENODEV;
- goto error;
- }
-
- return 0;
-error:
- mic_del_vqs(vdev);
- return err;
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static struct virtio_config_ops mic_vq_config_ops = {
- .get_features = mic_get_features,
- .finalize_features = mic_finalize_features,
- .get = mic_get,
- .set = mic_set,
- .get_status = mic_get_status,
- .set_status = mic_set_status,
- .reset = mic_reset,
- .find_vqs = mic_find_vqs,
- .del_vqs = mic_del_vqs,
-};
-
-static irqreturn_t
-mic_virtio_intr_handler(int irq, void *data)
-{
- struct mic_vdev *mvdev = data;
- struct virtqueue *vq;
-
- mic_ack_interrupt(mvdev->mdev);
- list_for_each_entry(vq, &mvdev->vdev.vqs, list)
- vring_interrupt(0, vq);
-
- return IRQ_HANDLED;
-}
-
-static void mic_virtio_release_dev(struct device *_d)
-{
- /*
- * No need for a release method similar to virtio PCI.
- * Provide an empty one to avoid getting a warning from core.
- */
-}
-
-/*
- * adds a new device and register it with virtio
- * appropriate drivers are loaded by the device model
- */
-static int mic_add_device(struct mic_device_desc __iomem *d,
- unsigned int offset, struct mic_driver *mdrv)
-{
- struct mic_vdev *mvdev;
- int ret;
- int virtio_db;
- u8 type = ioread8(&d->type);
-
- mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
- if (!mvdev) {
- dev_err(mdrv->dev, "Cannot allocate mic dev %u type %u\n",
- offset, type);
- return -ENOMEM;
- }
-
- mvdev->mdev = &mdrv->mdev;
- mvdev->vdev.dev.parent = mdrv->dev;
- mvdev->vdev.dev.release = mic_virtio_release_dev;
- mvdev->vdev.id.device = type;
- mvdev->vdev.config = &mic_vq_config_ops;
- mvdev->desc = d;
- mvdev->dc = (void __iomem *)d + mic_aligned_desc_size(d);
- init_completion(&mvdev->reset_done);
-
- virtio_db = mic_next_card_db();
- mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler,
- NULL, "virtio intr", mvdev, virtio_db);
- if (IS_ERR(mvdev->virtio_cookie)) {
- ret = PTR_ERR(mvdev->virtio_cookie);
- goto kfree;
- }
- iowrite8((u8)virtio_db, &mvdev->dc->h2c_vdev_db);
- mvdev->c2h_vdev_db = ioread8(&mvdev->dc->c2h_vdev_db);
-
- ret = register_virtio_device(&mvdev->vdev);
- if (ret) {
- dev_err(mic_dev(mvdev),
- "Failed to register mic device %u type %u\n",
- offset, type);
- goto free_irq;
- }
- iowrite64((u64)mvdev, &mvdev->dc->vdev);
- dev_dbg(mic_dev(mvdev), "%s: registered mic device %u type %u mvdev %p\n",
- __func__, offset, type, mvdev);
-
- return 0;
-
-free_irq:
- mic_free_card_irq(mvdev->virtio_cookie, mvdev);
-kfree:
- kfree(mvdev);
- return ret;
-}
-
-/*
- * match for a mic device with a specific desc pointer
- */
-static int mic_match_desc(struct device *dev, void *data)
-{
- struct virtio_device *vdev = dev_to_virtio(dev);
- struct mic_vdev *mvdev = to_micvdev(vdev);
-
- return mvdev->desc == (void __iomem *)data;
-}
-
-static void mic_handle_config_change(struct mic_device_desc __iomem *d,
- unsigned int offset, struct mic_driver *mdrv)
-{
- struct mic_device_ctrl __iomem *dc
- = (void __iomem *)d + mic_aligned_desc_size(d);
- struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
-
- if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
- return;
-
- dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__);
- virtio_config_changed(&mvdev->vdev);
- iowrite8(1, &dc->guest_ack);
-}
-
-/*
- * removes a virtio device if a hot remove event has been
- * requested by the host.
- */
-static int mic_remove_device(struct mic_device_desc __iomem *d,
- unsigned int offset, struct mic_driver *mdrv)
-{
- struct mic_device_ctrl __iomem *dc
- = (void __iomem *)d + mic_aligned_desc_size(d);
- struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
- u8 status;
- int ret = -1;
-
- if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
- dev_dbg(mdrv->dev,
- "%s %d config_change %d type %d mvdev %p\n",
- __func__, __LINE__,
- ioread8(&dc->config_change), ioread8(&d->type), mvdev);
-
- status = ioread8(&d->status);
- reinit_completion(&mvdev->reset_done);
- unregister_virtio_device(&mvdev->vdev);
- mic_free_card_irq(mvdev->virtio_cookie, mvdev);
- if (status & VIRTIO_CONFIG_S_DRIVER_OK)
- wait_for_completion(&mvdev->reset_done);
- kfree(mvdev);
- iowrite8(1, &dc->guest_ack);
- dev_dbg(mdrv->dev, "%s %d guest_ack %d\n",
- __func__, __LINE__, ioread8(&dc->guest_ack));
- ret = 0;
- }
-
- return ret;
-}
-
-#define REMOVE_DEVICES true
-
-static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
-{
- s8 type;
- unsigned int i;
- struct mic_device_desc __iomem *d;
- struct mic_device_ctrl __iomem *dc;
- struct device *dev;
- int ret;
-
- for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE;
- i += mic_total_desc_size(d)) {
- d = mdrv->dp + i;
- dc = (void __iomem *)d + mic_aligned_desc_size(d);
- /*
- * This read barrier is paired with the corresponding write
- * barrier on the host which is inserted before adding or
- * removing a virtio device descriptor, by updating the type.
- */
- rmb();
- type = ioread8(&d->type);
-
- /* end of list */
- if (type == 0)
- break;
-
- if (type == -1)
- continue;
-
- /* device already exists */
- dev = device_find_child(mdrv->dev, (void __force *)d,
- mic_match_desc);
- if (dev) {
- if (remove)
- iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
- &dc->config_change);
- put_device(dev);
- mic_handle_config_change(d, i, mdrv);
- ret = mic_remove_device(d, i, mdrv);
- if (!ret && !remove)
- iowrite8(-1, &d->type);
- if (remove) {
- iowrite8(0, &dc->config_change);
- iowrite8(0, &dc->guest_ack);
- }
- continue;
- }
-
- /* new device */
- dev_dbg(mdrv->dev, "%s %d Adding new virtio device %p\n",
- __func__, __LINE__, d);
- if (!remove)
- mic_add_device(d, i, mdrv);
- }
-}
-
-/*
- * mic_hotplug_devices tries to find changes in the device page.
- */
-static void mic_hotplug_devices(struct work_struct *work)
-{
- struct mic_driver *mdrv = container_of(work,
- struct mic_driver, hotplug_work);
-
- mic_scan_devices(mdrv, !REMOVE_DEVICES);
-}
-
-/*
- * Interrupt handler for hot plug/config changes etc.
- */
-static irqreturn_t
-mic_extint_handler(int irq, void *data)
-{
- struct mic_driver *mdrv = (struct mic_driver *)data;
-
- dev_dbg(mdrv->dev, "%s %d hotplug work\n",
- __func__, __LINE__);
- mic_ack_interrupt(&mdrv->mdev);
- schedule_work(&mdrv->hotplug_work);
- return IRQ_HANDLED;
-}
-
-/*
- * Init function for virtio
- */
-int mic_devices_init(struct mic_driver *mdrv)
-{
- int rc;
- struct mic_bootparam __iomem *bootparam;
- int config_db;
-
- INIT_WORK(&mdrv->hotplug_work, mic_hotplug_devices);
- mic_scan_devices(mdrv, !REMOVE_DEVICES);
-
- config_db = mic_next_card_db();
- virtio_config_cookie = mic_request_card_irq(mic_extint_handler, NULL,
- "virtio_config_intr", mdrv,
- config_db);
- if (IS_ERR(virtio_config_cookie)) {
- rc = PTR_ERR(virtio_config_cookie);
- goto exit;
- }
-
- bootparam = mdrv->dp;
- iowrite8(config_db, &bootparam->h2c_config_db);
- return 0;
-exit:
- return rc;
-}
-
-/*
- * Uninit function for virtio
- */
-void mic_devices_uninit(struct mic_driver *mdrv)
-{
- struct mic_bootparam __iomem *bootparam = mdrv->dp;
- iowrite8(-1, &bootparam->h2c_config_db);
- mic_free_card_irq(virtio_config_cookie, mdrv);
- flush_work(&mdrv->hotplug_work);
- mic_scan_devices(mdrv, REMOVE_DEVICES);
-}
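
The scan loop removed above relies on a publish/consume protocol over the shared device page: the host fills in a descriptor's payload, issues a write barrier, then publishes the slot by updating its type field; the card reads type, issues a read barrier, and only then reads the payload. A minimal sketch of that barrier pairing, assuming hypothetical publish_desc()/consume_desc() helpers and ordinary cacheable memory (the driver itself uses rmb() because the device page is uncached):

	/* Sketch only; names below are invented for illustration. */
	struct shared_desc {
		char payload[60];
		s8 type;		/* 0 = end of list, -1 = retired slot */
	};

	/* Writer (host): fill the slot first, then publish it via type. */
	static void publish_desc(struct shared_desc *d, s8 type)
	{
		/* ... initialize d->payload here ... */
		smp_wmb();			/* payload stores before type */
		WRITE_ONCE(d->type, type);
	}

	/* Reader (card): consume type first; payload reads are safe after. */
	static s8 consume_desc(struct shared_desc *d)
	{
		s8 type = READ_ONCE(d->type);

		smp_rmb();			/* pairs with smp_wmb() above */
		return type;
	}

The same pairing appears again below in the host-side add/del paths, where smp_wmb() precedes the final store to dd->type.
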
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h
deleted file mode 100644
index d0407ba53bb7..000000000000
--- a/drivers/misc/mic/card/mic_virtio.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- *
- */
-#ifndef __MIC_CARD_VIRTIO_H
-#define __MIC_CARD_VIRTIO_H
-
-#include <linux/mic_common.h>
-#include "mic_device.h"
-
-/*
- * 64 bit I/O access
- */
-#ifndef ioread64
-#define ioread64 readq
-#endif
-#ifndef iowrite64
-#define iowrite64 writeq
-#endif
-
-static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc)
-{
- return sizeof(*desc)
- + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
- + ioread8(&desc->feature_len) * 2
- + ioread8(&desc->config_len);
-}
-
-static inline struct mic_vqconfig __iomem *
-mic_vq_config(struct mic_device_desc __iomem *desc)
-{
- return (struct mic_vqconfig __iomem *)(desc + 1);
-}
-
-static inline __u8 __iomem *
-mic_vq_features(struct mic_device_desc __iomem *desc)
-{
- return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq));
-}
-
-static inline __u8 __iomem *
-mic_vq_configspace(struct mic_device_desc __iomem *desc)
-{
- return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2;
-}
-static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc)
-{
- return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
-}
-
-int mic_devices_init(struct mic_driver *mdrv);
-void mic_devices_uninit(struct mic_driver *mdrv);
-
-#endif
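
The inline helpers deleted above decode a packed, variable-length descriptor: a fixed header, then num_vq vring configs, then the feature bits stored twice (host and guest sections, hence feature_len * 2), then the device config space. A worked example of that arithmetic, using assumed sizes of 16 bytes for the header and 24 bytes per vqconfig:

	/* Hypothetical stand-alone check of the same layout arithmetic. */
	unsigned int desc_size(unsigned int num_vq, unsigned int feature_len,
			       unsigned int config_len)
	{
		unsigned int hdr = 16, vqcfg = 24;	/* assumed struct sizes */

		/* header + vring configs + host/guest features + config */
		return hdr + num_vq * vqcfg + feature_len * 2 + config_len;
	}

	/* desc_size(2, 4, 8) == 16 + 48 + 8 + 8 == 80 bytes; the control
	 * block (struct mic_device_ctrl) then follows the aligned size. */
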
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index b2958ce2368c..b9f0710ffa6b 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -326,6 +326,7 @@ static int __init mic_init(void)
goto done;
}
+ request_module("mic_x100_dma");
mic_init_card_debugfs();
ret = platform_device_register(&mic_platform_dev);
if (ret) {
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c
index 4b4b356c797d..7005cb1e01d2 100644
--- a/drivers/misc/mic/cosm/cosm_main.c
+++ b/drivers/misc/mic/cosm/cosm_main.c
@@ -153,8 +153,10 @@ void cosm_stop(struct cosm_device *cdev, bool force)
* stop(..) calls device_unregister and will crash the system if
* called multiple times.
*/
- bool call_hw_ops = cdev->state != MIC_RESET_FAILED &&
- cdev->state != MIC_READY;
+ u8 state = cdev->state == MIC_RESETTING ?
+ cdev->prev_state : cdev->state;
+ bool call_hw_ops = state != MIC_RESET_FAILED &&
+ state != MIC_READY;
if (cdev->state != MIC_RESETTING)
cosm_set_state(cdev, MIC_RESETTING);
@@ -195,8 +197,11 @@ int cosm_reset(struct cosm_device *cdev)
mutex_lock(&cdev->cosm_mutex);
if (cdev->state != MIC_READY) {
- cosm_set_state(cdev, MIC_RESETTING);
- schedule_work(&cdev->reset_trigger_work);
+ if (cdev->state != MIC_RESETTING) {
+ cdev->prev_state = cdev->state;
+ cosm_set_state(cdev, MIC_RESETTING);
+ schedule_work(&cdev->reset_trigger_work);
+ }
} else {
dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__);
rc = -EINVAL;
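
The cosm change above records the pre-reset state so that cosm_stop() can still decide whether hardware ops are needed once the device has entered MIC_RESETTING. A minimal sketch of that decision; effective_state() is a made-up helper name:

	static u8 effective_state(struct cosm_device *cdev)
	{
		/* While resetting, judge by the state we were in before. */
		return cdev->state == MIC_RESETTING ? cdev->prev_state
						    : cdev->state;
	}

	/* call_hw_ops = state was neither MIC_RESET_FAILED nor MIC_READY */
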
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile
index 004d3db0f990..f3b502333ded 100644
--- a/drivers/misc/mic/host/Makefile
+++ b/drivers/misc/mic/host/Makefile
@@ -9,5 +9,3 @@ mic_host-objs += mic_smpt.o
mic_host-objs += mic_intr.o
mic_host-objs += mic_boot.o
mic_host-objs += mic_debugfs.o
-mic_host-objs += mic_fops.o
-mic_host-objs += mic_virtio.o
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 7845564dff64..8c91c9950b54 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -25,10 +25,117 @@
#include <linux/mic_common.h>
#include <linux/mic_bus.h>
#include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
-#include "mic_virtio.h"
+
+static inline struct mic_device *vpdev_to_mdev(struct device *dev)
+{
+ return dev_get_drvdata(dev->parent);
+}
+
+static dma_addr_t
+_mic_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ void *va = phys_to_virt(page_to_phys(page)) + offset;
+ struct mic_device *mdev = vpdev_to_mdev(dev);
+
+ return mic_map_single(mdev, va, size);
+}
+
+static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct mic_device *mdev = vpdev_to_mdev(dev);
+
+ mic_unmap_single(mdev, dma_addr, size);
+}
+
+static const struct dma_map_ops _mic_dma_ops = {
+ .map_page = _mic_dma_map_page,
+ .unmap_page = _mic_dma_unmap_page,
+};
+
+static struct mic_irq *
+__mic_request_irq(struct vop_device *vpdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data, int intr_src)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mic_request_threaded_irq(mdev, func, NULL, name, data,
+ intr_src, MIC_INTR_DB);
+}
+
+static void __mic_free_irq(struct vop_device *vpdev,
+ struct mic_irq *cookie, void *data)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mic_free_irq(mdev, cookie, data);
+}
+
+static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ mdev->ops->intr_workarounds(mdev);
+}
+
+static int __mic_next_db(struct vop_device *vpdev)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mic_next_db(mdev);
+}
+
+static void *__mic_get_dp(struct vop_device *vpdev)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mdev->dp;
+}
+
+static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
+{
+ return NULL;
+}
+
+static void __mic_send_intr(struct vop_device *vpdev, int db)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ mdev->ops->send_intr(mdev, db);
+}
+
+static void __iomem *__mic_ioremap(struct vop_device *vpdev,
+ dma_addr_t pa, size_t len)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mdev->aper.va + pa;
+}
+
+static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
+{
+ /* nothing to do */
+}
+
+static struct vop_hw_ops vop_hw_ops = {
+ .request_irq = __mic_request_irq,
+ .free_irq = __mic_free_irq,
+ .ack_interrupt = __mic_ack_interrupt,
+ .next_db = __mic_next_db,
+ .get_dp = __mic_get_dp,
+ .get_remote_dp = __mic_get_remote_dp,
+ .send_intr = __mic_send_intr,
+ .ioremap = __mic_ioremap,
+ .iounmap = __mic_iounmap,
+};
static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
{
@@ -315,7 +422,6 @@ static int mic_request_dma_chans(struct mic_device *mdev)
dma_cap_mask_t mask;
struct dma_chan *chan;
- request_module("mic_x100_dma");
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -387,9 +493,18 @@ static int _mic_start(struct cosm_device *cdev, int id)
goto dma_free;
}
+ mdev->vpdev = vop_register_device(&mdev->pdev->dev,
+ VOP_DEV_TRNSP, &_mic_dma_ops,
+ &vop_hw_ops, id + 1, &mdev->aper,
+ mdev->dma_ch[0]);
+ if (IS_ERR(mdev->vpdev)) {
+ rc = PTR_ERR(mdev->vpdev);
+ goto scif_remove;
+ }
+
rc = mdev->ops->load_mic_fw(mdev, NULL);
if (rc)
- goto scif_remove;
+ goto vop_remove;
mic_smpt_restore(mdev);
mic_intr_restore(mdev);
mdev->intr_ops->enable_interrupts(mdev);
@@ -397,6 +512,8 @@ static int _mic_start(struct cosm_device *cdev, int id)
mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
mdev->ops->send_firmware_intr(mdev);
goto unlock_ret;
+vop_remove:
+ vop_unregister_device(mdev->vpdev);
scif_remove:
scif_unregister_device(mdev->scdev);
dma_free:
@@ -423,7 +540,7 @@ static void _mic_stop(struct cosm_device *cdev, bool force)
* will be the first to be registered and the last to be
* unregistered.
*/
- mic_virtio_reset_devices(mdev);
+ vop_unregister_device(mdev->vpdev);
scif_unregister_device(mdev->scdev);
mic_free_dma_chans(mdev);
mbus_unregister_device(mdev->dma_mbdev);
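
mic_boot.c now routes all hardware access through a vop_hw_ops table, so the same VOP core can serve both host and card with different backends. A sketch of how a consumer indirects through it; vop_map_ring() is a hypothetical wrapper, not part of the driver:

	static void __iomem *vop_map_ring(struct vop_device *vpdev,
					  dma_addr_t pa, size_t len)
	{
		/* On the host this resolves to mdev->aper.va + pa, as in
		 * __mic_ioremap() above; a card backend would differ. */
		return vpdev->hw_ops->ioremap(vpdev, pa, len);
	}
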
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 10581600777a..0a9daba8bb5d 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -26,7 +26,6 @@
#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
-#include "mic_virtio.h"
/* Debugfs parent dir */
static struct dentry *mic_dbg;
@@ -100,190 +99,6 @@ static const struct file_operations post_code_ops = {
.release = mic_post_code_debug_release
};
-static int mic_dp_show(struct seq_file *s, void *pos)
-{
- struct mic_device *mdev = s->private;
- struct mic_device_desc *d;
- struct mic_device_ctrl *dc;
- struct mic_vqconfig *vqconfig;
- __u32 *features;
- __u8 *config;
- struct mic_bootparam *bootparam = mdev->dp;
- int i, j;
-
- seq_printf(s, "Bootparam: magic 0x%x\n",
- bootparam->magic);
- seq_printf(s, "Bootparam: h2c_config_db %d\n",
- bootparam->h2c_config_db);
- seq_printf(s, "Bootparam: node_id %d\n",
- bootparam->node_id);
- seq_printf(s, "Bootparam: c2h_scif_db %d\n",
- bootparam->c2h_scif_db);
- seq_printf(s, "Bootparam: h2c_scif_db %d\n",
- bootparam->h2c_scif_db);
- seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
- bootparam->scif_host_dma_addr);
- seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
- bootparam->scif_card_dma_addr);
-
-
- for (i = sizeof(*bootparam); i < MIC_DP_SIZE;
- i += mic_total_desc_size(d)) {
- d = mdev->dp + i;
- dc = (void *)d + mic_aligned_desc_size(d);
-
- /* end of list */
- if (d->type == 0)
- break;
-
- if (d->type == -1)
- continue;
-
- seq_printf(s, "Type %d ", d->type);
- seq_printf(s, "Num VQ %d ", d->num_vq);
- seq_printf(s, "Feature Len %d\n", d->feature_len);
- seq_printf(s, "Config Len %d ", d->config_len);
- seq_printf(s, "Shutdown Status %d\n", d->status);
-
- for (j = 0; j < d->num_vq; j++) {
- vqconfig = mic_vq_config(d) + j;
- seq_printf(s, "vqconfig[%d]: ", j);
- seq_printf(s, "address 0x%llx ", vqconfig->address);
- seq_printf(s, "num %d ", vqconfig->num);
- seq_printf(s, "used address 0x%llx\n",
- vqconfig->used_address);
- }
-
- features = (__u32 *)mic_vq_features(d);
- seq_printf(s, "Features: Host 0x%x ", features[0]);
- seq_printf(s, "Guest 0x%x\n", features[1]);
-
- config = mic_vq_configspace(d);
- for (j = 0; j < d->config_len; j++)
- seq_printf(s, "config[%d]=%d\n", j, config[j]);
-
- seq_puts(s, "Device control:\n");
- seq_printf(s, "Config Change %d ", dc->config_change);
- seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
- seq_printf(s, "Guest Ack %d ", dc->guest_ack);
- seq_printf(s, "Host ack %d\n", dc->host_ack);
- seq_printf(s, "Used address updated %d ",
- dc->used_address_updated);
- seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
- seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
- seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
- }
-
- return 0;
-}
-
-static int mic_dp_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mic_dp_show, inode->i_private);
-}
-
-static int mic_dp_debug_release(struct inode *inode, struct file *file)
-{
- return single_release(inode, file);
-}
-
-static const struct file_operations dp_ops = {
- .owner = THIS_MODULE,
- .open = mic_dp_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = mic_dp_debug_release
-};
-
-static int mic_vdev_info_show(struct seq_file *s, void *unused)
-{
- struct mic_device *mdev = s->private;
- struct list_head *pos, *tmp;
- struct mic_vdev *mvdev;
- int i, j;
-
- mutex_lock(&mdev->mic_mutex);
- list_for_each_safe(pos, tmp, &mdev->vdev_list) {
- mvdev = list_entry(pos, struct mic_vdev, list);
- seq_printf(s, "VDEV type %d state %s in %ld out %ld\n",
- mvdev->virtio_id,
- mic_vdevup(mvdev) ? "UP" : "DOWN",
- mvdev->in_bytes,
- mvdev->out_bytes);
- for (i = 0; i < MIC_MAX_VRINGS; i++) {
- struct vring_desc *desc;
- struct vring_avail *avail;
- struct vring_used *used;
- struct mic_vringh *mvr = &mvdev->mvr[i];
- struct vringh *vrh = &mvr->vrh;
- int num = vrh->vring.num;
- if (!num)
- continue;
- desc = vrh->vring.desc;
- seq_printf(s, "vring i %d avail_idx %d",
- i, mvr->vring.info->avail_idx & (num - 1));
- seq_printf(s, " vring i %d avail_idx %d\n",
- i, mvr->vring.info->avail_idx);
- seq_printf(s, "vrh i %d weak_barriers %d",
- i, vrh->weak_barriers);
- seq_printf(s, " last_avail_idx %d last_used_idx %d",
- vrh->last_avail_idx, vrh->last_used_idx);
- seq_printf(s, " completed %d\n", vrh->completed);
- for (j = 0; j < num; j++) {
- seq_printf(s, "desc[%d] addr 0x%llx len %d",
- j, desc->addr, desc->len);
- seq_printf(s, " flags 0x%x next %d\n",
- desc->flags, desc->next);
- desc++;
- }
- avail = vrh->vring.avail;
- seq_printf(s, "avail flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, avail->flags),
- vringh16_to_cpu(vrh, avail->idx) & (num - 1));
- seq_printf(s, "avail flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, avail->flags),
- vringh16_to_cpu(vrh, avail->idx));
- for (j = 0; j < num; j++)
- seq_printf(s, "avail ring[%d] %d\n",
- j, avail->ring[j]);
- used = vrh->vring.used;
- seq_printf(s, "used flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, used->flags),
- vringh16_to_cpu(vrh, used->idx) & (num - 1));
- seq_printf(s, "used flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, used->flags),
- vringh16_to_cpu(vrh, used->idx));
- for (j = 0; j < num; j++)
- seq_printf(s, "used ring[%d] id %d len %d\n",
- j, vringh32_to_cpu(vrh,
- used->ring[j].id),
- vringh32_to_cpu(vrh,
- used->ring[j].len));
- }
- }
- mutex_unlock(&mdev->mic_mutex);
-
- return 0;
-}
-
-static int mic_vdev_info_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mic_vdev_info_show, inode->i_private);
-}
-
-static int mic_vdev_info_debug_release(struct inode *inode, struct file *file)
-{
- return single_release(inode, file);
-}
-
-static const struct file_operations vdev_info_ops = {
- .owner = THIS_MODULE,
- .open = mic_vdev_info_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = mic_vdev_info_debug_release
-};
-
static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
{
struct mic_device *mdev = s->private;
@@ -367,11 +182,6 @@ void mic_create_debug_dir(struct mic_device *mdev)
debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
&post_code_ops);
- debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops);
-
- debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev,
- &vdev_info_ops);
-
debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
&msi_irq_info_ops);
}
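
The dp and vdev_info files deleted here reappear under the VOP driver (see vop_debugfs.c below); both follow the stock single_open() seq_file pattern. A minimal sketch with a hypothetical foo_show():

	static int foo_show(struct seq_file *s, void *pos)
	{
		struct mic_device *mdev = s->private;

		seq_printf(s, "id %d\n", mdev->id);
		return 0;
	}

	static int foo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, foo_show, inode->i_private);
	}

	static const struct file_operations foo_ops = {
		.owner = THIS_MODULE,
		.open = foo_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
	};
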
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 461184a12fbb..52b12b22f4ae 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -29,6 +29,7 @@
#include <linux/miscdevice.h>
#include <linux/mic_bus.h>
#include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
#include "../bus/cosm_bus.h"
#include "mic_intr.h"
@@ -64,13 +65,11 @@ extern struct cosm_hw_ops cosm_hw_ops;
* @bootaddr: MIC boot address.
* @dp: virtio device page
* @dp_dma_addr: virtio device page DMA address.
- * @name: name for the misc char device
- * @miscdev: registered misc char device
- * @vdev_list: list of virtio devices.
* @dma_mbdev: MIC BUS DMA device.
* @dma_ch - Array of DMA channels
* @num_dma_ch - Number of DMA channels available
* @scdev: SCIF device on the SCIF virtual bus.
+ * @vpdev: Virtio over PCIe device on the VOP virtual bus.
* @cosm_dev: COSM device
*/
struct mic_device {
@@ -91,13 +90,11 @@ struct mic_device {
u32 bootaddr;
void *dp;
dma_addr_t dp_dma_addr;
- char name[16];
- struct miscdevice miscdev;
- struct list_head vdev_list;
struct mbus_device *dma_mbdev;
struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
int num_dma_ch;
struct scif_hw_dev *scdev;
+ struct vop_device *vpdev;
struct cosm_device *cosm_dev;
};
diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c
deleted file mode 100644
index 8cc1d90cd949..000000000000
--- a/drivers/misc/mic/host/mic_fops.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#include <linux/poll.h>
-#include <linux/pci.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_fops.h"
-#include "mic_virtio.h"
-
-int mic_open(struct inode *inode, struct file *f)
-{
- struct mic_vdev *mvdev;
- struct mic_device *mdev = container_of(f->private_data,
- struct mic_device, miscdev);
-
- mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
- if (!mvdev)
- return -ENOMEM;
-
- init_waitqueue_head(&mvdev->waitq);
- INIT_LIST_HEAD(&mvdev->list);
- mvdev->mdev = mdev;
- mvdev->virtio_id = -1;
-
- f->private_data = mvdev;
- return 0;
-}
-
-int mic_release(struct inode *inode, struct file *f)
-{
- struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-
- if (-1 != mvdev->virtio_id)
- mic_virtio_del_device(mvdev);
- f->private_data = NULL;
- kfree(mvdev);
- return 0;
-}
-
-long mic_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
- void __user *argp = (void __user *)arg;
- int ret;
-
- switch (cmd) {
- case MIC_VIRTIO_ADD_DEVICE:
- {
- ret = mic_virtio_add_device(mvdev, argp);
- if (ret < 0) {
- dev_err(mic_dev(mvdev),
- "%s %d errno ret %d\n",
- __func__, __LINE__, ret);
- return ret;
- }
- break;
- }
- case MIC_VIRTIO_COPY_DESC:
- {
- struct mic_copy_desc copy;
-
- ret = mic_vdev_inited(mvdev);
- if (ret)
- return ret;
-
- if (copy_from_user(&copy, argp, sizeof(copy)))
- return -EFAULT;
-
- dev_dbg(mic_dev(mvdev),
- "%s %d === iovcnt 0x%x vr_idx 0x%x update_used %d\n",
- __func__, __LINE__, copy.iovcnt, copy.vr_idx,
- copy.update_used);
-
- ret = mic_virtio_copy_desc(mvdev, &copy);
- if (ret < 0) {
- dev_err(mic_dev(mvdev),
- "%s %d errno ret %d\n",
- __func__, __LINE__, ret);
- return ret;
- }
- if (copy_to_user(
- &((struct mic_copy_desc __user *)argp)->out_len,
- &copy.out_len, sizeof(copy.out_len))) {
- dev_err(mic_dev(mvdev), "%s %d errno ret %d\n",
- __func__, __LINE__, -EFAULT);
- return -EFAULT;
- }
- break;
- }
- case MIC_VIRTIO_CONFIG_CHANGE:
- {
- ret = mic_vdev_inited(mvdev);
- if (ret)
- return ret;
-
- ret = mic_virtio_config_change(mvdev, argp);
- if (ret < 0) {
- dev_err(mic_dev(mvdev),
- "%s %d errno ret %d\n",
- __func__, __LINE__, ret);
- return ret;
- }
- break;
- }
- default:
- return -ENOIOCTLCMD;
- };
- return 0;
-}
-
-/*
- * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
- * not when previously enqueued buffers may be available. This means that
- * in the card->host (TX) path, when userspace is unblocked by poll it
- * must drain all available descriptors or it can stall.
- */
-unsigned int mic_poll(struct file *f, poll_table *wait)
-{
- struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
- int mask = 0;
-
- poll_wait(f, &mvdev->waitq, wait);
-
- if (mic_vdev_inited(mvdev)) {
- mask = POLLERR;
- } else if (mvdev->poll_wake) {
- mvdev->poll_wake = 0;
- mask = POLLIN | POLLOUT;
- }
-
- return mask;
-}
-
-static inline int
-mic_query_offset(struct mic_vdev *mvdev, unsigned long offset,
- unsigned long *size, unsigned long *pa)
-{
- struct mic_device *mdev = mvdev->mdev;
- unsigned long start = MIC_DP_SIZE;
- int i;
-
- /*
- * MMAP interface is as follows:
- * offset region
- * 0x0 virtio device_page
- * 0x1000 first vring
- * 0x1000 + size of 1st vring second vring
- * ....
- */
- if (!offset) {
- *pa = virt_to_phys(mdev->dp);
- *size = MIC_DP_SIZE;
- return 0;
- }
-
- for (i = 0; i < mvdev->dd->num_vq; i++) {
- struct mic_vringh *mvr = &mvdev->mvr[i];
- if (offset == start) {
- *pa = virt_to_phys(mvr->vring.va);
- *size = mvr->vring.len;
- return 0;
- }
- start += mvr->vring.len;
- }
- return -1;
-}
-
-/*
- * Maps the device page and virtio rings to user space for readonly access.
- */
-int
-mic_mmap(struct file *f, struct vm_area_struct *vma)
-{
- struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
- int i, err;
-
- err = mic_vdev_inited(mvdev);
- if (err)
- return err;
-
- if (vma->vm_flags & VM_WRITE)
- return -EACCES;
-
- while (size_rem) {
- i = mic_query_offset(mvdev, offset, &size, &pa);
- if (i < 0)
- return -EINVAL;
- err = remap_pfn_range(vma, vma->vm_start + offset,
- pa >> PAGE_SHIFT, size, vma->vm_page_prot);
- if (err)
- return err;
- dev_dbg(mic_dev(mvdev),
- "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n",
- __func__, __LINE__, mvdev->virtio_id, size, offset,
- pa, vma->vm_start + offset);
- size_rem -= size;
- offset += size;
- }
- return 0;
-}
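
The removed mic_mmap() exposed the device page at offset 0 followed by each vring, all read-only, via remap_pfn_range(). A sketch of a single such mapping; map_one() is an invented helper and error handling is trimmed:

	static int map_one(struct vm_area_struct *vma, void *va, size_t len)
	{
		if (vma->vm_flags & VM_WRITE)
			return -EACCES;		/* userspace gets RO access */

		return remap_pfn_range(vma, vma->vm_start,
				       virt_to_phys(va) >> PAGE_SHIFT,
				       len, vma->vm_page_prot);
	}
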
diff --git a/drivers/misc/mic/host/mic_fops.h b/drivers/misc/mic/host/mic_fops.h
deleted file mode 100644
index dc3893dff667..000000000000
--- a/drivers/misc/mic/host/mic_fops.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#ifndef _MIC_FOPS_H_
-#define _MIC_FOPS_H_
-
-int mic_open(struct inode *inode, struct file *filp);
-int mic_release(struct inode *inode, struct file *filp);
-ssize_t mic_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos);
-long mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-int mic_mmap(struct file *f, struct vm_area_struct *vma);
-unsigned int mic_poll(struct file *f, poll_table *wait);
-
-#endif
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index 153894e7ed5b..035be3e9ceba 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -27,8 +27,6 @@
#include "mic_device.h"
#include "mic_x100.h"
#include "mic_smpt.h"
-#include "mic_fops.h"
-#include "mic_virtio.h"
static const char mic_driver_name[] = "mic";
@@ -57,17 +55,6 @@ MODULE_DEVICE_TABLE(pci, mic_pci_tbl);
/* ID allocator for MIC devices */
static struct ida g_mic_ida;
-/* Base device node number for MIC devices */
-static dev_t g_mic_devno;
-
-static const struct file_operations mic_fops = {
- .open = mic_open,
- .release = mic_release,
- .unlocked_ioctl = mic_ioctl,
- .poll = mic_poll,
- .mmap = mic_mmap,
- .owner = THIS_MODULE,
-};
/* Initialize the device page */
static int mic_dp_init(struct mic_device *mdev)
@@ -169,7 +156,6 @@ mic_device_init(struct mic_device *mdev, struct pci_dev *pdev)
mic_ops_init(mdev);
mutex_init(&mdev->mic_mutex);
mdev->irq_info.next_avail_src = 0;
- INIT_LIST_HEAD(&mdev->vdev_list);
}
/**
@@ -259,30 +245,15 @@ static int mic_probe(struct pci_dev *pdev,
goto smpt_uninit;
}
mic_bootparam_init(mdev);
-
mic_create_debug_dir(mdev);
- mdev->miscdev.minor = MISC_DYNAMIC_MINOR;
- snprintf(mdev->name, sizeof(mdev->name), "mic%d", mdev->id);
- mdev->miscdev.name = mdev->name;
- mdev->miscdev.fops = &mic_fops;
- mdev->miscdev.parent = &mdev->pdev->dev;
- rc = misc_register(&mdev->miscdev);
- if (rc) {
- dev_err(&pdev->dev, "misc_register err id %d rc %d\n",
- mdev->id, rc);
- goto cleanup_debug_dir;
- }
-
mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops);
if (IS_ERR(mdev->cosm_dev)) {
rc = PTR_ERR(mdev->cosm_dev);
dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc);
- goto misc_dereg;
+ goto cleanup_debug_dir;
}
return 0;
-misc_dereg:
- misc_deregister(&mdev->miscdev);
cleanup_debug_dir:
mic_delete_debug_dir(mdev);
mic_dp_uninit(mdev);
@@ -323,7 +294,6 @@ static void mic_remove(struct pci_dev *pdev)
return;
cosm_unregister_device(mdev->cosm_dev);
- misc_deregister(&mdev->miscdev);
mic_delete_debug_dir(mdev);
mic_dp_uninit(mdev);
mic_smpt_uninit(mdev);
@@ -347,26 +317,18 @@ static int __init mic_init(void)
{
int ret;
- ret = alloc_chrdev_region(&g_mic_devno, 0,
- MIC_MAX_NUM_DEVS, mic_driver_name);
- if (ret) {
- pr_err("alloc_chrdev_region failed ret %d\n", ret);
- goto error;
- }
-
+ request_module("mic_x100_dma");
mic_init_debugfs();
ida_init(&g_mic_ida);
ret = pci_register_driver(&mic_driver);
if (ret) {
pr_err("pci_register_driver failed ret %d\n", ret);
- goto cleanup_chrdev;
+ goto cleanup_debugfs;
}
- return ret;
-cleanup_chrdev:
+ return 0;
+cleanup_debugfs:
ida_destroy(&g_mic_ida);
mic_exit_debugfs();
- unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
-error:
return ret;
}
@@ -375,7 +337,6 @@ static void __exit mic_exit(void)
pci_unregister_driver(&mic_driver);
ida_destroy(&g_mic_ida);
mic_exit_debugfs();
- unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
}
module_init(mic_init);
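
With the misc char device gone, mic_init() no longer reserves a chrdev region, and the remaining error labels unwind strictly in reverse setup order. That shape, sketched with hypothetical setup_a()/register_b()/teardown_a():

	static int __init example_init(void)
	{
		int ret;

		setup_a();			/* debugfs, ida, ... */
		ret = register_b();		/* e.g. the PCI driver */
		if (ret)
			goto undo_a;		/* tear down in reverse */
		return 0;
	undo_a:
		teardown_a();
		return ret;
	}
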
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
deleted file mode 100644
index 58b107a24a8b..000000000000
--- a/drivers/misc/mic/host/mic_virtio.c
+++ /dev/null
@@ -1,811 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/dmaengine.h>
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-#include "mic_virtio.h"
-
-/*
- * Size of the internal buffer used during DMA's as an intermediate buffer
- * for copy to/from user.
- */
-#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
-
-static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
- dma_addr_t src, size_t len)
-{
- int err = 0;
- struct dma_async_tx_descriptor *tx;
- struct dma_chan *mic_ch = mdev->dma_ch[0];
-
- if (!mic_ch) {
- err = -EBUSY;
- goto error;
- }
-
- tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
- DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- goto error;
- } else {
- dma_cookie_t cookie = tx->tx_submit(tx);
-
- err = dma_submit_error(cookie);
- if (err)
- goto error;
- err = dma_sync_wait(mic_ch, cookie);
- }
-error:
- if (err)
- dev_err(&mdev->pdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
-}
-
-/*
- * Initiates the copies across the PCIe bus from card memory to a user
- * space buffer. When transfers are done using DMA, source/destination
- * addresses and transfer length must follow the alignment requirements of
- * the MIC DMA engine.
- */
-static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
- size_t len, u64 daddr, size_t dlen,
- int vr_idx)
-{
- struct mic_device *mdev = mvdev->mdev;
- void __iomem *dbuf = mdev->aper.va + daddr;
- struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
- size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
- size_t dma_offset;
- size_t partlen;
- int err;
-
- dma_offset = daddr - round_down(daddr, dma_alignment);
- daddr -= dma_offset;
- len += dma_offset;
-
- while (len) {
- partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
-
- err = mic_sync_dma(mdev, mvr->buf_da, daddr,
- ALIGN(partlen, dma_alignment));
- if (err)
- goto err;
-
- if (copy_to_user(ubuf, mvr->buf + dma_offset,
- partlen - dma_offset)) {
- err = -EFAULT;
- goto err;
- }
- daddr += partlen;
- ubuf += partlen;
- dbuf += partlen;
- mvdev->in_bytes_dma += partlen;
- mvdev->in_bytes += partlen;
- len -= partlen;
- dma_offset = 0;
- }
- return 0;
-err:
- dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-
-/*
- * Initiates copies across the PCIe bus from a user space buffer to card
- * memory. When transfers are done using DMA, source/destination addresses
- * and transfer length must follow the alignment requirements of the MIC
- * DMA engine.
- */
-static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
- size_t len, u64 daddr, size_t dlen,
- int vr_idx)
-{
- struct mic_device *mdev = mvdev->mdev;
- void __iomem *dbuf = mdev->aper.va + daddr;
- struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
- size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
- size_t partlen;
- int err;
-
- if (daddr & (dma_alignment - 1)) {
- mvdev->tx_dst_unaligned += len;
- goto memcpy;
- } else if (ALIGN(len, dma_alignment) > dlen) {
- mvdev->tx_len_unaligned += len;
- goto memcpy;
- }
-
- while (len) {
- partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
-
- if (copy_from_user(mvr->buf, ubuf, partlen)) {
- err = -EFAULT;
- goto err;
- }
- err = mic_sync_dma(mdev, daddr, mvr->buf_da,
- ALIGN(partlen, dma_alignment));
- if (err)
- goto err;
- daddr += partlen;
- ubuf += partlen;
- dbuf += partlen;
- mvdev->out_bytes_dma += partlen;
- mvdev->out_bytes += partlen;
- len -= partlen;
- }
-memcpy:
- /*
- * We are copying to IO below and should ideally use something
- * like copy_from_user_toio(..) if it existed.
- */
- if (copy_from_user((void __force *)dbuf, ubuf, len)) {
- err = -EFAULT;
- goto err;
- }
- mvdev->out_bytes += len;
- return 0;
-err:
- dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-
-#define MIC_VRINGH_READ true
-
-/* The function to call to notify the card about added buffers */
-static void mic_notify(struct vringh *vrh)
-{
- struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
- struct mic_vdev *mvdev = mvrh->mvdev;
- s8 db = mvdev->dc->h2c_vdev_db;
-
- if (db != -1)
- mvdev->mdev->ops->send_intr(mvdev->mdev, db);
-}
-
-/* Determine the total number of bytes consumed in a VRINGH KIOV */
-static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
-{
- int i;
- u32 total = iov->consumed;
-
- for (i = 0; i < iov->i; i++)
- total += iov->iov[i].iov_len;
- return total;
-}
-
-/*
- * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
- * This API is heavily based on the vringh_iov_xfer(..) implementation
- * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
- * and vringh_iov_push_kern(..) directly is because there is no
- * way to override the VRINGH xfer(..) routines as of v3.10.
- */
-static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
- void __user *ubuf, size_t len, bool read, int vr_idx,
- size_t *out_len)
-{
- int ret = 0;
- size_t partlen, tot_len = 0;
-
- while (len && iov->i < iov->used) {
- partlen = min(iov->iov[iov->i].iov_len, len);
- if (read)
- ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
- (u64)iov->iov[iov->i].iov_base,
- iov->iov[iov->i].iov_len,
- vr_idx);
- else
- ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
- (u64)iov->iov[iov->i].iov_base,
- iov->iov[iov->i].iov_len,
- vr_idx);
- if (ret) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= partlen;
- ubuf += partlen;
- tot_len += partlen;
- iov->consumed += partlen;
- iov->iov[iov->i].iov_len -= partlen;
- iov->iov[iov->i].iov_base += partlen;
- if (!iov->iov[iov->i].iov_len) {
- /* Fix up old iov element then increment. */
- iov->iov[iov->i].iov_len = iov->consumed;
- iov->iov[iov->i].iov_base -= iov->consumed;
-
- iov->consumed = 0;
- iov->i++;
- }
- }
- *out_len = tot_len;
- return ret;
-}
-
-/*
- * Use the standard VRINGH infrastructure in the kernel to fetch new
- * descriptors, initiate the copies and update the used ring.
- */
-static int _mic_virtio_copy(struct mic_vdev *mvdev,
- struct mic_copy_desc *copy)
-{
- int ret = 0;
- u32 iovcnt = copy->iovcnt;
- struct iovec iov;
- struct iovec __user *u_iov = copy->iov;
- void __user *ubuf = NULL;
- struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
- struct vringh_kiov *riov = &mvr->riov;
- struct vringh_kiov *wiov = &mvr->wiov;
- struct vringh *vrh = &mvr->vrh;
- u16 *head = &mvr->head;
- struct mic_vring *vr = &mvr->vring;
- size_t len = 0, out_len;
-
- copy->out_len = 0;
- /* Fetch a new IOVEC if all previous elements have been processed */
- if (riov->i == riov->used && wiov->i == wiov->used) {
- ret = vringh_getdesc_kern(vrh, riov, wiov,
- head, GFP_KERNEL);
- /* Check if there are available descriptors */
- if (ret <= 0)
- return ret;
- }
- while (iovcnt) {
- if (!len) {
- /* Copy over a new iovec from user space. */
- ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
- if (ret) {
- ret = -EINVAL;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len = iov.iov_len;
- ubuf = iov.iov_base;
- }
- /* Issue all the read descriptors first */
- ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
- copy->vr_idx, &out_len);
- if (ret) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= out_len;
- ubuf += out_len;
- copy->out_len += out_len;
- /* Issue the write descriptors next */
- ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
- copy->vr_idx, &out_len);
- if (ret) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= out_len;
- ubuf += out_len;
- copy->out_len += out_len;
- if (!len) {
- /* One user space iovec is now completed */
- iovcnt--;
- u_iov++;
- }
- /* Exit loop if all elements in KIOVs have been processed. */
- if (riov->i == riov->used && wiov->i == wiov->used)
- break;
- }
- /*
- * Update the used ring if a descriptor was available and some data was
- * copied in/out and the user asked for a used ring update.
- */
- if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
- u32 total = 0;
-
- /* Determine the total data consumed */
- total += mic_vringh_iov_consumed(riov);
- total += mic_vringh_iov_consumed(wiov);
- vringh_complete_kern(vrh, *head, total);
- *head = USHRT_MAX;
- if (vringh_need_notify_kern(vrh) > 0)
- vringh_notify(vrh);
- vringh_kiov_cleanup(riov);
- vringh_kiov_cleanup(wiov);
- /* Update avail idx for user space */
- vr->info->avail_idx = vrh->last_avail_idx;
- }
- return ret;
-}
-
-static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
- struct mic_copy_desc *copy)
-{
- if (copy->vr_idx >= mvdev->dd->num_vq) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EINVAL);
- return -EINVAL;
- }
- return 0;
-}
-
-/* Copy a specified number of virtio descriptors in a chain */
-int mic_virtio_copy_desc(struct mic_vdev *mvdev,
- struct mic_copy_desc *copy)
-{
- int err;
- struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
-
- err = mic_verify_copy_args(mvdev, copy);
- if (err)
- return err;
-
- mutex_lock(&mvr->vr_mutex);
- if (!mic_vdevup(mvdev)) {
- err = -ENODEV;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- err = _mic_virtio_copy(mvdev, copy);
- if (err) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- }
-err:
- mutex_unlock(&mvr->vr_mutex);
- return err;
-}
-
-static void mic_virtio_init_post(struct mic_vdev *mvdev)
-{
- struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
- int i;
-
- for (i = 0; i < mvdev->dd->num_vq; i++) {
- if (!le64_to_cpu(vqconfig[i].used_address)) {
- dev_warn(mic_dev(mvdev), "used_address zero??\n");
- continue;
- }
- mvdev->mvr[i].vrh.vring.used =
- (void __force *)mvdev->mdev->aper.va +
- le64_to_cpu(vqconfig[i].used_address);
- }
-
- mvdev->dc->used_address_updated = 0;
-
- dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
- __func__, mvdev->virtio_id);
-}
-
-static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
-{
- int i;
-
- dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
- __func__, mvdev->dd->status, mvdev->virtio_id);
-
- for (i = 0; i < mvdev->dd->num_vq; i++)
- /*
- * Avoid lockdep false positive. The + 1 is for the mic
- * mutex which is held in the reset devices code path.
- */
- mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
-
- /* 0 status means "reset" */
- mvdev->dd->status = 0;
- mvdev->dc->vdev_reset = 0;
- mvdev->dc->host_ack = 1;
-
- for (i = 0; i < mvdev->dd->num_vq; i++) {
- struct vringh *vrh = &mvdev->mvr[i].vrh;
- mvdev->mvr[i].vring.info->avail_idx = 0;
- vrh->completed = 0;
- vrh->last_avail_idx = 0;
- vrh->last_used_idx = 0;
- }
-
- for (i = 0; i < mvdev->dd->num_vq; i++)
- mutex_unlock(&mvdev->mvr[i].vr_mutex);
-}
-
-void mic_virtio_reset_devices(struct mic_device *mdev)
-{
- struct list_head *pos, *tmp;
- struct mic_vdev *mvdev;
-
- dev_dbg(&mdev->pdev->dev, "%s\n", __func__);
-
- list_for_each_safe(pos, tmp, &mdev->vdev_list) {
- mvdev = list_entry(pos, struct mic_vdev, list);
- mic_virtio_device_reset(mvdev);
- mvdev->poll_wake = 1;
- wake_up(&mvdev->waitq);
- }
-}
-
-void mic_bh_handler(struct work_struct *work)
-{
- struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
- virtio_bh_work);
-
- if (mvdev->dc->used_address_updated)
- mic_virtio_init_post(mvdev);
-
- if (mvdev->dc->vdev_reset)
- mic_virtio_device_reset(mvdev);
-
- mvdev->poll_wake = 1;
- wake_up(&mvdev->waitq);
-}
-
-static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
-{
- struct mic_vdev *mvdev = data;
- struct mic_device *mdev = mvdev->mdev;
-
- mdev->ops->intr_workarounds(mdev);
- schedule_work(&mvdev->virtio_bh_work);
- return IRQ_HANDLED;
-}
-
-int mic_virtio_config_change(struct mic_vdev *mvdev,
- void __user *argp)
-{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
- int ret = 0, retry, i;
- struct mic_bootparam *bootparam = mvdev->mdev->dp;
- s8 db = bootparam->h2c_config_db;
-
- mutex_lock(&mvdev->mdev->mic_mutex);
- for (i = 0; i < mvdev->dd->num_vq; i++)
- mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
-
- if (db == -1 || mvdev->dd->type == -1) {
- ret = -EIO;
- goto exit;
- }
-
- if (copy_from_user(mic_vq_configspace(mvdev->dd),
- argp, mvdev->dd->config_len)) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EFAULT);
- ret = -EFAULT;
- goto exit;
- }
- mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
- mvdev->mdev->ops->send_intr(mvdev->mdev, db);
-
- for (retry = 100; retry--;) {
- ret = wait_event_timeout(wake,
- mvdev->dc->guest_ack, msecs_to_jiffies(100));
- if (ret)
- break;
- }
-
- dev_dbg(mic_dev(mvdev),
- "%s %d retry: %d\n", __func__, __LINE__, retry);
- mvdev->dc->config_change = 0;
- mvdev->dc->guest_ack = 0;
-exit:
- for (i = 0; i < mvdev->dd->num_vq; i++)
- mutex_unlock(&mvdev->mvr[i].vr_mutex);
- mutex_unlock(&mvdev->mdev->mic_mutex);
- return ret;
-}
-
-static int mic_copy_dp_entry(struct mic_vdev *mvdev,
- void __user *argp,
- __u8 *type,
- struct mic_device_desc **devpage)
-{
- struct mic_device *mdev = mvdev->mdev;
- struct mic_device_desc dd, *dd_config, *devp;
- struct mic_vqconfig *vqconfig;
- int ret = 0, i;
- bool slot_found = false;
-
- if (copy_from_user(&dd, argp, sizeof(dd))) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EFAULT);
- return -EFAULT;
- }
-
- if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
- dd.num_vq > MIC_MAX_VRINGS) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EINVAL);
- return -EINVAL;
- }
-
- dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
- if (dd_config == NULL) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -ENOMEM);
- return -ENOMEM;
- }
- if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
- ret = -EFAULT;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
-
- vqconfig = mic_vq_config(dd_config);
- for (i = 0; i < dd.num_vq; i++) {
- if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
- ret = -EINVAL;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
- }
-
- /* Find the first free device page entry */
- for (i = sizeof(struct mic_bootparam);
- i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
- i += mic_total_desc_size(devp)) {
- devp = mdev->dp + i;
- if (devp->type == 0 || devp->type == -1) {
- slot_found = true;
- break;
- }
- }
- if (!slot_found) {
- ret = -EINVAL;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
- /*
- * Save off the type before doing the memcpy. Type will be set in the
- * end after completing all initialization for the new device.
- */
- *type = dd_config->type;
- dd_config->type = 0;
- memcpy(devp, dd_config, mic_desc_size(dd_config));
-
- *devpage = devp;
-exit:
- kfree(dd_config);
- return ret;
-}
-
-static void mic_init_device_ctrl(struct mic_vdev *mvdev,
- struct mic_device_desc *devpage)
-{
- struct mic_device_ctrl *dc;
-
- dc = (void *)devpage + mic_aligned_desc_size(devpage);
-
- dc->config_change = 0;
- dc->guest_ack = 0;
- dc->vdev_reset = 0;
- dc->host_ack = 0;
- dc->used_address_updated = 0;
- dc->c2h_vdev_db = -1;
- dc->h2c_vdev_db = -1;
- mvdev->dc = dc;
-}
-
-int mic_virtio_add_device(struct mic_vdev *mvdev,
- void __user *argp)
-{
- struct mic_device *mdev = mvdev->mdev;
- struct mic_device_desc *dd = NULL;
- struct mic_vqconfig *vqconfig;
- int vr_size, i, j, ret;
- u8 type = 0;
- s8 db;
- char irqname[10];
- struct mic_bootparam *bootparam = mdev->dp;
- u16 num;
- dma_addr_t vr_addr;
-
- mutex_lock(&mdev->mic_mutex);
-
- ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
- if (ret) {
- mutex_unlock(&mdev->mic_mutex);
- return ret;
- }
-
- mic_init_device_ctrl(mvdev, dd);
-
- mvdev->dd = dd;
- mvdev->virtio_id = type;
- vqconfig = mic_vq_config(dd);
- INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);
-
- for (i = 0; i < dd->num_vq; i++) {
- struct mic_vringh *mvr = &mvdev->mvr[i];
- struct mic_vring *vr = &mvdev->mvr[i].vring;
- num = le16_to_cpu(vqconfig[i].num);
- mutex_init(&mvr->vr_mutex);
- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
- sizeof(struct _mic_vring_info));
- vr->va = (void *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(vr_size));
- if (!vr->va) {
- ret = -ENOMEM;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vr->len = vr_size;
- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
- vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
- vr_addr = mic_map_single(mdev, vr->va, vr_size);
- if (mic_map_error(vr_addr)) {
- free_pages((unsigned long)vr->va, get_order(vr_size));
- ret = -ENOMEM;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vqconfig[i].address = cpu_to_le64(vr_addr);
-
- vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
- ret = vringh_init_kern(&mvr->vrh,
- *(u32 *)mic_vq_features(mvdev->dd), num, false,
- vr->vr.desc, vr->vr.avail, vr->vr.used);
- if (ret) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vringh_kiov_init(&mvr->riov, NULL, 0);
- vringh_kiov_init(&mvr->wiov, NULL, 0);
- mvr->head = USHRT_MAX;
- mvr->mvdev = mvdev;
- mvr->vrh.notify = mic_notify;
- dev_dbg(&mdev->pdev->dev,
- "%s %d index %d va %p info %p vr_size 0x%x\n",
- __func__, __LINE__, i, vr->va, vr->info, vr_size);
- mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
- get_order(MIC_INT_DMA_BUF_SIZE));
- mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
- MIC_INT_DMA_BUF_SIZE);
- }
-
- snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
- mvdev->virtio_id);
- mvdev->virtio_db = mic_next_db(mdev);
- mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
- mic_virtio_intr_handler,
- NULL, irqname, mvdev,
- mvdev->virtio_db, MIC_INTR_DB);
- if (IS_ERR(mvdev->virtio_cookie)) {
- ret = PTR_ERR(mvdev->virtio_cookie);
- dev_dbg(&mdev->pdev->dev, "request irq failed\n");
- goto err;
- }
-
- mvdev->dc->c2h_vdev_db = mvdev->virtio_db;
-
- list_add_tail(&mvdev->list, &mdev->vdev_list);
- /*
- * Order the type update with previous stores. This write barrier
- * is paired with the corresponding read barrier before the uncached
- * system memory read of the type, on the card while scanning the
- * device page.
- */
- smp_wmb();
- dd->type = type;
-
- dev_dbg(&mdev->pdev->dev, "Added virtio device id %d\n", dd->type);
-
- db = bootparam->h2c_config_db;
- if (db != -1)
- mdev->ops->send_intr(mdev, db);
- mutex_unlock(&mdev->mic_mutex);
- return 0;
-err:
- vqconfig = mic_vq_config(dd);
- for (j = 0; j < i; j++) {
- struct mic_vringh *mvr = &mvdev->mvr[j];
- mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
- mvr->vring.len);
- free_pages((unsigned long)mvr->vring.va,
- get_order(mvr->vring.len));
- }
- mutex_unlock(&mdev->mic_mutex);
- return ret;
-}
-
-void mic_virtio_del_device(struct mic_vdev *mvdev)
-{
- struct list_head *pos, *tmp;
- struct mic_vdev *tmp_mvdev;
- struct mic_device *mdev = mvdev->mdev;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
- int i, ret, retry;
- struct mic_vqconfig *vqconfig;
- struct mic_bootparam *bootparam = mdev->dp;
- s8 db;
-
- mutex_lock(&mdev->mic_mutex);
- db = bootparam->h2c_config_db;
- if (db == -1)
- goto skip_hot_remove;
- dev_dbg(&mdev->pdev->dev,
- "Requesting hot remove id %d\n", mvdev->virtio_id);
- mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
- mdev->ops->send_intr(mdev, db);
- for (retry = 100; retry--;) {
- ret = wait_event_timeout(wake,
- mvdev->dc->guest_ack, msecs_to_jiffies(100));
- if (ret)
- break;
- }
- dev_dbg(&mdev->pdev->dev,
- "Device id %d config_change %d guest_ack %d retry %d\n",
- mvdev->virtio_id, mvdev->dc->config_change,
- mvdev->dc->guest_ack, retry);
- mvdev->dc->config_change = 0;
- mvdev->dc->guest_ack = 0;
-skip_hot_remove:
- mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
- flush_work(&mvdev->virtio_bh_work);
- vqconfig = mic_vq_config(mvdev->dd);
- for (i = 0; i < mvdev->dd->num_vq; i++) {
- struct mic_vringh *mvr = &mvdev->mvr[i];
-
- mic_unmap_single(mvdev->mdev, mvr->buf_da,
- MIC_INT_DMA_BUF_SIZE);
- free_pages((unsigned long)mvr->buf,
- get_order(MIC_INT_DMA_BUF_SIZE));
- vringh_kiov_cleanup(&mvr->riov);
- vringh_kiov_cleanup(&mvr->wiov);
- mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
- mvr->vring.len);
- free_pages((unsigned long)mvr->vring.va,
- get_order(mvr->vring.len));
- }
-
- list_for_each_safe(pos, tmp, &mdev->vdev_list) {
- tmp_mvdev = list_entry(pos, struct mic_vdev, list);
- if (tmp_mvdev == mvdev) {
- list_del(pos);
- dev_dbg(&mdev->pdev->dev,
- "Removing virtio device id %d\n",
- mvdev->virtio_id);
- break;
- }
- }
- /*
- * Order the type update with previous stores. This write barrier
- * is paired with the corresponding read barrier before the uncached
- * system memory read of the type, on the card while scanning the
- * device page.
- */
- smp_wmb();
- mvdev->dd->type = -1;
- mutex_unlock(&mdev->mic_mutex);
-}
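
The deleted host-side code did its PCIe copies through the generic dmaengine API; mic_sync_dma() above is essentially a blocking memcpy. The core sequence, sketched with channel setup, alignment handling, and bounce buffering elided:

	static int sync_memcpy(struct dma_chan *ch, dma_addr_t dst,
			       dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = ch->device->device_prep_dma_memcpy(ch, dst, src, len,
							DMA_PREP_FENCE);
		if (!tx)
			return -ENOMEM;
		cookie = tx->tx_submit(tx);
		if (dma_submit_error(cookie))
			return -ENOMEM;
		/* Spins until the copy completes; nonzero means failure,
		 * mirroring how the removed code used the status. */
		return dma_sync_wait(ch, cookie);
	}
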
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 8118ac48c764..82a973c85b5d 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -450,26 +450,29 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
rc = mic_x100_get_boot_addr(mdev);
if (rc)
- goto error;
+ return rc;
/* load OS */
rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev);
if (rc < 0) {
dev_err(&mdev->pdev->dev,
"ramdisk request_firmware failed: %d %s\n",
rc, mdev->cosm_dev->firmware);
- goto error;
+ return rc;
}
if (mdev->bootaddr > mdev->aper.len - fw->size) {
rc = -EINVAL;
dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n",
__func__, __LINE__, rc, mdev->bootaddr);
- release_firmware(fw);
goto error;
}
memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size);
mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);
- if (!strcmp(mdev->cosm_dev->bootmode, "flash"))
- goto done;
+ if (!strcmp(mdev->cosm_dev->bootmode, "flash")) {
+ rc = -EINVAL;
+ dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto error;
+ }
/* load command line */
rc = mic_x100_load_command_line(mdev, fw);
if (rc) {
@@ -481,9 +484,11 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
/* load ramdisk */
if (mdev->cosm_dev->ramdisk)
rc = mic_x100_load_ramdisk(mdev);
+
+ return rc;
+
error:
- dev_dbg(&mdev->pdev->dev, "%s %d rc %d\n", __func__, __LINE__, rc);
-done:
+ release_firmware(fw);
return rc;
}
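
The firmware rework above funnels every failure through one label so release_firmware() runs exactly once after a successful request, instead of being sprinkled over individual paths. The usual shape of that lifecycle, with consume() standing in for the real copy to the aperture:

	static int load_blob(struct device *dev, const char *name)
	{
		const struct firmware *fw;
		int rc = request_firmware(&fw, name, dev);

		if (rc)
			return rc;		/* nothing held yet */
		rc = consume(fw->data, fw->size);	/* hypothetical */
		release_firmware(fw);		/* released exactly once */
		return rc;
	}
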
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 95a13c629a8e..cd01a0efda6b 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -74,11 +74,6 @@ struct scif_copy_work {
bool ordered;
};
-#ifndef list_entry_next
-#define list_entry_next(pos, member) \
- list_entry(pos->member.next, typeof(*pos), member)
-#endif
-
/**
* scif_reserve_dma_chan:
* @ep: Endpoint Descriptor.
@@ -276,13 +271,10 @@ static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
{
struct scif_mmu_notif *mmn;
- struct list_head *item;
- list_for_each(item, &rma->mmn_list) {
- mmn = list_entry(item, struct scif_mmu_notif, list);
+ list_for_each_entry(mmn, &rma->mmn_list, list)
if (mmn->mm == mm)
return mmn;
- }
return NULL;
}
@@ -293,13 +285,12 @@ scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
= kzalloc(sizeof(*mmn), GFP_KERNEL);
if (!mmn)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
scif_init_mmu_notifier(mmn, current->mm, ep);
- if (mmu_notifier_register(&mmn->ep_mmu_notifier,
- current->mm)) {
+ if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
kfree(mmn);
- return ERR_PTR(EBUSY);
+ return ERR_PTR(-EBUSY);
}
list_add(&mmn->list, &ep->rma_info.mmn_list);
return mmn;
@@ -851,7 +842,7 @@ static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
(window->nr_pages << PAGE_SHIFT);
while (rem_len) {
if (offset == end_offset) {
- window = list_entry_next(window, list);
+ window = list_next_entry(window, list);
end_offset = window->offset +
(window->nr_pages << PAGE_SHIFT);
}
@@ -957,7 +948,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
remaining_len -= tail_len;
while (remaining_len) {
if (offset == end_offset) {
- window = list_entry_next(window, list);
+ window = list_next_entry(window, list);
end_offset = window->offset +
(window->nr_pages << PAGE_SHIFT);
}
@@ -1064,7 +1055,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
}
if (tail_len) {
if (offset == end_offset) {
- window = list_entry_next(window, list);
+ window = list_next_entry(window, list);
end_offset = window->offset +
(window->nr_pages << PAGE_SHIFT);
}
@@ -1147,13 +1138,13 @@ static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
(dst_window->nr_pages << PAGE_SHIFT);
while (remaining_len) {
if (src_offset == end_src_offset) {
- src_window = list_entry_next(src_window, list);
+ src_window = list_next_entry(src_window, list);
end_src_offset = src_window->offset +
(src_window->nr_pages << PAGE_SHIFT);
scif_init_window_iter(src_window, &src_win_iter);
}
if (dst_offset == end_dst_offset) {
- dst_window = list_entry_next(dst_window, list);
+ dst_window = list_next_entry(dst_window, list);
end_dst_offset = dst_window->offset +
(dst_window->nr_pages << PAGE_SHIFT);
scif_init_window_iter(dst_window, &dst_win_iter);
@@ -1314,13 +1305,13 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
remaining_len -= tail_len;
while (remaining_len) {
if (src_offset == end_src_offset) {
- src_window = list_entry_next(src_window, list);
+ src_window = list_next_entry(src_window, list);
end_src_offset = src_window->offset +
(src_window->nr_pages << PAGE_SHIFT);
scif_init_window_iter(src_window, &src_win_iter);
}
if (dst_offset == end_dst_offset) {
- dst_window = list_entry_next(dst_window, list);
+ dst_window = list_next_entry(dst_window, list);
end_dst_offset = dst_window->offset +
(dst_window->nr_pages << PAGE_SHIFT);
scif_init_window_iter(dst_window, &dst_win_iter);
@@ -1405,9 +1396,9 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
if (remaining_len) {
loop_len = remaining_len;
if (src_offset == end_src_offset)
- src_window = list_entry_next(src_window, list);
+ src_window = list_next_entry(src_window, list);
if (dst_offset == end_dst_offset)
- dst_window = list_entry_next(dst_window, list);
+ dst_window = list_next_entry(dst_window, list);
src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
@@ -1550,12 +1541,12 @@ static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
end_dst_offset = dst_window->offset +
(dst_window->nr_pages << PAGE_SHIFT);
if (src_offset == end_src_offset) {
- src_window = list_entry_next(src_window, list);
+ src_window = list_next_entry(src_window, list);
scif_init_window_iter(src_window,
&src_win_iter);
}
if (dst_offset == end_dst_offset) {
- dst_window = list_entry_next(dst_window, list);
+ dst_window = list_next_entry(dst_window, list);
scif_init_window_iter(dst_window,
&dst_win_iter);
}
@@ -1730,7 +1721,7 @@ static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
mutex_lock(&ep->rma_info.mmn_lock);
mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
if (!mmn)
- scif_add_mmu_notifier(current->mm, ep);
+ mmn = scif_add_mmu_notifier(current->mm, ep);
mutex_unlock(&ep->rma_info.mmn_lock);
if (IS_ERR(mmn)) {
scif_put_peer_dev(spdev);
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 8310b4dbff06..e0203b1a20fd 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1394,8 +1394,6 @@ retry:
}
pinned_pages->nr_pages = get_user_pages(
- current,
- mm,
(u64)addr,
nr_pages,
!!(prot & SCIF_PROT_WRITE),
@@ -1511,7 +1509,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
if ((map_flags & SCIF_MAP_FIXED) &&
((ALIGN(offset, PAGE_SIZE) != offset) ||
(offset < 0) ||
- (offset + (off_t)len < offset)))
+ (len > LONG_MAX - offset)))
return -EINVAL;
might_sleep();
@@ -1614,7 +1612,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
if ((map_flags & SCIF_MAP_FIXED) &&
((ALIGN(offset, PAGE_SIZE) != offset) ||
(offset < 0) ||
- (offset + (off_t)len < offset)))
+ (len > LONG_MAX - offset)))
return -EINVAL;
/* Unsupported protection requested */
@@ -1732,7 +1730,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
/* Offset is not page aligned or offset+len wraps around */
if ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset + (off_t)len < offset))
+ (offset < 0) ||
+ (len > LONG_MAX - offset))
return -EINVAL;
err = scif_verify_epd(ep);
diff --git a/drivers/misc/mic/vop/Makefile b/drivers/misc/mic/vop/Makefile
new file mode 100644
index 000000000000..78819c8999f1
--- /dev/null
+++ b/drivers/misc/mic/vop/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile - Intel MIC Linux driver.
+# Copyright(c) 2016, Intel Corporation.
+#
+obj-$(CONFIG_VOP) := vop.o
+
+vop-objs += vop_main.o
+vop-objs += vop_debugfs.o
+vop-objs += vop_vringh.o
diff --git a/drivers/misc/mic/vop/vop_debugfs.c b/drivers/misc/mic/vop/vop_debugfs.c
new file mode 100644
index 000000000000..ab43884e5cd7
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_debugfs.c
@@ -0,0 +1,232 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "vop_main.h"
+
+static int vop_dp_show(struct seq_file *s, void *pos)
+{
+ struct mic_device_desc *d;
+ struct mic_device_ctrl *dc;
+ struct mic_vqconfig *vqconfig;
+ __u32 *features;
+ __u8 *config;
+ struct vop_info *vi = s->private;
+ struct vop_device *vpdev = vi->vpdev;
+ struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+ int j, k;
+
+ seq_printf(s, "Bootparam: magic 0x%x\n",
+ bootparam->magic);
+ seq_printf(s, "Bootparam: h2c_config_db %d\n",
+ bootparam->h2c_config_db);
+ seq_printf(s, "Bootparam: node_id %d\n",
+ bootparam->node_id);
+ seq_printf(s, "Bootparam: c2h_scif_db %d\n",
+ bootparam->c2h_scif_db);
+ seq_printf(s, "Bootparam: h2c_scif_db %d\n",
+ bootparam->h2c_scif_db);
+ seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
+ bootparam->scif_host_dma_addr);
+ seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
+ bootparam->scif_card_dma_addr);
+
+ for (j = sizeof(*bootparam);
+ j < MIC_DP_SIZE; j += mic_total_desc_size(d)) {
+ d = (void *)bootparam + j;
+ dc = (void *)d + mic_aligned_desc_size(d);
+
+ /* end of list */
+ if (d->type == 0)
+ break;
+
+ if (d->type == -1)
+ continue;
+
+ seq_printf(s, "Type %d ", d->type);
+ seq_printf(s, "Num VQ %d ", d->num_vq);
+ seq_printf(s, "Feature Len %d\n", d->feature_len);
+ seq_printf(s, "Config Len %d ", d->config_len);
+ seq_printf(s, "Shutdown Status %d\n", d->status);
+
+ for (k = 0; k < d->num_vq; k++) {
+ vqconfig = mic_vq_config(d) + k;
+ seq_printf(s, "vqconfig[%d]: ", k);
+ seq_printf(s, "address 0x%llx ",
+ vqconfig->address);
+ seq_printf(s, "num %d ", vqconfig->num);
+ seq_printf(s, "used address 0x%llx\n",
+ vqconfig->used_address);
+ }
+
+ features = (__u32 *)mic_vq_features(d);
+ seq_printf(s, "Features: Host 0x%x ", features[0]);
+ seq_printf(s, "Guest 0x%x\n", features[1]);
+
+ config = mic_vq_configspace(d);
+ for (k = 0; k < d->config_len; k++)
+ seq_printf(s, "config[%d]=%d\n", k, config[k]);
+
+ seq_puts(s, "Device control:\n");
+ seq_printf(s, "Config Change %d ", dc->config_change);
+ seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
+ seq_printf(s, "Guest Ack %d ", dc->guest_ack);
+ seq_printf(s, "Host ack %d\n", dc->host_ack);
+ seq_printf(s, "Used address updated %d ",
+ dc->used_address_updated);
+ seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
+ seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
+ seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
+ }
+ return 0;
+}
+
+static int vop_dp_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vop_dp_show, inode->i_private);
+}
+
+static int vop_dp_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations dp_ops = {
+ .owner = THIS_MODULE,
+ .open = vop_dp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = vop_dp_debug_release
+};
+
+static int vop_vdev_info_show(struct seq_file *s, void *unused)
+{
+ struct vop_info *vi = s->private;
+ struct list_head *pos, *tmp;
+ struct vop_vdev *vdev;
+ int i, j;
+
+ mutex_lock(&vi->vop_mutex);
+ list_for_each_safe(pos, tmp, &vi->vdev_list) {
+ vdev = list_entry(pos, struct vop_vdev, list);
+ seq_printf(s, "VDEV type %d state %s in %ld out %ld in_dma %ld out_dma %ld\n",
+ vdev->virtio_id,
+ vop_vdevup(vdev) ? "UP" : "DOWN",
+ vdev->in_bytes,
+ vdev->out_bytes,
+ vdev->in_bytes_dma,
+ vdev->out_bytes_dma);
+ for (i = 0; i < MIC_MAX_VRINGS; i++) {
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+ struct vop_vringh *vvr = &vdev->vvr[i];
+ struct vringh *vrh = &vvr->vrh;
+ int num = vrh->vring.num;
+
+ if (!num)
+ continue;
+ desc = vrh->vring.desc;
+ seq_printf(s, "vring i %d avail_idx %d",
+ i, vvr->vring.info->avail_idx & (num - 1));
+ seq_printf(s, " vring i %d avail_idx %d\n",
+ i, vvr->vring.info->avail_idx);
+ seq_printf(s, "vrh i %d weak_barriers %d",
+ i, vrh->weak_barriers);
+ seq_printf(s, " last_avail_idx %d last_used_idx %d",
+ vrh->last_avail_idx, vrh->last_used_idx);
+ seq_printf(s, " completed %d\n", vrh->completed);
+ for (j = 0; j < num; j++) {
+ seq_printf(s, "desc[%d] addr 0x%llx len %d",
+ j, desc->addr, desc->len);
+ seq_printf(s, " flags 0x%x next %d\n",
+ desc->flags, desc->next);
+ desc++;
+ }
+ avail = vrh->vring.avail;
+ seq_printf(s, "avail flags 0x%x idx %d\n",
+ vringh16_to_cpu(vrh, avail->flags),
+ vringh16_to_cpu(vrh,
+ avail->idx) & (num - 1));
+ seq_printf(s, "avail flags 0x%x idx %d\n",
+ vringh16_to_cpu(vrh, avail->flags),
+ vringh16_to_cpu(vrh, avail->idx));
+ for (j = 0; j < num; j++)
+ seq_printf(s, "avail ring[%d] %d\n",
+ j, avail->ring[j]);
+ used = vrh->vring.used;
+ seq_printf(s, "used flags 0x%x idx %d\n",
+ vringh16_to_cpu(vrh, used->flags),
+ vringh16_to_cpu(vrh, used->idx) & (num - 1));
+ seq_printf(s, "used flags 0x%x idx %d\n",
+ vringh16_to_cpu(vrh, used->flags),
+ vringh16_to_cpu(vrh, used->idx));
+ for (j = 0; j < num; j++)
+ seq_printf(s, "used ring[%d] id %d len %d\n",
+ j, vringh32_to_cpu(vrh,
+ used->ring[j].id),
+ vringh32_to_cpu(vrh,
+ used->ring[j].len));
+ }
+ }
+ mutex_unlock(&vi->vop_mutex);
+
+ return 0;
+}
+
+static int vop_vdev_info_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vop_vdev_info_show, inode->i_private);
+}
+
+static int vop_vdev_info_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations vdev_info_ops = {
+ .owner = THIS_MODULE,
+ .open = vop_vdev_info_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = vop_vdev_info_debug_release
+};
+
+void vop_init_debugfs(struct vop_info *vi)
+{
+ char name[16];
+
+ snprintf(name, sizeof(name), "%s%d", KBUILD_MODNAME, vi->vpdev->dnode);
+ vi->dbg = debugfs_create_dir(name, NULL);
+ if (!vi->dbg) {
+ pr_err("can't create debugfs dir vop\n");
+ return;
+ }
+ debugfs_create_file("dp", 0444, vi->dbg, vi, &dp_ops);
+ debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vdev_info_ops);
+}
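+
+/*
+ * A quick usage note: with the name built above (KBUILD_MODNAME plus
+ * the node id, e.g. "vop0"), and assuming debugfs is mounted in the
+ * usual place, the two entries show up as:
+ *
+ *	/sys/kernel/debug/vop0/dp
+ *	/sys/kernel/debug/vop0/vdev_info
+ */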
+
+void vop_exit_debugfs(struct vop_info *vi)
+{
+ debugfs_remove_recursive(vi->dbg);
+}
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
new file mode 100644
index 000000000000..1a2b67f3183d
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -0,0 +1,755 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Adapted from:
+ *
+ * virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+
+#include "vop_main.h"
+
+#define VOP_MAX_VRINGS 4
+
+/*
+ * _vop_vdev - Allocated per virtio device instance injected by the peer.
+ *
+ * @vdev: Virtio device
+ * @desc: Virtio device page descriptor
+ * @dc: Virtio device control
+ * @vpdev: VOP device which is the parent for this virtio device
+ * @vr: Buffer for accessing the VRING
+ * @used: Buffer for used
+ * @used_size: Size of the used buffer
+ * @reset_done: Track whether VOP reset is complete
+ * @virtio_cookie: Cookie returned upon requesting an interrupt
+ * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
+ * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
+ * @dnode: The destination node
+ */
+struct _vop_vdev {
+ struct virtio_device vdev;
+ struct mic_device_desc __iomem *desc;
+ struct mic_device_ctrl __iomem *dc;
+ struct vop_device *vpdev;
+ void __iomem *vr[VOP_MAX_VRINGS];
+ dma_addr_t used[VOP_MAX_VRINGS];
+ int used_size[VOP_MAX_VRINGS];
+ struct completion reset_done;
+ struct mic_irq *virtio_cookie;
+ int c2h_vdev_db;
+ int h2c_vdev_db;
+ int dnode;
+};
+
+#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)
+
+#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)
+
+/* Helper API to obtain the parent of the virtio device */
+static inline struct device *_vop_dev(struct _vop_vdev *vdev)
+{
+ return vdev->vdev.dev.parent;
+}
+
+static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
+{
+ return sizeof(*desc)
+ + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
+ + ioread8(&desc->feature_len) * 2
+ + ioread8(&desc->config_len);
+}
+
+static inline struct mic_vqconfig __iomem *
+_vop_vq_config(struct mic_device_desc __iomem *desc)
+{
+ return (struct mic_vqconfig __iomem *)(desc + 1);
+}
+
+static inline u8 __iomem *
+_vop_vq_features(struct mic_device_desc __iomem *desc)
+{
+ return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
+}
+
+static inline u8 __iomem *
+_vop_vq_configspace(struct mic_device_desc __iomem *desc)
+{
+ return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
+}
+
+static inline unsigned
+_vop_total_desc_size(struct mic_device_desc __iomem *desc)
+{
+ return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
+}
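+
+/*
+ * For reference, the accessors above imply this layout for one device
+ * page entry (a sketch of the on-page format, not a struct definition):
+ *
+ *	struct mic_device_desc         header: type, num_vq, lengths
+ *	struct mic_vqconfig[num_vq]    one entry per virtqueue
+ *	u8 features[2 * feature_len]   device bits, then accepted bits
+ *	u8 config[config_len]          virtio config space
+ *	padding to an 8 byte boundary
+ *	struct mic_device_ctrl         host/guest handshake fields
+ */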
+
+/* This gets the device's feature bits. */
+static u64 vop_get_features(struct virtio_device *vdev)
+{
+ unsigned int i, bits;
+ u32 features = 0;
+ struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+ u8 __iomem *in_features = _vop_vq_features(desc);
+ int feature_len = ioread8(&desc->feature_len);
+
+ bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
+ for (i = 0; i < bits; i++)
+ if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
+ features |= BIT(i);
+
+ return features;
+}
+
+static int vop_finalize_features(struct virtio_device *vdev)
+{
+ unsigned int i, bits;
+ struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+ u8 feature_len = ioread8(&desc->feature_len);
+ /* Second half of bitmap is features we accept. */
+ u8 __iomem *out_features =
+ _vop_vq_features(desc) + feature_len;
+
+ /* Give virtio_ring a chance to accept features. */
+ vring_transport_features(vdev);
+
+ memset_io(out_features, 0, feature_len);
+ bits = min_t(unsigned, feature_len,
+ sizeof(vdev->features)) * 8;
+ for (i = 0; i < bits; i++) {
+ if (__virtio_test_bit(vdev, i))
+ iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
+ &out_features[i / 8]);
+ }
+ return 0;
+}
+
+/*
+ * Reading and writing elements in config space
+ */
+static void vop_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned len)
+{
+ struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+
+ if (offset + len > ioread8(&desc->config_len))
+ return;
+ memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
+}
+
+static void vop_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned len)
+{
+ struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+
+ if (offset + len > ioread8(&desc->config_len))
+ return;
+ memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access the status
+ * field of the device descriptor. set_status also interrupts the host
+ * to tell about status changes.
+ */
+static u8 vop_get_status(struct virtio_device *vdev)
+{
+ return ioread8(&to_vopvdev(vdev)->desc->status);
+}
+
+static void vop_set_status(struct virtio_device *dev, u8 status)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct vop_device *vpdev = vdev->vpdev;
+
+ if (!status)
+ return;
+ iowrite8(status, &vdev->desc->status);
+ vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+}
+
+/* Inform host on a virtio device reset and wait for ack from host */
+static void vop_reset_inform_host(struct virtio_device *dev)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct mic_device_ctrl __iomem *dc = vdev->dc;
+ struct vop_device *vpdev = vdev->vpdev;
+ int retry;
+
+ iowrite8(0, &dc->host_ack);
+ iowrite8(1, &dc->vdev_reset);
+ vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+
+ /* Wait till host completes all card accesses and acks the reset */
+ for (retry = 100; retry--;) {
+ if (ioread8(&dc->host_ack))
+ break;
+ msleep(100);
+ }
+
+ dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
+
+ /* Reset status to 0 in case we timed out */
+ iowrite8(0, &vdev->desc->status);
+}
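+
+/*
+ * Timing note for the handshake above: 100 polls of msleep(100) bound
+ * the wait for host_ack at roughly 10 seconds before the reset is
+ * forced through by clearing the status byte.
+ */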
+
+static void vop_reset(struct virtio_device *dev)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+
+ dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
+ __func__, dev->id.device);
+
+ vop_reset_inform_host(dev);
+ complete_all(&vdev->reset_done);
+}
+
+/*
+ * The virtio_ring code calls this API when it wants to notify the Host.
+ */
+static bool vop_notify(struct virtqueue *vq)
+{
+ struct _vop_vdev *vdev = vq->priv;
+ struct vop_device *vpdev = vdev->vpdev;
+
+ vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+ return true;
+}
+
+static void vop_del_vq(struct virtqueue *vq, int n)
+{
+ struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
+ struct vring *vr = (struct vring *)(vq + 1);
+ struct vop_device *vpdev = vdev->vpdev;
+
+ dma_unmap_single(&vpdev->dev, vdev->used[n],
+ vdev->used_size[n], DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
+ vring_del_virtqueue(vq);
+ vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
+ vdev->vr[n] = NULL;
+}
+
+static void vop_del_vqs(struct virtio_device *dev)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct virtqueue *vq, *n;
+ int idx = 0;
+
+ dev_dbg(_vop_dev(vdev), "%s\n", __func__);
+
+ list_for_each_entry_safe(vq, n, &dev->vqs, list)
+ vop_del_vq(vq, idx++);
+}
+
+/*
+ * This routine assigns vrings allocated in host/IO memory. Code in
+ * virtio_ring.c, however, continues to access this IO memory as if it were
+ * local memory, without IO accessors.
+ */
+static struct virtqueue *vop_find_vq(struct virtio_device *dev,
+ unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct vop_device *vpdev = vdev->vpdev;
+ struct mic_vqconfig __iomem *vqconfig;
+ struct mic_vqconfig config;
+ struct virtqueue *vq;
+ void __iomem *va;
+ struct _mic_vring_info __iomem *info;
+ void *used;
+ int vr_size, _vr_size, err, magic;
+ struct vring *vr;
+ u8 type = ioread8(&vdev->desc->type);
+
+ if (index >= ioread8(&vdev->desc->num_vq))
+ return ERR_PTR(-ENOENT);
+
+ if (!name)
+ return ERR_PTR(-ENOENT);
+
+ /* First assign the vrings allocated in host memory */
+ vqconfig = _vop_vq_config(vdev->desc) + index;
+ memcpy_fromio(&config, vqconfig, sizeof(config));
+ _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
+ vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
+ va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
+ vr_size);
+ if (!va)
+ return ERR_PTR(-ENOMEM);
+ vdev->vr[index] = va;
+ memset_io(va, 0x0, _vr_size);
+ vq = vring_new_virtqueue(
+ index,
+ le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
+ dev,
+ false,
+ (void __force *)va, vop_notify, callback, name);
+ if (!vq) {
+ err = -ENOMEM;
+ goto unmap;
+ }
+ info = va + _vr_size;
+ magic = ioread32(&info->magic);
+
+ if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
+ err = -EIO;
+ goto unmap;
+ }
+
+ /* Allocate and reassign used ring now */
+ vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
+ sizeof(struct vring_used_elem) *
+ le16_to_cpu(config.num));
+ used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(vdev->used_size[index]));
+ if (!used) {
+ err = -ENOMEM;
+ dev_err(_vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto del_vq;
+ }
+ vdev->used[index] = dma_map_single(&vpdev->dev, used,
+ vdev->used_size[index],
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
+ err = -ENOMEM;
+ dev_err(_vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto free_used;
+ }
+ writeq(vdev->used[index], &vqconfig->used_address);
+ /*
+ * To reassign the used ring here we are directly accessing
+ * struct vring_virtqueue which is a private data structure
+ * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
+ * vring_new_virtqueue() would ensure that
+ * (&vq->vring == (struct vring *) (&vq->vq + 1));
+ */
+ vr = (struct vring *)(vq + 1);
+ vr->used = used;
+
+ vq->priv = vdev;
+ return vq;
+free_used:
+ free_pages((unsigned long)used,
+ get_order(vdev->used_size[index]));
+del_vq:
+ vring_del_virtqueue(vq);
+unmap:
+ vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
+ return ERR_PTR(err);
+}
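+
+/*
+ * The used ring reassignment in vop_find_vq() assumes the private
+ * layout of struct vring_virtqueue in virtio_ring.c, which at the time
+ * of writing begins:
+ *
+ *	struct vring_virtqueue {
+ *		struct virtqueue vq;
+ *		struct vring vring;
+ *		...
+ *	};
+ *
+ * so (struct vring *)(vq + 1) addresses the embedded vring; any layout
+ * change there would break the cast silently, hence the suggested
+ * BUILD_BUG_ON() in vring_new_virtqueue().
+ */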
+
+static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[])
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct vop_device *vpdev = vdev->vpdev;
+ struct mic_device_ctrl __iomem *dc = vdev->dc;
+ int i, err, retry;
+
+ /* We must have this many virtqueues. */
+ if (nvqs > ioread8(&vdev->desc->num_vq))
+ return -ENOENT;
+
+ for (i = 0; i < nvqs; ++i) {
+ dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
+ __func__, i, names[i]);
+ vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+
+ iowrite8(1, &dc->used_address_updated);
+ /*
+ * Send an interrupt to the host to inform it that used
+ * rings have been re-assigned.
+ */
+ vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+ for (retry = 100; --retry;) {
+ if (!ioread8(&dc->used_address_updated))
+ break;
+ msleep(100);
+ }
+
+ dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
+ if (!retry) {
+ err = -ENODEV;
+ goto error;
+ }
+
+ return 0;
+error:
+ vop_del_vqs(dev);
+ return err;
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops vop_vq_config_ops = {
+ .get_features = vop_get_features,
+ .finalize_features = vop_finalize_features,
+ .get = vop_get,
+ .set = vop_set,
+ .get_status = vop_get_status,
+ .set_status = vop_set_status,
+ .reset = vop_reset,
+ .find_vqs = vop_find_vqs,
+ .del_vqs = vop_del_vqs,
+};
+
+static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
+{
+ struct _vop_vdev *vdev = data;
+ struct vop_device *vpdev = vdev->vpdev;
+ struct virtqueue *vq;
+
+ vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
+ list_for_each_entry(vq, &vdev->vdev.vqs, list)
+ vring_interrupt(0, vq);
+
+ return IRQ_HANDLED;
+}
+
+static void vop_virtio_release_dev(struct device *_d)
+{
+ /*
+ * No need for a release method similar to virtio PCI.
+ * Provide an empty one to avoid getting a warning from core.
+ */
+}
+
+/*
+ * Adds a new device and registers it with virtio;
+ * appropriate drivers are loaded by the device model.
+ */
+static int _vop_add_device(struct mic_device_desc __iomem *d,
+ unsigned int offset, struct vop_device *vpdev,
+ int dnode)
+{
+ struct _vop_vdev *vdev;
+ int ret;
+ u8 type = ioread8(&d->type);
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev)
+ return -ENOMEM;
+
+ vdev->vpdev = vpdev;
+ vdev->vdev.dev.parent = &vpdev->dev;
+ vdev->vdev.dev.release = vop_virtio_release_dev;
+ vdev->vdev.id.device = type;
+ vdev->vdev.config = &vop_vq_config_ops;
+ vdev->desc = d;
+ vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
+ vdev->dnode = dnode;
+ vdev->vdev.priv = (void *)(u64)dnode;
+ init_completion(&vdev->reset_done);
+
+ vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
+ vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
+ vop_virtio_intr_handler, "virtio intr",
+ vdev, vdev->h2c_vdev_db);
+ if (IS_ERR(vdev->virtio_cookie)) {
+ ret = PTR_ERR(vdev->virtio_cookie);
+ goto kfree;
+ }
+ iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
+ vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);
+
+ ret = register_virtio_device(&vdev->vdev);
+ if (ret) {
+ dev_err(_vop_dev(vdev),
+ "Failed to register vop device %u type %u\n",
+ offset, type);
+ goto free_irq;
+ }
+ writeq((u64)vdev, &vdev->dc->vdev);
+ dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
+ __func__, offset, type, vdev);
+
+ return 0;
+
+free_irq:
+ vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+kfree:
+ kfree(vdev);
+ return ret;
+}
+
+/*
+ * match for a vop device with a specific desc pointer
+ */
+static int vop_match_desc(struct device *dev, void *data)
+{
+ struct virtio_device *_dev = dev_to_virtio(dev);
+ struct _vop_vdev *vdev = to_vopvdev(_dev);
+
+ return vdev->desc == (void __iomem *)data;
+}
+
+static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
+ unsigned int offset,
+ struct vop_device *vpdev)
+{
+ struct mic_device_ctrl __iomem *dc
+ = (void __iomem *)d + _vop_aligned_desc_size(d);
+ struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+
+ if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
+ return;
+
+ dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
+ virtio_config_changed(&vdev->vdev);
+ iowrite8(1, &dc->guest_ack);
+}
+
+/*
+ * removes a virtio device if a hot remove event has been
+ * requested by the host.
+ */
+static int _vop_remove_device(struct mic_device_desc __iomem *d,
+ unsigned int offset, struct vop_device *vpdev)
+{
+ struct mic_device_ctrl __iomem *dc
+ = (void __iomem *)d + _vop_aligned_desc_size(d);
+ struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+ u8 status;
+ int ret = -1;
+
+ if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+ dev_dbg(&vpdev->dev,
+ "%s %d config_change %d type %d vdev %p\n",
+ __func__, __LINE__,
+ ioread8(&dc->config_change), ioread8(&d->type), vdev);
+ status = ioread8(&d->status);
+ reinit_completion(&vdev->reset_done);
+ unregister_virtio_device(&vdev->vdev);
+ vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+ iowrite8(-1, &dc->h2c_vdev_db);
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+ wait_for_completion(&vdev->reset_done);
+ kfree(vdev);
+ iowrite8(1, &dc->guest_ack);
+ dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
+ __func__, __LINE__, ioread8(&dc->guest_ack));
+ iowrite8(-1, &d->type);
+ ret = 0;
+ }
+ return ret;
+}
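+
+/*
+ * The hot remove handshake, as implemented above: the host marks the
+ * descriptor with MIC_VIRTIO_PARAM_DEV_REMOVE, the guest unregisters
+ * the virtio device, waits for the driver-initiated reset if the
+ * device was running (VIRTIO_CONFIG_S_DRIVER_OK), acks via guest_ack
+ * and finally poisons the descriptor type so the slot can be reused.
+ */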
+
+#define REMOVE_DEVICES true
+
+static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
+ bool remove, int dnode)
+{
+ s8 type;
+ unsigned int i;
+ struct mic_device_desc __iomem *d;
+ struct mic_device_ctrl __iomem *dc;
+ struct device *dev;
+ int ret;
+
+ for (i = sizeof(struct mic_bootparam);
+ i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
+ d = dp + i;
+ dc = (void __iomem *)d + _vop_aligned_desc_size(d);
+ /*
+ * This read barrier is paired with the corresponding write
+ * barrier on the host which is inserted before adding or
+ * removing a virtio device descriptor, by updating the type.
+ */
+ rmb();
+ type = ioread8(&d->type);
+
+ /* end of list */
+ if (type == 0)
+ break;
+
+ if (type == -1)
+ continue;
+
+ /* device already exists */
+ dev = device_find_child(&vpdev->dev, (void __force *)d,
+ vop_match_desc);
+ if (dev) {
+ if (remove)
+ iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
+ &dc->config_change);
+ put_device(dev);
+ _vop_handle_config_change(d, i, vpdev);
+ ret = _vop_remove_device(d, i, vpdev);
+ if (remove) {
+ iowrite8(0, &dc->config_change);
+ iowrite8(0, &dc->guest_ack);
+ }
+ continue;
+ }
+
+ /* new device */
+ dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
+ __func__, __LINE__, d);
+ if (!remove)
+ _vop_add_device(d, i, vpdev, dnode);
+ }
+}
+
+static void vop_scan_devices(struct vop_info *vi,
+ struct vop_device *vpdev, bool remove)
+{
+ void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);
+
+ if (!dp)
+ return;
+ mutex_lock(&vi->vop_mutex);
+ _vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
+ mutex_unlock(&vi->vop_mutex);
+}
+
+/*
+ * vop_hotplug_devices looks for changes in the device page.
+ */
+static void vop_hotplug_devices(struct work_struct *work)
+{
+ struct vop_info *vi = container_of(work, struct vop_info,
+ hotplug_work);
+
+ vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
+}
+
+/*
+ * Interrupt handler for hot plug/config changes etc.
+ */
+static irqreturn_t vop_extint_handler(int irq, void *data)
+{
+ struct vop_info *vi = data;
+ struct mic_bootparam __iomem *bp;
+ struct vop_device *vpdev = vi->vpdev;
+
+ bp = vpdev->hw_ops->get_remote_dp(vpdev);
+ dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
+ __func__, __LINE__);
+ vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
+ schedule_work(&vi->hotplug_work);
+ return IRQ_HANDLED;
+}
+
+static int vop_driver_probe(struct vop_device *vpdev)
+{
+ struct vop_info *vi;
+ int rc;
+
+ vi = kzalloc(sizeof(*vi), GFP_KERNEL);
+ if (!vi) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ dev_set_drvdata(&vpdev->dev, vi);
+ vi->vpdev = vpdev;
+
+ mutex_init(&vi->vop_mutex);
+ INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
+ if (vpdev->dnode) {
+ rc = vop_host_init(vi);
+ if (rc < 0)
+ goto free;
+ } else {
+ struct mic_bootparam __iomem *bootparam;
+
+ vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);
+
+ vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
+ vi->cookie = vpdev->hw_ops->request_irq(vpdev,
+ vop_extint_handler,
+ "virtio_config_intr",
+ vi, vi->h2c_config_db);
+ if (IS_ERR(vi->cookie)) {
+ rc = PTR_ERR(vi->cookie);
+ goto free;
+ }
+ bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
+ iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
+ }
+ vop_init_debugfs(vi);
+ return 0;
+free:
+ kfree(vi);
+exit:
+ return rc;
+}
+
+static void vop_driver_remove(struct vop_device *vpdev)
+{
+ struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+
+ if (vpdev->dnode) {
+ vop_host_uninit(vi);
+ } else {
+ struct mic_bootparam __iomem *bootparam =
+ vpdev->hw_ops->get_remote_dp(vpdev);
+ if (bootparam)
+ iowrite8(-1, &bootparam->h2c_config_db);
+ vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
+ flush_work(&vi->hotplug_work);
+ vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
+ }
+ vop_exit_debugfs(vi);
+ kfree(vi);
+}
+
+static struct vop_device_id id_table[] = {
+ { VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct vop_driver vop_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = vop_driver_probe,
+ .remove = vop_driver_remove,
+};
+
+module_vop_driver(vop_driver);
+
+MODULE_DEVICE_TABLE(mbus, id_table);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/vop/vop_main.h
index a80631f2790d..ba47ec7a6386 100644
--- a/drivers/misc/mic/host/mic_virtio.h
+++ b/drivers/misc/mic/vop/vop_main.h
@@ -1,7 +1,7 @@
/*
* Intel MIC Platform Software Stack (MPSS)
*
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
@@ -15,14 +15,21 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Intel MIC Host driver.
+ * Intel Virtio Over PCIe (VOP) driver.
*
*/
-#ifndef MIC_VIRTIO_H
-#define MIC_VIRTIO_H
+#ifndef _VOP_MAIN_H_
+#define _VOP_MAIN_H_
+#include <linux/vringh.h>
#include <linux/virtio_config.h>
-#include <linux/mic_ioctl.h>
+#include <linux/virtio.h>
+#include <linux/miscdevice.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+#include "../bus/vop_bus.h"
/*
* Note on endianness.
@@ -39,38 +46,68 @@
* in guest endianness.
*/
+/*
+ * vop_info - Allocated per invocation of VOP probe
+ *
+ * @vpdev: VOP device
+ * @hotplug_work: Handle virtio device creation, deletion and configuration
+ * @cookie: Cookie received upon requesting a virtio configuration interrupt
+ * @h2c_config_db: The doorbell used by the peer to indicate a config change
+ * @vdev_list: List of "active" virtio devices injected in the peer node
+ * @vop_mutex: Synchronize access to the device page as well as serialize
+ * creation/deletion of virtio devices on the peer node
+ * @dp: Peer device page information
+ * @dbg: Debugfs entry
+ * @dma_ch: The DMA channel used by this transport for data transfers.
+ * @name: Name for this transport used in misc device creation.
+ * @miscdev: The misc device registered.
+ */
+struct vop_info {
+ struct vop_device *vpdev;
+ struct work_struct hotplug_work;
+ struct mic_irq *cookie;
+ int h2c_config_db;
+ struct list_head vdev_list;
+ struct mutex vop_mutex;
+ void __iomem *dp;
+ struct dentry *dbg;
+ struct dma_chan *dma_ch;
+ char name[16];
+ struct miscdevice miscdev;
+};
+
/**
- * struct mic_vringh - Virtio ring host information.
+ * struct vop_vringh - Virtio ring host information.
*
- * @vring: The MIC vring used for setting up user space mappings.
+ * @vring: The VOP vring used for setting up user space mappings.
* @vrh: The host VRINGH used for accessing the card vrings.
* @riov: The VRINGH read kernel IOV.
* @wiov: The VRINGH write kernel IOV.
+ * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
* @vr_mutex: Mutex for synchronizing access to the VRING.
* @buf: Temporary kernel buffer used to copy in/out data
* from/to the card via DMA.
* @buf_da: dma address of buf.
- * @mvdev: Back pointer to MIC virtio device for vringh_notify(..).
- * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
+ * @vdev: Back pointer to VOP virtio device for vringh_notify(..).
*/
-struct mic_vringh {
+struct vop_vringh {
struct mic_vring vring;
struct vringh vrh;
struct vringh_kiov riov;
struct vringh_kiov wiov;
+ u16 head;
struct mutex vr_mutex;
void *buf;
dma_addr_t buf_da;
- struct mic_vdev *mvdev;
- u16 head;
+ struct vop_vdev *vdev;
};
/**
- * struct mic_vdev - Host information for a card Virtio device.
+ * struct vop_vdev - Host information for a card Virtio device.
*
* @virtio_id - Virtio device id.
* @waitq - Waitqueue to allow ring3 apps to poll.
- * @mdev - Back pointer to host MIC device.
+ * @vpdev - pointer to VOP bus device.
* @poll_wake - Used for waking up threads blocked in poll.
* @out_bytes - Debug stats for number of bytes copied from host to card.
* @in_bytes - Debug stats for number of bytes copied from card to host.
@@ -82,18 +119,23 @@ struct mic_vringh {
* the transfer length did not have the required DMA alignment.
* @tx_dst_unaligned - Debug stats for number of bytes copied where the
* destination address on the card did not have the required DMA alignment.
- * @mvr - Store per VRING data structures.
+ * @vvr - Store per VRING data structures.
* @virtio_bh_work - Work struct used to schedule virtio bottom half handling.
* @dd - Virtio device descriptor.
* @dc - Virtio device control fields.
* @list - List of Virtio devices.
* @virtio_db - The doorbell used by the card to interrupt the host.
* @virtio_cookie - The cookie returned while requesting interrupts.
+ * @vi: Transport information.
+ * @vdev_mutex: Mutex synchronizing virtio device injection,
+ * removal and data transfers.
+ * @destroy: Track if a virtio device is being destroyed.
+ * @deleted: The virtio device has been deleted.
*/
-struct mic_vdev {
+struct vop_vdev {
int virtio_id;
wait_queue_head_t waitq;
- struct mic_device *mdev;
+ struct vop_device *vpdev;
int poll_wake;
unsigned long out_bytes;
unsigned long in_bytes;
@@ -101,55 +143,28 @@ struct mic_vdev {
unsigned long in_bytes_dma;
unsigned long tx_len_unaligned;
unsigned long tx_dst_unaligned;
- struct mic_vringh mvr[MIC_MAX_VRINGS];
+ unsigned long rx_dst_unaligned;
+ struct vop_vringh vvr[MIC_MAX_VRINGS];
struct work_struct virtio_bh_work;
struct mic_device_desc *dd;
struct mic_device_ctrl *dc;
struct list_head list;
int virtio_db;
struct mic_irq *virtio_cookie;
+ struct vop_info *vi;
+ struct mutex vdev_mutex;
+ struct completion destroy;
+ bool deleted;
};
-void mic_virtio_uninit(struct mic_device *mdev);
-int mic_virtio_add_device(struct mic_vdev *mvdev,
- void __user *argp);
-void mic_virtio_del_device(struct mic_vdev *mvdev);
-int mic_virtio_config_change(struct mic_vdev *mvdev,
- void __user *argp);
-int mic_virtio_copy_desc(struct mic_vdev *mvdev,
- struct mic_copy_desc *request);
-void mic_virtio_reset_devices(struct mic_device *mdev);
-void mic_bh_handler(struct work_struct *work);
-
-/* Helper API to obtain the MIC PCIe device */
-static inline struct device *mic_dev(struct mic_vdev *mvdev)
-{
- return &mvdev->mdev->pdev->dev;
-}
-
-/* Helper API to check if a virtio device is initialized */
-static inline int mic_vdev_inited(struct mic_vdev *mvdev)
-{
- /* Device has not been created yet */
- if (!mvdev->dd || !mvdev->dd->type) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EINVAL);
- return -EINVAL;
- }
-
- /* Device has been removed/deleted */
- if (mvdev->dd->type == -1) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -ENODEV);
- return -ENODEV;
- }
-
- return 0;
-}
-
/* Helper API to check if a virtio device is running */
-static inline bool mic_vdevup(struct mic_vdev *mvdev)
+static inline bool vop_vdevup(struct vop_vdev *vdev)
{
- return !!mvdev->dd->status;
+ return !!vdev->dd->status;
}
+
+void vop_init_debugfs(struct vop_info *vi);
+void vop_exit_debugfs(struct vop_info *vi);
+int vop_host_init(struct vop_info *vi);
+void vop_host_uninit(struct vop_info *vi);
#endif
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
new file mode 100644
index 000000000000..88e45234d527
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -0,0 +1,1170 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+#include <linux/mic_ioctl.h>
+#include "vop_main.h"
+
+/* Helper API to obtain the VOP PCIe device */
+static inline struct device *vop_dev(struct vop_vdev *vdev)
+{
+ return vdev->vpdev->dev.parent;
+}
+
+/* Helper API to check if a virtio device is initialized */
+static inline int vop_vdev_inited(struct vop_vdev *vdev)
+{
+ if (!vdev)
+ return -EINVAL;
+ /* Device has not been created yet */
+ if (!vdev->dd || !vdev->dd->type) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, -EINVAL);
+ return -EINVAL;
+ }
+ /* Device has been removed/deleted */
+ if (vdev->dd->type == -1) {
+ dev_dbg(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, -ENODEV);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void _vop_notify(struct vringh *vrh)
+{
+ struct vop_vringh *vvrh = container_of(vrh, struct vop_vringh, vrh);
+ struct vop_vdev *vdev = vvrh->vdev;
+ struct vop_device *vpdev = vdev->vpdev;
+ s8 db = vdev->dc->h2c_vdev_db;
+
+ if (db != -1)
+ vpdev->hw_ops->send_intr(vpdev, db);
+}
+
+static void vop_virtio_init_post(struct vop_vdev *vdev)
+{
+ struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd);
+ struct vop_device *vpdev = vdev->vpdev;
+ int i, used_size;
+
+ for (i = 0; i < vdev->dd->num_vq; i++) {
+ used_size = PAGE_ALIGN(sizeof(u16) * 3 +
+ sizeof(struct vring_used_elem) *
+ le16_to_cpu(vqconfig->num));
+ if (!le64_to_cpu(vqconfig[i].used_address)) {
+ dev_warn(vop_dev(vdev), "used_address zero??\n");
+ continue;
+ }
+ vdev->vvr[i].vrh.vring.used =
+ (void __force *)vpdev->hw_ops->ioremap(
+ vpdev,
+ le64_to_cpu(vqconfig[i].used_address),
+ used_size);
+ }
+
+ vdev->dc->used_address_updated = 0;
+
+ dev_info(vop_dev(vdev), "%s: device type %d LINKUP\n",
+ __func__, vdev->virtio_id);
+}
+
+static inline void vop_virtio_device_reset(struct vop_vdev *vdev)
+{
+ int i;
+
+ dev_dbg(vop_dev(vdev), "%s: status %d device type %d RESET\n",
+ __func__, vdev->dd->status, vdev->virtio_id);
+
+ for (i = 0; i < vdev->dd->num_vq; i++)
+ /*
+ * Avoid lockdep false positive. The + 1 is for the vop
+ * mutex which is held in the reset devices code path.
+ */
+ mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
+
+ /* 0 status means "reset" */
+ vdev->dd->status = 0;
+ vdev->dc->vdev_reset = 0;
+ vdev->dc->host_ack = 1;
+
+ for (i = 0; i < vdev->dd->num_vq; i++) {
+ struct vringh *vrh = &vdev->vvr[i].vrh;
+
+ vdev->vvr[i].vring.info->avail_idx = 0;
+ vrh->completed = 0;
+ vrh->last_avail_idx = 0;
+ vrh->last_used_idx = 0;
+ }
+
+ for (i = 0; i < vdev->dd->num_vq; i++)
+ mutex_unlock(&vdev->vvr[i].vr_mutex);
+}
+
+static void vop_virtio_reset_devices(struct vop_info *vi)
+{
+ struct list_head *pos, *tmp;
+ struct vop_vdev *vdev;
+
+ list_for_each_safe(pos, tmp, &vi->vdev_list) {
+ vdev = list_entry(pos, struct vop_vdev, list);
+ vop_virtio_device_reset(vdev);
+ vdev->poll_wake = 1;
+ wake_up(&vdev->waitq);
+ }
+}
+
+static void vop_bh_handler(struct work_struct *work)
+{
+ struct vop_vdev *vdev = container_of(work, struct vop_vdev,
+ virtio_bh_work);
+
+ if (vdev->dc->used_address_updated)
+ vop_virtio_init_post(vdev);
+
+ if (vdev->dc->vdev_reset)
+ vop_virtio_device_reset(vdev);
+
+ vdev->poll_wake = 1;
+ wake_up(&vdev->waitq);
+}
+
+static irqreturn_t _vop_virtio_intr_handler(int irq, void *data)
+{
+ struct vop_vdev *vdev = data;
+ struct vop_device *vpdev = vdev->vpdev;
+
+ vpdev->hw_ops->ack_interrupt(vpdev, vdev->virtio_db);
+ schedule_work(&vdev->virtio_bh_work);
+ return IRQ_HANDLED;
+}
+
+static int vop_virtio_config_change(struct vop_vdev *vdev, void *argp)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+ int ret = 0, retry, i;
+ struct vop_device *vpdev = vdev->vpdev;
+ struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+ struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+ s8 db = bootparam->h2c_config_db;
+
+ mutex_lock(&vi->vop_mutex);
+ for (i = 0; i < vdev->dd->num_vq; i++)
+ mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
+
+ if (db == -1 || vdev->dd->type == -1) {
+ ret = -EIO;
+ goto exit;
+ }
+
+ memcpy(mic_vq_configspace(vdev->dd), argp, vdev->dd->config_len);
+ vdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
+ vpdev->hw_ops->send_intr(vpdev, db);
+
+ for (retry = 100; retry--;) {
+ ret = wait_event_timeout(wake, vdev->dc->guest_ack,
+ msecs_to_jiffies(100));
+ if (ret)
+ break;
+ }
+
+ dev_dbg(vop_dev(vdev),
+ "%s %d retry: %d\n", __func__, __LINE__, retry);
+ vdev->dc->config_change = 0;
+ vdev->dc->guest_ack = 0;
+exit:
+ for (i = 0; i < vdev->dd->num_vq; i++)
+ mutex_unlock(&vdev->vvr[i].vr_mutex);
+ mutex_unlock(&vi->vop_mutex);
+ return ret;
+}
+
+static int vop_copy_dp_entry(struct vop_vdev *vdev,
+ struct mic_device_desc *argp, __u8 *type,
+ struct mic_device_desc **devpage)
+{
+ struct vop_device *vpdev = vdev->vpdev;
+ struct mic_device_desc *devp;
+ struct mic_vqconfig *vqconfig;
+ int ret = 0, i;
+ bool slot_found = false;
+
+ vqconfig = mic_vq_config(argp);
+ for (i = 0; i < argp->num_vq; i++) {
+ if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
+ ret = -EINVAL;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto exit;
+ }
+ }
+
+ /* Find the first free device page entry */
+ for (i = sizeof(struct mic_bootparam);
+ i < MIC_DP_SIZE - mic_total_desc_size(argp);
+ i += mic_total_desc_size(devp)) {
+ devp = vpdev->hw_ops->get_dp(vpdev) + i;
+ if (devp->type == 0 || devp->type == -1) {
+ slot_found = true;
+ break;
+ }
+ }
+ if (!slot_found) {
+ ret = -EINVAL;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto exit;
+ }
+ /*
+ * Save off the type before doing the memcpy. Type will be set in the
+ * end after completing all initialization for the new device.
+ */
+ *type = argp->type;
+ argp->type = 0;
+ memcpy(devp, argp, mic_desc_size(argp));
+
+ *devpage = devp;
+exit:
+ return ret;
+}
+
+static void vop_init_device_ctrl(struct vop_vdev *vdev,
+ struct mic_device_desc *devpage)
+{
+ struct mic_device_ctrl *dc;
+
+ dc = (void *)devpage + mic_aligned_desc_size(devpage);
+
+ dc->config_change = 0;
+ dc->guest_ack = 0;
+ dc->vdev_reset = 0;
+ dc->host_ack = 0;
+ dc->used_address_updated = 0;
+ dc->c2h_vdev_db = -1;
+ dc->h2c_vdev_db = -1;
+ vdev->dc = dc;
+}
+
+static int vop_virtio_add_device(struct vop_vdev *vdev,
+ struct mic_device_desc *argp)
+{
+ struct vop_info *vi = vdev->vi;
+ struct vop_device *vpdev = vi->vpdev;
+ struct mic_device_desc *dd = NULL;
+ struct mic_vqconfig *vqconfig;
+ int vr_size, i, j, ret;
+ u8 type = 0;
+ s8 db = -1;
+ char irqname[16];
+ struct mic_bootparam *bootparam;
+ u16 num;
+ dma_addr_t vr_addr;
+
+ bootparam = vpdev->hw_ops->get_dp(vpdev);
+ init_waitqueue_head(&vdev->waitq);
+ INIT_LIST_HEAD(&vdev->list);
+ vdev->vpdev = vpdev;
+
+ ret = vop_copy_dp_entry(vdev, argp, &type, &dd);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ kfree(vdev);
+ return ret;
+ }
+
+ vop_init_device_ctrl(vdev, dd);
+
+ vdev->dd = dd;
+ vdev->virtio_id = type;
+ vqconfig = mic_vq_config(dd);
+ INIT_WORK(&vdev->virtio_bh_work, vop_bh_handler);
+
+ for (i = 0; i < dd->num_vq; i++) {
+ struct vop_vringh *vvr = &vdev->vvr[i];
+ struct mic_vring *vr = &vdev->vvr[i].vring;
+
+ num = le16_to_cpu(vqconfig[i].num);
+ mutex_init(&vvr->vr_mutex);
+ vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
+ sizeof(struct _mic_vring_info));
+ vr->va = (void *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(vr_size));
+ if (!vr->va) {
+ ret = -ENOMEM;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto err;
+ }
+ vr->len = vr_size;
+ vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
+ vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
+ vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&vpdev->dev, vr_addr)) {
+ free_pages((unsigned long)vr->va, get_order(vr_size));
+ ret = -ENOMEM;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto err;
+ }
+ vqconfig[i].address = cpu_to_le64(vr_addr);
+
+ vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
+ ret = vringh_init_kern(&vvr->vrh,
+ *(u32 *)mic_vq_features(vdev->dd),
+ num, false, vr->vr.desc, vr->vr.avail,
+ vr->vr.used);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto err;
+ }
+ vringh_kiov_init(&vvr->riov, NULL, 0);
+ vringh_kiov_init(&vvr->wiov, NULL, 0);
+ vvr->head = USHRT_MAX;
+ vvr->vdev = vdev;
+ vvr->vrh.notify = _vop_notify;
+ dev_dbg(&vpdev->dev,
+ "%s %d index %d va %p info %p vr_size 0x%x\n",
+ __func__, __LINE__, i, vr->va, vr->info, vr_size);
+ vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(VOP_INT_DMA_BUF_SIZE));
+ vvr->buf_da = dma_map_single(&vpdev->dev,
+ vvr->buf, VOP_INT_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
+ vdev->virtio_id);
+ vdev->virtio_db = vpdev->hw_ops->next_db(vpdev);
+ vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
+ _vop_virtio_intr_handler, irqname, vdev,
+ vdev->virtio_db);
+ if (IS_ERR(vdev->virtio_cookie)) {
+ ret = PTR_ERR(vdev->virtio_cookie);
+ dev_dbg(&vpdev->dev, "request irq failed\n");
+ goto err;
+ }
+
+ vdev->dc->c2h_vdev_db = vdev->virtio_db;
+
+ /*
+ * Order the type update with previous stores. This write barrier
+ * is paired with the corresponding read barrier before the uncached
+ * system memory read of the type, on the card while scanning the
+ * device page.
+ */
+ smp_wmb();
+ dd->type = type;
+ argp->type = type;
+
+ if (bootparam) {
+ db = bootparam->h2c_config_db;
+ if (db != -1)
+ vpdev->hw_ops->send_intr(vpdev, db);
+ }
+ dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db);
+ return 0;
+err:
+ vqconfig = mic_vq_config(dd);
+ for (j = 0; j < i; j++) {
+ struct vop_vringh *vvr = &vdev->vvr[j];
+
+ dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
+ vvr->vring.len, DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vvr->vring.va,
+ get_order(vvr->vring.len));
+ }
+ return ret;
+}
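+
+/*
+ * Each virtqueue set up above lives in one DMA-mapped allocation:
+ *
+ *	vring (desc/avail/used, MIC_VIRTIO_RING_ALIGN aligned)
+ *	struct _mic_vring_info (magic plus an avail_idx mirror)
+ *
+ * The bus address is published to the card via vqconfig[i].address,
+ * and a per-ring bounce buffer of VOP_INT_DMA_BUF_SIZE backs the DMA
+ * copy paths below.
+ */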
+
+static void vop_dev_remove(struct vop_info *pvi, struct mic_device_ctrl *devp,
+ struct vop_device *vpdev)
+{
+ struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+ s8 db;
+ int ret, retry;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+
+ devp->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
+ db = bootparam->h2c_config_db;
+ if (db != -1)
+ vpdev->hw_ops->send_intr(vpdev, db);
+ else
+ goto done;
+ for (retry = 15; retry--;) {
+ ret = wait_event_timeout(wake, devp->guest_ack,
+ msecs_to_jiffies(1000));
+ if (ret)
+ break;
+ }
+done:
+ devp->config_change = 0;
+ devp->guest_ack = 0;
+}
+
+static void vop_virtio_del_device(struct vop_vdev *vdev)
+{
+ struct vop_info *vi = vdev->vi;
+ struct vop_device *vpdev = vdev->vpdev;
+ int i;
+ struct mic_vqconfig *vqconfig;
+ struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+
+ if (!bootparam)
+ goto skip_hot_remove;
+ vop_dev_remove(vi, vdev->dc, vpdev);
+skip_hot_remove:
+ vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+ flush_work(&vdev->virtio_bh_work);
+ vqconfig = mic_vq_config(vdev->dd);
+ for (i = 0; i < vdev->dd->num_vq; i++) {
+ struct vop_vringh *vvr = &vdev->vvr[i];
+
+ dma_unmap_single(&vpdev->dev,
+ vvr->buf_da, VOP_INT_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vvr->buf,
+ get_order(VOP_INT_DMA_BUF_SIZE));
+ vringh_kiov_cleanup(&vvr->riov);
+ vringh_kiov_cleanup(&vvr->wiov);
+ dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address),
+ vvr->vring.len, DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vvr->vring.va,
+ get_order(vvr->vring.len));
+ }
+ /*
+ * Order the type update with previous stores. This write barrier
+ * is paired with the corresponding read barrier before the uncached
+ * system memory read of the type, on the card while scanning the
+ * device page.
+ */
+ smp_wmb();
+ vdev->dd->type = -1;
+}
+
+/*
+ * vop_sync_dma - Wrapper for synchronous DMAs.
+ *
+ * @vdev - The VOP virtio device issuing the transfer.
+ * @dst - destination DMA address.
+ * @src - source DMA address.
+ * @len - size of the transfer.
+ *
+ * Return 0 on success, a negative errno on failure.
+ */
+static int vop_sync_dma(struct vop_vdev *vdev, dma_addr_t dst, dma_addr_t src,
+ size_t len)
+{
+ int err = 0;
+ struct dma_device *ddev;
+ struct dma_async_tx_descriptor *tx;
+ struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
+ struct dma_chan *vop_ch = vi->dma_ch;
+
+ if (!vop_ch) {
+ err = -EBUSY;
+ goto error;
+ }
+ ddev = vop_ch->device;
+ tx = ddev->device_prep_dma_memcpy(vop_ch, dst, src, len,
+ DMA_PREP_FENCE);
+ if (!tx) {
+ err = -ENOMEM;
+ goto error;
+ } else {
+ dma_cookie_t cookie;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ err = -ENOMEM;
+ goto error;
+ }
+ dma_async_issue_pending(vop_ch);
+ err = dma_sync_wait(vop_ch, cookie);
+ }
+error:
+ if (err)
+ dev_err(&vi->vpdev->dev, "%s %d err %d\n",
+ __func__, __LINE__, err);
+ return err;
+}
+
+#define VOP_USE_DMA true
+
+/*
+ * Initiates the copies across the PCIe bus from card memory to a user
+ * space buffer. When transfers are done using DMA, source/destination
+ * addresses and transfer length must follow the alignment requirements of
+ * the MIC DMA engine.
+ */
+static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
+ size_t len, u64 daddr, size_t dlen,
+ int vr_idx)
+{
+ struct vop_device *vpdev = vdev->vpdev;
+ void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+ struct vop_vringh *vvr = &vdev->vvr[vr_idx];
+ struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+ size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t dma_offset, partlen;
+ int err;
+
+ if (!VOP_USE_DMA) {
+ if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
+ err = -EFAULT;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ vdev->in_bytes += len;
+ err = 0;
+ goto err;
+ }
+
+ dma_offset = daddr - round_down(daddr, dma_alignment);
+ daddr -= dma_offset;
+ len += dma_offset;
+ /*
+ * X100 uses DMA addresses as seen by the card so adding
+ * the aperture base is not required for DMA. However x200
+ * requires DMA addresses to be an offset into the bar so
+ * add the aperture base for x200.
+ */
+ if (x200)
+ daddr += vpdev->aper->pa;
+ while (len) {
+ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
+ err = vop_sync_dma(vdev, vvr->buf_da, daddr,
+ ALIGN(partlen, dma_alignment));
+ if (err) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ if (copy_to_user(ubuf, vvr->buf + dma_offset,
+ partlen - dma_offset)) {
+ err = -EFAULT;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ daddr += partlen;
+ ubuf += partlen;
+ dbuf += partlen;
+ vdev->in_bytes_dma += partlen;
+ vdev->in_bytes += partlen;
+ len -= partlen;
+ dma_offset = 0;
+ }
+ err = 0;
+err:
+ vpdev->hw_ops->iounmap(vpdev, dbuf);
+ dev_dbg(vop_dev(vdev),
+ "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+ __func__, ubuf, dbuf, len, vr_idx);
+ return err;
+}
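+
+/*
+ * A worked example of the alignment fixup above, assuming a 64 byte
+ * DMA alignment: for daddr 0x1003 and len 0x100, dma_offset is 3, the
+ * DMA runs from 0x1000 for ALIGN(0x103, 64) bytes, and copy_to_user()
+ * skips the first 3 bytes of the bounce buffer so the user sees
+ * exactly the bytes that were asked for.
+ */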
+
+/*
+ * Initiates copies across the PCIe bus from a user space buffer to card
+ * memory. When transfers are done using DMA, source/destination addresses
+ * and transfer length must follow the alignment requirements of the MIC
+ * DMA engine.
+ */
+static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
+ size_t len, u64 daddr, size_t dlen,
+ int vr_idx)
+{
+ struct vop_device *vpdev = vdev->vpdev;
+ void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+ struct vop_vringh *vvr = &vdev->vvr[vr_idx];
+ struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
+ size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t partlen;
+ bool dma = VOP_USE_DMA;
+ int err = 0;
+
+ if (daddr & (dma_alignment - 1)) {
+ vdev->tx_dst_unaligned += len;
+ dma = false;
+ } else if (ALIGN(len, dma_alignment) > dlen) {
+ vdev->tx_len_unaligned += len;
+ dma = false;
+ }
+
+ if (!dma)
+ goto memcpy;
+
+ /*
+ * X100 uses DMA addresses as seen by the card so adding
+ * the aperture base is not required for DMA. However x200
+ * requires DMA addresses to be an offset into the bar so
+ * add the aperture base for x200.
+ */
+ if (x200)
+ daddr += vpdev->aper->pa;
+ while (len) {
+ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
+
+ if (copy_from_user(vvr->buf, ubuf, partlen)) {
+ err = -EFAULT;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ err = vop_sync_dma(vdev, daddr, vvr->buf_da,
+ ALIGN(partlen, dma_alignment));
+ if (err) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ daddr += partlen;
+ ubuf += partlen;
+ dbuf += partlen;
+ vdev->out_bytes_dma += partlen;
+ vdev->out_bytes += partlen;
+ len -= partlen;
+ }
+memcpy:
+ /*
+ * We are copying to IO below and should ideally use something
+ * like copy_from_user_toio(..) if it existed.
+ */
+ if (copy_from_user((void __force *)dbuf, ubuf, len)) {
+ err = -EFAULT;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ vdev->out_bytes += len;
+ err = 0;
+err:
+ vpdev->hw_ops->iounmap(vpdev, dbuf);
+ dev_dbg(vop_dev(vdev),
+ "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+ __func__, ubuf, dbuf, len, vr_idx);
+ return err;
+}
+
+#define MIC_VRINGH_READ true
+
+/* Determine the total number of bytes consumed in a VRINGH KIOV */
+static inline u32 vop_vringh_iov_consumed(struct vringh_kiov *iov)
+{
+ int i;
+ u32 total = iov->consumed;
+
+ for (i = 0; i < iov->i; i++)
+ total += iov->iov[i].iov_len;
+ return total;
+}
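+
+/*
+ * Example: with iov->i == 2, completed elements of 512 and 256 bytes
+ * and 100 bytes consumed from the current element, this returns
+ * 512 + 256 + 100 = 868.
+ */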
+
+/*
+ * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
+ * This API is heavily based on the vringh_iov_xfer(..) implementation
+ * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
+ * and vringh_iov_push_kern(..) directly is because there is no
+ * way to override the VRINGH xfer(..) routines as of v3.10.
+ */
+static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov,
+ void __user *ubuf, size_t len, bool read, int vr_idx,
+ size_t *out_len)
+{
+ int ret = 0;
+ size_t partlen, tot_len = 0;
+
+ while (len && iov->i < iov->used) {
+ struct kvec *kiov = &iov->iov[iov->i];
+
+ partlen = min(kiov->iov_len, len);
+ if (read)
+ ret = vop_virtio_copy_to_user(vdev, ubuf, partlen,
+ (u64)kiov->iov_base,
+ kiov->iov_len,
+ vr_idx);
+ else
+ ret = vop_virtio_copy_from_user(vdev, ubuf, partlen,
+ (u64)kiov->iov_base,
+ kiov->iov_len,
+ vr_idx);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len -= partlen;
+ ubuf += partlen;
+ tot_len += partlen;
+ iov->consumed += partlen;
+ kiov->iov_len -= partlen;
+ kiov->iov_base += partlen;
+ if (!kiov->iov_len) {
+ /* Fix up old iov element then increment. */
+ kiov->iov_len = iov->consumed;
+ kiov->iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
+ }
+ *out_len = tot_len;
+ return ret;
+}
+
+/*
+ * Use the standard VRINGH infrastructure in the kernel to fetch new
+ * descriptors, initiate the copies and update the used ring.
+ */
+static int _vop_virtio_copy(struct vop_vdev *vdev, struct mic_copy_desc *copy)
+{
+ int ret = 0;
+ u32 iovcnt = copy->iovcnt;
+ struct iovec iov;
+ struct iovec __user *u_iov = copy->iov;
+ void __user *ubuf = NULL;
+ struct vop_vringh *vvr = &vdev->vvr[copy->vr_idx];
+ struct vringh_kiov *riov = &vvr->riov;
+ struct vringh_kiov *wiov = &vvr->wiov;
+ struct vringh *vrh = &vvr->vrh;
+ u16 *head = &vvr->head;
+ struct mic_vring *vr = &vvr->vring;
+ size_t len = 0, out_len;
+
+ copy->out_len = 0;
+ /* Fetch a new IOVEC if all previous elements have been processed */
+ if (riov->i == riov->used && wiov->i == wiov->used) {
+ ret = vringh_getdesc_kern(vrh, riov, wiov,
+ head, GFP_KERNEL);
+ /* Check if there are available descriptors */
+ if (ret <= 0)
+ return ret;
+ }
+ while (iovcnt) {
+ if (!len) {
+ /* Copy over a new iovec from user space. */
+ ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
+ if (ret) {
+ ret = -EINVAL;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len = iov.iov_len;
+ ubuf = iov.iov_base;
+ }
+ /* Issue all the read descriptors first */
+ ret = vop_vringh_copy(vdev, riov, ubuf, len,
+ MIC_VRINGH_READ, copy->vr_idx, &out_len);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len -= out_len;
+ ubuf += out_len;
+ copy->out_len += out_len;
+ /* Issue the write descriptors next */
+ ret = vop_vringh_copy(vdev, wiov, ubuf, len,
+ !MIC_VRINGH_READ, copy->vr_idx, &out_len);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len -= out_len;
+ ubuf += out_len;
+ copy->out_len += out_len;
+ if (!len) {
+ /* One user space iovec is now completed */
+ iovcnt--;
+ u_iov++;
+ }
+ /* Exit loop if all elements in KIOVs have been processed. */
+ if (riov->i == riov->used && wiov->i == wiov->used)
+ break;
+ }
+ /*
+ * Update the used ring if a descriptor was available and some data was
+ * copied in/out and the user asked for a used ring update.
+ */
+ if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
+ u32 total = 0;
+
+ /* Determine the total data consumed */
+ total += vop_vringh_iov_consumed(riov);
+ total += vop_vringh_iov_consumed(wiov);
+ vringh_complete_kern(vrh, *head, total);
+ *head = USHRT_MAX;
+ if (vringh_need_notify_kern(vrh) > 0)
+ vringh_notify(vrh);
+ vringh_kiov_cleanup(riov);
+ vringh_kiov_cleanup(wiov);
+ /* Update avail idx for user space */
+ vr->info->avail_idx = vrh->last_avail_idx;
+ }
+ return ret;
+}
+
+static inline int vop_verify_copy_args(struct vop_vdev *vdev,
+ struct mic_copy_desc *copy)
+{
+ if (!vdev || copy->vr_idx >= vdev->dd->num_vq)
+ return -EINVAL;
+ return 0;
+}
+
+/* Copy a specified number of virtio descriptors in a chain */
+static int vop_virtio_copy_desc(struct vop_vdev *vdev,
+ struct mic_copy_desc *copy)
+{
+ int err;
+ struct vop_vringh *vvr;
+
+ err = vop_verify_copy_args(vdev, copy);
+ if (err)
+ return err;
+
+ vvr = &vdev->vvr[copy->vr_idx];
+ mutex_lock(&vvr->vr_mutex);
+ if (!vop_vdevup(vdev)) {
+ err = -ENODEV;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ err = _vop_virtio_copy(vdev, copy);
+ if (err) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ }
+err:
+ mutex_unlock(&vvr->vr_mutex);
+ return err;
+}
+
+static int vop_open(struct inode *inode, struct file *f)
+{
+ struct vop_vdev *vdev;
+ struct vop_info *vi = container_of(f->private_data,
+ struct vop_info, miscdev);
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev)
+ return -ENOMEM;
+ vdev->vi = vi;
+ mutex_init(&vdev->vdev_mutex);
+ f->private_data = vdev;
+ init_completion(&vdev->destroy);
+ complete(&vdev->destroy);
+ return 0;
+}
+
+static int vop_release(struct inode *inode, struct file *f)
+{
+ struct vop_vdev *vdev = f->private_data, *vdev_tmp;
+ struct vop_info *vi = vdev->vi;
+ struct list_head *pos, *tmp;
+ bool found = false;
+
+ mutex_lock(&vdev->vdev_mutex);
+ if (vdev->deleted)
+ goto unlock;
+ mutex_lock(&vi->vop_mutex);
+ list_for_each_safe(pos, tmp, &vi->vdev_list) {
+ vdev_tmp = list_entry(pos, struct vop_vdev, list);
+ if (vdev == vdev_tmp) {
+ vop_virtio_del_device(vdev);
+ list_del(pos);
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&vi->vop_mutex);
+unlock:
+ mutex_unlock(&vdev->vdev_mutex);
+ if (!found)
+ wait_for_completion(&vdev->destroy);
+ f->private_data = NULL;
+ kfree(vdev);
+ return 0;
+}
+
+static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct vop_vdev *vdev = f->private_data;
+ struct vop_info *vi = vdev->vi;
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+ switch (cmd) {
+ case MIC_VIRTIO_ADD_DEVICE:
+ {
+ struct mic_device_desc dd, *dd_config;
+
+ if (copy_from_user(&dd, argp, sizeof(dd)))
+ return -EFAULT;
+
+ if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
+ dd.num_vq > MIC_MAX_VRINGS)
+ return -EINVAL;
+
+ dd_config = kzalloc(mic_desc_size(&dd), GFP_KERNEL);
+ if (!dd_config)
+ return -ENOMEM;
+ if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
+ ret = -EFAULT;
+ goto free_ret;
+ }
+ /* Ensure desc has not changed between the two reads */
+ if (memcmp(&dd, dd_config, sizeof(dd))) {
+ ret = -EINVAL;
+ goto free_ret;
+ }
+ mutex_lock(&vdev->vdev_mutex);
+ mutex_lock(&vi->vop_mutex);
+ ret = vop_virtio_add_device(vdev, dd_config);
+ if (ret)
+ goto unlock_ret;
+ list_add_tail(&vdev->list, &vi->vdev_list);
+unlock_ret:
+ mutex_unlock(&vi->vop_mutex);
+ mutex_unlock(&vdev->vdev_mutex);
+free_ret:
+ kfree(dd_config);
+ return ret;
+ }
+ case MIC_VIRTIO_COPY_DESC:
+ {
+ struct mic_copy_desc copy;
+
+ mutex_lock(&vdev->vdev_mutex);
+ ret = vop_vdev_inited(vdev);
+ if (ret)
+ goto _unlock_ret;
+
+ if (copy_from_user(&copy, argp, sizeof(copy))) {
+ ret = -EFAULT;
+ goto _unlock_ret;
+ }
+
+ ret = vop_virtio_copy_desc(vdev, &copy);
+ if (ret < 0)
+ goto _unlock_ret;
+ if (copy_to_user(
+ &((struct mic_copy_desc __user *)argp)->out_len,
+ &copy.out_len, sizeof(copy.out_len)))
+ ret = -EFAULT;
+_unlock_ret:
+ mutex_unlock(&vdev->vdev_mutex);
+ return ret;
+ }
+ case MIC_VIRTIO_CONFIG_CHANGE:
+ {
+ void *buf;
+
+ mutex_lock(&vdev->vdev_mutex);
+ ret = vop_vdev_inited(vdev);
+ if (ret)
+ goto __unlock_ret;
+ buf = kzalloc(vdev->dd->config_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto __unlock_ret;
+ }
+ if (copy_from_user(buf, argp, vdev->dd->config_len)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ ret = vop_virtio_config_change(vdev, buf);
+done:
+ kfree(buf);
+__unlock_ret:
+ mutex_unlock(&vdev->vdev_mutex);
+ return ret;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/*
+ * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
+ * not when previously enqueued buffers may be available. This means that
+ * in the card->host (TX) path, when userspace is unblocked by poll it
+ * must drain all available descriptors or it can stall.
+ */
+static unsigned int vop_poll(struct file *f, poll_table *wait)
+{
+ struct vop_vdev *vdev = f->private_data;
+ int mask = 0;
+
+ mutex_lock(&vdev->vdev_mutex);
+ if (vop_vdev_inited(vdev)) {
+ mask = POLLERR;
+ goto done;
+ }
+ poll_wait(f, &vdev->waitq, wait);
+ if (vop_vdev_inited(vdev)) {
+ mask = POLLERR;
+ } else if (vdev->poll_wake) {
+ vdev->poll_wake = 0;
+ mask = POLLIN | POLLOUT;
+ }
+done:
+ mutex_unlock(&vdev->vdev_mutex);
+ return mask;
+}
+
+static inline int
+vop_query_offset(struct vop_vdev *vdev, unsigned long offset,
+ unsigned long *size, unsigned long *pa)
+{
+ struct vop_device *vpdev = vdev->vpdev;
+ unsigned long start = MIC_DP_SIZE;
+ int i;
+
+ /*
+ * MMAP interface is as follows:
+ * offset                          region
+ * 0x0                             virtio device_page
+ * 0x1000                          first vring
+ * 0x1000 + size of 1st vring      second vring
+ * ....
+ */
+ if (!offset) {
+ *pa = virt_to_phys(vpdev->hw_ops->get_dp(vpdev));
+ *size = MIC_DP_SIZE;
+ return 0;
+ }
+
+ for (i = 0; i < vdev->dd->num_vq; i++) {
+ struct vop_vringh *vvr = &vdev->vvr[i];
+
+ if (offset == start) {
+ *pa = virt_to_phys(vvr->vring.va);
+ *size = vvr->vring.len;
+ return 0;
+ }
+ start += vvr->vring.len;
+ }
+ return -1;
+}
+
+/*
+ * Maps the device page and virtio rings to user space for readonly access.
+ */
+static int vop_mmap(struct file *f, struct vm_area_struct *vma)
+{
+ struct vop_vdev *vdev = f->private_data;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
+ int i, err;
+
+ err = vop_vdev_inited(vdev);
+ if (err)
+ goto ret;
+ if (vma->vm_flags & VM_WRITE) {
+ err = -EACCES;
+ goto ret;
+ }
+ while (size_rem) {
+ i = vop_query_offset(vdev, offset, &size, &pa);
+ if (i < 0) {
+ err = -EINVAL;
+ goto ret;
+ }
+ err = remap_pfn_range(vma, vma->vm_start + offset,
+ pa >> PAGE_SHIFT, size,
+ vma->vm_page_prot);
+ if (err)
+ goto ret;
+ size_rem -= size;
+ offset += size;
+ }
+ret:
+ return err;
+}
+
+static const struct file_operations vop_fops = {
+ .open = vop_open,
+ .release = vop_release,
+ .unlocked_ioctl = vop_ioctl,
+ .poll = vop_poll,
+ .mmap = vop_mmap,
+ .owner = THIS_MODULE,
+};
+
+int vop_host_init(struct vop_info *vi)
+{
+ int rc;
+ struct miscdevice *mdev;
+ struct vop_device *vpdev = vi->vpdev;
+
+ INIT_LIST_HEAD(&vi->vdev_list);
+ vi->dma_ch = vpdev->dma_ch;
+ mdev = &vi->miscdev;
+ mdev->minor = MISC_DYNAMIC_MINOR;
+ snprintf(vi->name, sizeof(vi->name), "vop_virtio%d", vpdev->index);
+ mdev->name = vi->name;
+ mdev->fops = &vop_fops;
+ mdev->parent = &vpdev->dev;
+
+ rc = misc_register(mdev);
+ if (rc)
+ dev_err(&vpdev->dev, "%s failed rc %d\n", __func__, rc);
+ return rc;
+}
+
+void vop_host_uninit(struct vop_info *vi)
+{
+ struct list_head *pos, *tmp;
+ struct vop_vdev *vdev;
+
+ mutex_lock(&vi->vop_mutex);
+ vop_virtio_reset_devices(vi);
+ list_for_each_safe(pos, tmp, &vi->vdev_list) {
+ vdev = list_entry(pos, struct vop_vdev, list);
+ list_del(pos);
+ reinit_completion(&vdev->destroy);
+ mutex_unlock(&vi->vop_mutex);
+ mutex_lock(&vdev->vdev_mutex);
+ vop_virtio_del_device(vdev);
+ vdev->deleted = true;
+ mutex_unlock(&vdev->vdev_mutex);
+ complete(&vdev->destroy);
+ mutex_lock(&vi->vop_mutex);
+ }
+ mutex_unlock(&vi->vop_mutex);
+ misc_deregister(&vi->miscdev);
+}
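The mmap layout documented in vop_query_offset() (device page at offset 0, vrings packed behind it) implies a simple user-space mapping scheme. A minimal sketch, assuming a 4 KiB device page (MIC_DP_SIZE) and the read-only mappings that vop_mmap() enforces; the function name is illustrative:

#include <stddef.h>
#include <sys/mman.h>

/* Illustrative only: map the device page and the first vring. */
static int map_vop_regions(int fd, size_t vring_len, void **dp, void **vring)
{
	/* Device page sits at file offset 0; vop_mmap() rejects VM_WRITE. */
	*dp = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (*dp == MAP_FAILED)
		return -1;
	/* The first vring follows immediately, at offset 0x1000. */
	*vring = mmap(NULL, vring_len, PROT_READ, MAP_SHARED, fd, 0x1000);
	return *vring == MAP_FAILED ? -1 : 0;
}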
diff --git a/drivers/staging/panel/panel.c b/drivers/misc/panel.c
index 70b8f4fabfad..6030ac5b8c63 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/misc/panel.c
@@ -172,8 +172,6 @@ static __u8 scan_mask_o;
/* logical or of the input bits involved in the scan matrix */
static __u8 scan_mask_i;
-typedef __u64 pmask_t;
-
enum input_type {
INPUT_TYPE_STD,
INPUT_TYPE_KBD,
@@ -188,8 +186,8 @@ enum input_state {
struct logical_input {
struct list_head list;
- pmask_t mask;
- pmask_t value;
+ __u64 mask;
+ __u64 value;
enum input_type type;
enum input_state state;
__u8 rise_time, fall_time;
@@ -219,19 +217,19 @@ static LIST_HEAD(logical_inputs); /* list of all defined logical inputs */
* corresponds to the ground.
* Within each group, bits are stored in the same order as read on the port :
* BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0).
- * So, each __u64 (or pmask_t) is represented like this :
+ * So, each __u64 is represented like this:
* 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE
* <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00>
*/
/* what has just been read from the I/O ports */
-static pmask_t phys_read;
+static __u64 phys_read;
/* previous phys_read */
-static pmask_t phys_read_prev;
+static __u64 phys_read_prev;
/* stabilized phys_read (phys_read|phys_read_prev) */
-static pmask_t phys_curr;
+static __u64 phys_curr;
/* previous phys_curr */
-static pmask_t phys_prev;
+static __u64 phys_prev;
/* 0 means that at least one logical signal needs be computed */
static char inputs_stable;
@@ -650,34 +648,28 @@ static const char nexcom_keypad_profile[][4][9] = {
static const char (*keypad_profile)[4][9] = old_keypad_profile;
-/* FIXME: this should be converted to a bit array containing signals states */
-static struct {
- unsigned char e; /* parallel LCD E (data latch on falling edge) */
- unsigned char rs; /* parallel LCD RS (0 = cmd, 1 = data) */
- unsigned char rw; /* parallel LCD R/W (0 = W, 1 = R) */
- unsigned char bl; /* parallel LCD backlight (0 = off, 1 = on) */
- unsigned char cl; /* serial LCD clock (latch on rising edge) */
- unsigned char da; /* serial LCD data */
-} bits;
+static DECLARE_BITMAP(bits, LCD_BITS);
+
+static void lcd_get_bits(unsigned int port, int *val)
+{
+ unsigned int bit, state;
+
+ for (bit = 0; bit < LCD_BITS; bit++) {
+ state = test_bit(bit, bits) ? BIT_SET : BIT_CLR;
+ *val &= lcd_bits[port][bit][BIT_MSK];
+ *val |= lcd_bits[port][bit][state];
+ }
+}
static void init_scan_timer(void);
/* sets data port bits according to current signals values */
static int set_data_bits(void)
{
- int val, bit;
+ int val;
val = r_dtr(pprt);
- for (bit = 0; bit < LCD_BITS; bit++)
- val &= lcd_bits[LCD_PORT_D][bit][BIT_MSK];
-
- val |= lcd_bits[LCD_PORT_D][LCD_BIT_E][bits.e]
- | lcd_bits[LCD_PORT_D][LCD_BIT_RS][bits.rs]
- | lcd_bits[LCD_PORT_D][LCD_BIT_RW][bits.rw]
- | lcd_bits[LCD_PORT_D][LCD_BIT_BL][bits.bl]
- | lcd_bits[LCD_PORT_D][LCD_BIT_CL][bits.cl]
- | lcd_bits[LCD_PORT_D][LCD_BIT_DA][bits.da];
-
+ lcd_get_bits(LCD_PORT_D, &val);
w_dtr(pprt, val);
return val;
}
@@ -685,19 +677,10 @@ static int set_data_bits(void)
/* sets ctrl port bits according to current signals values */
static int set_ctrl_bits(void)
{
- int val, bit;
+ int val;
val = r_ctr(pprt);
- for (bit = 0; bit < LCD_BITS; bit++)
- val &= lcd_bits[LCD_PORT_C][bit][BIT_MSK];
-
- val |= lcd_bits[LCD_PORT_C][LCD_BIT_E][bits.e]
- | lcd_bits[LCD_PORT_C][LCD_BIT_RS][bits.rs]
- | lcd_bits[LCD_PORT_C][LCD_BIT_RW][bits.rw]
- | lcd_bits[LCD_PORT_C][LCD_BIT_BL][bits.bl]
- | lcd_bits[LCD_PORT_C][LCD_BIT_CL][bits.cl]
- | lcd_bits[LCD_PORT_C][LCD_BIT_DA][bits.da];
-
+ lcd_get_bits(LCD_PORT_C, &val);
w_ctr(pprt, val);
return val;
}
@@ -793,12 +776,17 @@ static void lcd_send_serial(int byte)
* LCD reads D0 on STROBE's rising edge.
*/
for (bit = 0; bit < 8; bit++) {
- bits.cl = BIT_CLR; /* CLK low */
+ clear_bit(LCD_BIT_CL, bits); /* CLK low */
panel_set_bits();
- bits.da = byte & 1;
+ if (byte & 1) {
+ set_bit(LCD_BIT_DA, bits);
+ } else {
+ clear_bit(LCD_BIT_DA, bits);
+ }
+
panel_set_bits();
udelay(2); /* maintain the data during 2 us before CLK up */
- bits.cl = BIT_SET; /* CLK high */
+ set_bit(LCD_BIT_CL, bits); /* CLK high */
panel_set_bits();
udelay(1); /* maintain the strobe during 1 us */
byte >>= 1;
@@ -813,7 +801,10 @@ static void lcd_backlight(int on)
/* The backlight is activated by setting the AUTOFEED line to +5V */
spin_lock_irq(&pprt_lock);
- bits.bl = on;
+ if (on)
+ set_bit(LCD_BIT_BL, bits);
+ else
+ clear_bit(LCD_BIT_BL, bits);
panel_set_bits();
spin_unlock_irq(&pprt_lock);
}
@@ -848,14 +839,14 @@ static void lcd_write_cmd_p8(int cmd)
w_dtr(pprt, cmd);
udelay(20); /* maintain the data during 20 us before the strobe */
- bits.e = BIT_SET;
- bits.rs = BIT_CLR;
- bits.rw = BIT_CLR;
+ set_bit(LCD_BIT_E, bits);
+ clear_bit(LCD_BIT_RS, bits);
+ clear_bit(LCD_BIT_RW, bits);
set_ctrl_bits();
udelay(40); /* maintain the strobe during 40 us */
- bits.e = BIT_CLR;
+ clear_bit(LCD_BIT_E, bits);
set_ctrl_bits();
udelay(120); /* the shortest command takes at least 120 us */
@@ -870,14 +861,14 @@ static void lcd_write_data_p8(int data)
w_dtr(pprt, data);
udelay(20); /* maintain the data during 20 us before the strobe */
- bits.e = BIT_SET;
- bits.rs = BIT_SET;
- bits.rw = BIT_CLR;
+ set_bit(LCD_BIT_E, bits);
+ set_bit(LCD_BIT_RS, bits);
+ clear_bit(LCD_BIT_RW, bits);
set_ctrl_bits();
udelay(40); /* maintain the strobe during 40 us */
- bits.e = BIT_CLR;
+ clear_bit(LCD_BIT_E, bits);
set_ctrl_bits();
udelay(45); /* the shortest data takes at least 45 us */
@@ -943,7 +934,8 @@ static void lcd_clear_fast_s(void)
lcd_send_serial(0x5F); /* R/W=W, RS=1 */
lcd_send_serial(' ' & 0x0F);
lcd_send_serial((' ' >> 4) & 0x0F);
- udelay(40); /* the shortest data takes at least 40 us */
+ /* the shortest data takes at least 40 us */
+ udelay(40);
}
spin_unlock_irq(&pprt_lock);
@@ -969,15 +961,15 @@ static void lcd_clear_fast_p8(void)
/* maintain the data during 20 us before the strobe */
udelay(20);
- bits.e = BIT_SET;
- bits.rs = BIT_SET;
- bits.rw = BIT_CLR;
+ set_bit(LCD_BIT_E, bits);
+ set_bit(LCD_BIT_RS, bits);
+ clear_bit(LCD_BIT_RW, bits);
set_ctrl_bits();
/* maintain the strobe during 40 us */
udelay(40);
- bits.e = BIT_CLR;
+ clear_bit(LCD_BIT_E, bits);
set_ctrl_bits();
/* the shortest data takes at least 45 us */
@@ -1784,7 +1776,7 @@ static void phys_scan_contacts(void)
gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
/* grounded inputs are signals 40-44 */
- phys_read |= (pmask_t) gndmask << 40;
+ phys_read |= (__u64)gndmask << 40;
if (bitmask != gndmask) {
/*
@@ -1800,7 +1792,7 @@ static void phys_scan_contacts(void)
w_dtr(pprt, oldval & ~bitval); /* enable this output */
bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask;
- phys_read |= (pmask_t) bitmask << (5 * bit);
+ phys_read |= (__u64)bitmask << (5 * bit);
}
w_dtr(pprt, oldval); /* disable all outputs */
}
@@ -2037,32 +2029,32 @@ static void init_scan_timer(void)
* corresponding to out and in bits respectively.
* returns 1 if ok, 0 if error (in which case, nothing is written).
*/
-static int input_name2mask(const char *name, pmask_t *mask, pmask_t *value,
- char *imask, char *omask)
+static u8 input_name2mask(const char *name, __u64 *mask, __u64 *value,
+ u8 *imask, u8 *omask)
{
- static char sigtab[10] = "EeSsPpAaBb";
- char im, om;
- pmask_t m, v;
+ const char sigtab[] = "EeSsPpAaBb";
+ u8 im, om;
+ __u64 m, v;
- om = 0ULL;
- im = 0ULL;
+ om = 0;
+ im = 0;
m = 0ULL;
v = 0ULL;
while (*name) {
int in, out, bit, neg;
+ const char *idx;
- for (in = 0; (in < sizeof(sigtab)) && (sigtab[in] != *name);
- in++)
- ;
-
- if (in >= sizeof(sigtab))
+ idx = strchr(sigtab, *name);
+ if (!idx)
return 0; /* input name not found */
+
+ in = idx - sigtab;
neg = (in & 1); /* odd (lower) names are negated */
in >>= 1;
im |= BIT(in);
name++;
- if (isdigit(*name)) {
+ if (*name >= '0' && *name <= '7') {
out = *name - '0';
om |= BIT(out);
} else if (*name == '-') {
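The panel.c conversion above replaces one byte of state per signal with a kernel bitmap. The pattern, reduced to its essentials (a sketch, not the driver's code; SIG_MAX is a stand-in name):

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#define SIG_MAX 6			/* hypothetical signal count */
static DECLARE_BITMAP(sig, SIG_MAX);

static bool toggle_example(void)
{
	set_bit(0, sig);		/* assert signal 0 */
	clear_bit(1, sig);		/* deassert signal 1 */
	return test_bit(0, sig);	/* query signal 0 */
}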
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 9a17a9bab8d6..4810e039bbec 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -503,8 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
int err;
ssize_t rom_size;
- struct pch_phub_reg *chip =
- dev_get_drvdata(container_of(kobj, struct device, kobj));
+ struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
ret = mutex_lock_interruptible(&pch_phub_mutex);
if (ret) {
@@ -514,8 +513,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
/* Get Rom signature */
chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
- if (!chip->pch_phub_extrom_base_address)
+ if (!chip->pch_phub_extrom_base_address) {
+ err = -ENODATA;
goto exrom_map_err;
+ }
pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
(unsigned char *)&rom_signature);
@@ -567,8 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
unsigned int addr_offset;
int ret;
ssize_t rom_size;
- struct pch_phub_reg *chip =
- dev_get_drvdata(container_of(kobj, struct device, kobj));
+ struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
ret = mutex_lock_interruptible(&pch_phub_mutex);
if (ret)
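kobj_to_dev() (include/linux/device.h) is just the container_of() idiom the old code open-coded, so the two forms below are equivalent (shown for illustration):

struct device *dev  = kobj_to_dev(kobj);
struct device *dev2 = container_of(kobj, struct device, kobj);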
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index f74fc0ca2ef9..a2d97b9b17e3 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -198,8 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
#else
*pageshift = PAGE_SHIFT;
#endif
- if (get_user_pages
- (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
+ if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 736dae715dbf..69cdabea9c03 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -360,7 +360,10 @@ static int sram_probe(struct platform_device *pdev)
return -EBUSY;
}
- sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
+ if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
+ sram->virt_base = devm_ioremap(sram->dev, res->start, size);
+ else
+ sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
if (IS_ERR(sram->virt_base))
return PTR_ERR(sram->virt_base);
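The new "no-memory-wc" property lets a board opt out of write-combined mappings. A hypothetical device-tree fragment using it (addresses and sizes illustrative):

sram@20000000 {
	compatible = "mmio-sram";
	reg = <0x20000000 0x10000>;
	no-memory-wc;		/* take the devm_ioremap() branch above */
};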
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 6e3af8b42cdd..dcdbd58672cc 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -632,7 +632,6 @@ long st_register(struct st_proto_s *new_proto)
spin_unlock_irqrestore(&st_gdata->lock, flags);
return err;
}
- pr_debug("done %s(%d) ", __func__, new_proto->chnl_id);
}
EXPORT_SYMBOL_GPL(st_register);
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index b823f9a6e464..896be150e28f 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.3.0-k");
+MODULE_VERSION("1.1.4.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..f84a4275ca29 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
if (dirty)
set_page_dirty(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
pages[i] = NULL;
}
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index fe207e542032..8a0147dfed27 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -86,7 +86,6 @@ static int max_devices;
/* TODO: Replace these with struct ida */
static DECLARE_BITMAP(dev_use, MAX_DEVICES);
-static DECLARE_BITMAP(name_use, MAX_DEVICES);
/*
* There is one mmc_blk_data per slot.
@@ -105,7 +104,6 @@ struct mmc_blk_data {
unsigned int usage;
unsigned int read_only;
unsigned int part_type;
- unsigned int name_idx;
unsigned int reset_done;
#define MMC_BLK_READ BIT(0)
#define MMC_BLK_WRITE BIT(1)
@@ -589,6 +587,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_card *card;
int err = 0, ioc_err = 0;
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR(idata))
return PTR_ERR(idata);
@@ -631,6 +637,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
int i, err = 0, ioc_err = 0;
__u64 num_of_cmds;
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
sizeof(num_of_cmds)))
return -EFAULT;
@@ -688,14 +702,6 @@ cmd_err:
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- /*
- * The caller must have CAP_SYS_RAWIO, and must be calling this on the
- * whole block device, not on a partition. This prevents overspray
- * between sibling partitions.
- */
- if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
- return -EPERM;
-
switch (cmd) {
case MMC_IOC_CMD:
return mmc_blk_ioctl_cmd(bdev,
@@ -1362,8 +1368,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
if (brq->data.error) {
if (need_retune && !brq->retune_retry_done) {
- pr_info("%s: retrying because a re-tune was needed\n",
- req->rq_disk->disk_name);
+ pr_debug("%s: retrying because a re-tune was needed\n",
+ req->rq_disk->disk_name);
brq->retune_retry_done = 1;
return MMC_BLK_RETRY;
}
@@ -1524,13 +1530,13 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
}
if (rq_data_dir(req) == READ) {
brq->cmd.opcode = readcmd;
- brq->data.flags |= MMC_DATA_READ;
+ brq->data.flags = MMC_DATA_READ;
if (brq->mrq.stop)
brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
MMC_CMD_AC;
} else {
brq->cmd.opcode = writecmd;
- brq->data.flags |= MMC_DATA_WRITE;
+ brq->data.flags = MMC_DATA_WRITE;
if (brq->mrq.stop)
brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
MMC_CMD_AC;
@@ -1799,7 +1805,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->data.blocks = packed->blocks + hdr_blocks;
- brq->data.flags |= MMC_DATA_WRITE;
+ brq->data.flags = MMC_DATA_WRITE;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -2194,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
goto out;
}
- /*
- * !subname implies we are creating main mmc_blk_data that will be
- * associated with mmc_card with dev_set_drvdata. Due to device
- * partitions, devidx will not coincide with a per-physical card
- * index anymore so we keep track of a name index.
- */
- if (!subname) {
- md->name_idx = find_first_zero_bit(name_use, max_devices);
- __set_bit(md->name_idx, name_use);
- } else
- md->name_idx = ((struct mmc_blk_data *)
- dev_to_disk(parent)->private_data)->name_idx;
-
md->area_type = area_type;
/*
@@ -2256,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%u%s", md->name_idx, subname ? subname : "");
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
if (mmc_card_mmc(card))
blk_queue_logical_block_size(md->queue.queue,
@@ -2410,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
- __clear_bit(md->name_idx, name_use);
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
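Both ioctl handlers now perform the same guard, so a shared helper is a natural follow-up; a sketch (not part of the patch, name hypothetical) that would live alongside the handlers in block.c:

static bool mmc_blk_ioctl_allowed(struct block_device *bdev)
{
	/* Whole-device node only, and only for CAP_SYS_RAWIO holders. */
	return capable(CAP_SYS_RAWIO) && bdev == bdev->bd_contains;
}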
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 7fc9174d4619..c032eef45762 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2829,6 +2829,7 @@ static int mtf_testlist_show(struct seq_file *sf, void *data)
mutex_lock(&mmc_test_lock);
+ seq_printf(sf, "0:\tRun all tests\n");
for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index d2de5925b73e..5415056f9aa5 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -493,7 +493,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
if (status & UART_MSR_DCTS) {
port->icount.cts++;
tty = tty_port_tty_get(&port->port);
- if (tty && (tty->termios.c_cflag & CRTSCTS)) {
+ if (tty && C_CRTSCTS(tty)) {
int cts = (status & UART_MSR_CTS);
if (tty->hw_stopped) {
if (cts) {
@@ -648,10 +648,10 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
sdio_uart_change_speed(port, &tty->termios, NULL);
- if (tty->termios.c_cflag & CBAUD)
+ if (C_BAUD(tty))
sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
tty->hw_stopped = 1;
@@ -833,7 +833,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))
+ if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
return;
if (sdio_uart_claim_func(port) != 0)
@@ -844,7 +844,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)
sdio_uart_start_tx(port);
}
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
sdio_uart_clear_mctrl(port, TIOCM_RTS);
sdio_uart_irq(port->func);
@@ -855,7 +855,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))
+ if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
return;
if (sdio_uart_claim_func(port) != 0)
@@ -870,7 +870,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
}
}
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
sdio_uart_set_mctrl(port, TIOCM_RTS);
sdio_uart_irq(port->func);
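The C_CRTSCTS()/C_BAUD() macros from <linux/tty.h> are pure shorthand for the termios flag tests they replace, as the diff itself shows. For instance, these two tests are equivalent:

bool rtscts_old = (tty->termios.c_cflag & CRTSCTS) != 0;
bool rtscts_new = C_CRTSCTS(tty);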
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f95d41ffc766..41b1e761965f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1033,7 +1033,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
"width %u timing %u\n",
mmc_hostname(host), ios->clock, ios->bus_mode,
ios->power_mode, ios->chip_select, ios->vdd,
- ios->bus_width, ios->timing);
+ 1 << ios->bus_width, ios->timing);
host->ops->set_ios(host, ios);
}
@@ -1079,7 +1079,8 @@ int mmc_execute_tuning(struct mmc_card *card)
err = host->ops->execute_tuning(host, opcode);
if (err)
- pr_err("%s: tuning execution failed\n", mmc_hostname(host));
+ pr_err("%s: tuning execution failed: %d\n",
+ mmc_hostname(host), err);
else
mmc_retune_enable(host);
@@ -1204,8 +1205,9 @@ EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
* @np: The device node need to be parsed.
* @mask: mask of voltages available for MMC/SD/SDIO
*
- * 1. Return zero on success.
- * 2. Return negative errno: voltage-range is invalid.
+ * Parse the "voltage-ranges" DT property, returning zero if it is not
+ * found, negative errno if the voltage-range specification is invalid,
+ * or one if the voltage-range is specified and successfully parsed.
*/
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
@@ -1214,8 +1216,12 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
- if (!voltage_ranges || !num_ranges) {
- pr_info("%s: voltage-ranges unspecified\n", np->full_name);
+ if (!voltage_ranges) {
+ pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
+ return 0;
+ }
+ if (!num_ranges) {
+ pr_err("%s: voltage-ranges empty\n", np->full_name);
return -EINVAL;
}
@@ -1234,7 +1240,7 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
*mask |= ocr_mask;
}
- return 0;
+ return 1;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
@@ -2532,7 +2538,7 @@ int mmc_detect_card_removed(struct mmc_host *host)
if (!card)
return 1;
- if (host->caps & MMC_CAP_NONREMOVABLE)
+ if (!mmc_card_is_removable(host))
return 0;
ret = mmc_card_removed(card);
@@ -2570,7 +2576,7 @@ void mmc_rescan(struct work_struct *work)
return;
/* If there is a non-removable card registered, only scan once */
- if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
+ if (!mmc_card_is_removable(host) && host->rescan_entered)
return;
host->rescan_entered = 1;
@@ -2587,8 +2593,7 @@ void mmc_rescan(struct work_struct *work)
* if there is a _removable_ card registered, check whether it is
* still present
*/
- if (host->bus_ops && !host->bus_dead
- && !(host->caps & MMC_CAP_NONREMOVABLE))
+ if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
host->bus_ops->detect(host);
host->detect_change = 0;
@@ -2613,7 +2618,7 @@ void mmc_rescan(struct work_struct *work)
mmc_bus_put(host);
mmc_claim_host(host);
- if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
+ if (mmc_card_is_removable(host) && host->ops->get_cd &&
host->ops->get_cd(host) == 0) {
mmc_power_off(host);
mmc_release_host(host);
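With the reworked return convention of mmc_of_parse_voltage() (negative errno for a malformed property, 0 for absent, 1 for parsed), a caller can distinguish "absent" from "invalid". A minimal sketch; the fallback mask is illustrative:

u32 ocr_mask = 0;
int ret = mmc_of_parse_voltage(np, &ocr_mask);

if (ret < 0)
	return ret;			/* malformed voltage-ranges */
if (ret == 0)				/* property absent: use a default */
	ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34;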
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 65cc0ac9b82d..9382a57a5aa4 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -220,7 +220,7 @@ static int mmc_clock_opt_set(void *data, u64 val)
struct mmc_host *host = data;
/* We need this check due to input value is u64 */
- if (val > host->f_max)
+ if (val != 0 && (val > host->f_max || val < host->f_min))
return -EINVAL;
mmc_claim_host(host);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0aecd5c00b86..6e4c55a4aab5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -339,6 +339,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->class_dev.parent = dev;
host->class_dev.class = &mmc_host_class;
device_initialize(&host->class_dev);
+ device_enable_async_suspend(&host->class_dev);
if (mmc_gpio_alloc(host)) {
put_device(&host->class_dev);
@@ -355,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
* They have to set these according to their abilities.
*/
host->max_segs = 1;
- host->max_seg_size = PAGE_CACHE_SIZE;
+ host->max_seg_size = PAGE_SIZE;
- host->max_req_size = PAGE_CACHE_SIZE;
+ host->max_req_size = PAGE_SIZE;
host->max_blk_size = 512;
- host->max_blk_count = PAGE_CACHE_SIZE / 512;
+ host->max_blk_count = PAGE_SIZE / 512;
return host;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bf49e44571f2..4dbe3df8024b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -501,7 +501,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.raw_bkops_status =
ext_csd[EXT_CSD_BKOPS_STATUS];
if (!card->ext_csd.man_bkops_en)
- pr_info("%s: MAN_BKOPS_EN bit is not set\n",
+ pr_debug("%s: MAN_BKOPS_EN bit is not set\n",
mmc_hostname(card->host));
}
@@ -945,7 +945,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
break;
} else {
pr_warn("%s: switch to bus width %d failed\n",
- mmc_hostname(host), ext_csd_bits[idx]);
+ mmc_hostname(host), 1 << bus_width);
}
}
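The `1 << ios->bus_width` fix in the two hunks above works because MMC_BUS_WIDTH_{1,4,8} are log2 encodings (0, 2 and 3 respectively), so shifting recovers the width in bits:

/* MMC_BUS_WIDTH_1 = 0, MMC_BUS_WIDTH_4 = 2, MMC_BUS_WIDTH_8 = 3 */
pr_debug("bus width: %u bits\n", 1 << MMC_BUS_WIDTH_8);	/* prints "8 bits" */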
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 2c90635c89af..62355bda608f 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -90,7 +90,6 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
- int err;
struct mmc_command cmd = {0};
BUG_ON(!host);
@@ -105,11 +104,7 @@ static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
}
- err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
- if (err)
- return err;
-
- return 0;
+ return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
int mmc_select_card(struct mmc_card *card)
@@ -244,7 +239,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
int mmc_set_relative_addr(struct mmc_card *card)
{
- int err;
struct mmc_command cmd = {0};
BUG_ON(!card);
@@ -254,11 +248,7 @@ int mmc_set_relative_addr(struct mmc_card *card)
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
- if (err)
- return err;
-
- return 0;
+ return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}
static int
@@ -743,7 +733,7 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
- int err, width;
+ int width;
if (bus_width == MMC_BUS_WIDTH_8)
width = 8;
@@ -759,8 +749,7 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
* is a problem. This improves chances that the test will work.
*/
mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
- err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
- return err;
+ return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index aba786daebca..bc173e18b71c 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index bb39a29b2db6..b95bd24d92f4 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -74,8 +74,6 @@ void mmc_decode_cid(struct mmc_card *card)
{
u32 *resp = card->raw_cid;
- memset(&card->cid, 0, sizeof(struct mmc_cid));
-
/*
* SD doesn't currently have a version field so we will
* have to assume we can parse this.
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 48d0c93ba25a..16b774c18e75 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
- int err;
struct mmc_command cmd = {0};
BUG_ON(!card);
@@ -140,11 +139,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
return -EINVAL;
}
- err = mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
- if (err)
- return err;
-
- return 0;
+ return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
}
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 467b3cf80c44..bd44ba8116d1 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -106,8 +106,6 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
unsigned char data;
unsigned char speed;
- memset(&card->cccr, 0, sizeof(struct sdio_cccr));
-
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CCCR, 0, &data);
if (ret)
goto out;
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 62508b457c4f..34f6e8015306 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -217,7 +217,6 @@ int sdio_reset(struct mmc_host *host)
else
abort |= 0x08;
- ret = mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
- return ret;
+ return mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 1526b8a10b09..e657af0e95fa 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
config MMC_SDHCI_ACPI
tristate "SDHCI support for ACPI enumerated SDHCI controllers"
depends on MMC_SDHCI && ACPI
+ select IOSF_MBI if X86
help
This selects support for ACPI enumerated SDHCI controllers,
identified by ACPI Compatibility ID PNP0D40 or specific
@@ -318,15 +319,15 @@ config MMC_SDHCI_F_SDH30
If unsure, say N.
config MMC_SDHCI_IPROC
- tristate "SDHCI platform support for the iProc SD/MMC Controller"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
+ tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
+ depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
default ARCH_BCM_IPROC
select MMC_SDHCI_IO_ACCESSORS
help
This selects the iProc SD/MMC controller.
- If you have an IPROC platform with SD or MMC devices,
+ If you have a BCM2835 or IPROC platform with SD or MMC devices,
say Y or M here.
If unsure, say N.
@@ -560,8 +561,8 @@ config MMC_TMIO
config MMC_SDHI
tristate "SH-Mobile SDHI SD/SDIO controller support"
- depends on SUPERH || ARM
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARM || ARM64
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select MMC_TMIO_CORE
help
This provides support for the SDHI SD/SDIO controller found in
@@ -673,8 +674,8 @@ config MMC_DW_ROCKCHIP
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
- depends on MMC_BLOCK && HAS_DMA
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_DMA
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
help
This selects the MMC Host Interface controller (MMCIF).
@@ -786,3 +787,14 @@ config MMC_MTK
If you have a machine with an integrated SD/MMC card reader, say Y or M here.
This is needed if support for any SD/SDIO/MMC devices is required.
If unsure, say N.
+
+config MMC_SDHCI_MICROCHIP_PIC32
+ tristate "Microchip PIC32MZDA SDHCI support"
+ depends on MMC_SDHCI && PIC32MZDA && MMC_SDHCI_PLTFM
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ for the PIC32MZDA platform.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3595f83e89dd..af918d261ff9 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
+obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 851ccd9ac868..9268c41a8561 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -848,9 +848,7 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
if (cmd->opcode == SD_IO_RW_EXTENDED) {
cmdr |= ATMCI_CMDR_SDIO_BLOCK;
} else {
- if (data->flags & MMC_DATA_STREAM)
- cmdr |= ATMCI_CMDR_STREAM;
- else if (data->blocks > 1)
+ if (data->blocks > 1)
cmdr |= ATMCI_CMDR_MULTI_BLOCK;
else
cmdr |= ATMCI_CMDR_BLOCK;
@@ -1371,10 +1369,7 @@ static void atmci_start_request(struct atmel_mci *host,
host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
if (!(data->flags & MMC_DATA_WRITE))
host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
- if (data->flags & MMC_DATA_STREAM)
- host->stop_cmdr |= ATMCI_CMDR_STREAM;
- else
- host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
+ host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
}
/*
@@ -2443,7 +2438,7 @@ static int atmci_configure_dma(struct atmel_mci *host)
struct mci_platform_data *pdata = host->pdev->dev.platform_data;
dma_cap_mask_t mask;
- if (!pdata->dma_filter)
+ if (!pdata || !pdata->dma_filter)
return -ENODEV;
dma_cap_zero(mask);
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 2b7f37e82ca9..526231e38583 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -126,9 +126,6 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
length = data->blksz * data->blocks;
bfin_write_SDH_DATA_LGTH(length);
- if (data->flags & MMC_DATA_STREAM)
- data_ctl |= DTX_MODE;
-
if (data->flags & MMC_DATA_READ)
data_ctl |= DTX_DIR;
/* Only supports power-of-2 block size */
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index ea2a2ebc6b91..693144e7427b 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -346,10 +346,6 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
if (cmd->data)
cmd_reg |= MMCCMD_WDATX;
- /* Setting whether stream or block transfer */
- if (cmd->flags & MMC_DATA_STREAM)
- cmd_reg |= MMCCMD_STRMTP;
-
/* Setting whether data read or write */
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
cmd_reg |= MMCCMD_DTRW;
@@ -568,8 +564,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
return;
}
- dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
- (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
+ dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n",
(data->flags & MMC_DATA_WRITE) ? "write" : "read",
data->blocks, data->blksz);
dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
@@ -584,22 +579,18 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
writel(data->blksz, host->base + DAVINCI_MMCBLEN);
/* Configure the FIFO */
- switch (data->flags & MMC_DATA_WRITE) {
- case MMC_DATA_WRITE:
+ if (data->flags & MMC_DATA_WRITE) {
host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
host->base + DAVINCI_MMCFIFOCTL);
- break;
-
- default:
+ } else {
host->data_dir = DAVINCI_MMC_DATADIR_READ;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
host->base + DAVINCI_MMCFIFOCTL);
- break;
}
host->buffer = NULL;
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 3a7e835a0033..8790f2afc057 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -145,6 +145,16 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
+
+ /*
+ * Exynos4412 and Exynos5250 extend the use of the CMD register with
+ * bit 29 (which is reserved on standard MSHC controllers) for
+ * optionally bypassing the HOLD register for command and data. The
+ * HOLD register should be bypassed when there is no phase shift
+ * applied to the CMD/DATA sent to the card.
+ */
+ if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel))
+ set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
}
#ifdef CONFIG_PM_SLEEP
@@ -202,26 +212,6 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
#define dw_mci_exynos_resume_noirq NULL
#endif /* CONFIG_PM_SLEEP */
-static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
- struct dw_mci_exynos_priv_data *priv = host->priv;
- /*
- * Exynos4412 and Exynos5250 extends the use of CMD register with the
- * use of bit 29 (which is reserved on standard MSHC controllers) for
- * optionally bypassing the HOLD register for command and data. The
- * HOLD register should be bypassed in case there is no phase shift
- * applied on CMD/DATA that is sent to the card.
- */
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
- if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL64)))
- *cmdr |= SDMMC_CMD_USE_HOLD_REG;
- } else {
- if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL)))
- *cmdr |= SDMMC_CMD_USE_HOLD_REG;
- }
-}
-
static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
@@ -500,7 +490,6 @@ static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
.init = dw_mci_exynos_priv_init,
.setup_clock = dw_mci_exynos_setup_clock,
- .prepare_command = dw_mci_exynos_prepare_command,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
.execute_tuning = dw_mci_exynos_execute_tuning,
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 81bdeeb05a4d..c0bb0c793e84 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -26,19 +26,6 @@
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
-static void dw_mci_pltfm_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
- *cmdr |= SDMMC_CMD_USE_HOLD_REG;
-}
-
-static const struct dw_mci_drv_data socfpga_drv_data = {
- .prepare_command = dw_mci_pltfm_prepare_command,
-};
-
-static const struct dw_mci_drv_data pistachio_drv_data = {
- .prepare_command = dw_mci_pltfm_prepare_command,
-};
-
int dw_mci_pltfm_register(struct platform_device *pdev,
const struct dw_mci_drv_data *drv_data)
{
@@ -94,10 +81,8 @@ EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
static const struct of_device_id dw_mci_pltfm_match[] = {
{ .compatible = "snps,dw-mshc", },
- { .compatible = "altr,socfpga-dw-mshc",
- .data = &socfpga_drv_data },
- { .compatible = "img,pistachio-dw-mshc",
- .data = &pistachio_drv_data },
+ { .compatible = "altr,socfpga-dw-mshc", },
+ { .compatible = "img,pistachio-dw-mshc", },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index d9c92f31da64..84e50f3a64b6 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -26,11 +26,6 @@ struct dw_mci_rockchip_priv_data {
int default_sample_phase;
};
-static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
- *cmdr |= SDMMC_CMD_USE_HOLD_REG;
-}
-
static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
{
host->bus_hz /= RK3288_CLKGEN_DIV;
@@ -240,12 +235,10 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
}
static const struct dw_mci_drv_data rk2928_drv_data = {
- .prepare_command = dw_mci_rockchip_prepare_command,
.init = dw_mci_rockchip_init,
};
static const struct dw_mci_drv_data rk3288_drv_data = {
- .prepare_command = dw_mci_rockchip_prepare_command,
.set_ios = dw_mci_rk3288_set_ios,
.execute_tuning = dw_mci_rk3288_execute_tuning,
.parse_dt = dw_mci_rk3288_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 712835177e8b..242f9a0769bd 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -234,7 +234,6 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
struct mmc_data *data;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
- const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 cmdr;
cmd->error = -EINPROGRESS;
@@ -290,14 +289,12 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
data = cmd->data;
if (data) {
cmdr |= SDMMC_CMD_DAT_EXP;
- if (data->flags & MMC_DATA_STREAM)
- cmdr |= SDMMC_CMD_STRM_MODE;
if (data->flags & MMC_DATA_WRITE)
cmdr |= SDMMC_CMD_DAT_WR;
}
- if (drv_data && drv_data->prepare_command)
- drv_data->prepare_command(slot->host, &cmdr);
+ if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
+ cmdr |= SDMMC_CMD_USE_HOLD_REG;
return cmdr;
}
@@ -1450,12 +1447,11 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
{
int present;
struct dw_mci_slot *slot = mmc_priv(mmc);
- struct dw_mci_board *brd = slot->host->pdata;
struct dw_mci *host = slot->host;
int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
- if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
(mmc->caps & MMC_CAP_NONREMOVABLE))
present = 1;
else if (!IS_ERR_VALUE(gpio_cd))
@@ -1477,6 +1473,34 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
return present;
}
+static void dw_mci_hw_reset(struct mmc_host *mmc)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ int reset;
+
+ if (host->use_dma == TRANS_MODE_IDMAC)
+ dw_mci_idmac_reset(host);
+
+ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
+ SDMMC_CTRL_FIFO_RESET))
+ return;
+
+ /*
+ * According to the eMMC spec, the card reset procedure is:
+ * tRstW >= 1us: RST_n pulse width
+ * tRSCA >= 200us: RST_n to Command time
+ * tRSTH >= 1us: RST_n high period
+ */
+ reset = mci_readl(host, RST_N);
+ reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
+ mci_writel(host, RST_N, reset);
+ usleep_range(1, 2);
+ reset |= SDMMC_RST_HWACTIVE << slot->id;
+ mci_writel(host, RST_N, reset);
+ usleep_range(200, 300);
+}
+
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -1563,6 +1587,7 @@ static const struct mmc_host_ops dw_mci_ops = {
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
+ .hw_reset = dw_mci_hw_reset,
.enable_sdio_irq = dw_mci_enable_sdio_irq,
.execute_tuning = dw_mci_execute_tuning,
.card_busy = dw_mci_card_busy,
@@ -2840,23 +2865,13 @@ static void dw_mci_dto_timer(unsigned long arg)
}
#ifdef CONFIG_OF
-static struct dw_mci_of_quirks {
- char *quirk;
- int id;
-} of_quirks[] = {
- {
- .quirk = "broken-cd",
- .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
- },
-};
-
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
struct dw_mci_board *pdata;
struct device *dev = host->dev;
struct device_node *np = dev->of_node;
const struct dw_mci_drv_data *drv_data = host->drv_data;
- int idx, ret;
+ int ret;
u32 clock_frequency;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -2864,17 +2879,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
return ERR_PTR(-ENOMEM);
/* find out number of slots supported */
- if (of_property_read_u32(dev->of_node, "num-slots",
- &pdata->num_slots)) {
- dev_info(dev,
- "num-slots property not found, assuming 1 slot is available\n");
- pdata->num_slots = 1;
- }
-
- /* get quirks */
- for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
- if (of_get_property(np, of_quirks[idx].quirk, NULL))
- pdata->quirks |= of_quirks[idx].id;
+ of_property_read_u32(np, "num-slots", &pdata->num_slots);
if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
dev_info(dev,
@@ -2908,18 +2913,19 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
static void dw_mci_enable_cd(struct dw_mci *host)
{
- struct dw_mci_board *brd = host->pdata;
unsigned long irqflags;
u32 temp;
int i;
+ struct dw_mci_slot *slot;
- /* No need for CD if broken card detection */
- if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
- return;
-
- /* No need for CD if all slots have a non-error GPIO */
+ /*
+ * No need for CD if all slots have a non-error GPIO, and
+ * nothing to do at all if broken card detection (polling)
+ * is in use on any slot.
+ */
for (i = 0; i < host->num_slots; i++) {
- struct dw_mci_slot *slot = host->slot[i];
+ slot = host->slot[i];
+ if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
+ return;
if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
break;
@@ -2949,12 +2955,6 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- if (host->pdata->num_slots < 1) {
- dev_err(host->dev,
- "Platform data must supply num_slots.\n");
- return -ENODEV;
- }
-
host->biu_clk = devm_clk_get(host->dev, "biu");
if (IS_ERR(host->biu_clk)) {
dev_dbg(host->dev, "biu clock not available\n");
@@ -3052,8 +3052,10 @@ int dw_mci_probe(struct dw_mci *host)
}
/* Reset all blocks */
- if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
- return -ENODEV;
+ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
+ ret = -ENODEV;
+ goto err_clk_ciu;
+ }
host->dma_ops = host->pdata->dma_ops;
dw_mci_init_dma(host);
@@ -3111,13 +3113,20 @@ int dw_mci_probe(struct dw_mci *host)
if (host->pdata->num_slots)
host->num_slots = host->pdata->num_slots;
else
- host->num_slots = SDMMC_GET_SLOT_NUM(mci_readl(host, HCON));
+ host->num_slots = 1;
+
+ if (host->num_slots < 1 ||
+ host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
+ dev_err(host->dev,
+ "Platform data must supply correct num_slots.\n");
+ ret = -ENODEV;
+ goto err_clk_ciu;
+ }
/*
* Enable interrupts for command done, data over, data empty,
* receive ready and error such as transmit, receive timeout, crc error
*/
- mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
DW_MCI_ERROR_FLAGS);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index f695b58f0613..68d5da2dfd19 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -46,6 +46,7 @@
#define SDMMC_VERID 0x06c
#define SDMMC_HCON 0x070
#define SDMMC_UHS_REG 0x074
+#define SDMMC_RST_N 0x078
#define SDMMC_BMOD 0x080
#define SDMMC_PLDMND 0x084
#define SDMMC_DBADDR 0x088
@@ -169,6 +170,8 @@
#define SDMMC_IDMAC_ENABLE BIT(7)
#define SDMMC_IDMAC_FB BIT(1)
#define SDMMC_IDMAC_SWRESET BIT(0)
+/* H/W reset */
+#define SDMMC_RST_HWACTIVE 0x1
/* Version ID register define */
#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
/* Card read threshold */
@@ -265,6 +268,7 @@ struct dw_mci_slot {
#define DW_MMC_CARD_PRESENT 0
#define DW_MMC_CARD_NEED_INIT 1
#define DW_MMC_CARD_NO_LOW_PWR 2
+#define DW_MMC_CARD_NO_USE_HOLD 3
int id;
int sdio_id;
};
@@ -274,7 +278,6 @@ struct dw_mci_slot {
* @caps: mmc subsystem specified capabilities of the controller(s).
* @init: early implementation specific initialization.
* @setup_clock: implementation specific clock configuration.
- * @prepare_command: handle CMD register extensions.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
* @execute_tuning: implementation specific tuning procedure.
@@ -287,7 +290,6 @@ struct dw_mci_drv_data {
unsigned long *caps;
int (*init)(struct dw_mci *host);
int (*setup_clock)(struct dw_mci *host);
- void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
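After this rework the HOLD-register decision is data-driven rather than a per-platform callback: a back end that knows no phase shift is applied sets the per-slot flag, and the core consults it when building every command. In outline, using the lines from the hunks above:

/* back end (e.g. dw_mmc-exynos), when no phase shift is applied: */
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);

/* core, in dw_mci_prepare_command(): */
if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
	cmdr |= SDMMC_CMD_USE_HOLD_REG;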
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 76e8bce6f46e..03ddf0ecf402 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -660,8 +660,6 @@ static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
cmdat |= JZ_MMC_CMDAT_DATA_EN;
if (cmd->data->flags & MMC_DATA_WRITE)
cmdat |= JZ_MMC_CMDAT_WRITE;
- if (cmd->data->flags & MMC_DATA_STREAM)
- cmdat |= JZ_MMC_CMDAT_STREAM;
if (host->use_dma)
cmdat |= JZ_MMC_CMDAT_DMA_EN;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 3446097a43c0..e77d79c8cd9f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1442,6 +1442,12 @@ static int mmc_spi_probe(struct spi_device *spi)
host->pdata->cd_debounce);
if (status != 0)
goto fail_add_host;
+
+ /* The platform has a CD GPIO signal that may support
+ * interrupts, so let mmc_gpiod_request_cd_irq() decide
+ * if polling is needed or not.
+ */
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
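
The mmc_spi hunk above leans on the slot-gpio core: mmc_gpiod_request_cd_irq()
re-adds MMC_CAP_NEEDS_POLL by itself when the CD GPIO cannot deliver an
interrupt, so the driver may clear the cap unconditionally beforehand. A
hedged sketch of that call order (demo_ name illustrative; the CD GPIO is
assumed to have been requested earlier in probe):

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static void demo_setup_cd_irq(struct mmc_host *mmc)
{
	mmc->caps &= ~MMC_CAP_NEEDS_POLL;	/* optimistic: the irq works */
	mmc_gpiod_request_cd_irq(mmc);		/* re-enables polling if not */
}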
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 0d6ca4116f3d..2e6c96845c9a 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -40,7 +40,6 @@
#include <asm/div64.h>
#include <asm/io.h>
-#include <asm/sizes.h>
#include "mmci.h"
#include "mmci_qcom_dml.h"
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 82a97ac4e956..b17f30da97da 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -35,6 +35,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
#define MAX_BD_NUM 1024
@@ -1020,26 +1021,19 @@ static void msdc_set_buswidth(struct msdc_host *host, u32 width)
static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
- int min_uv, max_uv;
int ret = 0;
if (!IS_ERR(mmc->supply.vqmmc)) {
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
- min_uv = 3300000;
- max_uv = 3300000;
- } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
- min_uv = 1800000;
- max_uv = 1800000;
- } else {
+ if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 &&
+ ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
dev_err(host->dev, "Unsupported signal voltage!\n");
return -EINVAL;
}
- ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
- dev_err(host->dev,
- "Regulator set error %d: %d - %d\n",
- ret, min_uv, max_uv);
+ dev_dbg(host->dev, "Regulator set error %d (%d)\n",
+ ret, ios->signal_voltage);
} else {
/* Apply different pinctrl settings for different signal voltage */
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
@@ -1452,6 +1446,7 @@ static struct mmc_host_ops mt_msdc_ops = {
.pre_req = msdc_pre_req,
.request = msdc_ops_request,
.set_ios = msdc_ops_set_ios,
+ .get_ro = mmc_gpio_get_ro,
.start_signal_voltage_switch = msdc_ops_switch_volt,
.card_busy = msdc_card_busy,
.execute_tuning = msdc_execute_tuning,
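
The mtk-sd hunk swaps an open-coded regulator_set_voltage() for the core
helper, which derives the voltage window from ios->signal_voltage itself.
A sketch of a minimal voltage-switch callback built on the same helper
(demo_ name illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mmc/host.h>

static int demo_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret;

	if (IS_ERR(mmc->supply.vqmmc))
		return 0;			/* no vqmmc: nothing to switch */

	/* maps MMC_SIGNAL_VOLTAGE_180/330 to the regulator window */
	ret = mmc_regulator_set_vqmmc(mmc, ios);
	if (ret)
		dev_dbg(mmc_dev(mmc), "vqmmc switch failed: %d\n", ret);

	return ret;
}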
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index d110f9e98c4b..3d1ea5e0e549 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -307,9 +307,6 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
enum dma_transfer_direction slave_dirn;
int i, nents;
- if (data->flags & MMC_DATA_STREAM)
- nob = 0xffff;
-
host->data = data;
data->bytes_xfered = 0;
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 660170cd04d9..85bbebfde02e 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -74,7 +74,6 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
const u32 *voltage_ranges;
int num_ranges;
int i;
- int ret = -EINVAL;
if (dev->platform_data || !np)
return dev->platform_data;
@@ -97,7 +96,6 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]),
be32_to_cpu(voltage_ranges[j + 1]));
if (!mask) {
- ret = -EINVAL;
dev_err(dev, "OF: voltage-range #%d is invalid\n", i);
goto err_ocr;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index f6e4d9718035..f9ac3bb5d617 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -503,8 +503,11 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
host->pbias = devm_regulator_get_optional(host->dev, "pbias");
if (IS_ERR(host->pbias)) {
ret = PTR_ERR(host->pbias);
- if ((ret != -ENODEV) && host->dev->of_node)
+ if ((ret != -ENODEV) && host->dev->of_node) {
+ dev_err(host->dev,
+ "SD card detect fail? enable CONFIG_REGULATOR_PBIAS\n");
return ret;
+ }
dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
PTR_ERR(host->pbias));
host->pbias = NULL;
@@ -2159,7 +2162,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
&rx_req, &pdev->dev, "rx");
if (!host->rx_chan) {
- dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+ dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel\n");
ret = -ENXIO;
goto err_irq;
}
@@ -2169,7 +2172,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
&tx_req, &pdev->dev, "tx");
if (!host->tx_chan) {
- dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+ dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel\n");
ret = -ENXIO;
goto err_irq;
}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index da824772bbb4..86fac3e86833 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -191,9 +191,6 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
host->data = data;
- if (data->flags & MMC_DATA_STREAM)
- nob = 0xffff;
-
writel(nob, host->base + MMC_NOB);
writel(data->blksz, host->base + MMC_BLKLEN);
@@ -443,9 +440,6 @@ static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
if (mrq->data->flags & MMC_DATA_WRITE)
cmdat |= CMDAT_WRITE;
-
- if (mrq->data->flags & MMC_DATA_STREAM)
- cmdat |= CMDAT_STREAM;
}
pxamci_start_cmd(host, mrq->cmd, cmdat);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 6291d5042ef2..39814f3dc96f 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1014,8 +1014,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
if (host->bus_width == MMC_BUS_WIDTH_4)
dcon |= S3C2410_SDIDCON_WIDEBUS;
- if (!(data->flags & MMC_DATA_STREAM))
- dcon |= S3C2410_SDIDCON_BLOCKMODE;
+ dcon |= S3C2410_SDIDCON_BLOCKMODE;
if (data->flags & MMC_DATA_WRITE) {
dcon |= S3C2410_SDIDCON_TXAFTERRESP;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index a5cda926d38e..bed6a494f52c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#endif
+
#include "sdhci.h"
enum {
@@ -75,7 +80,6 @@ struct sdhci_acpi_host {
const struct sdhci_acpi_slot *slot;
struct platform_device *pdev;
bool use_runtime_pm;
- bool dma_setup;
};
static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
@@ -83,33 +87,6 @@ static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
return c->slot && (c->slot->flags & flag);
}
-static int sdhci_acpi_enable_dma(struct sdhci_host *host)
-{
- struct sdhci_acpi_host *c = sdhci_priv(host);
- struct device *dev = &c->pdev->dev;
- int err = -1;
-
- if (c->dma_setup)
- return 0;
-
- if (host->flags & SDHCI_USE_64_BIT_DMA) {
- if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) {
- host->flags &= ~SDHCI_USE_64_BIT_DMA;
- } else {
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (err)
- dev_warn(dev, "Failed to set 64-bit DMA mask\n");
- }
- }
-
- if (err)
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
- c->dma_setup = !err;
-
- return err;
-}
-
static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
u8 reg;
@@ -127,7 +104,6 @@ static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
static const struct sdhci_ops sdhci_acpi_ops_dflt = {
.set_clock = sdhci_set_clock,
- .enable_dma = sdhci_acpi_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -135,7 +111,6 @@ static const struct sdhci_ops sdhci_acpi_ops_dflt = {
static const struct sdhci_ops sdhci_acpi_ops_int = {
.set_clock = sdhci_set_clock,
- .enable_dma = sdhci_acpi_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -146,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
+#ifdef CONFIG_X86
+
+static bool sdhci_acpi_byt(void)
+{
+ static const struct x86_cpu_id byt[] = {
+ { X86_VENDOR_INTEL, 6, 0x37 },
+ {}
+ };
+
+ return x86_match_cpu(byt);
+}
+
+#define BYT_IOSF_SCCEP 0x63
+#define BYT_IOSF_OCP_NETCTRL0 0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
+
+static void sdhci_acpi_byt_setting(struct device *dev)
+{
+ u32 val = 0;
+
+ if (!sdhci_acpi_byt())
+ return;
+
+ if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
+ &val)) {
+ dev_err(dev, "%s read error\n", __func__);
+ return;
+ }
+
+ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+ return;
+
+ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+ if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
+ val)) {
+ dev_err(dev, "%s write error\n", __func__);
+ return;
+ }
+
+ dev_dbg(dev, "%s completed\n", __func__);
+}
+
+static bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ if (!sdhci_acpi_byt())
+ return false;
+
+ if (!iosf_mbi_available())
+ return true;
+
+ sdhci_acpi_byt_setting(dev);
+
+ return false;
+}
+
+#else
+
+static inline void sdhci_acpi_byt_setting(struct device *dev)
+{
+}
+
+static inline bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ return false;
+}
+
+#endif
+
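
The CONFIG_X86 block above pairs the real helpers with empty static inline
stubs so that callers stay free of #ifdefs. The shape, sketched with a
hypothetical feature name:

#include <linux/device.h>

#ifdef CONFIG_DEMO_FEATURE
static void demo_tune(struct device *dev)
{
	/* real work, compiled only when the feature is enabled */
}
#else
static inline void demo_tune(struct device *dev)
{
	/* no-op stub: callers don't need their own #ifdef */
}
#endif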
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -264,6 +308,17 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.probe_slot = sdhci_acpi_sd_probe_slot,
};
+static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+ .quirks2 = SDHCI_QUIRK2_NO_1_8_V,
+ .caps = MMC_CAP_NONREMOVABLE,
+};
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+ .caps = MMC_CAP_NONREMOVABLE,
+};
+
struct sdhci_acpi_uid_slot {
const char *hid;
const char *uid;
@@ -284,6 +339,8 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0FFF" , "3" , &sdhci_acpi_slot_int_sd },
{ "PNP0D40" },
+ { "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
+ { "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
{ },
};
@@ -298,6 +355,8 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "INT3436" },
{ "INT344D" },
{ "PNP0D40" },
+ { "QCOM8051" },
+ { "QCOM8052" },
{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
@@ -337,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
+ if (sdhci_acpi_byt_defer(dev))
+ return -EPROBE_DEFER;
+
hid = acpi_device_hid(device);
uid = device->pnp.unique_id;
@@ -418,6 +480,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
}
+ device_enable_async_suspend(dev);
+
return 0;
err_free:
@@ -460,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_resume_host(c->host);
}
@@ -483,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_runtime_resume_host(c->host);
}
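
sdhci_acpi_byt_defer() turns "IOSF MBI not loaded yet" into -EPROBE_DEFER,
so the device is retried once its dependency appears; the resume paths
re-apply the tuning because firmware may have clobbered it. The generic
defer idiom, as a sketch with illustrative demo_* names:

#include <linux/errno.h>
#include <linux/platform_device.h>

static bool demo_dependency_ready(void)
{
	return true;	/* stand-in for e.g. iosf_mbi_available() */
}

static int demo_probe(struct platform_device *pdev)
{
	/* Defer rather than fail: the driver core retries this probe
	 * after other drivers have loaded. */
	if (!demo_dependency_ready())
		return -EPROBE_DEFER;

	return 0;
}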
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 1c65d4690e70..4a6a1d1386cb 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -74,7 +74,7 @@ static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg)
static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
+ struct bcm2835_sdhci *bcm2835_host = sdhci_pltfm_priv(pltfm_host);
u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow :
bcm2835_sdhci_readl(host, reg & ~3);
u32 word_num = (reg >> 1) & 1;
@@ -152,20 +152,12 @@ static int bcm2835_sdhci_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
int ret;
- host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata,
+ sizeof(*bcm2835_host));
if (IS_ERR(host))
return PTR_ERR(host);
- bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host),
- GFP_KERNEL);
- if (!bcm2835_host) {
- dev_err(mmc_dev(host->mmc),
- "failed to allocate bcm2835_sdhci\n");
- return -ENOMEM;
- }
-
pltfm_host = sdhci_priv(host);
- pltfm_host->priv = bcm2835_host;
pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pltfm_host->clk)) {
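
This bcm2835 change is the first of many conversions in this series from a
separately allocated private struct hung off pltfm_host->priv to private
data carved out of the sdhci_pltfm_init() allocation and fetched with
sdhci_pltfm_priv(). A sketch of the converted probe shape (demo_* names
illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include "sdhci-pltfm.h"

struct demo_priv {
	struct clk *clk;
};

static const struct sdhci_pltfm_data demo_pdata;

static int demo_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct demo_priv *priv;

	/* one allocation covers sdhci_host, pltfm_host and demo_priv */
	host = sdhci_pltfm_init(pdev, &demo_pdata, sizeof(*priv));
	if (IS_ERR(host))
		return PTR_ERR(host);

	pltfm_host = sdhci_priv(host);
	priv = sdhci_pltfm_priv(pltfm_host);	/* replaces pltfm_host->priv */

	return sdhci_add_host(host);
}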
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index f25f29253595..2d300d87cda8 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -260,7 +260,7 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 val = readl(host->ioaddr + reg);
if (unlikely(reg == SDHCI_PRESENT_STATE)) {
@@ -338,7 +338,7 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 data;
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
@@ -388,7 +388,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u16 ret = 0;
u32 val;
@@ -448,7 +448,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 new_val = 0;
switch (reg) {
@@ -556,7 +556,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 new_val;
u32 mask;
@@ -633,7 +633,7 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
unsigned int host_clock = pltfm_host->clock;
int pre_div = 2;
int div = 1;
@@ -692,7 +692,7 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
switch (boarddata->wp_type) {
@@ -794,7 +794,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
unsigned int uhs)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
struct pinctrl_state *pinctrl;
dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
@@ -864,7 +864,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
u32 m;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
/* disable ddr mode and disable HS400 mode */
@@ -917,7 +917,7 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
return esdhc_is_usdhc(imx_data) ? 1 << 28 : 1 << 27;
}
@@ -925,7 +925,7 @@ static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
/* use maximum timeout counter */
sdhci_writeb(host, esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
@@ -1100,21 +1100,17 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
int err;
struct pltfm_imx_data *imx_data;
- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata,
+ sizeof(*imx_data));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- imx_data = devm_kzalloc(&pdev->dev, sizeof(*imx_data), GFP_KERNEL);
- if (!imx_data) {
- err = -ENOMEM;
- goto free_sdhci;
- }
+ imx_data = sdhci_pltfm_priv(pltfm_host);
imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *)
pdev->id_entry->driver_data;
- pltfm_host->priv = imx_data;
imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx_data->clk_ipg)) {
@@ -1241,7 +1237,7 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
pm_runtime_get_sync(&pdev->dev);
@@ -1264,7 +1260,7 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_runtime_suspend_host(host);
@@ -1282,7 +1278,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
if (!sdhci_sdio_irq_enabled(host)) {
clk_prepare_enable(imx_data->clk_per);
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 3b423b0ad8e7..1110f73b08aa 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -26,6 +26,7 @@ struct sdhci_iproc_data {
const struct sdhci_pltfm_data *pdata;
u32 caps;
u32 caps1;
+ u32 mmc_caps;
};
struct sdhci_iproc_host {
@@ -165,9 +166,25 @@ static const struct sdhci_iproc_data iproc_data = {
.pdata = &sdhci_iproc_pltfm_data,
.caps = 0x05E90000,
.caps1 = 0x00000064,
+ .mmc_caps = MMC_CAP_1_8V_DDR,
+};
+
+static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_MISSING_CAPS,
+ .ops = &sdhci_iproc_ops,
+};
+
+static const struct sdhci_iproc_data bcm2835_data = {
+ .pdata = &sdhci_bcm2835_pltfm_data,
+ .caps = SDHCI_CAN_VDD_330,
+ .caps1 = 0x00000000,
+ .mmc_caps = 0x00000000,
};
static const struct of_device_id sdhci_iproc_of_match[] = {
+ { .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
{ .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_data },
{ }
};
@@ -199,32 +216,37 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
mmc_of_parse(host->mmc);
sdhci_get_of_property(pdev);
- /* Enable EMMC 1/8V DDR capable */
- host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ host->mmc->caps |= iproc_host->data->mmc_caps;
pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pltfm_host->clk)) {
ret = PTR_ERR(pltfm_host->clk);
goto err;
}
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable host clk\n");
+ goto err;
+ }
if (iproc_host->data->pdata->quirks & SDHCI_QUIRK_MISSING_CAPS) {
host->caps = iproc_host->data->caps;
host->caps1 = iproc_host->data->caps1;
}
- return sdhci_add_host(host);
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_clk;
+
+ return 0;
+err_clk:
+ clk_disable_unprepare(pltfm_host->clk);
err:
sdhci_pltfm_free(pdev);
return ret;
}
-static int sdhci_iproc_remove(struct platform_device *pdev)
-{
- return sdhci_pltfm_unregister(pdev);
-}
-
static struct platform_driver sdhci_iproc_driver = {
.driver = {
.name = "sdhci-iproc",
@@ -232,7 +254,7 @@ static struct platform_driver sdhci_iproc_driver = {
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_iproc_probe,
- .remove = sdhci_iproc_remove,
+ .remove = sdhci_pltfm_unregister,
};
module_platform_driver(sdhci_iproc_driver);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4695bee203ea..0653fe730150 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -60,7 +60,6 @@ struct sdhci_msm_host {
struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
struct mmc_host *mmc;
- struct sdhci_pltfm_data sdhci_msm_pdata;
};
/* Platform specific tuning */
@@ -418,7 +417,7 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
-static struct sdhci_ops sdhci_msm_ops = {
+static const struct sdhci_ops sdhci_msm_ops = {
.platform_execute_tuning = sdhci_msm_execute_tuning,
.reset = sdhci_reset,
.set_clock = sdhci_set_clock,
@@ -426,6 +425,12 @@ static struct sdhci_ops sdhci_msm_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static const struct sdhci_pltfm_data sdhci_msm_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE,
+ .ops = &sdhci_msm_ops,
+};
+
static int sdhci_msm_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
@@ -437,17 +442,12 @@ static int sdhci_msm_probe(struct platform_device *pdev)
u32 core_version, caps;
u8 core_major;
- msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
- if (!msm_host)
- return -ENOMEM;
-
- msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
- host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- pltfm_host->priv = msm_host;
+ msm_host = sdhci_pltfm_priv(pltfm_host);
msm_host->mmc = host->mmc;
msm_host->pdev = pdev;
@@ -522,9 +522,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
/* Set HC_MODE_EN bit in HC_MODE register */
writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
- host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
- host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
-
host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
@@ -570,16 +567,16 @@ static int sdhci_msm_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
0xffffffff);
sdhci_remove_host(host, dead);
- sdhci_pltfm_free(pdev);
clk_disable_unprepare(msm_host->clk);
clk_disable_unprepare(msm_host->pclk);
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
+ sdhci_pltfm_free(pdev);
return 0;
}
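
Note the reordering in sdhci_msm_remove(): with msm_host now embedded in the
sdhci_pltfm_init() allocation, sdhci_pltfm_free() frees it too, so every
access to msm_host (the clock teardown) must happen first. Sketch of the
constraint (demo_* names illustrative):

#include <linux/clk.h>
#include <linux/platform_device.h>
#include "sdhci-pltfm.h"

struct demo_priv {
	struct clk *clk;
};

static int demo_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct demo_priv *priv = sdhci_pltfm_priv(pltfm_host);

	sdhci_remove_host(host, 0);
	clk_disable_unprepare(priv->clk);	/* priv is still valid here */
	sdhci_pltfm_free(pdev);			/* frees host and priv last */
	return 0;
}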
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 75379cb0fb35..2e482b13d25e 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/phy/phy.h>
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
@@ -32,9 +33,11 @@
/**
* struct sdhci_arasan_data
* @clk_ahb: Pointer to the AHB clock
+ * @phy: Pointer to the generic phy
*/
struct sdhci_arasan_data {
struct clk *clk_ahb;
+ struct phy *phy;
};
static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
@@ -81,13 +84,22 @@ static int sdhci_arasan_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_suspend_host(host);
if (ret)
return ret;
+ if (!IS_ERR(sdhci_arasan->phy)) {
+ ret = phy_power_off(sdhci_arasan->phy);
+ if (ret) {
+ dev_err(dev, "Cannot power off phy.\n");
+ sdhci_resume_host(host);
+ return ret;
+ }
+ }
+
clk_disable(pltfm_host->clk);
clk_disable(sdhci_arasan->clk_ahb);
@@ -106,7 +118,7 @@ static int sdhci_arasan_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = clk_enable(sdhci_arasan->clk_ahb);
@@ -118,10 +130,17 @@ static int sdhci_arasan_resume(struct device *dev)
ret = clk_enable(pltfm_host->clk);
if (ret) {
dev_err(dev, "Cannot enable SD clock.\n");
- clk_disable(sdhci_arasan->clk_ahb);
return ret;
}
+ if (!IS_ERR(sdhci_arasan->phy)) {
+ ret = phy_power_on(sdhci_arasan->phy);
+ if (ret) {
+ dev_err(dev, "Cannot power on phy.\n");
+ return ret;
+ }
+ }
+
return sdhci_resume_host(host);
}
#endif /* ! CONFIG_PM_SLEEP */
@@ -137,27 +156,32 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
- sdhci_arasan = devm_kzalloc(&pdev->dev, sizeof(*sdhci_arasan),
- GFP_KERNEL);
- if (!sdhci_arasan)
- return -ENOMEM;
+ host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata,
+ sizeof(*sdhci_arasan));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb");
if (IS_ERR(sdhci_arasan->clk_ahb)) {
dev_err(&pdev->dev, "clk_ahb clock not found.\n");
- return PTR_ERR(sdhci_arasan->clk_ahb);
+ ret = PTR_ERR(sdhci_arasan->clk_ahb);
+ goto err_pltfm_free;
}
clk_xin = devm_clk_get(&pdev->dev, "clk_xin");
if (IS_ERR(clk_xin)) {
dev_err(&pdev->dev, "clk_xin clock not found.\n");
- return PTR_ERR(clk_xin);
+ ret = PTR_ERR(clk_xin);
+ goto err_pltfm_free;
}
ret = clk_prepare_enable(sdhci_arasan->clk_ahb);
if (ret) {
dev_err(&pdev->dev, "Unable to enable AHB clock.\n");
- return ret;
+ goto err_pltfm_free;
}
ret = clk_prepare_enable(clk_xin);
@@ -166,20 +190,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto clk_dis_ahb;
}
- host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0);
- if (IS_ERR(host)) {
- ret = PTR_ERR(host);
- goto clk_disable_all;
- }
-
- if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-4.9a")) {
- host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
- host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
- }
-
sdhci_get_of_property(pdev);
- pltfm_host = sdhci_priv(host);
- pltfm_host->priv = sdhci_arasan;
pltfm_host->clk = clk_xin;
ret = mmc_of_parse(host->mmc);
@@ -188,31 +199,69 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto clk_disable_all;
}
+ sdhci_arasan->phy = ERR_PTR(-ENODEV);
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "arasan,sdhci-5.1")) {
+ sdhci_arasan->phy = devm_phy_get(&pdev->dev,
+ "phy_arasan");
+ if (IS_ERR(sdhci_arasan->phy)) {
+ ret = PTR_ERR(sdhci_arasan->phy);
+ dev_err(&pdev->dev, "No phy for arasan,sdhci-5.1.\n");
+ goto clk_disable_all;
+ }
+
+ ret = phy_init(sdhci_arasan->phy);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_init err.\n");
+ goto clk_disable_all;
+ }
+
+ ret = phy_power_on(sdhci_arasan->phy);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_power_on err.\n");
+ goto err_phy_power;
+ }
+ }
+
ret = sdhci_add_host(host);
if (ret)
- goto err_pltfm_free;
+ goto err_add_host;
return 0;
-err_pltfm_free:
- sdhci_pltfm_free(pdev);
+err_add_host:
+ if (!IS_ERR(sdhci_arasan->phy))
+ phy_power_off(sdhci_arasan->phy);
+err_phy_power:
+ if (!IS_ERR(sdhci_arasan->phy))
+ phy_exit(sdhci_arasan->phy);
clk_disable_all:
clk_disable_unprepare(clk_xin);
clk_dis_ahb:
clk_disable_unprepare(sdhci_arasan->clk_ahb);
-
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
return ret;
}
static int sdhci_arasan_remove(struct platform_device *pdev)
{
+ int ret;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct clk *clk_ahb = sdhci_arasan->clk_ahb;
- clk_disable_unprepare(sdhci_arasan->clk_ahb);
+ if (!IS_ERR(sdhci_arasan->phy)) {
+ phy_power_off(sdhci_arasan->phy);
+ phy_exit(sdhci_arasan->phy);
+ }
- return sdhci_pltfm_unregister(pdev);
+ ret = sdhci_pltfm_unregister(pdev);
+
+ clk_disable_unprepare(clk_ahb);
+
+ return ret;
}
static const struct of_device_id sdhci_arasan_of_match[] = {
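
The arasan,sdhci-5.1 support adds a generic phy with the usual lifecycle:
phy_init() then phy_power_on() on the way up, phy_power_off() then
phy_exit() in strict reverse on every error and teardown path. A condensed
sketch (demo_* names illustrative):

#include <linux/phy/phy.h>

static int demo_enable_phy(struct phy *phy)
{
	int ret;

	ret = phy_init(phy);
	if (ret)
		return ret;

	ret = phy_power_on(phy);
	if (ret)
		phy_exit(phy);		/* undo init on failure */

	return ret;
}

static void demo_disable_phy(struct phy *phy)
{
	phy_power_off(phy);		/* reverse of bring-up order */
	phy_exit(phy);
}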
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 9cb86fb25976..2703aa90d018 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -58,7 +59,7 @@ static int sdhci_at91_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_at91_priv *priv = pltfm_host->priv;
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_runtime_suspend_host(host);
@@ -74,7 +75,7 @@ static int sdhci_at91_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_at91_priv *priv = pltfm_host->priv;
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = clk_prepare_enable(priv->mainck);
@@ -124,11 +125,12 @@ static int sdhci_at91_probe(struct platform_device *pdev)
return -EINVAL;
soc_data = match->data;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "unable to allocate private data\n");
- return -ENOMEM;
- }
+ host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
if (IS_ERR(priv->mainck)) {
@@ -148,10 +150,6 @@ static int sdhci_at91_probe(struct platform_device *pdev)
return PTR_ERR(priv->gck);
}
- host = sdhci_pltfm_init(pdev, soc_data, 0);
- if (IS_ERR(host))
- return PTR_ERR(host);
-
/*
* The mult clock is provided as a generated clock by the PMC
* controller. In order to set the rate of gck, we have to get the
@@ -191,9 +189,6 @@ static int sdhci_at91_probe(struct platform_device *pdev)
clk_prepare_enable(priv->mainck);
clk_prepare_enable(priv->gck);
- pltfm_host = sdhci_priv(host);
- pltfm_host->priv = priv;
-
ret = mmc_of_parse(host->mmc);
if (ret)
goto clocks_disable_unprepare;
@@ -210,6 +205,25 @@ static int sdhci_at91_probe(struct platform_device *pdev)
if (ret)
goto pm_runtime_disable;
+ /*
+ * When calling sdhci_runtime_suspend_host(), the sdhci layer makes
+ * the assumption that all the clocks of the controller are disabled.
+ * It means we can't get an irq from it while it is runtime suspended.
+ * For that reason, waking up on a card detect irq from the controller
+ * is not supported.
+ * If we want to use runtime PM and to be able to wake up on card
+ * insertion, we have to use a GPIO for the card detection or we can
+ * use polling. Be aware that using polling will resume/suspend the
+ * controller between each attempt.
+ * Disable SDHCI_QUIRK_BROKEN_CARD_DETECTION to be sure nobody tries
+ * to enable polling via device tree with broken-cd property.
+ */
+ if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) {
+ host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ }
+
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -231,7 +245,10 @@ static int sdhci_at91_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_at91_priv *priv = pltfm_host->priv;
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ struct clk *gck = priv->gck;
+ struct clk *hclock = priv->hclock;
+ struct clk *mainck = priv->mainck;
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -239,9 +256,9 @@ static int sdhci_at91_remove(struct platform_device *pdev)
sdhci_pltfm_unregister(pdev);
- clk_disable_unprepare(priv->gck);
- clk_disable_unprepare(priv->hclock);
- clk_disable_unprepare(priv->mainck);
+ clk_disable_unprepare(gck);
+ clk_disable_unprepare(hclock);
+ clk_disable_unprepare(mainck);
return 0;
}
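
sdhci_at91_remove() hits the same use-after-free hazard as sdhci_msm_remove()
above, solved here by copying the clock pointers to locals instead of
reordering: sdhci_pltfm_unregister() frees the allocation that now contains
priv, so priv must not be dereferenced afterwards. Sketch (demo_* names
illustrative):

#include <linux/clk.h>
#include <linux/platform_device.h>
#include "sdhci-pltfm.h"

struct demo_priv {
	struct clk *clk;
};

static int demo_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct demo_priv *priv = sdhci_pltfm_priv(sdhci_priv(host));
	struct clk *clk = priv->clk;	/* copy out before priv is freed */

	sdhci_pltfm_unregister(pdev);	/* frees host, pltfm_host, priv */
	clk_disable_unprepare(clk);	/* safe: uses the local copy */
	return 0;
}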
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 83b1226471c1..3f34d354f1fc 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -49,7 +49,7 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
int spec_reg, u32 value)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = pltfm_host->priv;
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 ret;
/*
@@ -354,7 +354,7 @@ static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = pltfm_host->priv;
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
bool applicable;
dma_addr_t dmastart;
dma_addr_t dmanow;
@@ -404,7 +404,7 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = pltfm_host->priv;
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int pre_div = 1;
int div = 1;
u32 temp;
@@ -569,15 +569,12 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
u16 host_ver;
pltfm_host = sdhci_priv(host);
- esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc),
- GFP_KERNEL);
+ esdhc = sdhci_pltfm_priv(pltfm_host);
host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
SDHCI_VENDOR_VER_SHIFT;
esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
-
- pltfm_host->priv = esdhc;
}
static int sdhci_esdhc_probe(struct platform_device *pdev)
@@ -591,9 +588,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
np = pdev->dev.of_node;
if (of_get_property(np, "little-endian", NULL))
- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
+ sizeof(struct sdhci_esdhc));
else
- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
+ sizeof(struct sdhci_esdhc));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -603,7 +602,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
pltfm_host = sdhci_priv(host);
- esdhc = pltfm_host->priv;
+ esdhc = sdhci_pltfm_priv(pltfm_host);
if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index df3b8eced8c4..79e19017343e 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_idx = 0;
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
@@ -1173,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_APL_EMMC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
@@ -1302,7 +1327,6 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot;
struct pci_dev *pdev;
- int ret = -1;
slot = sdhci_priv(host);
pdev = slot->chip->pdev;
@@ -1314,20 +1338,6 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
"doesn't fully claim to support it.\n");
}
- if (host->flags & SDHCI_USE_64_BIT_DMA) {
- if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) {
- host->flags &= ~SDHCI_USE_64_BIT_DMA;
- } else {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret)
- dev_warn(&pdev->dev, "Failed to set 64-bit DMA mask\n");
- }
- }
- if (ret)
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
pci_set_master(pdev);
return 0;
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d1a0b4db60db..89e7151684a1 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -28,6 +28,9 @@
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
+#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
+#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
+#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
new file mode 100644
index 000000000000..059df707a2fe
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -0,0 +1,257 @@
+/*
+ * Support for SDHCI platform devices on Microchip PIC32.
+ *
+ * Copyright (C) 2015 Microchip
+ * Andrei Pistirica, Paul Thacker
+ *
+ * Inspired by sdhci-pltfm.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+#include <linux/platform_data/sdhci-pic32.h>
+
+#define SDH_SHARED_BUS_CTRL 0x000000E0
+#define SDH_SHARED_BUS_NR_CLK_PINS_MASK 0x7
+#define SDH_SHARED_BUS_NR_IRQ_PINS_MASK 0x30
+#define SDH_SHARED_BUS_CLK_PINS 0x10
+#define SDH_SHARED_BUS_IRQ_PINS 0x14
+#define SDH_CAPS_SDH_SLOT_TYPE_MASK 0xC0000000
+#define SDH_SLOT_TYPE_REMOVABLE 0x0
+#define SDH_SLOT_TYPE_EMBEDDED 0x1
+#define SDH_SLOT_TYPE_SHARED_BUS 0x2
+#define SDHCI_CTRL_CDSSEL 0x80
+#define SDHCI_CTRL_CDTLVL 0x40
+
+#define ADMA_FIFO_RD_THSHLD 512
+#define ADMA_FIFO_WR_THSHLD 512
+
+struct pic32_sdhci_priv {
+ struct platform_device *pdev;
+ struct clk *sys_clk;
+ struct clk *base_clk;
+};
+
+static unsigned int pic32_sdhci_get_max_clock(struct sdhci_host *host)
+{
+ struct pic32_sdhci_priv *sdhci_pdata = sdhci_priv(host);
+
+ return clk_get_rate(sdhci_pdata->base_clk);
+}
+
+static void pic32_sdhci_set_bus_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ } else {
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+
+ /* CD select and test bits must be set for errata workaround. */
+ ctrl &= ~SDHCI_CTRL_CDTLVL;
+ ctrl |= SDHCI_CTRL_CDSSEL;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static unsigned int pic32_sdhci_get_ro(struct sdhci_host *host)
+{
+ /*
+ * The SDHCI_WRITE_PROTECT bit is unstable on current hardware so we
+ * can't depend on its value in any way.
+ */
+ return 0;
+}
+
+static const struct sdhci_ops pic32_sdhci_ops = {
+ .get_max_clock = pic32_sdhci_get_max_clock,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = pic32_sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .get_ro = pic32_sdhci_get_ro,
+};
+
+static struct sdhci_pltfm_data sdhci_pic32_pdata = {
+ .ops = &pic32_sdhci_ops,
+ .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
+ .quirks2 = SDHCI_QUIRK2_NO_1_8_V,
+};
+
+static void pic32_sdhci_shared_bus(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ u32 bus = readl(host->ioaddr + SDH_SHARED_BUS_CTRL);
+ u32 clk_pins = (bus & SDH_SHARED_BUS_NR_CLK_PINS_MASK) >> 0;
+ u32 irq_pins = (bus & SDH_SHARED_BUS_NR_IRQ_PINS_MASK) >> 4;
+
+ /* select first clock */
+ if (clk_pins & 1)
+ bus |= (1 << SDH_SHARED_BUS_CLK_PINS);
+
+ /* select first interrupt */
+ if (irq_pins & 1)
+ bus |= (1 << SDH_SHARED_BUS_IRQ_PINS);
+
+ writel(bus, host->ioaddr + SDH_SHARED_BUS_CTRL);
+}
+
+static int pic32_sdhci_probe_platform(struct platform_device *pdev,
+ struct pic32_sdhci_priv *pdata)
+{
+ int ret = 0;
+ u32 caps_slot_type;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ /* Check card slot connected on shared bus. */
+ host->caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
+ caps_slot_type = (host->caps & SDH_CAPS_SDH_SLOT_TYPE_MASK) >> 30;
+ if (caps_slot_type == SDH_SLOT_TYPE_SHARED_BUS)
+ pic32_sdhci_shared_bus(pdev);
+
+ return ret;
+}
+
+static int pic32_sdhci_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct pic32_sdhci_priv *sdhci_pdata;
+ struct pic32_sdhci_platform_data *plat_data;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_pic32_pdata,
+ sizeof(struct pic32_sdhci_priv));
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto err;
+ }
+
+ pltfm_host = sdhci_priv(host);
+ sdhci_pdata = sdhci_pltfm_priv(pltfm_host);
+
+ plat_data = pdev->dev.platform_data;
+ if (plat_data && plat_data->setup_dma) {
+ ret = plat_data->setup_dma(ADMA_FIFO_RD_THSHLD,
+ ADMA_FIFO_WR_THSHLD);
+ if (ret)
+ goto err_host;
+ }
+
+ sdhci_pdata->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(sdhci_pdata->sys_clk)) {
+ ret = PTR_ERR(sdhci_pdata->sys_clk);
+ dev_err(&pdev->dev, "Error getting clock\n");
+ goto err_host;
+ }
+
+ ret = clk_prepare_enable(sdhci_pdata->sys_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Error enabling clock\n");
+ goto err_host;
+ }
+
+ sdhci_pdata->base_clk = devm_clk_get(&pdev->dev, "base_clk");
+ if (IS_ERR(sdhci_pdata->base_clk)) {
+ ret = PTR_ERR(sdhci_pdata->base_clk);
+ dev_err(&pdev->dev, "Error getting clock\n");
+ goto err_sys_clk;
+ }
+
+ ret = clk_prepare_enable(sdhci_pdata->base_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Error enabling clock\n");
+ goto err_sys_clk; /* base_clk was not enabled yet */
+ }
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_base_clk;
+
+ ret = pic32_sdhci_probe_platform(pdev, sdhci_pdata);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to probe platform!\n");
+ goto err_base_clk;
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding host\n");
+ goto err_base_clk;
+ }
+
+ dev_info(&pdev->dev, "Successfully added sdhci host\n");
+ return 0;
+
+err_base_clk:
+ clk_disable_unprepare(sdhci_pdata->base_clk);
+err_sys_clk:
+ clk_disable_unprepare(sdhci_pdata->sys_clk);
+err_host:
+ sdhci_pltfm_free(pdev);
+err:
+ dev_err(&pdev->dev, "pic32-sdhci probe failed: %d\n", ret);
+ return ret;
+}
+
+static int pic32_sdhci_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct pic32_sdhci_priv *sdhci_pdata = sdhci_priv(host);
+ u32 scratch;
+
+ scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+ sdhci_remove_host(host, scratch == (u32)~0);
+ clk_disable_unprepare(sdhci_pdata->base_clk);
+ clk_disable_unprepare(sdhci_pdata->sys_clk);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id pic32_sdhci_id_table[] = {
+ { .compatible = "microchip,pic32mzda-sdhci" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pic32_sdhci_id_table);
+
+static struct platform_driver pic32_sdhci_driver = {
+ .driver = {
+ .name = "pic32-sdhci",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pic32_sdhci_id_table),
+ },
+ .probe = pic32_sdhci_probe,
+ .remove = pic32_sdhci_remove,
+};
+
+module_platform_driver(pic32_sdhci_driver);
+
+MODULE_DESCRIPTION("Microchip PIC32 SDHCI driver");
+MODULE_AUTHOR("Pistirica Sorin Andrei & Sandeep Sheriker");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 04bc2481e5c3..d38053bf9e4d 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -23,7 +23,6 @@ struct sdhci_pltfm_data {
struct sdhci_pltfm_host {
struct clk *clk;
- void *priv; /* to handle quirks across io-accessor calls */
/* migrate from sdhci_of_host */
unsigned int clock;
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index beffd8615489..1d8dd3540636 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -177,7 +177,6 @@ static int sdhci_pxav2_probe(struct platform_device *pdev)
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- pltfm_host->priv = NULL;
clk = clk_get(dev, "PXA-SDHCLK");
if (IS_ERR(clk)) {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index f5edf9d3a18a..30132500aa1c 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -132,11 +132,15 @@ static int armada_38x_quirks(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
struct resource *res;
host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"conf-sdio3");
if (res) {
@@ -150,7 +154,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
* Configuration register, if the adjustment is not done,
* remove them from the capabilities.
*/
- host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
@@ -161,7 +164,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
* controller has different capabilities than the ones shown
* in its registers
*/
- host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
if (of_property_read_bool(np, "no-1-8-v")) {
host->caps &= ~SDHCI_CAN_VDD_180;
host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
@@ -201,7 +203,7 @@ static void pxav3_reset(struct sdhci_host *host, u8 mask)
static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
u16 tmp;
int count;
@@ -250,7 +252,7 @@ static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
u16 ctrl_2;
/*
@@ -307,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
__func__, uhs, ctrl_2);
}
+static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+ u8 pwr = host->pwr;
+
+ sdhci_set_power(host, mode, vdd);
+
+ if (host->pwr == pwr)
+ return;
+
+ if (host->pwr == 0)
+ vdd = 0;
+
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ spin_lock_irq(&host->lock);
+ }
+}
+
static const struct sdhci_ops pxav3_sdhci_ops = {
.set_clock = sdhci_set_clock,
+ .set_power = pxav3_set_power,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
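
pxav3_set_power() drops host->lock around mmc_regulator_set_ocr(): sdhci
calls ->set_power() under its spinlock, while regulator operations take
mutexes and may sleep. The unlock/relock bracket is the standard escape; a
generic sketch (demo_* names illustrative):

#include <linux/spinlock.h>

static void demo_sleeping_call(void)
{
	/* stand-in for e.g. a regulator operation */
}

static void demo_call_under_spinlock(spinlock_t *lock)
{
	spin_unlock_irq(lock);		/* leave atomic context... */
	demo_sleeping_call();		/* ...so this may sleep safely */
	spin_lock_irq(lock);		/* re-take before returning */
}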
@@ -370,16 +394,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
const struct of_device_id *match;
int ret;
- pxa = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_pxa), GFP_KERNEL);
- if (!pxa)
- return -ENOMEM;
-
- host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, sizeof(*pxa));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- pltfm_host->priv = pxa;
+ pxa = sdhci_pltfm_priv(pltfm_host);
pxa->clk_io = devm_clk_get(dev, "io");
if (IS_ERR(pxa->clk_io))
@@ -486,7 +506,7 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -535,7 +555,7 @@ static int sdhci_pxav3_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_runtime_suspend_host(host);
@@ -553,7 +573,7 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
clk_prepare_enable(pxa->clk_io);
if (!IS_ERR(pxa->clk_core))
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index 969c2b0d57fd..320e1c2f8853 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -251,7 +251,7 @@ static int sdhci_st_set_dll_for_clock(struct sdhci_host *host)
{
int ret = 0;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
if (host->clock > CLK_TO_CHECK_DLL_LOCK) {
st_mmcss_set_dll(pdata->top_ioaddr);
@@ -265,7 +265,7 @@ static void sdhci_st_set_uhs_signaling(struct sdhci_host *host,
unsigned int uhs)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
u16 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
int ret = 0;
@@ -357,10 +357,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
int ret = 0;
u16 host_version;
struct resource *res;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ struct reset_control *rstc;
clk = devm_clk_get(&pdev->dev, "mmc");
if (IS_ERR(clk)) {
@@ -368,19 +365,23 @@ static int sdhci_st_probe(struct platform_device *pdev)
return PTR_ERR(clk);
}
- pdata->rstc = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(pdata->rstc))
- pdata->rstc = NULL;
+ rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rstc))
+ rstc = NULL;
else
- reset_control_deassert(pdata->rstc);
+ reset_control_deassert(rstc);
- host = sdhci_pltfm_init(pdev, &sdhci_st_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_st_pdata, sizeof(*pdata));
if (IS_ERR(host)) {
dev_err(&pdev->dev, "Failed sdhci_pltfm_init\n");
ret = PTR_ERR(host);
goto err_pltfm_init;
}
+ pltfm_host = sdhci_priv(host);
+ pdata = sdhci_pltfm_priv(pltfm_host);
+ pdata->rstc = rstc;
+
ret = mmc_of_parse(host->mmc);
if (ret) {
dev_err(&pdev->dev, "Failed mmc_of_parse\n");
@@ -398,8 +399,6 @@ static int sdhci_st_probe(struct platform_device *pdev)
pdata->top_ioaddr = NULL;
}
- pltfm_host = sdhci_priv(host);
- pltfm_host->priv = pdata;
pltfm_host->clk = clk;
/* Configure the Arasan HC inside the flashSS */
@@ -427,8 +426,8 @@ err_out:
err_of:
sdhci_pltfm_free(pdev);
err_pltfm_init:
- if (pdata->rstc)
- reset_control_assert(pdata->rstc);
+ if (rstc)
+ reset_control_assert(rstc);
return ret;
}
@@ -437,13 +436,14 @@ static int sdhci_st_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
+ struct reset_control *rstc = pdata->rstc;
int ret;
ret = sdhci_pltfm_unregister(pdev);
- if (pdata->rstc)
- reset_control_assert(pdata->rstc);
+ if (rstc)
+ reset_control_assert(rstc);
return ret;
}
@@ -453,7 +453,7 @@ static int sdhci_st_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
int ret = sdhci_suspend_host(host);
if (ret)
@@ -471,7 +471,7 @@ static int sdhci_st_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
struct device_node *np = dev->of_node;
clk_prepare_enable(pltfm_host->clk);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 83c4bf7bc16c..bcc0de47fe7e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -12,6 +12,7 @@
*
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -42,12 +43,17 @@
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
+#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
+#define SDHCI_AUTO_CAL_START BIT(31)
+#define SDHCI_AUTO_CAL_ENABLE BIT(29)
+
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
#define NVQUIRK_ENABLE_SDR50 BIT(3)
#define NVQUIRK_ENABLE_SDR104 BIT(4)
#define NVQUIRK_ENABLE_DDR50 BIT(5)
+#define NVQUIRK_HAS_PADCALIB BIT(6)
struct sdhci_tegra_soc_data {
const struct sdhci_pltfm_data *pdata;
@@ -58,12 +64,13 @@ struct sdhci_tegra {
const struct sdhci_tegra_soc_data *soc_data;
struct gpio_desc *power_gpio;
bool ddr_signaling;
+ bool pad_calib_required;
};
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
@@ -99,7 +106,7 @@ static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
/* Seems like we're getting spurious timeout and crc errors, so
@@ -131,7 +138,7 @@ static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
u32 misc_ctrl, clk_ctrl;
@@ -147,10 +154,16 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
/* Advertise UHS modes as supported by host */
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
+ else
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
+ else
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
+ else
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
@@ -159,6 +172,9 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+ if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
+ tegra_host->pad_calib_required = true;
+
tegra_host->ddr_signaling = false;
}
@@ -181,27 +197,43 @@ static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
+static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
+{
+ u32 val;
+
+ mdelay(1);
+
+ val = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+ val |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
+ sdhci_writel(host, val, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+}
+
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
unsigned long host_clk;
if (!clock)
- return;
+ return sdhci_set_clock(host, clock);
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
clk_set_rate(pltfm_host->clk, host_clk);
host->max_clk = clk_get_rate(pltfm_host->clk);
- return sdhci_set_clock(host, clock);
+ sdhci_set_clock(host, clock);
+
+ if (tegra_host->pad_calib_required) {
+ tegra_sdhci_pad_autocalib(host);
+ tegra_host->pad_calib_required = false;
+ }
}
static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
unsigned timing)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
if (timing == MMC_TIMING_UHS_DDR50)
tegra_host->ddr_signaling = true;
@@ -264,6 +296,16 @@ static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
return mmc_send_tuning(host->mmc, opcode, NULL);
}
+static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+ if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
+ tegra_host->pad_calib_required = true;
+}
+
static const struct sdhci_ops tegra_sdhci_ops = {
.get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
@@ -273,6 +315,7 @@ static const struct sdhci_ops tegra_sdhci_ops = {
.reset = tegra_sdhci_reset,
.platform_execute_tuning = tegra_sdhci_execute_tuning,
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
+ .voltage_switch = tegra_sdhci_voltage_switch,
.get_max_clock = tegra_sdhci_get_max_clock,
};
@@ -306,7 +349,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
.pdata = &sdhci_tegra30_pdata,
.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_SDR104,
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_PADCALIB,
};
static const struct sdhci_ops tegra114_sdhci_ops = {
@@ -319,6 +363,7 @@ static const struct sdhci_ops tegra114_sdhci_ops = {
.reset = tegra_sdhci_reset,
.platform_execute_tuning = tegra_sdhci_execute_tuning,
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
+ .voltage_switch = tegra_sdhci_voltage_switch,
.get_max_clock = tegra_sdhci_get_max_clock,
};
@@ -335,9 +380,6 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
- .nvquirks = NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_DDR50 |
- NVQUIRK_ENABLE_SDR104,
};
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
@@ -380,20 +422,15 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
return -EINVAL;
soc_data = match->data;
- host = sdhci_pltfm_init(pdev, soc_data->pdata, 0);
+ host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
- if (!tegra_host) {
- dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
- rc = -ENOMEM;
- goto err_alloc_tegra_host;
- }
+ tegra_host = sdhci_pltfm_priv(pltfm_host);
tegra_host->ddr_signaling = false;
+ tegra_host->pad_calib_required = false;
tegra_host->soc_data = soc_data;
- pltfm_host->priv = tegra_host;
rc = mmc_of_parse(host->mmc);
if (rc)
@@ -429,7 +466,6 @@ err_add_host:
err_clk_get:
err_power_req:
err_parse_dt:
-err_alloc_tegra_host:
sdhci_pltfm_free(pdev);
return rc;
}
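A behavioural fix rides along in the Tegra set_clock hunk above: the old code returned early when asked for a 0 Hz clock, leaving the card clock ungated; clock == 0 is now forwarded to sdhci_set_clock(), which writes 0 to SDHCI_CLOCK_CONTROL and actually stops the clock.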
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index add9fdfd1d8f..6bd3d1794966 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -53,8 +53,6 @@ static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
-static int sdhci_pre_dma_transfer(struct sdhci_host *host,
- struct mmc_data *data);
static int sdhci_do_get_cd(struct sdhci_host *host);
#ifdef CONFIG_PM
@@ -428,6 +426,31 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
DBG("PIO transfer complete.\n");
}
+static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+ struct mmc_data *data, int cookie)
+{
+ int sg_count;
+
+ /*
+ * If the data buffers are already mapped, return the previous
+ * dma_map_sg() result.
+ */
+ if (data->host_cookie == COOKIE_PRE_MAPPED)
+ return data->sg_count;
+
+ sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (sg_count == 0)
+ return -ENOSPC;
+
+ data->sg_count = sg_count;
+ data->host_cookie = cookie;
+
+ return sg_count;
+}
+
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
local_irq_save(*flags);
@@ -462,41 +485,22 @@ static void sdhci_adma_mark_end(void *desc)
dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
-static int sdhci_adma_table_pre(struct sdhci_host *host,
- struct mmc_data *data)
+static void sdhci_adma_table_pre(struct sdhci_host *host,
+ struct mmc_data *data, int sg_count)
{
- int direction;
-
- void *desc;
- void *align;
- dma_addr_t addr;
- dma_addr_t align_addr;
- int len, offset;
-
struct scatterlist *sg;
- int i;
- char *buffer;
unsigned long flags;
+ dma_addr_t addr, align_addr;
+ void *desc, *align;
+ char *buffer;
+ int len, offset, i;
/*
* The spec does not specify the endianness of the descriptor table.
* We currently guess that it is LE.
*/
- if (data->flags & MMC_DATA_READ)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
-
- host->align_addr = dma_map_single(mmc_dev(host->mmc),
- host->align_buffer, host->align_buffer_sz, direction);
- if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
- goto fail;
- BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
-
- host->sg_count = sdhci_pre_dma_transfer(host, data);
- if (host->sg_count < 0)
- goto unmap_align;
+ host->sg_count = sg_count;
desc = host->adma_table;
align = host->align_buffer;
@@ -508,10 +512,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
len = sg_dma_len(sg);
/*
- * The SDHCI specification states that ADMA
- * addresses must be 32-bit aligned. If they
- * aren't, then we use a bounce buffer for
- * the (up to three) bytes that screw up the
+ * The SDHCI specification states that ADMA addresses must
+ * be 32-bit aligned. If they aren't, then we use a bounce
+ * buffer for the (up to three) bytes that screw up the
* alignment.
*/
offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
@@ -555,92 +558,56 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
}
if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
- /*
- * Mark the last descriptor as the terminating descriptor
- */
+ /* Mark the last descriptor as the terminating descriptor */
if (desc != host->adma_table) {
desc -= host->desc_sz;
sdhci_adma_mark_end(desc);
}
} else {
- /*
- * Add a terminating entry.
- */
-
- /* nop, end, valid */
+ /* Add a terminating entry - nop, end, valid */
sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
}
-
- /*
- * Resync align buffer as we might have changed it.
- */
- if (data->flags & MMC_DATA_WRITE) {
- dma_sync_single_for_device(mmc_dev(host->mmc),
- host->align_addr, host->align_buffer_sz, direction);
- }
-
- return 0;
-
-unmap_align:
- dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
- host->align_buffer_sz, direction);
-fail:
- return -EINVAL;
}
static void sdhci_adma_table_post(struct sdhci_host *host,
struct mmc_data *data)
{
- int direction;
-
struct scatterlist *sg;
int i, size;
void *align;
char *buffer;
unsigned long flags;
- bool has_unaligned;
- if (data->flags & MMC_DATA_READ)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
-
- dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
- host->align_buffer_sz, direction);
+ if (data->flags & MMC_DATA_READ) {
+ bool has_unaligned = false;
- /* Do a quick scan of the SG list for any unaligned mappings */
- has_unaligned = false;
- for_each_sg(data->sg, sg, host->sg_count, i)
- if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
- has_unaligned = true;
- break;
- }
+ /* Do a quick scan of the SG list for any unaligned mappings */
+ for_each_sg(data->sg, sg, host->sg_count, i)
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+ has_unaligned = true;
+ break;
+ }
- if (has_unaligned && data->flags & MMC_DATA_READ) {
- dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
- data->sg_len, direction);
+ if (has_unaligned) {
+ dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+ data->sg_len, DMA_FROM_DEVICE);
- align = host->align_buffer;
+ align = host->align_buffer;
- for_each_sg(data->sg, sg, host->sg_count, i) {
- if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
- size = SDHCI_ADMA2_ALIGN -
- (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
+ for_each_sg(data->sg, sg, host->sg_count, i) {
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+ size = SDHCI_ADMA2_ALIGN -
+ (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
- buffer = sdhci_kmap_atomic(sg, &flags);
- memcpy(buffer, align, size);
- sdhci_kunmap_atomic(buffer, &flags);
+ buffer = sdhci_kmap_atomic(sg, &flags);
+ memcpy(buffer, align, size);
+ sdhci_kunmap_atomic(buffer, &flags);
- align += SDHCI_ADMA2_ALIGN;
+ align += SDHCI_ADMA2_ALIGN;
+ }
}
}
}
-
- if (data->host_cookie == COOKIE_MAPPED) {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, direction);
- data->host_cookie = COOKIE_UNMAPPED;
- }
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
@@ -666,9 +633,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
if (!data)
target_timeout = cmd->busy_timeout * 1000;
else {
- target_timeout = data->timeout_ns / 1000;
- if (host->clock)
- target_timeout += data->timeout_clks / host->clock;
+ target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
+ if (host->clock && data->timeout_clks) {
+ unsigned long long val;
+
+ /*
+ * data->timeout_clks is in units of clock cycles.
+ * host->clock is in Hz. target_timeout is in us.
+ * Hence, us = 1000000 * cycles / Hz. Round up.
+ */
+ val = 1000000 * data->timeout_clks;
+ if (do_div(val, host->clock))
+ target_timeout++;
+ target_timeout += val;
+ }
}
/*
@@ -729,7 +707,6 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
u8 ctrl;
struct mmc_data *data = cmd->data;
- int ret;
WARN_ON(host->data);
@@ -748,63 +725,48 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
host->data_early = 0;
host->data->bytes_xfered = 0;
- if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
- host->flags |= SDHCI_REQ_USE_DMA;
-
- /*
- * FIXME: This doesn't account for merging when mapping the
- * scatterlist.
- */
- if (host->flags & SDHCI_REQ_USE_DMA) {
- int broken, i;
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
struct scatterlist *sg;
+ unsigned int length_mask, offset_mask;
+ int i;
- broken = 0;
+ host->flags |= SDHCI_REQ_USE_DMA;
+
+ /*
+ * FIXME: This doesn't account for merging when mapping the
+ * scatterlist.
+ *
+ * The assumption here being that alignment and lengths are
+ * the same after DMA mapping to device address space.
+ */
+ length_mask = 0;
+ offset_mask = 0;
if (host->flags & SDHCI_USE_ADMA) {
- if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
- broken = 1;
+ if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
+ length_mask = 3;
+ /*
+ * As we use up to 3 byte chunks to work
+ * around alignment problems, we need to
+ * check the offset as well.
+ */
+ offset_mask = 3;
+ }
} else {
if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
- broken = 1;
+ length_mask = 3;
+ if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
+ offset_mask = 3;
}
- if (unlikely(broken)) {
+ if (unlikely(length_mask | offset_mask)) {
for_each_sg(data->sg, sg, data->sg_len, i) {
- if (sg->length & 0x3) {
+ if (sg->length & length_mask) {
DBG("Reverting to PIO because of transfer size (%d)\n",
- sg->length);
+ sg->length);
host->flags &= ~SDHCI_REQ_USE_DMA;
break;
}
- }
- }
- }
-
- /*
- * The assumption here being that alignment is the same after
- * translation to device address space.
- */
- if (host->flags & SDHCI_REQ_USE_DMA) {
- int broken, i;
- struct scatterlist *sg;
-
- broken = 0;
- if (host->flags & SDHCI_USE_ADMA) {
- /*
- * As we use 3 byte chunks to work around
- * alignment problems, we need to check this
- * quirk.
- */
- if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
- broken = 1;
- } else {
- if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
- broken = 1;
- }
-
- if (unlikely(broken)) {
- for_each_sg(data->sg, sg, data->sg_len, i) {
- if (sg->offset & 0x3) {
+ if (sg->offset & offset_mask) {
DBG("Reverting to PIO because of bad alignment\n");
host->flags &= ~SDHCI_REQ_USE_DMA;
break;
@@ -814,39 +776,27 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
}
if (host->flags & SDHCI_REQ_USE_DMA) {
- if (host->flags & SDHCI_USE_ADMA) {
- ret = sdhci_adma_table_pre(host, data);
- if (ret) {
- /*
- * This only happens when someone fed
- * us an invalid request.
- */
- WARN_ON(1);
- host->flags &= ~SDHCI_REQ_USE_DMA;
- } else {
- sdhci_writel(host, host->adma_addr,
- SDHCI_ADMA_ADDRESS);
- if (host->flags & SDHCI_USE_64_BIT_DMA)
- sdhci_writel(host,
- (u64)host->adma_addr >> 32,
- SDHCI_ADMA_ADDRESS_HI);
- }
- } else {
- int sg_cnt;
+ int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
- sg_cnt = sdhci_pre_dma_transfer(host, data);
- if (sg_cnt <= 0) {
- /*
- * This only happens when someone fed
- * us an invalid request.
- */
- WARN_ON(1);
- host->flags &= ~SDHCI_REQ_USE_DMA;
- } else {
- WARN_ON(sg_cnt != 1);
- sdhci_writel(host, sg_dma_address(data->sg),
- SDHCI_DMA_ADDRESS);
- }
+ if (sg_cnt <= 0) {
+ /*
+ * This only happens when someone fed
+ * us an invalid request.
+ */
+ WARN_ON(1);
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+ } else if (host->flags & SDHCI_USE_ADMA) {
+ sdhci_adma_table_pre(host, data, sg_cnt);
+
+ sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ sdhci_writel(host,
+ (u64)host->adma_addr >> 32,
+ SDHCI_ADMA_ADDRESS_HI);
+ } else {
+ WARN_ON(sg_cnt != 1);
+ sdhci_writel(host, sg_dma_address(data->sg),
+ SDHCI_DMA_ADDRESS);
}
}
@@ -946,19 +896,9 @@ static void sdhci_finish_data(struct sdhci_host *host)
data = host->data;
host->data = NULL;
- if (host->flags & SDHCI_REQ_USE_DMA) {
- if (host->flags & SDHCI_USE_ADMA)
- sdhci_adma_table_post(host, data);
- else {
- if (data->host_cookie == COOKIE_MAPPED) {
- dma_unmap_sg(mmc_dev(host->mmc),
- data->sg, data->sg_len,
- (data->flags & MMC_DATA_READ) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- data->host_cookie = COOKIE_UNMAPPED;
- }
- }
- }
+ if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
+ (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
+ sdhci_adma_table_post(host, data);
/*
* The specification states that the block count register must
@@ -1003,6 +943,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
WARN_ON(host->cmd);
+ /* Initially, a command has no error */
+ cmd->error = 0;
+
/* Wait max 10 ms */
timeout = 10;
@@ -1097,8 +1040,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
}
}
- host->cmd->error = 0;
-
/* Finished CMD23, now send actual command. */
if (host->cmd == host->mrq->sbc) {
host->cmd = NULL;
@@ -1269,10 +1210,24 @@ clock_set:
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
-static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
+
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ spin_lock_irq(&host->lock);
+
+ if (mode != MMC_POWER_OFF)
+ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ else
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+}
+
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
u8 pwr = 0;
if (mode != MMC_POWER_OFF) {
@@ -1304,7 +1259,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_off(host);
- vdd = 0;
} else {
/*
* Spec says that we should clear the power reg before setting
@@ -1335,12 +1289,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
mdelay(10);
}
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
- if (!IS_ERR(mmc->supply.vmmc)) {
- spin_unlock_irq(&host->lock);
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
- spin_lock_irq(&host->lock);
- }
+static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (host->ops->set_power)
+ host->ops->set_power(host, mode, vdd);
+ else if (!IS_ERR(mmc->supply.vmmc))
+ sdhci_set_power_reg(host, mode, vdd);
+ else
+ sdhci_set_power(host, mode, vdd);
}
/*****************************************************************************\
@@ -1490,7 +1452,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
}
}
- sdhci_set_power(host, ios->power_mode, ios->vdd);
+ __sdhci_set_power(host, ios->power_mode, ios->vdd);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -2114,39 +2076,12 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct sdhci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- if (host->flags & SDHCI_REQ_USE_DMA) {
- if (data->host_cookie == COOKIE_GIVEN ||
- data->host_cookie == COOKIE_MAPPED)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- data->flags & MMC_DATA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- data->host_cookie = COOKIE_UNMAPPED;
- }
-}
-
-static int sdhci_pre_dma_transfer(struct sdhci_host *host,
- struct mmc_data *data)
-{
- int sg_count;
-
- if (data->host_cookie == COOKIE_MAPPED) {
- data->host_cookie = COOKIE_GIVEN;
- return data->sg_count;
- }
-
- WARN_ON(data->host_cookie == COOKIE_GIVEN);
-
- sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- data->flags & MMC_DATA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
- if (sg_count == 0)
- return -ENOSPC;
-
- data->sg_count = sg_count;
- data->host_cookie = COOKIE_MAPPED;
+ if (data->host_cookie != COOKIE_UNMAPPED)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
- return sg_count;
+ data->host_cookie = COOKIE_UNMAPPED;
}
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -2157,7 +2092,7 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
mrq->data->host_cookie = COOKIE_UNMAPPED;
if (host->flags & SDHCI_REQ_USE_DMA)
- sdhci_pre_dma_transfer(host, mrq->data);
+ sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
static void sdhci_card_event(struct mmc_host *mmc)
@@ -2238,6 +2173,22 @@ static void sdhci_tasklet_finish(unsigned long param)
mrq = host->mrq;
/*
+ * Always unmap the data buffers if they were mapped by
+ * sdhci_prepare_data() whenever we finish with a request.
+ * This avoids leaking DMA mappings on error.
+ */
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ struct mmc_data *data = mrq->data;
+
+ if (data && data->host_cookie == COOKIE_MAPPED) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
+ }
+
+ /*
* The controller needs a reset of internal state machines
* upon error conditions.
*/
@@ -2322,13 +2273,30 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
return;
}
- if (intmask & SDHCI_INT_TIMEOUT)
- host->cmd->error = -ETIMEDOUT;
- else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
- SDHCI_INT_INDEX))
- host->cmd->error = -EILSEQ;
+ if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
+ SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
+ if (intmask & SDHCI_INT_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ host->cmd->error = -EILSEQ;
+
+ /*
+ * If this command initiates a data phase and a response
+ * CRC error is signalled, the card can start transferring
+ * data - the card may have received the command without
+ * error. We must not terminate the mmc_request early.
+ *
+ * If the card did not receive the command or returned an
+ * error which prevented it sending data, the data phase
+ * will time out.
+ */
+ if (host->cmd->data &&
+ (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
+ SDHCI_INT_CRC) {
+ host->cmd = NULL;
+ return;
+ }
- if (host->cmd->error) {
tasklet_schedule(&host->finish_tasklet);
return;
}
@@ -2857,6 +2825,36 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
+static int sdhci_set_dma_mask(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct device *dev = mmc_dev(mmc);
+ int ret = -EINVAL;
+
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
+ host->flags &= ~SDHCI_USE_64_BIT_DMA;
+
+ /* Try 64-bit mask if hardware is capable of it */
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ pr_warn("%s: Failed to set 64-bit DMA mask.\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_64_BIT_DMA;
+ }
+ }
+
+ /* 32-bit mask as default & fallback */
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ pr_warn("%s: Failed to set 32-bit DMA mask.\n",
+ mmc_hostname(mmc));
+ }
+
+ return ret;
+}
+
int sdhci_add_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
@@ -2928,17 +2926,21 @@ int sdhci_add_host(struct sdhci_host *host)
* SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
* implement.
*/
- if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
+ if (caps[0] & SDHCI_CAN_64BIT)
host->flags |= SDHCI_USE_64_BIT_DMA;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
- if (host->ops->enable_dma) {
- if (host->ops->enable_dma(host)) {
- pr_warn("%s: No suitable DMA available - falling back to PIO\n",
- mmc_hostname(mmc));
- host->flags &=
- ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
- }
+ ret = sdhci_set_dma_mask(host);
+
+ if (!ret && host->ops->enable_dma)
+ ret = host->ops->enable_dma(host);
+
+ if (ret) {
+ pr_warn("%s: No suitable DMA available - falling back to PIO\n",
+ mmc_hostname(mmc));
+ host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+
+ ret = 0;
}
}
@@ -2947,6 +2949,9 @@ int sdhci_add_host(struct sdhci_host *host)
host->flags &= ~SDHCI_USE_SDMA;
if (host->flags & SDHCI_USE_ADMA) {
+ dma_addr_t dma;
+ void *buf;
+
/*
* The DMA descriptor table size is calculated as the maximum
* number of segments times 2, to allow for an alignment
@@ -2962,33 +2967,27 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_ADMA2_32_DESC_SZ;
host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
}
- host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
- host->adma_table_sz,
- &host->adma_addr,
- GFP_KERNEL);
+
host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
- host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
- if (!host->adma_table || !host->align_buffer) {
- if (host->adma_table)
- dma_free_coherent(mmc_dev(mmc),
- host->adma_table_sz,
- host->adma_table,
- host->adma_addr);
- kfree(host->align_buffer);
+ buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, &dma, GFP_KERNEL);
+ if (!buf) {
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
- host->adma_table = NULL;
- host->align_buffer = NULL;
- } else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
+ } else if ((dma + host->align_buffer_sz) &
+ (SDHCI_ADMA2_DESC_ALIGN - 1)) {
pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
- dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
- host->adma_table, host->adma_addr);
- kfree(host->align_buffer);
- host->adma_table = NULL;
- host->align_buffer = NULL;
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, buf, dma);
+ } else {
+ host->align_buffer = buf;
+ host->align_addr = dma;
+
+ host->adma_table = buf + host->align_buffer_sz;
+ host->adma_addr = dma + host->align_buffer_sz;
}
}
@@ -3072,14 +3071,14 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
+ if (override_timeout_clk)
+ host->timeout_clk = override_timeout_clk;
+
mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
host->ops->get_max_timeout_count(host) : 1 << 27;
mmc->max_busy_timeout /= host->timeout_clk;
}
- if (override_timeout_clk)
- host->timeout_clk = override_timeout_clk;
-
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
@@ -3449,10 +3448,10 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
if (!IS_ERR(mmc->supply.vqmmc))
regulator_disable(mmc->supply.vqmmc);
- if (host->adma_table)
- dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
- host->adma_table, host->adma_addr);
- kfree(host->align_buffer);
+ if (host->align_buffer)
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, host->align_buffer,
+ host->align_addr);
host->adma_table = NULL;
host->align_buffer = NULL;
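The reworked timeout arithmetic in sdhci_calc_timeout() above now rounds up in both stages instead of truncating. A worked example with illustrative numbers (timeout_ns = 100100, timeout_clks = 255, clock = 50 MHz; DIV_ROUND_UP and do_div are the usual kernel helpers):

        unsigned int target_timeout;
        unsigned long long val;

        target_timeout = DIV_ROUND_UP(100100, 1000);    /* -> 101 us */
        val = 1000000ULL * 255;                 /* clock cycles scaled to us */
        if (do_div(val, 50000000))              /* quotient 5, remainder 5000000 */
                target_timeout++;               /* nonzero remainder: round up */
        target_timeout += val;                  /* 101 + 1 + 5 = 107 us */

The old code truncated both terms (100 us here, since 255 / 50000000 truncates to 0), undershooting the card's stated requirement.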
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0115e9907bf8..0f39f4f84d10 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -316,8 +316,8 @@ struct sdhci_adma2_64_desc {
enum sdhci_cookie {
COOKIE_UNMAPPED,
- COOKIE_MAPPED,
- COOKIE_GIVEN,
+ COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
+ COOKIE_MAPPED, /* mapped by sdhci_prepare_data() */
};
struct sdhci_host {
@@ -529,6 +529,8 @@ struct sdhci_ops {
#endif
void (*set_clock)(struct sdhci_host *host, unsigned int clock);
+ void (*set_power)(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
int (*enable_dma)(struct sdhci_host *host);
unsigned int (*get_max_clock)(struct sdhci_host *host);
@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
}
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
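With sdhci_set_power() now exported and a .set_power host op available, a driver that needs non-standard power sequencing can interpose its own callback and still fall back to the common register programming. A sketch; my_set_power and my_ops are illustrative names, not part of this patch:

static void my_set_power(struct sdhci_host *host, unsigned char mode,
                         unsigned short vdd)
{
        /* driver-specific rail sequencing would go here */

        sdhci_set_power(host, mode, vdd);   /* standard SDHCI_POWER_CONTROL writes */
}

static const struct sdhci_ops my_ops = {
        .set_power      = my_set_power,
};

Note the dispatch order in __sdhci_set_power(): a .set_power op takes precedence over both the vmmc regulator path and the plain register path.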
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index b7e305775314..5ff26ab81eb1 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -398,10 +398,10 @@ static struct mmc_host_ops sdricoh_ops = {
static int sdricoh_init_mmc(struct pci_dev *pci_dev,
struct pcmcia_device *pcmcia_dev)
{
- int result = 0;
- void __iomem *iobase = NULL;
- struct mmc_host *mmc = NULL;
- struct sdricoh_host *host = NULL;
+ int result;
+ void __iomem *iobase;
+ struct mmc_host *mmc;
+ struct sdricoh_host *host;
struct device *dev = &pcmcia_dev->dev;
/* map iomem */
if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) !=
@@ -419,7 +419,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
if (readl(iobase + R104_VERSION) != 0x4000) {
dev_dbg(dev, "no supported mmc controller found\n");
result = -ENODEV;
- goto err;
+ goto unmap_io;
}
/* allocate privdata */
mmc = pcmcia_dev->priv =
@@ -427,7 +427,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
if (!mmc) {
dev_err(dev, "mmc_alloc_host failed\n");
result = -ENOMEM;
- goto err;
+ goto unmap_io;
}
host = mmc_priv(mmc);
@@ -451,8 +451,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
if (sdricoh_reset(host)) {
dev_dbg(dev, "could not reset\n");
result = -EIO;
- goto err;
-
+ goto free_host;
}
result = mmc_add_host(mmc);
@@ -461,13 +460,10 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
dev_dbg(dev, "mmc host registered\n");
return 0;
}
-
-err:
- if (iobase)
- pci_iounmap(pci_dev, iobase);
- if (mmc)
- mmc_free_host(mmc);
-
+free_host:
+ mmc_free_host(mmc);
+unmap_io:
+ pci_iounmap(pci_dev, iobase);
return result;
}
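The error handling above is converted to the usual kernel unwind pattern: labels are ordered so a failure at step N jumps to a label that undoes exactly steps 1..N-1, and nothing else. A generic sketch with illustrative step_*/undo_* names:

static int foo_init(void)
{
        int err;

        err = step_a();
        if (err)
                return err;             /* nothing to undo yet */

        err = step_b();
        if (err)
                goto undo_a;

        err = step_c();
        if (err)
                goto undo_b;

        return 0;

undo_b:
        undo_step_b();
undo_a:
        undo_step_a();
        return err;
}

This is why the NULL initializations and the `if (mmc)` / `if (iobase)` guards could be dropped: each label is only reachable once the corresponding resource definitely exists.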
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 6234eab38ff3..d9a655f47d41 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1395,7 +1395,7 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
static void sh_mmcif_timeout_work(struct work_struct *work)
{
- struct delayed_work *d = container_of(work, struct delayed_work, work);
+ struct delayed_work *d = to_delayed_work(work);
struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
struct mmc_request *mrq = host->mrq;
struct device *dev = sh_mmcif_host_to_dev(host);
@@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
- mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+ mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 354f4f335ed5..9aa147959276 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -1,6 +1,8 @@
/*
* SuperH Mobile SDHI
*
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
* Copyright (C) 2009 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
@@ -43,6 +45,7 @@ struct sh_mobile_sdhi_of_data {
unsigned long capabilities2;
enum dma_slave_buswidth dma_buswidth;
dma_addr_t dma_rx_offset;
+ unsigned bus_shift;
};
static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
@@ -59,12 +62,19 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
- TMIO_MMC_CLK_ACTUAL,
+ TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dma_rx_offset = 0x2000,
};
+static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
+ TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
+ .capabilities = MMC_CAP_SD_HIGHSPEED,
+ .bus_shift = 2,
+};
+
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-shmobile" },
{ .compatible = "renesas,sdhi-sh7372" },
@@ -78,6 +88,7 @@ static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
@@ -103,6 +114,15 @@ static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
case 0xCB0D:
val = (width == 32) ? 0x0000 : 0x0001;
break;
+ case 0xCC10: /* Gen3, SD only */
+ case 0xCD10: /* Gen3, SD + MMC */
+ if (width == 64)
+ val = 0x0000;
+ else if (width == 32)
+ val = 0x0101;
+ else
+ val = 0x0001;
+ break;
default:
/* nothing to do */
return;
@@ -163,6 +183,7 @@ static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
case CTL_SD_MEM_CARD_OPT:
case CTL_TRANSACTION_CTL:
case CTL_DMA_ENABLE:
+ case EXT_ACC:
return sh_mobile_sdhi_wait_idle(host);
}
@@ -213,10 +234,8 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
- if (priv == NULL) {
- dev_err(&pdev->dev, "kzalloc failed\n");
+ if (!priv)
return -ENOMEM;
- }
mmc_data = &priv->mmc_data;
dma_priv = &priv->dma_priv;
@@ -234,16 +253,26 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eprobe;
}
+ if (of_id && of_id->data) {
+ const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
+
+ mmc_data->flags |= of_data->tmio_flags;
+ mmc_data->capabilities |= of_data->capabilities;
+ mmc_data->capabilities2 |= of_data->capabilities2;
+ mmc_data->dma_rx_offset = of_data->dma_rx_offset;
+ dma_priv->dma_buswidth = of_data->dma_buswidth;
+ host->bus_shift = of_data->bus_shift;
+ }
+
host->dma = dma_priv;
host->write16_hook = sh_mobile_sdhi_write16_hook;
host->clk_enable = sh_mobile_sdhi_clk_enable;
host->clk_disable = sh_mobile_sdhi_clk_disable;
host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
- /* SD control register space size is 0x100, 0x200 for bus_shift=1 */
- if (resource_size(res) > 0x100)
+
+ /* Originally registers were 16 bits apart; they can be 32 or 64 nowadays */
+ if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */
host->bus_shift = 1;
- else
- host->bus_shift = 0;
if (mmd)
*mmc_data = *mmd;
@@ -275,15 +304,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_QUIRK;
- if (of_id && of_id->data) {
- const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
- mmc_data->flags |= of_data->tmio_flags;
- mmc_data->capabilities |= of_data->capabilities;
- mmc_data->capabilities2 |= of_data->capabilities2;
- mmc_data->dma_rx_offset = of_data->dma_rx_offset;
- dma_priv->dma_buswidth = of_data->dma_buswidth;
- }
-
ret = tmio_mmc_host_probe(host, mmc_data);
if (ret < 0)
goto efree;
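host->bus_shift encodes the register stride: accessors compute the byte offset as reg << bus_shift, so 0 keeps the original 16-bit spacing, 1 gives 32-bit spacing, and the Gen3 value of 2 gives 64-bit spacing. An illustrative reduction of the tmio accessor, not code from this patch:

static u16 my_sd_read16(void __iomem *ctl, unsigned int addr,
                        unsigned int bus_shift)
{
        return ioread16(ctl + (addr << bus_shift));
}

Setting the shift from of_data before the resource_size() heuristic lets new SoCs declare the stride explicitly while old board files keep the size-based detection.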
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 83de82bceafc..7fc8b7aa83f0 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -28,6 +28,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
@@ -214,6 +215,7 @@
#define SDXC_CLK_25M 1
#define SDXC_CLK_50M 2
#define SDXC_CLK_50M_DDR 3
+#define SDXC_CLK_50M_DDR_8BIT 4
struct sunxi_mmc_clk_delay {
u32 output;
@@ -256,6 +258,9 @@ struct sunxi_mmc_host {
struct mmc_request *mrq;
struct mmc_request *manual_stop_mrq;
int ferror;
+
+ /* vqmmc */
+ bool vqmmc_enabled;
};
static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host)
@@ -284,16 +289,28 @@ static int sunxi_mmc_init_host(struct mmc_host *mmc)
if (sunxi_mmc_reset_host(host))
return -EIO;
+ /*
+ * Burst 8 transfers, RX trigger level: 7, TX trigger level: 8
+ *
+ * TODO: sun9i has a larger FIFO and supports higher trigger values
+ */
mmc_writel(host, REG_FTRGL, 0x20070008);
+ /* Maximum timeout value */
mmc_writel(host, REG_TMOUT, 0xffffffff);
+ /* Unmask SDIO interrupt if needed */
mmc_writel(host, REG_IMASK, host->sdio_imask);
+ /* Clear all pending interrupts */
mmc_writel(host, REG_RINTR, 0xffffffff);
+ /* Debug register? undocumented */
mmc_writel(host, REG_DBGC, 0xdeb);
+ /* Enable CEATA support */
mmc_writel(host, REG_FUNS, SDXC_CEATA_ON);
+ /* Set DMA descriptor list base address */
mmc_writel(host, REG_DLBA, host->sg_dma);
rval = mmc_readl(host, REG_GCTRL);
rval |= SDXC_INTERRUPT_ENABLE_BIT;
+ /* Undocumented, but found in Allwinner code */
rval &= ~SDXC_ACCESS_DONE_DIRECT;
mmc_writel(host, REG_GCTRL, rval);
@@ -640,11 +657,17 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
struct mmc_ios *ios)
{
u32 rate, oclk_dly, rval, sclk_dly;
+ u32 clock = ios->clock;
int ret;
- rate = clk_round_rate(host->clk_mmc, ios->clock);
+ /* 8 bit DDR requires a higher module clock */
+ if (ios->timing == MMC_TIMING_MMC_DDR52 &&
+ ios->bus_width == MMC_BUS_WIDTH_8)
+ clock <<= 1;
+
+ rate = clk_round_rate(host->clk_mmc, clock);
dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %d\n",
- ios->clock, rate);
+ clock, rate);
/* setting clock rate */
ret = clk_set_rate(host->clk_mmc, rate);
@@ -661,6 +684,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
/* clear internal divider */
rval = mmc_readl(host, REG_CLKCR);
rval &= ~0xff;
+ /* set internal divider for 8 bit eMMC DDR, so card clock is right */
+ if (ios->timing == MMC_TIMING_MMC_DDR52 &&
+ ios->bus_width == MMC_BUS_WIDTH_8) {
+ rval |= 1;
+ rate >>= 1;
+ }
mmc_writel(host, REG_CLKCR, rval);
/* determine delays */
@@ -670,13 +699,17 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
} else if (rate <= 25000000) {
oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
- } else if (rate <= 50000000) {
- if (ios->timing == MMC_TIMING_UHS_DDR50) {
- oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
- sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
- } else {
+ } else if (rate <= 52000000) {
+ if (ios->timing != MMC_TIMING_UHS_DDR50 &&
+ ios->timing != MMC_TIMING_MMC_DDR52) {
oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
+ } else if (ios->bus_width == MMC_BUS_WIDTH_8) {
+ oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].sample;
+ } else {
+ oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
}
} else {
return -EINVAL;
@@ -699,7 +732,20 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
case MMC_POWER_UP:
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ host->ferror = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->vdd);
+ if (host->ferror)
+ return;
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ host->ferror = regulator_enable(mmc->supply.vqmmc);
+ if (host->ferror) {
+ dev_err(mmc_dev(mmc),
+ "failed to enable vqmmc\n");
+ return;
+ }
+ host->vqmmc_enabled = true;
+ }
host->ferror = sunxi_mmc_init_host(mmc);
if (host->ferror)
@@ -712,6 +758,9 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
dev_dbg(mmc_dev(mmc), "power off!\n");
sunxi_mmc_reset_host(host);
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled)
+ regulator_disable(mmc->supply.vqmmc);
+ host->vqmmc_enabled = false;
break;
}
@@ -730,7 +779,8 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* set ddr mode */
rval = mmc_readl(host, REG_GCTRL);
- if (ios->timing == MMC_TIMING_UHS_DDR50)
+ if (ios->timing == MMC_TIMING_UHS_DDR50 ||
+ ios->timing == MMC_TIMING_MMC_DDR52)
rval |= SDXC_DDR_MODE;
else
rval &= ~SDXC_DDR_MODE;
@@ -743,6 +793,19 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
+static int sunxi_mmc_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ /* vqmmc regulator is available */
+ if (!IS_ERR(mmc->supply.vqmmc))
+ return mmc_regulator_set_vqmmc(mmc, ios);
+
+ /* no vqmmc regulator, assume fixed regulator at 3/3.3V */
+ if (mmc->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ return 0;
+
+ return -EINVAL;
+}
+
static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct sunxi_mmc_host *host = mmc_priv(mmc);
@@ -815,11 +878,6 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) {
cmd_val |= SDXC_DATA_EXPIRE | SDXC_WAIT_PRE_OVER;
- if (cmd->data->flags & MMC_DATA_STREAM) {
- imask |= SDXC_AUTO_COMMAND_DONE;
- cmd_val |= SDXC_SEQUENCE_MODE |
- SDXC_SEND_AUTO_STOP;
- }
if (cmd->data->stop) {
imask |= SDXC_AUTO_COMMAND_DONE;
@@ -894,6 +952,7 @@ static struct mmc_host_ops sunxi_mmc_ops = {
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
+ .start_signal_voltage_switch = sunxi_mmc_volt_switch,
.hw_reset = sunxi_mmc_hw_reset,
.card_busy = sunxi_mmc_card_busy,
};
@@ -903,6 +962,8 @@ static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
[SDXC_CLK_25M] = { .output = 180, .sample = 75 },
[SDXC_CLK_50M] = { .output = 90, .sample = 120 },
[SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 },
+ /* Value from A83T "new timing mode". Works but might not be right. */
+ [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 180 },
};
static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
@@ -910,6 +971,7 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
[SDXC_CLK_25M] = { .output = 180, .sample = 75 },
[SDXC_CLK_50M] = { .output = 150, .sample = 120 },
[SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
+ [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 },
};
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1060,12 +1122,18 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
mmc->max_segs = PAGE_SIZE / sizeof(struct sunxi_idma_des);
mmc->max_seg_size = (1 << host->idma_des_size_bits);
mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
- /* 400kHz ~ 50MHz */
+ /* 400kHz ~ 52MHz */
mmc->f_min = 400000;
- mmc->f_max = 50000000;
+ mmc->f_max = 52000000;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
+ /* TODO MMC DDR is not working on A80 */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun9i-a80-mmc"))
+ mmc->caps &= ~MMC_CAP_1_8V_DDR;
+
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
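The 8-bit DDR handling above is a two-step trade: sunxi_mmc_clk_set_rate() requests twice the card clock from the module clock (e.g. 104 MHz for a 52 MHz MMC_TIMING_MMC_DDR52 request), then programs the internal divider in REG_CLKCR so the card-facing clock is halved back to the requested rate. The accompanying `rate >>= 1` keeps the delay-table lookup keyed on the card clock rather than the doubled module clock, and the bracket itself is widened from 50 to 52 MHz to admit eMMC's DDR52 timing.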
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 4a0d6b80eaa3..7fb0c034dcb6 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
}
}
- if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
@@ -94,10 +94,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
desc = NULL;
ret = cookie;
}
- dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, cookie, host->mrq);
}
-
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
@@ -115,9 +112,6 @@ pio:
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
}
-
- dev_dbg(&host->pdev->dev, "%s(): desc %p, sg[%d]\n", __func__,
- desc, host->sg_len);
}
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
@@ -139,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
}
}
- if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
@@ -174,10 +168,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
desc = NULL;
ret = cookie;
}
- dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, cookie, host->mrq);
}
-
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
@@ -195,8 +186,6 @@ pio:
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
}
-
- dev_dbg(&host->pdev->dev, "%s(): desc %p\n", __func__, desc);
}
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index a10fde40b6c3..0521b4662748 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1,6 +1,8 @@
/*
* linux/drivers/mmc/host/tmio_mmc_pio.c
*
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
* Copyright (C) 2011 Guennadi Liakhovetski
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
@@ -159,42 +161,44 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
if (new_clock) {
for (clock = host->mmc->f_min, clk = 0x80000080;
- new_clock >= (clock<<1); clk >>= 1)
+ new_clock >= (clock << 1);
+ clk >>= 1)
clock <<= 1;
/* 1/1 clock is an option */
if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
- ((clk >> 22) & 0x1))
+ ((clk >> 22) & 0x1))
clk |= 0xff;
}
if (host->set_clk_div)
- host->set_clk_div(host->pdev, (clk>>22) & 1);
+ host->set_clk_div(host->pdev, (clk >> 22) & 1);
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
- msleep(10);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+ if (!(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG))
+ msleep(10);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
- /* implicit BUG_ON(!res) */
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10);
}
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(10);
+ msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 5 : 10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(10);
+ msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 1 : 10);
- /* implicit BUG_ON(!res) */
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
@@ -205,7 +209,6 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
- /* implicit BUG_ON(!res) */
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
msleep(10);
@@ -1122,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
- mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+ mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b47122d3e8d8..807c06e203c3 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1630,7 +1630,7 @@ static irqreturn_t usdhi6_cd(int irq, void *dev_id)
*/
static void usdhi6_timeout_work(struct work_struct *work)
{
- struct delayed_work *d = container_of(work, struct delayed_work, work);
+ struct delayed_work *d = to_delayed_work(work);
struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = mrq ? mrq->data : NULL;
@@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
/* Set .max_segs to some random number. Feel free to adjust. */
mmc->max_segs = 32;
mmc->max_blk_size = 512;
- mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+ mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
/*
* Setting .max_seg_size to 1 page would simplify our page-mapping code,
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 42cc953309f1..e83a279f1217 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -142,7 +142,7 @@ config MTD_AR7_PARTS
config MTD_BCM63XX_PARTS
tristate "BCM63XX CFE partitioning support"
- depends on BCM63XX
+ depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
select CRC32
help
This provides partition parsing for BCM63xx devices with CFE
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 8282f47bcf5d..845dd27d9f41 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
{
uint32_t buf;
size_t bytes_read;
+ int err;
- if (mtd_read(master, offset, sizeof(buf), &bytes_read,
- (uint8_t *)&buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, sizeof(buf), &bytes_read,
+ (uint8_t *)&buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
goto out_default;
}
@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
int trx_part = -1;
int last_trx_part = -1;
int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+ int err;
/*
* Some really old flashes (like AT45DB*) had smaller erasesize-s, but
@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
offset += blocksize) {
- /* Nothing more in higher memory */
- if (offset >= 0x2000000)
+ /* Nothing more in higher memory on BCM47XX (MIPS) */
+ if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
break;
if (curr_part >= BCM47XXPART_MAX_PARTS) {
@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read beginning of the block */
- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ &bytes_read, (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -254,10 +258,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read middle of the block */
- if (mtd_read(master, offset + 0x8000, 0x4,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -277,10 +282,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
offset = master->size - possible_nvram_sizes[i];
- if (mtd_read(master, offset, 0x4, &bytes_read,
- (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while reading at offset 0x%X!\n",
- offset);
+ err = mtd_read(master, offset, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while reading (offset 0x%X): %d\n",
+ offset, err);
continue;
}
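The recurring check above relies on mtd_read() reporting corrected bitflips as -EUCLEAN while still filling the buffer with valid data; mtd_is_bitflip() distinguishes that case from a hard failure. The shape of the idiom, as an illustrative fragment:

        err = mtd_read(master, offset, len, &retlen, buf);
        if (err && !mtd_is_bitflip(err))
                return err;     /* hard read error: data unusable */
        /* data in buf is valid, possibly after ECC correction */

Treating -EUCLEAN as fatal, as the old code effectively did, would abort partition parsing on NAND the moment a single correctable bitflip appeared.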
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index cec3188a170d..41d1d3149c61 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -24,6 +24,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bcm963xx_nvram.h>
#include <linux/bcm963xx_tag.h>
#include <linux/crc32.h>
#include <linux/module.h>
@@ -34,12 +35,15 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <asm/mach-bcm63xx/bcm63xx_nvram.h>
-#include <asm/mach-bcm63xx/board_bcm963xx.h>
+#define BCM963XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
-#define BCM63XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
+#define BCM963XX_CFE_MAGIC_OFFSET 0x4e0
+#define BCM963XX_CFE_VERSION_OFFSET 0x570
+#define BCM963XX_NVRAM_OFFSET 0x580
-#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
+/* Ensure strings read from flash structs are null terminated */
+#define STR_NULL_TERMINATE(x) \
+ do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
static int bcm63xx_detect_cfe(struct mtd_info *master)
{
@@ -58,68 +62,130 @@ static int bcm63xx_detect_cfe(struct mtd_info *master)
return 0;
/* very old CFE's do not have the cfe-v string, so check for magic */
- ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
+ ret = mtd_read(master, BCM963XX_CFE_MAGIC_OFFSET, 8, &retlen,
(void *)buf);
buf[retlen] = 0;
return strncmp("CFE1CFE1", buf, 8);
}
-static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
- const struct mtd_partition **pparts,
- struct mtd_part_parser_data *data)
+static int bcm63xx_read_nvram(struct mtd_info *master,
+ struct bcm963xx_nvram *nvram)
+{
+ u32 actual_crc, expected_crc;
+ size_t retlen;
+ int ret;
+
+ /* extract nvram data */
+ ret = mtd_read(master, BCM963XX_NVRAM_OFFSET, BCM963XX_NVRAM_V5_SIZE,
+ &retlen, (void *)nvram);
+ if (ret)
+ return ret;
+
+ ret = bcm963xx_nvram_checksum(nvram, &expected_crc, &actual_crc);
+ if (ret)
+ pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
+ expected_crc, actual_crc);
+
+ if (!nvram->psi_size)
+ nvram->psi_size = BCM963XX_DEFAULT_PSI_SIZE;
+
+ return 0;
+}
+
+static int bcm63xx_read_image_tag(struct mtd_info *master, const char *name,
+ loff_t tag_offset, struct bcm_tag *buf)
+{
+ int ret;
+ size_t retlen;
+ u32 computed_crc;
+
+ ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf);
+ if (ret)
+ return ret;
+
+ if (retlen != sizeof(*buf))
+ return -EIO;
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ STR_NULL_TERMINATE(buf->board_id);
+ STR_NULL_TERMINATE(buf->tag_version);
+
+ pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n",
+ name, tag_offset, buf->tag_version, buf->board_id);
+
+ return 0;
+ }
+
+ pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n",
+ name, tag_offset, buf->header_crc, computed_crc);
+ return 1;
+}
+
+static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts, struct bcm963xx_nvram *nvram)
{
/* CFE, NVRAM and global Linux are always present */
int nrparts = 3, curpart = 0;
- struct bcm_tag *buf;
+ struct bcm_tag *buf = NULL;
struct mtd_partition *parts;
int ret;
- size_t retlen;
unsigned int rootfsaddr, kerneladdr, spareaddr;
unsigned int rootfslen, kernellen, sparelen, totallen;
unsigned int cfelen, nvramlen;
unsigned int cfe_erasesize;
int i;
- u32 computed_crc;
bool rootfs_first = false;
- if (bcm63xx_detect_cfe(master))
- return -EINVAL;
-
cfe_erasesize = max_t(uint32_t, master->erasesize,
- BCM63XX_CFE_BLOCK_SIZE);
+ BCM963XX_CFE_BLOCK_SIZE);
cfelen = cfe_erasesize;
- nvramlen = bcm63xx_nvram_get_psi_size() * SZ_1K;
+ nvramlen = nvram->psi_size * SZ_1K;
nvramlen = roundup(nvramlen, cfe_erasesize);
- /* Allocate memory for buffer */
buf = vmalloc(sizeof(struct bcm_tag));
if (!buf)
return -ENOMEM;
/* Get the tag */
- ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
- (void *)buf);
-
- if (retlen != sizeof(struct bcm_tag)) {
- vfree(buf);
- return -EIO;
- }
+ ret = bcm63xx_read_image_tag(master, "rootfs", cfelen, buf);
+ if (!ret) {
+ STR_NULL_TERMINATE(buf->flash_image_start);
+ if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) ||
+ rootfsaddr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid rootfs address: %*ph\n",
+ (int)sizeof(buf->flash_image_start),
+ buf->flash_image_start);
+ goto invalid_tag;
+ }
- computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
- offsetof(struct bcm_tag, header_crc));
- if (computed_crc == buf->header_crc) {
- char *boardid = &(buf->board_id[0]);
- char *tagversion = &(buf->tag_version[0]);
+ STR_NULL_TERMINATE(buf->kernel_address);
+ if (kstrtouint(buf->kernel_address, 10, &kerneladdr) ||
+ kerneladdr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid kernel address: %*ph\n",
+ (int)sizeof(buf->kernel_address),
+ buf->kernel_address);
+ goto invalid_tag;
+ }
- sscanf(buf->flash_image_start, "%u", &rootfsaddr);
- sscanf(buf->kernel_address, "%u", &kerneladdr);
- sscanf(buf->kernel_length, "%u", &kernellen);
- sscanf(buf->total_length, "%u", &totallen);
+ STR_NULL_TERMINATE(buf->kernel_length);
+ if (kstrtouint(buf->kernel_length, 10, &kernellen)) {
+ pr_err("invalid kernel length: %*ph\n",
+ (int)sizeof(buf->kernel_length),
+ buf->kernel_length);
+ goto invalid_tag;
+ }
- pr_info("CFE boot tag found with version %s and board type %s\n",
- tagversion, boardid);
+ STR_NULL_TERMINATE(buf->total_length);
+ if (kstrtouint(buf->total_length, 10, &totallen)) {
+ pr_err("invalid total length: %*ph\n",
+ (int)sizeof(buf->total_length),
+ buf->total_length);
+ goto invalid_tag;
+ }
kerneladdr = kerneladdr - BCM963XX_EXTENDED_SIZE;
rootfsaddr = rootfsaddr - BCM963XX_EXTENDED_SIZE;
@@ -134,13 +200,14 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
rootfsaddr = kerneladdr + kernellen;
rootfslen = spareaddr - rootfsaddr;
}
- } else {
- pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
- buf->header_crc, computed_crc);
+ } else if (ret > 0) {
+invalid_tag:
kernellen = 0;
rootfslen = 0;
rootfsaddr = 0;
spareaddr = cfelen;
+ } else {
+ goto out;
}
sparelen = master->size - spareaddr - nvramlen;
@@ -151,11 +218,10 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
if (kernellen > 0)
nrparts++;
- /* Ask kernel for more memory */
parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
if (!parts) {
- vfree(buf);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
/* Start building partition list */
@@ -206,9 +272,43 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
sparelen);
*pparts = parts;
+ ret = 0;
+
+out:
vfree(buf);
+ if (ret)
+ return ret;
+
return nrparts;
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct bcm963xx_nvram *nvram = NULL;
+ int ret;
+
+ if (bcm63xx_detect_cfe(master))
+ return -EINVAL;
+
+ nvram = vzalloc(sizeof(*nvram));
+ if (!nvram)
+ return -ENOMEM;
+
+ ret = bcm63xx_read_nvram(master, nvram);
+ if (ret)
+ goto out;
+
+ if (!mtd_type_is_nand(master))
+ ret = bcm63xx_parse_cfe_nor_partitions(master, pparts, nvram);
+ else
+ ret = -EINVAL;
+
+out:
+ vfree(nvram);
+ return ret;
};
static struct mtd_part_parser bcm63xx_cfe_parser = {
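The tag parsing above swaps sscanf() for STR_NULL_TERMINATE() followed by kstrtouint(). A minimal sketch of the pattern, with a hypothetical struct and field width: strings copied straight out of flash are fixed-width and need not be NUL-terminated, and kstrtouint() fails cleanly on non-numeric bytes where a failed sscanf() conversion would leave the output variable unset.

	#include <linux/kernel.h>	/* kstrtouint() */

	struct tag_field_example {		/* hypothetical layout */
		char kernel_length[10];		/* raw, possibly unterminated bytes */
	};

	static int parse_kernel_length(struct tag_field_example *t, unsigned int *out)
	{
		/* what STR_NULL_TERMINATE() does: force the last byte to NUL */
		t->kernel_length[sizeof(t->kernel_length) - 1] = 0;

		/* returns -EINVAL/-ERANGE on garbage instead of leaving *out stale */
		return kstrtouint(t->kernel_length, 10, out);
	}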
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e2c0057737e6..7c887f111a7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
break;
}
- page_cache_release(page);
+ put_page(page);
pages--;
index++;
}
@@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
return PTR_ERR(page);
memcpy(buf, page_address(page) + offset, cpylen);
- page_cache_release(page);
+ put_page(page);
if (retlen)
*retlen += cpylen;
@@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
unlock_page(page);
balance_dirty_pages_ratelimited(mapping);
}
- page_cache_release(page);
+ put_page(page);
if (retlen)
*retlen += cpylen;
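The page_cache_release() -> put_page() substitutions here (and in nandsim.c further down) are behavior-neutral renames: before this series, include/linux/pagemap.h defined the old name as a plain alias, approximately

	#define page_cache_release(page)	put_page(page)

so dropping the alias only changes spelling, not reference counting.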
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index c3a2695a4420..e7b2e439696c 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -72,13 +72,11 @@ MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
* @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
* @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
* @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15
- * @oobavail: 8 available bytes remaining after ECC toll
*/
static struct nand_ecclayout docg3_oobinfo = {
.eccbytes = 8,
.eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
.oobfree = {{0, 7}, {15, 1} },
- .oobavail = 8,
};
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
@@ -1438,7 +1436,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
oobdelta = mtd->oobsize;
break;
case MTD_OPS_AUTO_OOB:
- oobdelta = mtd->ecclayout->oobavail;
+ oobdelta = mtd->oobavail;
break;
default:
return -EINVAL;
@@ -1860,6 +1858,7 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->_write_oob = doc_write_oob;
mtd->_block_isbad = doc_block_isbad;
mtd->ecclayout = &docg3_oobinfo;
+ mtd->oobavail = 8;
mtd->ecc_strength = DOC_ECC_BCH_T;
return 0;
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 627a9bc37679..cbd8547d7aad 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -19,6 +19,7 @@
static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
+static unsigned long writebuf_size = 64;
#define MTDRAM_TOTAL_SIZE (total_size * 1024)
#define MTDRAM_ERASE_SIZE (erase_size * 1024)
@@ -27,6 +28,8 @@ module_param(total_size, ulong, 0);
MODULE_PARM_DESC(total_size, "Total device size in KiB");
module_param(erase_size, ulong, 0);
MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
+module_param(writebuf_size, ulong, 0);
+MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)");
#endif
// We could store these in the mtd structure, but we only support 1 device..
@@ -123,7 +126,7 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->flags = MTD_CAP_RAM;
mtd->size = size;
mtd->writesize = 1;
- mtd->writebufsize = 64; /* Mimic CFI NOR flashes */
+ mtd->writebufsize = writebuf_size;
mtd->erasesize = MTDRAM_ERASE_SIZE;
mtd->priv = mapped_address;
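With the new writebuf_size parameter the CFI-style default of 64 bytes becomes a load-time choice; for example (values purely illustrative):

	modprobe mtdram total_size=4096 erase_size=128 writebuf_size=256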
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 10bf304027dd..08de4b2cf0f5 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -126,10 +126,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
if (ops->oobbuf) {
size_t len, pages;
- if (ops->mode == MTD_OPS_AUTO_OOB)
- len = mtd->oobavail;
- else
- len = mtd->oobsize;
+ len = mtd_oobavail(mtd, ops);
pages = mtd_div_by_ws(mtd->size, mtd);
pages -= mtd_div_by_ws(from, mtd);
if (ops->ooboffs + ops->ooblen > pages * len)
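mtd_oobavail() replaces the open-coded MTD_OPS_AUTO_OOB ternary that this series removes from mtdpart.c, mtdswap.c and nand_base.c. Judging by the call sites, the helper is presumably a one-liner along these lines in include/linux/mtd/mtd.h:

	static inline int mtd_oobavail(struct mtd_info *mtd,
				       struct mtd_oob_ops *ops)
	{
		return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
	}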
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fc8b3d16cce7..cb06bdd21a1b 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -346,7 +346,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
return MTDSWAP_SCANNED_BAD;
- ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
+ ops.ooblen = 2 * d->mtd->oobavail;
ops.oobbuf = d->oob_buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
@@ -359,7 +359,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
data = (struct mtdswap_oobdata *)d->oob_buf;
data2 = (struct mtdswap_oobdata *)
- (d->oob_buf + d->mtd->ecclayout->oobavail);
+ (d->oob_buf + d->mtd->oobavail);
if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
eb->erase_count = le32_to_cpu(data->count);
@@ -933,7 +933,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = mtd->writesize;
- ops.ooblen = mtd->ecclayout->oobavail;
+ ops.ooblen = mtd->oobavail;
ops.ooboffs = 0;
ops.datbuf = d->page_buf;
ops.oobbuf = d->oob_buf;
@@ -945,7 +945,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
for (i = 0; i < mtd_pages; i++) {
patt = mtdswap_test_patt(test + i);
memset(d->page_buf, patt, mtd->writesize);
- memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
+ memset(d->oob_buf, patt, mtd->oobavail);
ret = mtd_write_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -964,7 +964,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
if (p1[j] != patt)
goto error;
- for (j = 0; j < mtd->ecclayout->oobavail; j++)
+ for (j = 0; j < mtd->oobavail; j++)
if (p2[j] != (unsigned char)patt)
goto error;
@@ -1387,7 +1387,7 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
if (!d->page_buf)
goto page_buf_fail;
- d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
+ d->oob_buf = kmalloc(2 * mtd->oobavail, GFP_KERNEL);
if (!d->oob_buf)
goto oob_buf_fail;
@@ -1417,7 +1417,6 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
unsigned long part;
unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
uint64_t swap_size, use_size, size_limit;
- struct nand_ecclayout *oinfo;
int ret;
parts = &partitions[0];
@@ -1447,17 +1446,10 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
}
- oinfo = mtd->ecclayout;
- if (!oinfo) {
- printk(KERN_ERR "%s: mtd%d does not have OOB\n",
- MTDSWAP_PREFIX, mtd->index);
- return;
- }
-
- if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
+ if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
printk(KERN_ERR "%s: Not enough free bytes in OOB, "
"%d available, %zu needed.\n",
- MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
+ MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
return;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 20f01b3ec23d..f05e0e9eb2f7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -74,6 +74,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
config MTD_NAND_GPIO
tristate "GPIO assisted NAND Flash driver"
depends on GPIOLIB || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables a NAND flash driver where control signals are
connected to GPIO pins, and commands and data are communicated
@@ -310,6 +311,7 @@ config MTD_NAND_CAFE
config MTD_NAND_CS553X
tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
depends on X86_32
+ depends on !UML && HAS_IOMEM
help
The CS553x companion chips for the AMD Geode processor
include NAND flash controllers with built-in hardware ECC
@@ -463,6 +465,7 @@ config MTD_NAND_MPC5121_NFC
config MTD_NAND_VF610_NFC
tristate "Support for Freescale NFC for VF610/MPC5125"
depends on (SOC_VF610 || COMPILE_TEST)
+ depends on HAS_IOMEM
help
Enables support for NAND Flash Controller on some Freescale
processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
@@ -553,4 +556,11 @@ config MTD_NAND_HISI504
help
Enables support for NAND controller on Hisilicon SoC Hip04.
+config MTD_NAND_QCOM
+ tristate "Support for NAND on QCOM SoCs"
+ depends on ARCH_QCOM
+ help
+ Enables support for NAND flash chips on SoCs containing the EBI2 NAND
+ controller. This controller is found on IPQ806x SoC.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 9e3623308509..f55335373f7c 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -56,5 +56,6 @@ obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
+obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index bddcf83d6859..20cbaabb2959 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -65,6 +65,11 @@ module_param(on_flash_bbt, int, 0);
struct atmel_nand_caps {
bool pmecc_correct_erase_page;
+ uint8_t pmecc_max_correction;
+};
+
+struct atmel_nand_nfc_caps {
+ uint32_t rb_mask;
};
/* oob layout for large page size
@@ -111,6 +116,7 @@ struct atmel_nfc {
/* Point to the sram bank which include readed data via NFC */
void *data_in_sram;
bool will_write_sram;
+ const struct atmel_nand_nfc_caps *caps;
};
static struct atmel_nfc nand_nfc;
@@ -140,6 +146,7 @@ struct atmel_nand_host {
int pmecc_cw_len; /* Length of codeword */
void __iomem *pmerrloc_base;
+ void __iomem *pmerrloc_el_base;
void __iomem *pmecc_rom_base;
/* lookup table for alpha_to and index_of */
@@ -468,6 +475,7 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
* 8-bits 13-bytes 14-bytes
* 12-bits 20-bytes 21-bytes
* 24-bits 39-bytes 42-bytes
+ * 32-bits 52-bytes 56-bytes
*/
static int pmecc_get_ecc_bytes(int cap, int sector_size)
{
@@ -813,7 +821,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
sector_size = host->pmecc_sector_size;
while (err_nbr) {
- tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1;
+ tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_el_base, i) - 1;
byte_pos = tmp / 8;
bit_pos = tmp % 8;
@@ -825,7 +833,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
*(buf + byte_pos) ^= (1 << bit_pos);
pos = sector_num * host->pmecc_sector_size + byte_pos;
- dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
pos, bit_pos, err_byte, *(buf + byte_pos));
} else {
/* Bit flip in OOB area */
@@ -835,7 +843,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
ecc[tmp] ^= (1 << bit_pos);
pos = tmp + nand_chip->ecc.layout->eccpos[0];
- dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
pos, bit_pos, err_byte, ecc[tmp]);
}
@@ -1017,6 +1025,9 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
case 24:
val = PMECC_CFG_BCH_ERR24;
break;
+ case 32:
+ val = PMECC_CFG_BCH_ERR32;
+ break;
}
if (host->pmecc_sector_size == 512)
@@ -1078,6 +1089,9 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
/* If device tree doesn't specify, use NAND's minimum ECC parameters */
if (host->pmecc_corr_cap == 0) {
+ if (*cap > host->caps->pmecc_max_correction)
+ return -EINVAL;
+
/* use the most fitable ecc bits (the near bigger one ) */
if (*cap <= 2)
host->pmecc_corr_cap = 2;
@@ -1089,6 +1103,8 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
host->pmecc_corr_cap = 12;
else if (*cap <= 24)
host->pmecc_corr_cap = 24;
+ else if (*cap <= 32)
+ host->pmecc_corr_cap = 32;
else
return -EINVAL;
}
@@ -1205,6 +1221,8 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
err_no = PTR_ERR(host->pmerrloc_base);
goto err;
}
+ host->pmerrloc_el_base = host->pmerrloc_base + ATMEL_PMERRLOC_SIGMAx +
+ (host->caps->pmecc_max_correction + 1) * 4;
if (!host->has_no_lookup_table) {
regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
@@ -1486,8 +1504,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
}
-static const struct of_device_id atmel_nand_dt_ids[];
-
static int atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
@@ -1498,7 +1514,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
enum of_gpio_flags flags = 0;
host->caps = (struct atmel_nand_caps *)
- of_match_device(atmel_nand_dt_ids, host->dev)->data;
+ of_device_get_match_data(host->dev);
if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
if (val >= 32) {
@@ -1547,10 +1563,16 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
* them from NAND ONFI parameters.
*/
if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
- if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
- (val != 24)) {
+ if (val > host->caps->pmecc_max_correction) {
dev_err(host->dev,
- "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+ "Required ECC strength too high: %u max %u\n",
+ val, host->caps->pmecc_max_correction);
+ return -EINVAL;
+ }
+ if ((val != 2) && (val != 4) && (val != 8) &&
+ (val != 12) && (val != 24) && (val != 32)) {
+ dev_err(host->dev,
+ "Required ECC strength not supported: %u\n",
val);
return -EINVAL;
}
@@ -1560,7 +1582,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
if ((val != 512) && (val != 1024)) {
dev_err(host->dev,
- "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+ "Required ECC sector size not supported: %u\n",
val);
return -EINVAL;
}
@@ -1677,9 +1699,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
ret = IRQ_HANDLED;
}
- if (pending & NFC_SR_RB_EDGE) {
+ if (pending & host->nfc->caps->rb_mask) {
complete(&host->nfc->comp_ready);
- nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
+ nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask);
ret = IRQ_HANDLED;
}
if (pending & NFC_SR_CMD_DONE) {
@@ -1697,7 +1719,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
init_completion(&host->nfc->comp_xfer_done);
- if (flag & NFC_SR_RB_EDGE)
+ if (flag & host->nfc->caps->rb_mask)
init_completion(&host->nfc->comp_ready);
if (flag & NFC_SR_CMD_DONE)
@@ -1715,7 +1737,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
comp[index++] = &host->nfc->comp_xfer_done;
- if (flag & NFC_SR_RB_EDGE)
+ if (flag & host->nfc->caps->rb_mask)
comp[index++] = &host->nfc->comp_ready;
if (flag & NFC_SR_CMD_DONE)
@@ -1783,7 +1805,7 @@ static int nfc_device_ready(struct mtd_info *mtd)
dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
mask & status);
- return status & NFC_SR_RB_EDGE;
+ return status & host->nfc->caps->rb_mask;
}
static void nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -1956,8 +1978,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
}
/* fall through */
default:
- nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
- nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
+ nfc_prepare_interrupt(host, host->nfc->caps->rb_mask);
+ nfc_wait_interrupt(host, host->nfc->caps->rb_mask);
}
}
@@ -2304,17 +2326,34 @@ static int atmel_nand_remove(struct platform_device *pdev)
return 0;
}
+/*
+ * AT91RM9200 does not have PMECC or PMECC Errloc peripherals for
+ * BCH ECC. Combined with the "atmel,has-pmecc", it is used to describe
+ * devices from the SAM9 family that have those.
+ */
static const struct atmel_nand_caps at91rm9200_caps = {
.pmecc_correct_erase_page = false,
+ .pmecc_max_correction = 24,
};
static const struct atmel_nand_caps sama5d4_caps = {
.pmecc_correct_erase_page = true,
+ .pmecc_max_correction = 24,
+};
+
+/*
+ * The PMECC Errloc controller starting in SAMA5D2 is not compatible,
+ * as the increased correction strength requires more registers.
+ */
+static const struct atmel_nand_caps sama5d2_caps = {
+ .pmecc_correct_erase_page = true,
+ .pmecc_max_correction = 32,
};
static const struct of_device_id atmel_nand_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
{ .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
+ { .compatible = "atmel,sama5d2-nand", .data = &sama5d2_caps },
{ /* sentinel */ }
};
@@ -2354,6 +2393,11 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
}
}
+ nfc->caps = (const struct atmel_nand_nfc_caps *)
+ of_device_get_match_data(&pdev->dev);
+ if (!nfc->caps)
+ return -ENODEV;
+
nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
@@ -2382,8 +2426,17 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev)
return 0;
}
+static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = {
+ .rb_mask = NFC_SR_RB_EDGE0,
+};
+
+static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = {
+ .rb_mask = NFC_SR_RB_EDGE3,
+};
+
static const struct of_device_id atmel_nand_nfc_match[] = {
- { .compatible = "atmel,sama5d3-nfc" },
+ { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps },
+ { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
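of_device_get_match_data(), used twice above, folds the old two-step of_match_device() lookup into one call and is what lets this patch drop the forward declaration of atmel_nand_dt_ids. Its implementation is roughly:

	const void *of_device_get_match_data(const struct device *dev)
	{
		const struct of_device_id *match;

		match = of_match_device(dev->driver->of_match_table, dev);
		return match ? match->data : NULL;
	}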
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 668e7358f19b..834d694487bd 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -43,6 +43,7 @@
#define PMECC_CFG_BCH_ERR8 (2 << 0)
#define PMECC_CFG_BCH_ERR12 (3 << 0)
#define PMECC_CFG_BCH_ERR24 (4 << 0)
+#define PMECC_CFG_BCH_ERR32 (5 << 0)
#define PMECC_CFG_SECTOR512 (0 << 4)
#define PMECC_CFG_SECTOR1024 (1 << 4)
@@ -108,7 +109,11 @@
#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
#define PMERRLOC_CALC_DONE (1 << 0)
#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */
-#define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */
+
+/*
+ * The ATMEL_PMERRLOC_ELx register location depends from the number of
+ * bits corrected by the PMECC controller. Do not use it.
+ */
/* Register access macros for PMECC */
#define pmecc_readl_relaxed(addr, reg) \
@@ -136,7 +141,7 @@
readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
#define pmerrloc_readl_el_relaxed(addr, n) \
- readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4))
+ readl_relaxed((addr) + ((n) * 4))
/* Galois field dimension */
#define PMECC_GF_DIMENSION_13 13
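The removed fixed offset can be re-derived from the new computation of pmerrloc_el_base in atmel_pmecc_nand_init_params():

	/* pmerrloc_el_base = pmerrloc_base + ATMEL_PMERRLOC_SIGMAx
	 *                    + (pmecc_max_correction + 1) * 4
	 *
	 * max correction 24 (sam9/sama5d4): 0x28 + 25 * 4 = 0x28 + 0x64 = 0x8c,
	 * matching the old fixed ATMEL_PMERRLOC_ELx of 0x08c;
	 * max correction 32 (sama5d2):      0x28 + 33 * 4 = 0x28 + 0x84 = 0xac.
	 */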
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 4d5d26221a7e..0bbc1fa97dba 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -42,7 +42,8 @@
#define NFC_SR_UNDEF (1 << 21)
#define NFC_SR_AWB (1 << 22)
#define NFC_SR_ASE (1 << 23)
-#define NFC_SR_RB_EDGE (1 << 24)
+#define NFC_SR_RB_EDGE0 (1 << 24)
+#define NFC_SR_RB_EDGE3 (1 << 27)
#define ATMEL_HSMC_NFC_IER 0x0c
#define ATMEL_HSMC_NFC_IDR 0x10
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 844fc07d22cd..e0528397306a 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = {
[BRCMNAND_FC_BASE] = 0x400,
};
+/* BRCMNAND v7.1 */
+static const u16 brcmnand_regs_v71[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
enum brcmnand_cs_reg {
BRCMNAND_CS_CFG_EXT = 0,
BRCMNAND_CS_CFG,
@@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
}
/* Register offsets */
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0701)
+ ctrl->reg_offsets = brcmnand_regs_v71;
+ else if (ctrl->nand_version >= 0x0600)
ctrl->reg_offsets = brcmnand_regs_v60;
else if (ctrl->nand_version >= 0x0500)
ctrl->reg_offsets = brcmnand_regs_v50;
@@ -796,7 +828,8 @@ static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
break;
}
- goto out;
+
+ return layout;
}
/*
@@ -847,10 +880,7 @@ static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
break;
}
-out:
- /* Sum available OOB */
- for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE; i++)
- layout->oobavail += layout->oobfree[i].length;
+
return layout;
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index aa1a616b9fb6..e553aff68987 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -537,7 +537,7 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
return 0;
}
-static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
return 0;
}
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index f170f3c31b34..547c1002941d 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -794,7 +794,7 @@ static int doc200x_dev_ready(struct mtd_info *mtd)
}
}
-static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs)
{
/* This is our last resort if we couldn't find or create a BBT. Just
pretend all blocks are good. */
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index df4165b02c62..d86a60e1bbcb 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -225,7 +225,6 @@ struct docg4_priv {
static struct nand_ecclayout docg4_oobinfo = {
.eccbytes = 9,
.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
- .oobavail = 5,
.oobfree = { {.offset = 2, .length = 5} }
};
@@ -1121,7 +1120,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
return ret;
}
-static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
+static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs)
{
/* only called when module_param ignore_badblocks is set */
return 0;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 235ddcb58f39..8122c699ccf2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1,7 +1,7 @@
/*
* Freescale GPMI NAND Flash Driver
*
- * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -136,7 +136,7 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
*
* We may have available oob space in this case.
*/
-static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
struct nand_chip *chip = &this->nand;
@@ -145,7 +145,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
unsigned int block_mark_bit_offset;
if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
- return false;
+ return -EINVAL;
switch (chip->ecc_step_ds) {
case SZ_512:
@@ -158,19 +158,19 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
dev_err(this->dev,
"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
chip->ecc_strength_ds, chip->ecc_step_ds);
- return false;
+ return -EINVAL;
}
geo->ecc_chunk_size = chip->ecc_step_ds;
geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
if (!gpmi_check_ecc(this))
- return false;
+ return -EINVAL;
/* Keep the C >= O */
if (geo->ecc_chunk_size < mtd->oobsize) {
dev_err(this->dev,
"unsupported nand chip. ecc size: %d, oob size : %d\n",
chip->ecc_step_ds, mtd->oobsize);
- return false;
+ return -EINVAL;
}
/* The default value, see comment in the legacy_set_geometry(). */
@@ -242,7 +242,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+ ALIGN(geo->ecc_chunk_count, 4);
if (!this->swap_block_mark)
- return true;
+ return 0;
/* For bit swap. */
block_mark_bit_offset = mtd->writesize * 8 -
@@ -251,7 +251,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
geo->block_mark_byte_offset = block_mark_bit_offset / 8;
geo->block_mark_bit_offset = block_mark_bit_offset % 8;
- return true;
+ return 0;
}
static int legacy_set_geometry(struct gpmi_nand_data *this)
@@ -285,7 +285,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
geo->ecc_strength = get_ecc_strength(this);
if (!gpmi_check_ecc(this)) {
dev_err(this->dev,
- "required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n",
+ "ecc strength: %d cannot be supported by the controller (%d)\n"
+ "try to use minimum ecc strength that NAND chip required\n",
geo->ecc_strength,
this->devdata->bch_max_ecc_strength);
return -EINVAL;
@@ -366,10 +367,11 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
- if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")
- && set_geometry_by_ecc_info(this))
- return 0;
- return legacy_set_geometry(this);
+ if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
+ || legacy_set_geometry(this))
+ return set_geometry_by_ecc_info(this);
+
+ return 0;
}
struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
@@ -2033,9 +2035,54 @@ static int gpmi_nand_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int gpmi_pm_suspend(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+ release_dma_channels(this);
+ return 0;
+}
+
+static int gpmi_pm_resume(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+ int ret;
+
+ ret = acquire_dma_channels(this);
+ if (ret < 0)
+ return ret;
+
+ /* re-init the GPMI registers */
+ this->flags &= ~GPMI_TIMING_INIT_OK;
+ ret = gpmi_init(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting GPMI : %d\n", ret);
+ return ret;
+ }
+
+ /* re-init the BCH registers */
+ ret = bch_set_geometry(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting BCH : %d\n", ret);
+ return ret;
+ }
+
+ /* re-init others */
+ gpmi_extra_init(this);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops gpmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
+};
+
static struct platform_driver gpmi_nand_driver = {
.driver = {
.name = "gpmi-nand",
+ .pm = &gpmi_pm_ops,
.of_match_table = gpmi_nand_id_table,
},
.probe = gpmi_nand_probe,
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index f8d37f36a81c..96502b624cfb 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -632,7 +632,6 @@ static void hisi_nfc_host_init(struct hinfc_host *host)
}
static struct nand_ecclayout nand_ecc_2K_16bits = {
- .oobavail = 6,
.oobfree = { {2, 6} },
};
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index b19d2a9a5eb9..673ceb2a0b44 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -427,9 +427,6 @@ static int jz_nand_probe(struct platform_device *pdev)
chip->ecc.strength = 4;
chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
- if (pdata)
- chip->ecc.layout = pdata->ecc_layout;
-
chip->chip_delay = 50;
chip->cmd_ctrl = jz_nand_cmd_ctrl;
chip->select_chip = jz_nand_select_chip;
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 9bc435d72a86..d8c3e7afcc0b 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -750,7 +750,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = mtd->writesize;
+ nand_chip->ecc.size = 512;
nand_chip->ecc.layout = &lpc32xx_nand_oob;
host->mlcsubpages = mtd->writesize / 512;
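Fixing ecc.size to 512 makes the ECC step match the subpage granularity the MLC controller actually corrects, consistent with the mlcsubpages computation on the next line:

	/* e.g. a 2048-byte page:
	 *   host->mlcsubpages = 2048 / 512 = 4 ECC steps of 512 bytes each,
	 * whereas ecc.size = mtd->writesize described the page as one step.
	 */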
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 6b93e899d4e9..5d7843ffff6a 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
static int mpc5121_nfc_probe(struct platform_device *op)
{
- struct device_node *rootnode, *dn = op->dev.of_node;
+ struct device_node *dn = op->dev.of_node;
struct clk *clk;
struct device *dev = &op->dev;
struct mpc5121_nfc_prv *prv;
@@ -712,18 +712,15 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip->ecc.mode = NAND_ECC_SOFT;
/* Support external chip-select logic on ADS5121 board */
- rootnode = of_find_node_by_path("/");
- if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+ if (of_machine_is_compatible("fsl,mpc5121ads")) {
retval = ads5121_chipselect_init(mtd);
if (retval) {
dev_err(dev, "Chipselect init error!\n");
- of_node_put(rootnode);
return retval;
}
chip->select_chip = ads5121_select_chip;
}
- of_node_put(rootnode);
/* Enable NFC clock */
clk = devm_clk_get(dev, "ipg");
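of_machine_is_compatible() wraps exactly the root-node boilerplate being removed, including the of_node_put() that the error path previously had to remember; its implementation is approximately:

	int of_machine_is_compatible(const char *compat)
	{
		struct device_node *root = of_find_node_by_path("/");
		int rc = 0;

		if (root) {
			rc = of_device_is_compatible(root, compat);
			of_node_put(root);
		}
		return rc;
	}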
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index f2c8ff398d6c..557b8462f55e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -313,13 +313,12 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
* @mtd: MTD device structure
* @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
*
* Check, if the block is bad.
*/
-static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
- int page, chipnr, res = 0, i = 0;
+ int page, res = 0, i = 0;
struct nand_chip *chip = mtd_to_nand(mtd);
u16 bad;
@@ -328,15 +327,6 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
- if (getchip) {
- chipnr = (int)(ofs >> chip->chip_shift);
-
- nand_get_device(mtd, FL_READING);
-
- /* Select the NAND device */
- chip->select_chip(mtd, chipnr);
- }
-
do {
if (chip->options & NAND_BUSWIDTH_16) {
chip->cmdfunc(mtd, NAND_CMD_READOOB,
@@ -361,11 +351,6 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
i++;
} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
- if (getchip) {
- chip->select_chip(mtd, -1);
- nand_release_device(mtd);
- }
-
return res;
}
@@ -503,19 +488,17 @@ static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
* @mtd: MTD device structure
* @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
* @allowbbt: 1, if its allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
*/
-static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
- int allowbbt)
+static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (!chip->bbt)
- return chip->block_bad(mtd, ofs, getchip);
+ return chip->block_bad(mtd, ofs);
/* Return info from the table */
return nand_isbad_bbt(mtd, ofs, allowbbt);
@@ -566,8 +549,8 @@ void nand_wait_ready(struct mtd_info *mtd)
cond_resched();
} while (time_before(jiffies, timeo));
- pr_warn_ratelimited(
- "timeout while waiting for chip to become ready\n");
+ if (!chip->dev_ready(mtd))
+ pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
out:
led_trigger_event(nand_led_trigger, LED_OFF);
}
@@ -1723,8 +1706,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
- uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
- mtd->oobavail : mtd->oobsize;
+ uint32_t max_oobsize = mtd_oobavail(mtd, ops);
uint8_t *bufpoi, *oob, *buf;
int use_bufpoi;
@@ -2075,10 +2057,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
- if (ops->mode == MTD_OPS_AUTO_OOB)
- len = chip->ecc.layout->oobavail;
- else
- len = mtd->oobsize;
+ len = mtd_oobavail(mtd, ops);
if (unlikely(ops->ooboffs >= len)) {
pr_debug("%s: attempt to start read outside oob\n",
@@ -2575,8 +2554,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
uint32_t writelen = ops->len;
uint32_t oobwritelen = ops->ooblen;
- uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
- mtd->oobavail : mtd->oobsize;
+ uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
@@ -2766,10 +2744,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
pr_debug("%s: to = 0x%08x, len = %i\n",
__func__, (unsigned int)to, (int)ops->ooblen);
- if (ops->mode == MTD_OPS_AUTO_OOB)
- len = chip->ecc.layout->oobavail;
- else
- len = mtd->oobsize;
+ len = mtd_oobavail(mtd, ops);
/* Do not allow write past end of page */
if ((ops->ooboffs + ops->ooblen) > len) {
@@ -2957,7 +2932,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
while (len) {
/* Check if we have a bad block, we do not erase bad blocks! */
if (nand_block_checkbad(mtd, ((loff_t) page) <<
- chip->page_shift, 0, allowbbt)) {
+ chip->page_shift, allowbbt)) {
pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
__func__, page);
instr->state = MTD_ERASE_FAILED;
@@ -3044,7 +3019,20 @@ static void nand_sync(struct mtd_info *mtd)
*/
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
- return nand_block_checkbad(mtd, offs, 1, 0);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(offs >> chip->chip_shift);
+ int ret;
+
+ /* Select the NAND device */
+ nand_get_device(mtd, FL_READING);
+ chip->select_chip(mtd, chipnr);
+
+ ret = nand_block_checkbad(mtd, offs, 0);
+
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ return ret;
}
/**
@@ -4021,7 +4009,6 @@ static int nand_dt_init(struct nand_chip *chip)
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
- * The mtd->owner field must be set to the module of the caller.
*/
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
@@ -4287,10 +4274,8 @@ int nand_scan_tail(struct mtd_info *mtd)
}
/* See nand_bch_init() for details. */
- ecc->bytes = DIV_ROUND_UP(
- ecc->strength * fls(8 * ecc->size), 8);
- ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
- &ecc->layout);
+ ecc->bytes = 0;
+ ecc->priv = nand_bch_init(mtd);
if (!ecc->priv) {
pr_warn("BCH ECC initialization failed!\n");
BUG();
@@ -4325,11 +4310,11 @@ int nand_scan_tail(struct mtd_info *mtd)
* The number of bytes available for a client to place data into
* the out of band area.
*/
- ecc->layout->oobavail = 0;
- for (i = 0; ecc->layout->oobfree[i].length
- && i < ARRAY_SIZE(ecc->layout->oobfree); i++)
- ecc->layout->oobavail += ecc->layout->oobfree[i].length;
- mtd->oobavail = ecc->layout->oobavail;
+ mtd->oobavail = 0;
+ if (ecc->layout) {
+ for (i = 0; ecc->layout->oobfree[i].length; i++)
+ mtd->oobavail += ecc->layout->oobfree[i].length;
+ }
/* ECC sanity check: warn if it's too weak */
if (!nand_ecc_strength_good(mtd))
@@ -4443,19 +4428,12 @@ EXPORT_SYMBOL(nand_scan_tail);
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
int ret;
- /* Many callers got this wrong, so check for it for a while... */
- if (!mtd->owner && caller_is_module()) {
- pr_crit("%s called with NULL mtd->owner!\n", __func__);
- BUG();
- }
-
ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 4b6a7085b442..2fbb523df066 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1373,5 +1373,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
return ret;
}
-
-EXPORT_SYMBOL(nand_scan_bbt);
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index a87c1b628dfc..b585bae37929 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -107,9 +107,6 @@ EXPORT_SYMBOL(nand_bch_correct_data);
/**
* nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
* @mtd: MTD block structure
- * @eccsize: ecc block size in bytes
- * @eccbytes: ecc length in bytes
- * @ecclayout: output default layout
*
* Returns:
* a pointer to a new NAND BCH control structure, or NULL upon failure
@@ -123,14 +120,21 @@ EXPORT_SYMBOL(nand_bch_correct_data);
* @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
* @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
*/
-struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
- struct nand_ecclayout **ecclayout)
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
{
+ struct nand_chip *nand = mtd_to_nand(mtd);
unsigned int m, t, eccsteps, i;
- struct nand_ecclayout *layout;
+ struct nand_ecclayout *layout = nand->ecc.layout;
struct nand_bch_control *nbc = NULL;
unsigned char *erased_page;
+ unsigned int eccsize = nand->ecc.size;
+ unsigned int eccbytes = nand->ecc.bytes;
+ unsigned int eccstrength = nand->ecc.strength;
+
+ if (!eccbytes && eccstrength) {
+ eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
+ nand->ecc.bytes = eccbytes;
+ }
if (!eccsize || !eccbytes) {
printk(KERN_WARNING "ecc parameters not supplied\n");
@@ -158,7 +162,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
eccsteps = mtd->writesize/eccsize;
/* if no ecc placement scheme was provided, build one */
- if (!*ecclayout) {
+ if (!layout) {
/* handle large page devices only */
if (mtd->oobsize < 64) {
@@ -184,7 +188,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
layout->oobfree[0].offset = 2;
layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
- *ecclayout = layout;
+ nand->ecc.layout = layout;
}
/* sanity checks */
@@ -192,7 +196,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
goto fail;
}
- if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
+ if (layout->eccbytes != (eccsteps*eccbytes)) {
printk(KERN_WARNING "invalid ecc layout\n");
goto fail;
}
@@ -216,6 +220,9 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
for (i = 0; i < eccbytes; i++)
nbc->eccmask[i] ^= 0xff;
+ if (!eccstrength)
+ nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
+
return nbc;
fail:
nand_bch_free(nbc);
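The new derivation inside nand_bch_init() reproduces the numbers in the kernel-doc example above:

	/* eccsize = 512, strength t = 4:
	 *   m = fls(8 * 512) = fls(4096) = 13
	 *   eccbytes = DIV_ROUND_UP(4 * 13, 8) = DIV_ROUND_UP(52, 8) = 7
	 * and the reverse mapping when only eccbytes is known:
	 *   strength = (7 * 8) / fls(8 * 512) = 56 / 13 = 4
	 */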
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index a8804a3da076..ccc05f5b2695 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -50,8 +50,8 @@ struct nand_flash_dev nand_flash_ids[] = {
SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
{"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
{ .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
- SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K),
- 4 },
+ SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+ NAND_ECC_INFO(40, SZ_1K), 4 },
LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1fd519503bb1..a58169a28741 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
int i;
for (i = 0; i < ns->held_cnt; i++)
- page_cache_release(ns->held_pages[i]);
+ put_page(ns->held_pages[i]);
}
/* Get page cache pages in advance to provide NOFS memory allocation */
@@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
struct page *page;
struct address_space *mapping = file->f_mapping;
- start_index = pos >> PAGE_CACHE_SHIFT;
- end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+ start_index = pos >> PAGE_SHIFT;
+ end_index = (pos + count - 1) >> PAGE_SHIFT;
if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
return -EINVAL;
ns->held_cnt = 0;
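As with put_page() above, PAGE_CACHE_SHIFT was a straight alias before this series, approximately

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT

so the page-index arithmetic is unchanged.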
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 220ddfcf29f5..dbc5b571c2bb 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -113,7 +113,7 @@ static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
- val = __raw_readl(REG_SMISR);
+ val = __raw_readl(nand->reg + REG_SMISR);
val &= READYBUSY;
spin_unlock(&nand->lock);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c553f78ab83f..0749ca1a1456 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1807,13 +1807,19 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
+ /*
+ * Bail out earlier to let NAND_ECC_SOFT code create its own
+ * ecclayout instead of using ours.
+ */
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ goto scan_tail;
+ }
+
/* populate MTD interface based on ECC scheme */
ecclayout = &info->oobinfo;
+ nand_chip->ecc.layout = ecclayout;
switch (info->ecc_opt) {
- case OMAP_ECC_HAM1_CODE_SW:
- nand_chip->ecc.mode = NAND_ECC_SOFT;
- break;
-
case OMAP_ECC_HAM1_CODE_HW:
pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
nand_chip->ecc.mode = NAND_ECC_HW;
@@ -1861,10 +1867,7 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->oobfree->offset = 1 +
ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd,
- nand_chip->ecc.size,
- nand_chip->ecc.bytes,
- &ecclayout);
+ nand_chip->ecc.priv = nand_bch_init(mtd);
if (!nand_chip->ecc.priv) {
dev_err(&info->pdev->dev, "unable to use BCH library\n");
err = -EINVAL;
@@ -1925,10 +1928,7 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->oobfree->offset = 1 +
ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd,
- nand_chip->ecc.size,
- nand_chip->ecc.bytes,
- &ecclayout);
+ nand_chip->ecc.priv = nand_bch_init(mtd);
if (!nand_chip->ecc.priv) {
dev_err(&info->pdev->dev, "unable to use BCH library\n");
err = -EINVAL;
@@ -2002,9 +2002,6 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW)
- goto scan_tail;
-
/* all OOB bytes from oobfree->offset till end off OOB are free */
ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
/* check if NAND device's OOB is enough to store ECC signatures */
@@ -2015,7 +2012,6 @@ static int omap_nand_probe(struct platform_device *pdev)
err = -EINVAL;
goto return_error;
}
- nand_chip->ecc.layout = ecclayout;
scan_tail:
/* second phase scan */
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index a0e26dea1424..e4e50da30444 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -73,7 +73,6 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.bbt_options |= pdata->chip.bbt_options;
data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
- data->chip.ecc.layout = pdata->chip.ecclayout;
data->chip.ecc.mode = NAND_ECC_SOFT;
platform_set_drvdata(pdev, data);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 86fc245dc71a..d6508856da99 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -131,11 +131,23 @@
#define READ_ID_BYTES 7
/* macros for registers read/write */
-#define nand_writel(info, off, val) \
- writel_relaxed((val), (info)->mmio_base + (off))
-
-#define nand_readl(info, off) \
- readl_relaxed((info)->mmio_base + (off))
+#define nand_writel(info, off, val) \
+ do { \
+ dev_vdbg(&info->pdev->dev, \
+ "%s():%d nand_writel(0x%x, 0x%04x)\n", \
+ __func__, __LINE__, (val), (off)); \
+ writel_relaxed((val), (info)->mmio_base + (off)); \
+ } while (0)
+
+#define nand_readl(info, off) \
+ ({ \
+ unsigned int _v; \
+ _v = readl_relaxed((info)->mmio_base + (off)); \
+ dev_vdbg(&info->pdev->dev, \
+ "%s():%d nand_readl(0x%04x) = 0x%x\n", \
+ __func__, __LINE__, (off), _v); \
+ _v; \
+ })
/* error code and state */
enum {
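Note that the tracing added to nand_writel()/nand_readl() uses dev_vdbg(), which compiles to a no-op unless VERBOSE_DEBUG is defined for this translation unit. One way to enable it (a local build tweak, not part of this patch):

	/* in drivers/mtd/nand/Makefile:
	 *   CFLAGS_pxa3xx_nand.o += -DVERBOSE_DEBUG
	 * or #define VERBOSE_DEBUG at the top of pxa3xx_nand.c,
	 * before the #include block
	 */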
@@ -199,7 +211,6 @@ struct pxa3xx_nand_info {
struct dma_chan *dma_chan;
dma_cookie_t dma_cookie;
int drcmr_dat;
- int drcmr_cmd;
unsigned char *data_buff;
unsigned char *oob_buff;
@@ -222,15 +233,44 @@ struct pxa3xx_nand_info {
int use_spare; /* use spare ? */
int need_wait;
- unsigned int data_size; /* data to be read from FIFO */
- unsigned int chunk_size; /* split commands chunk size */
- unsigned int oob_size;
+ /* Amount of real data per full chunk */
+ unsigned int chunk_size;
+
+ /* Amount of spare data per full chunk */
unsigned int spare_size;
+
+ /* Number of full chunks (i.e chunk_size + spare_size) */
+ unsigned int nfullchunks;
+
+ /*
+ * Total number of chunks. If equal to nfullchunks, then there
+ * are only full chunks. Otherwise, there is one last chunk of
+ * size (last_chunk_size + last_spare_size)
+ */
+ unsigned int ntotalchunks;
+
+ /* Amount of real data in the last chunk */
+ unsigned int last_chunk_size;
+
+ /* Amount of spare data in the last chunk */
+ unsigned int last_spare_size;
+
unsigned int ecc_size;
unsigned int ecc_err_cnt;
unsigned int max_bitflips;
int retcode;
+ /*
+ * Variables only valid during command
+ * execution. step_chunk_size and step_spare_size is the
+ * amount of real data and spare data in the current
+ * chunk. cur_chunk is the current chunk being
+ * read/programmed.
+ */
+ unsigned int step_chunk_size;
+ unsigned int step_spare_size;
+ unsigned int cur_chunk;
+
/* cached register value */
uint32_t reg_ndcr;
uint32_t ndtr0cs0;
@@ -526,25 +566,6 @@ static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
return 0;
}
-/*
- * Set the data and OOB size, depending on the selected
- * spare and ECC configuration.
- * Only applicable to READ0, READOOB and PAGEPROG commands.
- */
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
- struct mtd_info *mtd)
-{
- int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
-
- info->data_size = mtd->writesize;
- if (!oob_enable)
- return;
-
- info->oob_size = info->spare_size;
- if (!info->use_ecc)
- info->oob_size += info->ecc_size;
-}
-
/**
* NOTE: it is a must to set ND_RUN firstly, then write
* command buffer, otherwise, it does not work.
@@ -660,28 +681,28 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
- unsigned int do_bytes = min(info->data_size, info->chunk_size);
-
switch (info->state) {
case STATE_PIO_WRITING:
- writesl(info->mmio_base + NDDB,
- info->data_buff + info->data_buff_pos,
- DIV_ROUND_UP(do_bytes, 4));
+ if (info->step_chunk_size)
+ writesl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(info->step_chunk_size, 4));
- if (info->oob_size > 0)
+ if (info->step_spare_size)
writesl(info->mmio_base + NDDB,
info->oob_buff + info->oob_buff_pos,
- DIV_ROUND_UP(info->oob_size, 4));
+ DIV_ROUND_UP(info->step_spare_size, 4));
break;
case STATE_PIO_READING:
- drain_fifo(info,
- info->data_buff + info->data_buff_pos,
- DIV_ROUND_UP(do_bytes, 4));
+ if (info->step_chunk_size)
+ drain_fifo(info,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(info->step_chunk_size, 4));
- if (info->oob_size > 0)
+ if (info->step_spare_size)
drain_fifo(info,
info->oob_buff + info->oob_buff_pos,
- DIV_ROUND_UP(info->oob_size, 4));
+ DIV_ROUND_UP(info->step_spare_size, 4));
break;
default:
dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -690,9 +711,8 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
}
/* Update buffer pointers for multi-page read/write */
- info->data_buff_pos += do_bytes;
- info->oob_buff_pos += info->oob_size;
- info->data_size -= do_bytes;
+ info->data_buff_pos += info->step_chunk_size;
+ info->oob_buff_pos += info->step_spare_size;
}
static void pxa3xx_nand_data_dma_irq(void *data)
@@ -733,8 +753,9 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
info->state);
BUG();
}
- info->sg.length = info->data_size +
- (info->oob_size ? info->spare_size + info->ecc_size : 0);
+ info->sg.length = info->chunk_size;
+ if (info->use_spare)
+ info->sg.length += info->spare_size + info->ecc_size;
dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
@@ -895,9 +916,11 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
/* reset data and oob column point to handle data */
info->buf_start = 0;
info->buf_count = 0;
- info->oob_size = 0;
info->data_buff_pos = 0;
info->oob_buff_pos = 0;
+ info->step_chunk_size = 0;
+ info->step_spare_size = 0;
+ info->cur_chunk = 0;
info->use_ecc = 0;
info->use_spare = 1;
info->retcode = ERR_NONE;
@@ -909,8 +932,6 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
case NAND_CMD_READ0:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
- case NAND_CMD_READOOB:
- pxa3xx_set_datasize(info, mtd);
break;
case NAND_CMD_PARAM:
info->use_spare = 0;
@@ -969,6 +990,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
if (command == NAND_CMD_READOOB)
info->buf_start += mtd->writesize;
+ if (info->cur_chunk < info->nfullchunks) {
+ info->step_chunk_size = info->chunk_size;
+ info->step_spare_size = info->spare_size;
+ } else {
+ info->step_chunk_size = info->last_chunk_size;
+ info->step_spare_size = info->last_spare_size;
+ }
+
/*
* Multiple page read needs an 'extended command type' field,
* which is either naked-read or last-read according to the
@@ -980,8 +1009,8 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
| NDCB0_LEN_OVRD
| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
- info->ndcb3 = info->chunk_size +
- info->oob_size;
+ info->ndcb3 = info->step_chunk_size +
+ info->step_spare_size;
}
set_command_address(info, mtd->writesize, column, page_addr);
@@ -1001,8 +1030,6 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
| addr_cycle
| command;
- /* No data transfer in this case */
- info->data_size = 0;
exec_cmd = 1;
}
break;
@@ -1014,6 +1041,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
break;
}
+ if (info->cur_chunk < info->nfullchunks) {
+ info->step_chunk_size = info->chunk_size;
+ info->step_spare_size = info->spare_size;
+ } else {
+ info->step_chunk_size = info->last_chunk_size;
+ info->step_spare_size = info->last_spare_size;
+ }
+
/* Second command setting for large pages */
if (mtd->writesize > PAGE_CHUNK_SIZE) {
/*
@@ -1024,14 +1059,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
| NDCB0_LEN_OVRD
| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
- info->ndcb3 = info->chunk_size +
- info->oob_size;
+ info->ndcb3 = info->step_chunk_size +
+ info->step_spare_size;
/*
* This is the command dispatch that completes a chunked
* page program operation.
*/
- if (info->data_size == 0) {
+ if (info->cur_chunk == info->ntotalchunks) {
info->ndcb0 = NDCB0_CMD_TYPE(0x1)
| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
| command;
@@ -1058,7 +1093,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
| command;
info->ndcb1 = (column & 0xFF);
info->ndcb3 = INIT_BUFFER_SIZE;
- info->data_size = INIT_BUFFER_SIZE;
+ info->step_chunk_size = INIT_BUFFER_SIZE;
break;
case NAND_CMD_READID:
@@ -1068,7 +1103,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
| command;
info->ndcb1 = (column & 0xFF);
- info->data_size = 8;
+ info->step_chunk_size = 8;
break;
case NAND_CMD_STATUS:
info->buf_count = 1;
@@ -1076,7 +1111,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
| NDCB0_ADDR_CYC(1)
| command;
- info->data_size = 8;
+ info->step_chunk_size = 8;
break;
case NAND_CMD_ERASE1:
@@ -1217,6 +1252,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
init_completion(&info->dev_ready);
do {
info->state = STATE_PREPARED;
+
exec_cmd = prepare_set_command(info, command, ext_cmd_type,
column, page_addr);
if (!exec_cmd) {
@@ -1236,22 +1272,30 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
break;
}
+ /* Only a few commands need several steps */
+ if (command != NAND_CMD_PAGEPROG &&
+ command != NAND_CMD_READ0 &&
+ command != NAND_CMD_READOOB)
+ break;
+
+ info->cur_chunk++;
+
/* Check if the sequence is complete */
- if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+ if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
break;
/*
* After a split program command sequence has issued
* the command dispatch, the command sequence is complete.
*/
- if (info->data_size == 0 &&
+ if (info->cur_chunk == (info->ntotalchunks + 1) &&
command == NAND_CMD_PAGEPROG &&
ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
break;
if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
/* Last read: issue a 'last naked read' */
- if (info->data_size == info->chunk_size)
+ if (info->cur_chunk == info->ntotalchunks - 1)
ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
else
ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
@@ -1261,7 +1305,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
* the command dispatch must be issued to complete.
*/
} else if (command == NAND_CMD_PAGEPROG &&
- info->data_size == 0) {
+ info->cur_chunk == info->ntotalchunks) {
ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
}
} while (1);
@@ -1506,6 +1550,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
int strength, int ecc_stepsize, int page_size)
{
if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+ info->nfullchunks = 1;
+ info->ntotalchunks = 1;
info->chunk_size = 2048;
info->spare_size = 40;
info->ecc_size = 24;
@@ -1514,6 +1560,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
ecc->strength = 1;
} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+ info->nfullchunks = 1;
+ info->ntotalchunks = 1;
info->chunk_size = 512;
info->spare_size = 8;
info->ecc_size = 8;
@@ -1527,6 +1575,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
*/
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
info->ecc_bch = 1;
+ info->nfullchunks = 1;
+ info->ntotalchunks = 1;
info->chunk_size = 2048;
info->spare_size = 32;
info->ecc_size = 32;
@@ -1537,6 +1587,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
info->ecc_bch = 1;
+ info->nfullchunks = 2;
+ info->ntotalchunks = 2;
info->chunk_size = 2048;
info->spare_size = 32;
info->ecc_size = 32;
@@ -1551,8 +1603,12 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
*/
} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
info->ecc_bch = 1;
+ info->nfullchunks = 4;
+ info->ntotalchunks = 5;
info->chunk_size = 1024;
info->spare_size = 0;
+ info->last_chunk_size = 0;
+ info->last_spare_size = 64;
info->ecc_size = 32;
ecc->mode = NAND_ECC_HW;
ecc->size = info->chunk_size;
@@ -1738,7 +1794,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
if (ret < 0)
return ret;
- if (use_dma) {
+ if (!np && use_dma) {
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (r == NULL) {
dev_err(&pdev->dev,
@@ -1747,15 +1803,6 @@ static int alloc_nand_resource(struct platform_device *pdev)
goto fail_disable_clk;
}
info->drcmr_dat = r->start;
-
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (r == NULL) {
- dev_err(&pdev->dev,
- "no resource defined for cmd DMA\n");
- ret = -ENXIO;
- goto fail_disable_clk;
- }
- info->drcmr_cmd = r->start;
}
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
new file mode 100644
index 000000000000..f550a57e6eea
--- /dev/null
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -0,0 +1,2223 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
+#include <linux/delay.h>
+
+/* NANDc reg offsets */
+#define NAND_FLASH_CMD 0x00
+#define NAND_ADDR0 0x04
+#define NAND_ADDR1 0x08
+#define NAND_FLASH_CHIP_SELECT 0x0c
+#define NAND_EXEC_CMD 0x10
+#define NAND_FLASH_STATUS 0x14
+#define NAND_BUFFER_STATUS 0x18
+#define NAND_DEV0_CFG0 0x20
+#define NAND_DEV0_CFG1 0x24
+#define NAND_DEV0_ECC_CFG 0x28
+#define NAND_DEV1_ECC_CFG 0x2c
+#define NAND_DEV1_CFG0 0x30
+#define NAND_DEV1_CFG1 0x34
+#define NAND_READ_ID 0x40
+#define NAND_READ_STATUS 0x44
+#define NAND_DEV_CMD0 0xa0
+#define NAND_DEV_CMD1 0xa4
+#define NAND_DEV_CMD2 0xa8
+#define NAND_DEV_CMD_VLD 0xac
+#define SFLASHC_BURST_CFG 0xe0
+#define NAND_ERASED_CW_DETECT_CFG 0xe8
+#define NAND_ERASED_CW_DETECT_STATUS 0xec
+#define NAND_EBI2_ECC_BUF_CFG 0xf0
+#define FLASH_BUF_ACC 0x100
+
+#define NAND_CTRL 0xf00
+#define NAND_VERSION 0xf08
+#define NAND_READ_LOCATION_0 0xf20
+#define NAND_READ_LOCATION_1 0xf24
+
+/* dummy register offsets, used by write_reg_dma */
+#define NAND_DEV_CMD1_RESTORE 0xdead
+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define PAGE_ACC BIT(4)
+#define LAST_PAGE BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define NAND_DEV_SEL 0
+#define DM_EN BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define FS_OP_ERR BIT(4)
+#define FS_READY_BSY_N BIT(5)
+#define FS_MPU_ERR BIT(8)
+#define FS_DEVICE_STS_ERR BIT(16)
+#define FS_DEVICE_WP BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define BS_UNCORRECTABLE_BIT BIT(8)
+#define BS_CORRECTABLE_ERR_MSK 0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE 6
+#define UD_SIZE_BYTES 9
+#define ECC_PARITY_SIZE_BYTES_RS 19
+#define SPARE_SIZE_BYTES 23
+#define NUM_ADDR_CYCLES 27
+#define STATUS_BFR_READ 30
+#define SET_RD_MODE_AFTER_STATUS 31
+
+/* NAND_DEVn_CFG0 bits */
+#define DEV0_CFG1_ECC_DISABLE 0
+#define WIDE_FLASH 1
+#define NAND_RECOVERY_CYCLES 2
+#define CS_ACTIVE_BSY 5
+#define BAD_BLOCK_BYTE_NUM 6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP 17
+#define ENABLE_BCH_ECC 27
+
+/* NAND_DEV0_ECC_CFG bits */
+#define ECC_CFG_ECC_DISABLE 0
+#define ECC_SW_RESET 1
+#define ECC_MODE 4
+#define ECC_PARITY_SIZE_BYTES_BCH 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_FORCE_CLK_OPEN 30
+
+/* NAND_DEV_CMD1 bits */
+#define READ_ADDR 0
+
+/* NAND_DEV_CMD_VLD bits */
+#define READ_START_VLD 0
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define NUM_STEPS 0
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define PAGE_ALL_ERASED BIT(7)
+#define CODEWORD_ALL_ERASED BIT(6)
+#define PAGE_ERASED BIT(5)
+#define CODEWORD_ERASED BIT(4)
+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* Version Mask */
+#define NAND_VERSION_MAJOR_MASK 0xf0000000
+#define NAND_VERSION_MAJOR_SHIFT 28
+#define NAND_VERSION_MINOR_MASK 0x0fff0000
+#define NAND_VERSION_MINOR_SHIFT 16
+
+/* NAND OP_CMDs */
+#define PAGE_READ 0x2
+#define PAGE_READ_WITH_ECC 0x3
+#define PAGE_READ_WITH_ECC_SPARE 0x4
+#define PROGRAM_PAGE 0x6
+#define PAGE_PROGRAM_WITH_ECC 0x7
+#define PROGRAM_PAGE_SPARE 0x9
+#define BLOCK_ERASE 0xa
+#define FETCH_ID 0xb
+#define RESET_DEVICE 0xd
+
+/*
+ * the NAND controller performs reads/writes with ECC in 516 byte chunks
+ * (512 bytes of main data plus 4 bytes of ECC-protected spare data). the
+ * driver calls the chunks 'step' or 'codeword' interchangeably
+ */
+#define NANDC_STEP_SIZE 512
+
+/*
+ * the largest page size we support is 8K; this will have 16 steps/codewords
+ * of 512 bytes each
+ */
+#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
+
+/* we read at most 3 registers per codeword scan */
+#define MAX_REG_RD (3 * MAX_NUM_STEPS)
+
+/* ECC modes supported by the controller */
+#define ECC_NONE BIT(0)
+#define ECC_RS_4BIT BIT(1)
+#define ECC_BCH_4BIT BIT(2)
+#define ECC_BCH_8BIT BIT(3)
+
+struct desc_info {
+ struct list_head node;
+
+ enum dma_data_direction dir;
+ struct scatterlist sgl;
+ struct dma_async_tx_descriptor *dma_desc;
+};
+
+/*
+ * holds the current register values that we want to write. acts as a contiguous
+ * chunk of memory which we use to write the controller registers through DMA.
+ */
+struct nandc_regs {
+ __le32 cmd;
+ __le32 addr0;
+ __le32 addr1;
+ __le32 chip_sel;
+ __le32 exec;
+
+ __le32 cfg0;
+ __le32 cfg1;
+ __le32 ecc_bch_cfg;
+
+ __le32 clrflashstatus;
+ __le32 clrreadstatus;
+
+ __le32 cmd1;
+ __le32 vld;
+
+ __le32 orig_cmd1;
+ __le32 orig_vld;
+
+ __le32 ecc_buf_cfg;
+};
+
+/*
+ * NAND controller data struct
+ *
+ * @controller: base controller structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ * @dev: parent device
+ * @base: MMIO base
+ * @base_dma: physical base address of controller registers
+ * @core_clk: controller clock
+ * @aon_clk: always-on controller clock
+ *
+ * @chan: dma channel
+ * @cmd_crci: ADM DMA CRCI for command flow control
+ * @data_crci: ADM DMA CRCI for data flow control
+ * @desc_list: DMA descriptor list (list of desc_infos)
+ *
+ * @data_buffer: our local DMA buffer for page read/writes,
+ * used when we can't use the buffer provided
+ * by upper layers directly
+ * @buf_size/count/start: markers for chip->read_buf/write_buf functions
+ * @reg_read_buf: local buffer for reading back registers via DMA
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes. contains the register values to be
+ * written to controller
+ * @cmd1/vld: some fixed controller register values
+ * @ecc_modes: ECC modes supported by the current controller,
+ * initialized via DT match data
+ */
+struct qcom_nand_controller {
+ struct nand_hw_control controller;
+ struct list_head host_list;
+
+ struct device *dev;
+
+ void __iomem *base;
+ dma_addr_t base_dma;
+
+ struct clk *core_clk;
+ struct clk *aon_clk;
+
+ struct dma_chan *chan;
+ unsigned int cmd_crci;
+ unsigned int data_crci;
+ struct list_head desc_list;
+
+ u8 *data_buffer;
+ int buf_size;
+ int buf_count;
+ int buf_start;
+
+ __le32 *reg_read_buf;
+ int reg_read_pos;
+
+ struct nandc_regs *regs;
+
+ u32 cmd1, vld;
+ u32 ecc_modes;
+};
+
+/*
+ * NAND chip structure
+ *
+ * @chip: base NAND chip structure
+ * @node: list node to add itself to host_list in
+ * qcom_nand_controller
+ *
+ * @cs: chip select value for this chip
+ * @cw_size: the number of bytes in a single step/codeword
+ * of a page, consisting of all data, ecc, spare
+ * and reserved bytes
+ * @cw_data: the number of bytes within a codeword protected
+ * by ECC
+ * @use_ecc: request the controller to use ECC for the
+ * upcoming read/write
+ * @bch_enabled: flag to tell whether BCH ECC mode is used
+ * @ecc_bytes_hw: ECC bytes used by controller hardware for this
+ * chip
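+ * @spare_bytes: unused OOB bytes per codeword, grouped with
+ * the ECC bytes in the OOB layout
+ * @bbm_size: bad block marker bytes per codeword (1 on x8,
+ * 2 on x16 devices)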
+ * @status: value to be returned if NAND_CMD_STATUS command
+ * is executed
+ * @last_command: keeps track of last command on this chip. used
+ * for reading correct status
+ *
+ * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
+ * ecc/non-ecc mode for the current nand flash
+ * device
+ */
+struct qcom_nand_host {
+ struct nand_chip chip;
+ struct list_head node;
+
+ int cs;
+ int cw_size;
+ int cw_data;
+ bool use_ecc;
+ bool bch_enabled;
+ int ecc_bytes_hw;
+ int spare_bytes;
+ int bbm_size;
+ u8 status;
+ int last_command;
+
+ u32 cfg0, cfg1;
+ u32 cfg0_raw, cfg1_raw;
+ u32 ecc_buf_cfg;
+ u32 ecc_bch_cfg;
+ u32 clrflashstatus;
+ u32 clrreadstatus;
+};
+
+static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+{
+ return container_of(chip, struct qcom_nand_host, chip);
+}
+
+static inline struct qcom_nand_controller *
+get_qcom_nand_controller(struct nand_chip *chip)
+{
+ return container_of(chip->controller, struct qcom_nand_controller,
+ controller);
+}
+
+static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+{
+ return ioread32(nandc->base + offset);
+}
+
+static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
+{
+ iowrite32(val, nandc->base + offset);
+}
+
+static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+{
+ switch (offset) {
+ case NAND_FLASH_CMD:
+ return &regs->cmd;
+ case NAND_ADDR0:
+ return &regs->addr0;
+ case NAND_ADDR1:
+ return &regs->addr1;
+ case NAND_FLASH_CHIP_SELECT:
+ return &regs->chip_sel;
+ case NAND_EXEC_CMD:
+ return &regs->exec;
+ case NAND_FLASH_STATUS:
+ return &regs->clrflashstatus;
+ case NAND_DEV0_CFG0:
+ return &regs->cfg0;
+ case NAND_DEV0_CFG1:
+ return &regs->cfg1;
+ case NAND_DEV0_ECC_CFG:
+ return &regs->ecc_bch_cfg;
+ case NAND_READ_STATUS:
+ return &regs->clrreadstatus;
+ case NAND_DEV_CMD1:
+ return &regs->cmd1;
+ case NAND_DEV_CMD1_RESTORE:
+ return &regs->orig_cmd1;
+ case NAND_DEV_CMD_VLD:
+ return &regs->vld;
+ case NAND_DEV_CMD_VLD_RESTORE:
+ return &regs->orig_vld;
+ case NAND_EBI2_ECC_BUF_CFG:
+ return &regs->ecc_buf_cfg;
+ default:
+ return NULL;
+ }
+}
+
+static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
+{
+ struct nandc_regs *regs = nandc->regs;
+ __le32 *reg;
+
+ reg = offset_to_nandc_reg(regs, offset);
+
+ if (reg)
+ *reg = cpu_to_le32(val);
+}
+
+/*
+ * helper to configure address register values. the column is halved for
+ * x16 devices, since the flash is then addressed in 16-bit words; e.g.
+ * set_address(host, 0, page) targets the beginning of a page.
+ */
+static void set_address(struct qcom_nand_host *host, u16 column, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+
+ nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
+ nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
+}
+
+/*
+ * update_rw_regs: set up read/write register values, these will be
+ * written to the NAND controller registers via DMA
+ *
+ * @num_cw: number of steps for the read/write operation
+ * @read: read or write operation
+ */
+static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+
+ if (read) {
+ if (host->use_ecc)
+ cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ else
+ cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
+ } else {
+ cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ }
+
+ if (host->use_ecc) {
+ cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+
+ cfg1 = host->cfg1;
+ ecc_bch_cfg = host->ecc_bch_cfg;
+ } else {
+ cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+
+ cfg1 = host->cfg1_raw;
+ ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ }
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
+ nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
+ nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
+ nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
+ nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+ nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+ nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+}
+
+static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
+{
+ struct desc_info *desc;
+ struct dma_async_tx_descriptor *dma_desc;
+ struct scatterlist *sgl;
+ struct dma_slave_config slave_conf;
+ enum dma_transfer_direction dir_eng;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ sgl = &desc->sgl;
+
+ sg_init_one(sgl, vaddr, size);
+
+ if (read) {
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ } else {
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ }
+
+ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+ if (ret == 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memset(&slave_conf, 0x00, sizeof(slave_conf));
+
+ slave_conf.device_fc = flow_control;
+ if (read) {
+ slave_conf.src_maxburst = 16;
+ slave_conf.src_addr = nandc->base_dma + reg_off;
+ slave_conf.slave_id = nandc->data_crci;
+ } else {
+ slave_conf.dst_maxburst = 16;
+ slave_conf.dst_addr = nandc->base_dma + reg_off;
+ slave_conf.slave_id = nandc->cmd_crci;
+ }
+
+ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+ if (ret) {
+ dev_err(nandc->dev, "failed to configure dma channel\n");
+ goto err;
+ }
+
+ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failed to prepare desc\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+err:
+ kfree(desc);
+
+ return ret;
+}
+
+/*
+ * read_reg_dma: prepares a descriptor to read a given number of
+ * contiguous registers to the reg_read_buf pointer
+ *
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to read
+ */
+static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs)
+{
+ bool flow_control = false;
+ void *vaddr;
+ int size;
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
+
+ size = num_regs * sizeof(u32);
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
+
+ return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
+}
+
+/*
+ * write_reg_dma: prepares a descriptor to write a given number of
+ * contiguous registers
+ *
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to write
+ */
+static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs)
+{
+ bool flow_control = false;
+ struct nandc_regs *regs = nandc->regs;
+ void *vaddr;
+ int size;
+
+ vaddr = offset_to_nandc_reg(regs, first);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
+
+ if (first == NAND_DEV_CMD1_RESTORE)
+ first = NAND_DEV_CMD1;
+
+ if (first == NAND_DEV_CMD_VLD_RESTORE)
+ first = NAND_DEV_CMD_VLD;
+
+ size = num_regs * sizeof(u32);
+
+ return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
+}
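+
+/*
+ * note: multi-register reads/writes rely on the fields of struct
+ * nandc_regs mirroring the controller's register layout, so e.g.
+ * write_reg_dma(nandc, NAND_FLASH_CMD, 3) pushes cmd, addr0 and addr1
+ * with a single descriptor.
+ */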
+
+/*
+ * read_data_dma: prepares a DMA descriptor to transfer data from the
+ * controller's internal buffer to the buffer 'vaddr'
+ *
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ */
+static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size)
+{
+ return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
+}
+
+/*
+ * write_data_dma: prepares a DMA descriptor to transfer data from
+ * 'vaddr' to the controller's internal buffer
+ *
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to read from
+ * @size: DMA transaction size in bytes
+ */
+static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size)
+{
+ return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
+}
+
+/*
+ * helper to prepare dma descriptors to configure registers needed for reading a
+ * codeword/step in a page
+ */
+static void config_cw_read(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
+
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
+ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
+}
+
+/*
+ * helpers to prepare dma descriptors used to configure registers needed for
+ * writing a codeword/step in a page
+ */
+static void config_cw_write_pre(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
+}
+
+static void config_cw_write_post(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+ write_reg_dma(nandc, NAND_READ_STATUS, 1);
+}
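+
+/*
+ * a codeword write is thus: config_cw_write_pre() to queue the command
+ * and config registers, write_data_dma() for the payload, then
+ * config_cw_write_post() to trigger NAND_EXEC_CMD, read back
+ * NAND_FLASH_STATUS and clear the status registers.
+ */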
+
+/*
+ * the following functions are used within chip->cmdfunc() to perform different
+ * NAND_CMD_* commands
+ */
+
+/* sets up descriptors for NAND_CMD_PARAM */
+static int nandc_param(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ /*
+ * NAND_CMD_PARAM is called before we know much about the FLASH chip
+ * in use. we configure the controller to perform a raw read of 512
+ * bytes to read onfi params
+ */
+ nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
+ nandc_set_reg(nandc, NAND_ADDR0, 0);
+ nandc_set_reg(nandc, NAND_ADDR1, 0);
+ nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+ | 512 << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES);
+ nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | 0 << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE);
+ nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+
+ /* configure CMD1 and VLD for ONFI param probing */
+ nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
+ (nandc->vld & ~(1 << READ_START_VLD))
+ | 0 << READ_START_VLD);
+ nandc_set_reg(nandc, NAND_DEV_CMD1,
+ (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
+
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+ nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
+ write_reg_dma(nandc, NAND_DEV_CMD1, 1);
+
+ nandc->buf_count = 512;
+ memset(nandc->data_buffer, 0xff, nandc->buf_count);
+
+ config_cw_read(nandc);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count);
+
+ /* restore CMD1 and VLD regs */
+ write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
+
+ return 0;
+}
+
+/* sets up descriptors for NAND_CMD_ERASE1 */
+static int erase_block(struct qcom_nand_host *host, int page_addr)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD,
+ BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
+ nandc_set_reg(nandc, NAND_ADDR0, page_addr);
+ nandc_set_reg(nandc, NAND_ADDR1, 0);
+ nandc_set_reg(nandc, NAND_DEV0_CFG0,
+ host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+ nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+ nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+ write_reg_dma(nandc, NAND_READ_STATUS, 1);
+
+ return 0;
+}
+
+/* sets up descriptors for NAND_CMD_READID */
+static int read_id(struct qcom_nand_host *host, int column)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (column == -1)
+ return 0;
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
+ nandc_set_reg(nandc, NAND_ADDR0, column);
+ nandc_set_reg(nandc, NAND_ADDR1, 0);
+ nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 4);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_READ_ID, 1);
+
+ return 0;
+}
+
+/* sets up descriptors for NAND_CMD_RESET */
+static int reset(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+
+ return 0;
+}
+
+/* helpers to submit/free our list of dma descriptors */
+static int submit_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc;
+ dma_cookie_t cookie = 0;
+
+ list_for_each_entry(desc, &nandc->desc_list, node)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void free_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc, *n;
+
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+ dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
+ kfree(desc);
+ }
+}
+
+/* reset the register read buffer for next NAND operation */
+static void clear_read_regs(struct qcom_nand_controller *nandc)
+{
+ nandc->reg_read_pos = 0;
+ memset(nandc->reg_read_buf, 0,
+ MAX_REG_RD * sizeof(*nandc->reg_read_buf));
+}
+
+static void pre_command(struct qcom_nand_host *host, int command)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+ host->last_command = command;
+
+ clear_read_regs(nandc);
+}
+
+/*
+ * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
+ * privately maintained status byte; this status byte can be read after
+ * NAND_CMD_STATUS is called
+ */
+static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int num_cw;
+ int i;
+
+ num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
+
+ for (i = 0; i < num_cw; i++) {
+ u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash_status & FS_MPU_ERR)
+ host->status &= ~NAND_STATUS_WP;
+
+ if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
+ (flash_status &
+ FS_DEVICE_STS_ERR)))
+ host->status |= NAND_STATUS_FAIL;
+ }
+}
+
+static void post_command(struct qcom_nand_host *host, int command)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ switch (command) {
+ case NAND_CMD_READID:
+ memcpy(nandc->data_buffer, nandc->reg_read_buf,
+ nandc->buf_count);
+ break;
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ parse_erase_write_errors(host, command);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Implements chip->cmdfunc. It's only used for a limited set of commands.
+ * The rest of the commands wouldn't be called by upper layers. For example,
+ * NAND_CMD_READOOB would never be called because we have our own versions
+ * of read_oob ops for nand_ecc_ctrl.
+ */
+static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ bool wait = false;
+ int ret = 0;
+
+ pre_command(host, command);
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ ret = reset(host);
+ wait = true;
+ break;
+
+ case NAND_CMD_READID:
+ nandc->buf_count = 4;
+ ret = read_id(host, column);
+ wait = true;
+ break;
+
+ case NAND_CMD_PARAM:
+ ret = nandc_param(host);
+ wait = true;
+ break;
+
+ case NAND_CMD_ERASE1:
+ ret = erase_block(host, page_addr);
+ wait = true;
+ break;
+
+ case NAND_CMD_READ0:
+ /* we read the entire page for now */
+ WARN_ON(column != 0);
+
+ host->use_ecc = true;
+ set_address(host, 0, page_addr);
+ update_rw_regs(host, ecc->steps, true);
+ break;
+
+ case NAND_CMD_SEQIN:
+ WARN_ON(column != 0);
+ set_address(host, 0, page_addr);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_NONE:
+ default:
+ break;
+ }
+
+ if (ret) {
+ dev_err(nandc->dev, "failure executing command %d\n",
+ command);
+ free_descs(nandc);
+ return;
+ }
+
+ if (wait) {
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev,
+ "failure submitting descs for command %d\n",
+ command);
+ }
+
+ free_descs(nandc);
+
+ post_command(host, command);
+}
+
+/*
+ * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+ * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
+ *
+ * when using RS ECC, the HW reports the same errors when reading an erased CW,
+ * but it notifies that it is an erased CW by placing special characters at
+ * certain offsets in the buffer.
+ *
+ * verify if the page is erased or not, and fix up the page for RS ECC by
+ * replacing the special characters with 0xff.
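+ *
+ * e.g. a fully erased RS codeword reads back as all 0xffs except for a
+ * 0x54 marker at offset 3 or 175; after the fixup the chunk is all
+ * 0xffs and the function returns true.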
+ */
+static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
+{
+ u8 empty1, empty2;
+
+ /*
+ * an erased page flags an error in NAND_FLASH_STATUS; check if the page
+ * is erased by looking for 0x54s at offsets 3 and 175 from the
+ * beginning of each codeword
+ */
+
+ empty1 = data_buf[3];
+ empty2 = data_buf[175];
+
+ /*
+ * if the erased codeword markers exist, override them with
+ * 0xffs
+ */
+ if ((empty1 == 0x54 && empty2 == 0xff) ||
+ (empty1 == 0xff && empty2 == 0x54)) {
+ data_buf[3] = 0xff;
+ data_buf[175] = 0xff;
+ }
+
+ /*
+ * check if the entire chunk contains 0xffs or not. if it doesn't, then
+ * restore the original values at the special offsets
+ */
+ if (memchr_inv(data_buf, 0xff, data_len)) {
+ data_buf[3] = empty1;
+ data_buf[175] = empty2;
+
+ return false;
+ }
+
+ return true;
+}
+
+struct read_stats {
+ __le32 flash;
+ __le32 buffer;
+ __le32 erased_cw;
+};
+
+/*
+ * reads back status registers set by the controller to notify page read
+ * errors. this is equivalent to what 'ecc->correct()' would do.
+ */
+static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int max_bitflips = 0;
+ struct read_stats *buf;
+ int i;
+
+ buf = (struct read_stats *)nandc->reg_read_buf;
+
+ for (i = 0; i < ecc->steps; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+ int data_len, oob_len;
+
+ if (i == (ecc->steps - 1)) {
+ data_len = ecc->size - ((ecc->steps - 1) << 2);
+ oob_len = ecc->steps << 2;
+ } else {
+ data_len = host->cw_data;
+ oob_len = 0;
+ }
+
+ flash = le32_to_cpu(buf->flash);
+ buffer = le32_to_cpu(buf->buffer);
+ erased_cw = le32_to_cpu(buf->erased_cw);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ bool erased;
+
+ /* ignore erased codeword errors */
+ if (host->bch_enabled) {
+ erased = (erased_cw & ERASED_CW) == ERASED_CW ?
+ true : false;
+ } else {
+ erased = erased_chunk_check_and_fixup(data_buf,
+ data_len);
+ }
+
+ if (erased) {
+ data_buf += data_len;
+ if (oob_buf)
+ oob_buf += oob_len + ecc->bytes;
+ continue;
+ }
+
+ if (buffer & BS_UNCORRECTABLE_BIT) {
+ int ret, ecclen, extraooblen;
+ void *eccbuf;
+
+ eccbuf = oob_buf ? oob_buf + oob_len : NULL;
+ ecclen = oob_buf ? host->ecc_bytes_hw : 0;
+ extraooblen = oob_buf ? oob_len : 0;
+
+ /*
+ * make sure it isn't an erased page reported
+ * as not-erased by HW because of a few bitflips
+ */
+ ret = nand_check_erased_ecc_chunk(data_buf,
+ data_len, eccbuf, ecclen, oob_buf,
+ extraooblen, ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips =
+ max_t(unsigned int, max_bitflips, ret);
+ }
+ }
+ } else {
+ unsigned int stat;
+
+ stat = buffer & BS_CORRECTABLE_ERR_MSK;
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max(max_bitflips, stat);
+ }
+
+ data_buf += data_len;
+ if (oob_buf)
+ oob_buf += oob_len + ecc->bytes;
+ }
+
+ return max_bitflips;
+}
+
+/*
+ * helper to perform the actual page read operation, used by ecc->read_page(),
+ * ecc->read_oob()
+ */
+static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, ret;
+
+ /* queue cmd descs for each codeword */
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size, oob_size;
+
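+ /*
+ * e.g. on a 2K page (4 steps), codewords 1-3 each carry 516
+ * data bytes; the last carries 500 data bytes plus the 16 byte
+ * free OOB region within its 516 ECC-protected bytes.
+ */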
+ if (i == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size = host->cw_data;
+ oob_size = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ config_cw_read(nandc);
+
+ if (data_buf)
+ read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size);
+
+ /*
+ * when ecc is enabled, the controller doesn't read the real
+ * or dummy bad block markers in each chunk. To maintain a
+ * consistent layout across RAW and ECC reads, we just
+ * leave the real/dummy BBM offsets empty (i.e., filled with
+ * 0xffs)
+ */
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < host->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size);
+ }
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to read page/oob\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/*
+ * a helper that copies the last step/codeword of a page (containing free oob)
+ * into our local buffer
+ */
+static int copy_last_cw(struct qcom_nand_host *host, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int size;
+ int ret;
+
+ clear_read_regs(nandc);
+
+ size = host->use_ecc ? host->cw_data : host->cw_size;
+
+ /* prepare a clean read buffer */
+ memset(nandc->data_buffer, 0xff, size);
+
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, true);
+
+ config_cw_read(nandc);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failed to copy last codeword\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/* implements ecc->read_page() */
+static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u8 *data_buf, *oob_buf = NULL;
+ int ret;
+
+ data_buf = buf;
+ oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ ret = read_page_ecc(host, data_buf, oob_buf);
+ if (ret) {
+ dev_err(nandc->dev, "failure to read page\n");
+ return ret;
+ }
+
+ return parse_read_errors(host, data_buf, oob_buf);
+}
+
+/* implements ecc->read_page_raw() */
+static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u8 *data_buf, *oob_buf;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, ret;
+
+ data_buf = buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = false;
+ update_rw_regs(host, ecc->steps, true);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) << 2);
+ oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ config_cw_read(nandc);
+
+ read_data_dma(nandc, reg_off, data_buf, data_size1);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ read_data_dma(nandc, reg_off, oob_buf, oob_size1);
+ reg_off += oob_size1;
+ oob_buf += oob_size1;
+
+ read_data_dma(nandc, reg_off, data_buf, data_size2);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ read_data_dma(nandc, reg_off, oob_buf, oob_size2);
+ oob_buf += oob_size2;
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to read raw page\n");
+
+ free_descs(nandc);
+
+ return 0;
+}
+
+/* implements ecc->read_oob() */
+static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret;
+
+ clear_read_regs(nandc);
+
+ host->use_ecc = true;
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true);
+
+ ret = read_page_ecc(host, NULL, chip->oob_poi);
+ if (ret)
+ dev_err(nandc->dev, "failure to read oob\n");
+
+ return ret;
+}
+
+/* implements ecc->write_page() */
+static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf;
+ int i, ret;
+
+ clear_read_regs(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = true;
+ update_rw_regs(host, ecc->steps, false);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size, oob_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size = host->cw_data;
+ oob_size = ecc->bytes;
+ }
+
+ config_cw_write_pre(nandc);
+
+ write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
+
+ /*
+ * when ECC is enabled, we don't really need to write anything
+ * to oob for the first n - 1 codewords since these oob regions
+ * just contain ECC bytes that are written by the controller
+ * itself. For the last codeword, we skip the bbm positions and
+ * write to the free oob area.
+ */
+ if (i == (ecc->steps - 1)) {
+ oob_buf += host->bbm_size;
+
+ write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size);
+ }
+
+ config_cw_write_post(nandc);
+
+ data_buf += data_size;
+ oob_buf += oob_size;
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to write page\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/* implements ecc->write_page_raw() */
+static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf;
+ int i, ret;
+
+ clear_read_regs(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = false;
+ update_rw_regs(host, ecc->steps, false);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) << 2);
+ oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ config_cw_write_pre(nandc);
+
+ write_data_dma(nandc, reg_off, data_buf, data_size1);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ write_data_dma(nandc, reg_off, oob_buf, oob_size1);
+ reg_off += oob_size1;
+ oob_buf += oob_size1;
+
+ write_data_dma(nandc, reg_off, data_buf, data_size2);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ write_data_dma(nandc, reg_off, oob_buf, oob_size2);
+ oob_buf += oob_size2;
+
+ config_cw_write_post(nandc);
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to write raw page\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/*
+ * implements ecc->write_oob()
+ *
+ * the NAND controller cannot write only data or only oob within a codeword,
+ * since ecc is calculated for the combined codeword. we first copy the
+ * entire contents of the last codeword (data + oob), replace the old oob
+ * with the new one in chip->oob_poi, and then write the entire codeword.
+ * this read-copy-write operation results in a slight performance loss.
+ */
+static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *oob = chip->oob_poi;
+ int free_boff;
+ int data_size, oob_size;
+ int ret, status = 0;
+
+ host->use_ecc = true;
+
+ ret = copy_last_cw(host, page);
+ if (ret)
+ return ret;
+
+ clear_read_regs(nandc);
+
+ /* calculate the data and oob size for the last codeword/step */
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = ecc->steps << 2;
+
+ free_boff = ecc->layout->oobfree[0].offset;
+
+ /* override new oob content to last codeword */
+ memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size);
+
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, false);
+
+ config_cw_write_pre(nandc);
+ write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ data_size + oob_size);
+ config_cw_write_post(nandc);
+
+ ret = submit_descs(nandc);
+
+ free_descs(nandc);
+
+ if (ret) {
+ dev_err(nandc->dev, "failure to write oob\n");
+ return -EIO;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int page, ret, bbpos, bad = 0;
+ u32 flash_status;
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ /*
+ * configure registers for a raw sub page read, the address is set to
+ * the beginning of the last codeword, we don't care about reading ecc
+ * portion of oob. we just want the first few bytes from this codeword
+ * that contains the BBM
+ */
+ host->use_ecc = false;
+
+ ret = copy_last_cw(host, page);
+ if (ret)
+ goto err;
+
+ flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
+
+ if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+ dev_warn(nandc->dev, "error when trying to read BBM\n");
+ goto err;
+ }
+
+ bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
+
+ bad = nandc->data_buffer[bbpos] != 0xff;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
+err:
+ return bad;
+}
+
+static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int page, ret, status = 0;
+
+ clear_read_regs(nandc);
+
+ /*
+ * to mark the block as bad, we flash the entire last codeword with 0s.
+ * we don't care about the rest of the content in the codeword since
+ * we aren't going to use this block again
+ */
+ memset(nandc->data_buffer, 0x00, host->cw_size);
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ /* prepare write */
+ host->use_ecc = false;
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, false);
+
+ config_cw_write_pre(nandc);
+ write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
+ config_cw_write_post(nandc);
+
+ ret = submit_descs(nandc);
+
+ free_descs(nandc);
+
+ if (ret) {
+ dev_err(nandc->dev, "failure to update BBM\n");
+ return -EIO;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * the three functions below implement chip->read_byte(), chip->read_buf()
+ * and chip->write_buf() respectively. these aren't used for
+ * reading/writing page data; they are used for smaller data like
+ * reading id, status, etc.
+ */
+static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u8 *buf = nandc->data_buffer;
+ u8 ret = 0x0;
+
+ if (host->last_command == NAND_CMD_STATUS) {
+ ret = host->status;
+
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ return ret;
+ }
+
+ if (nandc->buf_start < nandc->buf_count)
+ ret = buf[nandc->buf_start++];
+
+ return ret;
+}
+
+static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
+
+ memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
+ nandc->buf_start += real_len;
+}
+
+static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
+
+ memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
+
+ nandc->buf_start += real_len;
+}
+
+/* we support only one external chip for now */
+static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (chipnr <= 0)
+ return;
+
+ dev_warn(nandc->dev, "invalid chip select\n");
+}
+
+/*
+ * NAND controller page layout info
+ *
+ * Layout with ECC enabled:
+ *
+ * |----------------------| |---------------------------------|
+ * | xx.......yy| | *********xx.......yy|
+ * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
+ * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
+ * | xx.......yy| | *********xx.......yy|
+ * |----------------------| |---------------------------------|
+ * codeword 1,2..n-1 codeword n
+ * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Reserved byte(s)
+ *
+ * 2K page: n = 4, spare = 16 bytes
+ * 4K page: n = 8, spare = 32 bytes
+ * 8K page: n = 16, spare = 64 bytes
+ *
+ * the qcom nand controller operates at a sub page/codeword level. each
+ * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
+ * the number of ECC bytes varies based on the ECC strength and the bus width.
+ *
+ * the first n - 1 codewords each contain 516 bytes of user data; the remaining
+ * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
+ * both user data and spare(oobavail) bytes that sum up to 516 bytes.
+ *
+ * When we access a page with ECC enabled, the reserved byte(s) are not
+ * accessible at all. When reading, we fill up these unreadable positions
+ * with 0xffs. When writing, the controller skips writing the inaccessible
+ * bytes.
+ *
+ * Layout with ECC disabled:
+ *
+ * |------------------------------| |---------------------------------------|
+ * | yy xx.......| | bb *********xx.......|
+ * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
+ * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
+ * | yy xx.......| | bb *********xx.......|
+ * |------------------------------| |---------------------------------------|
+ * codeword 1,2..n-1 codeword n
+ * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Dummy Bad Block byte(s)
+ * b = Real Bad Block byte(s)
+ * size1/size2 = function of codeword size and 'n'
+ *
+ * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
+ * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
+ * Block Markers. In the last codeword, this position contains the real BBM.
+ *
+ * In order to have a consistent layout between RAW and ECC modes, we assume
+ * the following OOB layout arrangement:
+ *
+ * |-----------| |--------------------|
+ * |yyxx.......| |bb*********xx.......|
+ * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
+ * |yyxx.......| |bb*********xx.......|
+ * |yyxx.......| |bb*********xx.......|
+ * |-----------| |--------------------|
+ * first n - 1 nth OOB region
+ * OOB regions
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = FREE OOB bytes
+ * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
+ * x = Unused byte(s)
+ * b = Real bad block byte(s) (inaccessible when ECC enabled)
+ *
+ * This layout is read as is when ECC is disabled. When ECC is enabled, the
+ * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
+ * and assumed as 0xffs when we read a page/oob. The ECC, unused and
+ * dummy/real bad block bytes are grouped as ecc bytes in nand_ecclayout (i.e.,
+ * ecc->bytes is the sum of the three).
+ */
+
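+/*
+ * worked example (x8 bus, 4 bit BCH, 2K page): n = 4 codewords of 528
+ * bytes each (516 data + 12 oob, the latter being 7 ecc + 4 spare +
+ * 1 bbm), so ecc->bytes = 12, layout->eccbytes = 4 * 12 = 48 and the
+ * free OOB region is 4 * 4 = 16 bytes at offset 3 * 12 + 1 = 37,
+ * together covering the 64 byte OOB area of a typical 2K page.
+ */
+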
+static struct nand_ecclayout *
+qcom_nand_create_layout(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_ecclayout *layout;
+ int i, j, steps, pos = 0, shift = 0;
+
+ layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL);
+ if (!layout)
+ return NULL;
+
+ steps = mtd->writesize / ecc->size;
+ layout->eccbytes = steps * ecc->bytes;
+
+ layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size;
+ layout->oobfree[0].length = steps << 2;
+
+ /*
+ * the oob bytes in the first n - 1 codewords are all grouped together
+ * in the format:
+ * DUMMY_BBM + UNUSED + ECC
+ */
+ for (i = 0; i < steps - 1; i++) {
+ for (j = 0; j < ecc->bytes; j++)
+ layout->eccpos[pos++] = i * ecc->bytes + j;
+ }
+
+ /*
+ * the oob bytes in the last codeword are grouped in the format:
+ * BBM + FREE OOB + UNUSED + ECC
+ */
+
+ /* fill up the bbm positions */
+ for (j = 0; j < host->bbm_size; j++)
+ layout->eccpos[pos++] = i * ecc->bytes + j;
+
+ /*
+ * fill up the ecc and reserved positions; their indices are offset
+ * by the free oob region
+ */
+ shift = layout->oobfree[0].length + host->bbm_size;
+
+ for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++)
+ layout->eccpos[pos++] = i * ecc->bytes + shift + j;
+
+ return layout;
+}
+
+static int qcom_nand_host_setup(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int cwperpage, bad_block_byte;
+ bool wide_bus;
+ int ecc_mode = 1;
+
+ /*
+ * the controller requires each step consists of 512 bytes of data.
+ * bail out if DT has populated a wrong step size.
+ */
+ if (ecc->size != NANDC_STEP_SIZE) {
+ dev_err(nandc->dev, "invalid ecc size\n");
+ return -EINVAL;
+ }
+
+ wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
+
+ if (ecc->strength >= 8) {
+ /* 8 bit ECC defaults to BCH ECC on all platforms */
+ host->bch_enabled = true;
+ ecc_mode = 1;
+
+ if (wide_bus) {
+ host->ecc_bytes_hw = 14;
+ host->spare_bytes = 0;
+ host->bbm_size = 2;
+ } else {
+ host->ecc_bytes_hw = 13;
+ host->spare_bytes = 2;
+ host->bbm_size = 1;
+ }
+ } else {
+ /*
+ * if the controller supports BCH for 4 bit ECC, the controller
+ * uses fewer bytes for ECC. If RS is used, the ECC data is
+ * always 10 bytes
+ */
+ if (nandc->ecc_modes & ECC_BCH_4BIT) {
+ /* BCH */
+ host->bch_enabled = true;
+ ecc_mode = 0;
+
+ if (wide_bus) {
+ host->ecc_bytes_hw = 8;
+ host->spare_bytes = 2;
+ host->bbm_size = 2;
+ } else {
+ host->ecc_bytes_hw = 7;
+ host->spare_bytes = 4;
+ host->bbm_size = 1;
+ }
+ } else {
+ /* RS */
+ host->ecc_bytes_hw = 10;
+
+ if (wide_bus) {
+ host->spare_bytes = 0;
+ host->bbm_size = 2;
+ } else {
+ host->spare_bytes = 1;
+ host->bbm_size = 1;
+ }
+ }
+ }
+
+ /*
+ * we consider ecc->bytes as the sum of all the non-data content in a
+ * step. It gives us a clean representation of the oob area (even if
+ * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
+ * ECC and 12 bytes for 4 bit ECC (e.g. 7 ecc + 4 spare + 1 bbm on a
+ * x8 bus with 4 bit BCH)
+ */
+ ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
+
+ ecc->read_page = qcom_nandc_read_page;
+ ecc->read_page_raw = qcom_nandc_read_page_raw;
+ ecc->read_oob = qcom_nandc_read_oob;
+ ecc->write_page = qcom_nandc_write_page;
+ ecc->write_page_raw = qcom_nandc_write_page_raw;
+ ecc->write_oob = qcom_nandc_write_oob;
+
+ ecc->mode = NAND_ECC_HW;
+
+ ecc->layout = qcom_nand_create_layout(host);
+ if (!ecc->layout)
+ return -ENOMEM;
+
+ cwperpage = mtd->writesize / ecc->size;
+
+ /*
+ * DATA_UD_BYTES varies based on whether the read/write command protects
+ * spare data with ECC too. We protect spare data by default, so we set
+ * it to main + spare data, which are 512 and 4 bytes respectively.
+ */
+ host->cw_data = 516;
+
+ /*
+ * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
+ * for 8 bit ECC
+ */
+ host->cw_size = host->cw_data + ecc->bytes;
+
+ if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
+ dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
+ return -EINVAL;
+ }
+
+ bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
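+ /*
+ * e.g. 2048 - 528 * 3 + 1 = 465 for the 2K page, 4 bit BCH, x8
+ * example above; the factory BBM itself sits at byte 464 of the
+ * raw page's last codeword (raw page offset 2048), so the
+ * register field appears to count from 1.
+ */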
+
+ host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+ | host->cw_data << UD_SIZE_BYTES
+ | 0 << DISABLE_STATUS_AFTER_WRITE
+ | 5 << NUM_ADDR_CYCLES
+ | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+ | 0 << STATUS_BFR_READ
+ | 1 << SET_RD_MODE_AFTER_STATUS
+ | host->spare_bytes << SPARE_SIZE_BYTES;
+
+ host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | bad_block_byte << BAD_BLOCK_BYTE_NUM
+ | 0 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | wide_bus << WIDE_FLASH
+ | host->bch_enabled << ENABLE_BCH_ECC;
+
+ host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+ | host->cw_size << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES;
+
+ host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | wide_bus << WIDE_FLASH
+ | 1